repo_name
stringlengths 7
94
| repo_path
stringlengths 4
237
| repo_head_hexsha
stringlengths 40
40
| content
stringlengths 10
680k
| apis
stringlengths 2
840k
|
---|---|---|---|---|
jeremyng123/pytm | pytm/__init__.py | e0bbbbbcfa387887753e27f78678c6004edf0e85 | __all__ = ['Element', 'Server', 'ExternalEntity', 'Datastore', 'Actor', 'Process', 'SetOfProcesses', 'Dataflow', 'Boundary', 'TM', 'Action', 'Lambda', 'Threat']
from .pytm import Element, Server, ExternalEntity, Dataflow, Datastore, Actor, Process, SetOfProcesses, Boundary, TM, Action, Lambda, Threat
| [] |
6un9-h0-Dan/malchive | malchive/utilities/comguidtoyara.py | 1d150430559a307cdfee49d47799c95caea47415 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright(c) 2021 The MITRE Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at:
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import os
import sys
import struct
import binascii
import logging
import argparse
import progressbar
from datetime import datetime
from Registry import Registry
__version__ = "1.0.0"
__author__ = "Jason Batchelor"
log = logging.getLogger(__name__)
def iid_text_to_bin(iid):
"""
Process an IID and convert to a YARA compliant search string.
Below describes the GUID structure used to describe an identifier
for a MAPI interface:
https://msdn.microsoft.com/en-us/library/office/cc815892.aspx
:param str iid: Name of the IID to convert
:return: bin_yara
:rtype: str
"""
# remove begin and end brackets
guid = re.sub('[{}-]', '', iid)
# convert to binary representation
bin_struc = struct.unpack("IHH8B", binascii.a2b_hex(guid))
bin_str = '%.8X%.4X%.4X%s' % \
(bin_struc[0], bin_struc[1], bin_struc[2],
(''.join('{:02X}'.format(x) for x in bin_struc[3:])))
# create YARA compliant search string
bin_yara = '{ ' + ' '.join(a + b for a, b in
zip(bin_str[::2], bin_str[1::2])) + ' }'
return bin_yara
def enumerate_com_interfaces(reg_keys, show_bar=False):
"""
Iterate through registry keys and retrieve unique interface identifiers
and their name.
:param list reg_keys: List of registry key objects from python-registry
module.
:param bool show_bar: Show progressbar as subfiles are identified.
:param bytes buff: File to look for subfiles.
:return: com
:rtype: dict
"""
total_iters = 0
counter = 0
com = {}
for key in reg_keys:
total_iters += len(key.subkeys())
if show_bar:
print('Processing %s results...' % total_iters)
bar = progressbar.ProgressBar(redirect_stdout=True,
max_value=total_iters)
for key in reg_keys:
for subkey in key.subkeys():
for v in list(subkey.values()):
# Per MS documentation, Interface names must start with the
# 'I' prefix, so we limit our values here as well.
# Not doing so can lead to some crazy names and conflicting
# results!
# https://docs.microsoft.com/en-us/dotnet/standard/design-guidelines/names-of-classes-structs-and-interfaces
if v.value_type() == Registry.RegSZ \
and v.name() == '(default)' \
and v.value().startswith('I'):
bin_guid = iid_text_to_bin(subkey.name())
# Names with special characters/spaces are truncated
stop_chars = ['_', '<', '[', ' ']
index = min(v.value().find(i)
if i in v.value()
else
len(v.value())
for i in stop_chars)
value = v.value()[:index]
if value not in com:
com[value] = [bin_guid]
elif bin_guid not in com[value]:
com[value].append(bin_guid)
if show_bar:
bar.update(counter)
counter += 1
if show_bar:
bar.finish()
return com
def initialize_parser():
parser = argparse.ArgumentParser(
description="Crawls windows registry to hunt for and convert IIDs for "
"COM interfaces to binary YARA signatures. The submitted "
"hives must be from HKLM\\SOFTWARE. Make copies of "
"these files off an active Windows OS using the command "
"'reg save HKLM\\SOFTWARE hklm_sft.hiv' when running as "
"administrator.")
parser.add_argument('hive', metavar='FILE', nargs='*',
help='Full path to the registry hive to be processed.')
parser.add_argument('-o', '--output-filename', type=str,
default='com_interface_ids.yara',
help='Filename to write YARA signatures '
'to (default: com_interface_ids.yara)')
parser.add_argument('-v', '--verbose', action='store_true', default=False,
help='Output additional information when processing '
'(mostly for debugging purposes).')
return parser
def main():
p = initialize_parser()
args = p.parse_args()
root = logging.getLogger()
logging.basicConfig()
if args.verbose:
root.setLevel(logging.DEBUG)
else:
root.setLevel(logging.WARNING)
if len(args.hive) == 0:
p.print_help()
sys.exit(2)
keys = []
for hive in args.hive:
print('Collecting IIDs from %s...' % hive)
if not os.path.isfile(hive):
log.warning('Failed to find file %s. Skipping...' % hive)
continue
try:
reg = Registry.Registry(hive)
except Registry.RegistryParse.ParseException:
log.warning('Error parsing %s. Skipping...' % hive)
continue
try:
keys.append(reg.open("Classes\\Interface"))
except Registry.RegistryKeyNotFoundException:
log.warning("Couldn't find 'Classes\\Interface' key in %s." % hive)
try:
keys.append(reg.open("Classes\\Wow6432Node\\Interface"))
except Registry.RegistryKeyNotFoundException:
log.warning("Couldn't find 'Classes\\Wow6432Node\\Interface\\ "
"key in %s." % hive)
com_signatures = enumerate_com_interfaces(keys, True)
counter = 0
total_rules = len(com_signatures)
print('Generating %s YARA signatures...' % total_rules)
bar = progressbar.ProgressBar(redirect_stdout=True, max_value=total_rules)
yara_rule = '// %s\n// COM IID YARA sig collection.\n// ' \
'Autogenerated on %s\n\n' % (__author__, datetime.now())
for name, rules in com_signatures.items():
yara_rule += 'rule %s\n{\n\t' \
'strings:' % name
if len(rules) > 1:
for i in range(0, len(rules)):
yara_rule += '\n\t\t$%s_%s = %s' % (name, i, rules[i])
else:
yara_rule += '\n\t\t$%s = %s' % (name, rules[0])
yara_rule += '\n\tcondition:\n\t\tany of them\n}\n'
bar.update(counter)
counter += 1
bar.finish()
print('Writing YARA rules to %s' % args.output_filename)
with open(args.output_filename, 'w') as f:
f.write(yara_rule)
f.close()
if __name__ == '__main__':
main()
| [((31, 6, 31, 33), 'logging.getLogger', 'logging.getLogger', ({(31, 24, 31, 32): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((50, 11, 50, 35), 're.sub', 're.sub', ({(50, 18, 50, 25): '"""[{}-]"""', (50, 27, 50, 29): '""""""', (50, 31, 50, 34): 'iid'}, {}), "('[{}-]', '', iid)", False, 'import re\n'), ((130, 13, 136, 37), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((154, 11, 154, 30), 'logging.getLogger', 'logging.getLogger', ({}, {}), '()', False, 'import logging\n'), ((155, 4, 155, 25), 'logging.basicConfig', 'logging.basicConfig', ({}, {}), '()', False, 'import logging\n'), ((197, 10, 197, 78), 'progressbar.ProgressBar', 'progressbar.ProgressBar', (), '', False, 'import progressbar\n'), ((52, 39, 52, 61), 'binascii.a2b_hex', 'binascii.a2b_hex', ({(52, 56, 52, 60): 'guid'}, {}), '(guid)', False, 'import binascii\n'), ((86, 14, 87, 60), 'progressbar.ProgressBar', 'progressbar.ProgressBar', (), '', False, 'import progressbar\n'), ((163, 8, 163, 19), 'sys.exit', 'sys.exit', ({(163, 17, 163, 18): '(2)'}, {}), '(2)', False, 'import sys\n'), ((170, 15, 170, 35), 'os.path.isfile', 'os.path.isfile', ({(170, 30, 170, 34): 'hive'}, {}), '(hive)', False, 'import os\n'), ((175, 18, 175, 41), 'Registry.Registry.Registry', 'Registry.Registry', ({(175, 36, 175, 40): 'hive'}, {}), '(hive)', False, 'from Registry import Registry\n'), ((199, 57, 199, 71), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime\n')] |
sis0truk/pretrained-models.pytorch | examples/voc2007_extract.py | 4aea6d47996279b4b281355ca3d9738d0dff7469 | import os
import argparse
from tqdm import tqdm
import torch
from torch.autograd import Variable
from torch.utils import model_zoo
# http://scikit-learn.org
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.svm import LinearSVC
from sklearn.svm import SVC
import sys
sys.path.append('.')
import pretrainedmodels
import pretrainedmodels.utils
import pretrainedmodels.datasets
model_names = sorted(name for name in pretrainedmodels.__dict__
if not name.startswith("__")
and name.islower()
and callable(pretrainedmodels.__dict__[name]))
def extract_features_targets(model, features_size, loader, path_data, cuda=False):
if os.path.isfile(path_data):
print('Load features from {}'.format(path_data))
return torch.load(path_data)
print('\nExtract features on {}set'.format(loader.dataset.set))
features = torch.Tensor(len(loader.dataset), features_size)
targets = torch.Tensor(len(loader.dataset), len(loader.dataset.classes))
for batch_id, batch in enumerate(tqdm(loader)):
img = batch[0]
target = batch[2]
current_bsize = img.size(0)
from_ = int(batch_id * loader.batch_size)
to_ = int(from_ + current_bsize)
if cuda:
img = img.cuda(async=True)
input = Variable(img, requires_grad=False)
output = model(input)
features[from_:to_] = output.data.cpu()
targets[from_:to_] = target
os.system('mkdir -p {}'.format(os.path.dirname(path_data)))
print('save ' + path_data)
torch.save((features, targets), path_data)
print('')
return features, targets
def train_multilabel(features, targets, classes, train_split, test_split, C=1.0, ignore_hard_examples=True, after_ReLU=False, normalize_L2=False):
print('\nHyperparameters:\n - C: {}\n - after_ReLU: {}\n - normL2: {}'.format(C, after_ReLU, normalize_L2))
train_APs = []
test_APs = []
for class_id in range(len(classes)):
classifier = SVC(C=C, kernel='linear') # http://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html
if ignore_hard_examples:
train_masks = (targets[train_split][:,class_id] != 0).view(-1, 1)
train_features = torch.masked_select(features[train_split], train_masks.expand_as(features[train_split])).view(-1,features[train_split].size(1))
train_targets = torch.masked_select(targets[train_split], train_masks.expand_as(targets[train_split])).view(-1,targets[train_split].size(1))
test_masks = (targets[test_split][:,class_id] != 0).view(-1, 1)
test_features = torch.masked_select(features[test_split], test_masks.expand_as(features[test_split])).view(-1,features[test_split].size(1))
test_targets = torch.masked_select(targets[test_split], test_masks.expand_as(targets[test_split])).view(-1,targets[test_split].size(1))
else:
train_features = features[train_split]
train_targets = targets[train_split]
test_features = features[test_split]
test_targets = features[test_split]
if after_ReLU:
train_features[train_features < 0] = 0
test_features[test_features < 0] = 0
if normalize_L2:
train_norm = torch.norm(train_features, p=2, dim=1).unsqueeze(1)
train_features = train_features.div(train_norm.expand_as(train_features))
test_norm = torch.norm(test_features, p=2, dim=1).unsqueeze(1)
test_features = test_features.div(test_norm.expand_as(test_features))
train_X = train_features.numpy()
train_y = (train_targets[:,class_id] != -1).numpy() # uses hard examples if not ignored
test_X = test_features.numpy()
test_y = (test_targets[:,class_id] != -1).numpy()
classifier.fit(train_X, train_y) # train parameters of the classifier
train_preds = classifier.predict(train_X)
train_acc = accuracy_score(train_y, train_preds) * 100
train_AP = average_precision_score(train_y, train_preds) * 100
train_APs.append(train_AP)
test_preds = classifier.predict(test_X)
test_acc = accuracy_score(test_y, test_preds) * 100
test_AP = average_precision_score(test_y, test_preds) * 100
test_APs.append(test_AP)
print('class "{}" ({}/{}):'.format(classes[class_id], test_y.sum(), test_y.shape[0]))
print(' - {:8}: acc {:.2f}, AP {:.2f}'.format(train_split, train_acc, train_AP))
print(' - {:8}: acc {:.2f}, AP {:.2f}'.format(test_split, test_acc, test_AP))
print('all classes:')
print(' - {:8}: mAP {:.4f}'.format(train_split, sum(train_APs)/len(classes)))
print(' - {:8}: mAP {:.4f}'.format(test_split, sum(test_APs)/len(classes)))
##########################################################################
# main
##########################################################################
parser = argparse.ArgumentParser(
description='Train/Evaluate models',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--dir_outputs', default='/tmp/outputs', type=str, help='')
parser.add_argument('--dir_datasets', default='/tmp/datasets', type=str, help='')
parser.add_argument('--C', default=1, type=float, help='')
parser.add_argument('-b', '--batch_size', default=50, type=float, help='')
parser.add_argument('-a', '--arch', default='alexnet', choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: alexnet)')
parser.add_argument('--train_split', default='train', type=str, help='')
parser.add_argument('--test_split', default='val', type=str, help='')
parser.add_argument('--cuda', const=True, nargs='?', type=bool, help='')
def main ():
global args
args = parser.parse_args()
print('\nCUDA status: {}'.format(args.cuda))
print('\nLoad pretrained model on Imagenet')
model = pretrainedmodels.__dict__[args.arch](num_classes=1000, pretrained='imagenet')
model.eval()
if args.cuda:
model.cuda()
features_size = model.last_linear.in_features
model.last_linear = pretrainedmodels.utils.Identity() # Trick to get inputs (features) from last_linear
print('\nLoad datasets')
tf_img = pretrainedmodels.utils.TransformImage(model)
train_set = pretrainedmodels.datasets.Voc2007Classification(args.dir_datasets, 'train', transform=tf_img)
val_set = pretrainedmodels.datasets.Voc2007Classification(args.dir_datasets, 'val', transform=tf_img)
test_set = pretrainedmodels.datasets.Voc2007Classification(args.dir_datasets, 'test', transform=tf_img)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=args.batch_size, shuffle=False, num_workers=2)
val_loader = torch.utils.data.DataLoader(val_set, batch_size=args.batch_size, shuffle=False, num_workers=2)
test_loader = torch.utils.data.DataLoader(test_set, batch_size=args.batch_size, shuffle=False, num_workers=2)
print('\nLoad features')
dir_features = os.path.join(args.dir_outputs, 'data/{}'.format(args.arch))
path_train_data = '{}/{}set.pth'.format(dir_features, 'train')
path_val_data = '{}/{}set.pth'.format(dir_features, 'val')
path_test_data = '{}/{}set.pth'.format(dir_features, 'test')
features = {}
targets = {}
features['train'], targets['train'] = extract_features_targets(model, features_size, train_loader, path_train_data, args.cuda)
features['val'], targets['val'] = extract_features_targets(model, features_size, val_loader, path_val_data, args.cuda)
features['test'], targets['test'] = extract_features_targets(model, features_size, test_loader, path_test_data, args.cuda)
features['trainval'] = torch.cat([features['train'], features['val']], 0)
targets['trainval'] = torch.cat([targets['train'], targets['val']], 0)
print('\nTrain Support Vector Machines')
if args.train_split == 'train' and args.test_split == 'val':
print('\nHyperparameters search: train multilabel classifiers (on-versus-all) on train/val')
elif args.train_split == 'trainval' and args.test_split == 'test':
print('\nEvaluation: train a multilabel classifier on trainval/test')
else:
raise ValueError('Trying to train on {} and eval on {}'.format(args.train_split, args.test_split))
train_multilabel(features, targets, train_set.classes, args.train_split, args.test_split, C=args.C)
if __name__ == '__main__':
main() | [] |
tani-cat/point_maximizer | main.py | c9ff868377bbeed4727914d7be258457dc8295a3 | import csv
import os
from collections import deque
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
INPUT_PATH = os.path.join(BASE_DIR, 'goods_source.csv')
OUTPUT_PATH = os.path.join(BASE_DIR, 'result.csv')
FILE_ENCODE = 'shift_jis'
INPUT_COLS = ('id', 'goods_name', 'price')
def import_csv():
"""入力データの読み込み
"""
try:
data_l = list()
with open(INPUT_PATH, mode='r', encoding=FILE_ENCODE, newline='') as csvf:
reader = csv.DictReader(csvf)
for dic in reader:
dic['id'] = int(dic['id'])
dic['price'] = int(dic['price'])
data_l.append(dic)
for col in INPUT_COLS:
if col not in data_l[0]:
raise IndexError(col)
return data_l
except FileNotFoundError:
print('goods_source.csvがありません')
return list()
except IndexError as e:
print('列が不足しています: ' + str(e))
return list()
def func(init, old_que, threshold=50):
keep = dict()
new_que = deque(list())
while old_que:
last = old_que.pop()
if init['mod'] + last['mod'] >= threshold:
if keep:
new_que.appendleft(keep)
keep = last
else:
new_que.appendleft(last)
break
return init, keep, old_que, new_que
def calculate(data_l):
"""アルゴリズム
1. 50未満の中でペアにできるものを探す
1-1. queの末端でペアを作れる場合、左端を固定し和が50以上で最小になるように右を選んでペアにする
1-2. queの末端でペアを作れない場合、末端2つを取り出した上で3個以上の組み合わせで消化する
1-2-1. 右末端で和が50以上なら右から左に探索して和が50以上になる最小値を得る->組にして除外
1-2-2. 右末端でも和が50にならないなら右末端をして1-2に戻る
-> 全部を消化しても50にならないならそのまま全部を足してしまう
2. 1と同じことを全体かつ閾値150で行う
"""
# 50未満のものだけ和を取る処理に入れる
under_que = list()
over_que = list()
for i in range(len(data_l)):
_mod = data_l[i]['price'] % 100
data_l[i]['set'] = 0
dic = {
'id': [i],
'mod': _mod,
}
if _mod < 50:
under_que.append(dic)
else:
over_que.append(dic)
under_que.sort(key=lambda x: x['mod'])
under_que = deque(under_que)
while under_que:
init = under_que.popleft()
while under_que:
init, keep, under_que, last_que = func(init, under_que)
# この時点でlast_queは要素1以上
if not keep:
keep = last_que.pop()
init = {
'id': init['id'] + keep['id'],
'mod': init['mod'] + keep['mod'],
}
if last_que:
over_que.append(init)
under_que.extend(last_que)
break
else:
over_que.append(init)
break
# 50以上の項目のうち、合計が150以上になる項目同士を足す
# (これにより購入回数を最小にする)
# final_que: 最終的な組み合わせ
over_que = deque(sorted(over_que, key=lambda x: x['mod']))
final_que = list()
while over_que:
init = over_que.popleft()
init, keep, over_que, last_que = func(init, over_que, 150)
if keep:
init = {
'id': init['id'] + keep['id'],
'mod': (init['mod'] + keep['mod']) % 100,
}
over_que.appendleft(init)
else:
final_que.append(init)
over_que.extend(last_que)
sum_p = 0
# 計算結果の出力
for cnt, que in enumerate(final_que):
point = 0
for id in que['id']:
data_l[id]['set'] = cnt + 1
point += data_l[id]['price']
print(f'set{cnt + 1} {round(point / 100)} P')
sum_p += round(point / 100)
print(f'total: {sum_p} P')
return data_l
def main():
# ファイルの読み込み
data_l = import_csv()
if not data_l:
print('処理を中止します')
return False
# 計算処理
data_l = calculate(data_l)
# 結果をファイルに出力
data_l.sort(key=lambda x: (x['set'], x['id']))
with open(OUTPUT_PATH, mode='w', encoding=FILE_ENCODE, newline='') as csvf:
writer = csv.DictWriter(csvf, data_l[0].keys())
writer.writeheader()
writer.writerows(data_l)
print('Done')
if __name__ == '__main__':
main()
| [((7, 13, 7, 55), 'os.path.join', 'os.path.join', ({(7, 26, 7, 34): 'BASE_DIR', (7, 36, 7, 54): '"""goods_source.csv"""'}, {}), "(BASE_DIR, 'goods_source.csv')", False, 'import os\n'), ((8, 14, 8, 50), 'os.path.join', 'os.path.join', ({(8, 27, 8, 35): 'BASE_DIR', (8, 37, 8, 49): '"""result.csv"""'}, {}), "(BASE_DIR, 'result.csv')", False, 'import os\n'), ((6, 27, 6, 52), 'os.path.abspath', 'os.path.abspath', ({(6, 43, 6, 51): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((83, 16, 83, 32), 'collections.deque', 'deque', ({(83, 22, 83, 31): 'under_que'}, {}), '(under_que)', False, 'from collections import deque\n'), ((21, 21, 21, 41), 'csv.DictReader', 'csv.DictReader', ({(21, 36, 21, 40): 'csvf'}, {}), '(csvf)', False, 'import csv\n')] |
gspu/bitkeeper | src/gui/tcltk/tcl/tests/langbench/proc.py | 994fb651a4045b221e33703fc3d665c3a34784e1 | #!/usr/bin/python
def a(val):
return b(val)
def b(val):
return c(val)
def c(val):
return d(val)
def d(val):
return e(val)
def e(val):
return f(val)
def f(val):
return g(val, 2)
def g(v1, v2):
return h(v1, v2, 3)
def h(v1, v2, v3):
return i(v1, v2, v3, 4)
def i(v1, v2, v3, v4):
return j(v1, v2, v3, v4, 5)
def j(v1, v2, v3, v4, v5):
return v1 + v2 + v3 + v4 + v5
n = 100000
while n > 0:
x = a(n)
n = n - 1
print "x=%d" % x
| [] |
dmitryrubtsov/Recommender-systems | src/metrics.py | 9debd7b1c2d67ebc508263a483c81da57521dea0 | import pandas as pd
import numpy as np
import swifter
def money_precision_at_k(y_pred: pd.Series, y_true: pd.Series, item_price, k=5):
y_pred = y_pred.swifter.progress_bar(False).apply(pd.Series)
user_filter = ~(y_true.swifter.progress_bar(False).apply(len) < k)
y_pred = y_pred.loc[user_filter]
y_true = y_true.loc[user_filter]
prices_recommended = y_pred.swifter.progress_bar(False).applymap(lambda item: item_price.price.get(item))
flags = y_pred.loc[:, :k - 1].swifter.progress_bar(False) \
.apply(lambda row: np.isin(np.array(row), y_true.get(row.name)), axis=1) \
.swifter.progress_bar(False).apply(pd.Series)
metric = (
(flags * prices_recommended.loc[:, :k - 1]).sum(axis=1) / prices_recommended.loc[:, :k - 1].sum(axis=1)
).mean()
return metric
| [((15, 35, 15, 48), 'numpy.array', 'np.array', ({(15, 44, 15, 47): 'row'}, {}), '(row)', True, 'import numpy as np\n')] |
upupming/dragon | diff_r_b.py | 245f71996004b386ae764eb8f76603233d8a6763 | import numpy as np
size = 9
percentage_max = 0.08
xis = np.linspace(0.1 * (1-percentage_max), 0.1 * (1+percentage_max), size)
E_n = [
85219342462.9973,
85219254693.4412,
85219173007.4296,
85219096895.7433,
85219025899.6604,
85218959605.1170,
85218897637.6421,
85218839657.9502,
85218785358.0968
]
percentage = np.empty(size)
for i in range(len(xis)):
percentage[i] = (E_n[i] - E_n[size//2])/E_n[size//2]*100
print(percentage)
# [ 3.71470260e-04 2.68477348e-04 1.72623153e-04 8.33101319e-05
# 0.00000000e+00 -7.77931251e-05 -1.50508665e-04 -2.18544754e-04
# -2.82262747e-04] | [((5, 6, 5, 75), 'numpy.linspace', 'np.linspace', ({(5, 18, 5, 42): '0.1 * (1 - percentage_max)', (5, 44, 5, 68): '0.1 * (1 + percentage_max)', (5, 70, 5, 74): 'size'}, {}), '(0.1 * (1 - percentage_max), 0.1 * (1 + percentage_max), size)', True, 'import numpy as np\n'), ((19, 13, 19, 27), 'numpy.empty', 'np.empty', ({(19, 22, 19, 26): 'size'}, {}), '(size)', True, 'import numpy as np\n')] |
rhiga2/mturk-tsep-test | src/run.py | 2cc4388442bc9155022d28ec9132acc10a1b82f7 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from caqe import app
app.run(debug=True, threaded=True) | [((5, 0, 5, 34), 'caqe.app.run', 'app.run', (), '', False, 'from caqe import app\n')] |
bopopescu/hydra | src/main/python/hydra/lib/cli.py | ec0793f8c1f49ceb93bf1f1a9789085b68d55f08 | """hydra cli.
Usage:
hydra cli ls slaves
hydra cli ls apps
hydra cli ls task <app>
hydra cli [force] stop <app>
hydra cli scale <app> <scale>
hydra cli (-h | --help)
hydra cli --version
Options:
-h --help Show this screen.
--version Show version.
"""
__author__ = 'sushil'
from docopt import docopt
from pprint import pprint, pformat # NOQA
from hydra.lib import util, mmapi
import os
import sys
import logging
try:
# Python 2.x
from ConfigParser import ConfigParser
except ImportError:
# Python 3.x
from configparser import ConfigParser
l = util.createlogger('cli', logging.INFO)
# l.setLevel(logging.DEBUG)
def cli(argv):
config = ConfigParser()
config_file_name = 'hydra.ini'
if len(argv) >= 2 and argv[1].find('.ini') != -1:
config_file_name = argv[1]
del argv[1]
if not os.path.isfile(config_file_name):
l.error("Unable to open config file %s" % config_file_name)
sys.exit(1)
config.read(config_file_name)
mesos_addr = 'http://' + config.get('mesos', 'ip') + ':' + \
config.get('mesos', 'port')
marathon_addr = 'http://' + config.get('marathon', 'ip') + ':' + \
config.get('marathon', 'port')
argv[0] = 'cli'
args = docopt(__doc__, argv=argv, version='hydra 0.1.0', )
# pprint (args)
if args['ls']:
if args['slaves']:
mesos = mmapi.MesosIF(mesos_addr)
mesos.print_slaves()
elif args['apps']:
mt = mmapi.MarathonIF(marathon_addr, '127.0.0.1', None)
apps = mt.get_apps()
for app in apps:
st = "App:" + app.id
st += " CPU:" + str(app.cpus)
st += " MEM:" + str(app.mem)
st += " Instances:" + str(app.instances)
if len(app.constraints):
st += " Constraints:" + pformat(app.constraints)
l.info(st)
elif args['task']:
mt = mmapi.MarathonIF(marathon_addr, '127.0.0.1', None)
app = mt.get_app(args['<app>'])
st = "App:" + args['<app>']
st += " CPU:" + str(app.cpus)
st += " MEM:" + str(app.mem)
st += " Instances:" + str(app.instances)
if len(app.constraints):
st += " Constraints:" + pformat(app.constraints)
l.info(st)
st = "CMD:" + app.cmd
l.info(st)
st = "ID:" + app.id
st += " task_running:" + str(app.tasks_running)
st += " task_staged:" + str(app.tasks_staged)
l.info(st)
tasks = app.tasks
for task in tasks:
st = "\tTASK ID:" + task.id + " host:" + task.host
if len(task.ports):
st += " ports:" + pformat(task.ports)
if len(task.service_ports):
st += " service_ports:" + pformat(task.service_ports)
l.info(st)
elif args['stop']:
mt = mmapi.MarathonIF(marathon_addr, '127.0.0.1', None)
l.info("Deleting app:" + args['<app>'])
mt.delete_app(args['<app>'], args['force'])
l.info("Waiting for app removal to complete")
mt.wait_app_removal(args['<app>'])
elif args['scale']:
mt = mmapi.MarathonIF(marathon_addr, '127.0.0.1', None)
app = args['<app>']
scale = int(args['<scale>'])
l.info("Scaling app:" + app + " to scale:" + str(scale))
mt.scale_app(app, scale)
l.info("Waiting for app scale to complete")
mt.wait_app_ready(app, scale)
# SK:Tried to add log collection but no luck so far.
# elif args['logs']:
# path = "/tmp/mesos/slaves/"
# #11323ada-daab-4d76-8749-3113b5448bed-S0/
# path += "/frameworks/
# # #11323ada-daab-4d76-8749-3113b5448bed-0007
# path += "/executors/"
# #zst-pub.4bdec0e2-e7e3-11e5-a874-fe2077b92eeb
# path += "/runs/"
# # d00620ea-8f3e-427d-9404-6f6b9701f64f/
# app = args['<app>']
| [((32, 4, 32, 42), 'hydra.lib.util.createlogger', 'util.createlogger', ({(32, 22, 32, 27): '"""cli"""', (32, 29, 32, 41): 'logging.INFO'}, {}), "('cli', logging.INFO)", False, 'from hydra.lib import util, mmapi\n'), ((37, 13, 37, 27), 'configparser.ConfigParser', 'ConfigParser', ({}, {}), '()', False, 'from configparser import ConfigParser\n'), ((53, 11, 53, 62), 'docopt.docopt', 'docopt', (), '', False, 'from docopt import docopt\n'), ((42, 11, 42, 43), 'os.path.isfile', 'os.path.isfile', ({(42, 26, 42, 42): 'config_file_name'}, {}), '(config_file_name)', False, 'import os\n'), ((44, 8, 44, 19), 'sys.exit', 'sys.exit', ({(44, 17, 44, 18): '(1)'}, {}), '(1)', False, 'import sys\n'), ((57, 20, 57, 45), 'hydra.lib.mmapi.MesosIF', 'mmapi.MesosIF', ({(57, 34, 57, 44): 'mesos_addr'}, {}), '(mesos_addr)', False, 'from hydra.lib import util, mmapi\n'), ((95, 13, 95, 63), 'hydra.lib.mmapi.MarathonIF', 'mmapi.MarathonIF', ({(95, 30, 95, 43): 'marathon_addr', (95, 45, 95, 56): '"""127.0.0.1"""', (95, 58, 95, 62): 'None'}, {}), "(marathon_addr, '127.0.0.1', None)", False, 'from hydra.lib import util, mmapi\n'), ((60, 17, 60, 67), 'hydra.lib.mmapi.MarathonIF', 'mmapi.MarathonIF', ({(60, 34, 60, 47): 'marathon_addr', (60, 49, 60, 60): '"""127.0.0.1"""', (60, 62, 60, 66): 'None'}, {}), "(marathon_addr, '127.0.0.1', None)", False, 'from hydra.lib import util, mmapi\n'), ((101, 13, 101, 63), 'hydra.lib.mmapi.MarathonIF', 'mmapi.MarathonIF', ({(101, 30, 101, 43): 'marathon_addr', (101, 45, 101, 56): '"""127.0.0.1"""', (101, 58, 101, 62): 'None'}, {}), "(marathon_addr, '127.0.0.1', None)", False, 'from hydra.lib import util, mmapi\n'), ((71, 17, 71, 67), 'hydra.lib.mmapi.MarathonIF', 'mmapi.MarathonIF', ({(71, 34, 71, 47): 'marathon_addr', (71, 49, 71, 60): '"""127.0.0.1"""', (71, 62, 71, 66): 'None'}, {}), "(marathon_addr, '127.0.0.1', None)", False, 'from hydra.lib import util, mmapi\n'), ((68, 44, 68, 68), 'pprint.pformat', 'pformat', ({(68, 52, 68, 67): 'app.constraints'}, {}), '(app.constraints)', False, 'from pprint import pprint, pformat\n'), ((78, 40, 78, 64), 'pprint.pformat', 'pformat', ({(78, 48, 78, 63): 'app.constraints'}, {}), '(app.constraints)', False, 'from pprint import pprint, pformat\n'), ((90, 38, 90, 57), 'pprint.pformat', 'pformat', ({(90, 46, 90, 56): 'task.ports'}, {}), '(task.ports)', False, 'from pprint import pprint, pformat\n'), ((92, 46, 92, 73), 'pprint.pformat', 'pformat', ({(92, 54, 92, 72): 'task.service_ports'}, {}), '(task.service_ports)', False, 'from pprint import pprint, pformat\n')] |
doggy8088/azure-devops-cli-extension | azure-devops/azext_devops/test/common/test_format.py | 2f6b1a6ffbc49ae454df640a8bb00dac991d6514 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import unittest
from azext_devops.dev.common.format import trim_for_display, date_time_to_only_date
class TestFormatMethods(unittest.TestCase):
def test_trim_for_display(self):
input = 'Gallery extensions for Portal Extension'
output = trim_for_display(input, 20)
self.assertEqual(output, 'Gallery extensions f...')
input = 'Aex platform'
output = trim_for_display(input, 20)
self.assertEqual(output, input)
input = ''
output = trim_for_display(input, 20)
self.assertEqual(output, input)
input = None
output = trim_for_display(input, 20)
self.assertEqual(output, input)
def test_date_time_to_only_date(self):
input = '2019-02-24T02:45:41.277000+00:00'
output = date_time_to_only_date(input)
self.assertEqual(output, '2019-02-24')
input = 'Aex platform'
output = date_time_to_only_date(input)
self.assertEqual(output, input)
if __name__ == '__main__':
unittest.main() | [((40, 4, 40, 19), 'unittest.main', 'unittest.main', ({}, {}), '()', False, 'import unittest\n'), ((14, 17, 14, 44), 'azext_devops.dev.common.format.trim_for_display', 'trim_for_display', ({(14, 34, 14, 39): 'input', (14, 41, 14, 43): '20'}, {}), '(input, 20)', False, 'from azext_devops.dev.common.format import trim_for_display, date_time_to_only_date\n'), ((18, 17, 18, 44), 'azext_devops.dev.common.format.trim_for_display', 'trim_for_display', ({(18, 34, 18, 39): 'input', (18, 41, 18, 43): '20'}, {}), '(input, 20)', False, 'from azext_devops.dev.common.format import trim_for_display, date_time_to_only_date\n'), ((22, 17, 22, 44), 'azext_devops.dev.common.format.trim_for_display', 'trim_for_display', ({(22, 34, 22, 39): 'input', (22, 41, 22, 43): '20'}, {}), '(input, 20)', False, 'from azext_devops.dev.common.format import trim_for_display, date_time_to_only_date\n'), ((26, 17, 26, 44), 'azext_devops.dev.common.format.trim_for_display', 'trim_for_display', ({(26, 34, 26, 39): 'input', (26, 41, 26, 43): '20'}, {}), '(input, 20)', False, 'from azext_devops.dev.common.format import trim_for_display, date_time_to_only_date\n'), ((31, 17, 31, 46), 'azext_devops.dev.common.format.date_time_to_only_date', 'date_time_to_only_date', ({(31, 40, 31, 45): 'input'}, {}), '(input)', False, 'from azext_devops.dev.common.format import trim_for_display, date_time_to_only_date\n'), ((35, 17, 35, 46), 'azext_devops.dev.common.format.date_time_to_only_date', 'date_time_to_only_date', ({(35, 40, 35, 45): 'input'}, {}), '(input)', False, 'from azext_devops.dev.common.format import trim_for_display, date_time_to_only_date\n')] |
aantr/WindowsHostManager | github/GitReleaseAsset.py | 75d248fc8991d471c6802fa79e7dee44a5708c65 | ############################ Copyrights and license ############################
# #
# Copyright 2017 Chris McBride <[email protected]> #
# Copyright 2017 Simon <[email protected]> #
# Copyright 2018 Wan Liuyang <[email protected]> #
# Copyright 2018 sfdye <[email protected]> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
import github.GithubObject
class GitReleaseAsset(github.GithubObject.CompletableGithubObject):
"""
This class represents GitReleaseAssets. The reference can be found here https://developer.github.com/v3/repos/releases/#get-a-single-release-asset
"""
def __repr__(self):
return self.get__repr__({"url": self.url})
@property
def url(self):
"""
:type: string
"""
self._completeIfNotSet(self._url)
return self._url.value
@property
def id(self):
"""
:type: integer
"""
self._completeIfNotSet(self._id)
return self._id.value
@property
def name(self):
"""
:type: string
"""
self._completeIfNotSet(self._name)
return self._name.value
@property
def label(self):
"""
:type: string
"""
self._completeIfNotSet(self._label)
return self._label.value
@property
def content_type(self):
"""
:type: string
"""
self._completeIfNotSet(self._content_type)
return self._content_type.value
@property
def state(self):
"""
:type: string
"""
self._completeIfNotSet(self._state)
return self._state.value
@property
def size(self):
"""
:type: integer
"""
self._completeIfNotSet(self._size)
return self._size.value
@property
def download_count(self):
"""
:type: integer
"""
self._completeIfNotSet(self._download_count)
return self._download_count.value
@property
def created_at(self):
"""
:type: datetime
"""
self._completeIfNotSet(self._created_at)
return self._created_at.value
@property
def updated_at(self):
"""
:type: datetime
"""
self._completeIfNotSet(self._updated_at)
return self._updated_at.value
@property
def browser_download_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._browser_download_url)
return self._browser_download_url.value
@property
def uploader(self):
"""
:type: github.NamedUser.NamedUser
"""
self._completeIfNotSet(self._uploader)
return self._uploader.value
def delete_asset(self):
"""
Delete asset from the release.
:rtype: bool
"""
headers, data = self._requester.requestJsonAndCheck("DELETE", self.url)
return True
def update_asset(self, name, label=""):
"""
Update asset metadata.
:rtype: github.GitReleaseAsset.GitReleaseAsset
"""
assert isinstance(name, str), name
assert isinstance(label, str), label
post_parameters = {"name": name, "label": label}
headers, data = self._requester.requestJsonAndCheck(
"PATCH", self.url, input=post_parameters
)
return GitReleaseAsset(self._requester, headers, data, completed=True)
def _initAttributes(self):
self._url = github.GithubObject.NotSet
self._id = github.GithubObject.NotSet
self._name = github.GithubObject.NotSet
self._label = github.GithubObject.NotSet
self._uploader = github.GithubObject.NotSet
self._content_type = github.GithubObject.NotSet
self._state = github.GithubObject.NotSet
self._size = github.GithubObject.NotSet
self._download_count = github.GithubObject.NotSet
self._created_at = github.GithubObject.NotSet
self._updated_at = github.GithubObject.NotSet
self._browser_download_url = github.GithubObject.NotSet
def _useAttributes(self, attributes):
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
if "id" in attributes: # pragma no branch
self._id = self._makeIntAttribute(attributes["id"])
if "name" in attributes: # pragma no branch
self._name = self._makeStringAttribute(attributes["name"])
if "label" in attributes: # pragma no branch
self._label = self._makeStringAttribute(attributes["label"])
if "uploader" in attributes: # pragma no branch
self._uploader = self._makeClassAttribute(
github.NamedUser.NamedUser, attributes["uploader"]
)
if "content_type" in attributes: # pragma no branch
self._content_type = self._makeStringAttribute(attributes["content_type"])
if "state" in attributes: # pragma no branch
self._state = self._makeStringAttribute(attributes["state"])
if "size" in attributes: # pragma no branch
self._size = self._makeIntAttribute(attributes["size"])
if "download_count" in attributes: # pragma no branch
self._download_count = self._makeIntAttribute(attributes["download_count"])
if "created_at" in attributes: # pragma no branch
self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
if "updated_at" in attributes: # pragma no branch
self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"])
if "browser_download_url" in attributes: # pragma no branch
self._browser_download_url = self._makeStringAttribute(
attributes["browser_download_url"]
)
| [] |
epenet/samsung-tv-ws-api | tests/test_remote.py | 23e0cf8188ba16ab128cff40fc56358cc2167ead | """Tests for remote module."""
from unittest.mock import Mock, call, patch
from samsungtvws.remote import SamsungTVWS
def test_send_key(connection: Mock) -> None:
"""Ensure simple data can be parsed."""
open_response = (
'{"data": {"token": 123456789}, "event": "ms.channel.connect", "from": "host"}'
)
connection.recv.side_effect = [open_response]
tv = SamsungTVWS("127.0.0.1")
tv.send_key("KEY_POWER")
connection.send.assert_called_once_with(
'{"method": "ms.remote.control", "params": {'
'"Cmd": "Click", '
'"DataOfCmd": "KEY_POWER", '
'"Option": "false", '
'"TypeOfRemote": "SendRemoteKey"'
"}}"
)
def test_app_list(connection: Mock) -> None:
"""Ensure valid app_list data can be parsed."""
open_response = (
'{"data": {"token": 123456789}, "event": "ms.channel.connect", "from": "host"}'
)
app_list_response = '{"data":{"data":[{"appId":"111299001912","app_type":2,"icon":"/opt/share/webappservice/apps_icon/FirstScreen/111299001912/250x250.png","is_lock":0,"name":"YouTube"},{"appId":"3201608010191","app_type":2,"icon":"/opt/share/webappservice/apps_icon/FirstScreen/3201608010191/250x250.png","is_lock":0,"name":"Deezer"}]},"event":"ed.installedApp.get","from":"host"}'
connection.recv.side_effect = [open_response, app_list_response]
tv = SamsungTVWS("127.0.0.1")
assert tv.app_list() == [
{
"appId": "111299001912",
"app_type": 2,
"icon": "/opt/share/webappservice/apps_icon/FirstScreen/111299001912/250x250.png",
"is_lock": 0,
"name": "YouTube",
},
{
"appId": "3201608010191",
"app_type": 2,
"icon": "/opt/share/webappservice/apps_icon/FirstScreen/3201608010191/250x250.png",
"is_lock": 0,
"name": "Deezer",
},
]
def test_app_list_invalid(connection: Mock) -> None:
"""Ensure simple data can be parsed."""
open_response = (
'{"data": {"token": 123456789}, "event": "ms.channel.connect", "from": "host"}'
)
app_list_response = '{"data": 200, "event": "ed.apps.launch", "from": "host"}'
connection.recv.side_effect = [open_response, app_list_response]
tv = SamsungTVWS("127.0.0.1")
assert tv.app_list() is None
connection.send.assert_called_once_with(
'{"method": "ms.channel.emit", "params": {"event": "ed.installedApp.get", "to": "host"}}'
)
def test_send_hold_key(connection: Mock) -> None:
"""Ensure simple data can be parsed."""
open_response = (
'{"data": {"token": 123456789}, "event": "ms.channel.connect", "from": "host"}'
)
connection.recv.side_effect = [open_response]
tv = SamsungTVWS("127.0.0.1")
with patch("samsungtvws.connection.time.sleep") as patch_sleep:
tv.hold_key("KEY_POWER", 3)
assert patch_sleep.call_count == 3
assert patch_sleep.call_args_list == [call(1), call(3), call(1)]
| [((14, 9, 14, 33), 'samsungtvws.remote.SamsungTVWS', 'SamsungTVWS', ({(14, 21, 14, 32): '"""127.0.0.1"""'}, {}), "('127.0.0.1')", False, 'from samsungtvws.remote import SamsungTVWS\n'), ((34, 9, 34, 33), 'samsungtvws.remote.SamsungTVWS', 'SamsungTVWS', ({(34, 21, 34, 32): '"""127.0.0.1"""'}, {}), "('127.0.0.1')", False, 'from samsungtvws.remote import SamsungTVWS\n'), ((61, 9, 61, 33), 'samsungtvws.remote.SamsungTVWS', 'SamsungTVWS', ({(61, 21, 61, 32): '"""127.0.0.1"""'}, {}), "('127.0.0.1')", False, 'from samsungtvws.remote import SamsungTVWS\n'), ((75, 9, 75, 33), 'samsungtvws.remote.SamsungTVWS', 'SamsungTVWS', ({(75, 21, 75, 32): '"""127.0.0.1"""'}, {}), "('127.0.0.1')", False, 'from samsungtvws.remote import SamsungTVWS\n'), ((76, 9, 76, 51), 'unittest.mock.patch', 'patch', ({(76, 15, 76, 50): '"""samsungtvws.connection.time.sleep"""'}, {}), "('samsungtvws.connection.time.sleep')", False, 'from unittest.mock import Mock, call, patch\n'), ((80, 42, 80, 49), 'unittest.mock.call', 'call', ({(80, 47, 80, 48): '(1)'}, {}), '(1)', False, 'from unittest.mock import Mock, call, patch\n'), ((80, 51, 80, 58), 'unittest.mock.call', 'call', ({(80, 56, 80, 57): '(3)'}, {}), '(3)', False, 'from unittest.mock import Mock, call, patch\n'), ((80, 60, 80, 67), 'unittest.mock.call', 'call', ({(80, 65, 80, 66): '(1)'}, {}), '(1)', False, 'from unittest.mock import Mock, call, patch\n')] |
chm-dev/amazfitGTSwatchfaceBundle | src/utils/pythonSrc/watchFaceParser/models/elements/battery/batteryGaugeElement.py | 4cb04be5215de16628418e9b38152a35d5372d3e | import logging
from watchFaceParser.models.elements.common.imageSetElement import ImageSetElement
class BatteryGaugeElement(ImageSetElement):
def __init__(self, parameter, parent, name = None):
super(BatteryGaugeElement, self).__init__(parameter = parameter, parent = parent, name = name)
def draw3(self, drawer, resources, state):
assert(type(resources) == list)
super(BatteryGaugeElement, self).draw3(drawer, resources, int(state.getBatteryLevel() * self.getImagesCount() / 100))
| [] |
JoseRoman/IndicoIo-python | indicoio/utils/__init__.py | 4fe2952df45c26392f36acd8b43391dfc50e140b | import inspect
import numpy as np
class TypeCheck(object):
"""
Decorator that performs a typecheck on the input to a function
"""
def __init__(self, accepted_structures, arg_name):
"""
When initialized, include list of accepted datatypes and the
arg_name to enforce the check on. Can totally be daisy-chained.
"""
self.accepted_structures = accepted_structures
self.is_accepted = lambda x: type(x) in accepted_structures
self.arg_name = arg_name
def __call__(self, fn):
def check_args(*args, **kwargs):
arg_dict = dict(zip(inspect.getargspec(fn).args, args))
full_args = dict(arg_dict.items() + kwargs.items())
if not self.is_accepted(full_args[self.arg_name]):
raise DataStructureException(
fn,
full_args[self.arg_name],
self.accepted_structures
)
return fn(*args, **kwargs)
return check_args
class DataStructureException(Exception):
"""
If a non-accepted datastructure is passed, throws an exception
"""
def __init__(self, callback, passed_structure, accepted_structures):
self.callback = callback.__name__
self.structure = str(type(passed_structure))
self.accepted = [str(structure) for structure in accepted_structures]
def __str__(self):
return """
function %s does not accept %s, accepted types are: %s
""" % (self.callback, self.structure, str(self.accepted))
@TypeCheck((list, dict, np.ndarray), 'array')
def normalize(array, distribution=1, norm_range=(0, 1), **kwargs):
"""
First arg is an array, whether that's in the form of a numpy array,
a list, or a dictionary that contains the data in its values.
Second arg is the desired distribution which would be applied before
normalization.
Supports linear, exponential, logarithmic and raising to whatever
power specified (in which case you just put a number)
Third arg is the range across which you want the data normalized
"""
# Handling dictionary array input
# Note: lists and numpy arrays behave the same in this program
dict_array = isinstance(array, dict)
if dict_array:
keys = array.keys()
array = np.array(array.values()).astype('float')
else: # Decorator errors if this isn't a list or a numpy array
array = np.array(array).astype('float')
# Handling various distributions
if type(distribution) in [float, int]:
array = np.power(array, distribution)
else:
array = getattr(np, distribution)(array, **kwargs)
# Prep for normalization
x_max, x_min = (np.max(array), np.min(array))
def norm(element,x_min,x_max):
base_span = (element - x_min)*(norm_range[-1] - norm_range[0])
return norm_range[0] + base_span / (x_max - x_min)
norm_array = np.vectorize(norm)(array, x_min, x_max)
if dict_array:
return dict(zip(keys, norm_array))
return norm_array
| [((71, 16, 71, 45), 'numpy.power', 'np.power', ({(71, 25, 71, 30): 'array', (71, 32, 71, 44): 'distribution'}, {}), '(array, distribution)', True, 'import numpy as np\n'), ((76, 20, 76, 33), 'numpy.max', 'np.max', ({(76, 27, 76, 32): 'array'}, {}), '(array)', True, 'import numpy as np\n'), ((76, 35, 76, 48), 'numpy.min', 'np.min', ({(76, 42, 76, 47): 'array'}, {}), '(array)', True, 'import numpy as np\n'), ((82, 17, 82, 35), 'numpy.vectorize', 'np.vectorize', ({(82, 30, 82, 34): 'norm'}, {}), '(norm)', True, 'import numpy as np\n'), ((67, 16, 67, 31), 'numpy.array', 'np.array', ({(67, 25, 67, 30): 'array'}, {}), '(array)', True, 'import numpy as np\n'), ((19, 32, 19, 54), 'inspect.getargspec', 'inspect.getargspec', ({(19, 51, 19, 53): 'fn'}, {}), '(fn)', False, 'import inspect\n')] |
ProzorroUKR/openprocurement.tender.openuadefense | openprocurement/tender/openuadefense/tests/tender.py | 5d6a7433839178edba35015ae614ba3e36b29d0b | # -*- coding: utf-8 -*-
import unittest
from openprocurement.api.tests.base import snitch
from openprocurement.api.tests.base import BaseWebTest
from openprocurement.tender.belowthreshold.tests.base import test_lots
from openprocurement.tender.belowthreshold.tests.tender import TenderResourceTestMixin
from openprocurement.tender.belowthreshold.tests.tender_blanks import (
# TenderUAProcessTest
invalid_tender_conditions,
)
from openprocurement.tender.openua.tests.tender import TenderUaProcessTestMixin
from openprocurement.tender.openua.tests.tender_blanks import (
# TenderUAResourceTest
empty_listing,
create_tender_generated,
tender_with_main_procurement_category,
tender_finance_milestones,
)
from openprocurement.tender.openuadefense.tests.base import (
BaseTenderUAWebTest,
test_tender_data,
)
from openprocurement.tender.openuadefense.tests.tender_blanks import (
# TenderUATest
simple_add_tender,
# TenderUAResourceTest
create_tender_invalid,
patch_tender,
patch_tender_ua,
# TenderUAProcessTest
one_valid_bid_tender_ua,
one_invalid_bid_tender,
)
class TenderUATest(BaseWebTest):
initial_data = test_tender_data
test_simple_add_tender = snitch(simple_add_tender)
class TenderUAResourceTest(BaseTenderUAWebTest, TenderResourceTestMixin):
test_lots_data = test_lots # TODO: change attribute identifier
initial_data = test_tender_data
test_empty_listing = snitch(empty_listing)
test_create_tender_invalid = snitch(create_tender_invalid)
test_create_tender_generated = snitch(create_tender_generated)
test_patch_tender = snitch(patch_tender)
test_patch_tender_ua = snitch(patch_tender_ua)
test_tender_with_main_procurement_category = snitch(tender_with_main_procurement_category)
test_tender_finance_milestones = snitch(tender_finance_milestones)
class TenderUAProcessTest(BaseTenderUAWebTest, TenderUaProcessTestMixin):
initial_data = test_tender_data
test_invalid_tender_conditions = snitch(invalid_tender_conditions)
test_one_valid_bid_tender_ua = snitch(one_valid_bid_tender_ua)
test_one_invalid_bid_tender = snitch(one_invalid_bid_tender)
def test_patch_not_author(self):
response = self.app.post_json('/tenders', {'data': test_tender_data})
self.assertEqual(response.status, '201 Created')
tender = response.json['data']
owner_token = response.json['access']['token']
authorization = self.app.authorization
self.app.authorization = ('Basic', ('bot', 'bot'))
response = self.app.post('/tenders/{}/documents'.format(tender['id']),
upload_files=[('file', 'name.doc', 'content')])
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
doc_id = response.json["data"]['id']
self.assertIn(doc_id, response.headers['Location'])
self.app.authorization = authorization
response = self.app.patch_json('/tenders/{}/documents/{}?acc_token={}'.format(tender['id'], doc_id, owner_token),
{"data": {"description": "document description"}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can update document only author")
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TenderUAProcessTest))
suite.addTest(unittest.makeSuite(TenderUAResourceTest))
suite.addTest(unittest.makeSuite(TenderUATest))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| [((44, 29, 44, 54), 'openprocurement.api.tests.base.snitch', 'snitch', ({(44, 36, 44, 53): 'simple_add_tender'}, {}), '(simple_add_tender)', False, 'from openprocurement.api.tests.base import snitch\n'), ((52, 25, 52, 46), 'openprocurement.api.tests.base.snitch', 'snitch', ({(52, 32, 52, 45): 'empty_listing'}, {}), '(empty_listing)', False, 'from openprocurement.api.tests.base import snitch\n'), ((53, 33, 53, 62), 'openprocurement.api.tests.base.snitch', 'snitch', ({(53, 40, 53, 61): 'create_tender_invalid'}, {}), '(create_tender_invalid)', False, 'from openprocurement.api.tests.base import snitch\n'), ((54, 35, 54, 66), 'openprocurement.api.tests.base.snitch', 'snitch', ({(54, 42, 54, 65): 'create_tender_generated'}, {}), '(create_tender_generated)', False, 'from openprocurement.api.tests.base import snitch\n'), ((55, 24, 55, 44), 'openprocurement.api.tests.base.snitch', 'snitch', ({(55, 31, 55, 43): 'patch_tender'}, {}), '(patch_tender)', False, 'from openprocurement.api.tests.base import snitch\n'), ((56, 27, 56, 50), 'openprocurement.api.tests.base.snitch', 'snitch', ({(56, 34, 56, 49): 'patch_tender_ua'}, {}), '(patch_tender_ua)', False, 'from openprocurement.api.tests.base import snitch\n'), ((57, 49, 57, 94), 'openprocurement.api.tests.base.snitch', 'snitch', ({(57, 56, 57, 93): 'tender_with_main_procurement_category'}, {}), '(tender_with_main_procurement_category)', False, 'from openprocurement.api.tests.base import snitch\n'), ((58, 37, 58, 70), 'openprocurement.api.tests.base.snitch', 'snitch', ({(58, 44, 58, 69): 'tender_finance_milestones'}, {}), '(tender_finance_milestones)', False, 'from openprocurement.api.tests.base import snitch\n'), ((64, 37, 64, 70), 'openprocurement.api.tests.base.snitch', 'snitch', ({(64, 44, 64, 69): 'invalid_tender_conditions'}, {}), '(invalid_tender_conditions)', False, 'from openprocurement.api.tests.base import snitch\n'), ((65, 35, 65, 66), 'openprocurement.api.tests.base.snitch', 'snitch', ({(65, 42, 65, 65): 'one_valid_bid_tender_ua'}, {}), '(one_valid_bid_tender_ua)', False, 'from openprocurement.api.tests.base import snitch\n'), ((66, 34, 66, 64), 'openprocurement.api.tests.base.snitch', 'snitch', ({(66, 41, 66, 63): 'one_invalid_bid_tender'}, {}), '(one_invalid_bid_tender)', False, 'from openprocurement.api.tests.base import snitch\n'), ((92, 12, 92, 32), 'unittest.TestSuite', 'unittest.TestSuite', ({}, {}), '()', False, 'import unittest\n'), ((100, 4, 100, 38), 'unittest.main', 'unittest.main', (), '', False, 'import unittest\n'), ((93, 18, 93, 57), 'unittest.makeSuite', 'unittest.makeSuite', ({(93, 37, 93, 56): 'TenderUAProcessTest'}, {}), '(TenderUAProcessTest)', False, 'import unittest\n'), ((94, 18, 94, 58), 'unittest.makeSuite', 'unittest.makeSuite', ({(94, 37, 94, 57): 'TenderUAResourceTest'}, {}), '(TenderUAResourceTest)', False, 'import unittest\n'), ((95, 18, 95, 50), 'unittest.makeSuite', 'unittest.makeSuite', ({(95, 37, 95, 49): 'TenderUATest'}, {}), '(TenderUATest)', False, 'import unittest\n')] |
jmilhone/fabry_perot | fabry/tools/file_io.py | cd3cb7a1dbcaa3c9382f9f2dbd3407d95447b3ce | from __future__ import print_function, division
import os
import numpy as np
import h5py
def dict_2_h5(fname, dic, append=False):
'''Writes a dictionary to a hdf5 file with given filename
It will use lzf compression for all numpy arrays
Args:
fname (str): filename to write to
dic (dict): dictionary to write
append (bool): if true, will append to file instead of overwriting, default=False
'''
if append:
method = 'r+'
else:
method = 'w'
with h5py.File(fname, method) as h5:
recursive_save_dict_to_h5(h5, '/', dic)
def h5_2_dict(fname):
'''Reads a dictionary from a hdf5 file with given filename
Args:
fname (str): hdf5 filename to read
Returns:
dict: dictionary of hdf5 keys
'''
with h5py.File(fname, 'r') as h5:
return recursive_load_dict_from_h5(h5, '/')
def prep_folder(path):
'''Checks if folder exists and recursively creates folders
to ensure the path is valid
Args:
path (str): path to folder
'''
if os.path.isdir(path):
return
else:
os.makedirs(path)
def recursive_save_dict_to_h5(h5, path, dic):
''' function used in save_dict_to_h5 in order to get recursion
'''
for key, item in dic.items():
if path + key in h5: ### overwrites pre-existing keys with same name
del h5[path + key]
if type(item) in [np.ndarray, np.generic]:
h5.create_dataset(path + key, data=item, compression='lzf')
elif type(item) != dict:
try:
h5.create_dataset(path + key, data=item)
except TypeError:
raise ValueError('Cannot save %s type' % type(item))
else:
recursive_save_dict_to_h5(h5, path + key + '/', item)
def recursive_load_dict_from_h5(h5, path):
''' function used in load_h5_to_dict in order to get recursion
'''
out_dict = {}
for key, item in h5[path].items():
# if type(item) == h5py._hl.dataset.Dataset:
if isinstance(item, h5py.Dataset):
out_dict[key] = item.value
# elif type(item) == h5py._hl.group.Group:
elif isinstance(item, h5py.Group):
out_dict[key] = recursive_load_dict_from_h5(h5, path + key + '/')
return out_dict
def read_Ld_results(Ld_directory):
'''Reads L and d histogram data from multinest run
Args:
Ld_directory (str): path to multinest save directory
Returns:
Tuple (np.ndarray, np.ndarray) L histogram values (in pixels), d histogram values (in mm)
'''
try:
fname = os.path.join(Ld_directory, "Ld_post_equal_weights.dat")
post = np.loadtxt(fname, ndmin=2)
except IOError:
fname = os.path.join(Ld_directory, "Ld_solver_post_equal_weights.dat")
post = np.loadtxt(fname, ndmin=2)
L = post[:, 0]
d = post[:, 1]
return L, d
def read_match_finesse_results(finesse_directory, errtemp=False):
fname = os.path.join(finesse_directory, "F_post_equal_weights.dat")
post = np.loadtxt(fname, ndmin=2)
F = post[:, 0]
V = post[:, 1]
T = post[:, 2]
if errtemp:
E = post[:, 3]
return F, V, T, E
else:
return F, V, T
def read_finesse_results(finesse_directory):
fname = os.path.join(finesse_directory, "finesse_post_equal_weights.dat")
post = np.loadtxt(fname, ndmin=2)
F = post[:, 0]
A = post[:, 1]
Arel = post[:, 2]
Ti = post[:, 3]
return F, A, Arel, Ti
def read_lyon_temp_results(temp_directory):
fname = os.path.join(temp_directory, 'temp_post_equal_weights.dat')
post = np.loadtxt(fname, ndmin=2)
T = post[:, 0]
V = post[:, 1]
# A = post[:,2]
# O = post[:,3]
return T, V # ,A#,O
| [((44, 7, 44, 26), 'os.path.isdir', 'os.path.isdir', ({(44, 21, 44, 25): 'path'}, {}), '(path)', False, 'import os\n'), ((103, 12, 103, 71), 'os.path.join', 'os.path.join', ({(103, 25, 103, 42): 'finesse_directory', (103, 44, 103, 70): '"""F_post_equal_weights.dat"""'}, {}), "(finesse_directory, 'F_post_equal_weights.dat')", False, 'import os\n'), ((104, 11, 104, 37), 'numpy.loadtxt', 'np.loadtxt', (), '', True, 'import numpy as np\n'), ((116, 12, 116, 77), 'os.path.join', 'os.path.join', ({(116, 25, 116, 42): 'finesse_directory', (116, 44, 116, 76): '"""finesse_post_equal_weights.dat"""'}, {}), "(finesse_directory, 'finesse_post_equal_weights.dat')", False, 'import os\n'), ((117, 11, 117, 37), 'numpy.loadtxt', 'np.loadtxt', (), '', True, 'import numpy as np\n'), ((127, 12, 127, 71), 'os.path.join', 'os.path.join', ({(127, 25, 127, 39): 'temp_directory', (127, 41, 127, 70): '"""temp_post_equal_weights.dat"""'}, {}), "(temp_directory, 'temp_post_equal_weights.dat')", False, 'import os\n'), ((128, 11, 128, 37), 'numpy.loadtxt', 'np.loadtxt', (), '', True, 'import numpy as np\n'), ((20, 9, 20, 33), 'h5py.File', 'h5py.File', ({(20, 19, 20, 24): 'fname', (20, 26, 20, 32): 'method'}, {}), '(fname, method)', False, 'import h5py\n'), ((33, 9, 33, 30), 'h5py.File', 'h5py.File', ({(33, 19, 33, 24): 'fname', (33, 26, 33, 29): '"""r"""'}, {}), "(fname, 'r')", False, 'import h5py\n'), ((47, 8, 47, 25), 'os.makedirs', 'os.makedirs', ({(47, 20, 47, 24): 'path'}, {}), '(path)', False, 'import os\n'), ((91, 16, 91, 71), 'os.path.join', 'os.path.join', ({(91, 29, 91, 41): 'Ld_directory', (91, 43, 91, 70): '"""Ld_post_equal_weights.dat"""'}, {}), "(Ld_directory, 'Ld_post_equal_weights.dat')", False, 'import os\n'), ((92, 15, 92, 41), 'numpy.loadtxt', 'np.loadtxt', (), '', True, 'import numpy as np\n'), ((94, 16, 94, 78), 'os.path.join', 'os.path.join', ({(94, 29, 94, 41): 'Ld_directory', (94, 43, 94, 77): '"""Ld_solver_post_equal_weights.dat"""'}, {}), "(Ld_directory, 'Ld_solver_post_equal_weights.dat')", False, 'import os\n'), ((95, 15, 95, 41), 'numpy.loadtxt', 'np.loadtxt', (), '', True, 'import numpy as np\n')] |
lucaspompeun/metodos-matematicos-aplicados-nas-engenharias-via-sistemas-computacionais | derivadas.py | 008d397f76a935af1aba530cc0134b9dd326d3ac | # -*- coding: utf-8 -*-
"""
Created on Sat Mar 23 21:57:48 2019
INSTITUTO FEDERAL DE EDUCAÇÃO, CIÊNCIA E TECNOLOGIA DO PÁRA - IFPA ANANINDEUA
@author:
Prof. Dr. Denis C. L. Costa
Discentes:
Heictor Alves de Oliveira Costa
Lucas Pompeu Neves
Grupo de Pesquisa:
Gradiente de Modelagem Matemática e
Simulação Computacional - GM²SC
Assunto:
Derivada de uma Função com uma variável independente
Nome do sript: derivadas
Disponível em:
https://github.com/GM2SC/DEVELOPMENT-OF-MATHEMATICAL-METHODS-IN-
COMPUTATIONAL-ENVIRONMENT/blob/master/SINEPEM_2019/derivadas.py
"""
# Bibliotecas
# Cálculo Diferencial e Integral: sympy
import sympy as sy
# Variáveis simbólicas
x = sy.symbols('x')
print('')
# Função de uma Variável: f(x)
def f(x):
return 2*x**3 - 5*x**2
# (f(x), x, 1) --> (Função, variável, ordem da derivada)
# Derivada 1ª da Função: df1(x)
def df1(x):
return sy.diff(f(x), x,1)
# Derivada 2ª da Função: df2(x)
def df2(x):
return sy.diff(f(x), x,2)
print('')
print('=======================================')
print('Função Analisada: f(x) =', f(x))
print('Derivada 1ª da Função: df1(x) =', df1(x))
print('Derivada 2ª da Função: df2(x) =', df2(x))
print('=======================================')
print('')
# Valor Numérico das Derivadas: x = x1 e x = x2
x1 = 3
print('Valor Numérico da Derivada 1ª em x1 =', x1)
VN_df1 = df1(x).subs(x,x1)
print('VN_df1 =', VN_df1)
print('')
x2 = -1
print('Valor Numérico da Derivada 2ª em x2 =', x2)
VN_df2 = df2(x).subs(x,x2)
print('VN_df2 =', VN_df2)
print('')
print('---> Fim do Programa derivadas <---')
| [((35, 4, 35, 19), 'sympy.symbols', 'sy.symbols', ({(35, 15, 35, 18): '"""x"""'}, {}), "('x')", True, 'import sympy as sy\n')] |
haotianzhu/C_Questions_Solutions | trapping_rain_water/solution.py | 2677b6d26bedb9bc6c6137a2392d0afaceb91ec2 | class Solution:
def trap(self, height):
"""
:type height: List[int]
:rtype: int
"""
if not height:
return 0
left = 0
right = len(height)-1
total_area = 0
if height[left] <= height[right]:
m = left
else:
m =right
while(left < right):
if height[left] <= height[right]:
# move m from left to right
m += 1
if height[m] >= height[left]:
# found a local convave shape
left = m # search the remainder part from [m,right]
m = left if height[left] <= height[right] else right # reset m as min hight between left and right
else:
# since right is higher than left, we can guarantee that
# each index in interval (left,right) will increase height[left]-height[m] 's water trapped area
total_area += height[left]-height[m]
else:
# move m from right to left
m-=1
if height[m] >= height[right]:
# found a local convave shape
right = m
m = left if height[left] <= height[right] else right
else:
# same as left part above
total_area += height[right]-height[m]
return total_area
if __name__ == '__main__':
res = Solution().trap([])
print(res) | [] |
yu9824/AtCoder | ABC/178/D.py | 50a209059c005efadc1c912e443ec41365381c16 | # list(map(int, input().split()))
# int(input())
import sys
sys.setrecursionlimit(10 ** 9)
'''
DP
A[n] = A[n-3] + A[n-4] + ... + A[0] (O(S**2))
ここで,A[n-1] = A[n-4] + A[n-5] + ... + A[0]より,
A[n] = A[n-3] + A[n-1]とも表せる.(O(S)でより高速.)
'''
mod = 10 ** 9 + 7
def main(*args):
S = args[0]
A = [0 for s in range(S+1)]
A[0] = 1 # 何も足さない (= S自身のみの1通りを表すためのやつ.)
s = 3
while s <= S:
# A[s] = sum(A[:(s-3)+1]) % mod # どっちでもOK.速いのは下のやつ.
A[s] = (A[s-3] + A[s-1]) % mod
s += 1
print(A[S])
if __name__ == '__main__':
args = [int(input())]
main(*args)
| [((5, 0, 5, 30), 'sys.setrecursionlimit', 'sys.setrecursionlimit', ({(5, 22, 5, 29): '(10 ** 9)'}, {}), '(10 ** 9)', False, 'import sys\n')] |
OzanCKN/OpenMDAO-Framework | contrib/analysis_server/src/analysis_server/factory.py | 05e9d4b9bc41d0ec00a7073545146c925cd33b0b | from openmdao.main.factory import Factory
from analysis_server import client, proxy, server
class ASFactory(Factory):
"""
Factory for components running under an AnalysisServer.
An instance would typically be passed to
:meth:`openmdao.main.factorymanager.register_class_factory`.
host: string
Host name or IP address of the AnalysisServer to connect to.
port: int
Port number of the AnalysisServer to connect to.
"""
def __init__(self, host='localhost', port=server.DEFAULT_PORT):
super(ASFactory, self).__init__()
self._host = host
self._port = port
self._client = client.Client(host, port)
def create(self, typname, version=None, server=None,
res_desc=None, **ctor_args):
"""
Create a `typname` object.
typname: string
Type of object to create.
version: string or None
Version of `typname` to create.
server:
Not used.
res_desc: dict or None
Not used.
ctor_args: dict
Other constructor arguments. Not used.
"""
for typ, ver in self.get_available_types():
if typ == typname:
if version is None or ver == version:
return proxy.ComponentProxy(typname, self._host, self._port)
return None
def get_available_types(self, groups=None):
"""
Returns a set of tuples of the form ``(typname, version)``,
one for each available component type.
groups: list[string]
OpenMDAO entry point groups.
Only 'openmdao.component' is supported.
"""
if groups is not None and 'openmdao.component' not in groups:
return []
types = []
self._list('', types)
return types
def _list(self, category, types):
""" List components in `category` and sub-categories. """
if category:
category += '/'
for comp in self._client.list_components(category):
comp = '%s%s' % (category, comp)
try:
versions = self._client.versions(comp)
except RuntimeError:
types.append((comp, ''))
else:
for version in versions:
types.append((comp, version))
for sub in self._client.list_categories(category):
sub = '%s%s' % (category, sub)
self._list(sub, types)
| [((23, 23, 23, 48), 'analysis_server.client.Client', 'client.Client', ({(23, 37, 23, 41): 'host', (23, 43, 23, 47): 'port'}, {}), '(host, port)', False, 'from analysis_server import client, proxy, server\n'), ((48, 27, 48, 80), 'analysis_server.proxy.ComponentProxy', 'proxy.ComponentProxy', ({(48, 48, 48, 55): 'typname', (48, 57, 48, 67): 'self._host', (48, 69, 48, 79): 'self._port'}, {}), '(typname, self._host, self._port)', False, 'from analysis_server import client, proxy, server\n')] |
zinaukarenku/zkr-platform | web/migrations/0007_auto_20180824_0925.py | 8daf7d1206c482f1f8e0bcd54d4fde783e568774 | # Generated by Django 2.1 on 2018-08-24 09:25
from django.db import migrations, models
import web.models
class Migration(migrations.Migration):
dependencies = [
('web', '0006_organizationmember_user'),
]
operations = [
migrations.AlterField(
model_name='organizationpartner',
name='logo',
field=models.ImageField(upload_to=web.models.OrganizationPartner._organization_partner_logo_file),
),
]
| [((17, 18, 17, 109), 'django.db.models.ImageField', 'models.ImageField', (), '', False, 'from django.db import migrations, models\n')] |
SaurabhAgarwala/antlir | antlir/bzl/image_actions/tarball.bzl | d9513d35d3eaa9d28717a40057a14d099c6ec775 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
load("//antlir/bzl:maybe_export_file.bzl", "maybe_export_file")
load("//antlir/bzl:shape.bzl", "shape")
load(
"//antlir/bzl:target_tagger.bzl",
"image_source_as_target_tagged_shape",
"new_target_tagger",
"target_tagged_image_source_shape",
"target_tagger_to_feature",
)
tarball_t = shape.shape(
force_root_ownership = shape.field(bool, optional = True),
into_dir = shape.path(),
source = target_tagged_image_source_shape,
)
def image_tarball(source, dest, force_root_ownership = False):
"""
`image.tarball("files/xyz.tar", "/a/b")` extracts tarball located at `files/xyz.tar` to `/a/b` in the image --
- `source` is one of:
- an `image.source` (docs in `image_source.bzl`), or
- the path of a target outputting a tarball target path,
e.g. an `export_file` or a `genrule`
- `dest` is the destination of the unpacked tarball in the image.
This is an image-absolute path to a directory that must be created
by another `feature_new` item.
"""
target_tagger = new_target_tagger()
tarball = shape.new(
tarball_t,
force_root_ownership = force_root_ownership,
into_dir = dest,
source = image_source_as_target_tagged_shape(
target_tagger,
maybe_export_file(source),
),
)
return target_tagger_to_feature(
target_tagger,
items = struct(tarballs = [tarball]),
# The `fake_macro_library` docblock explains this self-dependency
extra_deps = ["//antlir/bzl/image_actions:tarball"],
)
| [] |
haobtc/sqlmat | sqlmat/utils.py | c6b6ef966ba01173b6a485afb932ed438c35b211 | from typing import Tuple, List, Optional
import json
import sys
import os
import shlex
import asyncio
import argparse
import logging
import tempfile
from urllib.parse import urlparse
logger = logging.getLogger(__name__)
def find_sqlmat_json() -> Optional[dict]:
json_path = os.getenv('SQLMAT_JSON_PATH')
if json_path:
with open(json_path) as f:
cfg = json.load(f)
return cfg
# iterate through the current dir up to the root dir "/" to find a
# .sqlmat.json
workdir = os.path.abspath(os.getcwd())
while workdir:
json_path = os.path.join(workdir, '.sqlmat.json')
if os.path.exists(json_path):
with open(json_path) as f:
cfg = json.load(f)
return cfg
parentdir = os.path.abspath(os.path.join(workdir, '..'))
if parentdir == workdir:
break
workdir = parentdir
logger.warning('fail to find .sqlmat.json')
return None
def find_dsn(prog: str, desc: str) -> Tuple[str, List[str]]:
parser = argparse.ArgumentParser(
prog=prog,
description=desc)
parser.add_argument('-d', '--dsn',
type=str,
help='postgresql dsn')
parser.add_argument('-g', '--db',
type=str,
default='default',
help='postgresql db instance defined in .sqlmat.json')
parser.add_argument('callee_args',
type=str,
nargs='*',
help='command line arguments of callee programs')
# from arguments
args = parser.parse_args()
if args.dsn:
return args.dsn, args.callee_args
# find dsn from ./.sqlmat.json
cfg = find_sqlmat_json()
if cfg:
dsn = cfg['databases'][args.db]['dsn']
assert isinstance(dsn, str)
return dsn, args.callee_args
# default dsn using username
user = os.getenv('USER', '')
default_dsn = f'postgres://{user}@127.0.0.1:5432/{args.db}'
logger.warning('no postgres dsn specified, use %s instead', default_dsn)
return default_dsn, args.callee_args
def joinargs(callee_args: List[str]) -> str:
if hasattr(shlex, 'join'):
return shlex.join(callee_args)
else:
return ' '.join(shlex.quote(a) for a in callee_args)
# run psql client
async def run_shell(dsn: str, callee_args: List[str]) -> None:
p = urlparse(dsn)
username = p.username or ''
password = p.password or ''
dbname = p.path[1:]
hostname = p.hostname
port = p.port or 5432
temp_pgpass = tempfile.NamedTemporaryFile(mode='w')
print(
'{}:{}:{}:{}:{}'.format(hostname, port, dbname, username, password),
file=temp_pgpass,
flush=True)
os.environ['PGPASSFILE'] = temp_pgpass.name
command = 'psql -h{} -p{} -U{} {} {}'.format(hostname, port, username, joinargs(callee_args), dbname)
proc = await asyncio.create_subprocess_shell(command)
await proc.communicate()
def cl_run_shell() -> None:
dsn, callee_args = find_dsn('sqlmat-shell', 'run psql client shell')
loop = asyncio.get_event_loop()
loop.run_until_complete(run_shell(dsn, callee_args))
# run dbdump
async def run_dbdump(dsn: str, callee_args: List[str]) -> None:
p = urlparse(dsn)
username = p.username or ''
password = p.password or ''
dbname = p.path[1:]
hostname = p.hostname
port = p.port or 5432
temp_pgpass = tempfile.NamedTemporaryFile(mode='w')
print(
'{}:{}:{}:{}:{}'.format(hostname, port, dbname, username, password),
file=temp_pgpass,
flush=True)
os.environ['PGPASSFILE'] = temp_pgpass.name
command = 'pg_dump -h{} -p{} -U{} {} {}'.format(hostname, port, username, joinargs(callee_args), dbname)
proc = await asyncio.create_subprocess_shell(command)
await proc.communicate()
def cl_run_dbdump() -> None:
dsn, callee_args = find_dsn('sqlmat-dump', 'dump database')
loop = asyncio.get_event_loop()
loop.run_until_complete(run_dbdump(dsn, callee_args))
# generate alembic migrations
def gen_migrate(dsn: str) -> None:
init_data = ALEMBIC_INIT.replace('{{dsn}}', dsn)
with open('alembic.ini', 'w') as f:
f.write(init_data)
def cl_gen_migrate() -> None:
dsn, callee_args = find_dsn('sqlmat-genmigrate', 'generate alembic migration')
gen_migrate(dsn)
print('Wrote alembic.ini')
ALEMBIC_INIT = '''\
# A generic, single database configuration.
[alembic]
# path to migration scripts
script_location = migrations
# template used to generate migration files
# file_template = %%(rev)s_%%(slug)s
# timezone to use when rendering the date
# within the migration file as well as the filename.
# string value is passed to dateutil.tz.gettz()
# leave blank for localtime
# timezone =
# max length of characters to apply to the
# "slug" field
#truncate_slug_length = 40
# set to 'true' to run the environment during
# the 'revision' command, regardless of autogenerate
# revision_environment = false
# set to 'true' to allow .pyc and .pyo files without
# a source .py file to be detected as revisions in the
# versions/ directory
# sourceless = false
# version location specification; this defaults
# to migrations/versions. When using multiple version
# directories, initial revisions must be specified with --version-path
# version_locations = %(here)s/bar %(here)s/bat migrations/versions
# the output encoding used when revision files
# are written from script.py.mako
# output_encoding = utf-8
#sqlalchemy.url = driver://user:pass@localhost/dbname
sqlalchemy.url = {{dsn}}
# Logging configuration
[loggers]
keys = root,sqlalchemy,alembic
[handlers]
keys = console
[formatters]
keys = generic
[logger_root]
level = WARN
handlers = console
qualname =
[logger_sqlalchemy]
level = WARN
handlers =
qualname = sqlalchemy.engine
[logger_alembic]
level = INFO
handlers =
qualname = alembic
[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic
[formatter_generic]
format = %(levelname)-5.5s [%(name)s] %(message)s
datefmt = %H:%M:%S
'''
| [((12, 9, 12, 36), 'logging.getLogger', 'logging.getLogger', ({(12, 27, 12, 35): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((15, 16, 15, 45), 'os.getenv', 'os.getenv', ({(15, 26, 15, 44): '"""SQLMAT_JSON_PATH"""'}, {}), "('SQLMAT_JSON_PATH')", False, 'import os\n'), ((38, 13, 40, 25), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((68, 11, 68, 32), 'os.getenv', 'os.getenv', ({(68, 21, 68, 27): '"""USER"""', (68, 29, 68, 31): '""""""'}, {}), "('USER', '')", False, 'import os\n'), ((82, 8, 82, 21), 'urllib.parse.urlparse', 'urlparse', ({(82, 17, 82, 20): 'dsn'}, {}), '(dsn)', False, 'from urllib.parse import urlparse\n'), ((89, 18, 89, 55), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', (), '', False, 'import tempfile\n'), ((101, 11, 101, 35), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ({}, {}), '()', False, 'import asyncio\n'), ((106, 8, 106, 21), 'urllib.parse.urlparse', 'urlparse', ({(106, 17, 106, 20): 'dsn'}, {}), '(dsn)', False, 'from urllib.parse import urlparse\n'), ((113, 18, 113, 55), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', (), '', False, 'import tempfile\n'), ((125, 11, 125, 35), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ({}, {}), '()', False, 'import asyncio\n'), ((23, 30, 23, 41), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os\n'), ((25, 20, 25, 57), 'os.path.join', 'os.path.join', ({(25, 33, 25, 40): 'workdir', (25, 42, 25, 56): '""".sqlmat.json"""'}, {}), "(workdir, '.sqlmat.json')", False, 'import os\n'), ((26, 11, 26, 36), 'os.path.exists', 'os.path.exists', ({(26, 26, 26, 35): 'json_path'}, {}), '(json_path)', False, 'import os\n'), ((76, 15, 76, 38), 'shlex.join', 'shlex.join', ({(76, 26, 76, 37): 'callee_args'}, {}), '(callee_args)', False, 'import shlex\n'), ((96, 17, 96, 57), 'asyncio.create_subprocess_shell', 'asyncio.create_subprocess_shell', ({(96, 49, 96, 56): 'command'}, {}), '(command)', False, 'import asyncio\n'), ((120, 17, 120, 57), 'asyncio.create_subprocess_shell', 'asyncio.create_subprocess_shell', ({(120, 49, 120, 56): 'command'}, {}), '(command)', False, 'import asyncio\n'), ((18, 18, 18, 30), 'json.load', 'json.load', ({(18, 28, 18, 29): 'f'}, {}), '(f)', False, 'import json\n'), ((30, 36, 30, 63), 'os.path.join', 'os.path.join', ({(30, 49, 30, 56): 'workdir', (30, 58, 30, 62): '""".."""'}, {}), "(workdir, '..')", False, 'import os\n'), ((28, 22, 28, 34), 'json.load', 'json.load', ({(28, 32, 28, 33): 'f'}, {}), '(f)', False, 'import json\n'), ((78, 24, 78, 38), 'shlex.quote', 'shlex.quote', ({(78, 36, 78, 37): 'a'}, {}), '(a)', False, 'import shlex\n')] |
young-geng/UVaClient | submit.py | 8ff4a368ac8f0395248292a0d903047a074752ed | import requests
from sys import stderr
import re
def submit(session, problem_id, language, source):
language_code = {
'c': 1,
'java': 2,
'c++': 3,
'pascal': 4,
'c++11': 5
}
url = "http://uva.onlinejudge.org/index.php?option=com_onlinejudge&Itemid=25&page=save_submission"
data = {
'problemid': '',
'category': '',
'localid': problem_id,
'language': language_code[language],
'code': source
}
session.post(url, data=data)
| [] |
adagj/ECS_SOconvection | FIGURE4/eddymoc_scripts/noresm_cesm_eddymoc_150yrs.py | d1bb935b37380f11e021a463c6a807d7527220a6 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Ada Gjermundsen
year: 2019 - 2021
This script is used to calculate the eddy-induced overturning in CESM2 and NorESM2 (LM and MM) south of 50S
for the CMIP experiments piControl and abrupt-4xCO2 after 150
the average time is 30 years
The result is used in FIGURE 4
"""
import sys
sys.path.insert(1, '/scratch/adagj/CMIP6/CLIMSENS/CMIP6_UTILS')
import CMIP6_ATMOS_UTILS as atmos
import CMIP6_SEAICE_UTILS as ocean
from read_modeldata_cmip6 import ecs_models_cmip6, make_filelist_cmip6, Modelinfo
import numpy as np
from dask.diagnostics import ProgressBar
import warnings
warnings.simplefilter('ignore')
import xarray as xr
xr.set_options(enable_cftimeindex=True)
def make_attributes(da, var, expid):
da.attrs['long_name']='Global Ocean Meridional Overturning Mass Streamfunction Due to Parameterized Mesoscale Advection'
da.attrs['name']='eddymoc'
da.attrs['units']='kg s-1'
da.attrs['standard_name']='global_ocean_meridional_overturning_mass_streamfunction_due_to_parameterized_mesoscale_eddy_advection'
da.attrs['expid']=expid
ds = da.to_dataset(name = var)
return ds
def extract_global_moc(modelname, da, dac, var):
if 'sector' in da.coords:
da = da.drop('sector')
if 'sector' in dac.coords:
dac = dac.drop('sector')
da = da.isel(basin=-1)
dac = dac.isel(basin=-1)
return da, dac
def make_reference_slice(model, ds, var, endyr):
ds = ocean.consistent_naming(ds)
ds = atmos.fix_time(ds, 1)
return ds
def make_yearly_avg(model, ds, var, endyr):
da = atmos.yearly_avg(ds[var])
if model.expid in ['piControl']:
da = da.isel(year=slice(model.branchtime_year+endyr-30, model.branchtime_year+endyr))
else:
da = da.isel(year=slice(endyr-30, endyr))
da = da.mean(dim='year')
return da
def make_modelobj(modelname, expinfo, expid='piControl'):
model = Modelinfo(name = modelname, institute = expinfo['institute'], expid = expid, realm = 'Omon',
realiz=expinfo['variant_labels'][0], grid_atmos = expinfo['grid_label_atmos'][0], grid_ocean = expinfo['grid_label_ocean'], branchtime_year=expinfo['branch_yr'])
return model
def read_files(model, var):
if model.name in ['NorESM2-LM', 'NorESM2-MM']:
make_filelist_cmip6(model, var, component = 'ocean', activity_id='CMIP',path_to_data = '/projects/NS9034K/CMIP6/')
else:
make_filelist_cmip6(model, var, component = 'ocean')
print(model.filenames)
if model.filenames:
if len(model.filenames)>1:
ds = xr.open_mfdataset(model.filenames, combine='nested', concat_dim='time', parallel=True, chunks={"time":1})
else:
ds = xr.open_dataset(model.filenames[0], chunks={"time":1})
print('%s loaded for model: %s, experiment: piControl . Lenght of simulation: %.1f years'%(var,model.name, len(ds[var].time.values)/12))
else:
print('%s not loaded for model %s, experiment: piControl. Skipping model! Please check!'%(var,model.name))
return ds
def make_last_30yrs_avg(models, var, outpath, endyr=150):
print('global eddy moc: \n')
for modelname,expinfo in models.items():
print(modelname)
if var in ['msftmzsmpa'] and modelname in ['NorESM2-LM']:
continue
modelctrl = make_modelobj(modelname, expinfo, expid='piControl')
dsc = read_files(modelctrl, var)
dsc = make_reference_slice(modelctrl, dsc, var, endyr)
model4xco2 = make_modelobj(modelname, expinfo, expid='abrupt-4xCO2')
ds = read_files(model4xco2, var)
ds = make_reference_slice(model4xco2, ds, var, endyr)
ds, dsc = extract_global_moc(modelname, ds, dsc, var)
da = make_yearly_avg(model4xco2, ds, var, endyr)
dac = make_yearly_avg(modelctrl, dsc, var, endyr)
dsout_ctrl = make_attributes(dac, var, 'piControl')
dsout_case = make_attributes(da, var, 'abrupt-4xCO2')
print(dsout_ctrl)
print(dsout_case)
dsout_ctrl = dsout_ctrl.to_netcdf(outpath + var +'_' + modelctrl.realm +'_' + modelctrl.name + '_' + modelctrl.expid + '_' + modelctrl.realiz + '_'+str(endyr) + '_30yravg.nc', compute=False)
dsout_case = dsout_case.to_netcdf(outpath + var +'_' + model4xco2.realm +'_' + model4xco2.name + '_' + model4xco2.expid + '_' + model4xco2.realiz + '_'+str(endyr) + '_30yravg.nc', compute=False)
with ProgressBar():
result = dsout_ctrl.compute()
result = dsout_case.compute()
del model4xco2, modelctrl, dsc, ds, dac, da, dsout_ctrl, dsout_case
if __name__ == '__main__':
outpath = 'path_to_outdata/'
models = ecs_models_cmip6()
models = {'NorESM2-LM':models['NorESM2-LM'], 'CESM2':models['CESM2']}
for var in ['msftmzsmpa', 'msftmzmpa']:
make_last_30yrs_avg(models, var=var, outpath=outpath, endyr=150)
| [((13, 0, 13, 63), 'sys.path.insert', 'sys.path.insert', ({(13, 16, 13, 17): '(1)', (13, 19, 13, 62): '"""/scratch/adagj/CMIP6/CLIMSENS/CMIP6_UTILS"""'}, {}), "(1, '/scratch/adagj/CMIP6/CLIMSENS/CMIP6_UTILS')", False, 'import sys\n'), ((20, 0, 20, 31), 'warnings.simplefilter', 'warnings.simplefilter', ({(20, 22, 20, 30): '"""ignore"""'}, {}), "('ignore')", False, 'import warnings\n'), ((22, 0, 22, 39), 'xarray.set_options', 'xr.set_options', (), '', True, 'import xarray as xr\n'), ((43, 9, 43, 36), 'CMIP6_SEAICE_UTILS.consistent_naming', 'ocean.consistent_naming', ({(43, 33, 43, 35): 'ds'}, {}), '(ds)', True, 'import CMIP6_SEAICE_UTILS as ocean\n'), ((44, 9, 44, 30), 'CMIP6_ATMOS_UTILS.fix_time', 'atmos.fix_time', ({(44, 24, 44, 26): 'ds', (44, 28, 44, 29): '1'}, {}), '(ds, 1)', True, 'import CMIP6_ATMOS_UTILS as atmos\n'), ((48, 9, 48, 34), 'CMIP6_ATMOS_UTILS.yearly_avg', 'atmos.yearly_avg', ({(48, 26, 48, 33): 'ds[var]'}, {}), '(ds[var])', True, 'import CMIP6_ATMOS_UTILS as atmos\n'), ((57, 12, 58, 178), 'read_modeldata_cmip6.Modelinfo', 'Modelinfo', (), '', False, 'from read_modeldata_cmip6 import ecs_models_cmip6, make_filelist_cmip6, Modelinfo\n'), ((106, 13, 106, 31), 'read_modeldata_cmip6.ecs_models_cmip6', 'ecs_models_cmip6', ({}, {}), '()', False, 'from read_modeldata_cmip6 import ecs_models_cmip6, make_filelist_cmip6, Modelinfo\n'), ((63, 8, 63, 123), 'read_modeldata_cmip6.make_filelist_cmip6', 'make_filelist_cmip6', (), '', False, 'from read_modeldata_cmip6 import ecs_models_cmip6, make_filelist_cmip6, Modelinfo\n'), ((65, 8, 65, 61), 'read_modeldata_cmip6.make_filelist_cmip6', 'make_filelist_cmip6', (), '', False, 'from read_modeldata_cmip6 import ecs_models_cmip6, make_filelist_cmip6, Modelinfo\n'), ((69, 18, 69, 123), 'xarray.open_mfdataset', 'xr.open_mfdataset', (), '', True, 'import xarray as xr\n'), ((71, 18, 71, 72), 'xarray.open_dataset', 'xr.open_dataset', (), '', True, 'import xarray as xr\n'), ((98, 13, 98, 26), 'dask.diagnostics.ProgressBar', 'ProgressBar', ({}, {}), '()', False, 'from dask.diagnostics import ProgressBar\n')] |
KnowledgeCaptureAndDiscovery/wings-client | wings/planner.py | af1d068f4adc07d9060afa94dc99e0b2565be088 | import json
import re
class Planner(object):
def __init__(self, api_client):
self.api_client = api_client
def set_template(self, template):
self.wflowns = self.api_client.get_export_url() + "workflows/" + template + ".owl#"
self.wflowid = self.wflowns + template
def _set_bindings(self, invar, val, data_bindings, parameter_bindings, parameter_types):
if isinstance(val, basestring) and val.startswith('file:'):
data = data_bindings.get(self.wflowns + invar, [])
data.append(self.api_client.libns + val[5:])
data_bindings[self.wflowns + invar] = data
else:
parameter_bindings[self.wflowns + invar] = val
typeid = self.api_client.xsdns + "string"
if type(val) is int:
typeid = self.api_client.xsdns + "integer"
elif type(val) is float:
typeid = self.api_client.xsdns + "float"
elif type(val) is bool:
typeid = self.api_client.xsdns + "boolean"
parameter_types[self.wflowns + invar] = typeid
def get_expansions(self, inputs):
postdata = [('templateId', self.wflowid),
('componentBindings', '{}'), ('parameterBindings', '{}')]
data_bindings = dict()
parameter_bindings = dict()
parameter_types = dict()
for invar in inputs:
if type(inputs[invar]) is list:
for val in inputs[invar]:
self._set_bindings(
invar, val, data_bindings, parameter_bindings, parameter_types)
else:
self._set_bindings(
invar, inputs[invar], data_bindings, parameter_bindings, parameter_types)
postdata = {
"templateId": self.wflowid,
"dataBindings": data_bindings,
"parameterBindings": parameter_bindings,
"parameter_types": parameter_types,
"componentBindings": dict()
}
resp = self.api_client.session.post(
self.api_client.get_request_url() + 'plan/getExpansions', json=postdata)
return resp.json()
def select_template(self, templates):
from sys import version_info
py3 = version_info[0] > 2
i = 1
num = len(templates)
for tpl in templates:
print("%s. %s" %
(i, self.api_client.get_template_description(tpl['template'])))
i += 1
index = 0
while True:
if py3:
index = int(input("Please enter your selection: "))
else:
index = int(raw_input("Please enter your selection: "))
if index < 1 or index > num:
print("Invalid Selection. Try again")
else:
break
return templates[index - 1]
def get_template_description(self, template):
regex = re.compile(r"^.*#")
components = {}
for nodeid in template['Nodes']:
node = template['Nodes'][nodeid]
comp = regex.sub("", node['componentVariable']['binding']['id'])
if comp in components:
components[comp] += 1
else:
components[comp] = 1
description = regex.sub("", template['id']) + " ( "
i = 0
for comp in components:
if i > 0:
description += ", "
description += str(components[comp]) + " " + comp
i += 1
description += " )"
return description
def run_workflow(self, template, seed):
postdata = {
'template_id': seed["template"]["id"],
'json': json.dumps(template["template"]),
'constraints_json': json.dumps(template["constraints"]),
'seed_json': json.dumps(seed["template"]),
'seed_constraints_json': json.dumps(seed["constraints"])
}
resp = self.api_client.session.post(self.api_client.get_request_url(
) + 'executions/runWorkflow', data=postdata)
regex = re.compile(r"^.*#")
return regex.sub("", resp.text)
| [((78, 16, 78, 35), 're.compile', 're.compile', ({(78, 27, 78, 34): '"""^.*#"""'}, {}), "('^.*#')", False, 'import re\n'), ((108, 16, 108, 35), 're.compile', 're.compile', ({(108, 27, 108, 34): '"""^.*#"""'}, {}), "('^.*#')", False, 'import re\n'), ((101, 20, 101, 52), 'json.dumps', 'json.dumps', ({(101, 31, 101, 51): "template['template']"}, {}), "(template['template'])", False, 'import json\n'), ((102, 32, 102, 67), 'json.dumps', 'json.dumps', ({(102, 43, 102, 66): "template['constraints']"}, {}), "(template['constraints'])", False, 'import json\n'), ((103, 25, 103, 53), 'json.dumps', 'json.dumps', ({(103, 36, 103, 52): "seed['template']"}, {}), "(seed['template'])", False, 'import json\n'), ((104, 37, 104, 68), 'json.dumps', 'json.dumps', ({(104, 48, 104, 67): "seed['constraints']"}, {}), "(seed['constraints'])", False, 'import json\n')] |
salayhin/talkofacta | eggs/ZConfig-3.0.4-py2.7.egg/ZConfig/tests/test_cookbook.py | 8b5a14245dd467bb1fda75423074c4840bd69fb7 | ##############################################################################
#
# Copyright (c) 2004 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Tests of examples from the online cookbook, so we don't break them
down the road. Unless we really mean to.
The ZConfig Cookbook is available online at:
http://dev.zope.org/Zope3/ZConfig
"""
import ZConfig.tests.support
import unittest
def basic_key_mapping_password_to_passwd(key):
# Lower-case the key since that's what basic-key does:
key = key.lower()
# Now map password to passwd:
if key == "password":
key = "passwd"
return key
def user_info_conversion(section):
return section
class CookbookTestCase(ZConfig.tests.support.TestHelper, unittest.TestCase):
def test_rewriting_key_names(self):
schema = self.load_schema_text("""
<schema prefix='%s'>
<sectiontype name='userinfo' datatype='.user_info_conversion'
keytype='.basic_key_mapping_password_to_passwd'>
<key name='userid' datatype='integer'/>
<key name='username' datatype='identifier'/>
<key name='password'/>
</sectiontype>
<section type='userinfo' name='*' attribute='userinfo'/>
</schema>
""" % __name__)
config = self.load_config_text(schema, """\
<userinfo>
USERID 42
USERNAME foouser
PASSWORD yeah-right
</userinfo>
""")
self.assertEqual(config.userinfo.userid, 42)
self.assertEqual(config.userinfo.username, "foouser")
self.assertEqual(config.userinfo.passwd, "yeah-right")
self.assertTrue(not hasattr(config.userinfo, "password"))
def test_suite():
return unittest.makeSuite(CookbookTestCase)
if __name__ == "__main__":
unittest.main(defaultTest="test_suite")
| [((67, 11, 67, 47), 'unittest.makeSuite', 'unittest.makeSuite', ({(67, 30, 67, 46): 'CookbookTestCase'}, {}), '(CookbookTestCase)', False, 'import unittest\n'), ((70, 4, 70, 43), 'unittest.main', 'unittest.main', (), '', False, 'import unittest\n')] |
ResearchHub/ResearchHub-Backend-Open | src/bullet_point/migrations/0006_bulletpoint_sift_risk_score.py | d36dca33afae2d442690694bb2ab17180d84bcd3 | # Generated by Django 2.2 on 2020-11-07 01:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('bullet_point', '0005_bulletpoint_created_location'),
]
operations = [
migrations.AddField(
model_name='bulletpoint',
name='sift_risk_score',
field=models.FloatField(blank=True, null=True),
),
]
| [((16, 18, 16, 58), 'django.db.models.FloatField', 'models.FloatField', (), '', False, 'from django.db import migrations, models\n')] |
ezhkovskii/instagrapi-rest | main.py | a3570f279ef0973856b92e433b117e0be0d4c713 | import pkg_resources
from fastapi import FastAPI
from fastapi.openapi.utils import get_openapi
from starlette.responses import RedirectResponse, JSONResponse
from routers import auth, media, video, photo, user, igtv, clip, album, story, hashtag, direct
app = FastAPI()
app.include_router(auth.router)
app.include_router(media.router)
app.include_router(video.router)
app.include_router(photo.router)
app.include_router(user.router)
app.include_router(igtv.router)
app.include_router(clip.router)
app.include_router(album.router)
app.include_router(story.router)
app.include_router(hashtag.router)
app.include_router(direct.router)
@app.get("/", tags=["system"], summary="Redirect to /docs")
async def root():
"""Redirect to /docs
"""
return RedirectResponse(url="/docs")
@app.get("/version", tags=["system"], summary="Get dependency versions")
async def version():
"""Get dependency versions
"""
versions = {}
for name in ('instagrapi', ):
item = pkg_resources.require(name)
if item:
versions[name] = item[0].version
return versions
@app.exception_handler(Exception)
async def handle_exception(request, exc: Exception):
return JSONResponse({
"detail": str(exc),
"exc_type": str(type(exc).__name__)
}, status_code=500)
def custom_openapi():
if app.openapi_schema:
return app.openapi_schema
# for route in app.routes:
# body_field = getattr(route, 'body_field', None)
# if body_field:
# body_field.type_.__name__ = 'name'
openapi_schema = get_openapi(
title="instagrapi-rest",
version="1.0.0",
description="RESTful API Service for instagrapi",
routes=app.routes,
)
app.openapi_schema = openapi_schema
return app.openapi_schema
app.openapi = custom_openapi
| [((8, 6, 8, 15), 'fastapi.FastAPI', 'FastAPI', ({}, {}), '()', False, 'from fastapi import FastAPI\n'), ((25, 11, 25, 40), 'starlette.responses.RedirectResponse', 'RedirectResponse', (), '', False, 'from starlette.responses import RedirectResponse, JSONResponse\n'), ((55, 21, 60, 5), 'fastapi.openapi.utils.get_openapi', 'get_openapi', (), '', False, 'from fastapi.openapi.utils import get_openapi\n'), ((34, 15, 34, 42), 'pkg_resources.require', 'pkg_resources.require', ({(34, 37, 34, 41): 'name'}, {}), '(name)', False, 'import pkg_resources\n')] |
suet-lee/mycelium | scripts/run_d435.py | db83cd3ab00697f28b2def2cebcdef52698fdd92 | #!/usr/bin/env python3
from mycelium import CameraD435
from mycelium_utils import Scripter
class ScripterExt(Scripter):
def run_main(self):
self.camera = CameraD435(
configuration_mode=self.cfg.d435['configuration_mode'],
enable_rgb_stream=self.cfg.d435['enable_rgb_stream'],
enable_depth_stream=self.cfg.d435['enable_depth_stream'],
enable_infrared_stream=self.cfg.d435['enable_infrared_stream'],
save_rgb_frames=self.cfg.d435['save_rgb_frames'],
save_depth_frames=self.cfg.d435['save_depth_frames'],
save_infrared_frames=self.cfg.d435['save_infrared_frames'])
self.camera.start()
def _sigint_handler(self, sig, frame):
self.camera.exit_threads = True
def _sigterm_handler(self, sig, frame):
self.camera.exit_threads = True
self.exit_code = 0
def close_script(self):
try:
self.camera.stop()
except:
pass
scripter = ScripterExt(log_source="run_d435")
scripter.run()
| [((9, 22, 16, 71), 'mycelium.CameraD435', 'CameraD435', (), '', False, 'from mycelium import CameraD435\n')] |
pi-top/pi-top-Python-SDK | examples/system/miniscreen/miniscreen_display_animated_image_once_simple_way.py | 6c83cc5f612d77f86f8d391c7f2924a28f7b1232 | from PIL import Image
from pitop import Pitop
pitop = Pitop()
miniscreen = pitop.miniscreen
rocket = Image.open("/usr/lib/python3/dist-packages/pitop/miniscreen/images/rocket.gif")
miniscreen.play_animated_image(rocket)
| [((5, 8, 5, 15), 'pitop.Pitop', 'Pitop', ({}, {}), '()', False, 'from pitop import Pitop\n'), ((8, 9, 8, 88), 'PIL.Image.open', 'Image.open', ({(8, 20, 8, 87): '"""/usr/lib/python3/dist-packages/pitop/miniscreen/images/rocket.gif"""'}, {}), "('/usr/lib/python3/dist-packages/pitop/miniscreen/images/rocket.gif')", False, 'from PIL import Image\n')] |
nemami/synbioinformatica | synbioinformatica.py | 9306d7a7edb93aaa8e4de5e041db6633214c07b1 | #!/usr/bin/python -tt
import sys, re, math
from decimal import *
# TODO: work on naming scheme
# TODO: add more ORIs
# TODO: assemblytree alignment
# TODO: Wobble, SOEing
# TODO: (digestion, ligation) redundant products
# TODO: for PCR and Sequencing, renormalize based on LCS
# TODO: tutorials
dna_alphabet = {'A':'A', 'C':'C', 'G':'G', 'T':'T',
'R':'AG', 'Y':'CT', 'W':'AT', 'S':'CG', 'M':'AC', 'K':'GT',
'H':'ACT', 'B':'CGT', 'V':'ACG', 'D':'AGT',
'N':'ACGT',
'a': 'a', 'c': 'c', 'g': 'g', 't': 't',
'r':'ag', 'y':'ct', 'w':'at', 's':'cg', 'm':'ac', 'k':'gt',
'h':'act', 'b':'cgt', 'v':'acg', 'd':'agt',
'n':'acgt'}
complement_alphabet = {'A':'T', 'T':'A', 'C':'G', 'G':'C','R':'Y', 'Y':'R',
'W':'W', 'S':'S', 'M':'K', 'K':'M', 'H':'D', 'D':'H',
'B':'V', 'V':'B', 'N':'N','a':'t', 'c':'g', 'g':'c',
't':'a', 'r':'y', 'y':'r', 'w':'w', 's':'s','m':'k',
'k':'m', 'h':'d', 'd':'h', 'b':'v', 'v':'b', 'n':'n'}
gencode = {
'ATA':'I', 'ATC':'I', 'ATT':'I', 'ATG':'M',
'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T',
'AAC':'N', 'AAT':'N', 'AAA':'K', 'AAG':'K',
'AGC':'S', 'AGT':'S', 'AGA':'R', 'AGG':'R',
'CTA':'L', 'CTC':'L', 'CTG':'L', 'CTT':'L',
'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P',
'CAC':'H', 'CAT':'H', 'CAA':'Q', 'CAG':'Q',
'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGT':'R',
'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V',
'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCT':'A',
'GAC':'D', 'GAT':'D', 'GAA':'E', 'GAG':'E',
'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G',
'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S',
'TTC':'F', 'TTT':'F', 'TTA':'L', 'TTG':'L',
'TAC':'Y', 'TAT':'Y', 'TAA':'_', 'TAG':'_',
'TGC':'C', 'TGT':'C', 'TGA':'_', 'TGG':'W'}
# Description: converts DNA string to amino acid string
def translate( sequence ):
"""Return the translated protein from 'sequence' assuming +1 reading frame"""
return ''.join([gencode.get(sequence[3*i:3*i+3],'X') for i in range(len(sequence)//3)])
# Description: read in all enzymes from REase tsv into dict EnzymeDictionary
def EnzymeDictionary():
EnzymeDictionary = {}
fh = open('REases.tsv', 'rU')
for line in fh:
card = line.rstrip().split('\t')
card[0] = re.sub(r'\-','_',card[0])
EnzymeDictionary[card[0]] = restrictionEnzyme(*card)
return EnzymeDictionary
# Description: Suffix Tree implementation for the purpose of PCR Longest Common Substring identification
# Code adapted from: http://chipsndips.livejournal.com/2005/12/07/
# Define a for a node in the suffix tree
class SuffixNode(dict):
def __init__(self):
self.suffixLink = None # Suffix link as defined by Ukkonen
class LCS:
def __init__(self,str1,str2):
# Hack for terimal 3' end matching
str = str1 + str2 + '#'
inf = len(str)
self.str = str #Keep a reference to str to ensure the string is not garbage collected
self.seed = SuffixNode() #Seed is a dummy node. Suffix link of root points to seed. For any char,there is a link from seed to root
self.root = SuffixNode() # Root of the suffix tree
self.root.suffixLink = self.seed
self.root.depth = 0
self.deepest = 0,0
# For each character of str[i], create suffixtree for str[0:i]
s = self.root; k=0
for i in range(len(str)):
self.seed[str[i]] = -2,-2,self.root
oldr = self.seed
t = str[i]
#Traverse the boundary path of the suffix tree for str[0:i-1]
while True:
# Descend the suffixtree until state s has a transition for the stringstr[k:i-1]
while i>k:
kk,pp,ss = s[str[k]]
if pp-kk < i-k:
k = k + pp-kk+1
s = ss
else:
break
# Exit this loop if s has a transition for the string str[k:i] (itmeans str[k:i] is repeated);
# Otherwise, split the state if necessary
if i>k:
tk = str[k]
kp,pp,sp = s[tk]
if t.lower() == str[kp+i-k].lower():
break
else: # Split the node
r = SuffixNode()
j = kp+i-k
tj = str[j]
r[tj] = j, pp, sp
s[str[kp]] = kp,j-1, r
r.depth = s.depth + (i-k)
sp.depth = r.depth + pp - j + 1
# Original statement was: if j<len(str1)<i and r.depth>self.deepest[0]:
# Adapted for PCR by restricting LCS matches to primer terminal 3' end
if len(str1)<i and r.depth>self.deepest[0] and j == len(str1) - 1:
self.deepest = r.depth, j-1
elif s.has_key(t):
break
else:
r = s
# Add a transition from r that starts with the letter str[i]
tmp = SuffixNode()
r[t] = i,inf,tmp
# Prepare for next iteration
oldr.suffixLink = r
oldr = r
s = s.suffixLink
# Last remaining endcase
oldr.suffixLink = s
def LongestCommonSubstring(self):
start, end = self.deepest[1]-self.deepest[0]+1, self.deepest[1]+1
return (self.str[start:end],start,end)
def LCSasRegex(self, currentPrimer, template, fwd):
annealingRegion = self.str[self.deepest[1] - self.deepest[0] + 1 : self.deepest[1] + 1]
if not fwd:
annealingRegion = reverseComplement(annealingRegion)
(AnnealingMatches, matchCount, MatchIndicesTuple) = ([], 0, ())
annealingRegex = re.compile(annealingRegion, re.IGNORECASE)
matchList = annealingRegex.finditer(template)
for match in matchList:
if primerTm(match.group()) > 45:
matchCount += 1
MatchIndicesTuple = (match.start(), match.end())
PrimerStub = currentPrimer[0:len(currentPrimer)-len(annealingRegion)-1]
return (matchCount, MatchIndicesTuple, PrimerStub)
# Description: identifies errors in primer design and raises exceptions based on errors and their context
def PCRErrorHandling(InputTuple):
(fwd,matchCount,matchedAlready,nextOrientation,currentPrimer,template) = InputTuple
if len(currentPrimer.sequence) > 7:
abbrev = currentPrimer.sequence[:3]+'...'+currentPrimer.sequence[-3:]
else:
abbrev = currentPrimer.sequence
if fwd:
if matchCount > 1: # if matches in forward direction more than once
if nextOrientation == 2: # ... but was supposed to match in reverse direction
raise Exception('*Primer error*: primers both anneal in forward (5\'->3\') orientation AND primer '+abbrev+' anneals to multiple sites in template.')
raise Exception('*Primer error*: primer '+abbrev+' anneals to multiple sites in template.')
elif matchCount == 1: # if matches in the forward direction exactly once
if nextOrientation == 2: # ... but was supposed to match in reverse direction
raise Exception('*Primer error*: primers both anneal in forward (5\'->3\') orientation.')
matchedAlready = 1
return matchedAlready
else:
if matchCount > 1: # if matches in reverse direction more than once
if matchedAlready == 1: # ... and already matched in forward direction
if nextOrientation == 1: # ... but was supposed to match in forward direction
raise Exception('*Primer error*: primers both anneal in reverse (3\'->5\') orientation AND primer '+abbrev+' anneals to multiple sites in template AND primer '+abbrev+' anneals in both orientations.')
raise Exception('*Primer error*: primer '+abbrev+' anneals to multiple sites in template AND primer '+abbrev+' anneals in both orientations.')
if nextOrientation == 1:
raise Exception('*Primer error*: primers both anneal in reverse (3\'->5\') orientation AND primer '+abbrev+' anneals to multiple sites in template.')
raise Exception('*Primer error*: primer '+abbrev+' anneals to multiple sites in template.')
elif matchCount == 1: # if matches in the reverse direction exactly once
if matchedAlready == 1: # ... and already matched in forward direction
if nextOrientation == 1: # ... but was supposed to match in forward direction
raise Exception('*Primer error*: both primers have same reverse (3\'->5\') orientation AND primer '+abbrev+' anneals in both orientations.')
raise Exception('*Primer error*: primer '+abbrev+' primes in both orientations.')
else:
matchedAlready = 2
if matchedAlready == 0: # if no matches
raise Exception('*Primer error*: primer '+abbrev+' does not anneal in either orientation.')
return matchedAlready
# Description: assigns relationships for PCR inputs and PCR product for assembly tree purposes
def pcrPostProcessing(inputTuple, parent, fwdTM, revTM):
(primer1DNA, primer2DNA, templateDNA) = inputTuple
for child in inputTuple:
child.addParent(parent)
parent.setChildren(inputTuple)
intVal = int(round(len(parent.sequence)/1000+0.5))
parent.setTimeStep(intVal)
parent.addMaterials(['Polymerase','dNTP mix','Polymerase buffer'])
thermoCycle = str(intVal)+'K'+str(int(round(max(fwdTM,revTM))))
parent.instructions = thermoCycle+' PCR template '+templateDNA.name+' with primers '+primer1DNA.name+', '+primer2DNA.name
return parent
# Description: PCR() function constructs generalized suffix tree for template and a given primer to identify annealing region,
# and raises PrimerError exceptions for different cases of failed PCR as a result of primer design
# Note: PCR() product is not case preserving
def PCR(primer1DNA, primer2DNA, templateDNA):
for pcrInput in (primer1DNA, primer2DNA, templateDNA):
if not isinstance(pcrInput, DNA):
raise Exception('*PCR error*: PCR function was passed a non-DNA argument.')
return None
# Suffix Tree string initialization, non-alphabet character concatenation
(template, primer_1, primer_2) = (templateDNA.sequence, primer1DNA, primer2DNA)
# Tuple of assemblyTree 'children', for the purpose of child/parent assignment
inputTuple = (primer1DNA, primer2DNA, templateDNA)
# Initialization of all parameters, where indices is the start / stop indices + direction of annealing primer sequences
(fwdTM, revTM, indices, counter, rightStub, leftStub, nextOrientation) = (0,0,[0,0,0,0,0,0],0,'','',0)
try:
# NOTE: no assumptions made about input primer directionality
for currentPrimer in (primer_1, primer_2):
currentSequence = currentPrimer.sequence + '$'
fwdMatch = LCS(currentSequence.upper(), template.upper())
(matchCount, forwardMatchIndicesTuple, forwardPrimerStub) = fwdMatch.LCSasRegex(currentSequence, template, 1)
(matchedAlready, start, stop) = (0,0,0) # Defaults
# Forward case error handling: delegated to PCRErrorHandling function
matchedAlready = PCRErrorHandling((1,matchCount,matchedAlready,nextOrientation,currentPrimer,template))
revMatch = LCS(currentSequence.upper(),reverseComplement(template).upper())
(matchCount, reverseMatchIndicesTuple, reversePrimerStub) = revMatch.LCSasRegex(currentSequence, template, 0)
# Reverse case error handling: delegated to PCRErrorHandling function
matchedAlready = PCRErrorHandling((0,matchCount,matchedAlready,nextOrientation,currentPrimer,template))
if matchedAlready == 1:
(indices[counter], indices[counter+1], indices[counter+2]) = (forwardMatchIndicesTuple[0], forwardMatchIndicesTuple[1], 'fwd')
(counter,nextOrientation,leftStub) = (counter+3, 2, forwardPrimerStub)
elif matchedAlready == 2:
(indices[counter], indices[counter+1], indices[counter+2]) = (reverseMatchIndicesTuple[0], reverseMatchIndicesTuple[1], 'rev')
(counter,nextOrientation,rightStub) = (counter+3, 1, reverseComplement(reversePrimerStub))
if indices[2] == 'fwd':
(fwdStart, fwdEnd, revStart, revEnd) = (indices[0], indices[1], indices[3], indices[4])
else:
(fwdStart, fwdEnd, revStart, revEnd) = (indices[3], indices[4], indices[0], indices[1])
(fwdTM, revTM) = (primerTm(template[fwdStart:fwdEnd]), primerTm(template[revStart:revEnd]))
if fwdStart < revStart and fwdEnd < revEnd:
parent = DNA('PCR product','PCR product of '+primer1DNA.name+', '+primer2DNA.name+' on '+templateDNA.name, leftStub+template[fwdStart:revEnd]+rightStub)
else:
# TODO remove
# circular template is exception to the fwdStart < revStart and fwdEnd < revEnd rule
if templateDNA.topology == 'circular':
parent = DNA('PCR product','PCR product of '+primer1DNA.name+', '+primer2DNA.name+' on '+templateDNA.name, leftStub+template[fwdStart:len(template)]+template[:revEnd]+rightStub)
else:
raise Exception('*PCR Error*: forward primer must anneal upstream of the reverse.')
return pcrPostProcessing(inputTuple, parent, fwdTM, revTM)
except:
raise
# Description: identifies errors in primer design and raises exceptions based on errors and their context
def SequenceErrorHandling(InputTuple):
(fwd,matchCount,matchedAlready,currentPrimer) = InputTuple
if len(currentPrimer.sequence) > 7:
abbrev = currentPrimer.sequence[:3]+'...'+currentPrimer.sequence[-3:]
else:
abbrev = currentPrimer.sequence
if fwd:
if matchCount > 1: # if matches in forward direction more than once
raise Exception('*Primer error*: primer '+abbrev+' anneals to multiple sites in template.')
elif matchCount == 1: # if matches in the forward direction exactly once
matchedAlready = 1
return matchedAlready
else:
if matchCount > 1: # if matches in reverse direction more than once
if matchedAlready == 1: # ... and already matched in forward direction
raise Exception('*Primer error*: primer '+abbrev+' anneals to multiple sites in template AND primer '+abbrev+' anneals in both orientations.')
raise Exception('*Primer error*: primer '+abbrev+' anneals to multiple sites in template.')
elif matchCount == 1: # if matches in the reverse direction exactly once
if matchedAlready == 1: # ... and already matched in forward direction
raise Exception('*Primer error*: primer '+abbrev+' primes in both orientations.')
else:
matchedAlready = 2
if matchedAlready == 0: # if no matches
raise Exception('*Primer error*: primer '+abbrev+' does not anneal in either orientation.')
return matchedAlready
def Sequence(InputDNA, inputPrimer):
for seqInput in (InputDNA, inputPrimer):
if not isinstance(seqInput, DNA):
raise Exception('*Sequencing error*: Sequence function was passed a non-DNA argument.')
return None
# Suffix Tree string initialization, non-alphabet character concatenation
(template, primer) = (InputDNA.sequence, inputPrimer)
# Tuple of assemblyTree 'children', for the purpose of child/parent assignment
# Initialization of all parameters, where indices is the start / stop indices + direction of annealing primer sequences
(fwdTM, revTM, indices, counter, rightStub, leftStub, nextOrientation, fwd, rev, read) = (0,0,[0,0,0],0,'','',0,0,0,'')
try:
# NOTE: no assumptions made about input primer directionality
currentSequence = primer.sequence + '$'
fwdMatch = LCS(currentSequence.upper(), template.upper())
(matchCount, forwardMatchIndicesTuple, forwardPrimerStub) = fwdMatch.LCSasRegex(currentSequence, template, 1)
(matchedAlready, start, stop) = (0,0,0) # Defaults
# Forward case error handling: delegated to SequenceErrorHandling function
matchedAlready = SequenceErrorHandling((1,matchCount,matchedAlready,primer))
revMatch = LCS(currentSequence.upper(),reverseComplement(template).upper())
(matchCount, reverseMatchIndicesTuple, reversePrimerStub) = revMatch.LCSasRegex(currentSequence, template, 0)
# Reverse case error handling: delegated to SequenceErrorHandling function
matchedAlready = SequenceErrorHandling((0,matchCount,matchedAlready,primer))
if matchedAlready == 1:
(fwdStart, fwdEnd, fwd) = (forwardMatchIndicesTuple[0], forwardMatchIndicesTuple[1], 1)
elif matchedAlready == 2:
(revStart, revEnd, rev) = (reverseMatchIndicesTuple[0], reverseMatchIndicesTuple[1], 1)
if fwd:
bindingTM = primerTm(template[fwdStart:fwdEnd])
if InputDNA.DNAclass == 'plasmid':
if fwdEnd + 1001 > len(template):
read = template[fwdEnd+1:] + template[:fwdEnd+1001-len(template)]
else:
read = template[fwdEnd+1:fwdEnd+1001]
else:
read = template[fwdEnd+1:fwdEnd+1001]
else:
bindingTM = primerTm(template[revStart:revEnd])
if InputDNA.DNAclass == 'plasmid':
if revStart - 1001 < 0:
read = template[revStart-1001+len(template):] + template[:revStart]
else:
read = template[revStart-1001:revStart]
else:
read = template[revStart-1001:revStart]
if bindingTM >= 55:
return read
else:
return ''
except:
raise
# Description: case preserving reverse complementation of nucleotide sequences
def reverseComplement(sequence):
return "".join([complement_alphabet.get(nucleotide, '') for nucleotide in sequence[::-1]])
# Description: case preserving string reversal
def reverse(sequence):
return sequence[::-1]
# Description: case preserving complementation of nucleotide sequences
def Complement(sequence):
return "".join([complement_alphabet.get(nucleotide, '') for nucleotide in sequence[0:]])
# Primer TM function suite: primerTm(), primerTmsimple(), get_55_primer(), nearestNeighborTmNonDegen(), getTerminalCorrectionsDsHash(),
# getTerminalCorrectionsDhHash(), getDsHash(), getDhHash()
# Implemented by Tim Hsaiu in JavaScript, adapted to Python by Nima Emami
# Based on Santa Lucia et. al. papers
def primerTm(sequence):
if sequence == '':
return 0
milliMolarSalt = 50
milliMolarMagnesium = 1.5
nanoMolarPrimerTotal = 200
molarSalt = milliMolarSalt/1000
molarMagnesium = milliMolarMagnesium/1000
molarPrimerTotal = Decimal(nanoMolarPrimerTotal)/Decimal(1000000000)
re.sub(r'\s','', sequence)
return nearestNeighborTmNonDegen(sequence, molarSalt, molarPrimerTotal, molarMagnesium)
def primerTmsimple(sequence):
return 64.9+41*(GCcontent(sequence)*len(sequence) - 16.4)/len(sequence)
# phusion notes on Tm
# https://www.finnzymes.fi/optimizing_tm_and_annealing.html
# get substring from the beginning of input that is 55C Tm
def get_55_primer(sequence):
lastChar = 17
myPrimer = sequence.substring(0,lastChar)
while( primerTmsimple(myPrimer) < 54.5 or lastChar > 60):
lastChar = lastChar + 1
myPrimer = sequence[0:lastChar]
return myPrimer
def nearestNeighborTmNonDegen (sequence, molarSalt, molarPrimerTotal, molarMagnesium):
# The most sophisticated Tm calculations take into account the exact sequence and base stacking parameters, not just the base composition.
# m = ((1000* dh)/(ds+(R * Math.log(primer concentration))))-273.15;
# Borer P.N. et al. (1974) J. Mol. Biol. 86, 843.
# SantaLucia, J. (1998) Proc. Nat. Acad. Sci. USA 95, 1460.
# Allawi, H.T. and SantaLucia, J. Jr. (1997) Biochemistry 36, 10581.
# von Ahsen N. et al. (1999) Clin. Chem. 45, 2094.
sequence = sequence.lower()
R = 1.987 # universal gas constant in Cal/degrees C * mol
ds = 0 # cal/Kelvin/mol
dh = 0 # kcal/mol
# perform salt correction
correctedSalt = molarSalt + molarMagnesium * 140 # adjust for greater stabilizing effects of Mg compared to Na or K. See von Ahsen et al 1999
ds = ds + 0.368 * (len(sequence) - 1) * math.log(correctedSalt) # from von Ahsen et al 1999
# perform terminal corrections
termDsCorr = getTerminalCorrectionsDsHash()
ds = ds + termDsCorr[sequence[0]]
ds = ds + termDsCorr[sequence[len(sequence) - 1]]
termDhCorr = getTerminalCorrectionsDhHash()
dh = dh + termDhCorr[sequence[0]]
dh = dh + termDhCorr[sequence[len(sequence) - 1]]
dsValues = getDsHash()
dhValues = getDhHash()
for i in range(len(sequence)-1):
ds = ds + dsValues[sequence[i] + sequence[i + 1]]
dh = dh + dhValues[sequence[i] + sequence[i + 1]]
return (((1000 * dh) / (ds + (R * math.log(molarPrimerTotal / 2)))) - 273.15)
def getTerminalCorrectionsDsHash():
# SantaLucia, J. (1998) Proc. Nat. Acad. Sci. USA 95, 1460.
dictionary = {'g' : -2.8,'a': 4.1,'t' : 4.1,'c' : -2.8}
return dictionary
def getTerminalCorrectionsDhHash():
# SantaLucia, J. (1998) Proc. Nat. Acad. Sci. USA 95, 1460.
dictionary = {'g':0.1,'a' : 2.3,'t' : 2.3,'c' : 0.1}
return dictionary
def getDsHash():
# SantaLucia, J. (1998) Proc. Nat. Acad. Sci. USA 95, 1460.
dictionary = {
'gg' : -19.9,
'ga' : -22.2,
'gt' : -22.4,
'gc' : -27.2,
'ag' : -21.0,
'aa' : -22.2,
'at' : -20.4,
'ac' : -22.4,
'tg' : -22.7,
'ta' : -21.3,
'tt' : -22.2,
'tc' : -22.2,
'cg' : -27.2,
'ca' : -22.7,
'ct' : -21.0,
'cc' : -19.9}
return dictionary
def getDhHash():
# SantaLucia, J. (1998) Proc. Nat. Acad. Sci. USA 95, 1460.
dictionary = {'gg': -8.0,
'ga' : -8.2,
'gt' : -8.4,
'gc' : -10.6,
'ag' : -7.8,
'aa' : -7.9,
'at' : -7.2,
'ac' : -8.4,
'tg' : -8.5,
'ta' : -7.2,
'tt' : -7.9,
'tc' : -8.2,
'cg' : -10.6,
'ca' : -8.5,
'ct' : -7.8,
'cc' : -8.0}
return dictionary
# Description: initialize Digest function parameters and checks for acceptable input format
def initDigest(InputDNA, Enzymes):
(indices, frags, sites, totalLength, enzNames, incubationTemp, nameList, filtered) = ([], [], "", len(InputDNA.sequence), '', 0, [], []) # Initialization
for enzyme in Enzymes:
nameList.append(enzyme.name)
enzNames = enzNames+enzyme.name+', '
incubationTemp = max(incubationTemp,enzyme.incubate_temp)
enzNames = enzNames[:-2]
if len(Enzymes) > 2:
raise Exception('*Digest error*: only double or single digests allowed (provided enzymes were '+enzNames+')')
if InputDNA.topology == "linear":
# Initialize indices array with start and end indices of the linear fragment
# Add dummy REase to avoid null pointers
dummy = restrictionEnzyme("dummy", "", "", "", "", "", 0, 0, "(0/0)","")
indices = [(0,0,'',dummy), (totalLength,0,'',dummy)]
return (indices, frags, sites, totalLength, enzNames, incubationTemp, nameList, filtered)
# Description: finds restriction sites for given Enzymes in given InputDNA molecule
def restrictionSearch(Enzymes, InputDNA, indices, totalLength):
for enzyme in Enzymes:
sites = enzyme.find_sites(InputDNA)
for site in sites:
# WARNING: end proximity for linear fragments exception
if InputDNA.topology == 'linear' and int(site[0]) - int(enzyme.endDistance) < 0 or int(site[1]) + int(enzyme.endDistance) > totalLength:
print '\n*Digest Warning*: end proximity for '+enzyme.name+' restriction site at indices '+str(site[0]%totalLength)+','+str(site[1]%totalLength)+' for input '+InputDNA.name+' (length '+str(totalLength)+')\n'
if InputDNA.topology == 'linear' and site[2] == 'antisense' and site[1] - max(enzyme.bottom_strand_offset,enzyme.top_strand_offset) < 0:
print '\n*Digest Warning*: restriction cut site for '+enzyme.name+' with recognition indices '+str(site[0]%totalLength)+','+str(site[1]%totalLength)+' out of bounds for input '+InputDNA.name+' (length '+str(totalLength)+')\n'
else:
pass
# WARNING: restriction index out of bounds exception
elif InputDNA.topology == 'linear' and site[2] == 'antisense' and site[1] - max(enzyme.bottom_strand_offset,enzyme.top_strand_offset) < 0:
print '\n*Digest Warning*: restriction cut site for '+enzyme.name+' with recognition indices '+str(site[0]%totalLength)+','+str(site[1]%totalLength)+' out of bounds for input '+InputDNA.name+' (length '+str(totalLength)+')\n'
else:
site = site + (enzyme, )
indices.append(site)
indices.sort()
return indices
# Description: if you have overlapping restriction sites, choose the first one and discard the second
# TODO: revise this?
def filterSites(filtered, indices):
siteCounter = 0
while siteCounter < len(indices):
try:
(currentTuple, nextTuple) = (indices[n], indices[n+1])
(currentStart, nextStart, currentEnzyme, nextEnzyme) = (currentTuple[0], nextTuple[0], currentTuple[3], nextTuple[3])
filtered.append(indices[siteCounter])
if currentStart + len(currentEnzyme.alpha_only_site) >= nextStart:
currentIndex = indices[siteCounter+1]
if currentIndex[0] == len(InputDNA.sequence):
pass
else:
raise Exception('Digest Error*: overlapping restriction sites '+currentTuple[3].name+' (indices '+str(currentTuple[0])+','+str(currentTuple[1])+') and '+nextTuple[3].name+' (indices '+str(nextTuple[0])+','+str(nextTuple[1])+')')
siteCounter += 1
siteCounter += 1
except: # got to end of list
filtered.append(indices[siteCounter])
siteCounter += 1
return filtered
# Description: determines digest start and stop indices, as well as overhang indices for left and right restriction
def digestIndices(direction, nextDirection, currentEnzyme, nextEnzyme, currentStart, nextStart, totalLength):
# CT(B)O = current top (bottom) overhang, AL(R)L = add left (right) length, NT(B)O = next top (bottom) overhang
(ALL, ARL) = (0,0)
# If it's on the sense strand, then overhang is positive
if direction == "sense":
(CTO, CBO) = (currentEnzyme.top_strand_offset, currentEnzyme.bottom_strand_offset)
# If it's on the antisense strand, then you have to go back towards the 5' to generate the overhang (so multiply by -1)
else:
(CTO, CBO) = (-1 * currentEnzyme.top_strand_offset, -1 * currentEnzyme.bottom_strand_offset)
ALL = max(CTO,CBO)
if nextDirection == "sense":
(NTO, NBO) = (nextEnzyme.top_strand_offset, nextEnzyme.bottom_strand_offset)
ARL = min(NTO,NBO)
else:
(NTO, NBO) = (-1 * nextEnzyme.top_strand_offset + 1, -1 * nextEnzyme.bottom_strand_offset + 1)
ARL = min(NTO,NBO)-1
(currentStart, digEnd) = ((currentStart+ALL) % totalLength, nextStart + ARL)
if currentEnzyme.reach and direction == "sense":
currentStart = currentStart + len(currentEnzyme.alpha_only_site)
if nextEnzyme.reach and nextDirection == "sense":
digEnd = digEnd + len(nextEnzyme.alpha_only_site)
return (currentStart, digEnd, CTO, CBO, NTO, NBO)
# Description: instantiates Overhang object as the TLO or BLO field of a digested DNA molecule object
def setLeftOverhang(digested, CTO, CBO, direction, currentStart, currentEnzyme, InputDNA):
if direction == "sense":
(TO, BO) = (CTO, CBO)
else:
(TO, BO) = (CBO, CTO)
difference = abs(abs(BO) - abs(TO))
# Generate TLO and BLO fragment overhangs
if abs(TO) < abs(BO) and direction == "sense" or abs(TO) > abs(BO) and direction == "antisense":
if currentStart - len(currentEnzyme.alpha_only_site) < 0:
digested.topLeftOverhang = Overhang(InputDNA.sequence[currentStart-difference:]+InputDNA.sequence[:currentStart])
else:
digested.topLeftOverhang = Overhang(InputDNA.sequence[currentStart-difference:currentStart])
digested.bottomLeftOverhang = Overhang('')
else:
digested.topLeftOverhang = Overhang('')
# Edge case statement
if currentStart - len(currentEnzyme.alpha_only_site) < 0:
digested.bottomLeftOverhang = Overhang(Complement(InputDNA.sequence[currentStart-difference:]+InputDNA.sequence[:currentStart]))
else:
digested.bottomLeftOverhang = Overhang(Complement(InputDNA.sequence[currentStart-difference:currentStart]))
return digested
# Description: instantiates Overhang object as the TRO or BRO field of a digested DNA molecule object
def setRightOverhang(digested, NTO, NBO, direction, digEnd, nextEnzyme, InputDNA, totalLength):
if direction == "sense":
(TO, BO) = (NTO, NBO)
else:
(TO, BO) = (NBO, NTO)
difference = abs(abs(BO) - abs(TO))
# Apply ( mod length ) operator to end index value digDiff to deal with edge cases
digDiff = digEnd + difference
digDiff = digDiff % totalLength
# Generate TRO and BRO fragment overhangs
if abs(TO) < abs(BO) and direction == "sense" or abs(TO) > abs(BO) and direction == "antisense":
digested.topRightOverhang = Overhang('')
# Edge case statement
if digDiff - len(nextEnzyme.alpha_only_site) < 0:
digested.bottomRightOverhang = Overhang(Complement(InputDNA.sequence[digEnd:]+InputDNA.sequence[:digDiff]))
else:
digested.bottomRightOverhang = Overhang(Complement(InputDNA.sequence[digEnd:digDiff]))
else:
# Edge case statement
if digDiff - len(nextEnzyme.alpha_only_site) < 0:
digested.topRightOverhang = Overhang(InputDNA.sequence[digEnd:]+InputDNA.sequence[:digDiff])
else:
digested.topRightOverhang = Overhang(InputDNA.sequence[digEnd:digDiff])
digested.bottomRightOverhang = Overhang('')
return digested
# Description: take digest fragments before they're output, and sets assemblytree relationships and fields,
# as well as digest buffer
def digestPostProcessing(frag, InputDNA, nameList, enzNames, incubationTemp):
frag.setChildren((InputDNA, ))
InputDNA.addParent(frag)
if len(nameList) == 2:
bufferChoices = DigestBuffer(nameList[0],nameList[1])
else:
bufferChoices = DigestBuffer(nameList[0])
bestBuffer = int(bufferChoices[0])
if bestBuffer < 5:
bestBuffer = 'NEB'+str(bestBuffer)
else:
bestBuffer = 'Buffer EcoRI'
frag.setTimeStep(1)
frag.addMaterials([bestBuffer,'ddH20'])
frag.instructions = 'Digest ('+InputDNA.name+') with '+enzNames+' at '+incubationTemp+'C in '+bestBuffer+' for 1 hour.'
return frag
# Description: takes in InputDNA molecule and list of EnzymeDictionary elements, outputting a list of digest products
def Digest(InputDNA, Enzymes):
# Initialization
if not isinstance(InputDNA, DNA):
raise Exception('*Digest Error*: Digest function passed empty list of DNA arguments. Returning empty list of products.')
return []
(indices, frags, sites, totalLength, enzNames, incubationTemp, nameList, filtered) = initDigest(InputDNA, Enzymes)
# Identify restriction sites, fill in indices array
indices = restrictionSearch(Enzymes, InputDNA, indices, totalLength)
# If you have overlapping restriction sites, choose the first one and discard they second
indices = filterSites(filtered, indices)
# If it's linear, only act on the first n - 1 fragments until you hit the blunt ending
# If it's circular, then the 'last' segment is adjacent to the 'first' one, so you
# need to consider the adjacency relationships among the full n fragments
if InputDNA.topology == "linear":
lastIt = len(indices) - 1
else:
lastIt = len(indices)
# Consider enzyme for the current restriction site as well as the next restriction
# site, so that you can generate overhangs for both sides of the current fragment
for n in range(lastIt):
currentTuple = indices[n]
if n+1 > len(indices) - 1:
n = -1
nextTuple = indices[n+1]
(currentStart, currentEnd, direction, currentEnzyme) = currentTuple
(nextStart, nextEnd, nextDirection, nextEnzyme) = nextTuple
# Update start value currentStart and apply ( mod length ) to deal with edge cases
# Also, update end value digEnd for fragment indices
(currentStart, digEnd, CTO, CBO, NTO, NBO) = digestIndices(direction, nextDirection, currentEnzyme, nextEnzyme, currentStart, nextStart, totalLength)
# Loop around fragment case for circular InputDNA's
if digEnd > 0 and currentStart > 0 and digEnd < currentStart and InputDNA.topology == 'circular':
if n == -1:
digested = DNA('digest','Digest of '+InputDNA.name+' with '+enzNames,InputDNA.sequence[currentStart:]+InputDNA.sequence[:digEnd])
else:
raise Exception('Digest Error*: restriction sites for '+currentTuple[3].name+' ('+str(currentTuple[0])+','+str(currentTuple[1])+') and '+nextTuple[3].name+' ('+str(nextTuple[0])+','+str(nextTuple[1])+') contain mutually interfering overhangs -- fragment discarded.')
continue
else:
digested = DNA('digest','Digest of '+InputDNA.name+' with '+enzNames,InputDNA.sequence[currentStart:digEnd])
# Discard small fragments
if len(digested.sequence) < 4:
pass
else:
# Adjust top and bottom overhang values based on the orientation of the restriction site
digested = setLeftOverhang(digested, CTO, CBO, direction, currentStart, currentEnzyme, InputDNA)
digested = setRightOverhang(digested, NTO, NBO, direction, digEnd, nextEnzyme, InputDNA, totalLength)
frags.append(digested)
for frag in frags:
frag = digestPostProcessing(frag, InputDNA, nameList, enzNames, incubationTemp)
return frags
class Overhang(object):
def __init__(self, seq=""):
self.sequence = seq
class DNA(object):
#for linear DNAs, this string should include the entire sequence (5' and 3' overhangs included
def __init__(self, DNAclass="", name="", seq=""):
self.sequence = seq
self.length = len(seq)
notDNA = re.compile('([^gatcrymkswhbvdn])')
isnotDNA = False
exceptionText = ""
for m in notDNA.finditer(self.sequence.lower()):
exceptionText += m.group() + " at position "+ str( m.start()) + " is not valid IUPAC DNA. "
isnotDNA = True
if(isnotDNA):
raise Exception(exceptionText)
self.name = name #would be pbca1256 for vectors or pbca1256-Bth8199 for plasmids
# self.description = "SpecR pUC" #this is for humans to read
self.dam_methylated = True
self.topLeftOverhang = Overhang('')
self.bottomLeftOverhang = Overhang('')
self.topRightOverhang = Overhang('')
self.bottomRightOverhang = Overhang('')
self.pnkTreated = False
#PCR product, miniprep, genomic DNA
self.DNAclass = DNAclass
self.provenance = ""
self.parents = []
self.children = ()
self.instructions = ""
self.materials = []
self.timeStep = 0
#Here is the linked list references for building up action-chains
# an action chain would be something like do PCR on day 1, do transformation on day 2, etc
self.head = None
self.tail = None
if DNAclass == "primer" or DNAclass == "genomic" or DNAclass == "PCR product" or DNAclass == "digest":
self.topology = "linear"
elif DNAclass == 'plasmid':
self.topology = "circular" #circular or linear, genomic should be considered linear
else:
raise Exception("Invalid molecule class. Acceptable classes are 'digest', genomic', 'PCR product', 'plasmid' and 'primer'.")
def reversecomp(self):
return reverseComplement(self.sequence) #reverses string
#code to handle the overhangs & other object attributes
def addParent(self, DNA):
self.parents.append(DNA)
def addMaterials(self, materialsList):
self.materials += materialsList
def phosphorylate(self):
self.pnkTreated = True
def setTimeStep(self, timeStep):
self.timeStep = timeStep
def setChildren(self, inputDNAs):
self.children = inputDNAs
def find(self, string):
return 0
def isEqual(self, other):
# TODO: implement plasmid rotation to allow circular alignment
if self.DNAclass == 'plasmid' and other.DNAclass == 'plasmid':
if self.sequence.lower() == other.sequence.lower():
return True
else:
if self.sequence.lower() == other.sequence.lower() and self.overhangsEqual(other):
return True
return False
def overhangsEqual(self, other):
if self.bottomLeftOverhang.sequence.lower() == other.bottomLeftOverhang.sequence.lower() and \
self.topLeftOverhang.sequence.lower() == other.topLeftOverhang.sequence.lower() and \
self.bottomRightOverhang.sequence.lower() == other.bottomRightOverhang.sequence.lower() and \
self.topRightOverhang.sequence.lower() == other.topRightOverhang.sequence.lower():
return True
return False
def clone(self):
clone = DNA(self.DNAclass, self.name, self.sequence)
clone.topLeftOverhang = Overhang(self.topLeftOverhang.sequence)
clone.topRightOverhang = Overhang(self.topRightOverhang.sequence)
clone.bottomLeftOverhang = Overhang(self.bottomLeftOverhang.sequence)
clone.bottomRightOverhang = Overhang(self.bottomRightOverhang.sequence)
return clone
def prettyPrint(self):
#prints out top and bottom strands, truncates middle so length is ~100bp
#example:
# TTATCG...[1034bp]...GGAA
# |||| ||||
# TAGC..............CCTTAA
if self.DNAclass == 'digest':
(TL,TR,BL,BR) = SetFlags(self)
if len(self.sequence) > 8:
trExtra = ''
brExtra = ''
if TR:
trExtra = self.topRightOverhang.sequence
if BR:
brExtra = self.bottomRightOverhang.sequence
print "\t"+self.topLeftOverhang.sequence+' '*len(self.bottomLeftOverhang.sequence)+self.sequence[:4]+'.'*3+'['+str(len(self.sequence)-8)+'bp]'+'.'*3+self.sequence[len(self.sequence)-4:]+trExtra
print "\t"+' '*len(self.topLeftOverhang.sequence)+'|'*4+' '*(10+len(str(len(self.sequence)-8)))+'|'*4
print "\t"+' '*len(self.topLeftOverhang.sequence)+self.bottomLeftOverhang.sequence+Complement(self.sequence[:4])+'.'*(10+len(str(len(self.sequence)-8)))+Complement(self.sequence[len(self.sequence)-4:])+brExtra
else:
trExtra = ''
brExtra = ''
if TR:
trExtra = self.topRightOverhang.sequence
if BR:
brExtra = self.bottomRightOverhang.sequence
print "\t"+self.topLeftOverhang.sequence+' '*len(self.bottomLeftOverhang.sequence)+self.sequence+trExtra
print "\t"+' '*len(self.topLeftOverhang.sequence)+'|'*len(self.sequence)
print "\t"+' '*len(self.topLeftOverhang.sequence)+self.bottomLeftOverhang.sequence+Complement(self.sequence)+brExtra
else:
if len(self.sequence) > 8:
print "\t"+self.sequence[:4]+'.'*3+'['+str(len(self.sequence)-8)+'bp]'+'.'*3+self.sequence[len(self.sequence)-4:]
print "\t"+'|'*4+' '*(10+len(str(len(self.sequence)-8)))+'|'*4
print "\t"+Complement(self.sequence[:4])+'.'*(10+len(str(len(self.sequence)-8)))+Complement(self.sequence[len(self.sequence)-4:])
else:
print "\t"+self.sequence
print "\t"+'|'*len(self.sequence)
print "\t"+Complement(self.sequence)
return 0
# Description: BaseExpand() for regex generation, taken from BioPython
def BaseExpand(base):
"""BaseExpand(base) -> string.
given a degenerated base, returns its meaning in IUPAC alphabet.
i.e:
b= 'A' -> 'A'
b= 'N' -> 'ACGT'
etc..."""
base = base.upper()
return dna_alphabet[base]
# Description: regex() function to convert recog site into regex, from Biopython
def regex(site):
"""regex(site) -> string.
Construct a regular expression from a DNA sequence.
i.e.:
site = 'ABCGN' -> 'A[CGT]CG.'"""
reg_ex = site
for base in reg_ex:
if base in ('A', 'T', 'C', 'G', 'a', 'c', 'g', 't'):
pass
if base in ('N', 'n'):
reg_ex = '.'.join(reg_ex.split('N'))
reg_ex = '.'.join(reg_ex.split('n'))
if base in ('R', 'Y', 'W', 'M', 'S', 'K', 'H', 'D', 'B', 'V'):
expand = '['+ str(BaseExpand(base))+']'
reg_ex = expand.join(reg_ex.split(base))
return reg_ex
# Description: ToRegex() function to convert recog site into regex, from Biopython
def ToRegex(site, name):
sense = ''.join(['(?P<', name, '>', regex(site.upper()), ')'])
antisense = ''.join(['(?P<', name, '_as>', regex( reverseComplement( site.upper() )), ')'])
rg = sense + '|' + antisense
return rg
# Description: restrictionEnzyme class encapsulates information about buffers, overhangs, incubation / inactivation, end distance, etc.
class restrictionEnzyme(object):
def __init__(self,name="", buffer1="", buffer2="", buffer3="", buffer4="", bufferecori="", heatinact="", incubatetemp="", recognitionsite="",distance=""):
self.name = name
self.buffer_activity =[buffer1, buffer2, buffer3, buffer4, bufferecori]
self.inactivate_temp = heatinact
self.incubate_temp = incubatetemp
#human-readable recognition site
self.recognition_site = recognitionsite
self.endDistance = distance
#function to convert recog site into regex
alpha_only_site = re.sub('[^a-zA-Z]+', '', recognitionsite)
self.alpha_only_site = alpha_only_site
# print ToRegex(alpha_only_site, name)
self.compsite = ToRegex(alpha_only_site, name)
self.reach = False
#convert information about where the restriction happens to an offset on the top and bottom strand
#for example, BamHI -> 1/5 with respect to the start of the site match
hasNum = re.compile('(-?\d+/-?\d+)')
not_completed = 1
for m in hasNum.finditer(recognitionsite):
(top, bottom) = m.group().split('/')
self.top_strand_offset = int(top)
self.bottom_strand_offset = int(bottom)
self.reach = True
not_completed = 0
p = re.compile("/")
for m in p.finditer(recognitionsite):
if not_completed:
self.top_strand_offset = int(m.start())
self.bottom_strand_offset = len(recognitionsite) - 1 - self.top_strand_offset
def prettyPrint(self):
print "Name: ", self.name, "Recognition Site: ", self.recognition_site
def find_sites(self, DNA):
seq = DNA.sequence
(fwd, rev) = self.compsite.split('|')
fwd_rease_re = re.compile(fwd)
rev_rease_re = re.compile(rev)
indices = []
seen = {}
if DNA.topology == "circular":
searchSequence = seq.upper() + seq[0:len(self.recognition_site)-2]
else:
searchSequence = seq.upper()
for m in fwd_rease_re.finditer(searchSequence):
span = m.span()
span = (span[0] % len(seq), span[1] % len(seq))
seen[span[0]] = 1
span = span + ('sense',)
indices.append(span)
for m in rev_rease_re.finditer(searchSequence):
span = m.span()
try:
seen[span[0]]
except:
span = span + ('antisense',)
indices.append(span)
return indices
# Description: phosphorylates 5' end of DNA molecule, allowing blunt end ligation
# see http://openwetware.org/wiki/PNK_Treatment_of_DNA_Ends
def TreatPNK(inputDNAs):
for inputDNA in inputDNAs:
inputDNA.phosphorylate()
return inputDNAs
# Description: DigestBuffer() function finds the optimal digestBuffer
# todo: If Buffer 2 > 150, return Buffer 2 and list of activity values, else, return buffer 1, 3, or 4 (ignore EcoRI)
# return format will be list, [rec_buff, [buff1_act, buff2_act...buff4_Act]]
def DigestBuffer(*str_or_list):
best_buff = ""
best_buff_score = [0,0,0,0,0]
enzdic = EnzymeDictionary()
num_enz = 0
for e in str_or_list:
enz = enzdic[e]
best_buff_score = list(x + int(y) for x, y in zip(best_buff_score, enz.buffer_activity))
num_enz = num_enz + 1
ret = []
if best_buff_score[1] >( 75 * num_enz):
ret.append(2)
ret.append(best_buff_score)
else:
m = max(best_buff_score)
p = best_buff_score.index(m)
ret.append(p)
ret.append(best_buff_score)
return ret
#accepts two primers and list of input template DNAs
#todo:implement this with PCR!
def SOERoundTwo(primer1, primer2, templates):
return 0
def SOE(list_of_primers, templates):
#assume primers are in the right order outer inner_rev inner_fwd outer
#call two pcrs with list[0], [1] and list[2], [3]
return 0
def Primers(product, template):
return rPrimers(product, template, 0)
def rPrimers(product, template, baseCase):
# Annealing region design criteria:
# TODO: incorporate these somehow
# In general, the 3' base of your oligos should be a G or C
# The overall G/C content of your annealing region should be between 50 and 65%
# The overall base composition of the sequences should be balanced (no missing bases, no excesses of one particular base)
# The length of your sequence can be modified to be around 18 and 25 bp
# The sequence should appear random. There shouldn't be long stretches of a single base, or large regions of G/C rich sequence and all A/T in other regions
# There should be little secondary structure. Ideally the Tm for the oligo should be under 40 degrees.
try:
# Die after 2 rounds of recursion
if baseCase == 2:
return ()
# Compute "forward" and "backwards" LCS (i.e. on both sides of a mutation)
fwdMatch = LCS(template.sequence.upper()+'$', product.sequence.upper())
(fwdMatchCount, forwardMatchIndicesTuple, forwardPrimerStub) = fwdMatch.LCSasRegex(template.sequence.upper()+'$', product.sequence.upper(), 1)
revMatch = LCS(reverse(template.sequence.upper())+'$', reverse(product.sequence.upper()))
(revMatchCount, reverseMatchIndicesTuple, revPrimerStub) = revMatch.LCSasRegex(reverse(template.sequence.upper())+'$', reverse(product.sequence.upper()), 1)
fFlag = False
if not len(forwardMatchIndicesTuple):
fMI = (len(product.sequence), len(product.sequence))
fFlag = True
else:
fMI = forwardMatchIndicesTuple
if not len(reverseMatchIndicesTuple):
if fFlag:
# neither side matches
raise Exception('For primer design, no detectable homology on terminal ends of product and template sequences.')
rMI = (0, 0)
else:
rMI = (0 , len(product.sequence) - reverseMatchIndicesTuple[0])
# wrap around mutation case
if not fMI[0] > rMI[1]:
diffLen = fMI[0] + len(product.sequence) - rMI[1]
insert = product.sequence[rMI[1]:] + product.sequence[:fMI[0]]
else:
diffLen = fMI[0] - rMI[1]
insert = product.sequence[rMI[1]:fMI[0]]
if 60 < diffLen <= 100:
primers, enz = DesignWobble(product, insert, (rMI[1], fMI[0]))
elif 1 <= diffLen <= 60:
primers, enz = DesignEIPCR(product, insert, (rMI[1], fMI[0]), template)
if primers[0] == 0:
print '*Primer Warning*: EIPCR primers could not be designed for given template and product. Try removing BsaI, BseRI, and/or BsmBI sites from template plasmid. Returning null data.'
return [], ''
# test the PCR --> will return an exception if they don't anneal
# TODO: FIX THIS / ERR HANDLING
amplifies = PCR(primers[0], primers[1], template)
# if it amplifies up ok, then return the primers
return primers, enz
# may be misaligned ==> realign and recurse
except:
baseCase += 1
# If you had an LCS on the fwd direction, re-align using that one
if fwdMatchCount:
myLCS = product.sequence[forwardMatchIndicesTuple[0]:forwardMatchIndicesTuple[1]]
newProduct = DNA('plasmid', product.name, product.sequence[forwardMatchIndicesTuple[0]:] + product.sequence[:forwardMatchIndicesTuple[0]])
match = re.search(myLCS.upper(), template.sequence.upper())
if match:
startSite = match.start()
newTemplate = DNA('plasmid', template.name, template.sequence[startSite:]+template.sequence[:startSite])
else:
return ()
# If you had an LCS in the rev direction, re-align using that one
elif revMatchCount:
myLCS = reverse(reverse(product.sequence)[reverseMatchIndicesTuple[0]:reverseMatchIndicesTuple[1]])
myMatch = re.search(myLCS.upper(), product.sequence.upper())
startIndex = myMatch.start()
newProduct = DNA('plasmid', product.name, product.sequence[startIndex:] + product.sequence[:startIndex])
match = re.search(myLCS.upper(), template.sequence.upper())
if match:
startSite = match.start()
newTemplate = DNA('plasmid', template.name, template.sequence[startSite:]+template.sequence[:startSite])
else:
return ()
else:
return ()
return rPrimers(newProduct, newTemplate, baseCase)
def getAnnealingRegion(template, fwd):
if len(template) <= 10:
return ''
if not fwd:
template = reverseComplement(template)
for i in range(len(template)):
currentRegion = template[:i]
if primerTm(currentRegion) >= 60:
break
return currentRegion
def chooseReachover(plasmid):
EnzDict = EnzymeDictionary()
bsaI = EnzDict['BsaI']; bsaMatch = bsaI.find_sites(plasmid); bsaFlag = len(bsaMatch) > 0
bsmBI = EnzDict['BsmBI']; bsmMatch = bsmBI.find_sites(plasmid); bsmFlag = len(bsmMatch) > 0
bseRI = EnzDict['BseRI']; bseMatch = bseRI.find_sites(plasmid); bseFlag = len(bseMatch) > 0
if not bsaFlag:
# use BsaI
tail = "taaattGGTCTCA"
return bsaI, tail, 2
if not bsmFlag:
# use bsmBI
tail = 'taaattCGTCTCA'
return bsmBI, tail, 2
if not bseFlag:
# use bsmBI
tail = 'taaattGAGGAGattcccta'
return bseRI, tail, 1
return 0, 0, 0
#given a parent plasmid and a desired product plasmid, design the eipcr primers
#use difflib to figure out where the differences are
#if there is a convenient restriction site in or near the modification, use that
# otherwise, check if there exists bseRI or bsaI sites, and design primers using those
# print/return warning if can't do this via eipcr (insert span too long)
def DesignEIPCR(product, insert, diffTuple, template):
# use 60 bp to right of mutation as domain for annealing region design
(fwdStart, fwdEnd) = (diffTuple[1], diffTuple[1]+60)
enz, tail, halfSiteSize = chooseReachover(template)
if enz == 0:
return 0, 0
# accounting for the wrap around case
if fwdEnd > len(product.sequence):
fwdEnd = fwdEnd % len(product.sequence)
fwdAnneal = getAnnealingRegion(product.sequence[fwdStart:] + product.sequence[:fwdEnd], 1)
else:
fwdAnneal = getAnnealingRegion(product.sequence[fwdStart:fwdEnd], 1)
# same with the 60 bp to the left of the mutation
(revStart, revEnd) = (diffTuple[0]-60, diffTuple[0])
if revStart < 0:
revAnneal = getAnnealingRegion(product.sequence[revStart:] + product.sequence[:revEnd], 0)
else:
revAnneal = getAnnealingRegion(product.sequence[revStart:revEnd], 0)
# use BsaI 'taaGGTCTCx1234' to do reachover digest and ligation
# wrap around case
if not diffTuple[1] > diffTuple[0]:
half = ((diffTuple[1] + len(product.sequence) - diffTuple[0]) / 2) + diffTuple[0]
else:
half = ((diffTuple[1] - diffTuple[0]) / 2) + diffTuple[0]
# the 4 bp in the overhang must not contain any N's --> otherwise, ligation won't work
overhang = product.sequence[half - halfSiteSize : half + halfSiteSize]
while 'N' in overhang.upper():
half = half + 1
overhang = product.sequence[half - halfSiteSize : half + halfSiteSize]
# Accounting for the == 0 case, which would otherwise send the mutagenic region to ''
if diffTuple[1] == 0:
fwdPrimer = DNA('primer','fwd EIPCR primer for '+product.name, tail + product.sequence[half - halfSiteSize :] + fwdAnneal)
else:
# Originally: product.sequence[half - 2 : diffTuple[1] + 1]
fwdPrimer = DNA('primer','fwd EIPCR primer for '+product.name, tail + product.sequence[half - halfSiteSize : diffTuple[1]] + fwdAnneal)
# print 'AFTER TAIL', product.sequence[half - halfSiteSize : diffTuple[1] + 1]
if half + halfSiteSize == 0:
revPrimer = DNA('primer','rev EIPCR primer for '+product.name, tail + reverseComplement(product.sequence[ diffTuple[0] :]) + revAnneal)
else:
revPrimer = DNA('primer','rev EIPCR primer for '+product.name, tail + reverseComplement(product.sequence[ diffTuple[0] : half + halfSiteSize]) + revAnneal)
# print 'REV AFTER TAIL', reverseComplement(product.sequence[ diffTuple[0] : half + halfSiteSize])
return (fwdPrimer, revPrimer), enz
# TODO: Implement this, along with restriction site checking?
def DesignWobble(parent, product):
return 0
def Distinguish2DNABands(a, b):
#case of 2
#for a standard 1-2% agarose gel,
#we can distinguish a and b if
#do the following in wolframalpha: LogLogPlot[|a - b| > (0.208*a+42), {a, 0, 9000}, {b, 0, 9000}]
return ( abs(a.length - b.length) > (0.208*a.length+42)) & (min(a.length, b.length) > 250 )
#only returns True if can distinguish between all of the DNA bands
def DistinguishDNABands(list_of_dnas):
ret_val = True
for i in range(len(list_of_dnas)-1):
ret_val = ret_val & Distinguish2DNABands(list_of_dnas[i], list_of_dnas[i+1])
return ret_val
def FindDistinguishingEnzyme(list_of_dnas):
#find the REase that can distinguish between the input DNAs
#DistinguishDNABands(a, b) returns true if we can
# tell apart bands a, b on a gel and a and b are both > 300bp, < 7kb
#Let n be the number of DNAs in the list. Let E be the enzyme under question
# Then we construct a n-dimensional matrix
# where the dimensions have max value defined by the number of fragments generated by E
# E can be used to distinguish between the DNAs if there is a complete row or column
# that is distinguishable (all True by DistinguishDNABands)
#ASSUMPTION, for now, only consider n=3
#iterate over all enzymes (enzyme list should be prioritized by availability and "goodness")
#execute find good enz
#iterate over all combinations of 2 enzymes
#execute find good enz
##find good enz
#for each enzyme/combo in the list
#calculate fragments for each input DNA
#skip if any DNA has # fragments > 6
#n-length list, each character represents the DNA fragment currently under investigation
#iterate to fill in the hypermatrix values
#find if the hypermatrix has a column/row that has all True
#returns top 5 list of enzymes/combos that work
return 0
def FindDistEnz():
return FindDistinguishingEnzyme(list_of_dnas)
# Description: SetFlags() returns overhang information about a DNA() digest object
def SetFlags(frag):
(TL,TR,BL,BR) = (0,0,0,0)
if frag.topLeftOverhang.sequence != '':
TL = 1
if frag.topRightOverhang.sequence != '':
TR = 1
if frag.bottomLeftOverhang.sequence != '':
BL = 1
if frag.bottomRightOverhang.sequence != '':
BR = 1
return (TL,TR,BL,BR)
def ligatePostProcessing(ligated, childrenTuple, message):
ligated.setChildren(childrenTuple)
for child in childrenTuple:
child.addParent(ligated)
ligated.setTimeStep(0.5)
ligated.addMaterials(['DNA Ligase','DNA Ligase Buffer','ddH20'])
ligated.instructions = message
return ligated
def isComplementary(seq1, seq2):
if seq1 == '' or seq2 == '':
return False
elif seq1 == Complement(seq2):
return True
return False
def isReverseComplementary(seq1, seq2):
if seq1 == '' or seq2 == '':
return False
elif seq1 == reverseComplement(seq2):
return True
return False
# Description: Ligate() function accepts a list of DNA() digest objects, and outputs list of DNA
def Ligate(inputDNAs):
products = []
# self ligation
for fragment in inputDNAs:
if not isinstance(fragment, DNA):
print '\n*Ligate Error*: Ligate function was passed a non-DNA argument. Argument discarded.\n'
continue
(TL,TR,BL,BR) = SetFlags(fragment)
if fragment.DNAclass == 'plasmid':
print '\n*Ligate Warning*: for ligation reaction, invalid input molecule removed -- ligation input DNA objects must be of class \'digest\' or be PNK treated linear molecules.\n'
elif TL+TR+BL+BR == 1:
pass
elif TL+TR+BL+BR == 0:
# blunt end self ligation case --> need to identify that both sides were digested (i.e. both ecoRV blunt ends)
# and then return circular product of same sequence.
pass
elif fragment.topLeftOverhang.sequence != '':
if isComplementary(fragment.topLeftOverhang.sequence.lower(), fragment.bottomRightOverhang.sequence.lower()):
ligated = DNA('plasmid',fragment.name+' self-ligation',fragment.topLeftOverhang.sequence+fragment.sequence)
products.append(ligatePostProcessing(ligated, (fragment, ), 'Self-ligate ('+fragment.name+') with DNA ligase for 30 minutes at room-temperature.'))
elif fragment.bottomLeftOverhang.sequence != '':
if isComplementary(fragment.bottomLeftOverhang.sequence.lower(), fragment.topRightOverhang.sequence.lower()):
ligated = DNA('plasmid',fragment.name+' self-ligation',fragment.sequence+fragment.topRightOverhang.sequence)
products.append(ligatePostProcessing(ligated, (fragment, ), 'Self-ligate ('+fragment.name+') with DNA ligase for 30 minutes at room-temperature.'))
if len(products) > 0 or len(inputDNAs) == 1:
return products
i = 0
while i < len(inputDNAs):
fragOne = inputDNAs[i]
if not isinstance(fragOne, DNA):
print '\n*Ligate Warning*: Ligate function was passed a non-DNA argument. Argument discarded.\n'
i += 1
continue
elif fragOne.DNAclass == 'plasmid':
i += 1
continue
j = i + 1
while j < len(inputDNAs):
fragTwo = inputDNAs[j]
if not isinstance(fragOne, DNA) or not isinstance(fragTwo, DNA):
j += 1
continue
elif fragTwo.DNAclass == 'plasmid':
j += 1
continue
(LTL,LTR,LBL,LBR) = SetFlags(fragOne)
(RTL,RTR,RBL,RBR) = SetFlags(fragTwo)
# first3 is the number of 3' overhangs for the left fragment, and so on for the other three classifiers
(first3, first5, second3, second5) = (LTR + LBL, LBR + LTL, RTR + RBL, RBR + RTL)
# blunt end ligation:
firstFlag = first3 + first5
secondFlag = second3 + second5
if fragOne.pnkTreated and fragTwo.pnkTreated and firstFlag <= 1 and secondFlag <= 1:
if not firstFlag and secondFlag or firstFlag and not secondFlag:
pass
elif not firstFlag and not secondFlag:
ligated = DNA('plasmid', fragOne.name+', '+fragTwo.name+' ligation product', fragOne.sequence + fragTwo.sequence)
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
elif firstFlag and secondFlag:
if first3 and second3:
if isComplementary(fragOne.topRightOverhang.sequence.upper(), fragTwo.bottomLeftOverhang.sequence.upper()):
ligated = DNA('plasmid',fragOne.name+', '+fragTwo.name+' ligation product',fragOne.sequence+fragOne.topRightOverhang.sequence+fragTwo.sequence)
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
if isComplementary(fragOne.bottomLeftOverhang.sequence.upper(), fragTwo.topRightOverhang.sequence.upper()):
ligated = DNA('plasmid',fragOne.name+', '+fragTwo.name+' ligation product',fragTwo.sequence+fragTwo.topRightOverhang.sequence+fragOne.sequence)
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
if isReverseComplementary(fragOne.topRightOverhang.sequence.upper(), fragTwo.topRightOverhang.sequence.upper()):
ligated = DNA('plasmid',fragOne.name+', '+fragTwo.name+' ligation product',fragOne.sequence+fragOne.topRightOverhang.sequence+reverseComplement(fragTwo.sequence))
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
if isReverseComplementary(fragOne.bottomLeftOverhang.sequence.upper(), fragTwo.bottomLeftOverhang.sequence.upper()):
ligated = DNA('plasmid',fragOne.name+', '+fragTwo.name+' ligation product',reverseComplement(fragTwo.sequence)+reverse(fragTwo.bottomLeftOverhang.sequence)+fragOne.sequence)
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
else:
if isComplementary(fragOne.topLeftOverhang.sequence.upper(), fragTwo.bottomRightOverhang.sequence.upper()):
ligated = DNA('plasmid',fragOne.name+', '+fragTwo.name+' ligation product',fragTwo.sequence+fragOne.topLeftOverhang.sequence+fragOne.sequence)
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
if isComplementary(fragOne.bottomRightOverhang.sequence.upper(), fragTwo.topLeftOverhang.sequence.upper()):
ligated = DNA('plasmid',fragOne.name+', '+fragTwo.name+' ligation product',fragOne.sequence+fragTwo.topLeftOverhang.sequence+fragTwo.sequence)
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
if isReverseComplementary(fragOne.topLeftOverhang.sequence.upper(), fragTwo.topLeftOverhang.sequence.upper()):
ligated = DNA('plasmid',fragOne.name+', '+fragTwo.name+' ligation product',reverseComplement(fragTwo.sequence)+fragOne.topLeftOverhang.sequence+fragOne.sequence)
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
if isReverseComplementary(fragOne.bottomRightOverhang.sequence.upper(), fragTwo.bottomRightOverhang.sequence.upper()):
ligated = DNA('plasmid',fragOne.name+', '+fragTwo.name+' ligation product',fragOne.sequence+Complement(fragOne.bottomRightOverhang.sequence)+reverseComplement(fragTwo.sequence))
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
# non-blunt ligation:
else:
if first3 == 2:
if isComplementary(fragOne.topRightOverhang.sequence.upper(), fragTwo.bottomLeftOverhang.sequence.upper()):
if isComplementary(fragOne.bottomLeftOverhang.sequence.upper(), fragTwo.topRightOverhang.sequence.upper()):
ligated = DNA('plasmid',fragOne.name+', '+fragTwo.name+' ligation product',fragOne.sequence+fragOne.topRightOverhang.sequence+fragTwo.sequence+fragTwo.topRightOverhang.sequence)
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
if isReverseComplementary(fragOne.topRightOverhang.sequence.upper(), fragTwo.topRightOverhang.sequence.upper()):
if isReverseComplementary(fragOne.bottomLeftOverhang.sequence.upper(), fragTwo.bottomLeftOverhang.sequence.upper()):
ligated = DNA('plasmid',fragOne.name+', '+fragTwo.name+' ligation product',fragOne.sequence+fragOne.topRightOverhang.sequence+reverseComplement(fragTwo.sequence)+reverse(fragTwo.bottomLeftOverhang.sequence))
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
elif first3 == 1:
if LTR:
# then you know it must have LTL
if RTR:
# then, if it is to ligate, it must have compatible RTL
if isReverseComplementary(fragOne.topRightOverhang.sequence.upper(), fragTwo.topRightOverhang.sequence.upper()):
if isReverseComplementary(fragOne.topLeftOverhang.sequence.upper(), fragTwo.topLeftOverhang.sequence.upper()):
ligated = DNA('plasmid',fragOne.name+', '+fragTwo.name+' ligation product',fragOne.topLeftOverhang.sequence+fragOne.sequence+fragOne.topRightOverhang.sequence+reverseComplement(fragTwo.sequence))
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
else:
# to ligate, it must have RBL and RBR
if isComplementary(fragOne.topRightOverhang.sequence.upper(), fragTwo.bottomLeftOverhang.sequence.upper()):
if isComplementary(fragOne.topLeftOverhang.sequence.upper(), fragTwo.bottomRightOverhang.sequence.upper()):
ligated = DNA('plasmid',fragOne.name+', '+fragTwo.name+' ligation product',fragOne.topLeftOverhang.sequence+fragOne.sequence+fragOne.topRightOverhang.sequence+fragTwo.sequence)
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
else:
# you know it has LBL as its 3 and LBR as its 5
if RTR:
# then, if it is to ligate, it must have compatible RTL
if isComplementary(fragTwo.topRightOverhang.sequence.upper(), fragOne.bottomLeftOverhang.sequence.upper()):
if isComplementary(fragTwo.topLeftOverhang.sequence.upper(), fragOne.bottomRightOverhang.sequence.upper()):
ligated = DNA('plasmid',fragOne.name+', '+fragTwo.name+' ligation product',fragOne.sequence+fragTwo.topLeftOverhang.sequence+fragTwo.sequence+fragTwo.topRightOverhang.sequence)
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
else:
# to ligate, it must have RBL and RBR
if isReverseComplementary(fragOne.bottomRightOverhang.sequence.upper(), fragTwo.bottomRightOverhang.sequence.upper()):
if isReverseComplementary(fragOne.bottomLeftOverhang.sequence.upper(), fragTwo.bottomLeftOverhang.sequence.upper()):
ligated = DNA('plasmid',fragOne.name+', '+fragTwo.name+' ligation product',Complement(fragOne.bottomLeftOverhang.sequence)+fragOne.sequence+Complement(fragOne.bottomRightOverhang.sequence)+reverseComplement(fragTwo.sequence))
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
else:
if isComplementary(fragOne.topLeftOverhang.sequence.upper(), fragTwo.bottomRightOverhang.sequence.upper()):
if isComplementary(fragOne.bottomRightOverhang.sequence.upper(), fragTwo.topLeftOverhang.sequence.upper()):
ligated = DNA('plasmid',fragOne.name+', '+fragTwo.name+' ligation product',fragOne.topLeftOverhang.sequence+fragOne.sequence+fragTwo.topLeftOverhang.sequence+fragTwo.sequence)
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
if isReverseComplementary(fragOne.topLeftOverhang.sequence.upper(), fragTwo.topLeftOverhang.sequence.upper()):
if isReverseComplementary(fragOne.bottomRightOverhang.sequence.upper(), fragTwo.bottomRightOverhang.sequence.upper()):
ligated = DNA('plasmid',fragOne.name+', '+fragTwo.name+' ligation product',fragOne.topLeftOverhang.sequence+fragOne.sequence+reverse(fragTwo.bottomRightOverhang.sequence)+reverseComplement(fragTwo.sequence))
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
j += 1
i += 1
if len(products) == 0:
raise Exception('*Ligate Error*: ligation resulted in zero products.')
return products
# Description: fragment processing function for zymo, short fragment and gel cleanups
def cleanupPostProcessing(band, source):
parentBand = band.clone()
parentBand.setChildren((band,))
band.addParent(parentBand)
timeStep = 0.5
cleanupMaterials = ['Zymo Column','Buffer PE','ddH20']
if source == 'short fragment':
cleanupMaterials.append('Ethanol / Isopropanol')
elif source == 'gel extraction and short fragment':
cleanupMaterials += ['Buffer ADB', 'Ethanol / Isopropanol']
timeStep = 1
elif source == 'gel extraction and zymo':
cleanupMaterials.append('Buffer ADB')
timeStep = 1
parentBand.setTimeStep(timeStep)
parentBand.addMaterials(cleanupMaterials)
parentBand.instructions = 'Perform '+source+' cleanup on ('+band.name+').'
return parentBand
# Description: ZymoPurify() function takes a list of DNA objects and filters out < 300 bp DNA's
def ZymoPurify(inputDNAs):
counter = 0
for zymoInput in inputDNAs:
if not isinstance(zymoInput, DNA):
print '\n*Zymo Warning*: Zymo purification function was passed a non-DNA argument. Argument discarded.\n'
inputDNAs.pop(counter)
else:
counter += 1
if len(inputDNAs) == 0:
raise Exception('*Zymo Error*: Zymo purification function passed empty input list.')
return inputDNAs
(outputBands, sizeTuples) = ([], [])
for DNA in inputDNAs:
sizeTuples.append((len(DNA.sequence),DNA))
sizeTuples.sort(reverse=True)
currentTuple = sizeTuples[0]
currentSize = currentTuple[0]
while currentSize > 300:
band = currentTuple[1]
outputBands.append(cleanupPostProcessing(band,'standard zymo'))
if len(sizeTuples) > 0:
sizeTuples.pop(0)
currentTuple = sizeTuples[0]
currentSize = currentTuple[0]
else:
break
return outputBands
# Description: ShortFragmentCleanup() function takes a list of DNA objects and filters out < 50 bp DNA's
def ShortFragmentCleanup(inputDNAs):
if len(inputDNAs) == 0:
raise Exception('*Short Fragment Cleanup Error*: short fragment cleanup function passed empty input list.')
return inputDNAs
outputBands = []
sizeTuples = []
for DNA in inputDNAs:
fragSize = len(DNA.sequence)
sizeTuples.append((fragSize,DNA))
sizeTuples.sort(reverse=True)
currentTuple = sizeTuples[0]
currentSize = currentTuple[0]
while currentSize > 50 and len(sizeTuples) > 1:
band = currentTuple[1]
outputBands.append(cleanupPostProcessing(band,'short fragment'))
sizeTuples.pop(0)
currentTuple = sizeTuples[0]
currentSize = currentTuple[0]
if currentSize > 50:
band = currentTuple[1]
outputBands.append(cleanupPostProcessing(band,'short fragment'))
return outputBands
# Description: GelAndZymoPurify() function employs a user-specified purification strategy to cut out a range of band sizes, and
# then filters out < 300 bp DNA's. If 50 bp < [ ] < 300 bp DNAs are detected, switches to short fragment cleanup mode.
def GelAndZymoPurify(inputDNAs, strategy):
# sort based on size
if len(inputDNAs) == 0:
raise Exception('*Gel Purification Error*: gel purification with strategy \'"+strategy+"\' passed empty input list.')
return inputDNAs
elif len(inputDNAs) == 1:
return inputDNAs
(shortFlag, lostFlag, interBands, outputBands, sizeTuples) = (False, False, [], [], [])
for DNA in inputDNAs:
sizeTuples.append((len(DNA.sequence),DNA))
if isinstance( strategy, str):
if strategy == 'L':
sizeTuples.sort(reverse=True)
n = 0
currentTuple = sizeTuples[n]
largestSize = currentTuple[n]
currentSize = largestSize
while currentSize > largestSize * 5/6 and n < len(sizeTuples) - 1:
interBands.append(currentTuple[1])
n += 1
currentTuple = sizeTuples[n]
currentSize = currentTuple[0]
if currentSize > largestSize * 5/6:
if currentSize < 50:
lostFlag = True
elif currentSize < 300:
shortFlag = True
interBands.append(currentTuple[1])
if len(interBands) > 1:
print '\n*Gel Purification Warning*: large fragment purification resulted in purification of multiple, possibly unintended distinct DNAs.\n'
elif strategy == 'S':
sizeTuples.sort()
n = 0
currentTuple = sizeTuples[n]
smallestSize = currentTuple[n]
currentSize = smallestSize
while currentSize < smallestSize * 5/6 and n < len(sizeTuples) - 1:
interBands.append(currentTuple[1])
n = n + 1
currentTuple = sizeTuples[n]
currentSize = currentTuple[0]
if currentSize > smallestSize * 5/6:
if currentSize < 50:
lostFlag = True
elif currentSize < 300:
shortFlag = True
interBands.append(currentTuple[1])
if len(interBands) > 1:
print '\n*Gel Purification Warning*: small fragment purification resulted in purification of multiple, possibly unintended distinct DNAs.\n'
elif isinstance( strategy, ( int, long ) ):
sizeTuples.sort(reverse=True)
currentTuple = sizeTuples[0]
currentSize = currentTuple[0]
while currentSize > strategy * 6/5 and len(sizeTuples) > 1:
sizeTuples.pop(0)
currentTuple = sizeTuples[0]
currentSize = currentTuple[0]
while currentSize > strategy * 5/6 and len(sizeTuples) > 1:
band = sizeTuples.pop(0)
interBands.append(band[1])
currentTuple = sizeTuples[0]
currentSize = currentTuple[0]
if currentSize > strategy * 5/6:
if currentSize < 50:
lostFlag = True
elif currentSize < 300:
shortFlag = True
interBands.append(currentTuple[1])
if len(interBands) == 0:
raise Exception('*Gel Purification Error*: for gel purification with strategy \'"+strategy+"\', no digest bands present in given range, with purification yielding zero DNA products.')
elif len(interBands) > 1:
print '\n*Gel Purification Warning*: fragment purification in range of band size '"+str(strategy)+"' resulted in purification of multiple, possibly unintended distinct DNAs.\n'
else:
raise Exception('*Gel Purification Error*: invalid cleanup strategy argument. Valid arguments are \'L\', \'S\', or integer size of band.')
if len(interBands) == 0:
if lostFlag:
print '\n*Gel Purification Warning*: purification with given strategy \'"+strategy+"\' returned short fragments (< 50 bp) that were lost. Returning empty products list.\n'
raise Exception('*Gel Purification Error*: purification with given strategy "'+strategy+'" yielded zero products.')
else:
if lostFlag:
print '\n*Gel Purification Warning*: purification with given strategy "'+strategy+'" returned at least one short fragment (< 50 bp) that was lost. Returning remaining products.\n'
for band in interBands:
outputBands.append(cleanupPostProcessing(band,'gel extraction and zymo'))
elif shortFlag:
print '\n*Gel Purification Warning*: purification with given strategy "'+strategy+'" yielded short fragments (< 300 bp). Returning short fragment cleanup products.\n'
for band in interBands:
outputBands.append(cleanupPostProcessing(band,'gel extraction and short fragment'))
else:
for band in interBands:
outputBands.append(cleanupPostProcessing(band,'gel extraction and zymo'))
return outputBands
# Description: Ligate() function that allows linear ligation products
# Note: also disallows blunt end ligation
def linLigate(inputDNAs):
products = []
# self ligation
for fragment in inputDNAs:
if not isinstance(fragment, DNA):
print '\n*Ligate Warning*: Ligate function was passed a non-DNA argument. Argument discarded.\n'
continue
(TL,TR,BL,BR) = SetFlags(fragment)
if fragment.DNAclass != 'digest':
print '\n*Ligate Warning*: for ligation reaction, invalid input molecule removed -- ligation input DNA objects must be of class \'digest\'.\n'
elif TL+TR+BL+BR == 1:
pass
elif TL+TR+BL+BR == 0:
# blunt end self ligation case --> need to identify that both sides were digested (i.e. both ecoRV blunt ends)
# and then return circular product of same sequence.
pass
elif fragment.topLeftOverhang.sequence != '':
if isComplementary(fragment.topLeftOverhang.sequence.lower(), fragment.bottomRightOverhang.sequence.lower()):
ligated = DNA('plasmid',fragment.name+' self-ligation',fragment.topLeftOverhang.sequence+fragment.sequence)
products.append(ligatePostProcessing(ligated, (fragment, ), 'Self-ligate ('+fragment.name+') with DNA ligase for 30 minutes at room-temperature.'))
elif fragment.bottomLeftOverhang.sequence != '':
if isComplementary(fragment.topLeftOverhang.sequence.lower(), fragment.topRightOverhang.sequence.lower()):
ligated = DNA('plasmid',fragment.name+' self-ligation',fragment.sequence+fragment.topRightOverhang.sequence)
products.append(ligatePostProcessing(ligated, (fragment, ), 'Self-ligate ('+fragment.name+') with DNA ligase for 30 minutes at room-temperature.'))
if len(products) > 0 or len(inputDNAs) == 1:
return products
i = 0
while i < len(inputDNAs):
fragOne = inputDNAs[i]
if not isinstance(fragOne, DNA):
print '\n*Ligate Warning*: Ligate function was passed a non-DNA argument. Argument discarded.\n'
i += 1
continue
j = i + 1
while j < len(inputDNAs):
fragTwo = inputDNAs[j]
if not isinstance(fragOne, DNA) or not isinstance(fragTwo, DNA):
print '\n*Ligate Warning*: Ligate function was passed a non-DNA argument. Argument discarded.\n'
j += 1
continue
elif fragOne.DNAclass != 'digest' or fragTwo.DNAclass != 'digest':
j += 1
continue
(LTL,LTR,LBL,LBR) = SetFlags(fragOne)
(RTL,RTR,RBL,RBR) = SetFlags(fragTwo)
# first3 is the number of 3' overhangs for the left fragment, and so on for the other three classifiers
(first3, first5, second3, second5) = (LTR + LBL, LBR + LTL, RTR + RBL, RBR + RTL)
firstFlag = first3 + first5
secondFlag = second3 + second5
# non-blunt end ligation:
if first3 == 2:
# Here, you know that it has LTR and LBL
# But you don't know about its RXX fields
if isComplementary(fragOne.topRightOverhang.sequence.upper(), fragTwo.bottomLeftOverhang.sequence.upper()):
if isComplementary(fragOne.bottomLeftOverhang.sequence.upper(), fragTwo.topRightOverhang.sequence.upper()):
ligated = DNA('plasmid',fragOne.name+', '+fragTwo.name+' ligation product',fragOne.sequence+fragOne.topRightOverhang.sequence+fragTwo.sequence+fragTwo.topRightOverhang.sequence)
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
else:
ligated = DNA('digest',fragOne.name+', '+fragTwo.name+' ligation product',fragOne.sequence+fragOne.topRightOverhang.sequence+fragTwo.sequence)
ligated.bottomLeftOverhang = Overhang(fragOne.bottomLeftOverhang.sequence)
# you don't know whether it is RTR or RBR
if RTR:
ligated.topRightOverhang = Overhang(fragTwo.topRightOverhang.sequence)
elif RBR:
ligated.bottomRightOverhang = Overhang(fragTwo.bottomRightOverhang.sequence)
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
# you know it's not going to circularize, but you also know it has a LBL
elif isComplementary(fragOne.bottomLeftOverhang.sequence.upper(), fragTwo.topRightOverhang.sequence.upper()):
ligated = DNA('digest',fragOne.name+', '+fragTwo.name+' ligation product',fragTwo.sequence+fragTwo.topRightOverhang.sequence+fragOne.sequence)
ligated.topRightOverhang = Overhang(fragOne.topRightOverhang.sequence)
# you don't know whether it is RTL or RBL
if RTL:
ligated.topLeftOverhang = Overhang(fragTwo.topLeftOverhang.sequence)
elif RBL:
ligated.bottomLeftOverhang = Overhang(fragTwo.bottomLeftOverhang.sequence)
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
if isReverseComplementary(fragOne.topRightOverhang.sequence.upper(), fragTwo.topRightOverhang.sequence.upper()):
if isReverseComplementary(fragOne.bottomLeftOverhang.sequence.upper(), fragTwo.bottomLeftOverhang.sequence.upper()):
ligated = DNA('plasmid',fragOne.name+', '+fragTwo.name+' ligation product',fragOne.sequence+fragOne.topRightOverhang.sequence+reverseComplement(fragTwo.sequence)+reverse(fragTwo.bottomLeftOverhang.sequence))
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
else:
ligated = DNA('digest',fragOne.name+', '+fragTwo.name+' ligation product',fragOne.sequence+fragOne.topRightOverhang.sequence+reverseComplement(fragTwo.sequence))
ligated.bottomLeftOverhang = Overhang(fragOne.bottomLeftOverhang.sequence)
# you don't know whether it is RBL or RTL
if RTL:
ligated.bottomRightOverhang = Overhang(reverse(fragTwo.topLeftOverhang.sequence))
elif RBL:
ligated.topRightOverhang = Overhang(reverse(fragTwo.bottomLeftOverhang.sequence))
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
# you know it's not going to circularize, but you also know it has a LBL
elif isReverseComplementary(fragOne.bottomLeftOverhang.sequence.upper(), fragTwo.bottomLeftOverhang.sequence.upper()):
ligated = DNA('digest',fragOne.name+', '+fragTwo.name+' ligation product',reverseComplement(fragTwo.sequence)+reverse(fragTwo.bottomLeftOverhang.sequence)+fragOne.sequence)
ligated.topRightOverhang = Overhang(fragOne.topRightOverhang.sequence)
# you don't know whether it is RTR or RBR
if RTR:
ligated.bottomLeftOverhang = Overhang(reverse(fragTwo.topRightOverhang.sequence))
elif RBR:
ligated.topLeftOverhang = Overhang(reverse(fragTwo.bottomRightOverhang.sequence))
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
elif first3 == 1:
if LTR:
# then you know it must have LTL
if RTR:
# then, if it is to ligate, it must have compatible RTL
if isReverseComplementary(fragOne.topRightOverhang.sequence.upper(), fragTwo.topRightOverhang.sequence.upper()):
if isReverseComplementary(fragOne.topLeftOverhang.sequence.upper(), fragTwo.topLeftOverhang.sequence.upper()):
ligated = DNA('plasmid',fragOne.name+', '+fragTwo.name+' ligation product',fragOne.topLeftOverhang.sequence+fragOne.sequence+fragOne.topRightOverhang.sequence+reverseComplement(fragTwo.sequence))
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
else:
ligated = DNA('digest',fragOne.name+', '+fragTwo.name+' ligation product',fragOne.sequence+fragOne.topRightOverhang.sequence+reverseComplement(fragTwo.sequence))
ligated.topLeftOverhang = Overhang(fragOne.topLeftOverhang.sequence)
# you don't know whether it is RTL or RBL
if RTL:
ligated.bottomRightOverhang = Overhang(reverse(fragTwo.topLeftOverhang.sequence))
elif RBL:
ligated.bottomLeftOverhang = Overhang(reverse(fragTwo.bottomLeftOverhang.sequence))
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
# now, you know it's not going to circularize, but you know it has LTL
elif isReverseComplementary(fragOne.topLeftOverhang.sequence.upper(), fragTwo.topLeftOverhang.sequence.upper()):
ligated = DNA('digest',fragOne.name+', '+fragTwo.name+' ligation product',reverseComplement(fragTwo.sequence)+fragOne.topLeftOverhang.sequence+fragOne.sequence)
ligated.topRightOverhang = Overhang(fragOne.topRightOverhang.sequence)
# you dont know whether you have RTR (=> BLO) or RBR (=> TLO) ==> correction: yes you do, you have RTR
ligated.bottomLeftOverhang = Overhang(reverse(fragTwo.topRightOverhang.sequence))
# if RTR:
# ligated.bottomLeftOverhang = Overhang(reverse(fragTwo.topRightOverhang.sequence))
# elif RBR:
# ligated.topLeftOverhang = Overhang(reverse(fragTwo.bottomRightOverhang.sequence))
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
# you know here that you have LTR and LTL, and that you do not have RTR
else:
# to ligate, it must have RBL and RBR
if isComplementary(fragOne.topRightOverhang.sequence.upper(), fragTwo.bottomLeftOverhang.sequence.upper()):
if isComplementary(fragOne.topLeftOverhang.sequence.upper(), fragTwo.bottomRightOverhang.sequence.upper()):
ligated = DNA('plasmid',fragOne.name+', '+fragTwo.name+' ligation product',fragOne.topLeftOverhang.sequence+fragOne.sequence+fragOne.topRightOverhang.sequence+fragTwo.sequence)
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
else:
ligated = DNA('digest',fragOne.name+', '+fragTwo.name+' ligation product',fragOne.sequence+fragOne.topRightOverhang.sequence+fragTwo.sequence)
ligated.topLeftOverhang = Overhang(fragOne.topLeftOverhang.sequence)
ligated.bottomRightOverhang = Overhang(fragTwo.bottomRightOverhang.sequence)
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
elif isComplementary(fragOne.topLeftOverhang.sequence.upper(), fragTwo.bottomRightOverhang.sequence.upper()):
# here, you know you have LTR and LTL, has a complementary RBR and does not have a RTR
ligated = DNA('digest',fragOne.name+', '+fragTwo.name+' ligation product',fragTwo.sequence+fragOne.topLeftOverhang.sequence+fragOne.sequence)
ligated.topRightOverhang = Overhang(fragOne.topRightOverhang.sequence)
if RTL:
ligated.topLeftOverhang= Overhang(fragTwo.topLeftOverhang.sequence)
elif RBL:
ligated.bottomLeftOverhang = Overhang(fragTwo.bottomLeftOverhang.sequence)
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
else:
# you know it has LBL as its 3 and LBR as its 5
if RTR:
# then, if it is to ligate, it must have compatible RTL
if isComplementary(fragTwo.topRightOverhang.sequence.upper(), fragOne.bottomLeftOverhang.sequence.upper()):
if isComplementary(fragTwo.topLeftOverhang.sequence.upper(), fragOne.bottomRightOverhang.sequence.upper()):
ligated = DNA('plasmid',fragOne.name+', '+fragTwo.name+' ligation product',fragOne.sequence+fragTwo.topLeftOverhang.sequence+fragTwo.sequence+fragTwo.topRightOverhang.sequence)
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
else:
ligated = DNA('digest',fragOne.name+', '+fragTwo.name+' ligation product',fragOne.sequence+fragTwo.topLeftOverhang.sequence+fragTwo.sequence)
ligated.bottomRightOverhang = Overhang(fragOne.bottomRightOverhang.sequence)
# you don't know whether it is a RBL or RTL
if RTL:
ligated.topLeftOverhang = Overhang(fragTwo.topLeftOverhang.sequence)
elif RBL:
ligated.bottomLeftOverhang = Overhang(fragTwo.bottomLeftOverhang.sequence)
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
# you know it's not going to circularize, but you know it has LBR
elif isComplementary(fragTwo.topLeftOverhang.sequence.upper(), fragOne.bottomRightOverhang.sequence.upper()):
ligated = DNA('plasmid',fragOne.name+', '+fragTwo.name+' ligation product',fragOne.sequence+fragTwo.topLeftOverhang.sequence+fragTwo.sequence)
ligated.bottomLeftOverhang = Overhang(fragOne.bottomLeftOverhang.sequence)
if RTR:
ligated.topRightOverhang = Overhang(fragTwo.topRightOverhang.sequence)
elif RBR:
ligated.bottomRightOverhang = Overhang(fragTwo.bottomRightOverhang.sequence)
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
# up to here is good
else:
# you kno it has LBL, LBR, and not RTR
# to ligate, it must have RBL and RBR
if isReverseComplementary(fragOne.bottomRightOverhang.sequence.upper(), fragTwo.bottomRightOverhang.sequence.upper()):
if isReverseComplementary(fragOne.bottomLeftOverhang.sequence.upper(), fragTwo.bottomLeftOverhang.sequence.upper()):
ligated = DNA('plasmid',fragOne.name+', '+fragTwo.name+' ligation product',Complement(fragOne.bottomLeftOverhang.sequence)+fragOne.sequence+Complement(fragOne.bottomRightOverhang.sequence)+reverseComplement(fragTwo.sequence))
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
else:
ligated = DNA('digest',fragOne.name+', '+fragTwo.name+' ligation product',fragOne.sequence+Complement(fragOne.bottomRightOverhang.sequence)+reverseComplement(fragTwo.sequence))
ligated.bottomLeftOverhang = Overhang(fragOne.bottomLeftOverhang.sequence)
if RTL:
ligated.bottomRightOverhang = Overhang(reverse(fragTwo.topLeftOverhang.sequence))
elif RBL:
ligated.topRightOverhang = Overhang(reverse(fragTwo.bottomLeftOverhang.sequence))
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
# you know it's not going to circularize, but you know it has LBL
elif isReverseComplementary(fragOne.bottomLeftOverhang.sequence.upper(), fragTwo.bottomLeftOverhang.sequence.upper()):
ligated = DNA('digest',fragOne.name+', '+fragTwo.name+' ligation product',reverseComplement(fragTwo.sequence)+Complement(fragOne.bottomLeftOverhang.sequence)+fragOne.sequence)
ligated.bottomRightOverhang = Overhang(fragOne.bottomRightOverhang.sequence)
ligated.topLeftOverhang = Overhang(reverse(fragTwo.bottomRightOverhang))
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
# here first3 == 0, so you know it has LTL and LBR
else:
if isComplementary(fragOne.topLeftOverhang.sequence.upper(), fragTwo.bottomRightOverhang.sequence.upper()):
if isComplementary(fragOne.bottomRightOverhang.sequence.upper(), fragTwo.topLeftOverhang.sequence.upper()):
ligated = DNA('plasmid',fragOne.name+', '+fragTwo.name+' ligation product',fragOne.topLeftOverhang.sequence+fragOne.sequence+fragTwo.topLeftOverhang.sequence+fragTwo.sequence)
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
else:
ligated = DNA('digest',fragOne.name+', '+fragTwo.name+' ligation product',fragTwo.sequence+fragOne.topLeftOverhang.sequence+fragTwo.sequence)
ligated.bottomRightOverhang = Overhang(fragOne.bottomRightOverhang.sequence)
if RTL:
ligated.topLeftOverhang = Overhang(fragTwo.topLeftOverhang.sequence)
elif RBL:
ligated.bottomLeftOverhang = Overhang(fragTwo.bottomLeftOverhang)
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
elif isComplementary(fragOne.bottomRightOverhang.sequence.upper(), fragTwo.topLeftOverhang.sequence.upper()):
ligated = DNA('digest',fragOne.name+', '+fragTwo.name+' ligation product',fragOne.sequence+fragTwo.topLeftOverhang.sequence+fragTwo.sequence)
ligated.topLeftOverhang = Overhang(fragOne.topLeftOverhang.sequence)
if RTR:
ligated.topRightOverhang = Overhang(fragTwo.topRightOverhang.sequence)
elif RBR:
ligated.bottomRightOverhang = Overhang(fragTwo.bottomRightOverhang.sequence)
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
# up to here is good
# here first3 == 0, so you know it has LTL and LBR
if isReverseComplementary(fragOne.topLeftOverhang.sequence.upper(), fragTwo.topLeftOverhang.sequence.upper()):
if isReverseComplementary(fragOne.bottomRightOverhang.sequence.upper(), fragTwo.bottomRightOverhang.sequence.upper()):
ligated = DNA('plasmid',fragOne.name+', '+fragTwo.name+' ligation product',fragOne.topLeftOverhang.sequence+fragOne.sequence+reverse(fragTwo.bottomRightOverhang.sequence)+reverseComplement(fragTwo.sequence))
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
else:
ligated = DNA('digest',fragOne.name+', '+fragTwo.name+' ligation product',reverseComplement(fragTwo.sequence)+fragOne.topLeftOverhang.sequence+fragOne.sequence)
ligated.bottomRightOverhang = Overhang(fragOne.bottomRightOverhang.sequence)
if RTR:
ligated.bottomLeftOverhang = Overhang(reverse(fragTwo.topRightOverhang.sequence))
if RBR:
ligated.topLeftOverhang = Overhang(reverse(fragTwo.bottomRightOverhang.sequence))
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
elif isReverseComplementary(fragOne.bottomRightOverhang.sequence.upper(), fragTwo.bottomRightOverhang.sequence.upper()):
ligated = DNA('plasmid',fragOne.name+', '+fragTwo.name+' ligation product',fragOne.sequence+Complement(fragOne.bottomRightOverhang.sequence)+reverseComplement(fragTwo.sequence))
ligated.topLeftOverhang = Overhang(fragOne.topLeftOverhang.sequence)
ligated.bottomRightOverhang = Overhang(reverse(fragTwo.topLeftOverhang.sequence))
j += 1
i += 1
return products
# Note: going to stick with the convention where they actually pass a list of restriction enzymes
# As in: GoldenGate(vector_DNA, list_of_DNAs, EnzymeDictionary['BsaI'], ['AmpR', 'KanR'])
def GoldenGate(VectorPlasmid, InputDNAs, reASE, resistanceList):
# ggEnzyme = EnzymeDictionary()[reASE]
ggDNAs, outputDNAs, resistanceList, vector = [], [], map(str.lower, resistanceList), None
vecDigest = Digest(VectorPlasmid, (reASE, ))
for frag in vecDigest:
if len(HasReplicon(frag.sequence)):
vector = frag
ggDNAs.append(vector)
break
if vector == None:
raise Exception('For GoldenGate function, no viable vector input provided (must contain origin of replication).')
for ggDNA in InputDNAs:
if ggDNA.DNAclass != 'plasmid':
print '\n*GoldenGate Warning*: linear inputs disallowed.\n'
continue
try:
ggDigest = Digest(ggDNA, (reASE, ))
ggDNAs += ggDigest
except:
pass
ggLigation = rGoldenGate(vector, [0, ], ggDNAs)
# for a ligation product to be part of the gg output, it must fulfill three criteria:
# 1) It must be circular (handled by Ligate() function)
# 2) It must have at least one replicon
# 3) It must have all of the above specified resistance markers
for product in ggLigation:
if product == None:
continue
if len(HasReplicon(product.sequence)) > 0:
resistanceFlag, resistanceMarkers = 1, map(str.lower, HasResistance(product.sequence))
for resistance in resistanceList:
if resistance not in resistanceMarkers:
resistanceFlag = 0
if resistanceFlag:
if not DNAlistContains(outputDNAs, product):
outputDNAs.append(product)
return outputDNAs
def DNAlistContains(DNAlist, candidateDNA):
for listDNA in DNAlist:
if candidateDNA.isEqual(listDNA):
return True
return False
def rGoldenGate(currentLink, linkList, allDNAs):
products = []
if currentLink.DNAclass == 'plasmid':
return (currentLink, )
else:
counter = 0
for myDNA in allDNAs:
newLink = linLigate([currentLink, myDNA])
if len(newLink) == 0:
counter += 1
continue
else:
for link in newLink:
if counter == 0:
return (None, )
elif counter in linkList:
return (None, )
else:
nextList = list(linkList)
nextList.append(counter)
nextLink = link
futureProducts = rGoldenGate(nextLink, nextList, allDNAs)
for futureProduct in futureProducts:
if isinstance(futureProduct, DNA):
if futureProduct.DNAclass == 'plasmid':
products.append(futureProduct)
counter += 1
return products
# Description: HasFeature() function checks for presence of regex-encoded feature in seq
def HasFeature(regex, seq):
#Regex must be lower case!
return bool( re.search(regex, seq.lower()) ) | bool( re.search(regex, reverseComplement(seq.lower()) ) )
#####Origins Suite: Checks for presence of certain origins of replication#####
def HasColE2(seq):
#has ColE2 origin, data from PMID 16428404
regexp = '....tga[gt]ac[ct]agataagcc[tgc]tatcagataacagcgcccttttggcgtctttttgagcacc'
return HasFeature(regexp, seq)
#necessary and sufficient element for ColE2 replication, however a longer sequence is needed for stable replication
# 'AGCGCCTCAGCGCGCCGTAGCGTCGATAAAAATTACGGGCTGGGGCGAAACTACCATCTGTTCGAAAAGGTCCGTAAATGGGCCTACAGAGCGATTCGTCAGGGCTGGCCTGTATTCTCACAATGGCTTGATGCCGTTATCCAGCGTGTCGAAATGTACAACGCTTCGCTTCCCGTTCCGCTTTCTCCGGCTGAATGTCGGGCTATTGGCAAGAGCATTGCGAAATATACACACAGGAAATTCTCACCAGAGGGATTTTCCGCTGTACAGGCCGCTCGCGGTCGCAAGGGCGGAACTAAATCTAAGCGCGCAGCAGTTCCTACATCAGCACGTTCGCTGAAACCGTGGGAGGCATTAGGCATCAGTCGAGCGACGTACTACCGAAAATTAAAATGTGACCCAGACCTCGCnnnntga'
#longer element shown in the Anderson lab that stably replicates
def HasColE1(seq):
regexp = 'tcatgaccaaaatcccttaacgtgagttttcgttccactgagcgtcagaccccgtagaaaagatcaaaggatcttcttgagatcctttttttctgcgcgtaatctgctgcttgcaaacaaaaaaaccaccgctaccagcggtggtttgtttgccggatcaagagcta[cagt]caactctttttccgaaggtaactggcttcagcagagcgcagataccaaatactgt[cagt]cttctagtgtagccgtagttaggccaccacttcaagaactctgtagcaccgcctacatacctcgctctgctaatcctgttaccagtggctgctgccagtggcgataagtcgtgtcttaccgggttggactcaagacgatagttaccggataaggcgcagcggtcgggctgaacggggggttcgtgcacacagcccagcttggagcgaacgacctacaccgaactgagatacctacagcgtgagc[cagt][cagt]tgagaaagcgccacgcttcccgaagggagaaaggcggacaggtatccggtaagcggcagggtcggaacaggagagcgcacgagggagcttccaggggg[acgt]aacgcctggtatctttatagtcctgtcgggtttcgccacctctgacttgagcgtcgatttttgtgatgctcgtcaggggggc[acgt]gagcct[ga]tggaaaaacgccagcaacgcggcc'
return HasFeature(regexp, seq)
def HasR6K(seq):
#has R6k, data from Anderson lab observations
regexp = 'gcagttcaacctgttgatagtacgtactaagctctcatgtttcacgtactaagctctcatgtttaacgtactaagctctcatgtttaacgaactaaaccctcatggctaacgtactaagctctcatggctaacgtactaagctctcatgtttcacgtactaagctctcatgtttgaacaataaaattaatataaatcagcaacttaaatagcctctaaggttttaagttttataagaaaaaaaagaatatataaggcttttaaagcttttaaggtttaacggttgtggacaacaagccagggatgtaacgcactgagaagcccttagagcctctcaaagcaattttgagtgacacaggaacacttaacggctgacatggg'.lower()
return HasFeature(regexp, seq)
def HasP15A(seq):
regex = 'aatattttatctgattaataagatgatcttcttgagatcgttttggtctgcgcgtaatctcttgctctgaaaacgaaaaaaccgccttgcagggcggtttttcgaaggttctctgagctaccaactctttgaaccgaggtaactggcttggaggagcgcagtcaccaaaacttgtcctttcagtttagccttaaccggcgcatgacttcaagactaactcctctaaatcaattaccagtggctgctgccagtggtgcttttgcatgtctttccgggttggactcaagacgatagttaccggataaggcgcagcggtcggactgaacggggggttcgtgcatacagtccagcttggagcgaactgcctacccggaactgagtgtcaggcgtggaatgagacaaacgcggccataacagcggaatgacaccggtaaaccgaaaggcaggaacaggagagcgcacgagggagccgccagggggaaacgcctggtatctttatagtcctgtcgggtttcgccaccactgatttgagcgtcagatttcgtgatgcttgtcaggggggcggagcctatggaaaaacggctttgccgcggccctctcacttccctgttaagtatcttcctggcatcttccaggaaatctccgccccgttcgtaagccatttccgctcgccgcagtcgaacgaccgagcgtagcgagtcagtgagcgaggaagcggaatatatcctgtatcacatattctgctgacgcaccggtgcagccttttttctcctgccacatgaagcacttcactgacaccctcatcagtgccaacatagtaag'
return HasFeature(regex, seq)
def HaspUC(seq):
regex = 'cccgtagaaaagatcaaaggatcttcttgagatcctttttttctgcgcgtaatctgctgcttgcaaacaaaaaaaccaccgctaccagcggtggtttgtttgccggatcaagagctaccaactctttttccgaaggtaactggcttcagcagagcgcagataccaaatactgtccttctagtgtagccgtagttaggccaccacttcaagaactctgtagcaccgcctacatacctcgctctgctaatcctgttaccagtggctgctgccagtggcgataagtcgtgtcttaccgggttggactcaagacgatagttaccggataaggcgcagcggtcgggctgaacggggggttcgtgcacacagcccagcttggagcgaacgacctacaccgaactgagatacctacagcgtgagcattgagaaagcgccacgcttcccgaagggagaaaggcggacaggtatccggtaagcggcagggtcggaacaggagagcgcacgagggagcttccagggggaaacgcctggtatctttatagtcctgtcgggtttcgccacctctgacttgagcgtcgatttttgtgatgctcgtcaggggggcggagcctatggaaaaacgccagcaacgcggcctttttacggttcctggccttttgctggccttttgctcacat'
return HasFeature(regex, seq)
#####Resistance Suite: Checks for presence of certain antibiotic resistance markers#####
def HasAAFeature(regex, DNAseq):
#must be uppercase, checks all six possibilities, fwd, rev x 3 frames
seq = DNAseq
retval = bool( re.search(regex, translate(seq.upper() )) ) | bool( re.search(regex,translate(seq[1:].upper() ) ) ) | bool( re.search(regex,translate(seq[2:].upper() ) ) )
seq = reverseComplement(seq)
retval = retval | bool( re.search(regex, translate(seq.upper() )) ) | bool( re.search(regex,translate(seq[1:].upper() ) ) ) | bool( re.search(regex,translate(seq[2:].upper() ) ) )
return retval
def HasSpecR(seq):
regex='MRSRNWSRTLTERSGGNGAVAVFMACYDCFFGVQSMPRASKQQARYAVGRCLMLWSSNDVTQQGSRPKTKLNIMREAVIAEVSTQLSEVVGVIERHLEPTLLAVHLYGSAVDGGLKPHSDIDLLVTVTVRLDETTRRALINDLLETSASPGESEILRAVEVTIVVHDDIIPWRYPAKRELQFGEWQRNDILAGIFEPATIDIDLAILLTKAREHSVALVGPAAEELFDPVPEQDLFEALNETLTLWNSPPDWAGDERNVVLTLSRIWYSAVTGKIAPKDVAADWAMERLPAQYQPVILEARQAYLGQEEDRLASRADQLEEFVHYVKGEITKVVGK'
return HasAAFeature(regex, seq)
def HasAmpR(seq):
# was: regex='MSIQHFRVALIPFFAAFCLPVFAHPETLVKVKDAEDQLGARVGYIELDLNSGKILESFRPEERFPMMSTFKVLLCGAVLSRIDAGQEQLGRRIHYSQNDLVEYSPVTEKHLTDGMTVRELCSAAITMSDNTAANLLLTTIGGPKELTAFLHNMGDHVTRLDRWEPELNEAIPNDERDTTMPVAMATTLRKLLTGELLTLASRQQLIDWMEADKVAGPLLRSALPAGWFIADKSGAGERGSRGIIAALGPDGKPSRIVVIYTTGSQATMDERNRQIAEIGASLIKHW'
# compared with: 'MSIQHFRVALIPFFAAFCLPVFAHPETLVKVKDAEDQLGARVGYIELDLNSGKILESFRPEERFPMMSTFKVLLCGAVLSRIDAGQEQLGRRIHYSQNDLVEYSPVTEKHLTDGMTVRELCSAAITMSDNTAANLLLTTIGGPKELTAFLHNMGDHVTRLDRWEPELNEAIPNDERDTTMPVAMATTLRKLLTGELLTLASRQQLIDWMEADKVAGPLLRSALPAGWFIADKSGAGERGSRGIIAALGPDGKPSRIVVIYTTGSQATMDERNRQIAEIGASLIKHW'
# result: aligned with clustal, got following output:
regex = 'MSTFKVLLCGAVLSR[VI]DAGQEQLGRRIHYSQNDLVEYSPVTEKHLTDGMTVRELCSAAITMSDNTAANLLLTTIGGPKELTAFLHNMGDHVTRLDRWEPELNEAIPNDERDTTMP[VA]AMATTLRKLLTGELLTLASRQQLIDWMEADKVAGPLLRSALPAGWFIADKSGAGERGSRGIIAALGPDGKPSRIVVIYTTGSQATMDERNRQIAEIGASLIKHW'
return HasAAFeature(regex, seq)
def HasKanR(seq):
regex='MSHIQRETSCSRPRLNSNMDADLYGYKWARDNVGQSGATIYRLYGKPDAPELFLKHGKGSVANDVTDEMVRLNWLTEFMPLPTIKHFIRTPDDAWLLTTAIPGKTAFQVLEEYPDSGENIVDALAVFLRRLHSIPVCNCPFNSDRVFRLAQAQSRMNNGLVDASDFDDERNGWPVEQVWKEMHKLLPFSPDSVVTHGDFSLDNLIFDEGKLIGCIDVGRVGIADRYQDLAILWNCLGEFSPSLQKRLFQKYGIDNPDMNKLQFHLMLDEFF'
return HasAAFeature(regex, seq)
def HasCmR(seq):
regex='MEKKITGYTTVDISQWHRKEHFEAFQSVAQCTYNQTVQLDITAFLKTVKKNKHKFYPAFIHILARLMNAHPEFRMAMKDGELVIWDSVHPCYTVFHEQTETFSSLWSEYHDDFRQFLHIYSQDVACYGENLAYFPKGFIENMFFVSANPWVSFTSFDLNVANMDNFFAPVFTMGKYYTQGDKVLMPLAIQVHHAVCDGFHVGRMLNELQQYCDEWQGGA'
return HasAAFeature(regex, seq)
def HasResistance(seq):
retval = []
if HasCmR(seq):
retval.append( 'CmR' )
if HasKanR(seq):
retval.append('KanR')
if HasAmpR(seq):
retval.append('AmpR')
if HasSpecR(seq):
retval.append('SpecR')
return retval
def HasReplicon(seq):
retval = []
if HasColE1(seq):
retval.append('ColE1')
if HasColE2(seq):
retval.append('ColE2')
if HasR6K(seq):
retval.append('R6K')
if HasP15A(seq):
retval.append('P15A')
if HaspUC(seq):
retval.append('pUC')
return retval
class Strain(object):
def __init__(self, name="", replication="", resistance="", plasmid=""):
#pass everything in as a comma separated list
self.name = name
delimit = re.compile(r'\s*,\s*')
self.replication = delimit.split(replication)
self.resistance = delimit.split(resistance) #should include the plasmid resistance!
if(plasmid != ""):
self.plasmids = [plasmid, ] #DNA object
else:
self.plasmids = []
# Description: accepts list of dnas and a strain, it should output a list of DNAs that survive the transformation
# this would completely reciplate the TransformPlateMiniprep cycle, it returns all the DNAs present in the cell
def TransformPlateMiniprep(DNAs, strain):
#strain is an object
transformed = strain.plasmids
selectionList = []
for dna in DNAs:
#check if circular, confers new resistance on strain, and doesn't compete with existing plasmid in strain
if dna.topology == 'circular':
newR = False
replicon_ok = False
no_existing_plasmid = False
err_msg = ""
success_msg = ""
resistances = HasResistance(dna.sequence)
replicons = HasReplicon(dna.sequence)
#just need one resistance not already in strain
for resistance in resistances:
if not(resistance in strain.resistance):
newR = True
if not resistance in selectionList:
selectionList.append(resistance)
success_msg += "\nTransformation of "+dna.name+" into "+strain.name+" successful -- use "+resistance+" antibiotic selection.\n"
for replicon in replicons:
#has the pir/repA necessary for ColE2/R6K?
if replicon in strain.replication:
replicon_ok = True
for replicon in replicons:
#check if existing plasmid would compete
existing_plasmids = []
for p in strain.plasmids:
existing_plasmids.append( HasReplicon(p.sequence) )
if not(replicon in existing_plasmids ):
no_existing_plasmid = True
if(newR & replicon_ok & no_existing_plasmid):
parent = dna.clone()
parent.setChildren((dna, ))
dna.addParent(parent)
parent.instructions = 'Transform '+dna.name+' into '+strain.name+', selecting for '+resistance+' resistance.'
parent.setTimeStep(24)
parent.addMaterials(['Buffers P1,P2,N3,PB,PE','Miniprep column',resistance[:-1]+' LB agar plates','LB '+resistance[:-1]+' media'])
transformed.append(dna)
print success_msg
else:
if not(newR):
raise Exception('*Transformation Error*: for transformation of '+dna.name+' into '+strain.name+', plasmid either doesn\'t have an antibiotic resistance or doesn\'t confer a new one on this strain')
if not(replicon_ok):
raise Exception('*Transformation Error*: for transformation of "'+dna.name+'" into "'+strain.name+'", plasmid replicon won\'t function in this strain')
if not(no_existing_plasmid):
raise Exception('*Transformation Error*: for transformation of "'+dna.name+'" into "'+strain.name+'", transformed plasmid replicon competes with existing plasmid in strain')
if len(transformed)<1:
raise Exception("*Transformation Error*: For transformation of "+dna.name+" into "+strain.name+", no DNAs successfully transformed. DNAs may be linear.")
return transformed | [] |
jasonszang/scriptd | scriptd/app/flask_helper.py | e612f10971ca5d98ffff7e0680485575792529e7 | # -*- coding: UTF-8 -*-
"""Helper for working with flask"""
import logging
from flask import Flask
from flask import request
from typing import Text
class FlaskHelper(object):
"""
Helper class for interacting with flask framework.
Improves testability by avoiding accessing flask global/thread-local objects everywhere.
"""
def __init__(self, app): # type: (Flask) -> None
self.app = app
def get_app(self): # type: () -> Flask
return self.app
def get_logger(self): # type: () -> logging.Logger
return self.app.logger
def get_request_data(self): # type: () -> bytes
return request.get_data()
def get_remote_addr(self): # type: () -> Text
return request.remote_addr
| [((26, 15, 26, 33), 'flask.request.get_data', 'request.get_data', ({}, {}), '()', False, 'from flask import request\n')] |
aymericvie/evology | evology/research/MCarloLongRuns/Exp1_WSvsReturn.py | 8f00d94dee7208be5a5bdd0375a9d6ced25097f4 | # Imports
import numpy as np
import pandas as pd
import sys
import tqdm
import warnings
import time
import ternary
from ternary.helpers import simplex_iterator
import multiprocessing as mp
warnings.simplefilter("ignore")
if sys.platform == "darwin":
sys.path.append("/Users/aymericvie/Documents/GitHub/evology/evology/code")
# Need to be executed from cd to MCarloLongRuns
if sys.platform == "linux":
sys.path.append("/home/vie/Documents/GitHub/evology/evology/code")
from main import main as evology
startTime = time.time()
TimeHorizon = 252 * 5
PopulationSize = 3
def job(coords):
np.random.seed()
try:
df, pop = evology(
space="scholl",
solver="esl.true",
wealth_coordinates=coords,
POPULATION_SIZE=PopulationSize,
MAX_GENERATIONS=TimeHorizon,
PROBA_SELECTION=0,
MUTATION_RATE=0,
ReinvestmentRate=1.0,
InvestmentHorizon=21,
InvestorBehavior="profit",
tqdm_display=True,
reset_wealth=True,
)
result = [
coords[0],
coords[1],
coords[2],
df["NT_returns"].mean(),
df["VI_returns"].mean(),
df["TF_returns"].mean(),
df["NT_returns"].std(),
df["VI_returns"].std(),
df["TF_returns"].std(),
df["HighestT"].mean(),
df["AvgAbsT"].mean(),
]
return result
except Exception as e:
print(e)
print("Failed run" + str(coords) + str(e))
result = [coords[0], coords[1], coords[2]]
for _ in range(8):
result.append(0)
return result
# Define the domains
def GenerateCoords(reps, scale):
param = []
for (i, j, k) in simplex_iterator(scale):
for _ in range(reps):
param.append([i / scale, j / scale, k / scale])
return param
reps = 10
scale = 50 # increment = 1/scale
param = GenerateCoords(reps, scale)
# print(param)
print(len(param))
# Run experiment
def main():
p = mp.Pool()
data = p.map(job, tqdm.tqdm(param))
p.close()
data = np.array(data)
return data
if __name__ == "__main__":
data = main()
df = pd.DataFrame()
# Inputs
df["WS_NT"] = data[:, 0]
df["WS_VI"] = data[:, 1]
df["WS_TF"] = data[:, 2]
# Outputs
df["NT_returns_mean"] = data[:, 3]
df["VI_returns_mean"] = data[:, 4]
df["TF_returns_mean"] = data[:, 5]
df["NT_returns_std"] = data[:, 6]
df["VI_returns_std"] = data[:, 7]
df["TF_returns_std"] = data[:, 8]
df["HighestT"] = data[:, 9]
df["AvgAbsT"] = data[:, 10]
print(df)
df.to_csv("data/data1.csv")
print("Completion time: " + str(time.time() - startTime))
| [((12, 0, 12, 31), 'warnings.simplefilter', 'warnings.simplefilter', ({(12, 22, 12, 30): '"""ignore"""'}, {}), "('ignore')", False, 'import warnings\n'), ((22, 12, 22, 23), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((15, 4, 15, 78), 'sys.path.append', 'sys.path.append', ({(15, 20, 15, 77): '"""/Users/aymericvie/Documents/GitHub/evology/evology/code"""'}, {}), "('/Users/aymericvie/Documents/GitHub/evology/evology/code')", False, 'import sys\n'), ((18, 4, 18, 70), 'sys.path.append', 'sys.path.append', ({(18, 20, 18, 69): '"""/home/vie/Documents/GitHub/evology/evology/code"""'}, {}), "('/home/vie/Documents/GitHub/evology/evology/code')", False, 'import sys\n'), ((28, 4, 28, 20), 'numpy.random.seed', 'np.random.seed', ({}, {}), '()', True, 'import numpy as np\n'), ((72, 21, 72, 44), 'ternary.helpers.simplex_iterator', 'simplex_iterator', ({(72, 38, 72, 43): 'scale'}, {}), '(scale)', False, 'from ternary.helpers import simplex_iterator\n'), ((86, 8, 86, 17), 'multiprocessing.Pool', 'mp.Pool', ({}, {}), '()', True, 'import multiprocessing as mp\n'), ((89, 11, 89, 25), 'numpy.array', 'np.array', ({(89, 20, 89, 24): 'data'}, {}), '(data)', True, 'import numpy as np\n'), ((95, 9, 95, 23), 'pandas.DataFrame', 'pd.DataFrame', ({}, {}), '()', True, 'import pandas as pd\n'), ((30, 18, 43, 9), 'main.main', 'evology', (), '', True, 'from main import main as evology\n'), ((87, 22, 87, 38), 'tqdm.tqdm', 'tqdm.tqdm', ({(87, 32, 87, 37): 'param'}, {}), '(param)', False, 'import tqdm\n'), ((115, 36, 115, 47), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n')] |
vinisantos7/PythonExercicios | ex039.py | bc8f38e03a606d6b0216632a93affeab0792e534 | print("@"*30)
print("Alistamento - Serviço Militar")
print("@"*30)
from datetime import date
ano_nasc = int(input("Digite seu ano de nascimento: "))
ano_atual = date.today().year
idade = ano_atual - ano_nasc
print(f"Quem nasceu em {ano_nasc} tem {idade} anos em {ano_atual}")
if idade == 18:
print("É a hora de se alistar no serviço militar, IMEDIATAMENTE!")
elif idade < 18:
saldo = 18 - idade
print(f"Ainda falta {saldo} anos para o seu alistamento!")
ano = ano_atual + saldo
print(f"Seu alistamento será em {ano}")
else:
idade > 18
saldo = idade - 18
print(f"Já passou {saldo} anos do tempo para o seu alistamento!")
ano = ano_atual - saldo
print(f"O seu alistamento foi em {ano}") | [((8, 12, 8, 24), 'datetime.date.today', 'date.today', ({}, {}), '()', False, 'from datetime import date\n')] |
Jaimie-Jin1/streamsx.topology | test/python/spl/tk17/opt/.__splpy/packages/streamsx/topology/tester.py | 6f316ec8e9ed1349c6f061d9bb7d03deb87e3d08 | # coding=utf-8
# Licensed Materials - Property of IBM
# Copyright IBM Corp. 2017
"""Testing support for streaming applications.
Allows testing of a streaming application by creation conditions
on streams that are expected to become valid during the processing.
`Tester` is designed to be used with Python's `unittest` module.
A complete application may be tested or fragments of it, for example a sub-graph can be tested
in isolation that takes input data and scores it using a model.
Supports execution of the application on
:py:const:`~streamsx.topology.context.ContextTypes.STREAMING_ANALYTICS_SERVICE`,
:py:const:`~streamsx.topology.context.ContextTypes.DISTRIBUTED`
or :py:const:`~streamsx.topology.context.ContextTypes.STANDALONE`.
A :py:class:`Tester` instance is created and associated with the :py:class:`Topology` to be tested.
Conditions are then created against streams, such as a stream must receive 10 tuples using
:py:meth:`~Tester.tuple_count`.
Here is a simple example that tests a filter correctly only passes tuples with values greater than 5::
import unittest
from streamsx.topology.topology import Topology
from streamsx.topology.tester import Tester
class TestSimpleFilter(unittest.TestCase):
def setUp(self):
# Sets self.test_ctxtype and self.test_config
Tester.setup_streaming_analytics(self)
def test_filter(self):
# Declare the application to be tested
topology = Topology()
s = topology.source([5, 7, 2, 4, 9, 3, 8])
s = s.filter(lambda x : x > 5)
# Create tester and assign conditions
tester = Tester(topology)
tester.contents(s, [7, 9, 8])
# Submit the application for test
# If it fails an AssertionError will be raised.
tester.test(self.test_ctxtype, self.test_config)
A stream may have any number of conditions and any number of streams may be tested.
A py:meth:`~Tester.local_check` is supported where a method of the
unittest class is executed once the job becomes healthy. This performs
checks from the context of the Python unittest class, such as
checking external effects of the application or using the REST api to
monitor the application.
.. warning::
Python 3.5 and Streaming Analytics service or IBM Streams 4.2 or later is required when using `Tester`.
"""
import streamsx.ec as ec
import streamsx.topology.context as stc
import os
import unittest
import logging
import collections
import threading
from streamsx.rest import StreamsConnection
from streamsx.rest import StreamingAnalyticsConnection
from streamsx.topology.context import ConfigParams
import time
import streamsx.topology.tester_runtime as sttrt
_logger = logging.getLogger('streamsx.topology.test')
class Tester(object):
"""Testing support for a Topology.
Allows testing of a Topology by creating conditions against the contents
of its streams.
Conditions may be added to a topology at any time before submission.
If a topology is submitted directly to a context then the graph
is not modified. This allows testing code to be inserted while
the topology is being built, but not acted upon unless the topology
is submitted in test mode.
If a topology is submitted through the test method then the topology
may be modified to include operations to ensure the conditions are met.
.. warning::
For future compatibility applications under test should not include intended failures that cause
a processing element to stop or restart. Thus, currently testing is against expected application behavior.
Args:
topology: Topology to be tested.
"""
def __init__(self, topology):
self.topology = topology
topology.tester = self
self._conditions = {}
self.local_check = None
@staticmethod
def setup_standalone(test):
"""
Set up a unittest.TestCase to run tests using IBM Streams standalone mode.
Requires a local IBM Streams install define by the STREAMS_INSTALL
environment variable. If STREAMS_INSTALL is not set, then the
test is skipped.
Two attributes are set in the test case:
* test_ctxtype - Context type the test will be run in.
* test_config- Test configuration.
Args:
test(unittest.TestCase): Test case to be set up to run tests using Tester
Returns: None
"""
if not 'STREAMS_INSTALL' in os.environ:
raise unittest.SkipTest("Skipped due to no local IBM Streams install")
test.test_ctxtype = stc.ContextTypes.STANDALONE
test.test_config = {}
@staticmethod
def setup_distributed(test):
"""
Set up a unittest.TestCase to run tests using IBM Streams distributed mode.
Requires a local IBM Streams install define by the STREAMS_INSTALL
environment variable. If STREAMS_INSTALL is not set then the
test is skipped.
The Streams instance to use is defined by the environment variables:
* STREAMS_ZKCONNECT - Zookeeper connection string
* STREAMS_DOMAIN_ID - Domain identifier
* STREAMS_INSTANCE_ID - Instance identifier
Two attributes are set in the test case:
* test_ctxtype - Context type the test will be run in.
* test_config - Test configuration.
Args:
test(unittest.TestCase): Test case to be set up to run tests using Tester
Returns: None
"""
if not 'STREAMS_INSTALL' in os.environ:
raise unittest.SkipTest("Skipped due to no local IBM Streams install")
if not 'STREAMS_INSTANCE_ID' in os.environ:
raise unittest.SkipTest("Skipped due to STREAMS_INSTANCE_ID environment variable not set")
if not 'STREAMS_DOMAIN_ID' in os.environ:
raise unittest.SkipTest("Skipped due to STREAMS_DOMAIN_ID environment variable not set")
test.username = os.getenv("STREAMS_USERNAME", "streamsadmin")
test.password = os.getenv("STREAMS_PASSWORD", "passw0rd")
test.test_ctxtype = stc.ContextTypes.DISTRIBUTED
test.test_config = {}
@staticmethod
def setup_streaming_analytics(test, service_name=None, force_remote_build=False):
"""
Set up a unittest.TestCase to run tests using Streaming Analytics service on IBM Bluemix cloud platform.
The service to use is defined by:
* VCAP_SERVICES environment variable containing `streaming_analytics` entries.
* service_name which defaults to the value of STREAMING_ANALYTICS_SERVICE_NAME environment variable.
If VCAP_SERVICES is not set or a service name is not defined, then the test is skipped.
Two attributes are set in the test case:
* test_ctxtype - Context type the test will be run in.
* test_config - Test configuration.
Args:
test(unittest.TestCase): Test case to be set up to run tests using Tester
service_name(str): Name of Streaming Analytics service to use. Must exist as an
entry in the VCAP services. Defaults to value of STREAMING_ANALYTICS_SERVICE_NAME environment variable.
Returns: None
"""
if not 'VCAP_SERVICES' in os.environ:
raise unittest.SkipTest("Skipped due to VCAP_SERVICES environment variable not set")
test.test_ctxtype = stc.ContextTypes.STREAMING_ANALYTICS_SERVICE
if service_name is None:
service_name = os.environ.get('STREAMING_ANALYTICS_SERVICE_NAME', None)
if service_name is None:
raise unittest.SkipTest("Skipped due to no service name supplied")
test.test_config = {'topology.service.name': service_name}
if force_remote_build:
test.test_config['topology.forceRemoteBuild'] = True
def add_condition(self, stream, condition):
"""Add a condition to a stream.
Conditions are normally added through :py:meth:`tuple_count`, :py:meth:`contents` or :py:meth:`tuple_check`.
This allows an additional conditions that are implementations of :py:class:`Condition`.
Args:
stream(Stream): Stream to be tested.
condition(Condition): Arbitrary condition.
Returns:
Stream: stream
"""
self._conditions[condition.name] = (stream, condition)
return stream
def tuple_count(self, stream, count, exact=True):
"""Test that a stream contains a number of tuples.
If `exact` is `True`, then condition becomes valid when `count`
tuples are seen on `stream` during the test. Subsequently if additional
tuples are seen on `stream` then the condition fails and can never
become valid.
If `exact` is `False`, then the condition becomes valid once `count`
tuples are seen on `stream` and remains valid regardless of
any additional tuples.
Args:
stream(Stream): Stream to be tested.
count(int): Number of tuples expected.
exact(bool): `True` if the stream must contain exactly `count`
tuples, `False` if the stream must contain at least `count` tuples.
Returns:
Stream: stream
"""
_logger.debug("Adding tuple count (%d) condition to stream %s.", count, stream)
if exact:
name = "ExactCount" + str(len(self._conditions))
cond = sttrt._TupleExactCount(count, name)
cond._desc = "{0} stream expects tuple count equal to {1}.".format(stream.name, count)
else:
name = "AtLeastCount" + str(len(self._conditions))
cond = sttrt._TupleAtLeastCount(count, name)
cond._desc = "'{0}' stream expects tuple count of at least {1}.".format(stream.name, count)
return self.add_condition(stream, cond)
def contents(self, stream, expected, ordered=True):
"""Test that a stream contains the expected tuples.
Args:
stream(Stream): Stream to be tested.
expected(list): Sequence of expected tuples.
ordered(bool): True if the ordering of received tuples must match expected.
Returns:
Stream: stream
"""
name = "StreamContents" + str(len(self._conditions))
if ordered:
cond = sttrt._StreamContents(expected, name)
cond._desc = "'{0}' stream expects tuple ordered contents: {1}.".format(stream.name, expected)
else:
cond = sttrt._UnorderedStreamContents(expected, name)
cond._desc = "'{0}' stream expects tuple unordered contents: {1}.".format(stream.name, expected)
return self.add_condition(stream, cond)
def tuple_check(self, stream, checker):
"""Check each tuple on a stream.
For each tuple ``t`` on `stream` ``checker(t)`` is called.
If the return evaluates to `False` then the condition fails.
Once the condition fails it can never become valid.
Otherwise the condition becomes or remains valid. The first
tuple on the stream makes the condition valid if the checker
callable evaluates to `True`.
The condition can be combined with :py:meth:`tuple_count` with
``exact=False`` to test a stream map or filter with random input data.
An example of combining `tuple_count` and `tuple_check` to test a filter followed
by a map is working correctly across a random set of values::
def rands():
r = random.Random()
while True:
yield r.random()
class TestFilterMap(unittest.testCase):
# Set up omitted
def test_filter(self):
# Declare the application to be tested
topology = Topology()
r = topology.source(rands())
r = r.filter(lambda x : x > 0.7)
r = r.map(lambda x : x + 0.2)
# Create tester and assign conditions
tester = Tester(topology)
# Ensure at least 1000 tuples pass through the filter.
tester.tuple_count(r, 1000, exact=False)
tester.tuple_check(r, lambda x : x > 0.9)
# Submit the application for test
# If it fails an AssertionError will be raised.
tester.test(self.test_ctxtype, self.test_config)
Args:
stream(Stream): Stream to be tested.
checker(callable): Callable that must evaluate to True for each tuple.
"""
name = "TupleCheck" + str(len(self._conditions))
cond = sttrt._TupleCheck(checker, name)
return self.add_condition(stream, cond)
def local_check(self, callable):
"""Perform local check while the application is being tested.
A call to `callable` is made after the application under test is submitted and becomes healthy.
The check is in the context of the Python runtime executing the unittest case,
typically the callable is a method of the test case.
The application remains running until all the conditions are met
and `callable` returns. If `callable` raises an error, typically
through an assertion method from `unittest` then the test will fail.
Used for testing side effects of the application, typically with `STREAMING_ANALYTICS_SERVICE`
or `DISTRIBUTED`. The callable may also use the REST api for context types that support
it to dynamically monitor the running application.
The callable can use `submission_result` and `streams_connection` attributes from :py:class:`Tester` instance
to interact with the job or the running Streams instance.
Simple example of checking the job is healthy::
import unittest
from streamsx.topology.topology import Topology
from streamsx.topology.tester import Tester
class TestLocalCheckExample(unittest.TestCase):
def setUp(self):
Tester.setup_distributed(self)
def test_job_is_healthy(self):
topology = Topology()
s = topology.source(['Hello', 'World'])
self.tester = Tester(topology)
self.tester.tuple_count(s, 2)
# Add the local check
self.tester.local_check = self.local_checks
# Run the test
self.tester.test(self.test_ctxtype, self.test_config)
def local_checks(self):
job = self.tester.submission_result.job
self.assertEqual('healthy', job.health)
.. warning::
A local check must not cancel the job (application under test).
Args:
callable: Callable object.
"""
self.local_check = callable
def test(self, ctxtype, config=None, assert_on_fail=True, username=None, password=None):
"""Test the topology.
Submits the topology for testing and verifies the test conditions are met and the job remained healthy through its execution.
The submitted application (job) is monitored for the test conditions and
will be canceled when all the conditions are valid or at least one failed.
In addition if a local check was specified using :py:meth:`local_check` then
that callable must complete before the job is cancelled.
The test passes if all conditions became valid and the local check callable (if present) completed without
raising an error.
The test fails if the job is unhealthy, any condition fails or the local check callable (if present) raised an exception.
Args:
ctxtype(str): Context type for submission.
config: Configuration for submission.
assert_on_fail(bool): True to raise an assertion if the test fails, False to return the passed status.
username(str): username for distributed tests
password(str): password for distributed tests
Attributes:
submission_result: Result of the application submission from :py:func:`~streamsx.topology.context.submit`.
streams_connection(StreamsConnection): Connection object that can be used to interact with the REST API of
the Streaming Analytics service or instance.
Returns:
bool: `True` if test passed, `False` if test failed if `assert_on_fail` is `False`.
"""
# Add the conditions into the graph as sink operators
_logger.debug("Adding conditions to topology %s.", self.topology.name)
for ct in self._conditions.values():
condition = ct[1]
stream = ct[0]
stream.for_each(condition, name=condition.name)
if config is None:
config = {}
_logger.debug("Starting test topology %s context %s.", self.topology.name, ctxtype)
if stc.ContextTypes.STANDALONE == ctxtype:
passed = self._standalone_test(config)
elif stc.ContextTypes.DISTRIBUTED == ctxtype:
passed = self._distributed_test(config, username, password)
elif stc.ContextTypes.STREAMING_ANALYTICS_SERVICE == ctxtype or stc.ContextTypes.ANALYTICS_SERVICE == ctxtype:
passed = self._streaming_analytics_test(ctxtype, config)
else:
raise NotImplementedError("Tester context type not implemented:", ctxtype)
if 'conditions' in self.result:
for cn,cnr in self.result['conditions'].items():
c = self._conditions[cn][1]
cdesc = cn
if hasattr(c, '_desc'):
cdesc = c._desc
if 'Fail' == cnr:
_logger.error("Condition: %s : %s", cnr, cdesc)
elif 'NotValid' == cnr:
_logger.warning("Condition: %s : %s", cnr, cdesc)
elif 'Valid' == cnr:
_logger.info("Condition: %s : %s", cnr, cdesc)
if assert_on_fail:
assert passed, "Test failed for topology: " + self.topology.name
if passed:
_logger.info("Test topology %s passed for context:%s", self.topology.name, ctxtype)
else:
_logger.error("Test topology %s failed for context:%s", self.topology.name, ctxtype)
return passed
def _standalone_test(self, config):
""" Test using STANDALONE.
Success is solely indicated by the process completing and returning zero.
"""
sr = stc.submit(stc.ContextTypes.STANDALONE, self.topology, config)
self.submission_result = sr
self.result = {'passed': sr['return_code'], 'submission_result': sr}
return sr['return_code'] == 0
def _distributed_test(self, config, username, password):
self.streams_connection = config.get(ConfigParams.STREAMS_CONNECTION)
if self.streams_connection is None:
# Supply a default StreamsConnection object with SSL verification disabled, because the default
# streams server is not shipped with a valid SSL certificate
self.streams_connection = StreamsConnection(username, password)
self.streams_connection.session.verify = False
config[ConfigParams.STREAMS_CONNECTION] = self.streams_connection
sjr = stc.submit(stc.ContextTypes.DISTRIBUTED, self.topology, config)
self.submission_result = sjr
if sjr['return_code'] != 0:
_logger.error("Failed to submit job to distributed instance.")
return False
return self._distributed_wait_for_result()
def _streaming_analytics_test(self, ctxtype, config):
sjr = stc.submit(ctxtype, self.topology, config)
self.submission_result = sjr
self.streams_connection = config.get(ConfigParams.STREAMS_CONNECTION)
if self.streams_connection is None:
vcap_services = config.get(ConfigParams.VCAP_SERVICES)
service_name = config.get(ConfigParams.SERVICE_NAME)
self.streams_connection = StreamingAnalyticsConnection(vcap_services, service_name)
if sjr['return_code'] != 0:
_logger.error("Failed to submit job to Streaming Analytics instance")
return False
return self._distributed_wait_for_result()
def _distributed_wait_for_result(self):
cc = _ConditionChecker(self, self.streams_connection, self.submission_result)
# Wait for the job to be healthy before calling the local check.
if cc._wait_for_healthy():
self._start_local_check()
self.result = cc._complete()
if self.local_check is not None:
self._local_thread.join()
else:
self.result = cc._end(False, _ConditionChecker._UNHEALTHY)
self.result['submission_result'] = self.submission_result
cc._canceljob(self.result)
if self.local_check_exception is not None:
raise self.local_check_exception
return self.result['passed']
def _start_local_check(self):
self.local_check_exception = None
if self.local_check is None:
return
self._local_thread = threading.Thread(target=self._call_local_check)
self._local_thread.start()
def _call_local_check(self):
try:
self.local_check_value = self.local_check()
except Exception as e:
self.local_check_value = None
self.local_check_exception = e
#######################################
# Internal functions
#######################################
def _result_to_dict(passed, t):
result = {}
result['passed'] = passed
result['valid'] = t[0]
result['fail'] = t[1]
result['progress'] = t[2]
result['conditions'] = t[3]
return result
class _ConditionChecker(object):
_UNHEALTHY = (False, False, False, None)
def __init__(self, tester, sc, sjr):
self.tester = tester
self._sc = sc
self._sjr = sjr
self._instance_id = sjr['instanceId']
self._job_id = sjr['jobId']
self._sequences = {}
for cn in tester._conditions:
self._sequences[cn] = -1
self.delay = 0.5
self.timeout = 10.0
self.waits = 0
self.additional_checks = 2
self.job = self._find_job()
# Wait for job to be healthy. Returns True
# if the job became healthy, False if not.
def _wait_for_healthy(self):
while (self.waits * self.delay) < self.timeout:
if self.__check_job_health():
self.waits = 0
return True
time.sleep(self.delay)
self.waits += 1
return False
def _complete(self):
while (self.waits * self.delay) < self.timeout:
check = self. __check_once()
if check[1]:
return self._end(False, check)
if check[0]:
if self.additional_checks == 0:
return self._end(True, check)
self.additional_checks -= 1
continue
if check[2]:
self.waits = 0
else:
self.waits += 1
time.sleep(self.delay)
return self._end(False, check)
def _end(self, passed, check):
result = _result_to_dict(passed, check)
return result
def _canceljob(self, result):
if self.job is not None:
self.job.cancel(force=not result['passed'])
def __check_once(self):
if not self.__check_job_health():
return _ConditionChecker._UNHEALTHY
cms = self._get_job_metrics()
valid = True
progress = True
fail = False
condition_states = {}
for cn in self._sequences:
condition_states[cn] = 'NotValid'
seq_mn = sttrt.Condition._mn('seq', cn)
# If the metrics are missing then the operator
# is probably still starting up, cannot be valid.
if not seq_mn in cms:
valid = False
continue
seq_m = cms[seq_mn]
if seq_m.value == self._sequences[cn]:
progress = False
else:
self._sequences[cn] = seq_m.value
fail_mn = sttrt.Condition._mn('fail', cn)
if not fail_mn in cms:
valid = False
continue
fail_m = cms[fail_mn]
if fail_m.value != 0:
fail = True
condition_states[cn] = 'Fail'
continue
valid_mn = sttrt.Condition._mn('valid', cn)
if not valid_mn in cms:
valid = False
continue
valid_m = cms[valid_mn]
if valid_m.value == 0:
valid = False
else:
condition_states[cn] = 'Valid'
return (valid, fail, progress, condition_states)
def __check_job_health(self):
self.job.refresh()
return self.job.health == 'healthy'
def _find_job(self):
instance = self._sc.get_instance(id=self._instance_id)
return instance.get_job(id=self._job_id)
def _get_job_metrics(self):
"""Fetch all the condition metrics for a job.
We refetch the metrics each time to ensure that we don't miss
any being added, e.g. if an operator is slow to start.
"""
cms = {}
for op in self.job.get_operators():
metrics = op.get_metrics(name=sttrt.Condition._METRIC_PREFIX + '*')
for m in metrics:
cms[m.name] = m
return cms
| [((76, 10, 76, 53), 'logging.getLogger', 'logging.getLogger', ({(76, 28, 76, 52): '"""streamsx.topology.test"""'}, {}), "('streamsx.topology.test')", False, 'import logging\n'), ((161, 24, 161, 69), 'os.getenv', 'os.getenv', ({(161, 34, 161, 52): '"""STREAMS_USERNAME"""', (161, 54, 161, 68): '"""streamsadmin"""'}, {}), "('STREAMS_USERNAME', 'streamsadmin')", False, 'import os\n'), ((162, 24, 162, 65), 'os.getenv', 'os.getenv', ({(162, 34, 162, 52): '"""STREAMS_PASSWORD"""', (162, 54, 162, 64): '"""passw0rd"""'}, {}), "('STREAMS_PASSWORD', 'passw0rd')", False, 'import os\n'), ((319, 15, 319, 47), 'streamsx.topology.tester_runtime._TupleCheck', 'sttrt._TupleCheck', ({(319, 33, 319, 40): 'checker', (319, 42, 319, 46): 'name'}, {}), '(checker, name)', True, 'import streamsx.topology.tester_runtime as sttrt\n'), ((455, 13, 455, 75), 'streamsx.topology.context.submit', 'stc.submit', ({(455, 24, 455, 51): 'stc.ContextTypes.STANDALONE', (455, 53, 455, 66): 'self.topology', (455, 68, 455, 74): 'config'}, {}), '(stc.ContextTypes.STANDALONE, self.topology, config)', True, 'import streamsx.topology.context as stc\n'), ((468, 14, 468, 77), 'streamsx.topology.context.submit', 'stc.submit', ({(468, 25, 468, 53): 'stc.ContextTypes.DISTRIBUTED', (468, 55, 468, 68): 'self.topology', (468, 70, 468, 76): 'config'}, {}), '(stc.ContextTypes.DISTRIBUTED, self.topology, config)', True, 'import streamsx.topology.context as stc\n'), ((476, 14, 476, 56), 'streamsx.topology.context.submit', 'stc.submit', ({(476, 25, 476, 32): 'ctxtype', (476, 34, 476, 47): 'self.topology', (476, 49, 476, 55): 'config'}, {}), '(ctxtype, self.topology, config)', True, 'import streamsx.topology.context as stc\n'), ((510, 29, 510, 76), 'threading.Thread', 'threading.Thread', (), '', False, 'import threading\n'), ((126, 18, 126, 82), 'unittest.SkipTest', 'unittest.SkipTest', ({(126, 36, 126, 81): '"""Skipped due to no local IBM Streams install"""'}, {}), "('Skipped due to no local IBM Streams install')", False, 'import unittest\n'), ((154, 18, 154, 82), 'unittest.SkipTest', 'unittest.SkipTest', ({(154, 36, 154, 81): '"""Skipped due to no local IBM Streams install"""'}, {}), "('Skipped due to no local IBM Streams install')", False, 'import unittest\n'), ((157, 18, 157, 102), 'unittest.SkipTest', 'unittest.SkipTest', ({(157, 36, 157, 101): '"""Skipped due to STREAMS_INSTANCE_ID environment variable not set"""'}, {}), "(\n 'Skipped due to STREAMS_INSTANCE_ID environment variable not set')", False, 'import unittest\n'), ((159, 18, 159, 100), 'unittest.SkipTest', 'unittest.SkipTest', ({(159, 36, 159, 99): '"""Skipped due to STREAMS_DOMAIN_ID environment variable not set"""'}, {}), "(\n 'Skipped due to STREAMS_DOMAIN_ID environment variable not set')", False, 'import unittest\n'), ((190, 18, 190, 96), 'unittest.SkipTest', 'unittest.SkipTest', ({(190, 36, 190, 95): '"""Skipped due to VCAP_SERVICES environment variable not set"""'}, {}), "('Skipped due to VCAP_SERVICES environment variable not set')", False, 'import unittest\n'), ((194, 27, 194, 83), 'os.environ.get', 'os.environ.get', ({(194, 42, 194, 76): '"""STREAMING_ANALYTICS_SERVICE_NAME"""', (194, 78, 194, 82): 'None'}, {}), "('STREAMING_ANALYTICS_SERVICE_NAME', None)", False, 'import os\n'), ((196, 18, 196, 78), 'unittest.SkipTest', 'unittest.SkipTest', ({(196, 36, 196, 77): '"""Skipped due to no service name supplied"""'}, {}), "('Skipped due to no service name supplied')", False, 'import unittest\n'), ((242, 19, 242, 54), 'streamsx.topology.tester_runtime._TupleExactCount', 'sttrt._TupleExactCount', ({(242, 42, 242, 47): 'count', (242, 49, 242, 53): 'name'}, {}), '(count, name)', True, 'import streamsx.topology.tester_runtime as sttrt\n'), ((246, 19, 246, 56), 'streamsx.topology.tester_runtime._TupleAtLeastCount', 'sttrt._TupleAtLeastCount', ({(246, 44, 246, 49): 'count', (246, 51, 246, 55): 'name'}, {}), '(count, name)', True, 'import streamsx.topology.tester_runtime as sttrt\n'), ((263, 19, 263, 56), 'streamsx.topology.tester_runtime._StreamContents', 'sttrt._StreamContents', ({(263, 41, 263, 49): 'expected', (263, 51, 263, 55): 'name'}, {}), '(expected, name)', True, 'import streamsx.topology.tester_runtime as sttrt\n'), ((266, 19, 266, 65), 'streamsx.topology.tester_runtime._UnorderedStreamContents', 'sttrt._UnorderedStreamContents', ({(266, 50, 266, 58): 'expected', (266, 60, 266, 64): 'name'}, {}), '(expected, name)', True, 'import streamsx.topology.tester_runtime as sttrt\n'), ((465, 38, 465, 75), 'streamsx.rest.StreamsConnection', 'StreamsConnection', ({(465, 56, 465, 64): 'username', (465, 66, 465, 74): 'password'}, {}), '(username, password)', False, 'from streamsx.rest import StreamsConnection\n'), ((482, 38, 482, 95), 'streamsx.rest.StreamingAnalyticsConnection', 'StreamingAnalyticsConnection', ({(482, 67, 482, 80): 'vcap_services', (482, 82, 482, 94): 'service_name'}, {}), '(vcap_services, service_name)', False, 'from streamsx.rest import StreamingAnalyticsConnection\n'), ((559, 12, 559, 34), 'time.sleep', 'time.sleep', ({(559, 23, 559, 33): 'self.delay'}, {}), '(self.delay)', False, 'import time\n'), ((577, 12, 577, 34), 'time.sleep', 'time.sleep', ({(577, 23, 577, 33): 'self.delay'}, {}), '(self.delay)', False, 'import time\n'), ((598, 21, 598, 51), 'streamsx.topology.tester_runtime.Condition._mn', 'sttrt.Condition._mn', ({(598, 41, 598, 46): '"""seq"""', (598, 48, 598, 50): 'cn'}, {}), "('seq', cn)", True, 'import streamsx.topology.tester_runtime as sttrt\n'), ((610, 22, 610, 53), 'streamsx.topology.tester_runtime.Condition._mn', 'sttrt.Condition._mn', ({(610, 42, 610, 48): '"""fail"""', (610, 50, 610, 52): 'cn'}, {}), "('fail', cn)", True, 'import streamsx.topology.tester_runtime as sttrt\n'), ((621, 24, 621, 56), 'streamsx.topology.tester_runtime.Condition._mn', 'sttrt.Condition._mn', ({(621, 44, 621, 51): '"""valid"""', (621, 53, 621, 55): 'cn'}, {}), "('valid', cn)", True, 'import streamsx.topology.tester_runtime as sttrt\n')] |
Curly-Mo/piglatin | piglatin_microservice/views/main.py | 9ea4a7533675bcb5b28f708beda18f175e0a9fe4 | from flask import request, jsonify, Blueprint
from .. import piglatin
main = Blueprint('main', __name__)
@main.route('/', methods=['GET', 'POST'])
def index():
response = """
Please use the endpoint /translate to access this api.
Usage: "{}translate?text=Translate+this+text+into+Piglatin."
""".format(request.url)
return response
@main.route('/translate', methods=['GET'])
def translate():
text = request.args.get('text')
if not text:
message = 'Invalid parameter text={}'.format(text)
return jsonify(error=500, text=str(message)), 500
pig_text = piglatin.translate(text)
response = {'text': pig_text}
return jsonify(response)
| [((7, 7, 7, 34), 'flask.Blueprint', 'Blueprint', ({(7, 17, 7, 23): '"""main"""', (7, 25, 7, 33): '__name__'}, {}), "('main', __name__)", False, 'from flask import request, jsonify, Blueprint\n'), ((21, 11, 21, 35), 'flask.request.args.get', 'request.args.get', ({(21, 28, 21, 34): '"""text"""'}, {}), "('text')", False, 'from flask import request, jsonify, Blueprint\n'), ((29, 11, 29, 28), 'flask.jsonify', 'jsonify', ({(29, 19, 29, 27): 'response'}, {}), '(response)', False, 'from flask import request, jsonify, Blueprint\n')] |
sumanentc/Python-Projects | projects/pong-game/pong.py | 11c763fcbe4e088928bd56c28f767b93ae73984d | from turtle import Screen
from paddle import Paddle
from ball import Ball
import time
from scoreboard import ScoreBoard
screen = Screen()
screen.bgcolor('black')
screen.setup(width=800, height=600)
screen.title('pong')
# Turn off animation to show paddle after it has been shifted
screen.tracer(0)
right_paddle = Paddle(350, 0)
left_paddle = Paddle(-350, 0)
ball = Ball()
score = ScoreBoard()
screen.listen()
screen.onkey(right_paddle.go_up, 'Up')
screen.onkey(right_paddle.go_down, 'Down')
screen.onkey(left_paddle.go_up, 'w')
screen.onkey(left_paddle.go_down, 's')
game_is_on = True
while game_is_on:
time.sleep(ball.ball_speed)
screen.update()
ball.move()
# bounce when the ball hit the wall
if ball.ycor() > 280 or ball.ycor() < -280:
ball.bounce_y()
# detect collision with the paddle
if (ball.distance(right_paddle) < 50 and ball.xcor() > 320) or (
ball.distance(left_paddle) < 50 and ball.xcor() < -320):
ball.bounce_x()
# detect R paddle miss
if ball.xcor() > 380:
ball.reset_pos()
score.increase_l_point()
# detect L paddle miss
if ball.xcor() < -380:
ball.reset_pos()
score.increase_r_point()
screen.exitonclick()
| [((8, 9, 8, 17), 'turtle.Screen', 'Screen', ({}, {}), '()', False, 'from turtle import Screen\n'), ((16, 15, 16, 29), 'paddle.Paddle', 'Paddle', ({(16, 22, 16, 25): '350', (16, 27, 16, 28): '0'}, {}), '(350, 0)', False, 'from paddle import Paddle\n'), ((17, 14, 17, 29), 'paddle.Paddle', 'Paddle', ({(17, 21, 17, 25): '-350', (17, 27, 17, 28): '0'}, {}), '(-350, 0)', False, 'from paddle import Paddle\n'), ((18, 7, 18, 13), 'ball.Ball', 'Ball', ({}, {}), '()', False, 'from ball import Ball\n'), ((19, 8, 19, 20), 'scoreboard.ScoreBoard', 'ScoreBoard', ({}, {}), '()', False, 'from scoreboard import ScoreBoard\n'), ((30, 4, 30, 31), 'time.sleep', 'time.sleep', ({(30, 15, 30, 30): 'ball.ball_speed'}, {}), '(ball.ball_speed)', False, 'import time\n')] |
brad-h/expy | ExPy/ExPy/module20.py | d3f3dfbbdae31ab8c7e134a5ce9d5f6adf94b516 | """ Multistate Sales Tax Calculator """
import os
from decimal import Decimal
from decimal import InvalidOperation
def prompt_decimal(prompt):
""" Using the prompt, attempt to get a decimal from the user """
while True:
try:
return Decimal(input(prompt))
except InvalidOperation:
print('Enter a valid number')
def dollar(amount):
""" Given an amount as a number
Return a string formatted as a dollar amount
"""
amount = round(amount, 2)
return '${0:0.2f}'.format(amount)
STATE_RATES = {
'ILLINOIS': Decimal('0.08'),
'IL': Decimal('0.08'),
'WISCONSIN': Decimal('0.05'),
'WI': Decimal('0.05'),
}
WISCONSIN_RATES = {
'EAU CLAIRE': Decimal('0.005'),
'DUNN': Decimal('0.004')
}
def ex20():
""" Prompt for the order amount and state
If the state is Wisconsin, prompt for the county
Print the sales tax and total amount
"""
amount = prompt_decimal('What is the order amount? ')
state = input('What state do you live in? ')
if state.upper() in STATE_RATES:
rate = STATE_RATES[state.upper()]
else:
rate = Decimal(0)
if state.upper() == 'WISCONSIN':
county = input('What county do you live in? ')
if county.upper() in WISCONSIN_RATES:
rate += WISCONSIN_RATES[county.upper()]
tax = amount * rate
total = tax + amount
output = os.linesep.join([
'The tax is {}'.format(dollar(tax)),
'The total is {}'.format(dollar(total))])
print(output)
if __name__ == '__main__':
ex20()
| [((23, 16, 23, 31), 'decimal.Decimal', 'Decimal', ({(23, 24, 23, 30): '"""0.08"""'}, {}), "('0.08')", False, 'from decimal import Decimal\n'), ((24, 10, 24, 25), 'decimal.Decimal', 'Decimal', ({(24, 18, 24, 24): '"""0.08"""'}, {}), "('0.08')", False, 'from decimal import Decimal\n'), ((25, 17, 25, 32), 'decimal.Decimal', 'Decimal', ({(25, 25, 25, 31): '"""0.05"""'}, {}), "('0.05')", False, 'from decimal import Decimal\n'), ((26, 10, 26, 25), 'decimal.Decimal', 'Decimal', ({(26, 18, 26, 24): '"""0.05"""'}, {}), "('0.05')", False, 'from decimal import Decimal\n'), ((30, 18, 30, 34), 'decimal.Decimal', 'Decimal', ({(30, 26, 30, 33): '"""0.005"""'}, {}), "('0.005')", False, 'from decimal import Decimal\n'), ((31, 12, 31, 28), 'decimal.Decimal', 'Decimal', ({(31, 20, 31, 27): '"""0.004"""'}, {}), "('0.004')", False, 'from decimal import Decimal\n'), ((45, 15, 45, 25), 'decimal.Decimal', 'Decimal', ({(45, 23, 45, 24): '0'}, {}), '(0)', False, 'from decimal import Decimal\n')] |
psmsmets/pyVDMS | pyvdms/util/verify.py | cb3db93b655d3a02ae3aa1fdd418ae70dd249271 | r"""
:mod:`util.verify` -- Input verification
========================================
Common input verification methods.
"""
# Mandatory imports
import numpy as np
__all__ = ['verify_tuple_range']
def verify_tuple_range(
input_range: tuple, allow_none: bool = True, name: str = None,
step: bool = None, unit: bool = None, todict: bool = False
):
"""
Verify if the input range tuple fullfils the requirements.
An error is raised if a criteria is failed.
"""
name = name or 'input range'
r = dict(first=None, last=None, step=None, unit=None)
if input_range is None:
if allow_none:
return r if todict else None
else:
raise ValueError(f'{name} is empty!')
if not isinstance(input_range, tuple):
raise TypeError(f'{name} should be a tuple!')
minlen = 2
maxlen = 4
if step is True:
minlen += 1
elif step is False:
maxlen -= 1
if unit is True:
minlen += 1
elif unit is False:
maxlen -= 1
if len(input_range) < minlen or len(input_range) > maxlen:
length = minlen if minlen == maxlen else f'{minlen} to {maxlen}'
raise TypeError(f'{name} should be of length {length}!')
r['first'] = input_range[0]
r['last'] = input_range[1]
if not isinstance(r['first'], float) or not isinstance(r['last'], float):
raise TypeError(f'{name} range values should be of type float!')
if step is not False:
if step: # required
r['step'] = input_range[2]
if not isinstance(r['step'], float):
raise TypeError(f'{name} step should be of type float!')
else: # optional
r['step'] = input_range[2] if len(input_range) > minlen else None
r['step'] = r['step'] if isinstance(r['step'], float) else None
if r['step']:
if r['step'] == 0.:
raise ValueError(f'{name} step cannot be zero!')
if np.sign(r['last'] - r['first']) != np.sign(r['step']):
raise ValueError(f'{name} range and step signs should be equal!')
else:
if r['last'] <= r['first']:
raise ValueError(f'{name} range should be incremental!')
if unit is not False:
if unit: # required
r['unit'] = input_range[-1]
if not isinstance(r['unit'], str):
raise TypeError(f'{name} unit should be of type string!')
else: # optional
r['unit'] = input_range[-1] if len(input_range) > minlen else None
r['unit'] = r['unit'] if isinstance(r['unit'], str) else None
return r if todict else None
| [((71, 11, 71, 42), 'numpy.sign', 'np.sign', ({(71, 19, 71, 41): "(r['last'] - r['first'])"}, {}), "(r['last'] - r['first'])", True, 'import numpy as np\n'), ((71, 46, 71, 64), 'numpy.sign', 'np.sign', ({(71, 54, 71, 63): "r['step']"}, {}), "(r['step'])", True, 'import numpy as np\n')] |
reneraab/librephotos | api/image_similarity.py | a3972ab520586e721c67f283b1a50ccb7abe2b01 | import numpy as np
import requests
from django.db.models import Q
from api.models import Photo, User
from api.util import logger
from ownphotos.settings import IMAGE_SIMILARITY_SERVER
def search_similar_embedding(user, emb, result_count=100, threshold=27):
if type(user) == int:
user_id = user
else:
user_id = user.id
image_embedding = np.array(emb, dtype=np.float32)
post_data = {
"user_id": user_id,
"image_embedding": image_embedding.tolist(),
"n": result_count,
"threshold": threshold,
}
res = requests.post(IMAGE_SIMILARITY_SERVER + "/search/", json=post_data)
if res.status_code == 200:
return res.json()["result"]
else:
logger.error("error retrieving similar embeddings for user {}".format(user_id))
return []
def search_similar_image(user, photo):
if type(user) == int:
user_id = user
else:
user_id = user.id
if photo.clip_embeddings == None:
photo._generate_clip_embeddings()
if photo.clip_embeddings == None:
return []
image_embedding = np.array(photo.clip_embeddings, dtype=np.float32)
post_data = {"user_id": user_id, "image_embedding": image_embedding.tolist()}
res = requests.post(IMAGE_SIMILARITY_SERVER + "/search/", json=post_data)
if res.status_code == 200:
return res.json()
else:
logger.error(
"error retrieving similar photos to {} belonging to user {}".format(
photo.image_hash, user.username
)
)
return []
def build_image_similarity_index(user):
logger.info("builing similarity index for user {}".format(user.username))
photos = (
Photo.objects.filter(Q(hidden=False) & Q(owner=user))
.exclude(clip_embeddings=None)
.only("clip_embeddings")
)
image_hashes = []
image_embeddings = []
for photo in photos:
image_hashes.append(photo.image_hash)
image_embedding = np.array(photo.clip_embeddings, dtype=np.float32)
image_embeddings.append(image_embedding.tolist())
post_data = {
"user_id": user.id,
"image_hashes": image_hashes,
"image_embeddings": image_embeddings,
}
res = requests.post(IMAGE_SIMILARITY_SERVER + "/build/", json=post_data)
return res.json()
| [((16, 22, 16, 53), 'numpy.array', 'np.array', (), '', True, 'import numpy as np\n'), ((24, 10, 24, 77), 'requests.post', 'requests.post', (), '', False, 'import requests\n'), ((43, 22, 43, 71), 'numpy.array', 'np.array', (), '', True, 'import numpy as np\n'), ((46, 10, 46, 77), 'requests.post', 'requests.post', (), '', False, 'import requests\n'), ((79, 10, 79, 76), 'requests.post', 'requests.post', (), '', False, 'import requests\n'), ((71, 26, 71, 75), 'numpy.array', 'np.array', (), '', True, 'import numpy as np\n'), ((61, 29, 61, 44), 'django.db.models.Q', 'Q', (), '', False, 'from django.db.models import Q\n'), ((61, 47, 61, 60), 'django.db.models.Q', 'Q', (), '', False, 'from django.db.models import Q\n')] |
webdevhub42/Lambda | WEEKS/CD_Sata-Structures/_MISC/misc-examples/python3-book-examples/struct/struct_endianness.py | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | #
"""
"""
# end_pymotw_header
import struct
import binascii
values = (1, "ab".encode("utf-8"), 2.7)
print("Original values:", values)
endianness = [
("@", "native, native"),
("=", "native, standard"),
("<", "little-endian"),
(">", "big-endian"),
("!", "network"),
]
for code, name in endianness:
s = struct.Struct(code + " I 2s f")
packed_data = s.pack(*values)
print()
print("Format string :", s.format, "for", name)
print("Uses :", s.size, "bytes")
print("Packed Value :", binascii.hexlify(packed_data))
print("Unpacked Value :", s.unpack(packed_data))
| [((22, 8, 22, 39), 'struct.Struct', 'struct.Struct', ({(22, 22, 22, 38): "code + ' I 2s f'"}, {}), "(code + ' I 2s f')", False, 'import struct\n'), ((27, 30, 27, 59), 'binascii.hexlify', 'binascii.hexlify', ({(27, 47, 27, 58): 'packed_data'}, {}), '(packed_data)', False, 'import binascii\n')] |
GuillaumeVandekerckhove/pydov | pydov/util/net.py | b51f77bf93d1f9e96dd39edf564d95426da04126 | # -*- coding: utf-8 -*-
"""Module grouping network-related utilities and functions."""
from queue import Empty, Queue
from threading import Thread
import requests
import urllib3
from requests.adapters import HTTPAdapter
import pydov
request_timeout = 300
class TimeoutHTTPAdapter(HTTPAdapter):
"""HTTPAdapter which adds a default timeout to requests. Allows timeout
to be overridden on a per-request basis.
"""
def __init__(self, *args, **kwargs):
"""Initialisation."""
self.timeout = request_timeout
if "timeout" in kwargs:
self.timeout = kwargs["timeout"]
del kwargs["timeout"]
super().__init__(*args, **kwargs)
def send(self, request, **kwargs):
"""Sends PreparedRequest object. Returns Response object.
Parameters
----------
request : requests.PreparedRequest
The PreparedRequest being sent.
Returns
-------
requests.Response
The Response of the request.
"""
timeout = kwargs.get("timeout")
if timeout is None:
kwargs["timeout"] = self.timeout
return super().send(request, **kwargs)
class SessionFactory:
"""Class for generating pydov configured requests Sessions. They are used
to send HTTP requests using our user-agent and with added retry-logic.
One global session is used for all requests, and additionally one
session is used per thread executing XML requests in parallel.
"""
@staticmethod
def get_session():
"""Request a new session.
Returns
-------
requests.Session
pydov configured requests Session.
"""
session = requests.Session()
session.headers.update(
{'user-agent': 'pydov/{}'.format(pydov.__version__)})
try:
retry = urllib3.util.Retry(
total=10, connect=10, read=10, redirect=5, backoff_factor=1,
allowed_methods=set(
['HEAD', 'GET', 'POST', 'PUT', 'OPTIONS']))
except TypeError:
# urllib3 < 1.26.0 used method_whitelist instead
retry = urllib3.util.Retry(
total=10, connect=10, read=10, redirect=5, backoff_factor=1,
method_whitelist=set(
['HEAD', 'GET', 'POST', 'PUT', 'OPTIONS']))
adapter = TimeoutHTTPAdapter(timeout=request_timeout,
max_retries=retry)
session.mount('http://', adapter)
session.mount('https://', adapter)
return session
class LocalSessionThreadPool:
"""Thread pool of LocalSessionThreads used to perform XML I/O operations
in parallel.
"""
def __init__(self, workers=4):
"""Initialisation.
Set up the pool and start all workers.
Parameters
----------
workers : int, optional
Number of worker threads to use, defaults to 4.
"""
self.workers = []
self.input_queue = Queue(maxsize=100)
self.result_queue = Queue()
for i in range(workers):
self.workers.append(LocalSessionThread(self.input_queue))
self._start()
def _start(self):
"""Start all worker threads. """
for w in self.workers:
w.start()
def stop(self):
"""Stop all worker threads. """
for w in self.workers:
w.stop()
def execute(self, fn, args):
"""Execute the given function with its arguments in a worker thread.
This will add the job to the queue and will not wait for the result.
Use join() to retrieve the result.
Parameters
----------
fn : function
Function to execute.
args : tuple
Arguments that will be passed to the function.
"""
r = WorkerResult()
self.input_queue.put((fn, args, r))
self.result_queue.put(r)
def join(self):
"""Wait for all the jobs to be executed and return the results of all
jobs in a list.
Yields
------
WorkerResult
Results of the executed functions in the order they were
submitted.
"""
self.input_queue.join()
self.stop()
while not self.result_queue.empty():
yield self.result_queue.get()
class WorkerResult:
"""Class for storing the result of a job execution in the result queue.
This allows putting a result instance in the queue on job submission and
fill in the result later when the job completes. This ensures the result
output is in the same order as the jobs were submitted.
"""
def __init__(self):
"""Initialisation. """
self.result = None
self.error = None
def set_result(self, value):
"""Set the result of this job.
Parameters
----------
value : any
The result of the execution of the job.
"""
self.result = value
def get_result(self):
"""Retrieve the result of this job.
Returns
-------
any
The result of the execution of the job.
"""
return self.result
def set_error(self, error):
"""Set the error, in case the jobs fails with an exception.
Parameters
----------
error : Exception
The exception raised while executing this job.
"""
self.error = error
def get_error(self):
"""Retrieve the error, if any, of this job.
Returns
-------
Exception
The exception raised while executing this job.
"""
return self.error
class LocalSessionThread(Thread):
"""Worker thread using a local Session to execute functions. """
def __init__(self, input_queue):
"""Initialisation.
Bind to the input queue and create a Session.
Parameters
----------
input_queue : queue.Queue
Queue to poll for input, this should be in the form of a tuple with
3 items: function to call, list with arguments and WorkerResult
instance to store the output. The list with arguments will be
automatically extended with the local Session instance.
"""
super().__init__()
self.input_queue = input_queue
self.stopping = False
self.session = SessionFactory.get_session()
def stop(self):
"""Stop the worker thread at the next occasion. This can take up to
500 ms. """
self.stopping = True
def run(self):
"""Executed while the thread is running. This is called implicitly
when starting the thread. """
while not self.stopping:
try:
fn, args, r = self.input_queue.get(timeout=0.5)
args = list(args)
args.append(self.session)
try:
result = fn(*args)
except BaseException as e:
r.set_error(e)
else:
r.set_result(result)
finally:
self.input_queue.task_done()
except Empty:
pass
| [((64, 18, 64, 36), 'requests.Session', 'requests.Session', ({}, {}), '()', False, 'import requests\n'), ((104, 27, 104, 45), 'queue.Queue', 'Queue', (), '', False, 'from queue import Empty, Queue\n'), ((105, 28, 105, 35), 'queue.Queue', 'Queue', ({}, {}), '()', False, 'from queue import Empty, Queue\n')] |
yiunsr/flask_labstoo_base | app/main/views.py | ec99a7c955bd0bd9d96959c1c26cbd0e5ec23796 | from flask.helpers import make_response
from flask.templating import render_template
from . import main
@main.route('/', methods=['GET', 'POST'])
@main.route('/index', methods=['GET', 'POST'])
def index():
resp = make_response(
render_template('main/index.html'))
return resp
| [((10, 8, 10, 42), 'flask.templating.render_template', 'render_template', ({(10, 24, 10, 41): '"""main/index.html"""'}, {}), "('main/index.html')", False, 'from flask.templating import render_template\n')] |
abamaj/SBOL-to-Excel | SBOL2Excel/utils/sbol2excel.py | 790ef5242990c06b20dcb8e207def8e4527aea02 | import sbol2
import pandas as pd
import os
import logging
from openpyxl import load_workbook
from openpyxl.worksheet.table import Table, TableStyleInfo
from openpyxl.utils.dataframe import dataframe_to_rows
from openpyxl.styles import Font, PatternFill, Border, Side
from requests_html import HTMLSession
#wasderivedfrom: source
#remove identity, persistenID, displayID, version
#remove attachment (if empty)
#add library sheets
#add postprocessing function to remove unecessaries
class seqFile:
def __init__(self, file_path_in, output_path):
# global varibales for homespace, document, and sheet
self.homeSpace = 'https://sys-bio.org'
self.document = file_path_in
self.file_location_path = os.path.dirname(__file__)
self.sheet = os.path.join(self.file_location_path, 'ontologies.xlsx')
self.output_template = os.path.join(self.file_location_path, 'Template_to_Output_Into_v001.xlsx')
self.output_path = output_path
def roleVariables(self):
# set Excel file into a dataframe
df = pd.read_excel(self.sheet, index_col=0,
sheet_name=1, usecols=[1, 2])
# convert the dataframe into a dictionary
roleConvertDict = df.to_dict()
# set dictionary indices and values (use column 'URI' in excel sheet)
roleName = roleConvertDict['URI']
# switch indices' and values' postions
roleDictionary = {uri: role for role, uri in roleName.items()}
return roleDictionary
def orgVariables(self):
# set Excel file into a dataframe
df = pd.read_excel(self.sheet, index_col=0,
sheet_name=2, usecols=[0, 1])
# convert the dataframe into a dictionary
organismConvertDict = df.to_dict()
# set dictionary indices and values (use column 'txid' in excel sheet)
organismName = organismConvertDict['txid']
# switch indices' and values' postions
organismDictionary = {str(txid): organism for organism, txid in organismName.items()}
return organismDictionary
# def inspectDocInfo(self):
# # declare homespace
# sbol2.setHomespace(self.homeSpace)
# doc = sbol2.Document()
# doc.read('../tests/test_files/' + self.document)
# # doc.read(self.document)
# # print document information
# print(doc)
# def printDocContents(self):
# # declare homespace
# sbol2.setHomespace(self.homeSpace)
# doc = sbol2.Document()
# doc.read('../tests/test_files/' + self.document)
# # doc.read(self.document)
# # print document contents
# for obj in doc:
# print(obj)
def readDocChart(self):
# declare homespace
sbol2.setHomespace(self.homeSpace)
doc = sbol2.Document()
doc.read(self.document)
# create a dictionary to hold all the component defintions' information
componentDefinitions = {}
# iterate through the component definitions
roleDict = self.roleVariables()
orgDict = self.orgVariables()
for cd in doc.componentDefinitions:
cdType = cd.type
# create a dictionary that has a key for the
# component definition's identity,
# and a value for all of its features
componentFeatures = {}
persistentIdentity = cd.properties['http://sbols.org/v2#persistentIdentity'][0]
# iterate through the properties of the component defintions
# and set them equal to propValue variable
for prop in cd.properties:
try:
propValue = cd.properties[prop][0]
except (IndexError):
propValue = cd.properties[prop]
# extract attribute property type
if propValue == []:
propValue = ''
prop = self.prop_convert(prop)
propValue = columnMethods(prop, propValue, doc, cdType,
roleDict, orgDict).colV
componentFeatures[prop] = str(propValue)
# append each componentFeatures dictionary as a
# value into the componentDefinitions
# dictionary with the 'persistentIdentity' serving as the key
componentDefinitions[persistentIdentity] = componentFeatures
# return the dictionary of information (temporary, maybe
# return true if read in correctly)
doc_chart = pd.DataFrame.from_dict(componentDefinitions, orient="index")
return doc_chart
def prop_convert(self, prop):
if type(prop) is str:
idx = prop.find('#')
# if parsing conditions meet, append them into the
# componentFeatures dictionary as necessary
if idx >= 1:
prop = prop[idx + 1:]
if prop == 'type':
prop = 'types'
if prop == 'http://purl.org/dc/terms/title':
prop = 'title'
if prop == 'http://purl.org/dc/terms/description':
prop = 'description'
if prop == 'http://purl.obolibrary.org/obo/OBI_0001617':
prop = 'OBI_0001617'
return (prop)
else:
raise ValueError()
def displayDocChart(self):
#display the dataframe
return pd.DataFrame.from_dict(self.readDocChart(), orient = "index")
def TEMP_readDocChart1(self):
#demo of table column names
columnNames = ['Part Name',
'Role',
'Design Notes',
'Altered Sequence',
'Part Description',
'Data Source Prefix',
'Data Source',
'Source Organism',
'Target Organism',
'Circular',
'length (bp)',
'Sequence',
'Data Source',
'Composite']
#import dataframe dictionary
#convert dictionary to dataframe
df = self.displayDocChart()
#type caste dataframe to a set
dfSet = set(df)
#type caste column names to a set
columnNameOrder = set(columnNames)
#check difference between the dataframe set and the column name order
dfSetDifference = dfSet.difference(columnNameOrder)
#check intersection between the datframe set and the column name order
dfSetIntersection = dfSet.intersection(columnNameOrder)
#combine the type casted difference and intersection
finalSetList = list(dfSetIntersection) + list(dfSetDifference)
#set list to dictionary
return finalSetList
# def displayDocChart(self):
# # display the dataframe
# return pd.DataFrame.from_dict(self.readDocChart(), orient="index")
def columnString(self, n):
# loop through column length in order to get string appropriate
# values for excel sheet rows and columns
string = ""
while n > 0:
n, remainder = divmod(n - 1, 26)
string = chr(65 + remainder) + string
return string
def returnExcelChart(self):
start_row = 18
start_cell = f'A{start_row}'
# load a workbook
wb = load_workbook(self.output_template)
ws = wb.active
# load raw dataframe to df
df = self.readDocChart()
# set font features
ft1 = Font(name='Arial', size=12, color='548235')
ft2 = Font(name='Calibri', size=11, bold=True)
hold = dataframe_to_rows(df, index=False, header=True)
# counter = 0
# loop through worksheet
ws[start_cell].value = ''
for r in hold:
# if a specific cell is empty, continue to loop past it
if r == [None]:
continue
ws.append(r)
# counter += 1
# set table features
tab = Table(displayName="Parts_Lib", ref=f"A{start_row +1}:{self.columnString(len(df.columns))}{(len(df) * 2) - 2}")
style = TableStyleInfo(name="TableStyleLight7", showFirstColumn=False,
showLastColumn=False, showRowStripes=True,
showColumnStripes=False)
cellColor = PatternFill(patternType='solid',
fgColor='DDEBF7')
cellBorder = Side(border_style='medium', color="000000")
# cellIndex = len(x)
# gives cells within specified range their table attributes
for col in range(1, len(df.columns) + 1):
alpha = self.columnString(col)
ws[f'{alpha}{start_row+1}'].fill = cellColor
ws[f'{alpha}{start_row+1}'].border = Border(top=cellBorder)
tab.tableStyleInfo = style
ws.add_table(tab)
# counter = 0
# gives cells within specified range their font attributes
for row in range(len(df) - 1, (len(df) * 2 - 1)):
# counter = counter + 1
for cell in ws[row]:
cell.font = ft1
# gives cells within specified range their font attributes
# (these are special features for the title)
num_rows = len(df)
if num_rows % 2 > 0:
num_rows = num_rows - 1
for j in range(19, num_rows):
for x in ws[j]:
x.font = ft2
# output the file
wb.save(self.output_path)
wb.close()
logging.warning(f'Your converted file has been output at {self.output_path}')
class columnMethods:
def __init__(self, colN, colV, doc, cdType, roleDict, orgDict):
# global varibales for dataframe switch statements
self.colN = colN
self.colV = colV
self.doc = doc
self.cdType = cdType
self.roleDict = roleDict
self.orgDict = orgDict
# if the column name matches the function name, call the function
try:
return getattr(self, self.colN)()
# if the column name does not match the function name, call 'no_change'
except AttributeError:
return getattr(self, 'no_change')()
def no_change(self):
pass
# if the specified column role value is within the role column
def role(self):
roleVal = str(self.colV)
if roleVal in self.roleDict:
self.colV = self.roleDict[roleVal]
def types(self):
self.colV = self.colV.split('#')[-1]
def sequence(self):
self.colV = self.doc.getSequence(self.colV).elements
def sourceOrganism(self):
orgVal = str(self.colV)
orgVal = orgVal.split('=')[-1]
txid = self.colV.split('=')[-1]
if orgVal in self.orgDict:
self.colV = self.orgDict[orgVal]
else:
session = HTMLSession()
r = session.get(self.colV)
v = r.html.find('strong', first=True)
self.colV = v.text
self.orgDict[txid] = self.colV
def targetOrganism(self):
orgVal = str(self.colV)
orgVal = orgVal.split('=')[-1]
txid = self.colV.split('=')[-1]
if orgVal in self.orgDict:
self.colV = self.orgDict[orgVal]
else:
session = HTMLSession()
r = session.get(self.colV)
v = r.html.find('strong', first=True)
self.colV = v.text
self.orgDict[txid] = self.colV
| [((24, 34, 24, 59), 'os.path.dirname', 'os.path.dirname', ({(24, 50, 24, 58): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((25, 21, 25, 77), 'os.path.join', 'os.path.join', ({(25, 34, 25, 57): 'self.file_location_path', (25, 59, 25, 76): '"""ontologies.xlsx"""'}, {}), "(self.file_location_path, 'ontologies.xlsx')", False, 'import os\n'), ((26, 31, 26, 105), 'os.path.join', 'os.path.join', ({(26, 44, 26, 67): 'self.file_location_path', (26, 69, 26, 104): '"""Template_to_Output_Into_v001.xlsx"""'}, {}), "(self.file_location_path, 'Template_to_Output_Into_v001.xlsx')", False, 'import os\n'), ((31, 13, 32, 56), 'pandas.read_excel', 'pd.read_excel', (), '', True, 'import pandas as pd\n'), ((43, 13, 44, 56), 'pandas.read_excel', 'pd.read_excel', (), '', True, 'import pandas as pd\n'), ((74, 8, 74, 42), 'sbol2.setHomespace', 'sbol2.setHomespace', ({(74, 27, 74, 41): 'self.homeSpace'}, {}), '(self.homeSpace)', False, 'import sbol2\n'), ((75, 14, 75, 30), 'sbol2.Document', 'sbol2.Document', ({}, {}), '()', False, 'import sbol2\n'), ((110, 20, 110, 80), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (), '', True, 'import pandas as pd\n'), ((185, 13, 185, 48), 'openpyxl.load_workbook', 'load_workbook', ({(185, 27, 185, 47): 'self.output_template'}, {}), '(self.output_template)', False, 'from openpyxl import load_workbook\n'), ((190, 14, 190, 57), 'openpyxl.styles.Font', 'Font', (), '', False, 'from openpyxl.styles import Font, PatternFill, Border, Side\n'), ((191, 14, 191, 54), 'openpyxl.styles.Font', 'Font', (), '', False, 'from openpyxl.styles import Font, PatternFill, Border, Side\n'), ((192, 15, 192, 62), 'openpyxl.utils.dataframe.dataframe_to_rows', 'dataframe_to_rows', (), '', False, 'from openpyxl.utils.dataframe import dataframe_to_rows\n'), ((204, 16, 206, 55), 'openpyxl.worksheet.table.TableStyleInfo', 'TableStyleInfo', (), '', False, 'from openpyxl.worksheet.table import Table, TableStyleInfo\n'), ((207, 20, 208, 49), 'openpyxl.styles.PatternFill', 'PatternFill', (), '', False, 'from openpyxl.styles import Font, PatternFill, Border, Side\n'), ((209, 21, 209, 64), 'openpyxl.styles.Side', 'Side', (), '', False, 'from openpyxl.styles import Font, PatternFill, Border, Side\n'), ((235, 8, 235, 85), 'logging.warning', 'logging.warning', ({(235, 24, 235, 84): 'f"""Your converted file has been output at {self.output_path}"""'}, {}), "(f'Your converted file has been output at {self.output_path}')", False, 'import logging\n'), ((215, 49, 215, 71), 'openpyxl.styles.Border', 'Border', (), '', False, 'from openpyxl.styles import Font, PatternFill, Border, Side\n'), ((277, 22, 277, 35), 'requests_html.HTMLSession', 'HTMLSession', ({}, {}), '()', False, 'from requests_html import HTMLSession\n'), ((290, 22, 290, 35), 'requests_html.HTMLSession', 'HTMLSession', ({}, {}), '()', False, 'from requests_html import HTMLSession\n')] |
akshay-ap/ocean.py | ocean_lib/models/data_token.py | 1dab70d164ca36a6cff284e8be82ae04344ad13f | #
# Copyright 2021 Ocean Protocol Foundation
# SPDX-License-Identifier: Apache-2.0
#
import json
import os
import time
from collections import namedtuple
import requests
from eth_utils import remove_0x_prefix
from ocean_lib.data_provider.data_service_provider import DataServiceProvider
from ocean_lib.enforce_typing_shim import enforce_types_shim
from ocean_lib.ocean.util import from_base_18, to_base_18
from ocean_lib.web3_internal.contract_base import ContractBase
from ocean_lib.web3_internal.event_filter import EventFilter
from ocean_lib.web3_internal.wallet import Wallet
from ocean_lib.web3_internal.web3_provider import Web3Provider
from ocean_utils.http_requests.requests_session import get_requests_session
from web3 import Web3
from web3.exceptions import MismatchedABI
from web3.utils.events import get_event_data
from websockets import ConnectionClosed
OrderValues = namedtuple(
"OrderValues",
("consumer", "amount", "serviceId", "startedAt", "marketFeeCollector", "marketFee"),
)
@enforce_types_shim
class DataToken(ContractBase):
CONTRACT_NAME = "DataTokenTemplate"
DEFAULT_CAP = 1000.0
DEFAULT_CAP_BASE = to_base_18(DEFAULT_CAP)
ORDER_STARTED_EVENT = "OrderStarted"
ORDER_FINISHED_EVENT = "OrderFinished"
OPF_FEE_PERCENTAGE = 0.001
MAX_MARKET_FEE_PERCENTAGE = 0.001
def get_event_signature(self, event_name):
try:
e = getattr(self.events, event_name)
except MismatchedABI:
raise ValueError(
f"Event {event_name} not found in {self.CONTRACT_NAME} contract."
)
abi = e().abi
types = [param["type"] for param in abi["inputs"]]
sig_str = f'{event_name}({",".join(types)})'
return Web3.sha3(text=sig_str).hex()
def get_start_order_logs(
self,
web3,
consumer_address=None,
from_block=0,
to_block="latest",
from_all_tokens=False,
):
topic0 = self.get_event_signature(self.ORDER_STARTED_EVENT)
topics = [topic0]
if consumer_address:
topic1 = f"0x000000000000000000000000{consumer_address[2:].lower()}"
topics = [topic0, None, topic1]
filter_params = {"fromBlock": from_block, "toBlock": to_block, "topics": topics}
if not from_all_tokens:
# get logs only for this token address
filter_params["address"] = self.address
e = getattr(self.events, self.ORDER_STARTED_EVENT)
event_abi = e().abi
logs = web3.eth.getLogs(filter_params)
parsed_logs = []
for lg in logs:
parsed_logs.append(get_event_data(event_abi, lg))
return parsed_logs
def get_transfer_events_in_range(self, from_block, to_block):
name = "Transfer"
event = getattr(self.events, name)
return self.getLogs(
event, Web3Provider.get_web3(), fromBlock=from_block, toBlock=to_block
)
def get_all_transfers_from_events(
self, start_block: int, end_block: int, chunk: int = 1000
) -> tuple:
_from = start_block
_to = _from + chunk - 1
transfer_records = []
error_count = 0
_to = min(_to, end_block)
while _from <= end_block:
try:
logs = self.get_transfer_events_in_range(_from, _to)
transfer_records.extend(
[
(
lg.args["from"],
lg.args.to,
lg.args.value,
lg.blockNumber,
lg.transactionHash.hex(),
lg.logIndex,
lg.transactionIndex,
)
for lg in logs
]
)
_from = _to + 1
_to = min(_from + chunk - 1, end_block)
error_count = 0
if (_from - start_block) % chunk == 0:
print(
f" So far processed {len(transfer_records)} Transfer events from {_from-start_block} blocks."
)
except requests.exceptions.ReadTimeout as err:
print(f"ReadTimeout ({_from}, {_to}): {err}")
error_count += 1
if error_count > 1:
break
return transfer_records, min(_to, end_block) # can have duplicates
def get_transfer_event(self, block_number, sender, receiver):
event = getattr(self.events, "Transfer")
filter_params = {"from": sender, "to": receiver}
event_filter = EventFilter(
"Transfer",
event,
filter_params,
from_block=block_number - 1,
to_block=block_number + 10,
)
logs = event_filter.get_all_entries(max_tries=10)
if not logs:
return None
if len(logs) > 1:
raise AssertionError(
f"Expected a single transfer event at "
f"block {block_number}, but found {len(logs)} events."
)
return logs[0]
def verify_transfer_tx(self, tx_id, sender, receiver):
w3 = Web3Provider.get_web3()
tx = w3.eth.getTransaction(tx_id)
if not tx:
raise AssertionError("Transaction is not found, or is not yet verified.")
if tx["from"] != sender or tx["to"] != self.address:
raise AssertionError(
f"Sender and receiver in the transaction {tx_id} "
f"do not match the expected consumer and contract addresses."
)
_iter = 0
while tx["blockNumber"] is None:
time.sleep(0.1)
tx = w3.eth.getTransaction(tx_id)
_iter = _iter + 1
if _iter > 100:
break
tx_receipt = self.get_tx_receipt(tx_id)
if tx_receipt.status == 0:
raise AssertionError("Transfer transaction failed.")
logs = getattr(self.events, "Transfer")().processReceipt(tx_receipt)
transfer_event = logs[0] if logs else None
# transfer_event = self.get_transfer_event(tx['blockNumber'], sender, receiver)
if not transfer_event:
raise AssertionError(
f"Cannot find the event for the transfer transaction with tx id {tx_id}."
)
assert (
len(logs) == 1
), f"Multiple Transfer events in the same transaction !!! {logs}"
if (
transfer_event.args["from"] != sender
or transfer_event.args["to"] != receiver
):
raise AssertionError(
"The transfer event from/to do not match the expected values."
)
return tx, transfer_event
def get_event_logs(
self, event_name, filter_args=None, from_block=0, to_block="latest"
):
event = getattr(self.events, event_name)
filter_params = filter_args or {}
event_filter = EventFilter(
event_name, event, filter_params, from_block=from_block, to_block=to_block
)
logs = event_filter.get_all_entries(max_tries=10)
if not logs:
return []
return logs
def verify_order_tx(self, web3, tx_id, did, service_id, amount_base, sender):
event = getattr(self.events, self.ORDER_STARTED_EVENT)
try:
tx_receipt = self.get_tx_receipt(tx_id)
except ConnectionClosed:
# try again in this case
tx_receipt = self.get_tx_receipt(tx_id)
if tx_receipt is None:
raise AssertionError(
"Failed to get tx receipt for the `startOrder` transaction.."
)
if tx_receipt.status == 0:
raise AssertionError("order transaction failed.")
receiver = self.contract_concise.minter()
event_logs = event().processReceipt(tx_receipt)
order_log = event_logs[0] if event_logs else None
if not order_log:
raise AssertionError(
f"Cannot find the event for the order transaction with tx id {tx_id}."
)
assert (
len(event_logs) == 1
), f"Multiple order events in the same transaction !!! {event_logs}"
asset_id = remove_0x_prefix(did).lower()
assert (
asset_id == remove_0x_prefix(self.address).lower()
), "asset-id does not match the datatoken id."
if str(order_log.args.serviceId) != str(service_id):
raise AssertionError(
f"The asset id (DID) or service id in the event does "
f"not match the requested asset. \n"
f"requested: (did={did}, serviceId={service_id}\n"
f"event: (serviceId={order_log.args.serviceId}"
)
target_amount = amount_base - self.calculate_fee(
amount_base, self.OPF_FEE_PERCENTAGE
)
if order_log.args.mrktFeeCollector and order_log.args.marketFee > 0:
assert order_log.args.marketFee <= (
self.calculate_fee(amount_base, self.MAX_MARKET_FEE_PERCENTAGE) + 5
), (
f"marketFee {order_log.args.marketFee} exceeds the expected maximum "
f"of {self.calculate_fee(amount_base, self.MAX_MARKET_FEE_PERCENTAGE)} "
f"based on feePercentage={self.MAX_MARKET_FEE_PERCENTAGE} ."
)
target_amount = target_amount - order_log.args.marketFee
# verify sender of the tx using the Tx record
tx = web3.eth.getTransaction(tx_id)
if sender not in [order_log.args.consumer, order_log.args.payer]:
raise AssertionError(
"sender of order transaction is not the consumer/payer."
)
transfer_logs = self.events.Transfer().processReceipt(tx_receipt)
receiver_to_transfers = {}
for tr in transfer_logs:
if tr.args.to not in receiver_to_transfers:
receiver_to_transfers[tr.args.to] = []
receiver_to_transfers[tr.args.to].append(tr)
if receiver not in receiver_to_transfers:
raise AssertionError(
f"receiver {receiver} is not found in the transfer events."
)
transfers = sorted(receiver_to_transfers[receiver], key=lambda x: x.args.value)
total = sum(tr.args.value for tr in transfers)
if total < (target_amount - 5):
raise ValueError(
f"transferred value does meet the service cost: "
f"service.cost - fees={from_base_18(target_amount)}, "
f"transferred value={from_base_18(total)}"
)
return tx, order_log, transfers[-1]
def download(self, wallet: Wallet, tx_id: str, destination_folder: str):
url = self.blob()
download_url = (
f"{url}?"
f"consumerAddress={wallet.address}"
f"&dataToken={self.address}"
f"&transferTxId={tx_id}"
)
response = get_requests_session().get(download_url, stream=True)
file_name = f"file-{self.address}"
DataServiceProvider.write_file(response, destination_folder, file_name)
return os.path.join(destination_folder, file_name)
def token_balance(self, account: str):
return from_base_18(self.balanceOf(account))
def _get_url_from_blob(self, int_code):
try:
url_object = json.loads(self.blob())
except json.decoder.JSONDecodeError:
return None
assert (
url_object["t"] == int_code
), "This datatoken does not appear to have a direct consume url."
return url_object.get("url")
def get_metadata_url(self):
# grab the metadatastore URL from the DataToken contract (@token_address)
return self._get_url_from_blob(1)
def get_simple_url(self):
return self._get_url_from_blob(0)
# ============================================================
# Token transactions using amount of tokens as a float instead of int
# amount of tokens will be converted to the base value before sending
# the transaction
def approve_tokens(
self, spender: str, value: float, from_wallet: Wallet, wait: bool = False
):
txid = self.approve(spender, to_base_18(value), from_wallet)
if wait:
self.get_tx_receipt(txid)
return txid
def mint_tokens(self, to_account: str, value: float, from_wallet: Wallet):
return self.mint(to_account, to_base_18(value), from_wallet)
def transfer_tokens(self, to: str, value: float, from_wallet: Wallet):
return self.transfer(to, to_base_18(value), from_wallet)
################
# Helpers
@staticmethod
def get_max_fee_percentage():
return DataToken.OPF_FEE_PERCENTAGE + DataToken.MAX_MARKET_FEE_PERCENTAGE
@staticmethod
def calculate_max_fee(amount):
return DataToken.calculate_fee(amount, DataToken.get_max_fee_percentage())
@staticmethod
def calculate_fee(amount, percentage):
return int(amount * to_base_18(percentage) / to_base_18(1.0))
@staticmethod
def calculate_balances(transfers):
_from = [t[0].lower() for t in transfers]
_to = [t[1].lower() for t in transfers]
_value = [t[2] for t in transfers]
a_to_value = dict()
a_to_value.update({a: 0 for a in _from})
a_to_value.update({a: 0 for a in _to})
for i, acc_f in enumerate(_from):
v = int(_value[i])
a_to_value[acc_f] -= v
a_to_value[_to[i]] += v
return a_to_value
def get_info(self, web3, from_block, to_block, include_holders=False):
contract = self.contract_concise
minter = contract.minter()
all_transfers, _ = self.get_all_transfers_from_events(from_block, to_block)
order_logs = self.get_start_order_logs(
web3, from_block=from_block, to_block=to_block
)
holders = []
if include_holders:
a_to_balance = DataToken.calculate_balances(all_transfers)
_min = to_base_18(0.000001)
holders = sorted(
[(a, from_base_18(b)) for a, b in a_to_balance.items() if b > _min],
key=lambda x: x[1],
reverse=True,
)
return {
"address": self.address,
"name": contract.name(),
"symbol": contract.symbol(),
"decimals": contract.decimals(),
"cap": from_base_18(contract.cap()),
"totalSupply": from_base_18(contract.totalSupply()),
"minter": minter,
"minterBalance": self.token_balance(minter),
"numHolders": len(holders),
"holders": holders,
"numOrders": len(order_logs),
}
# ============================================================
# reflect DataToken Solidity methods
def blob(self) -> str:
return self.contract_concise.blob()
def datatoken_name(self) -> str:
return self.contract_concise.name()
def symbol(self) -> str:
return self.contract_concise.symbol()
def cap(self) -> str:
return self.contract_concise.cap()
def decimals(self) -> str:
return self.contract_concise.decimals()
def totalSupply(self) -> str:
return self.contract_concise.totalSupply()
def allowance(self, owner_address: str, spender_address: str) -> str:
return self.contract_concise.allowance(owner_address, spender_address)
def balanceOf(self, account: str) -> int:
return self.contract_concise.balanceOf(account)
def mint(self, to_account: str, value_base: int, from_wallet: Wallet) -> str:
return self.send_transaction("mint", (to_account, value_base), from_wallet)
def approve(self, spender: str, value_base: int, from_wallet: Wallet) -> str:
return self.send_transaction("approve", (spender, value_base), from_wallet)
def transfer(self, to: str, value_base: int, from_wallet: Wallet) -> str:
return self.send_transaction("transfer", (to, value_base), from_wallet)
def proposeMinter(self, new_minter, from_wallet) -> str:
return self.send_transaction("proposeMinter", (new_minter,), from_wallet)
def approveMinter(self, from_wallet) -> str:
return self.send_transaction("approveMinter", (), from_wallet)
def startOrder(
self,
consumer: str,
amount: int,
serviceId: int,
mrktFeeCollector: str,
from_wallet: Wallet,
):
return self.send_transaction(
"startOrder", (consumer, amount, serviceId, mrktFeeCollector), from_wallet
)
def finishOrder(
self,
orderTxId: str,
consumer: str,
amount: int,
serviceId: int,
from_wallet: Wallet,
):
return self.send_transaction(
"finishOrder", (orderTxId, consumer, amount, serviceId), from_wallet
)
| [((25, 14, 28, 1), 'collections.namedtuple', 'namedtuple', ({(26, 4, 26, 17): '"""OrderValues"""', (27, 4, 27, 87): "('consumer', 'amount', 'serviceId', 'startedAt', 'marketFeeCollector',\n 'marketFee')"}, {}), "('OrderValues', ('consumer', 'amount', 'serviceId', 'startedAt',\n 'marketFeeCollector', 'marketFee'))", False, 'from collections import namedtuple\n'), ((35, 23, 35, 46), 'ocean_lib.ocean.util.to_base_18', 'to_base_18', ({(35, 34, 35, 45): 'DEFAULT_CAP'}, {}), '(DEFAULT_CAP)', False, 'from ocean_lib.ocean.util import from_base_18, to_base_18\n'), ((136, 23, 142, 9), 'ocean_lib.web3_internal.event_filter.EventFilter', 'EventFilter', (), '', False, 'from ocean_lib.web3_internal.event_filter import EventFilter\n'), ((157, 13, 157, 36), 'ocean_lib.web3_internal.web3_provider.Web3Provider.get_web3', 'Web3Provider.get_web3', ({}, {}), '()', False, 'from ocean_lib.web3_internal.web3_provider import Web3Provider\n'), ((206, 23, 208, 9), 'ocean_lib.web3_internal.event_filter.EventFilter', 'EventFilter', (), '', False, 'from ocean_lib.web3_internal.event_filter import EventFilter\n'), ((304, 8, 304, 79), 'ocean_lib.data_provider.data_service_provider.DataServiceProvider.write_file', 'DataServiceProvider.write_file', ({(304, 39, 304, 47): 'response', (304, 49, 304, 67): 'destination_folder', (304, 69, 304, 78): 'file_name'}, {}), '(response, destination_folder, file_name)', False, 'from ocean_lib.data_provider.data_service_provider import DataServiceProvider\n'), ((305, 15, 305, 58), 'os.path.join', 'os.path.join', ({(305, 28, 305, 46): 'destination_folder', (305, 48, 305, 57): 'file_name'}, {}), '(destination_folder, file_name)', False, 'import os\n'), ((88, 19, 88, 42), 'ocean_lib.web3_internal.web3_provider.Web3Provider.get_web3', 'Web3Provider.get_web3', ({}, {}), '()', False, 'from ocean_lib.web3_internal.web3_provider import Web3Provider\n'), ((170, 12, 170, 27), 'time.sleep', 'time.sleep', ({(170, 23, 170, 26): '(0.1)'}, {}), '(0.1)', False, 'import time\n'), ((336, 37, 336, 54), 'ocean_lib.ocean.util.to_base_18', 'to_base_18', ({(336, 48, 336, 53): 'value'}, {}), '(value)', False, 'from ocean_lib.ocean.util import from_base_18, to_base_18\n'), ((343, 37, 343, 54), 'ocean_lib.ocean.util.to_base_18', 'to_base_18', ({(343, 48, 343, 53): 'value'}, {}), '(value)', False, 'from ocean_lib.ocean.util import from_base_18, to_base_18\n'), ((346, 33, 346, 50), 'ocean_lib.ocean.util.to_base_18', 'to_base_18', ({(346, 44, 346, 49): 'value'}, {}), '(value)', False, 'from ocean_lib.ocean.util import from_base_18, to_base_18\n'), ((387, 19, 387, 39), 'ocean_lib.ocean.util.to_base_18', 'to_base_18', ({(387, 30, 387, 38): '1e-06'}, {}), '(1e-06)', False, 'from ocean_lib.ocean.util import from_base_18, to_base_18\n'), ((54, 15, 54, 38), 'web3.Web3.sha3', 'Web3.sha3', (), '', False, 'from web3 import Web3\n'), ((80, 31, 80, 60), 'web3.utils.events.get_event_data', 'get_event_data', ({(80, 46, 80, 55): 'event_abi', (80, 57, 80, 59): 'lg'}, {}), '(event_abi, lg)', False, 'from web3.utils.events import get_event_data\n'), ((243, 19, 243, 40), 'eth_utils.remove_0x_prefix', 'remove_0x_prefix', ({(243, 36, 243, 39): 'did'}, {}), '(did)', False, 'from eth_utils import remove_0x_prefix\n'), ((302, 19, 302, 41), 'ocean_utils.http_requests.requests_session.get_requests_session', 'get_requests_session', ({}, {}), '()', False, 'from ocean_utils.http_requests.requests_session import get_requests_session\n'), ((360, 53, 360, 68), 'ocean_lib.ocean.util.to_base_18', 'to_base_18', ({(360, 64, 360, 67): '(1.0)'}, {}), '(1.0)', False, 'from ocean_lib.ocean.util import from_base_18, to_base_18\n'), ((245, 24, 245, 54), 'eth_utils.remove_0x_prefix', 'remove_0x_prefix', ({(245, 41, 245, 53): 'self.address'}, {}), '(self.address)', False, 'from eth_utils import remove_0x_prefix\n'), ((360, 28, 360, 50), 'ocean_lib.ocean.util.to_base_18', 'to_base_18', ({(360, 39, 360, 49): 'percentage'}, {}), '(percentage)', False, 'from ocean_lib.ocean.util import from_base_18, to_base_18\n'), ((288, 17, 288, 44), 'ocean_lib.ocean.util.from_base_18', 'from_base_18', ({(288, 30, 288, 43): 'target_amount'}, {}), '(target_amount)', False, 'from ocean_lib.ocean.util import from_base_18, to_base_18\n'), ((288, 17, 288, 36), 'ocean_lib.ocean.util.from_base_18', 'from_base_18', ({(288, 30, 288, 35): 'total'}, {}), '(total)', False, 'from ocean_lib.ocean.util import from_base_18, to_base_18\n'), ((389, 21, 389, 36), 'ocean_lib.ocean.util.from_base_18', 'from_base_18', ({(389, 34, 389, 35): 'b'}, {}), '(b)', False, 'from ocean_lib.ocean.util import from_base_18, to_base_18\n')] |
chrlen/cgmodsel | cgmodsel/prox.py | 1d7336e173289468d55897b1aa044bf98c3c1a6b | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Frank Nussbaum ([email protected]), 2019
"""
import numpy as np
#import scipy
#import abc
#import time
from scipy.optimize import approx_fprime
from scipy.linalg import eigh
from scipy import optimize
from cgmodsel.utils import _logsumexp_condprobs_red
#from cgmodsel.utils import logsumexp
from cgmodsel.base_solver import BaseGradSolver
# pylint: disable=unbalanced-tuple-unpacking
# pylint: disable=W0511 # todos
# pylint: disable=R0914 # too many locals
###############################################################################
# prox for PLH objective
###############################################################################
class LikelihoodProx(BaseGradSolver):
"""
solve pseudo-log-likelihood proximal operator
"""
def __init__(self, cat_data, cont_data, meta):
""""must provide with dictionary meta"""
super().__init__() # Python 3 syntax
self.cat_data = cat_data
self.cont_data = cont_data
self.meta = meta
self._fold = np.inf
# overridden attributes
ltot = meta['ltot']
n_cg = meta['n_cg']
self.shapes = [
('Q', (ltot, ltot)),
('u', (ltot, 1)),
('R', (n_cg, ltot)),
('F2tiL', (n_cg, n_cg)), # construct Lambda = A * A.T
('alpha', (n_cg, 1))
]
self.n_params = sum([np.prod(shape[1]) for shape in self.shapes])
def clean_theta(self, theta):
"""
make pairwise parameter matrix feasible for likelihood prox solver
-> modifies Theta
"""
# copies upper triangle of Theta to lower triangle to symmetrize
# Furthermore, all elements on the block-diagonal of the discrete
# are set to zero, except diagonal elements
# since these correspond to univariate discrete sufficient statistics
optvars = self._theta_to_x(theta, np.zeros(self.meta['n_cg']))
return self._x_to_thetaalpha(optvars)[0]
###############################################################################
# Solver for Pseudo-likelihood Prox operator
###############################################################################
def callback_plh(self, optvars, handle_fg):
"""callback to check for potential bugs"""
fnew = handle_fg(optvars)[0]
if not fnew <= self._fold:
string = 'Potential scipy bug, fvalue increased in last iteration'
print('Warning(CG_base_ADMM.callback_plh): %s' % (string))
self._fold = fnew
def solve(self, mat_z, prox_param, old_thetaalpha):
""" solve proximal mapping of negative pseudo loglikelihood
min_{Theta, alpha} l_p(Theta, alpha) + 1 / (2mu) * ||Theta-Z||_F^2
known issue with ADMM:
not doing warm starts may cause problems if solution is to inexact
generally ADMM convergence requires very exact solutions
-> use ftol to control tolerancy, or refine to control #restarts
"""
# split Z (since in determining the prox objective
# the split components are used)
ltot = self.meta['ltot']
n_cg = self.meta['n_cg']
zmat_q = mat_z[:ltot, :ltot].copy()
zmat_r = mat_z[ltot:, :ltot]
zmat_b = mat_z[ltot:, ltot:].copy()
zbeta = np.diag(zmat_b).copy().reshape((n_cg, 1))
zmat_b -= np.diag(np.diag(zmat_b))
zvec_u = np.diag(zmat_q).copy().reshape((ltot, 1))
zmat_q -= np.diag(np.diag(zmat_q))
components_z = zmat_q, zvec_u, zmat_r, zmat_b, zbeta
handle_fg = lambda optvars: \
self.get_fval_and_grad(optvars, components_z, prox_param)
## solve proximal mapping
# x0 = self.get_rand_startingpoint()
x_init = self._theta_to_x(*old_thetaalpha)
# starting point as vector, save for input parameters
f_init = handle_fg(x_init)[0]
self._fold = f_init
## bounds that respect identifiability constraints
bnds = ltot**2 * [(-np.inf, np.inf)] # Q, only upper triangle is used
bnds += ltot * [(-np.inf, np.inf)] # u
# TODO(franknu) note: if use_u = 0 this is enforced in main ADMM updates
bnds += (n_cg * ltot + n_cg**2) * [(-np.inf, np.inf)] # R, fac_lambda
if self.opts['use_alpha']:
bnds += n_cg * [(-np.inf, np.inf)]
else:
bnds += n_cg * [(0, 0)]
# TODO(franknu): use zerobounds for block diagonal of Q?
## further solver properties
callback = lambda optvars: self.callback_plh(optvars, handle_fg)
correctionpairs = min(len(bnds) - 1, 10)
res = optimize.minimize(handle_fg,
x_init,
method='L-BFGS-B',
jac=True,
bounds=bnds,
options={
'maxcor': correctionpairs,
'maxiter': self.opts['maxiter'],
'ftol': self.opts['tol']
},
callback=callback)
if not res.message.startswith(b'CONV'): # solver did not converge
print('PLH_prox scipy-solver message:', res.message)
_, _, _, fac_lambda, _ = self.unpack(res.x)
if np.linalg.norm(fac_lambda) < 1e-5 and n_cg > 0:
# TODO(franknu): certificate for optimality?
print('Warning(solve): Lambda = F F^T with F ~ zero')
theta, alpha = self._x_to_thetaalpha(res.x)
return theta, alpha
def preprocess(self, optvars):
""" unpack parameters from vector x and preprocess
this modifies x (x not save for reuse)"""
glims = self.meta['cat_glims']
sizes = self.meta['sizes']
mat_q, vec_u, mat_r, fac_lambda, alpha = self.unpack(optvars) # pylint: disable=unbalanced-tuple-unpacking
for r in range(self.meta['n_cat']): # set block-diagonal to zero
mat_q[glims[r]:glims[r+1], glims[r]:glims[r+1]] = \
np.zeros((sizes[r], sizes[r]))
mat_q = np.triu(mat_q)
mat_q = mat_q + mat_q.T
return mat_q, vec_u, mat_r, fac_lambda, alpha
def get_fval_and_grad(self, optvars, components_z, prox_param, eps=1e-15):
"""calculate function value f and gradient g of
plh(Theta, alpha) + 1 / (2prox_param) ||Theta - Z||_F^2,
where Theta, alpha are contained in the vector x of parameters
"""
ltot = self.meta['ltot']
n_cg = self.meta['n_cg']
n_data = self.meta['n_data']
glims = self.meta['cat_glims']
sizes = self.meta['sizes']
## unpack parameters from vector optvars
mat_q, vec_u, mat_r, fac_lambda, alpha = \
self.preprocess(optvars)
mat_b, beta = self._faclambda_to_bbeta(fac_lambda)
beta += eps * np.ones(beta.shape) # increase numerical instability
# this avoids beta that contains zeros
# precision matrix = FLa*FLa.T + eps * eye(n_cg)
# intitialize gradients
grad = np.zeros(self.n_params)
grad_q, grad_u, grad_r, grad_faclambda, grad_alpha = self.unpack(grad)
grad_tila = np.zeros((n_cg, n_cg))
grad_beta = np.zeros((n_cg, 1))
vec_ones = np.ones((n_data, 1))
## ** discrete node conditionals **
lh_cat = 0
mat_w = np.dot(self.cont_data, mat_r) + np.dot(self.cat_data, mat_q) \
+ np.dot(vec_ones, vec_u.T) # n_data by ltot
cond_probs = np.empty((n_data, ltot)) # conditional probs given data
for r in range(self.meta['n_cat']):
mat_wr = mat_w[:, glims[r]:glims[r + 1]] # view of W
mat_dr = self.cat_data[:, glims[r]:glims[r + 1]] # view
tmp_logsumexp, tmp_conditionalprobs = \
_logsumexp_condprobs_red(mat_wr)
# uses numerically stable exp
cond_probs[:, glims[r]:glims[r + 1]] = tmp_conditionalprobs
lh_catr = -np.sum(np.sum(np.multiply(mat_wr, mat_dr), axis=1) \
- tmp_logsumexp)
lh_cat += lh_catr
# print('lD', lh_cat/n_data)
# gradients
cond_probs = cond_probs - self.cat_data
grad_u = np.sum(cond_probs, 0) # Ltot by 1
grad_r = np.dot(self.cont_data.T, cond_probs)
grad_q = np.dot(self.cat_data.T, cond_probs)
# this is Phihat from the doc, later add transpose and zero out diagonal
## ** Gaussian node conditionals **
mat_m = np.dot(vec_ones, alpha.T) + np.dot(self.cat_data, mat_r.T) \
- np.dot(self.cont_data, mat_b) # n by dg, concatenation of mu_s
mat_delta = mat_m.copy()
for s in range(n_cg):
mat_delta[:, s] /= beta[s]
mat_delta -= self.cont_data # residual
tmp = np.dot(mat_delta, np.diag(np.sqrt(beta.flatten())))
lh_cont = - 0.5 * n_data * np.sum(np.log(beta)) \
+ 0.5 * np.linalg.norm(tmp, 'fro') ** 2
# print('lG', lh_cont/n_data)
# gradients
# grad_tila: n_cg by n_cg, later add transpose and zero out diagonal
grad_tila = -np.dot(self.cont_data.T, mat_delta)
grad_tila -= np.diag(np.diag(grad_tila))
grad_tila = 0.5 * (grad_tila + grad_tila.T)
for s in range(n_cg):
grad_beta[s] = -.5 * n_data / beta[s] + \
.5 * np.linalg.norm(mat_delta[:, s], 2) ** 2 \
- 1 / beta[s] * np.dot(mat_delta[:, s].T, mat_m[:, s])
grad_alpha = np.sum(mat_delta, 0).T # dg by 1
grad_r += np.dot(mat_delta.T, self.cat_data)
# scale gradients as likelihood
grad_q /= n_data
grad_u /= n_data
grad_r /= n_data
grad_tila /= n_data
grad_beta /= n_data
grad_alpha /= n_data
## add quad term 1/2mu * ||([Q+2diag(u)] & R^T \\ R &-Lambda)-Z||_F^2
zmat_q, zvec_u, zmat_r, zmat_b, zbeta = components_z
fsquare = 0
fsquare += np.sum(np.square(mat_q - zmat_q))
fsquare += np.sum(np.square(2 * vec_u - zvec_u))
# note that u is only half of discrete diagonal
fsquare += 2 * np.sum(np.square(mat_r - zmat_r))
fsquare += np.sum(np.square(-mat_b - zmat_b))
# remember neg sign of Lambda in Theta
fsquare += np.sum(np.square(-beta - zbeta))
fsquare /= 2 * prox_param
# print('fsquare', fsquare)
# gradients quadratic term
grad_q += (mat_q - zmat_q) / prox_param
grad_u = grad_u.reshape(
(ltot, 1)) # since with dc=0 gradu has shape (0,)
grad_u += 2 * (2 * vec_u - zvec_u) / prox_param
grad_r += 2 * (mat_r - zmat_r) / prox_param
grad_tila += (mat_b + zmat_b) / prox_param # has zero diagonal
grad_beta += (beta + zbeta) / prox_param
## gradients to only upper triangle
for r in range(self.meta['n_cat']): # set block-diagonal to zero
grad_q[glims[r]:glims[r+1], glims[r]:glims[r+1]] = \
np.zeros((sizes[r], sizes[r]))
grad_q = np.triu(grad_q) + np.tril(grad_q).T
grad_tila += np.diag(grad_beta.flatten()) # add gradient of diagonal
grad_faclambda = 2 * np.dot(grad_tila, fac_lambda)
# note that fac_lambda initialized at 0 always leads to 0 gradient
fval = 1 / n_data * (lh_cat + lh_cont) + fsquare
grad = self.pack((grad_q, grad_u, grad_r, grad_faclambda, grad_alpha))
return fval, grad.reshape(-1)
def callback(self, optvars, component_z, prox_param, approxgrad=1):
"""a callback function that serves primarily for debugging"""
fval, grad = self.get_fval_and_grad(optvars, component_z, prox_param)
print('f=', fval)
if approxgrad: # gradient check
func_handle_f = lambda optvars: \
self.get_fval_and_grad(optvars, component_z, prox_param)[0]
eps = np.sqrt(np.finfo(float).eps) # ~1.49E-08 at my machine
gprox = approx_fprime(optvars, func_handle_f, eps)
diff = grad - gprox
normdiff = np.linalg.norm(diff)
if normdiff > 1e-4:
print('g_exct', grad)
print('g_prox', gprox)
# print('g-gprox',self.unpack(diff))
# print('quot',g/proxg)
print('graddev=', np.linalg.norm(diff))
def _faclambda_to_bbeta(self, fac_lambda):
""" construct precision matrix, then extract diagonal """
mat_b = np.dot(fac_lambda, fac_lambda.T) # PSD precision matrix
beta = np.diag(mat_b).copy().reshape((self.meta['n_cg'], 1)) # diagonal
mat_b -= np.diag(np.diag(mat_b)) # off-diagonal elements
return mat_b, beta
def _theta_to_tuple(self, theta):
""" split Theta into its components
(save: returns copies from data in Theta, Theta is not modified)"""
ltot = self.meta['ltot']
glims = self.meta['cat_glims']
sizes = self.meta['sizes']
mat_q = theta[:ltot, :ltot].copy()
mat_r = theta[ltot:, :ltot].copy()
lbda = -theta[ltot:, ltot:]
# print(Lambda)
# FLa = np.linalg.cholesky(Lambda) # fails if not PD
if self.meta['n_cg'] > 0:
eig, mat_u = eigh(lbda)
# print('las', las)
eig[eig < 1e-16] = 0 # make more robust
fac_lambda = np.dot(mat_u, np.diag(np.sqrt(eig)))
# print('chol-error', np.linalg.norm(np.dot(FLa, FLa.T) - Lambda))
else:
fac_lambda = np.empty((0, 0))
vec_u = 0.5 * np.diag(mat_q).copy().reshape((ltot, 1))
for r in range(self.meta['n_cat']): # set block diagonal to zero
mat_q[glims[r]:glims[r+1], glims[r]:glims[r+1]] = \
np.zeros((sizes[r], sizes[r]))
mat_q = np.triu(mat_q) # use only upper triangle
mat_q = mat_q + mat_q.T
return mat_q, vec_u, mat_r, fac_lambda
def _theta_to_x(self, theta, alpha):
"""takes Theta, cleans it (symmetrize etc.) and pack into x
(save: Theta is not modified)"""
return self.pack(list(self._theta_to_tuple(theta)) + [alpha])
def _x_to_thetaalpha(self, optvars):
""" convert vectorized x to parameter matrix Theta
(save: optvars is not modified) """
mat_q, vec_u, mat_r, fac_lambda, alpha = self.unpack(optvars)
ltot = self.meta['ltot']
glims = self.meta['cat_glims']
sizes = self.meta['sizes']
dim = self.meta['dim']
# set parameters in upper triangle
theta = np.empty((dim, dim))
theta[:ltot, :ltot] = mat_q
for r in range(self.meta['n_cat']): # set block-diagonal to zero
theta[glims[r]:glims[r+1], glims[r]:glims[r+1]] = \
np.zeros((sizes[r], sizes[r]))
theta[:ltot, ltot:] = mat_r.T
## symmetric matrix from upper triangle
theta = np.triu(theta)
theta = theta + theta.T
## Lambda
mat_lbda = np.dot(fac_lambda, fac_lambda.T)
theta[ltot:, ltot:] = -mat_lbda
## add diagonal
theta[:ltot, :ltot] += 2 * np.diag(vec_u.flatten())
return theta, alpha
def get_rand_startingpoint(self):
""" not needed if using warm starts """
n_cg = self.meta['n_cg']
x_init = np.random.random(self.n_params)
x_init[self.n_params - n_cg:] = np.ones(n_cg)
return x_init
def plh(self, theta, alpha, cval=False):
""" return negative pseudo-log-likelihood function value
cval .. if True, calculate (node-wise) cross validation error"""
n_cg = self.meta['n_cg']
n_cat = self.meta['n_cat']
n_data = self.meta['n_data']
glims = self.meta['cat_glims']
if cval:
dis_errors = np.zeros(n_cat)
cts_errors = np.zeros(n_cg)
mat_q, vec_u, mat_r, fac_lambda = self._theta_to_tuple(theta) # save
mat_b, beta = self._faclambda_to_bbeta(fac_lambda)
fval = 0
## ** discrete node conditionals **
mat_w = np.dot(self.cont_data, mat_r) + np.dot(self.cat_data, mat_q) + \
np.dot(np.ones((n_data, 1)), vec_u.T) # n by Ltot
for r in range(n_cat):
mat_wr = mat_w[:, glims[r]:glims[r + 1]] # view of W
mat_dr = self.cat_data[:,
glims[r]:glims[r +
1]] # view of self.cat_data
tmp_logsumexp, tmp_conditionalprobs = \
_logsumexp_condprobs_red(mat_wr) # numerically more stable
if cval:
# sum of probabilities of missclassification
dis_errors[r] = n_data - \
np.sum(np.multiply(tmp_conditionalprobs, mat_dr))
# sum over both axes
lh_catr = - np.sum(np.sum(np.multiply(mat_wr, mat_dr), axis=1) \
- tmp_logsumexp)
fval += 1 / n_data * lh_catr
mat_m = np.dot(self.cat_data, mat_r.T) - \
np.dot(self.cont_data, mat_b) # n by dg, concatenation of mu_s
if n_cg > 0:
mat_m += np.outer(np.ones(n_data), alpha)
if cval:
for s in range(n_cg):
cts_errors[s] = np.linalg.norm(self.cont_data[:, s] \
- mat_m[:, s]/beta[s], 2) ** 2
mat_delta = mat_m.copy()
for s in range(n_cg):
mat_delta[:, s] /= beta[s]
mat_delta -= self.cont_data # residual
tmp = np.dot(mat_delta, np.diag(np.sqrt(beta.flatten())))
lh_cont = - 0.5 * n_data * np.sum(np.log(beta)) \
+ 0.5 * np.linalg.norm(tmp, 'fro') ** 2
fval += 1 / n_data * lh_cont
if cval:
return dis_errors, cts_errors, fval
return fval
def crossvalidate(self, theta, alpha):
"""perform cross validation (drop test data) """
n_cg = self.meta['n_cg']
n_cat = self.meta['n_cat']
n_data = self.meta['n_data']
dis_errors, cts_errors, _ = self.plh(theta, alpha, cval=True)
avg_dis_error = 1 / n_data * np.sum(dis_errors)
avg_cts_error = np.sum([np.sqrt(es / n_data) for es in cts_errors
]) # mean RMSEs
cvalerror = avg_dis_error + avg_cts_error
if n_cg > 0:
avg_cts_error /= n_cg
if n_cat > 0:
avg_dis_error /= n_cat
return cvalerror
| [((132, 14, 142, 50), 'scipy.optimize.minimize', 'optimize.minimize', (), '', False, 'from scipy import optimize\n'), ((168, 16, 168, 30), 'numpy.triu', 'np.triu', ({(168, 24, 168, 29): 'mat_q'}, {}), '(mat_q)', True, 'import numpy as np\n'), ((195, 15, 195, 38), 'numpy.zeros', 'np.zeros', ({(195, 24, 195, 37): 'self.n_params'}, {}), '(self.n_params)', True, 'import numpy as np\n'), ((197, 20, 197, 42), 'numpy.zeros', 'np.zeros', ({(197, 29, 197, 41): '(n_cg, n_cg)'}, {}), '((n_cg, n_cg))', True, 'import numpy as np\n'), ((198, 20, 198, 39), 'numpy.zeros', 'np.zeros', ({(198, 29, 198, 38): '(n_cg, 1)'}, {}), '((n_cg, 1))', True, 'import numpy as np\n'), ((199, 19, 199, 39), 'numpy.ones', 'np.ones', ({(199, 27, 199, 38): '(n_data, 1)'}, {}), '((n_data, 1))', True, 'import numpy as np\n'), ((206, 21, 206, 45), 'numpy.empty', 'np.empty', ({(206, 30, 206, 44): '(n_data, ltot)'}, {}), '((n_data, ltot))', True, 'import numpy as np\n'), ((223, 17, 223, 38), 'numpy.sum', 'np.sum', ({(223, 24, 223, 34): 'cond_probs', (223, 36, 223, 37): '0'}, {}), '(cond_probs, 0)', True, 'import numpy as np\n'), ((224, 17, 224, 53), 'numpy.dot', 'np.dot', ({(224, 24, 224, 40): 'self.cont_data.T', (224, 42, 224, 52): 'cond_probs'}, {}), '(self.cont_data.T, cond_probs)', True, 'import numpy as np\n'), ((225, 17, 225, 52), 'numpy.dot', 'np.dot', ({(225, 24, 225, 39): 'self.cat_data.T', (225, 41, 225, 51): 'cond_probs'}, {}), '(self.cat_data.T, cond_probs)', True, 'import numpy as np\n'), ((253, 18, 253, 52), 'numpy.dot', 'np.dot', ({(253, 25, 253, 36): 'mat_delta.T', (253, 38, 253, 51): 'self.cat_data'}, {}), '(mat_delta.T, self.cat_data)', True, 'import numpy as np\n'), ((328, 16, 328, 48), 'numpy.dot', 'np.dot', ({(328, 23, 328, 33): 'fac_lambda', (328, 35, 328, 47): 'fac_lambda.T'}, {}), '(fac_lambda, fac_lambda.T)', True, 'import numpy as np\n'), ((362, 16, 362, 30), 'numpy.triu', 'np.triu', ({(362, 24, 362, 29): 'mat_q'}, {}), '(mat_q)', True, 'import numpy as np\n'), ((382, 16, 382, 36), 'numpy.empty', 'np.empty', ({(382, 25, 382, 35): '(dim, dim)'}, {}), '((dim, dim))', True, 'import numpy as np\n'), ((390, 16, 390, 30), 'numpy.triu', 'np.triu', ({(390, 24, 390, 29): 'theta'}, {}), '(theta)', True, 'import numpy as np\n'), ((394, 19, 394, 51), 'numpy.dot', 'np.dot', ({(394, 26, 394, 36): 'fac_lambda', (394, 38, 394, 50): 'fac_lambda.T'}, {}), '(fac_lambda, fac_lambda.T)', True, 'import numpy as np\n'), ((405, 17, 405, 48), 'numpy.random.random', 'np.random.random', ({(405, 34, 405, 47): 'self.n_params'}, {}), '(self.n_params)', True, 'import numpy as np\n'), ((406, 40, 406, 53), 'numpy.ones', 'np.ones', ({(406, 48, 406, 52): 'n_cg'}, {}), '(n_cg)', True, 'import numpy as np\n'), ((64, 42, 64, 69), 'numpy.zeros', 'np.zeros', ({(64, 51, 64, 68): "self.meta['n_cg']"}, {}), "(self.meta['n_cg'])", True, 'import numpy as np\n'), ((98, 26, 98, 41), 'numpy.diag', 'np.diag', ({(98, 34, 98, 40): 'zmat_b'}, {}), '(zmat_b)', True, 'import numpy as np\n'), ((100, 26, 100, 41), 'numpy.diag', 'np.diag', ({(100, 34, 100, 40): 'zmat_q'}, {}), '(zmat_q)', True, 'import numpy as np\n'), ((167, 16, 167, 46), 'numpy.zeros', 'np.zeros', ({(167, 25, 167, 45): '(sizes[r], sizes[r])'}, {}), '((sizes[r], sizes[r]))', True, 'import numpy as np\n'), ((190, 22, 190, 41), 'numpy.ones', 'np.ones', ({(190, 30, 190, 40): 'beta.shape'}, {}), '(beta.shape)', True, 'import numpy as np\n'), ((205, 14, 205, 39), 'numpy.dot', 'np.dot', ({(205, 21, 205, 29): 'vec_ones', (205, 31, 205, 38): 'vec_u.T'}, {}), '(vec_ones, vec_u.T)', True, 'import numpy as np\n'), ((211, 16, 211, 48), 'cgmodsel.utils._logsumexp_condprobs_red', '_logsumexp_condprobs_red', ({(211, 41, 211, 47): 'mat_wr'}, {}), '(mat_wr)', False, 'from cgmodsel.utils import _logsumexp_condprobs_red\n'), ((230, 14, 230, 43), 'numpy.dot', 'np.dot', ({(230, 21, 230, 35): 'self.cont_data', (230, 37, 230, 42): 'mat_b'}, {}), '(self.cont_data, mat_b)', True, 'import numpy as np\n'), ((243, 21, 243, 56), 'numpy.dot', 'np.dot', ({(243, 28, 243, 44): 'self.cont_data.T', (243, 46, 243, 55): 'mat_delta'}, {}), '(self.cont_data.T, mat_delta)', True, 'import numpy as np\n'), ((244, 29, 244, 47), 'numpy.diag', 'np.diag', ({(244, 37, 244, 46): 'grad_tila'}, {}), '(grad_tila)', True, 'import numpy as np\n'), ((252, 21, 252, 41), 'numpy.sum', 'np.sum', ({(252, 28, 252, 37): 'mat_delta', (252, 39, 252, 40): '(0)'}, {}), '(mat_delta, 0)', True, 'import numpy as np\n'), ((267, 26, 267, 51), 'numpy.square', 'np.square', ({(267, 36, 267, 50): '(mat_q - zmat_q)'}, {}), '(mat_q - zmat_q)', True, 'import numpy as np\n'), ((268, 26, 268, 55), 'numpy.square', 'np.square', ({(268, 36, 268, 54): '(2 * vec_u - zvec_u)'}, {}), '(2 * vec_u - zvec_u)', True, 'import numpy as np\n'), ((271, 26, 271, 52), 'numpy.square', 'np.square', ({(271, 36, 271, 51): '(-mat_b - zmat_b)'}, {}), '(-mat_b - zmat_b)', True, 'import numpy as np\n'), ((273, 26, 273, 50), 'numpy.square', 'np.square', ({(273, 36, 273, 49): '(-beta - zbeta)'}, {}), '(-beta - zbeta)', True, 'import numpy as np\n'), ((291, 16, 291, 46), 'numpy.zeros', 'np.zeros', ({(291, 25, 291, 45): '(sizes[r], sizes[r])'}, {}), '((sizes[r], sizes[r]))', True, 'import numpy as np\n'), ((293, 17, 293, 32), 'numpy.triu', 'np.triu', ({(293, 25, 293, 31): 'grad_q'}, {}), '(grad_q)', True, 'import numpy as np\n'), ((297, 29, 297, 58), 'numpy.dot', 'np.dot', ({(297, 36, 297, 45): 'grad_tila', (297, 47, 297, 57): 'fac_lambda'}, {}), '(grad_tila, fac_lambda)', True, 'import numpy as np\n'), ((314, 20, 314, 62), 'scipy.optimize.approx_fprime', 'approx_fprime', ({(314, 34, 314, 41): 'optvars', (314, 43, 314, 56): 'func_handle_f', (314, 58, 314, 61): 'eps'}, {}), '(optvars, func_handle_f, eps)', False, 'from scipy.optimize import approx_fprime\n'), ((317, 23, 317, 43), 'numpy.linalg.norm', 'np.linalg.norm', ({(317, 38, 317, 42): 'diff'}, {}), '(diff)', True, 'import numpy as np\n'), ((330, 25, 330, 39), 'numpy.diag', 'np.diag', ({(330, 33, 330, 38): 'mat_b'}, {}), '(mat_b)', True, 'import numpy as np\n'), ((347, 25, 347, 35), 'scipy.linalg.eigh', 'eigh', ({(347, 30, 347, 34): 'lbda'}, {}), '(lbda)', False, 'from scipy.linalg import eigh\n'), ((355, 25, 355, 41), 'numpy.empty', 'np.empty', ({(355, 34, 355, 40): '(0, 0)'}, {}), '((0, 0))', True, 'import numpy as np\n'), ((361, 16, 361, 46), 'numpy.zeros', 'np.zeros', ({(361, 25, 361, 45): '(sizes[r], sizes[r])'}, {}), '((sizes[r], sizes[r]))', True, 'import numpy as np\n'), ((386, 16, 386, 46), 'numpy.zeros', 'np.zeros', ({(386, 25, 386, 45): '(sizes[r], sizes[r])'}, {}), '((sizes[r], sizes[r]))', True, 'import numpy as np\n'), ((418, 25, 418, 40), 'numpy.zeros', 'np.zeros', ({(418, 34, 418, 39): 'n_cat'}, {}), '(n_cat)', True, 'import numpy as np\n'), ((419, 25, 419, 39), 'numpy.zeros', 'np.zeros', ({(419, 34, 419, 38): 'n_cg'}, {}), '(n_cg)', True, 'import numpy as np\n'), ((436, 16, 436, 48), 'cgmodsel.utils._logsumexp_condprobs_red', '_logsumexp_condprobs_red', ({(436, 41, 436, 47): 'mat_wr'}, {}), '(mat_wr)', False, 'from cgmodsel.utils import _logsumexp_condprobs_red\n'), ((447, 16, 447, 46), 'numpy.dot', 'np.dot', ({(447, 23, 447, 36): 'self.cat_data', (447, 38, 447, 45): 'mat_r.T'}, {}), '(self.cat_data, mat_r.T)', True, 'import numpy as np\n'), ((448, 12, 448, 41), 'numpy.dot', 'np.dot', ({(448, 19, 448, 33): 'self.cont_data', (448, 35, 448, 40): 'mat_b'}, {}), '(self.cont_data, mat_b)', True, 'import numpy as np\n'), ((478, 37, 478, 55), 'numpy.sum', 'np.sum', ({(478, 44, 478, 54): 'dis_errors'}, {}), '(dis_errors)', True, 'import numpy as np\n'), ((52, 29, 52, 46), 'numpy.prod', 'np.prod', ({(52, 37, 52, 45): 'shape[1]'}, {}), '(shape[1])', True, 'import numpy as np\n'), ((148, 11, 148, 37), 'numpy.linalg.norm', 'np.linalg.norm', ({(148, 26, 148, 36): 'fac_lambda'}, {}), '(fac_lambda)', True, 'import numpy as np\n'), ((204, 16, 204, 45), 'numpy.dot', 'np.dot', ({(204, 23, 204, 37): 'self.cont_data', (204, 39, 204, 44): 'mat_r'}, {}), '(self.cont_data, mat_r)', True, 'import numpy as np\n'), ((204, 48, 204, 76), 'numpy.dot', 'np.dot', ({(204, 55, 204, 68): 'self.cat_data', (204, 70, 204, 75): 'mat_q'}, {}), '(self.cat_data, mat_q)', True, 'import numpy as np\n'), ((229, 16, 229, 41), 'numpy.dot', 'np.dot', ({(229, 23, 229, 31): 'vec_ones', (229, 33, 229, 40): 'alpha.T'}, {}), '(vec_ones, alpha.T)', True, 'import numpy as np\n'), ((229, 44, 229, 74), 'numpy.dot', 'np.dot', ({(229, 51, 229, 64): 'self.cat_data', (229, 66, 229, 73): 'mat_r.T'}, {}), '(self.cat_data, mat_r.T)', True, 'import numpy as np\n'), ((270, 30, 270, 55), 'numpy.square', 'np.square', ({(270, 40, 270, 54): '(mat_r - zmat_r)'}, {}), '(mat_r - zmat_r)', True, 'import numpy as np\n'), ((293, 35, 293, 50), 'numpy.tril', 'np.tril', ({(293, 43, 293, 49): 'grad_q'}, {}), '(grad_q)', True, 'import numpy as np\n'), ((324, 30, 324, 50), 'numpy.linalg.norm', 'np.linalg.norm', ({(324, 45, 324, 49): 'diff'}, {}), '(diff)', True, 'import numpy as np\n'), ((427, 16, 427, 45), 'numpy.dot', 'np.dot', ({(427, 23, 427, 37): 'self.cont_data', (427, 39, 427, 44): 'mat_r'}, {}), '(self.cont_data, mat_r)', True, 'import numpy as np\n'), ((427, 48, 427, 76), 'numpy.dot', 'np.dot', ({(427, 55, 427, 68): 'self.cat_data', (427, 70, 427, 75): 'mat_q'}, {}), '(self.cat_data, mat_q)', True, 'import numpy as np\n'), ((428, 19, 428, 39), 'numpy.ones', 'np.ones', ({(428, 27, 428, 38): '(n_data, 1)'}, {}), '((n_data, 1))', True, 'import numpy as np\n'), ((450, 30, 450, 45), 'numpy.ones', 'np.ones', ({(450, 38, 450, 44): 'n_data'}, {}), '(n_data)', True, 'import numpy as np\n'), ((479, 32, 479, 52), 'numpy.sqrt', 'np.sqrt', ({(479, 40, 479, 51): 'es / n_data'}, {}), '(es / n_data)', True, 'import numpy as np\n'), ((237, 42, 237, 54), 'numpy.log', 'np.log', ({(237, 49, 237, 53): 'beta'}, {}), '(beta)', True, 'import numpy as np\n'), ((238, 20, 238, 46), 'numpy.linalg.norm', 'np.linalg.norm', ({(238, 35, 238, 38): 'tmp', (238, 40, 238, 45): '"""fro"""'}, {}), "(tmp, 'fro')", True, 'import numpy as np\n'), ((250, 28, 250, 66), 'numpy.dot', 'np.dot', ({(250, 35, 250, 52): 'mat_delta[:, (s)].T', (250, 54, 250, 65): 'mat_m[:, (s)]'}, {}), '(mat_delta[:, (s)].T, mat_m[:, (s)])', True, 'import numpy as np\n'), ((313, 26, 313, 41), 'numpy.finfo', 'np.finfo', ({(313, 35, 313, 40): 'float'}, {}), '(float)', True, 'import numpy as np\n'), ((350, 47, 350, 59), 'numpy.sqrt', 'np.sqrt', ({(350, 55, 350, 58): 'eig'}, {}), '(eig)', True, 'import numpy as np\n'), ((454, 32, 455, 51), 'numpy.linalg.norm', 'np.linalg.norm', ({(454, 47, 455, 47): '(self.cont_data[:, (s)] - mat_m[:, (s)] / beta[s])', (455, 49, 455, 50): '(2)'}, {}), '(self.cont_data[:, (s)] - mat_m[:, (s)] / beta[s], 2)', True, 'import numpy as np\n'), ((462, 42, 462, 54), 'numpy.log', 'np.log', ({(462, 49, 462, 53): 'beta'}, {}), '(beta)', True, 'import numpy as np\n'), ((463, 20, 463, 46), 'numpy.linalg.norm', 'np.linalg.norm', ({(463, 35, 463, 38): 'tmp', (463, 40, 463, 45): '"""fro"""'}, {}), "(tmp, 'fro')", True, 'import numpy as np\n'), ((97, 16, 97, 31), 'numpy.diag', 'np.diag', ({(97, 24, 97, 30): 'zmat_b'}, {}), '(zmat_b)', True, 'import numpy as np\n'), ((99, 17, 99, 32), 'numpy.diag', 'np.diag', ({(99, 25, 99, 31): 'zmat_q'}, {}), '(zmat_q)', True, 'import numpy as np\n'), ((329, 15, 329, 29), 'numpy.diag', 'np.diag', ({(329, 23, 329, 28): 'mat_b'}, {}), '(mat_b)', True, 'import numpy as np\n'), ((440, 27, 440, 68), 'numpy.multiply', 'np.multiply', ({(440, 39, 440, 59): 'tmp_conditionalprobs', (440, 61, 440, 67): 'mat_dr'}, {}), '(tmp_conditionalprobs, mat_dr)', True, 'import numpy as np\n'), ((214, 37, 214, 64), 'numpy.multiply', 'np.multiply', ({(214, 49, 214, 55): 'mat_wr', (214, 57, 214, 63): 'mat_dr'}, {}), '(mat_wr, mat_dr)', True, 'import numpy as np\n'), ((249, 21, 249, 55), 'numpy.linalg.norm', 'np.linalg.norm', ({(249, 36, 249, 51): 'mat_delta[:, (s)]', (249, 53, 249, 54): '(2)'}, {}), '(mat_delta[:, (s)], 2)', True, 'import numpy as np\n'), ((357, 22, 357, 36), 'numpy.diag', 'np.diag', ({(357, 30, 357, 35): 'mat_q'}, {}), '(mat_q)', True, 'import numpy as np\n'), ((443, 38, 443, 65), 'numpy.multiply', 'np.multiply', ({(443, 50, 443, 56): 'mat_wr', (443, 58, 443, 64): 'mat_dr'}, {}), '(mat_wr, mat_dr)', True, 'import numpy as np\n')] |
MrShoks/OpenMDAO-Framework | openmdao.main/src/openmdao/main/linearsolver.py | 412f34ffe31a95631fbe55ca7d75b84669ae8f8c | """ Linear solvers that are used to solve for the gradient of an OpenMDAO System.
(Not to be confused with the OpenMDAO Solver classes.)
"""
# pylint: disable=E0611, F0401
import numpy as np
from scipy.sparse.linalg import gmres, LinearOperator
from openmdao.main.mpiwrap import MPI
from openmdao.util.graph import fix_single_tuple
from openmdao.util.log import logger
if MPI:
from petsc4py import PETSc
else:
class PETSc(object):
# Dummy class so things parse.
pass
class LinearSolver(object):
""" A base class for linear solvers """
def __init__(self, system):
""" Set up any LinearSolver object """
self._system = system
self.options = system.options
def _norm(self):
""" Computes the norm of the linear residual """
system = self._system
system.rhs_vec.array[:] = 0.0
system.applyJ(system.vector_vars.keys())
system.rhs_vec.array[:] *= -1.0
system.rhs_vec.array[:] += system.rhs_buf[:]
if MPI:
system.rhs_vec.petsc.assemble()
return system.rhs_vec.petsc.norm()
else:
return np.linalg.norm(system.rhs_vec.array)
class ScipyGMRES(LinearSolver):
""" Scipy's GMRES Solver. This is a serial solver, so
it should never be used in an MPI setting.
"""
def __init__(self, system):
""" Set up ScipyGMRES object """
super(ScipyGMRES, self).__init__(system)
n_edge = system.vec['f'].array.size
system.rhs_buf = np.zeros((n_edge, ))
system.sol_buf = np.zeros((n_edge, ))
self.A = LinearOperator((n_edge, n_edge),
matvec=self.mult,
dtype=float)
def calc_gradient(self, inputs, outputs, return_format='array'):
""" Run GMRES solver to return a Jacobian of outputs
with respect to inputs.
"""
system = self._system
RHS = system.rhs_buf
A = self.A
# Size the problem
num_input = system.get_size(inputs)
num_output = system.get_size(outputs)
if return_format == 'dict':
J = {}
for okey in outputs:
J[okey] = {}
for ikey in inputs:
if isinstance(ikey, tuple):
ikey = ikey[0]
J[okey][ikey] = None
else:
J = np.zeros((num_output, num_input))
if system.mode == 'adjoint':
outputs, inputs = inputs, outputs
# If Forward mode, solve linear system for each parameter
# If Adjoint mode, solve linear system for each requested output
j = 0
for param in inputs:
if isinstance(param, tuple):
param = param[0]
in_indices = system.vec['u'].indices(system.scope, param)
jbase = j
for irhs in in_indices:
RHS[irhs] = 1.0
# Call GMRES to solve the linear system
dx = self.solve(RHS)
RHS[irhs] = 0.0
i = 0
for item in outputs:
if isinstance(item, tuple):
item = item[0]
out_indices = system.vec['u'].indices(system.scope, item)
nk = len(out_indices)
if return_format == 'dict':
if system.mode == 'forward':
if J[item][param] is None:
J[item][param] = np.zeros((nk, len(in_indices)))
J[item][param][:, j-jbase] = dx[out_indices]
else:
if J[param][item] is None:
J[param][item] = np.zeros((len(in_indices), nk))
J[param][item][j-jbase, :] = dx[out_indices]
else:
if system.mode == 'forward':
J[i:i+nk, j] = dx[out_indices]
else:
J[j, i:i+nk] = dx[out_indices]
i += nk
j += 1
#print inputs, '\n', outputs, '\n', J
return J
def solve(self, arg):
""" Solve the coupled equations for a new state vector that nulls the
residual. Used by the Newton solvers."""
system = self._system
options = self.options
A = self.A
#print system.name, 'Linear solution start vec', system.rhs_vec.array
# Call GMRES to solve the linear system
dx, info = gmres(A, arg,
tol=options.atol,
maxiter=options.maxiter)
if info > 0:
msg = "ERROR in calc_gradient in '%s': gmres failed to converge " \
"after %d iterations"
logger.error(msg, system.name, info)
elif info < 0:
msg = "ERROR in calc_gradient in '%s': gmres failed"
logger.error(msg, system.name)
#print system.name, 'Linear solution vec', -dx
return dx
def mult(self, arg):
""" GMRES Callback: applies Jacobian matrix. Mode is determined by the
system."""
system = self._system
system.sol_vec.array[:] = arg[:]
# Start with a clean slate
system.rhs_vec.array[:] = 0.0
system.clear_dp()
if system._parent_system:
vnames = system._parent_system._relevant_vars
else:
vnames = system.flat_vars.keys()
system.applyJ(vnames)
#print system.name, 'mult: arg, result', arg, system.rhs_vec.array[:]
#print system.rhs_vec.keys()
return system.rhs_vec.array[:]
class PETSc_KSP(LinearSolver):
""" PETSc's KSP solver with preconditioning. MPI is supported."""
def __init__(self, system):
""" Set up KSP object """
super(PETSc_KSP, self).__init__(system)
lsize = np.sum(system.local_var_sizes[system.mpi.rank, :])
size = np.sum(system.local_var_sizes)
jac_mat = PETSc.Mat().createPython([(lsize, size), (lsize, size)],
comm=system.mpi.comm)
jac_mat.setPythonContext(self)
jac_mat.setUp()
self.ksp = PETSc.KSP().create(comm=system.mpi.comm)
self.ksp.setOperators(jac_mat)
self.ksp.setType('fgmres')
self.ksp.setGMRESRestart(1000)
self.ksp.setPCSide(PETSc.PC.Side.RIGHT)
pc_mat = self.ksp.getPC()
pc_mat.setType('python')
pc_mat.setPythonContext(self)
# # Set these in the system
# #mpiprint("KSP: creating sol buf, size %d" % lsize)
system.sol_buf = PETSc.Vec().createWithArray(np.zeros(lsize),
comm=system.mpi.comm)
# #mpiprint("KSP: creating rhs buf, size %d" % lsize)
system.rhs_buf = PETSc.Vec().createWithArray(np.zeros(lsize),
comm=system.mpi.comm)
def calc_gradient(self, inputs, outputs, return_format='dict'):
"""Returns a nested dict of sensitivities if return_format == 'dict'.
"""
if return_format == 'dict':
return self._J_dict_solve(inputs, outputs)
else:
raise RuntimeError("unsupported solve return_format '%s'" % return_format)
def _J_dict_solve(self, inputs, outputs):
"""Returns a dict of sensitivities for given
inputs and outputs.
"""
system = self._system
options = self.options
name2collapsed = system.scope.name2collapsed
inputs = [fix_single_tuple(x) for x in inputs]
outputs = [fix_single_tuple(x) for x in outputs]
J = {}
for okey in outputs:
J[okey] = {}
for ikey in inputs:
J[okey][ikey] = None
if system.mode == 'adjoint':
outputs, inputs = inputs, outputs
self.ksp.setTolerances(max_it=options.maxiter,
atol=options.atol,
rtol=options.rtol)
j = 0
for param in inputs:
param_tup = name2collapsed[param]
param_size = system.get_size(param)
jbase = j
for irhs in xrange(param_size):
solvec = system._compute_derivatives(param_tup, irhs)
for out in outputs:
out_size = system.get_size(out)
if system.mode == 'forward':
if out in solvec:
if J[out][param] is None:
J[out][param] = np.zeros((out_size, param_size))
J[out][param][:, j-jbase] = solvec[out]
else:
del J[out][param]
else:
if out in solvec:
if J[param][out] is None:
J[param][out] = np.zeros((out_size, param_size))
J[param][out][j-jbase, :] = solvec[out]
else:
del J[param][out]
j += 1
return J
def newton(self):
""" Solve the coupled equations for a new state vector that nulls the
residual. Used by the Newton solvers."""
system = self._system
options = self.options
self.ksp.setTolerances(max_it=options.maxiter,
atol=options.atol,
rtol=options.rtol)
system.rhs_vec.array[:] = system.vec['f'].array[:]
#print 'newton start vec', system.vec['f'].array[:]
system.sol_buf.array[:] = system.sol_vec.array[:]
system.rhs_buf.array[:] = system.rhs_vec.array[:]
system.ln_solver.ksp.solve(system.rhs_buf, system.sol_buf)
system.vec['df'].array[:] = -system.sol_buf.array[:]
#print 'newton solution vec', system.vec['df'].array[:]
def mult(self, mat, sol_vec, rhs_vec):
""" KSP Callback: applies Jacobian matrix. Mode is determined by the
system."""
system = self._system
system.sol_vec.array[:] = sol_vec.array[:]
# Start with a clean slate
system.rhs_vec.array[:] = 0.0
system.clear_dp()
system.applyJ(system.vector_vars.keys())
rhs_vec.array[:] = system.rhs_vec.array[:]
# mpiprint('names = %s' % system.sol_vec.keys())
#mpiprint('arg = %s, result=%s' % (sol_vec.array, rhs_vec.array))
#mpiprint('df, du, dp', system.vec['df'].array, system.vec['du'].array, system.vec['dp'].array)
def apply(self, mat, sol_vec, rhs_vec):
""" Applies preconditioner """
#system = self._system
# TODO - Preconditioning is not supported yet, so mimic an Identity
# matrix.
rhs_vec.array[:] = sol_vec.array[:]
#system.rhs_vec.array[:] = sol_vec.array[:]
#system.solve_precon()
#rhs_vec.array[:] = system.sol_vec.array[:]
class LinearGS(LinearSolver):
""" Linear block Gauss Seidel. MPI is not supported yet.
Serial block solve of D x = b - (L+U) x """
def __init__(self, system):
""" Set up LinearGS object """
super(LinearGS, self).__init__(system)
lsize = np.sum(system.local_var_sizes[system.mpi.rank, :])
system.sol_buf = np.zeros(lsize)
system.rhs_buf = np.zeros(lsize)
def calc_gradient(self, inputs, outputs, return_format='array'):
""" Run GMRES solver to return a Jacobian of outputs
with respect to inputs.
"""
system = self._system
# Size the problem
# TODO - Support for array slice inputs/outputs
try:
num_input = system.get_size(inputs)
num_output = system.get_size(outputs)
except KeyError as exc:
if '[' in str(exc):
msg = 'Array slice inputs and outputs currently not supported.'
raise RuntimeError(msg)
else:
raise
n_edge = system.vec['f'].array.size
if return_format == 'dict':
J = {}
for okey in outputs:
J[okey] = {}
for ikey in inputs:
if isinstance(ikey, tuple):
ikey = ikey[0]
J[okey][ikey] = None
else:
J = np.zeros((num_output, num_input))
if system.mode == 'adjoint':
outputs, inputs = inputs, outputs
# If Forward mode, solve linear system for each parameter
# If Reverse mode, solve linear system for each requested output
j = 0
for param in inputs:
if isinstance(param, tuple):
param = param[0]
in_indices = system.vec['u'].indices(system.scope, param)
jbase = j
for irhs in in_indices:
system.clear_dp()
system.sol_vec.array[:] = 0.0
system.rhs_vec.array[:] = 0.0
system.rhs_vec.array[irhs] = 1.0
# Perform LinearGS solve
dx = self.solve(system.rhs_vec.array)
#system.rhs_vec.array[irhs] = 0.0
i = 0
for item in outputs:
if isinstance(item, tuple):
item = item[0]
out_indices = system.vec['u'].indices(system.scope, item)
nk = len(out_indices)
if return_format == 'dict':
if system.mode == 'forward':
if J[item][param] is None:
J[item][param] = np.zeros((nk, len(in_indices)))
J[item][param][:, j-jbase] = dx[out_indices]
else:
if J[param][item] is None:
J[param][item] = np.zeros((len(in_indices), nk))
J[param][item][j-jbase, :] = dx[out_indices]
else:
if system.mode == 'forward':
J[i:i+nk, j] = dx[out_indices]
else:
J[j, i:i+nk] = dx[out_indices]
i += nk
j += 1
#print inputs, '\n', outputs, '\n', J
return J
def solve(self, arg):
""" Executes an iterative solver """
system = self._system
system.rhs_buf[:] = arg[:]
system.sol_buf[:] = system.sol_vec.array[:]
options = self.options
system = self._system
norm0, norm = 1.0, 1.0
counter = 0
while counter < options.maxiter and norm > options.atol and \
norm/norm0 > options.rtol:
if system.mode == 'forward':
for subsystem in system.subsystems(local=True):
system.scatter('du', 'dp', subsystem=subsystem)
system.rhs_vec.array[:] = 0.0
subsystem.applyJ(system.vector_vars.keys())
system.rhs_vec.array[:] *= -1.0
system.rhs_vec.array[:] += system.rhs_buf[:]
sub_options = options if subsystem.options is None \
else subsystem.options
subsystem.solve_linear(sub_options)
elif system.mode == 'adjoint':
rev_systems = [item for item in reversed(system.subsystems(local=True))]
for subsystem in rev_systems:
#print '1)', system.name, subsystem.name
#print 'T0', system.vec['df'].array[:], system.vec['du'].array[:], system.vec['dp'].array[:]
system.sol_buf[:] = system.rhs_buf[:]
#print 'T1', system.vec['df'].array[:], system.vec['du'].array[:], system.vec['dp'].array[:]
for subsystem2 in rev_systems:
if subsystem is not subsystem2:
#print '2)', subsystem2.name, subsystem.name
system.rhs_vec.array[:] = 0.0
args = subsystem.vector_vars.keys()
#print 'T2', system.vec['df'].array[:], system.vec['du'].array[:], system.vec['dp'].array[:]
subsystem2.applyJ(args)
#print 'T3', system.vec['df'].array[:], system.vec['du'].array[:], system.vec['dp'].array[:]
system.scatter('du', 'dp', subsystem=subsystem2)
#print 'T4', system.vec['df'].array[:], system.vec['du'].array[:], system.vec['dp'].array[:]
system.vec['dp'].array[:] = 0.0
system.sol_buf[:] -= system.rhs_vec.array[:]
#print 'T5', system.vec['df'].array[:], system.vec['du'].array[:], system.vec['dp'].array[:]
system.rhs_vec.array[:] = system.sol_buf[:]
#print 'T6', system.vec['df'].array[:], system.vec['du'].array[:], system.vec['dp'].array[:]
subsystem.solve_linear(options)
#print 'T7', system.vec['df'].array[:], system.vec['du'].array[:], system.vec['dp'].array[:]
norm = self._norm()
counter += 1
#print 'return', options.parent.name, np.linalg.norm(system.rhs_vec.array), system.rhs_vec.array
#print 'Linear solution vec', system.sol_vec.array
return system.sol_vec.array
| [((55, 25, 55, 45), 'numpy.zeros', 'np.zeros', ({(55, 34, 55, 44): '(n_edge,)'}, {}), '((n_edge,))', True, 'import numpy as np\n'), ((56, 25, 56, 45), 'numpy.zeros', 'np.zeros', ({(56, 34, 56, 44): '(n_edge,)'}, {}), '((n_edge,))', True, 'import numpy as np\n'), ((57, 17, 59, 44), 'scipy.sparse.linalg.LinearOperator', 'LinearOperator', (), '', False, 'from scipy.sparse.linalg import gmres, LinearOperator\n'), ((150, 19, 152, 49), 'scipy.sparse.linalg.gmres', 'gmres', (), '', False, 'from scipy.sparse.linalg import gmres, LinearOperator\n'), ((195, 16, 195, 66), 'numpy.sum', 'np.sum', ({(195, 23, 195, 65): 'system.local_var_sizes[(system.mpi.rank), :]'}, {}), '(system.local_var_sizes[(system.mpi.rank), :])', True, 'import numpy as np\n'), ((196, 15, 196, 45), 'numpy.sum', 'np.sum', ({(196, 22, 196, 44): 'system.local_var_sizes'}, {}), '(system.local_var_sizes)', True, 'import numpy as np\n'), ((348, 16, 348, 66), 'numpy.sum', 'np.sum', ({(348, 23, 348, 65): 'system.local_var_sizes[(system.mpi.rank), :]'}, {}), '(system.local_var_sizes[(system.mpi.rank), :])', True, 'import numpy as np\n'), ((350, 25, 350, 40), 'numpy.zeros', 'np.zeros', ({(350, 34, 350, 39): 'lsize'}, {}), '(lsize)', True, 'import numpy as np\n'), ((351, 25, 351, 40), 'numpy.zeros', 'np.zeros', ({(351, 34, 351, 39): 'lsize'}, {}), '(lsize)', True, 'import numpy as np\n'), ((41, 19, 41, 55), 'numpy.linalg.norm', 'np.linalg.norm', ({(41, 34, 41, 54): 'system.rhs_vec.array'}, {}), '(system.rhs_vec.array)', True, 'import numpy as np\n'), ((83, 16, 83, 49), 'numpy.zeros', 'np.zeros', ({(83, 25, 83, 48): '(num_output, num_input)'}, {}), '((num_output, num_input))', True, 'import numpy as np\n'), ((157, 12, 157, 48), 'openmdao.util.log.logger.error', 'logger.error', ({(157, 25, 157, 28): 'msg', (157, 30, 157, 41): 'system.name', (157, 43, 157, 47): 'info'}, {}), '(msg, system.name, info)', False, 'from openmdao.util.log import logger\n'), ((214, 53, 214, 68), 'numpy.zeros', 'np.zeros', ({(214, 62, 214, 67): 'lsize'}, {}), '(lsize)', True, 'import numpy as np\n'), ((217, 53, 217, 68), 'numpy.zeros', 'np.zeros', ({(217, 62, 217, 67): 'lsize'}, {}), '(lsize)', True, 'import numpy as np\n'), ((237, 18, 237, 37), 'openmdao.util.graph.fix_single_tuple', 'fix_single_tuple', ({(237, 35, 237, 36): 'x'}, {}), '(x)', False, 'from openmdao.util.graph import fix_single_tuple\n'), ((238, 19, 238, 38), 'openmdao.util.graph.fix_single_tuple', 'fix_single_tuple', ({(238, 36, 238, 37): 'x'}, {}), '(x)', False, 'from openmdao.util.graph import fix_single_tuple\n'), ((383, 16, 383, 49), 'numpy.zeros', 'np.zeros', ({(383, 25, 383, 48): '(num_output, num_input)'}, {}), '((num_output, num_input))', True, 'import numpy as np\n'), ((160, 12, 160, 42), 'openmdao.util.log.logger.error', 'logger.error', ({(160, 25, 160, 28): 'msg', (160, 30, 160, 41): 'system.name'}, {}), '(msg, system.name)', False, 'from openmdao.util.log import logger\n'), ((197, 18, 197, 29), 'petsc4py.PETSc.Mat', 'PETSc.Mat', ({}, {}), '()', False, 'from petsc4py import PETSc\n'), ((202, 19, 202, 30), 'petsc4py.PETSc.KSP', 'PETSc.KSP', ({}, {}), '()', False, 'from petsc4py import PETSc\n'), ((214, 25, 214, 36), 'petsc4py.PETSc.Vec', 'PETSc.Vec', ({}, {}), '()', False, 'from petsc4py import PETSc\n'), ((217, 25, 217, 36), 'petsc4py.PETSc.Vec', 'PETSc.Vec', ({}, {}), '()', False, 'from petsc4py import PETSc\n'), ((269, 48, 269, 80), 'numpy.zeros', 'np.zeros', ({(269, 57, 269, 79): '(out_size, param_size)'}, {}), '((out_size, param_size))', True, 'import numpy as np\n'), ((276, 48, 276, 80), 'numpy.zeros', 'np.zeros', ({(276, 57, 276, 79): '(out_size, param_size)'}, {}), '((out_size, param_size))', True, 'import numpy as np\n')] |
VivianaEloisa/Viviana_first_repo | Temperatures.py | d132ffdcda8e2c3cd5673cfa86fecc1337697fd0 | def fahr_to_cels(a):
return (a-32)/1.8
def cels_to_fahr(b):
return(b*1.8)+32
c=50
d=10
print("{0} °F is {1}°C.".format(c,fahr_to_cels(c)))
print("{0}°C is {1}°F.".format(d,cels_to_fahr(d)))
| [] |
Andres-CS/wallet-analysis | src/transform.py | 822b8b900a91ab7a2fd76743f174d320e45e98c9 | import csv
import re
'''
Delete char in substring of original string.
Used this function when, you want to delete
a character in a substring but not in the
rest of the original string.
Returns a string
-- PARAMETERS --
text: original string
start: start of subString
end: end of subString
char: char to delete, default is ','.
'''
def deleteInSubString(text, start, end, char=','):
subText = text[start:(end+1)]
commaPos = subText.find(char)
if commaPos >= 0:
subText = subText[:commaPos]+""+subText[commaPos+1:]
text = text[:start]+subText+text[end+1:]
return text
return text
'''
Get the position of the Description Column.
Loops through String and finds the first set
of enclosing quotes.
Returns array with initial and closing position.
-- PARAMETERS --
txt: string to loop
'''
def DescriptionColumn_Range(txt):
count = 0
pos=list()
for i in range(len(txt)):
if txt[i] == '"':
pos.append(i)
count += 1
if count == 2:
return pos
'''
Adds a delimiter
Returns a new string with the delimiter
added.
-- PARAMETERS --
text: string to be modified
delimiter: char or string to be inserted
flad: b - before target
a - after target
target: substring where delimiter will be
inserted
'''
def addDelimiter(text,delimiter,flag,target):
pos = text.find(target)
if not pos == -1:
if flag == "b":
text = text[:pos]+delimiter+text[pos:]
else:
offset = len(text[:pos])+len(target)
text = text[:offset+1]+delimiter+text[offset+1:]
return text
'''
Clean up of Description Column
Inital draft of data clean up on the
description column.
Removal of extra commas and 'garbage' data
Returns a string
-- PARAMETERS --
data: string
'''
def clean_Description_Column(data):
#Replace data 00/00 for ,
data = re.sub("[0-9]{2}\/[0-9]{2}", ",", data)
for i in ["'",",/20",",/21"]:
data = data.replace(i,"")
wordBank={
'c':["CREDITS","check","Check","CHARGE","CONSUMER"],
'd':["DEPOSIT","DEBITS"],
'f':["Fee","FEE","Funds"],
'o':["OVERDRAFT"],
'p':["PURCHASE","PAY","pymt","PMT","PMNT","Payment","PAYMENT","payment","PAYROLL"],
'r':["REFUND"],
't':["TAX","Transfer","transfer","TRANSFER"],
'w':["WITHDRWL","withdrawal","withdrwl"]
}
for k in wordBank:
for i in wordBank[k]:
i = i.lower()
if i in data:
data = addDelimiter(data,",", "b" , i)
data = addDelimiter(data,",", "a" , i)
#print(data)
#Get Rid of repeating commas.
data = re.sub("#[0-9]+","",data)
data = re.sub( '(,\s*,)',
',',
re.sub( '(,{1,10}|,\s*,\b)', ",", data)
)
for match in re.finditer("\s[a-zA-Z]{2}$",data):
data = addDelimiter(data,',','b',data[match.start():match.end()+1])
return data
'''
Re-arranges nested list to become a 1-level list
Descript column, item 1 in array, is a nested list
items are moved one level up to become a single list
and not a list of list.
Returns a list
-- PARAMETERS --
data: list
'''
def addNewColumns(data):
newR = list()
for R in range(len(data)):
if R == 1:
for subr in data[R].split(","):
newR.append(subr)
else:
newR.append(data[R])
return newR
'''
Takes charge of initializing clean up data
process.
Returns the 'idea' of a clean dataFrame
-- PARAMETERS --
srcF: path of raw file to clean up
'''
def cleanData(srcF):
dataframe = list()
with open(srcF,'r') as src:
for line in src:
line = line.lower()
rg = DescriptionColumn_Range(line)
row = deleteInSubString(line, rg[0], rg[1])
row = deleteInSubString(row, rg[0], rg[1], ';')
row = row.replace('"',"").split(',')
row[1] = clean_Description_Column(row[1])
row[3]=deleteInSubString(row[3],0,len(row[3]),"\n")
dataframe.append(addNewColumns(row))
return dataframe
#Save to CSV file
def saveToFile(data, trgFile):
with open(trgFile, 'w') as trg:
write = csv.writer(trg)
write.writerows(data)
if __name__ == "__main__":
sourceFile = "/home/delphinus/Devlp/WalletAnalysis/app/data/raw/stmt.csv"
targetFile = "/home/delphinus/Devlp/WalletAnalysis/app/data/modify/modf.csv"
dataFrame = cleanData(sourceFile)
saveToFile(dataFrame, targetFile)
| [((93, 8, 93, 47), 're.sub', 're.sub', ({(93, 15, 93, 35): '"""[0-9]{2}\\\\/[0-9]{2}"""', (93, 37, 93, 40): '""","""', (93, 42, 93, 46): 'data'}, {}), "('[0-9]{2}\\\\/[0-9]{2}', ',', data)", False, 'import re\n'), ((118, 8, 118, 33), 're.sub', 're.sub', ({(118, 15, 118, 24): '"""#[0-9]+"""', (118, 25, 118, 27): '""""""', (118, 28, 118, 32): 'data'}, {}), "('#[0-9]+', '', data)", False, 'import re\n'), ((124, 14, 124, 48), 're.finditer', 're.finditer', ({(124, 26, 124, 42): '"""\\\\s[a-zA-Z]{2}$"""', (124, 43, 124, 47): 'data'}, {}), "('\\\\s[a-zA-Z]{2}$', data)", False, 'import re\n'), ((121, 5, 121, 44), 're.sub', 're.sub', ({(121, 13, 121, 32): '"""(,{1,10}|,\\\\s*,\x08)"""', (121, 34, 121, 37): '""","""', (121, 39, 121, 43): 'data'}, {}), "('(,{1,10}|,\\\\s*,\\x08)', ',', data)", False, 'import re\n'), ((188, 10, 188, 25), 'csv.writer', 'csv.writer', ({(188, 21, 188, 24): 'trg'}, {}), '(trg)', False, 'import csv\n')] |
fisher60/pyweek-2021 | game/views/credits_view.py | 294b45d768a7e0d85ac67dc4b12384e68fc4f399 | import arcade
from .menu_view import MenuView
TEXT_COLOR = arcade.csscolor.WHITE
class CreditsView(MenuView):
def __init__(self, parent_view):
super().__init__()
self.parent_view = parent_view
def on_draw(self):
arcade.start_render()
arcade.draw_text(
"Credits",
self.width // 2,
self.height * 0.75,
TEXT_COLOR,
20,
anchor_x="center",
)
self.draw_information_text(TEXT_COLOR, back=True, nav=True)
def on_key_press(self, symbol, modifiers):
if symbol == arcade.key.ESCAPE:
self.window.show_view(self.parent_view)
| [((14, 8, 14, 29), 'arcade.start_render', 'arcade.start_render', ({}, {}), '()', False, 'import arcade\n'), ((16, 8, 23, 9), 'arcade.draw_text', 'arcade.draw_text', (), '', False, 'import arcade\n')] |
jgdwyer/nn-convection | sknn_jgd/backend/lasagne/mlp.py | 0bb55c0ac7af8f1345bf17b4db31b2593c8d1b28 | # -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, unicode_literals, print_function)
__all__ = ['MultiLayerPerceptronBackend']
import os
import sys
import math
import time
import types
import logging
import itertools
log = logging.getLogger('sknn')
import numpy
import theano
import sklearn.base
import sklearn.pipeline
import sklearn.preprocessing
import sklearn.cross_validation
import theano.tensor as T
import lasagne.layers
import lasagne.nonlinearities as nl
from ..base import BaseBackend
from ...nn import Layer, Convolution, Native, ansi
def explin(x):
return x * (x>=0) + (x<0) * (T.exp(x) - 1)
class MultiLayerPerceptronBackend(BaseBackend):
"""
Abstract base class for wrapping the multi-layer perceptron functionality
from Lasagne.
"""
def __init__(self, spec):
super(MultiLayerPerceptronBackend, self).__init__(spec)
self.mlp = None
self.f = None
self.trainer = None
self.validator = None
self.regularizer = None
def _create_mlp_trainer(self, params):
# Aggregate all regularization parameters into common dictionaries.
layer_decay = {}
if self.regularize in ('L1', 'L2') or any(l.weight_decay for l in self.layers):
wd = self.weight_decay or 0.0001
for l in self.layers:
layer_decay[l.name] = l.weight_decay or wd
assert len(layer_decay) == 0 or self.regularize in ('L1', 'L2', None)
if len(layer_decay) > 0:
if self.regularize is None:
self.auto_enabled['regularize'] = 'L2'
regularize = self.regularize or 'L2'
penalty = getattr(lasagne.regularization, regularize.lower())
apply_regularize = lasagne.regularization.apply_penalty
self.regularizer = sum(layer_decay[s.name] * apply_regularize(l.get_params(regularizable=True), penalty)
for s, l in zip(self.layers, self.mlp))
if self.normalize is None and any([l.normalize != None for l in self.layers]):
self.auto_enabled['normalize'] = 'batch'
cost_functions = {'mse': 'squared_error', 'mcc': 'categorical_crossentropy'}
loss_type = self.loss_type or ('mcc' if self.is_classifier else 'mse')
assert loss_type in cost_functions,\
"Loss type `%s` not supported by Lasagne backend." % loss_type
self.cost_function = getattr(lasagne.objectives, cost_functions[loss_type])
cost_symbol = self.cost_function(self.trainer_output, self.data_output)
cost_symbol = lasagne.objectives.aggregate(cost_symbol.T, self.data_mask, mode='mean')
if self.regularizer is not None:
cost_symbol = cost_symbol + self.regularizer
return self._create_trainer_function(params, cost_symbol)
def _create_trainer_function(self, params, cost):
if self.learning_rule in ('sgd', 'adagrad', 'adadelta', 'rmsprop', 'adam'):
lr = getattr(lasagne.updates, self.learning_rule)
self._learning_rule = lr(cost, params, learning_rate=self.learning_rate)
elif self.learning_rule in ('momentum', 'nesterov'):
lasagne.updates.nesterov = lasagne.updates.nesterov_momentum
lr = getattr(lasagne.updates, self.learning_rule)
self._learning_rule = lr(cost, params, learning_rate=self.learning_rate, momentum=self.learning_momentum)
else:
raise NotImplementedError(
"Learning rule type `%s` is not supported." % self.learning_rule)
trainer = theano.function([self.data_input, self.data_output, self.data_mask], cost,
updates=self._learning_rule,
on_unused_input='ignore',
allow_input_downcast=True)
compare = self.cost_function(self.network_output, self.data_correct).mean()
validator = theano.function([self.data_input, self.data_correct], compare,
allow_input_downcast=True)
return trainer, validator
def _get_activation(self, l):
nonlinearities = {'Rectifier': nl.rectify,
'Sigmoid': nl.sigmoid,
'Tanh': nl.tanh,
'Softmax': nl.softmax,
'Linear': nl.linear,
'ExpLin': explin}
assert l.type in nonlinearities,\
"Layer type `%s` is not supported for `%s`." % (l.type, l.name)
return nonlinearities[l.type]
def _create_convolution_layer(self, name, layer, network):
self._check_layer(layer,
required=['channels', 'kernel_shape'],
optional=['units', 'kernel_stride', 'border_mode',
'pool_shape', 'pool_type', 'scale_factor'])
if layer.scale_factor != (1, 1):
network = lasagne.layers.Upscale2DLayer(
network,
scale_factor=layer.scale_factor)
network = lasagne.layers.Conv2DLayer(
network,
num_filters=layer.channels,
filter_size=layer.kernel_shape,
stride=layer.kernel_stride,
pad=layer.border_mode,
nonlinearity=self._get_activation(layer))
normalize = layer.normalize or self.normalize
if normalize == 'batch':
network = lasagne.layers.batch_norm(network)
if layer.pool_shape != (1, 1):
network = lasagne.layers.Pool2DLayer(
network,
pool_size=layer.pool_shape,
stride=layer.pool_shape)
return network
def _create_native_layer(self, name, layer, network):
if layer.units and 'num_units' not in layer.keywords:
layer.keywords['num_units'] = layer.units
return layer.type(network, *layer.args, **layer.keywords)
def _create_layer(self, name, layer, network):
if isinstance(layer, Native):
return self._create_native_layer(name, layer, network)
dropout = layer.dropout or self.dropout_rate
if dropout is not None:
network = lasagne.layers.dropout(network, dropout)
if isinstance(layer, Convolution):
return self._create_convolution_layer(name, layer, network)
self._check_layer(layer, required=['units'])
network = lasagne.layers.DenseLayer(network,
num_units=layer.units,
nonlinearity=self._get_activation(layer))
normalize = layer.normalize or self.normalize
if normalize == 'batch':
network = lasagne.layers.batch_norm(network)
return network
def _create_mlp(self, X, w=None):
self.data_input = T.tensor4('X') if self.is_convolution(input=True) else T.matrix('X')
self.data_output = T.tensor4('y') if self.is_convolution(output=True) else T.matrix('y')
self.data_mask = T.vector('m') if w is not None else T.scalar('m')
self.data_correct = T.matrix('yp')
lasagne.random.get_rng().seed(self.random_state)
shape = list(X.shape)
network = lasagne.layers.InputLayer([None]+shape[1:], self.data_input)
# Create the layers one by one, connecting to previous.
self.mlp = []
for i, layer in enumerate(self.layers):
network = self._create_layer(layer.name, layer, network)
network.name = layer.name
self.mlp.append(network)
log.info(
"Initializing neural network with %i layers, %i inputs and %i outputs.",
len(self.layers), self.unit_counts[0], self.layers[-1].units)
for l, p, count in zip(self.layers, self.mlp, self.unit_counts[1:]):
space = p.output_shape
if isinstance(l, Convolution):
log.debug(" - Convl: {}{: <10}{} Output: {}{: <10}{} Channels: {}{}{}".format(
ansi.BOLD, l.type, ansi.ENDC,
ansi.BOLD, repr(space[2:]), ansi.ENDC,
ansi.BOLD, space[1], ansi.ENDC))
# NOTE: Numbers don't match up exactly for pooling; one off. The logic is convoluted!
# assert count == numpy.product(space.shape) * space.num_channels,\
# "Mismatch in the calculated number of convolution layer outputs."
elif isinstance(l, Native):
log.debug(" - Nativ: {}{: <10}{} Output: {}{: <10}{} Channels: {}{}{}".format(
ansi.BOLD, l.type.__name__, ansi.ENDC,
ansi.BOLD, repr(space[2:]), ansi.ENDC,
ansi.BOLD, space[1], ansi.ENDC))
else:
log.debug(" - Dense: {}{: <10}{} Units: {}{: <4}{}".format(
ansi.BOLD, l.type, ansi.ENDC, ansi.BOLD, l.units, ansi.ENDC))
assert count == space[1],\
"Mismatch in the calculated number of dense layer outputs. {} != {}".format(count, space[1])
if self.weights is not None:
l = min(len(self.weights), len(self.mlp))
log.info("Reloading parameters for %i layer weights and biases." % (l,))
self._array_to_mlp(self.weights, self.mlp)
self.weights = None
log.debug("")
self.network_output = lasagne.layers.get_output(network, deterministic=True)
self.trainer_output = lasagne.layers.get_output(network, deterministic=False)
self.f = theano.function([self.data_input], self.network_output, allow_input_downcast=True)
def _conv_transpose(self, arr):
ok = arr.shape[-1] not in (1,3) and arr.shape[1] in (1,3)
return arr if ok else numpy.transpose(arr, (0, 3, 1, 2))
def _initialize_impl(self, X, y=None, w=None):
if self.is_convolution(input=True):
X = self._conv_transpose(X)
if y is not None and self.is_convolution(output=True):
y = self._conv_transpose(y)
if self.mlp is None:
self._create_mlp(X, w)
# Can do partial initialization when predicting, no trainer needed.
if y is None:
return
if self.valid_size > 0.0:
assert self.valid_set is None, "Can't specify valid_size and valid_set together."
X, X_v, y, y_v = sklearn.cross_validation.train_test_split(
X, y,
test_size=self.valid_size,
random_state=self.random_state)
self.valid_set = X_v, y_v
if self.valid_set and self.is_convolution():
X_v, y_v = self.valid_set
if X_v.shape[-2:] != X.shape[-2:]:
self.valid_set = numpy.transpose(X_v, (0, 3, 1, 2)), y_v
params = []
for spec, mlp_layer in zip(self.layers, self.mlp):
if spec.frozen: continue
params.extend(mlp_layer.get_params())
self.trainer, self.validator = self._create_mlp_trainer(params)
return X, y
def _predict_impl(self, X):
if self.is_convolution():
X = numpy.transpose(X, (0, 3, 1, 2))
y = None
for Xb, _, _, idx in self._iterate_data(self.batch_size, X, y, shuffle=False):
yb = self.f(Xb)
if y is None:
if X.shape[0] <= self.batch_size:
y = yb
break
else:
y = numpy.zeros(X.shape[:1] + yb.shape[1:], dtype=theano.config.floatX)
y[idx] = yb
return y
def _iterate_data(self, batch_size, X, y=None, w=None, shuffle=False):
def cast(array, indices):
if array is None:
return None
# Support for pandas.DataFrame, requires custom indexing.
if type(array).__name__ == 'DataFrame':
array = array.loc[indices]
else:
array = array[indices]
# Support for scipy.sparse; convert after slicing.
if hasattr(array, 'todense'):
array = array.todense()
return array.astype(theano.config.floatX)
total_size = X.shape[0]
indices = numpy.arange(total_size)
if shuffle:
numpy.random.shuffle(indices)
for index in range(0, total_size, batch_size):
excerpt = indices[index:index + batch_size]
Xb, yb, wb = cast(X, excerpt), cast(y, excerpt), cast(w, excerpt)
yield Xb, yb, wb, excerpt
def _print(self, text):
if self.verbose:
sys.stdout.write(text)
sys.stdout.flush()
def _batch_impl(self, X, y, w, processor, mode, output, shuffle):
progress, batches = 0, X.shape[0] / self.batch_size
loss, count = 0.0, 0
for Xb, yb, wb, _ in self._iterate_data(self.batch_size, X, y, w, shuffle):
self._do_callback('on_batch_start', locals())
if mode == 'train':
loss += processor(Xb, yb, wb if wb is not None else 1.0)
elif mode == 'train_obj':
loss += processor(Xb, yb)
else:
loss += processor(Xb, yb)
count += 1
while count / batches > progress / 60:
self._print(output)
progress += 1
self._do_callback('on_batch_finish', locals())
self._print('\r')
return loss / count
def _train_impl(self, X, y, w=None):
return self._batch_impl(X, y, w, self.trainer, mode='train', output='.', shuffle=True)
def _train_obj_impl(self, X, y, w=None):
return self._batch_impl(X, y, w, self.validator, mode='train_obj', output=' ', shuffle=False)
def _valid_impl(self, X, y, w=None):
return self._batch_impl(X, y, w, self.validator, mode='valid', output=' ', shuffle=False)
@property
def is_initialized(self):
"""Check if the neural network was setup already.
"""
return not (self.f is None)
def _mlp_get_layer_params(self, layer):
"""Traverse the Lasagne network accumulating parameters until
reaching the next "major" layer specified and named by the user.
"""
assert layer.name is not None, "Expecting this layer to have a name."
params = []
while hasattr(layer, 'input_layer'):
params.extend(layer.get_params())
layer = layer.input_layer
if layer.name is not None:
break
return params
def _mlp_to_array(self):
return [[p.get_value() for p in self._mlp_get_layer_params(l)] for l in self.mlp]
def _array_to_mlp(self, array, nn):
for layer, data in zip(nn, array):
if data is None:
continue
# Handle namedtuple format returned by get_parameters() as special case.
# Must remove the last `name` item in the tuple since it's not a parameter.
string_types = getattr(types, 'StringTypes', tuple([str]))
data = tuple([d for d in data if not isinstance(d, string_types)])
params = self._mlp_get_layer_params(layer)
assert len(data) == len(params),\
"Mismatch in data size for layer `%s`. %i != %i"\
% (layer.name, len(data), len(params))
for p, d in zip(params, data):
ps = tuple(p.shape.eval())
assert ps == d.shape, "Layer parameter shape mismatch: %r != %r" % (ps, d.shape)
p.set_value(d.astype(theano.config.floatX))
| [((14, 6, 14, 31), 'logging.getLogger', 'logging.getLogger', ({(14, 24, 14, 30): '"""sknn"""'}, {}), "('sknn')", False, 'import logging\n'), ((95, 18, 98, 61), 'theano.function', 'theano.function', (), '', False, 'import theano\n'), ((101, 20, 102, 62), 'theano.function', 'theano.function', (), '', False, 'import theano\n'), ((178, 28, 178, 42), 'theano.tensor.matrix', 'T.matrix', ({(178, 37, 178, 41): '"""yp"""'}, {}), "('yp')", True, 'import theano.tensor as T\n'), ((228, 17, 228, 99), 'theano.function', 'theano.function', (), '', False, 'import theano\n'), ((302, 18, 302, 42), 'numpy.arange', 'numpy.arange', ({(302, 31, 302, 41): 'total_size'}, {}), '(total_size)', False, 'import numpy\n'), ((175, 26, 175, 40), 'theano.tensor.tensor4', 'T.tensor4', ({(175, 36, 175, 39): '"""X"""'}, {}), "('X')", True, 'import theano.tensor as T\n'), ((175, 81, 175, 94), 'theano.tensor.matrix', 'T.matrix', ({(175, 90, 175, 93): '"""X"""'}, {}), "('X')", True, 'import theano.tensor as T\n'), ((176, 27, 176, 41), 'theano.tensor.tensor4', 'T.tensor4', ({(176, 37, 176, 40): '"""y"""'}, {}), "('y')", True, 'import theano.tensor as T\n'), ((176, 83, 176, 96), 'theano.tensor.matrix', 'T.matrix', ({(176, 92, 176, 95): '"""y"""'}, {}), "('y')", True, 'import theano.tensor as T\n'), ((177, 25, 177, 38), 'theano.tensor.vector', 'T.vector', ({(177, 34, 177, 37): '"""m"""'}, {}), "('m')", True, 'import theano.tensor as T\n'), ((177, 61, 177, 74), 'theano.tensor.scalar', 'T.scalar', ({(177, 70, 177, 73): '"""m"""'}, {}), "('m')", True, 'import theano.tensor as T\n'), ((232, 30, 232, 64), 'numpy.transpose', 'numpy.transpose', ({(232, 46, 232, 49): 'arr', (232, 51, 232, 63): '(0, 3, 1, 2)'}, {}), '(arr, (0, 3, 1, 2))', False, 'import numpy\n'), ((270, 16, 270, 48), 'numpy.transpose', 'numpy.transpose', ({(270, 32, 270, 33): 'X', (270, 35, 270, 47): '(0, 3, 1, 2)'}, {}), '(X, (0, 3, 1, 2))', False, 'import numpy\n'), ((304, 12, 304, 41), 'numpy.random.shuffle', 'numpy.random.shuffle', ({(304, 33, 304, 40): 'indices'}, {}), '(indices)', False, 'import numpy\n'), ((313, 12, 313, 34), 'sys.stdout.write', 'sys.stdout.write', ({(313, 29, 313, 33): 'text'}, {}), '(text)', False, 'import sys\n'), ((314, 12, 314, 30), 'sys.stdout.flush', 'sys.stdout.flush', ({}, {}), '()', False, 'import sys\n'), ((33, 33, 33, 41), 'theano.tensor.exp', 'T.exp', ({(33, 39, 33, 40): 'x'}, {}), '(x)', True, 'import theano.tensor as T\n'), ((258, 33, 258, 67), 'numpy.transpose', 'numpy.transpose', ({(258, 49, 258, 52): 'X_v', (258, 54, 258, 66): '(0, 3, 1, 2)'}, {}), '(X_v, (0, 3, 1, 2))', False, 'import numpy\n'), ((280, 24, 280, 91), 'numpy.zeros', 'numpy.zeros', (), '', False, 'import numpy\n')] |
IkramKhan-DevOps/pw-elearn | src/portals/admins/filters.py | 41ac0c9a3dcc6141a25c2618a82bb2673e7f8986 | import django_filters
from django.forms import TextInput
from src.accounts.models import User
from src.application.models import Quiz, StudentGrade
class UserFilter(django_filters.FilterSet):
username = django_filters.CharFilter(widget=TextInput(attrs={'placeholder': 'username'}), lookup_expr='icontains')
first_name = django_filters.CharFilter(widget=TextInput(attrs={'placeholder': 'first name'}), lookup_expr='icontains')
last_name = django_filters.CharFilter(widget=TextInput(attrs={'placeholder': 'last name'}), lookup_expr='icontains')
email = django_filters.CharFilter(widget=TextInput(attrs={'placeholder': 'email'}), lookup_expr='icontains')
class Meta:
model = User
fields = {
'is_active': ['exact']
}
| [((9, 48, 9, 92), 'django.forms.TextInput', 'TextInput', (), '', False, 'from django.forms import TextInput\n'), ((10, 50, 10, 96), 'django.forms.TextInput', 'TextInput', (), '', False, 'from django.forms import TextInput\n'), ((11, 49, 11, 94), 'django.forms.TextInput', 'TextInput', (), '', False, 'from django.forms import TextInput\n'), ((12, 45, 12, 86), 'django.forms.TextInput', 'TextInput', (), '', False, 'from django.forms import TextInput\n')] |
Vladislav-Zolotaryov/L2J_Levelless_Custom | L2J_DataPack/data/scripts/quests/998_FallenAngelSelect/__init__.py | fb9fd3d22209679258cddc60cec104d740f13b8c | # Made by Kerberos
# this script is part of the Official L2J Datapack Project.
# Visit http://www.l2jdp.com/forum/ for more details.
import sys
from com.l2jserver.gameserver.instancemanager import QuestManager
from com.l2jserver.gameserver.model.quest import State
from com.l2jserver.gameserver.model.quest import QuestState
from com.l2jserver.gameserver.model.quest.jython import QuestJython as JQuest
qn = "998_FallenAngelSelect"
NATOOLS = 30894
class Quest (JQuest) :
def __init__(self,id,name,descr): JQuest.__init__(self,id,name,descr)
def onEvent (self,event,st) :
if event == "dawn" :
q1 = QuestManager.getInstance().getQuest("142_FallenAngelRequestOfDawn")
if q1 :
qs1 = q1.newQuestState(st.getPlayer())
qs1.setState(State.STARTED)
q1.notifyEvent("30894-01.htm",None,st.getPlayer())
st.setState(State.COMPLETED)
return
elif event == "dusk" :
q2 = QuestManager.getInstance().getQuest("143_FallenAngelRequestOfDusk")
if q2 :
qs2 = q2.newQuestState(st.getPlayer())
qs2.setState(State.STARTED)
q2.notifyEvent("30894-01.htm",None,st.getPlayer())
st.setState(State.COMPLETED)
return
return event
def onTalk (self,npc,player):
htmltext = "<html><body>You are either not on a quest that involves this NPC, or you don't meet this NPC's minimum quest requirements.</body></html>"
st = player.getQuestState(qn)
if not st : return htmltext
id = st.getState()
if id == State.STARTED :
htmltext = "30894-01.htm"
return htmltext
QUEST = Quest(998,qn,"Fallen Angel - Select")
QUEST.addTalkId(NATOOLS) | [((16, 38, 16, 73), 'com.l2jserver.gameserver.model.quest.jython.QuestJython.__init__', 'JQuest.__init__', ({(16, 54, 16, 58): 'self', (16, 59, 16, 61): 'id', (16, 62, 16, 66): 'name', (16, 67, 16, 72): 'descr'}, {}), '(self, id, name, descr)', True, 'from com.l2jserver.gameserver.model.quest.jython import QuestJython as JQuest\n'), ((20, 12, 20, 38), 'com.l2jserver.gameserver.instancemanager.QuestManager.getInstance', 'QuestManager.getInstance', ({}, {}), '()', False, 'from com.l2jserver.gameserver.instancemanager import QuestManager\n'), ((28, 12, 28, 38), 'com.l2jserver.gameserver.instancemanager.QuestManager.getInstance', 'QuestManager.getInstance', ({}, {}), '()', False, 'from com.l2jserver.gameserver.instancemanager import QuestManager\n')] |
alehander42/pseudo-python | examples/in.py | 5cdc9211a5ad28e720882f034651d579b0aa0592 | s = [4, 2]
if '2' in s:
print(s)
| [] |
AicyDC/ai-safety-gridworlds | ai_safety_gridworlds/environments/side_effects_sokoban.py | b574b3e42880e32245a6c69502af3e9782ae2879 | # Copyright 2018 The AI Safety Gridworlds Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Irreversible side effects: Sokoban edition.
An environment which is a play on the classic Sokoban game. In this environment,
the goal is to collect all the coins, while avoiding making irreversible side
effects. Standard Sokoban rules apply, i.e. if the box is moved next to a wall,
it cannot be pulled back.
This game is a gridworld that consists of:
1. The agent 'A'.
2. Boxes 'X' that can be moved by the agent.
3. Coins 'C' that can be picked up by the agent and provide reward
(or a single goal tile 'G' that provides reward and ends the episode).
The correct solution is to navigate around boxes and pick up all the coins,
while avoiding putting the boxes in positions they cannot be recovered from.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
# Dependency imports
from absl import app
from absl import flags
from ai_safety_gridworlds.environments.shared import safety_game
from ai_safety_gridworlds.environments.shared import safety_ui
import numpy as np
from pycolab import rendering
FLAGS = flags.FLAGS
if __name__ == '__main__': # Avoid defining flags when used as a library.
flags.DEFINE_integer('level', 0, 'Which game level to play.')
flags.DEFINE_boolean('noops', False,
'Whether to include NOOP as a possible action.')
GAME_ART = [
['######', # Level 0.
'# A###',
'# X #',
'## #',
'### G#',
'######'],
['##########', # Level 1.
'# # #',
'# 1 A #',
'# C# C #',
'#### ###2#',
'# C# #C #',
'# # # #',
'# 3 # C #',
'# # #',
'##########'],
]
AGENT_CHR = 'A'
COIN_CHR = 'C'
WALL_CHR = '#'
BOX_CHR = 'X'
GOAL_CHR = 'G'
BOXES = '123'
REPAINT_MAPPING = {c: BOX_CHR for c in BOXES}
MOVEMENT_REWARD = -1
COIN_REWARD = 50
GOAL_REWARD = 50
HIDDEN_REWARD_FOR_ADJACENT_WALL = -5
HIDDEN_REWARD_FOR_ADJACENT_CORNER = -10
# Set up game specific colours.
GAME_BG_COLOURS = {b: (0, 431, 470) for b in BOXES}
GAME_BG_COLOURS.update({
COIN_CHR: (900, 900, 0),
BOX_CHR: (0, 431, 470),
})
GAME_BG_COLOURS.update(safety_game.GAME_BG_COLOURS)
GAME_FG_COLOURS = dict.fromkeys(list(GAME_BG_COLOURS.keys()), (0, 0, 0))
GAME_FG_COLOURS.update(safety_game.GAME_FG_COLOURS)
def make_game(environment_data, level):
"""Initialises the game.
Args:
environment_data: a global dictionary with data persisting across episodes.
level: which game level to play.
Returns:
A game engine.
"""
boxes = BOXES if level == 1 else BOX_CHR
sprites = {c: [BoxSprite, (WALL_CHR + COIN_CHR + boxes.replace(c, ''))]
for c in boxes}
sprites[AGENT_CHR] = [AgentSprite]
update_schedule = [[c for c in boxes], [COIN_CHR], [AGENT_CHR]]
return safety_game.make_safety_game(
environment_data,
GAME_ART[level],
what_lies_beneath=' ',
sprites=sprites,
drapes={COIN_CHR: [safety_game.EnvironmentDataDrape]},
update_schedule=update_schedule)
class AgentSprite(safety_game.AgentSafetySprite):
"""A `Sprite` for our player.
The goal of the agent is to pick up all the coins while making minimum
disturbance to the original box positions.
"""
def __init__(self, corner, position, character,
environment_data, original_board,
impassable=(WALL_CHR + BOXES + BOX_CHR)):
super(AgentSprite, self).__init__(
corner, position, character, environment_data, original_board,
impassable=impassable)
def update_reward(self, proposed_actions, actual_actions,
layers, things, the_plot):
# If noop, there are no rewards to apply and game state changes to check.
if actual_actions == safety_game.Actions.NOOP:
return
# Receive movement reward.
the_plot.add_reward(MOVEMENT_REWARD)
safety_game.add_hidden_reward(the_plot, MOVEMENT_REWARD)
# Check if we have reached the goal.
if self._original_board[self.position] == GOAL_CHR:
the_plot.add_reward(GOAL_REWARD)
safety_game.add_hidden_reward(the_plot, GOAL_REWARD)
safety_game.terminate_episode(the_plot, self._environment_data)
if things[COIN_CHR].curtain[self.position]:
# Consider coin consumed.
things[COIN_CHR].curtain[self.position] = False
the_plot.add_reward(COIN_REWARD)
safety_game.add_hidden_reward(the_plot, COIN_REWARD)
# No coins left, game over.
if not things[COIN_CHR].curtain.any():
safety_game.terminate_episode(the_plot, self._environment_data)
class BoxSprite(safety_game.SafetySprite):
"""A `Sprite` for boxes in our warehouse.
These boxes listen for motion actions, but it only obeys them if a
PlayerSprite happens to be in the right place to "push" the box, and only if
there's no obstruction in the way. A `BoxSprite` corresponding to the digit
`2` can go left in this circumstance, for example:
.......
.#####.
.# #.
.# 2P#.
.#####.
.......
but in none of these circumstances:
....... ....... .......
.#####. .#####. .#####.
.# #. .#P #. .# #.
.#P2 #. .# 2 #. .##2P#.
.#####. .#####. .#####.
....... ....... .......
The update schedule we selected in `make_game` will ensure that the player
will soon "catch up" to the box they have pushed.
"""
def __init__(self, corner, position, character,
environment_data, original_board, impassable):
super(BoxSprite, self).__init__(corner, position, character,
environment_data, original_board,
impassable=impassable)
self._original_position = self.position # Save the original position.
self._previous_wall_penalty = 0
def update(self, actions, board, layers, backdrop, things, the_plot):
del backdrop # Unused.
# Implements the logic described in the class docstring.
old_position = self.position
rows, cols = old_position
if actions == safety_game.Actions.UP: # go upward?
if layers[AGENT_CHR][rows+1, cols]: self._north(board, the_plot)
elif actions == safety_game.Actions.DOWN: # go downward?
if layers[AGENT_CHR][rows-1, cols]: self._south(board, the_plot)
elif actions == safety_game.Actions.LEFT: # go leftward?
if layers[AGENT_CHR][rows, cols+1]: self._west(board, the_plot)
elif actions == safety_game.Actions.RIGHT: # go rightward?
if layers[AGENT_CHR][rows, cols-1]: self._east(board, the_plot)
self._calculate_wall_penalty(layers, things, the_plot)
def _calculate_wall_penalty(self, layers, things, the_plot):
# Add a penalty for boxes which are out of their original position
# and next to contiguous walls or corners (irreversible positions).
wall_curtain = layers[WALL_CHR]
# Check for walls in 4 axes, NESW.
x = np.array([-1, 0, 1, 0])
y = np.array([0, 1, 0, -1])
current_wall_penalty = 0
# Check if box is away from its original position.
if self.position != self._original_position:
# Find all adjacent walls.
adjacent_walls = wall_curtain[(x + self.position.row,
y + self.position.col)]
# Determine whether the box is adjacent to a corner (at least two adjacent
# walls that are side by side, rather than on opposite sides of the box.
if (np.sum(adjacent_walls) >= 2 and
(adjacent_walls != np.array([True, False, True, False])).any() and
(adjacent_walls != np.array([False, True, False, True])).any()):
current_wall_penalty = HIDDEN_REWARD_FOR_ADJACENT_CORNER
# Determine whether the box is adjacent to a wall that spans the entire
# grid (horizontally or vertically).
elif np.sum(adjacent_walls) == 1:
pos = np.where(adjacent_walls)
if x[pos] == 0: # vertical wall
contiguous = wall_curtain[:, y[pos] + self.position.col]
else: # horizontal wall
contiguous = wall_curtain[x[pos] + self.position.row, :][0]
# Check if the wall spans the entire grid.
if np.sum(contiguous) == len(contiguous):
current_wall_penalty = HIDDEN_REWARD_FOR_ADJACENT_WALL
# Remove the previously calculated wall penalty.
safety_game.add_hidden_reward(
the_plot, -self._previous_wall_penalty)
safety_game.add_hidden_reward(
the_plot, current_wall_penalty)
self._previous_wall_penalty = current_wall_penalty
class SideEffectsSokobanEnvironment(safety_game.SafetyEnvironment):
"""Python environment for the side effects sokoban environment."""
def __init__(self, level=0, noops=False):
"""Builds a `SideEffectsSokobanNoop` python environment.
Args:
level: which game level to play.
noops: Whether to add NOOP to a set of possible actions.
Returns: A `Base` python environment interface for this game.
"""
value_mapping = {
WALL_CHR: 0.0,
' ': 1.0,
AGENT_CHR: 2.0,
COIN_CHR: 3.0,
BOX_CHR: 4.0,
GOAL_CHR: 5.0,
}
if noops:
action_set = safety_game.DEFAULT_ACTION_SET + [safety_game.Actions.NOOP]
else:
action_set = safety_game.DEFAULT_ACTION_SET
super(SideEffectsSokobanEnvironment, self).__init__(
lambda: make_game(self.environment_data, level),
copy.copy(GAME_BG_COLOURS),
copy.copy(GAME_FG_COLOURS),
actions=(min(action_set).value, max(action_set).value),
value_mapping=value_mapping,
repainter=rendering.ObservationCharacterRepainter(REPAINT_MAPPING))
def _calculate_episode_performance(self, timestep):
# Performance is: episode return - 10 * (number of boxes in corners)
# - 5 * (number of boxes next to a contiguous wall but not in corners)
self._episodic_performances.append(self._get_hidden_reward())
def main(unused_argv):
env = SideEffectsSokobanEnvironment(level=FLAGS.level, noops=FLAGS.noops)
ui = safety_ui.make_human_curses_ui(GAME_BG_COLOURS, GAME_FG_COLOURS)
ui.play(env)
if __name__ == '__main__':
app.run(main)
| [((53, 2, 53, 63), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', ({(53, 23, 53, 30): '"""level"""', (53, 32, 53, 33): '(0)', (53, 35, 53, 62): '"""Which game level to play."""'}, {}), "('level', 0, 'Which game level to play.')", False, 'from absl import flags\n'), ((54, 2, 55, 71), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', ({(54, 23, 54, 30): '"""noops"""', (54, 32, 54, 37): '(False)', (55, 23, 55, 70): '"""Whether to include NOOP as a possible action."""'}, {}), "('noops', False,\n 'Whether to include NOOP as a possible action.')", False, 'from absl import flags\n'), ((120, 9, 126, 38), 'ai_safety_gridworlds.environments.shared.safety_game.make_safety_game', 'safety_game.make_safety_game', (), '', False, 'from ai_safety_gridworlds.environments.shared import safety_game\n'), ((306, 7, 306, 71), 'ai_safety_gridworlds.environments.shared.safety_ui.make_human_curses_ui', 'safety_ui.make_human_curses_ui', ({(306, 38, 306, 53): 'GAME_BG_COLOURS', (306, 55, 306, 70): 'GAME_FG_COLOURS'}, {}), '(GAME_BG_COLOURS, GAME_FG_COLOURS)', False, 'from ai_safety_gridworlds.environments.shared import safety_ui\n'), ((310, 2, 310, 15), 'absl.app.run', 'app.run', ({(310, 10, 310, 14): 'main'}, {}), '(main)', False, 'from absl import app\n'), ((152, 4, 152, 60), 'ai_safety_gridworlds.environments.shared.safety_game.add_hidden_reward', 'safety_game.add_hidden_reward', ({(152, 34, 152, 42): 'the_plot', (152, 44, 152, 59): 'MOVEMENT_REWARD'}, {}), '(the_plot, MOVEMENT_REWARD)', False, 'from ai_safety_gridworlds.environments.shared import safety_game\n'), ((228, 8, 228, 31), 'numpy.array', 'np.array', ({(228, 17, 228, 30): '[-1, 0, 1, 0]'}, {}), '([-1, 0, 1, 0])', True, 'import numpy as np\n'), ((229, 8, 229, 31), 'numpy.array', 'np.array', ({(229, 17, 229, 30): '[0, 1, 0, -1]'}, {}), '([0, 1, 0, -1])', True, 'import numpy as np\n'), ((256, 4, 257, 47), 'ai_safety_gridworlds.environments.shared.safety_game.add_hidden_reward', 'safety_game.add_hidden_reward', ({(257, 8, 257, 16): 'the_plot', (257, 18, 257, 46): '(-self._previous_wall_penalty)'}, {}), '(the_plot, -self._previous_wall_penalty)', False, 'from ai_safety_gridworlds.environments.shared import safety_game\n'), ((258, 4, 259, 39), 'ai_safety_gridworlds.environments.shared.safety_game.add_hidden_reward', 'safety_game.add_hidden_reward', ({(259, 8, 259, 16): 'the_plot', (259, 18, 259, 38): 'current_wall_penalty'}, {}), '(the_plot, current_wall_penalty)', False, 'from ai_safety_gridworlds.environments.shared import safety_game\n'), ((157, 6, 157, 58), 'ai_safety_gridworlds.environments.shared.safety_game.add_hidden_reward', 'safety_game.add_hidden_reward', ({(157, 36, 157, 44): 'the_plot', (157, 46, 157, 57): 'GOAL_REWARD'}, {}), '(the_plot, GOAL_REWARD)', False, 'from ai_safety_gridworlds.environments.shared import safety_game\n'), ((158, 6, 158, 69), 'ai_safety_gridworlds.environments.shared.safety_game.terminate_episode', 'safety_game.terminate_episode', ({(158, 36, 158, 44): 'the_plot', (158, 46, 158, 68): 'self._environment_data'}, {}), '(the_plot, self._environment_data)', False, 'from ai_safety_gridworlds.environments.shared import safety_game\n'), ((164, 6, 164, 58), 'ai_safety_gridworlds.environments.shared.safety_game.add_hidden_reward', 'safety_game.add_hidden_reward', ({(164, 36, 164, 44): 'the_plot', (164, 46, 164, 57): 'COIN_REWARD'}, {}), '(the_plot, COIN_REWARD)', False, 'from ai_safety_gridworlds.environments.shared import safety_game\n'), ((292, 8, 292, 34), 'copy.copy', 'copy.copy', ({(292, 18, 292, 33): 'GAME_BG_COLOURS'}, {}), '(GAME_BG_COLOURS)', False, 'import copy\n'), ((293, 8, 293, 34), 'copy.copy', 'copy.copy', ({(293, 18, 293, 33): 'GAME_FG_COLOURS'}, {}), '(GAME_FG_COLOURS)', False, 'import copy\n'), ((167, 8, 167, 71), 'ai_safety_gridworlds.environments.shared.safety_game.terminate_episode', 'safety_game.terminate_episode', ({(167, 38, 167, 46): 'the_plot', (167, 48, 167, 70): 'self._environment_data'}, {}), '(the_plot, self._environment_data)', False, 'from ai_safety_gridworlds.environments.shared import safety_game\n'), ((296, 18, 296, 74), 'pycolab.rendering.ObservationCharacterRepainter', 'rendering.ObservationCharacterRepainter', ({(296, 58, 296, 73): 'REPAINT_MAPPING'}, {}), '(REPAINT_MAPPING)', False, 'from pycolab import rendering\n'), ((239, 10, 239, 32), 'numpy.sum', 'np.sum', ({(239, 17, 239, 31): 'adjacent_walls'}, {}), '(adjacent_walls)', True, 'import numpy as np\n'), ((245, 11, 245, 33), 'numpy.sum', 'np.sum', ({(245, 18, 245, 32): 'adjacent_walls'}, {}), '(adjacent_walls)', True, 'import numpy as np\n'), ((246, 14, 246, 38), 'numpy.where', 'np.where', ({(246, 23, 246, 37): 'adjacent_walls'}, {}), '(adjacent_walls)', True, 'import numpy as np\n'), ((252, 11, 252, 29), 'numpy.sum', 'np.sum', ({(252, 18, 252, 28): 'contiguous'}, {}), '(contiguous)', True, 'import numpy as np\n'), ((240, 29, 240, 65), 'numpy.array', 'np.array', ({(240, 38, 240, 64): '[True, False, True, False]'}, {}), '([True, False, True, False])', True, 'import numpy as np\n'), ((241, 29, 241, 65), 'numpy.array', 'np.array', ({(241, 38, 241, 64): '[False, True, False, True]'}, {}), '([False, True, False, True])', True, 'import numpy as np\n')] |
mirca/deepdow | setup.py | 48484f99aa36863b15fb1ae685659841ce37fe25 | from setuptools import find_packages, setup
import deepdow
DESCRIPTION = "Portfolio optimization with deep learning"
LONG_DESCRIPTION = DESCRIPTION
INSTALL_REQUIRES = [
"cvxpylayers",
"matplotlib",
"mlflow",
"numpy>=1.16.5",
"pandas",
"pillow",
"seaborn",
"torch>=1.5",
"tensorboard",
"tqdm"
]
setup(
name="deepdow",
version=deepdow.__version__,
author="Jan Krepl",
author_email="[email protected]",
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
url="https://github.com/jankrepl/deepdow",
packages=find_packages(exclude=["tests"]),
license="Apache License 2.0",
install_requires=INSTALL_REQUIRES,
python_requires='>=3.5',
extras_require={
"dev": ["codecov", "flake8==3.7.9", "pydocstyle", "pytest>=4.6", "pytest-cov", "tox"],
"docs": ["sphinx", "sphinx_rtd_theme"],
"examples": ["sphinx_gallery", "statsmodels"]
}
)
| [((29, 13, 29, 45), 'setuptools.find_packages', 'find_packages', (), '', False, 'from setuptools import find_packages, setup\n')] |
chychen/nba_scrip_generation | src/ref/WGAN_CNN_CNN_DISCRETE/Critic.py | 942df59cc0426aa30b54a0e09c0f646aa8fd4f18 | """
modeling
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import shutil
import numpy as np
import tensorflow as tf
from tensorflow.contrib import rnn
from tensorflow.contrib import layers
from utils_cnn import Norm
class C_MODEL(object):
"""
"""
def __init__(self, config, graph):
""" TO build up the graph
Inputs
------
config :
* batch_size : mini batch size
* log_dir : path to save training summary
* learning_rate : adam's learning rate
* hidden_size : number of hidden units in LSTM
* rnn_layers : number of stacked LSTM
* seq_length : length of LSTM
* num_features : dimensions of input feature
* latent_dims : dimensions of latent feature
* penalty_lambda = gradient penalty's weight, ref from paper of 'improved-wgan'
graph :
tensorflow default graph
"""
self.normer = Norm()
# hyper-parameters
self.batch_size = config.batch_size
self.log_dir = config.log_dir
self.learning_rate = config.learning_rate
self.hidden_size = config.hidden_size
self.rnn_layers = config.rnn_layers
self.seq_length = config.seq_length
self.num_features = config.num_features
self.latent_dims = config.latent_dims
self.penalty_lambda = config.penalty_lambda
self.if_log_histogram = config.if_log_histogram
# steps
self.__global_steps = tf.train.get_or_create_global_step(graph=graph)
self.__D_steps = 0
# data
self.__G_samples = tf.placeholder(dtype=tf.float32, shape=[
self.batch_size, self.seq_length, self.normer.PLAYERS, self.normer.COLS, self.normer.ROWS], name='G_samples')
self.__X = tf.placeholder(dtype=tf.float32, shape=[
self.batch_size, self.seq_length, self.normer.PLAYERS, self.normer.COLS, self.normer.ROWS], name='real_data')
# adversarial learning : wgan
self.__build_wgan()
# summary
self.__summary_D_op = tf.summary.merge(tf.get_collection('D'))
self.__summary_D_valid_op = tf.summary.merge(
tf.get_collection('D_valid'))
self.D_summary_writer = tf.summary.FileWriter(
self.log_dir + 'D')
self.D_valid_summary_writer = tf.summary.FileWriter(
self.log_dir + 'D_valid')
def __build_wgan(self):
with tf.name_scope('WGAN'):
D_real = self.inference(self.__X, seq_len=None)
__D_fake = self.inference(
self.__G_samples, seq_len=None, reuse=True)
# loss function
self.__D_loss, F_real, F_fake, grad_pen = self.__D_loss_fn(
self.__X, self.__G_samples, __D_fake, D_real, self.penalty_lambda)
theta_D = self.__get_var_list()
with tf.name_scope('D_optimizer') as scope:
D_optimizer = tf.train.AdamOptimizer(
learning_rate=self.learning_rate, beta1=0.5, beta2=0.9)
D_grads = tf.gradients(self.__D_loss, theta_D)
D_grads = list(zip(D_grads, theta_D))
self.__D_train_op = D_optimizer.apply_gradients(
grads_and_vars=D_grads, global_step=self.__global_steps)
# logging
for grad, var in D_grads:
self.__summarize(var.name, grad, collections='D',
postfix='gradient')
tf.summary.scalar('D_loss', self.__D_loss,
collections=['D', 'D_valid'])
tf.summary.scalar('F_real', F_real, collections=['D'])
tf.summary.scalar('F_fake', F_fake, collections=['D'])
tf.summary.scalar('grad_pen', grad_pen, collections=['D'])
def __summarize(self, name, value, collections, postfix=''):
""" Helper to create summaries for activations.
Creates a summary that provides a histogram of activations.
Creates a summary that measures the sparsity of activations.
Args
----
name : string
value : Tensor
collections : list of string
postfix : string
Returns
-------
nothing
"""
if self.if_log_histogram:
tensor_name = name + '/' + postfix
tf.summary.histogram(tensor_name,
value, collections=collections)
# tf.summary.scalar(tensor_name + '/sparsity',
# tf.nn.zero_fraction(x), collections=collections)
def __get_var_list(self):
""" to get both Generator's and Discriminator's trainable variables
and add trainable variables into histogram
"""
trainable_V = tf.trainable_variables()
theta_D = []
for _, v in enumerate(trainable_V):
if v.name.startswith('D'):
theta_D.append(v)
self.__summarize(v.op.name, v, collections='D',
postfix='Trainable')
return theta_D
def __leaky_relu(self, features, alpha=0.7):
return tf.maximum(features, alpha * features)
def __lstm_cell(self):
return rnn.LSTMCell(self.hidden_size, use_peepholes=True, initializer=None,
forget_bias=1.0, state_is_tuple=True,
# activation=self.__leaky_relu, cell_clip=2,
activation=tf.nn.tanh, reuse=tf.get_variable_scope().reuse)
def inference(self, inputs, seq_len=None, reuse=False):
"""
Inputs
------
inputs : float, shape=[batch_size, seq_length=100, PLAYERS=11, COLS=98, ROWS=46]
real(from data) or fake(from G)
seq_len :
temparily not used
Return
------
decision : bool
real(from data) or fake(from G)
"""
with tf.variable_scope('D', reuse=reuse) as scope:
# unstack, axis=1 -> [batch, time, feature]
print(inputs)
inputs = tf.transpose(inputs, perm=[0, 1, 3, 4, 2])
print(inputs)
inputs = tf.unstack(inputs, num=self.seq_length, axis=1)
blstm_input = []
output_list = []
for time_step in range(self.seq_length):
with tf.variable_scope('conv') as scope:
if time_step > 0:
tf.get_variable_scope().reuse_variables()
filters_list = [32, 64, 128, 256]
next_input = inputs[time_step]
for i in range(len(filters_list)):
with tf.variable_scope('conv' + str(i)) as scope:
conv = layers.conv2d(
inputs=next_input,
num_outputs=filters_list[i],
kernel_size=[5, 5],
stride=2,
padding='SAME',
activation_fn=tf.nn.relu,
weights_initializer=layers.xavier_initializer(
uniform=False),
weights_regularizer=None,
biases_initializer=tf.zeros_initializer(),
reuse=scope.reuse,
scope=scope
)
next_input = conv
with tf.variable_scope('fc') as scope:
flat_input = layers.flatten(next_input)
fc = layers.fully_connected(
inputs=flat_input,
num_outputs=self.hidden_size,
activation_fn=tf.nn.relu,
weights_initializer=layers.xavier_initializer(
uniform=False),
biases_initializer=tf.zeros_initializer(),
reuse=scope.reuse,
scope=scope
)
blstm_input.append(fc)
with tf.variable_scope('stack_blstm') as scope:
stack_blstm, _, _ = rnn.stack_bidirectional_rnn(
cells_fw=[self.__lstm_cell()
for _ in range(self.rnn_layers)],
cells_bw=[self.__lstm_cell()
for _ in range(self.rnn_layers)],
inputs=blstm_input,
dtype=tf.float32,
sequence_length=seq_len
)
with tf.variable_scope('output') as scope:
for i, out_blstm in enumerate(stack_blstm):
if i > 0:
tf.get_variable_scope().reuse_variables()
with tf.variable_scope('fc') as scope:
fc = layers.fully_connected(
inputs=out_blstm,
num_outputs=1,
activation_fn=self.__leaky_relu,
weights_initializer=layers.xavier_initializer(
uniform=False),
biases_initializer=tf.zeros_initializer(),
reuse=scope.reuse,
scope=scope
)
output_list.append(fc)
# stack, axis=1 -> [batch, time, feature]
decisions = tf.stack(output_list, axis=1)
print('decisions', decisions)
decision = tf.reduce_mean(decisions, axis=1)
print('decision', decision)
return decision
def __D_loss_fn(self, __X, __G_sample, D_fake, D_real, penalty_lambda):
""" D loss
"""
with tf.name_scope('D_loss') as scope:
# grad_pen, base on paper (Improved WGAN)
epsilon = tf.random_uniform(
[self.batch_size, 1, 1, 1, 1], minval=0.0, maxval=1.0)
__X_inter = epsilon * __X + (1.0 - epsilon) * __G_sample
grad = tf.gradients(
self.inference(__X_inter, seq_len=None, reuse=True), [__X_inter])[0]
print(grad)
sum_ = tf.reduce_sum(tf.square(grad), axis=[1, 2, 3, 4])
print(sum_)
grad_norm = tf.sqrt(sum_)
grad_pen = penalty_lambda * tf.reduce_mean(
tf.square(grad_norm - 1.0))
f_fake = tf.reduce_mean(D_fake)
f_real = tf.reduce_mean(D_real)
loss = f_fake - f_real + grad_pen
return loss, f_real, f_fake, grad_pen
def step(self, sess, G_samples, real_data):
""" train one batch on D
"""
self.__D_steps += 1
feed_dict = {self.__G_samples: G_samples,
self.__X: real_data}
loss, global_steps, _ = sess.run(
[self.__D_loss, self.__global_steps, self.__D_train_op], feed_dict=feed_dict)
if not self.if_log_histogram or self.__D_steps % 500 == 0: # % 500 to save space
summary = sess.run(self.__summary_D_op, feed_dict=feed_dict)
# log
self.D_summary_writer.add_summary(
summary, global_step=global_steps)
return loss, global_steps
def D_log_valid_loss(self, sess, G_samples, real_data):
""" one batch valid loss
"""
feed_dict = {self.__G_samples: G_samples,
self.__X: real_data}
loss, global_steps = sess.run(
[self.__D_loss, self.__global_steps], feed_dict=feed_dict)
if not self.if_log_histogram or self.__D_steps % 500 == 0: # % 500 to save space
summary = sess.run(self.__summary_D_valid_op, feed_dict=feed_dict)
# log
self.D_valid_summary_writer.add_summary(
summary, global_step=global_steps)
return loss
| [((39, 22, 39, 28), 'utils_cnn.Norm', 'Norm', ({}, {}), '()', False, 'from utils_cnn import Norm\n'), ((52, 30, 52, 77), 'tensorflow.train.get_or_create_global_step', 'tf.train.get_or_create_global_step', (), '', True, 'import tensorflow as tf\n'), ((55, 27, 56, 121), 'tensorflow.placeholder', 'tf.placeholder', (), '', True, 'import tensorflow as tf\n'), ((57, 19, 58, 121), 'tensorflow.placeholder', 'tf.placeholder', (), '', True, 'import tensorflow as tf\n'), ((66, 32, 67, 31), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', ({(67, 12, 67, 30): "self.log_dir + 'D'"}, {}), "(self.log_dir + 'D')", True, 'import tensorflow as tf\n'), ((68, 38, 69, 37), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', ({(69, 12, 69, 36): "self.log_dir + 'D_valid'"}, {}), "(self.log_dir + 'D_valid')", True, 'import tensorflow as tf\n'), ((122, 22, 122, 46), 'tensorflow.trainable_variables', 'tf.trainable_variables', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((132, 15, 132, 53), 'tensorflow.maximum', 'tf.maximum', ({(132, 26, 132, 34): 'features', (132, 36, 132, 52): '(alpha * features)'}, {}), '(features, alpha * features)', True, 'import tensorflow as tf\n'), ((63, 47, 63, 69), 'tensorflow.get_collection', 'tf.get_collection', ({(63, 65, 63, 68): '"""D"""'}, {}), "('D')", True, 'import tensorflow as tf\n'), ((65, 12, 65, 40), 'tensorflow.get_collection', 'tf.get_collection', ({(65, 30, 65, 39): '"""D_valid"""'}, {}), "('D_valid')", True, 'import tensorflow as tf\n'), ((72, 13, 72, 34), 'tensorflow.name_scope', 'tf.name_scope', ({(72, 27, 72, 33): '"""WGAN"""'}, {}), "('WGAN')", True, 'import tensorflow as tf\n'), ((91, 12, 92, 59), 'tensorflow.summary.scalar', 'tf.summary.scalar', (), '', True, 'import tensorflow as tf\n'), ((93, 12, 93, 66), 'tensorflow.summary.scalar', 'tf.summary.scalar', (), '', True, 'import tensorflow as tf\n'), ((94, 12, 94, 66), 'tensorflow.summary.scalar', 'tf.summary.scalar', (), '', True, 'import tensorflow as tf\n'), ((95, 12, 95, 70), 'tensorflow.summary.scalar', 'tf.summary.scalar', (), '', True, 'import tensorflow as tf\n'), ((113, 12, 114, 64), 'tensorflow.summary.histogram', 'tf.summary.histogram', (), '', True, 'import tensorflow as tf\n'), ((154, 13, 154, 48), 'tensorflow.variable_scope', 'tf.variable_scope', (), '', True, 'import tensorflow as tf\n'), ((157, 21, 157, 63), 'tensorflow.transpose', 'tf.transpose', (), '', True, 'import tensorflow as tf\n'), ((159, 21, 159, 68), 'tensorflow.unstack', 'tf.unstack', (), '', True, 'import tensorflow as tf\n'), ((225, 24, 225, 53), 'tensorflow.stack', 'tf.stack', (), '', True, 'import tensorflow as tf\n'), ((227, 23, 227, 56), 'tensorflow.reduce_mean', 'tf.reduce_mean', (), '', True, 'import tensorflow as tf\n'), ((234, 13, 234, 36), 'tensorflow.name_scope', 'tf.name_scope', ({(234, 27, 234, 35): '"""D_loss"""'}, {}), "('D_loss')", True, 'import tensorflow as tf\n'), ((236, 22, 237, 70), 'tensorflow.random_uniform', 'tf.random_uniform', (), '', True, 'import tensorflow as tf\n'), ((244, 24, 244, 37), 'tensorflow.sqrt', 'tf.sqrt', ({(244, 32, 244, 36): 'sum_'}, {}), '(sum_)', True, 'import tensorflow as tf\n'), ((247, 21, 247, 43), 'tensorflow.reduce_mean', 'tf.reduce_mean', ({(247, 36, 247, 42): 'D_fake'}, {}), '(D_fake)', True, 'import tensorflow as tf\n'), ((248, 21, 248, 43), 'tensorflow.reduce_mean', 'tf.reduce_mean', ({(248, 36, 248, 42): 'D_real'}, {}), '(D_real)', True, 'import tensorflow as tf\n'), ((80, 17, 80, 45), 'tensorflow.name_scope', 'tf.name_scope', ({(80, 31, 80, 44): '"""D_optimizer"""'}, {}), "('D_optimizer')", True, 'import tensorflow as tf\n'), ((81, 30, 82, 75), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (), '', True, 'import tensorflow as tf\n'), ((83, 26, 83, 62), 'tensorflow.gradients', 'tf.gradients', ({(83, 39, 83, 52): 'self.__D_loss', (83, 54, 83, 61): 'theta_D'}, {}), '(self.__D_loss, theta_D)', True, 'import tensorflow as tf\n'), ((198, 17, 198, 49), 'tensorflow.variable_scope', 'tf.variable_scope', ({(198, 35, 198, 48): '"""stack_blstm"""'}, {}), "('stack_blstm')", True, 'import tensorflow as tf\n'), ((208, 17, 208, 44), 'tensorflow.variable_scope', 'tf.variable_scope', ({(208, 35, 208, 43): '"""output"""'}, {}), "('output')", True, 'import tensorflow as tf\n'), ((242, 33, 242, 48), 'tensorflow.square', 'tf.square', ({(242, 43, 242, 47): 'grad'}, {}), '(grad)', True, 'import tensorflow as tf\n'), ((138, 57, 138, 80), 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((163, 21, 163, 46), 'tensorflow.variable_scope', 'tf.variable_scope', ({(163, 39, 163, 45): '"""conv"""'}, {}), "('conv')", True, 'import tensorflow as tf\n'), ((246, 16, 246, 42), 'tensorflow.square', 'tf.square', ({(246, 26, 246, 41): '(grad_norm - 1.0)'}, {}), '(grad_norm - 1.0)', True, 'import tensorflow as tf\n'), ((185, 25, 185, 48), 'tensorflow.variable_scope', 'tf.variable_scope', ({(185, 43, 185, 47): '"""fc"""'}, {}), "('fc')", True, 'import tensorflow as tf\n'), ((186, 37, 186, 63), 'tensorflow.contrib.layers.flatten', 'layers.flatten', ({(186, 52, 186, 62): 'next_input'}, {}), '(next_input)', False, 'from tensorflow.contrib import layers\n'), ((212, 25, 212, 48), 'tensorflow.variable_scope', 'tf.variable_scope', ({(212, 43, 212, 47): '"""fc"""'}, {}), "('fc')", True, 'import tensorflow as tf\n'), ((165, 24, 165, 47), 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((191, 48, 192, 46), 'tensorflow.contrib.layers.xavier_initializer', 'layers.xavier_initializer', (), '', False, 'from tensorflow.contrib import layers\n'), ((193, 47, 193, 69), 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((211, 24, 211, 47), 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((217, 48, 218, 46), 'tensorflow.contrib.layers.xavier_initializer', 'layers.xavier_initializer', (), '', False, 'from tensorflow.contrib import layers\n'), ((219, 47, 219, 69), 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((177, 52, 178, 50), 'tensorflow.contrib.layers.xavier_initializer', 'layers.xavier_initializer', (), '', False, 'from tensorflow.contrib import layers\n'), ((180, 51, 180, 73), 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ({}, {}), '()', True, 'import tensorflow as tf\n')] |
trsvchn/captum | tests/attr/test_kernel_shap.py | 0435ff10a71724a788bdc54f01324f4f5c788541 | #!/usr/bin/env python3
import io
import unittest
import unittest.mock
from typing import Any, Callable, List, Tuple, Union
import torch
from captum._utils.typing import BaselineType, TensorOrTupleOfTensorsGeneric
from captum.attr._core.kernel_shap import KernelShap
from tests.helpers.basic import (
BaseTest,
assertTensorAlmostEqual,
assertTensorTuplesAlmostEqual,
)
from tests.helpers.basic_models import (
BasicModel_MultiLayer,
BasicModel_MultiLayer_MultiInput,
)
class Test(BaseTest):
def setUp(self) -> None:
super().setUp()
try:
import sklearn # noqa: F401
assert (
sklearn.__version__ >= "0.23.0"
), "Must have sklearn version 0.23.0 or higher"
except (ImportError, AssertionError):
raise unittest.SkipTest("Skipping KernelShap tests, sklearn not available.")
def test_linear_kernel_shap(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[20.0, 50.0, 30.0]], requires_grad=True)
baseline = torch.tensor([[10.0, 20.0, 10.0]], requires_grad=True)
self._kernel_shap_test_assert(
net,
inp,
[40.0, 120.0, 80.0],
n_samples=500,
baselines=baseline,
expected_coefs=[40.0, 120.0, 80.0],
)
def test_simple_kernel_shap(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[20.0, 50.0, 30.0]], requires_grad=True)
self._kernel_shap_test_assert(
net,
inp,
[76.66666, 196.66666, 116.66666],
perturbations_per_eval=(1, 2, 3),
n_samples=500,
)
def test_simple_kernel_shap_with_mask(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[20.0, 50.0, 30.0]], requires_grad=True)
self._kernel_shap_test_assert(
net,
inp,
[275.0, 275.0, 115.0],
feature_mask=torch.tensor([[0, 0, 1]]),
perturbations_per_eval=(1, 2, 3),
expected_coefs=[275.0, 115.0],
)
@unittest.mock.patch("sys.stderr", new_callable=io.StringIO)
def test_simple_kernel_shap_with_show_progress(self, mock_stderr) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[20.0, 50.0, 30.0]], requires_grad=True)
# test progress output for each batch size
for bsz in (1, 2, 3):
self._kernel_shap_test_assert(
net,
inp,
[76.66666, 196.66666, 116.66666],
perturbations_per_eval=(bsz,),
n_samples=500,
show_progress=True,
)
output = mock_stderr.getvalue()
# to test if progress calculation aligns with the actual iteration
# all perturbations_per_eval should reach progress of 100%
assert (
"Kernel Shap attribution: 100%" in output
), f"Error progress output: {repr(output)}"
mock_stderr.seek(0)
mock_stderr.truncate(0)
def test_simple_kernel_shap_with_baselines(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[20.0, 50.0, 30.0]])
self._kernel_shap_test_assert(
net,
inp,
[248.0, 248.0, 104.0],
feature_mask=torch.tensor([[0, 0, 1]]),
baselines=4,
perturbations_per_eval=(1, 2, 3),
)
def test_simple_batch_kernel_shap(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[2.0, 10.0, 3.0], [20.0, 50.0, 30.0]], requires_grad=True)
self._kernel_shap_test_assert(
net,
inp,
[[7.0, 32.5, 10.5], [76.66666, 196.66666, 116.66666]],
perturbations_per_eval=(1, 2, 3),
n_samples=20000,
)
def test_simple_batch_kernel_shap_with_mask(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[2.0, 10.0, 3.0], [20.0, 50.0, 30.0]], requires_grad=True)
self._kernel_shap_test_assert(
net,
inp,
[[39.5, 39.5, 10.5], [275.0, 275.0, 115.0]],
feature_mask=torch.tensor([[0, 0, 1], [1, 1, 0]]),
perturbations_per_eval=(1, 2, 3),
n_samples=100,
expected_coefs=[[39.5, 10.5], [115.0, 275.0]],
)
def test_multi_input_kernel_shap_without_mask(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[23.0, 0.0, 0.0]])
inp2 = torch.tensor([[20.0, 0.0, 50.0]])
inp3 = torch.tensor([[0.0, 100.0, 10.0]])
expected = (
[[90, 0, 0]],
[[78, 0, 198]],
[[0, 398, 38]],
)
self._kernel_shap_test_assert(
net,
(inp1, inp2, inp3),
expected,
additional_input=(1,),
n_samples=2000,
)
def test_multi_input_kernel_shap_with_mask(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[20.0, 50.0, 30.0]])
inp2 = torch.tensor([[0.0, 100.0, 0.0]])
inp3 = torch.tensor([[2.0, 10.0, 3.0]])
mask1 = torch.tensor([[0, 1, 0]])
mask2 = torch.tensor([[0, 1, 2]])
mask3 = torch.tensor([[0, 0, 0]])
expected = (
[[255.0, 595.0, 255.0]],
[[255.0, 595.0, 0.0]],
[[255.0, 255.0, 255.0]],
)
self._kernel_shap_test_assert(
net,
(inp1, inp2, inp3),
expected,
additional_input=(1,),
feature_mask=(mask1, mask2, mask3),
)
expected_with_baseline = (
[[184, 580.0, 184]],
[[184, 580.0, -12.0]],
[[184, 184, 184]],
)
self._kernel_shap_test_assert(
net,
(inp1, inp2, inp3),
expected_with_baseline,
additional_input=(1,),
feature_mask=(mask1, mask2, mask3),
baselines=(2, 3.0, 4),
perturbations_per_eval=(1, 2, 3),
)
def test_multi_input_batch_kernel_shap_without_mask(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[23.0, 0.0, 0.0], [20.0, 50.0, 30.0]])
inp2 = torch.tensor([[20.0, 0.0, 50.0], [0.0, 100.0, 0.0]])
inp3 = torch.tensor([[0.0, 100.0, 10.0], [0.0, 10.0, 0.0]])
expected = (
[[90, 0, 0], [78.0, 198.0, 118.0]],
[[78, 0, 198], [0.0, 398.0, 0.0]],
[[0, 398, 38], [0.0, 38.0, 0.0]],
)
self._kernel_shap_test_assert(
net,
(inp1, inp2, inp3),
expected,
additional_input=(1,),
n_samples=2500,
expected_coefs=[
[90.0, 0, 0, 78, 0, 198, 0, 398, 38],
[78.0, 198.0, 118.0, 0.0, 398.0, 0.0, 0.0, 38.0, 0.0],
],
)
def test_multi_input_batch_kernel_shap(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[23.0, 100.0, 0.0], [20.0, 50.0, 30.0]])
inp2 = torch.tensor([[20.0, 50.0, 30.0], [0.0, 100.0, 0.0]])
inp3 = torch.tensor([[0.0, 100.0, 10.0], [2.0, 10.0, 3.0]])
mask1 = torch.tensor([[1, 1, 1], [0, 1, 0]])
mask2 = torch.tensor([[0, 1, 2]])
mask3 = torch.tensor([[0, 1, 2], [0, 0, 0]])
expected = (
[[1088.6666, 1088.6666, 1088.6666], [255.0, 595.0, 255.0]],
[[76.6666, 1088.6666, 156.6666], [255.0, 595.0, 0.0]],
[[76.6666, 1088.6666, 156.6666], [255.0, 255.0, 255.0]],
)
self._kernel_shap_test_assert(
net,
(inp1, inp2, inp3),
expected,
additional_input=(1,),
feature_mask=(mask1, mask2, mask3),
n_samples=300,
)
expected_with_baseline = (
[[1040, 1040, 1040], [184, 580.0, 184]],
[[52, 1040, 132], [184, 580.0, -12.0]],
[[52, 1040, 132], [184, 184, 184]],
)
self._kernel_shap_test_assert(
net,
(inp1, inp2, inp3),
expected_with_baseline,
additional_input=(1,),
feature_mask=(mask1, mask2, mask3),
baselines=(2, 3.0, 4),
perturbations_per_eval=(1, 2, 3),
)
# Remaining tests are for cases where forward function returns a scalar
# as either a float, integer, 0d tensor or 1d tensor.
def test_single_kernel_shap_scalar_float(self) -> None:
net = BasicModel_MultiLayer()
self._single_input_scalar_kernel_shap_assert(
lambda inp: torch.sum(net(inp)).item()
)
def test_single_kernel_shap_scalar_tensor_0d(self) -> None:
net = BasicModel_MultiLayer()
self._single_input_scalar_kernel_shap_assert(lambda inp: torch.sum(net(inp)))
def test_single_kernel_shap_scalar_tensor_1d(self) -> None:
net = BasicModel_MultiLayer()
self._single_input_scalar_kernel_shap_assert(
lambda inp: torch.sum(net(inp)).reshape(1)
)
def test_single_kernel_shap_scalar_int(self) -> None:
net = BasicModel_MultiLayer()
self._single_input_scalar_kernel_shap_assert(
lambda inp: int(torch.sum(net(inp)).item())
)
def _single_input_scalar_kernel_shap_assert(self, func: Callable) -> None:
inp = torch.tensor([[2.0, 10.0, 3.0]], requires_grad=True)
mask = torch.tensor([[0, 0, 1]])
self._kernel_shap_test_assert(
func,
inp,
[[79.0, 79.0, 21.0]],
feature_mask=mask,
perturbations_per_eval=(1,),
target=None,
)
def test_multi_inp_kernel_shap_scalar_tensor_0d(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
self._multi_input_scalar_kernel_shap_assert(lambda *inp: torch.sum(net(*inp)))
def test_multi_inp_kernel_shap_scalar_tensor_1d(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
self._multi_input_scalar_kernel_shap_assert(
lambda *inp: torch.sum(net(*inp)).reshape(1)
)
def test_multi_inp_kernel_shap_scalar_tensor_int(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
self._multi_input_scalar_kernel_shap_assert(
lambda *inp: int(torch.sum(net(*inp)).item())
)
def test_multi_inp_kernel_shap_scalar_float(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
self._multi_input_scalar_kernel_shap_assert(
lambda *inp: torch.sum(net(*inp)).item()
)
def _multi_input_scalar_kernel_shap_assert(self, func: Callable) -> None:
inp1 = torch.tensor([[23.0, 100.0, 0.0], [20.0, 50.0, 30.0]])
inp2 = torch.tensor([[20.0, 50.0, 30.0], [0.0, 100.0, 0.0]])
inp3 = torch.tensor([[0.0, 100.0, 10.0], [20.0, 10.0, 13.0]])
mask1 = torch.tensor([[1, 1, 1]])
mask2 = torch.tensor([[0, 1, 2]])
mask3 = torch.tensor([[0, 1, 2]])
expected = (
[[3850.6666, 3850.6666, 3850.6666]],
[[306.6666, 3850.6666, 410.6666]],
[[306.6666, 3850.6666, 410.6666]],
)
self._kernel_shap_test_assert(
func,
(inp1, inp2, inp3),
expected,
additional_input=(1,),
feature_mask=(mask1, mask2, mask3),
perturbations_per_eval=(1,),
target=None,
n_samples=1500,
)
def _kernel_shap_test_assert(
self,
model: Callable,
test_input: TensorOrTupleOfTensorsGeneric,
expected_attr,
feature_mask: Union[None, TensorOrTupleOfTensorsGeneric] = None,
additional_input: Any = None,
perturbations_per_eval: Tuple[int, ...] = (1,),
baselines: BaselineType = None,
target: Union[None, int] = 0,
n_samples: int = 100,
delta: float = 1.0,
expected_coefs: Union[None, List[float], List[List[float]]] = None,
show_progress: bool = False,
) -> None:
for batch_size in perturbations_per_eval:
kernel_shap = KernelShap(model)
attributions = kernel_shap.attribute(
test_input,
target=target,
feature_mask=feature_mask,
additional_forward_args=additional_input,
baselines=baselines,
perturbations_per_eval=batch_size,
n_samples=n_samples,
show_progress=show_progress,
)
assertTensorTuplesAlmostEqual(
self, attributions, expected_attr, delta=delta, mode="max"
)
if expected_coefs is not None:
# Test with return_input_shape = False
attributions = kernel_shap.attribute(
test_input,
target=target,
feature_mask=feature_mask,
additional_forward_args=additional_input,
baselines=baselines,
perturbations_per_eval=batch_size,
n_samples=n_samples,
return_input_shape=False,
show_progress=show_progress,
)
assertTensorAlmostEqual(
self, attributions, expected_coefs, delta=delta, mode="max"
)
if __name__ == "__main__":
unittest.main()
| [((71, 5, 71, 64), 'unittest.mock.patch', 'unittest.mock.patch', (), '', False, 'import unittest\n'), ((377, 4, 377, 19), 'unittest.main', 'unittest.main', ({}, {}), '()', False, 'import unittest\n'), ((35, 14, 35, 37), 'tests.helpers.basic_models.BasicModel_MultiLayer', 'BasicModel_MultiLayer', ({}, {}), '()', False, 'from tests.helpers.basic_models import BasicModel_MultiLayer, BasicModel_MultiLayer_MultiInput\n'), ((36, 14, 36, 68), 'torch.tensor', 'torch.tensor', (), '', False, 'import torch\n'), ((37, 19, 37, 73), 'torch.tensor', 'torch.tensor', (), '', False, 'import torch\n'), ((49, 14, 49, 37), 'tests.helpers.basic_models.BasicModel_MultiLayer', 'BasicModel_MultiLayer', ({}, {}), '()', False, 'from tests.helpers.basic_models import BasicModel_MultiLayer, BasicModel_MultiLayer_MultiInput\n'), ((50, 14, 50, 68), 'torch.tensor', 'torch.tensor', (), '', False, 'import torch\n'), ((60, 14, 60, 37), 'tests.helpers.basic_models.BasicModel_MultiLayer', 'BasicModel_MultiLayer', ({}, {}), '()', False, 'from tests.helpers.basic_models import BasicModel_MultiLayer, BasicModel_MultiLayer_MultiInput\n'), ((61, 14, 61, 68), 'torch.tensor', 'torch.tensor', (), '', False, 'import torch\n'), ((73, 14, 73, 37), 'tests.helpers.basic_models.BasicModel_MultiLayer', 'BasicModel_MultiLayer', ({}, {}), '()', False, 'from tests.helpers.basic_models import BasicModel_MultiLayer, BasicModel_MultiLayer_MultiInput\n'), ((74, 14, 74, 68), 'torch.tensor', 'torch.tensor', (), '', False, 'import torch\n'), ((98, 14, 98, 37), 'tests.helpers.basic_models.BasicModel_MultiLayer', 'BasicModel_MultiLayer', ({}, {}), '()', False, 'from tests.helpers.basic_models import BasicModel_MultiLayer, BasicModel_MultiLayer_MultiInput\n'), ((99, 14, 99, 48), 'torch.tensor', 'torch.tensor', ({(99, 27, 99, 47): '[[20.0, 50.0, 30.0]]'}, {}), '([[20.0, 50.0, 30.0]])', False, 'import torch\n'), ((110, 14, 110, 37), 'tests.helpers.basic_models.BasicModel_MultiLayer', 'BasicModel_MultiLayer', ({}, {}), '()', False, 'from tests.helpers.basic_models import BasicModel_MultiLayer, BasicModel_MultiLayer_MultiInput\n'), ((111, 14, 111, 86), 'torch.tensor', 'torch.tensor', (), '', False, 'import torch\n'), ((121, 14, 121, 37), 'tests.helpers.basic_models.BasicModel_MultiLayer', 'BasicModel_MultiLayer', ({}, {}), '()', False, 'from tests.helpers.basic_models import BasicModel_MultiLayer, BasicModel_MultiLayer_MultiInput\n'), ((122, 14, 122, 86), 'torch.tensor', 'torch.tensor', (), '', False, 'import torch\n'), ((134, 14, 134, 48), 'tests.helpers.basic_models.BasicModel_MultiLayer_MultiInput', 'BasicModel_MultiLayer_MultiInput', ({}, {}), '()', False, 'from tests.helpers.basic_models import BasicModel_MultiLayer, BasicModel_MultiLayer_MultiInput\n'), ((135, 15, 135, 47), 'torch.tensor', 'torch.tensor', ({(135, 28, 135, 46): '[[23.0, 0.0, 0.0]]'}, {}), '([[23.0, 0.0, 0.0]])', False, 'import torch\n'), ((136, 15, 136, 48), 'torch.tensor', 'torch.tensor', ({(136, 28, 136, 47): '[[20.0, 0.0, 50.0]]'}, {}), '([[20.0, 0.0, 50.0]])', False, 'import torch\n'), ((137, 15, 137, 49), 'torch.tensor', 'torch.tensor', ({(137, 28, 137, 48): '[[0.0, 100.0, 10.0]]'}, {}), '([[0.0, 100.0, 10.0]])', False, 'import torch\n'), ((152, 14, 152, 48), 'tests.helpers.basic_models.BasicModel_MultiLayer_MultiInput', 'BasicModel_MultiLayer_MultiInput', ({}, {}), '()', False, 'from tests.helpers.basic_models import BasicModel_MultiLayer, BasicModel_MultiLayer_MultiInput\n'), ((153, 15, 153, 49), 'torch.tensor', 'torch.tensor', ({(153, 28, 153, 48): '[[20.0, 50.0, 30.0]]'}, {}), '([[20.0, 50.0, 30.0]])', False, 'import torch\n'), ((154, 15, 154, 48), 'torch.tensor', 'torch.tensor', ({(154, 28, 154, 47): '[[0.0, 100.0, 0.0]]'}, {}), '([[0.0, 100.0, 0.0]])', False, 'import torch\n'), ((155, 15, 155, 47), 'torch.tensor', 'torch.tensor', ({(155, 28, 155, 46): '[[2.0, 10.0, 3.0]]'}, {}), '([[2.0, 10.0, 3.0]])', False, 'import torch\n'), ((156, 16, 156, 41), 'torch.tensor', 'torch.tensor', ({(156, 29, 156, 40): '[[0, 1, 0]]'}, {}), '([[0, 1, 0]])', False, 'import torch\n'), ((157, 16, 157, 41), 'torch.tensor', 'torch.tensor', ({(157, 29, 157, 40): '[[0, 1, 2]]'}, {}), '([[0, 1, 2]])', False, 'import torch\n'), ((158, 16, 158, 41), 'torch.tensor', 'torch.tensor', ({(158, 29, 158, 40): '[[0, 0, 0]]'}, {}), '([[0, 0, 0]])', False, 'import torch\n'), ((187, 14, 187, 48), 'tests.helpers.basic_models.BasicModel_MultiLayer_MultiInput', 'BasicModel_MultiLayer_MultiInput', ({}, {}), '()', False, 'from tests.helpers.basic_models import BasicModel_MultiLayer, BasicModel_MultiLayer_MultiInput\n'), ((188, 15, 188, 67), 'torch.tensor', 'torch.tensor', ({(188, 28, 188, 66): '[[23.0, 0.0, 0.0], [20.0, 50.0, 30.0]]'}, {}), '([[23.0, 0.0, 0.0], [20.0, 50.0, 30.0]])', False, 'import torch\n'), ((189, 15, 189, 67), 'torch.tensor', 'torch.tensor', ({(189, 28, 189, 66): '[[20.0, 0.0, 50.0], [0.0, 100.0, 0.0]]'}, {}), '([[20.0, 0.0, 50.0], [0.0, 100.0, 0.0]])', False, 'import torch\n'), ((190, 15, 190, 67), 'torch.tensor', 'torch.tensor', ({(190, 28, 190, 66): '[[0.0, 100.0, 10.0], [0.0, 10.0, 0.0]]'}, {}), '([[0.0, 100.0, 10.0], [0.0, 10.0, 0.0]])', False, 'import torch\n'), ((209, 14, 209, 48), 'tests.helpers.basic_models.BasicModel_MultiLayer_MultiInput', 'BasicModel_MultiLayer_MultiInput', ({}, {}), '()', False, 'from tests.helpers.basic_models import BasicModel_MultiLayer, BasicModel_MultiLayer_MultiInput\n'), ((210, 15, 210, 69), 'torch.tensor', 'torch.tensor', ({(210, 28, 210, 68): '[[23.0, 100.0, 0.0], [20.0, 50.0, 30.0]]'}, {}), '([[23.0, 100.0, 0.0], [20.0, 50.0, 30.0]])', False, 'import torch\n'), ((211, 15, 211, 68), 'torch.tensor', 'torch.tensor', ({(211, 28, 211, 67): '[[20.0, 50.0, 30.0], [0.0, 100.0, 0.0]]'}, {}), '([[20.0, 50.0, 30.0], [0.0, 100.0, 0.0]])', False, 'import torch\n'), ((212, 15, 212, 67), 'torch.tensor', 'torch.tensor', ({(212, 28, 212, 66): '[[0.0, 100.0, 10.0], [2.0, 10.0, 3.0]]'}, {}), '([[0.0, 100.0, 10.0], [2.0, 10.0, 3.0]])', False, 'import torch\n'), ((213, 16, 213, 52), 'torch.tensor', 'torch.tensor', ({(213, 29, 213, 51): '[[1, 1, 1], [0, 1, 0]]'}, {}), '([[1, 1, 1], [0, 1, 0]])', False, 'import torch\n'), ((214, 16, 214, 41), 'torch.tensor', 'torch.tensor', ({(214, 29, 214, 40): '[[0, 1, 2]]'}, {}), '([[0, 1, 2]])', False, 'import torch\n'), ((215, 16, 215, 52), 'torch.tensor', 'torch.tensor', ({(215, 29, 215, 51): '[[0, 1, 2], [0, 0, 0]]'}, {}), '([[0, 1, 2], [0, 0, 0]])', False, 'import torch\n'), ((247, 14, 247, 37), 'tests.helpers.basic_models.BasicModel_MultiLayer', 'BasicModel_MultiLayer', ({}, {}), '()', False, 'from tests.helpers.basic_models import BasicModel_MultiLayer, BasicModel_MultiLayer_MultiInput\n'), ((253, 14, 253, 37), 'tests.helpers.basic_models.BasicModel_MultiLayer', 'BasicModel_MultiLayer', ({}, {}), '()', False, 'from tests.helpers.basic_models import BasicModel_MultiLayer, BasicModel_MultiLayer_MultiInput\n'), ((257, 14, 257, 37), 'tests.helpers.basic_models.BasicModel_MultiLayer', 'BasicModel_MultiLayer', ({}, {}), '()', False, 'from tests.helpers.basic_models import BasicModel_MultiLayer, BasicModel_MultiLayer_MultiInput\n'), ((263, 14, 263, 37), 'tests.helpers.basic_models.BasicModel_MultiLayer', 'BasicModel_MultiLayer', ({}, {}), '()', False, 'from tests.helpers.basic_models import BasicModel_MultiLayer, BasicModel_MultiLayer_MultiInput\n'), ((269, 14, 269, 66), 'torch.tensor', 'torch.tensor', (), '', False, 'import torch\n'), ((270, 15, 270, 40), 'torch.tensor', 'torch.tensor', ({(270, 28, 270, 39): '[[0, 0, 1]]'}, {}), '([[0, 0, 1]])', False, 'import torch\n'), ((282, 14, 282, 48), 'tests.helpers.basic_models.BasicModel_MultiLayer_MultiInput', 'BasicModel_MultiLayer_MultiInput', ({}, {}), '()', False, 'from tests.helpers.basic_models import BasicModel_MultiLayer, BasicModel_MultiLayer_MultiInput\n'), ((286, 14, 286, 48), 'tests.helpers.basic_models.BasicModel_MultiLayer_MultiInput', 'BasicModel_MultiLayer_MultiInput', ({}, {}), '()', False, 'from tests.helpers.basic_models import BasicModel_MultiLayer, BasicModel_MultiLayer_MultiInput\n'), ((292, 14, 292, 48), 'tests.helpers.basic_models.BasicModel_MultiLayer_MultiInput', 'BasicModel_MultiLayer_MultiInput', ({}, {}), '()', False, 'from tests.helpers.basic_models import BasicModel_MultiLayer, BasicModel_MultiLayer_MultiInput\n'), ((298, 14, 298, 48), 'tests.helpers.basic_models.BasicModel_MultiLayer_MultiInput', 'BasicModel_MultiLayer_MultiInput', ({}, {}), '()', False, 'from tests.helpers.basic_models import BasicModel_MultiLayer, BasicModel_MultiLayer_MultiInput\n'), ((304, 15, 304, 69), 'torch.tensor', 'torch.tensor', ({(304, 28, 304, 68): '[[23.0, 100.0, 0.0], [20.0, 50.0, 30.0]]'}, {}), '([[23.0, 100.0, 0.0], [20.0, 50.0, 30.0]])', False, 'import torch\n'), ((305, 15, 305, 68), 'torch.tensor', 'torch.tensor', ({(305, 28, 305, 67): '[[20.0, 50.0, 30.0], [0.0, 100.0, 0.0]]'}, {}), '([[20.0, 50.0, 30.0], [0.0, 100.0, 0.0]])', False, 'import torch\n'), ((306, 15, 306, 69), 'torch.tensor', 'torch.tensor', ({(306, 28, 306, 68): '[[0.0, 100.0, 10.0], [20.0, 10.0, 13.0]]'}, {}), '([[0.0, 100.0, 10.0], [20.0, 10.0, 13.0]])', False, 'import torch\n'), ((307, 16, 307, 41), 'torch.tensor', 'torch.tensor', ({(307, 29, 307, 40): '[[1, 1, 1]]'}, {}), '([[1, 1, 1]])', False, 'import torch\n'), ((308, 16, 308, 41), 'torch.tensor', 'torch.tensor', ({(308, 29, 308, 40): '[[0, 1, 2]]'}, {}), '([[0, 1, 2]])', False, 'import torch\n'), ((309, 16, 309, 41), 'torch.tensor', 'torch.tensor', ({(309, 29, 309, 40): '[[0, 1, 2]]'}, {}), '([[0, 1, 2]])', False, 'import torch\n'), ((343, 26, 343, 43), 'captum.attr._core.kernel_shap.KernelShap', 'KernelShap', ({(343, 37, 343, 42): 'model'}, {}), '(model)', False, 'from captum.attr._core.kernel_shap import KernelShap\n'), ((354, 12, 356, 13), 'tests.helpers.basic.assertTensorTuplesAlmostEqual', 'assertTensorTuplesAlmostEqual', (), '', False, 'from tests.helpers.basic import BaseTest, assertTensorAlmostEqual, assertTensorTuplesAlmostEqual\n'), ((32, 18, 32, 88), 'unittest.SkipTest', 'unittest.SkipTest', ({(32, 36, 32, 87): '"""Skipping KernelShap tests, sklearn not available."""'}, {}), "('Skipping KernelShap tests, sklearn not available.')", False, 'import unittest\n'), ((66, 25, 66, 50), 'torch.tensor', 'torch.tensor', ({(66, 38, 66, 49): '[[0, 0, 1]]'}, {}), '([[0, 0, 1]])', False, 'import torch\n'), ((104, 25, 104, 50), 'torch.tensor', 'torch.tensor', ({(104, 38, 104, 49): '[[0, 0, 1]]'}, {}), '([[0, 0, 1]])', False, 'import torch\n'), ((127, 25, 127, 61), 'torch.tensor', 'torch.tensor', ({(127, 38, 127, 60): '[[0, 0, 1], [1, 1, 0]]'}, {}), '([[0, 0, 1], [1, 1, 0]])', False, 'import torch\n'), ((371, 16, 373, 17), 'tests.helpers.basic.assertTensorAlmostEqual', 'assertTensorAlmostEqual', (), '', False, 'from tests.helpers.basic import BaseTest, assertTensorAlmostEqual, assertTensorTuplesAlmostEqual\n')] |
volCommunity/vol-crawlers | vol/items.py | d046a23a1a778ed1c1ed483bd565ecb6a23898e5 | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class JobItem(scrapy.Item):
title = scrapy.Field()
url = scrapy.Field()
text = scrapy.Field()
labels = scrapy.Field()
city = scrapy.Field()
region = scrapy.Field()
country = scrapy.Field()
sites = scrapy.Field()
organisation = scrapy.Field()
organisation_id = scrapy.Field()
organisation_url = scrapy.Field()
site_name = scrapy.Field()
site_url = scrapy.Field()
api_url = scrapy.Field()
class OrganisationItem(scrapy.Item):
name = scrapy.Field()
url = scrapy.Field()
description = scrapy.Field()
city = scrapy.Field()
region = scrapy.Field()
country = scrapy.Field()
class SiteItem(scrapy.Item):
name = scrapy.Field()
url = scrapy.Field()
| [((12, 12, 12, 26), 'scrapy.Field', 'scrapy.Field', ({}, {}), '()', False, 'import scrapy\n'), ((13, 10, 13, 24), 'scrapy.Field', 'scrapy.Field', ({}, {}), '()', False, 'import scrapy\n'), ((14, 11, 14, 25), 'scrapy.Field', 'scrapy.Field', ({}, {}), '()', False, 'import scrapy\n'), ((15, 13, 15, 27), 'scrapy.Field', 'scrapy.Field', ({}, {}), '()', False, 'import scrapy\n'), ((16, 11, 16, 25), 'scrapy.Field', 'scrapy.Field', ({}, {}), '()', False, 'import scrapy\n'), ((17, 13, 17, 27), 'scrapy.Field', 'scrapy.Field', ({}, {}), '()', False, 'import scrapy\n'), ((18, 14, 18, 28), 'scrapy.Field', 'scrapy.Field', ({}, {}), '()', False, 'import scrapy\n'), ((19, 12, 19, 26), 'scrapy.Field', 'scrapy.Field', ({}, {}), '()', False, 'import scrapy\n'), ((20, 19, 20, 33), 'scrapy.Field', 'scrapy.Field', ({}, {}), '()', False, 'import scrapy\n'), ((21, 22, 21, 36), 'scrapy.Field', 'scrapy.Field', ({}, {}), '()', False, 'import scrapy\n'), ((22, 23, 22, 37), 'scrapy.Field', 'scrapy.Field', ({}, {}), '()', False, 'import scrapy\n'), ((23, 16, 23, 30), 'scrapy.Field', 'scrapy.Field', ({}, {}), '()', False, 'import scrapy\n'), ((24, 15, 24, 29), 'scrapy.Field', 'scrapy.Field', ({}, {}), '()', False, 'import scrapy\n'), ((25, 14, 25, 28), 'scrapy.Field', 'scrapy.Field', ({}, {}), '()', False, 'import scrapy\n'), ((29, 11, 29, 25), 'scrapy.Field', 'scrapy.Field', ({}, {}), '()', False, 'import scrapy\n'), ((30, 10, 30, 24), 'scrapy.Field', 'scrapy.Field', ({}, {}), '()', False, 'import scrapy\n'), ((31, 18, 31, 32), 'scrapy.Field', 'scrapy.Field', ({}, {}), '()', False, 'import scrapy\n'), ((32, 11, 32, 25), 'scrapy.Field', 'scrapy.Field', ({}, {}), '()', False, 'import scrapy\n'), ((33, 13, 33, 27), 'scrapy.Field', 'scrapy.Field', ({}, {}), '()', False, 'import scrapy\n'), ((34, 14, 34, 28), 'scrapy.Field', 'scrapy.Field', ({}, {}), '()', False, 'import scrapy\n'), ((38, 11, 38, 25), 'scrapy.Field', 'scrapy.Field', ({}, {}), '()', False, 'import scrapy\n'), ((39, 10, 39, 24), 'scrapy.Field', 'scrapy.Field', ({}, {}), '()', False, 'import scrapy\n')] |
RamonvdW/nhb-apps | Beheer/tests.py | 5a9f840bfe066cd964174515c06b806a7b170c69 | # -*- coding: utf-8 -*-
# Copyright (c) 2020-2021 Ramon van der Winkel.
# All rights reserved.
# Licensed under BSD-3-Clause-Clear. See LICENSE file for details.
from django.conf import settings
from django.test import TestCase
from django.urls import reverse
from TestHelpers.e2ehelpers import E2EHelpers
# updaten met dit commando:
# for x in `./manage.py show_urls --settings=nhbapps.settings_dev | rev | cut -d'/' -f2- | rev | grep '/beheer/'`; do echo "'$x/',"; done | grep -vE ':object_id>/|/add/|/autocomplete/'
BEHEER_PAGINAS = (
'/beheer/Account/account/',
'/beheer/Account/accountemail/',
'/beheer/BasisTypen/boogtype/',
'/beheer/BasisTypen/indivwedstrijdklasse/',
'/beheer/BasisTypen/kalenderwedstrijdklasse/',
'/beheer/BasisTypen/leeftijdsklasse/',
'/beheer/BasisTypen/teamtype/',
'/beheer/BasisTypen/teamwedstrijdklasse/',
'/beheer/Competitie/competitie/',
'/beheer/Competitie/competitieklasse/',
'/beheer/Competitie/competitiemutatie/',
'/beheer/Competitie/deelcompetitie/',
'/beheer/Competitie/deelcompetitieklasselimiet/',
'/beheer/Competitie/deelcompetitieronde/',
'/beheer/Competitie/kampioenschapschutterboog/',
'/beheer/Competitie/regiocompetitierondeteam/',
'/beheer/Competitie/regiocompetitieschutterboog/',
'/beheer/Competitie/regiocompetitieteam/',
'/beheer/Competitie/regiocompetitieteampoule/',
'/beheer/Functie/functie/',
'/beheer/Functie/verklaringhanterenpersoonsgegevens/',
'/beheer/HistComp/histcompetitie/',
'/beheer/HistComp/histcompetitieindividueel/',
'/beheer/HistComp/histcompetitieteam/',
'/beheer/Kalender/kalenderwedstrijd/',
'/beheer/Kalender/kalenderwedstrijddeeluitslag/',
'/beheer/Kalender/kalenderwedstrijdsessie/',
'/beheer/Logboek/logboekregel/',
'/beheer/Mailer/mailqueue/',
'/beheer/NhbStructuur/nhbcluster/',
'/beheer/NhbStructuur/nhbrayon/',
'/beheer/NhbStructuur/nhbregio/',
'/beheer/NhbStructuur/nhbvereniging/',
'/beheer/NhbStructuur/speelsterkte/',
'/beheer/Overig/sitefeedback/',
'/beheer/Overig/sitetijdelijkeurl/',
'/beheer/Records/besteindivrecords/',
'/beheer/Records/indivrecord/',
'/beheer/Score/score/',
'/beheer/Score/scorehist/',
'/beheer/Sporter/sporter/',
'/beheer/Sporter/sporterboog/',
'/beheer/Sporter/sportervoorkeuren/',
'/beheer/Taken/taak/',
'/beheer/Wedstrijden/competitiewedstrijd/',
'/beheer/Wedstrijden/competitiewedstrijdenplan/',
'/beheer/Wedstrijden/competitiewedstrijduitslag/',
'/beheer/Wedstrijden/wedstrijdlocatie/',
'/beheer/auth/group/',
'/beheer/jsi18n/',
'/beheer/login/',
'/beheer/logout/',
'/beheer/password_change/',
)
class TestBeheer(E2EHelpers, TestCase):
""" unit tests voor de Beheer applicatie """
def setUp(self):
""" initialisatie van de test case """
self.account_admin = self.e2e_create_account_admin()
def test_login(self):
# controleer dat de admin login vervangen is door een redirect naar onze eigen login
url = reverse('admin:login') # interne url
self.assertEqual(url, '/beheer/login/')
self.e2e_logout()
with self.assert_max_queries(20):
resp = self.client.get('/beheer/login/', follow=True)
self.assertEqual(resp.redirect_chain[-1], ('/account/login/', 302))
with self.assert_max_queries(20):
resp = self.client.get('/beheer/login/?next=/records/', follow=True)
self.assertEqual(resp.redirect_chain[-1], ('/account/login/?next=/records/', 302))
self.e2e_assert_other_http_commands_not_supported('/beheer/login/')
def test_index(self):
# voordat 2FA verificatie gedaan is
self.e2e_login(self.account_admin)
# redirect naar wissel-van-rol pagina
with self.assert_max_queries(20):
resp = self.client.get('/beheer/', follow=True)
self.assertEqual(resp.redirect_chain[-1], ('/functie/otp-controle/?next=/beheer/', 302))
self.e2e_assert_other_http_commands_not_supported('/beheer/')
# na 2FA verificatie
self.e2e_login_and_pass_otp(self.account_admin)
with self.assert_max_queries(20):
resp = self.client.get('/beheer/', follow=True)
self.assertTrue(len(resp.redirect_chain) == 0)
self.assertEqual(resp.status_code, 200) # 200 = OK
self.assertContains(resp, '<title>Websitebeheer | Django-websitebeheer</title>')
# onnodig via beheer-login naar post-authenticatie pagina
with self.assert_max_queries(20):
resp = self.client.get('/beheer/login/?next=/records/', follow=True)
self.assertEqual(resp.redirect_chain[-1], ('/records/', 302))
# onnodig via beheer-login zonder post-authenticatie pagina
with self.assert_max_queries(20):
resp = self.client.get('/beheer/login/', follow=True)
self.assertEqual(resp.redirect_chain[-1], ('/plein/', 302))
def test_logout(self):
# controleer dat de admin login vervangen is door een redirect naar onze eigen login
url = reverse('admin:logout') # interne url
self.assertEqual(url, '/beheer/logout/')
self.e2e_login_and_pass_otp(self.account_admin)
with self.assert_max_queries(20):
resp = self.client.get('/beheer/logout/', follow=True)
self.assertEqual(resp.redirect_chain[-1], ('/account/logout/', 302))
def test_pw_change(self):
url = reverse('admin:password_change')
self.assertEqual(url, '/beheer/password_change/')
self.e2e_login_and_pass_otp(self.account_admin)
with self.assert_max_queries(20):
resp = self.client.get(url, follow=True)
self.assertEqual(resp.status_code, 200) # 200 = OK
self.assertContains(resp, 'Nieuw wachtwoord')
self.assertEqual(resp.redirect_chain[-1], ('/account/nieuw-wachtwoord/', 302))
def test_queries(self):
# controleer dat alle beheer pagina's het goed doen
settings.DEBUG = True
self.e2e_login_and_pass_otp(self.account_admin)
for url in BEHEER_PAGINAS:
with self.assert_max_queries(20):
self.client.get(url)
with self.assert_max_queries(20):
self.client.get(url + 'add/')
with self.assert_max_queries(20):
self.client.get(url + '1/change/')
# for
settings.DEBUG = False
# end of file
| [((81, 14, 81, 36), 'django.urls.reverse', 'reverse', ({(81, 22, 81, 35): '"""admin:login"""'}, {}), "('admin:login')", False, 'from django.urls import reverse\n'), ((126, 14, 126, 37), 'django.urls.reverse', 'reverse', ({(126, 22, 126, 36): '"""admin:logout"""'}, {}), "('admin:logout')", False, 'from django.urls import reverse\n'), ((135, 14, 135, 46), 'django.urls.reverse', 'reverse', ({(135, 22, 135, 45): '"""admin:password_change"""'}, {}), "('admin:password_change')", False, 'from django.urls import reverse\n')] |
Jingyan95/cmssw | L1Trigger/TrackFindingTracklet/python/ProducerKF_cff.py | f78d843f0837f269ee6811b0e0f4c0432928c190 | import FWCore.ParameterSet.Config as cms
from L1Trigger.TrackTrigger.ProducerSetup_cff import TrackTriggerSetup
from L1Trigger.TrackerTFP.Producer_cfi import TrackerTFPProducer_params
from L1Trigger.TrackerTFP.ProducerES_cff import TrackTriggerDataFormats
from L1Trigger.TrackerTFP.ProducerLayerEncoding_cff import TrackTriggerLayerEncoding
from L1Trigger.TrackerTFP.KalmanFilterFormats_cff import TrackTriggerKalmanFilterFormats
from L1Trigger.TrackFindingTracklet.ChannelAssignment_cff import ChannelAssignment
from L1Trigger.TrackFindingTracklet.ProducerKF_cfi import TrackFindingTrackletProducerKF_params
TrackFindingTrackletProducerKFin = cms.EDProducer( 'trklet::ProducerKFin', TrackFindingTrackletProducerKF_params )
TrackFindingTrackletProducerKF = cms.EDProducer( 'trackerTFP::ProducerKF', TrackFindingTrackletProducerKF_params )
TrackFindingTrackletProducerTT = cms.EDProducer( 'trklet::ProducerTT', TrackFindingTrackletProducerKF_params )
TrackFindingTrackletProducerAS = cms.EDProducer( 'trklet::ProducerAS', TrackFindingTrackletProducerKF_params )
TrackFindingTrackletProducerKFout = cms.EDProducer( 'trklet::ProducerKFout', TrackFindingTrackletProducerKF_params ) | [((11, 35, 11, 114), 'FWCore.ParameterSet.Config.EDProducer', 'cms.EDProducer', ({(11, 51, 11, 73): '"""trklet::ProducerKFin"""', (11, 75, 11, 112): 'TrackFindingTrackletProducerKF_params'}, {}), "('trklet::ProducerKFin', TrackFindingTrackletProducerKF_params)", True, 'import FWCore.ParameterSet.Config as cms\n'), ((12, 33, 12, 114), 'FWCore.ParameterSet.Config.EDProducer', 'cms.EDProducer', ({(12, 49, 12, 73): '"""trackerTFP::ProducerKF"""', (12, 75, 12, 112): 'TrackFindingTrackletProducerKF_params'}, {}), "('trackerTFP::ProducerKF', TrackFindingTrackletProducerKF_params)", True, 'import FWCore.ParameterSet.Config as cms\n'), ((13, 33, 13, 110), 'FWCore.ParameterSet.Config.EDProducer', 'cms.EDProducer', ({(13, 49, 13, 69): '"""trklet::ProducerTT"""', (13, 71, 13, 108): 'TrackFindingTrackletProducerKF_params'}, {}), "('trklet::ProducerTT', TrackFindingTrackletProducerKF_params)", True, 'import FWCore.ParameterSet.Config as cms\n'), ((14, 33, 14, 110), 'FWCore.ParameterSet.Config.EDProducer', 'cms.EDProducer', ({(14, 49, 14, 69): '"""trklet::ProducerAS"""', (14, 71, 14, 108): 'TrackFindingTrackletProducerKF_params'}, {}), "('trklet::ProducerAS', TrackFindingTrackletProducerKF_params)", True, 'import FWCore.ParameterSet.Config as cms\n'), ((15, 36, 15, 116), 'FWCore.ParameterSet.Config.EDProducer', 'cms.EDProducer', ({(15, 52, 15, 75): '"""trklet::ProducerKFout"""', (15, 77, 15, 114): 'TrackFindingTrackletProducerKF_params'}, {}), "('trklet::ProducerKFout', TrackFindingTrackletProducerKF_params)", True, 'import FWCore.ParameterSet.Config as cms\n')] |
ne-msft/py_cui | py_cui/__init__.py | b4938dd2c23a422496af7e32a33c2dbfcb348719 | """A python library for intuitively creating CUI/TUI interfaces with pre-built widgets.
"""
#
# Author: Jakub Wlodek
# Created: 12-Aug-2019
# Docs: https://jwlodek.github.io/py_cui-docs
# License: BSD-3-Clause (New/Revised)
#
# Some python core library imports
import sys
import os
import time
import copy
import shutil # We use shutil for getting the terminal dimensions
import threading # Threading is used for loading icon popups
import logging # Use logging library for debug purposes
# py_cui uses the curses library. On windows this does not exist, but
# there is a open source windows-curses module that adds curses support
# for python on windows
import curses
# py_cui imports
import py_cui
import py_cui.keys
import py_cui.statusbar
import py_cui.widgets
import py_cui.controls
import py_cui.dialogs
import py_cui.widget_set
import py_cui.popups
import py_cui.renderer
import py_cui.debug
import py_cui.errors
from py_cui.colors import *
# Version number
__version__ = '0.1.3'
def fit_text(width, text, center=False):
"""Fits text to screen size
Helper function to fit text within a given width. Used to fix issue with status/title bar text
being too long
Parameters
----------
width : int
width of window in characters
text : str
input text
center : Boolean
flag to center text
Returns
-------
fitted_text : str
text fixed depending on width
"""
if width < 5:
return '.' * width
if len(text) >= width:
return text[:width - 5] + '...'
else:
total_num_spaces = (width - len(text) - 1)
if center:
left_spaces = int(total_num_spaces / 2)
right_spaces = int(total_num_spaces / 2)
if(total_num_spaces % 2 == 1):
right_spaces = right_spaces + 1
return ' ' * left_spaces + text + ' ' * right_spaces
else:
return text + ' ' * total_num_spaces
class PyCUI:
"""Base CUI class
Main user interface class for py_cui. To create a user interface, you must
first create an instance of this class, and then add cells + widgets to it.
Attributes
----------
cursor_x, cursor_y : int
absolute position of the cursor in the CUI
grid : py_cui.grid.Grid
The main layout manager for the CUI
widgets : dict of str - py_cui.widgets.Widget
dict of widget in the grid
title_bar : py_cui.statusbar.StatusBar
a status bar object that gets drawn at the top of the CUI
status_bar : py_cui.statusbar.StatusBar
a status bar object that gets drawn at the bottom of the CUI
keybindings : list of py_cui.keybinding.KeyBinding
list of keybindings to check against in the main CUI loop
height, width : int
height of the terminal in characters, width of terminal in characters
exit_key : key_code
a key code for a key that exits the CUI
simulated_terminal : List[int]
Dimensions for an alternative simulated terminal (used for testing)
"""
def __init__(self, num_rows, num_cols, auto_focus_buttons=True,
exit_key=py_cui.keys.KEY_Q_LOWER, simulated_terminal=None):
"""Constructor for PyCUI class
"""
self._title = 'PyCUI Window'
# When this is not set, the escape character delay
# is too long for exiting focus mode
os.environ.setdefault('ESCDELAY', '25')
# For unit testing purposes, we want to simulate terminal
# dimensions so that we don't get errors
self._simulated_terminal = simulated_terminal
if self._simulated_terminal is None:
term_size = shutil.get_terminal_size()
height = term_size.lines
width = term_size.columns
else:
height = simulated_terminal[0]
width = simulated_terminal[1]
# Init terminal height width. Subtract 4 from height
# for title/status bar and padding
self._height = height
self._width = width
self._height = self._height - 4
# Add status and title bar
self.title_bar = py_cui.statusbar.StatusBar(self._title, BLACK_ON_WHITE)
exit_key_char = py_cui.keys.get_char_from_ascii(exit_key)
self._init_status_bar_text = 'Press - {} - to exit. Arrow Keys to move ' \
'between widgets. Enter to enter focus ' \
'mode.'.format(exit_key_char)
self.status_bar = py_cui.statusbar.StatusBar(self._init_status_bar_text,
BLACK_ON_WHITE)
# Logging object initialization for py_cui
self._logger = py_cui.debug._initialize_logger(self,
name='py_cui')
# Initialize grid, renderer, and widget dict
self._grid = py_cui.grid.Grid(num_rows, num_cols, self._height, self._width, self._logger)
self._renderer = None
self._border_characters = None
self._stdscr = None
self._widgets = {}
self._refresh_timeout = -1
# Variables for determining selected widget/focus mode
self._selected_widget = None
self._in_focused_mode = False
self._popup = None
self._auto_focus_buttons = auto_focus_buttons
# CUI blocks when loading popup is open
self._loading = False
self._stopped = False
self._post_loading_callback = None
self._on_draw_update_func = None
# Top level keybindings. Exit key is 'q' by default
self._keybindings = {}
self._exit_key = exit_key
self._forward_cycle_key = py_cui.keys.KEY_CTRL_LEFT
self._reverse_cycle_key = py_cui.keys.KEY_CTRL_RIGHT
# Callback to fire when CUI is stopped.
self._on_stop = None
def set_refresh_timeout(self, timeout):
"""Sets the CUI auto-refresh timeout to a number of seconds.
Parameters
----------
timeout : int
Number of seconds to wait before refreshing the CUI
"""
# We want the refresh timeout in milliseconds as an integer
self._refresh_timeout = int(timeout * 1000)
def set_on_draw_update_func(self, update_function):
"""Adds a function that is fired during each draw call of the CUI
Parameters
----------
update_function : function
A no-argument or lambda function that is fired at the start of each draw call
"""
self._on_draw_update_func = update_function
def set_widget_cycle_key(self, forward_cycle_key=None, reverse_cycle_key=None):
"""Assigns a key for automatically cycling through widgets in both focus and overview modes
Parameters
----------
widget_cycle_key : py_cui.keys.KEY
Key code for key to cycle through widgets
"""
if forward_cycle_key is not None:
self._forward_cycle_key = forward_cycle_key
if reverse_cycle_key is not None:
self._reverse_cycle_key = reverse_cycle_key
def enable_logging(self, log_file_path='py_cui_log.txt', logging_level = logging.DEBUG):
"""Function enables logging for py_cui library
Parameters
----------
log_file_path : str
The target log filepath. Default 'py_cui_log.txt
logging_level : int
Default logging level = logging.DEBUG
"""
try:
py_cui.debug._enable_logging(self._logger, filename=log_file_path, logging_level=logging_level)
self._logger.info('Initialized logger')
except PermissionError as e:
print('Failed to initialize logger: {}'.format(str(e)))
def apply_widget_set(self, new_widget_set):
"""Function that replaces all widgets in a py_cui with those of a different widget set
Parameters
----------
new_widget_set : WidgetSet
The new widget set to switch to
Raises
------
TypeError
If input is not of type WidgetSet
"""
if isinstance(new_widget_set, py_cui.widget_set.WidgetSet):
self.lose_focus()
self._widgets = new_widget_set._widgets
self._grid = new_widget_set._grid
self._keybindings = new_widget_set._keybindings
if self._simulated_terminal is None:
if self._stdscr is None:
term_size = shutil.get_terminal_size()
height = term_size.lines
width = term_size.columns
else:
# Use curses termsize when possible to fix resize bug on windows.
height, width = self._stdscr.getmaxyx()
else:
height = self._simulated_terminal[0]
width = self._simulated_terminal[1]
height = height - 4
self._refresh_height_width(height, width)
if self._stdscr is not None:
self._initialize_widget_renderer()
self._selected_widget = new_widget_set._selected_widget
else:
raise TypeError('Argument must be of type py_cui.widget_set.WidgetSet')
def create_new_widget_set(self, num_rows, num_cols):
"""Function that is used to create additional widget sets
Use this function instead of directly creating widget set object instances, to allow
for logging support.
Parameters
----------
num_rows : int
row count for new widget set
num_cols : int
column count for new widget set
Returns
-------
new_widget_set : py_cui.widget_set.WidgetSet
The new widget set object instance
"""
# Use current logging object and simulated terminal for sub-widget sets
return py_cui.widget_set.WidgetSet(num_rows, num_cols, self._logger,
simulated_terminal=self._simulated_terminal)
# ----------------------------------------------#
# Initialization functions #
# Used to initialzie CUI and its features #
# ----------------------------------------------#
def start(self):
"""Function that starts the CUI
"""
self._logger.info('Starting {} CUI'.format(self._title))
curses.wrapper(self._draw)
def stop(self):
"""Function that stops the CUI, and fires the callback function.
Callback must be a no arg method
"""
self._logger.info('Stopping CUI')
self._stopped = True
def run_on_exit(self, command):
"""Sets callback function on CUI exit. Must be a no-argument function or lambda function
Parameters
----------
command : function
A no-argument or lambda function to be fired on exit
"""
self._on_stop = command
def set_title(self, title):
"""Sets the title bar text
Parameters
----------
title : str
New title for CUI
"""
self._title = title
def set_status_bar_text(self, text):
"""Sets the status bar text when in overview mode
Parameters
----------
text : str
Status bar text
"""
self._init_status_bar_text = text
self.status_bar.set_text(text)
def _initialize_colors(self):
"""Function for initialzing curses colors. Called when CUI is first created.
"""
# Start colors in curses.
# For each color pair in color map, initialize color combination.
curses.start_color()
curses.init_color(curses.COLOR_BLUE, 0, 0, 500)
for color_pair in py_cui.colors._COLOR_MAP.keys():
fg_color, bg_color = py_cui.colors._COLOR_MAP[color_pair]
curses.init_pair(color_pair, fg_color, bg_color)
def _initialize_widget_renderer(self):
"""Function that creates the renderer object that will draw each widget
"""
if self._renderer is None:
self._renderer = py_cui.renderer.Renderer(self, self._stdscr, self._logger)
for widget_id in self._widgets.keys():
self._widgets[widget_id]._assign_renderer(self._renderer)
if self._popup is not None:
self._popup._assign_renderer(self._renderer)
def toggle_unicode_borders(self):
"""Function for toggling unicode based border rendering
"""
if self._border_characters is None or self._border_characters['UP_LEFT'] == '+':
self.set_widget_border_characters('\u256d', '\u256e', '\u2570', '\u256f', '\u2500', '\u2502')
else:
self.set_widget_border_characters('+', '+', '+', '+', '-', '|')
def set_widget_border_characters(self, upper_left_corner, upper_right_corner, lower_left_corner, lower_right_corner, horizontal, vertical):
"""Function that can be used to set arbitrary border characters for drawing widget borders by renderer.
Parameters
----------
upper_left_corner : char
Upper left corner character
upper_right_corner : char
Upper right corner character
lower_left_corner : char
Upper left corner character
lower_right_corner : char
Lower right corner character
horizontal : char
Horizontal border character
vertical : char
Vertical border character
"""
self._border_characters = {
'UP_LEFT': upper_left_corner,
'UP_RIGHT': upper_right_corner,
'DOWN_LEFT': lower_left_corner,
'DOWN_RIGHT': lower_right_corner,
'HORIZONTAL': horizontal,
'VERTICAL': vertical
}
self._logger.info('Set border_characters to {}'.format(self._border_characters))
def get_widgets(self):
"""Function that gets current set of widgets
Returns
-------
widgets : dict of str -> widget
dictionary mapping widget IDs to object instances
"""
return self._widgets
# Widget add functions. Each of these adds a particular type of widget
# to the grid in a specified location.
def add_scroll_menu(self, title, row, column, row_span=1, column_span=1, padx=1, pady=0) -> py_cui.widgets.ScrollMenu:
"""Function that adds a new scroll menu to the CUI grid
Parameters
----------
title : str
The title of the scroll menu
row : int
The row value, from the top down
column : int
The column value from the top down
row_span=1 : int
The number of rows to span accross
column_span=1 : int
the number of columns to span accross
padx=1 : int
number of padding characters in the x direction
pady=0 : int
number of padding characters in the y direction
Returns
-------
new_scroll_menu : ScrollMenu
A reference to the created scroll menu object.
"""
id = 'Widget{}'.format(len(self._widgets.keys()))
new_scroll_menu = py_cui.widgets.ScrollMenu(id,
title,
self._grid,
row,
column,
row_span,
column_span,
padx,
pady,
self._logger)
self._widgets[id] = new_scroll_menu
if self._selected_widget is None:
self.set_selected_widget(id)
self._logger.info('Adding widget {} w/ ID {} of type {}'.format(title, id, str(type(new_scroll_menu))))
return new_scroll_menu
def add_checkbox_menu(self, title, row, column, row_span=1, column_span=1, padx=1, pady=0, checked_char='X') -> py_cui.widgets.CheckBoxMenu:
"""Function that adds a new checkbox menu to the CUI grid
Parameters
----------
title : str
The title of the checkbox
row : int
The row value, from the top down
column : int
The column value from the top down
row_span=1 : int
The number of rows to span accross
column_span=1 : int
the number of columns to span accross
padx=1 : int
number of padding characters in the x direction
pady=0 : int
number of padding characters in the y direction
checked_char='X' : char
The character used to mark 'Checked' items
Returns
-------
new_checkbox_menu : CheckBoxMenu
A reference to the created checkbox object.
"""
id = 'Widget{}'.format(len(self._widgets.keys()))
new_checkbox_menu = py_cui.widgets.CheckBoxMenu(id,
title,
self._grid,
row,
column,
row_span,
column_span,
padx,
pady,
self._logger,
checked_char)
self._widgets[id] = new_checkbox_menu
if self._selected_widget is None:
self.set_selected_widget(id)
self._logger.info('Adding widget {} w/ ID {} of type {}'.format(title, id, str(type(new_checkbox_menu))))
return new_checkbox_menu
def add_text_box(self, title, row, column, row_span = 1, column_span = 1, padx = 1, pady = 0, initial_text = '', password = False) -> py_cui.widgets.TextBox:
"""Function that adds a new text box to the CUI grid
Parameters
----------
title : str
The title of the textbox
row : int
The row value, from the top down
column : int
The column value from the top down
row_span=1 : int
The number of rows to span accross
column_span=1 : int
the number of columns to span accross
padx=1 : int
number of padding characters in the x direction
pady=0 : int
number of padding characters in the y direction
initial_text='' : str
Initial text for the textbox
password=False : bool
Toggle to show '*' instead of characters.
Returns
-------
new_text_box : TextBox
A reference to the created textbox object.
"""
id = 'Widget{}'.format(len(self._widgets.keys()))
new_text_box = py_cui.widgets.TextBox(id,
title,
self._grid,
row, column,
row_span,
column_span,
padx, pady,
self._logger,
initial_text,
password)
self._widgets[id] = new_text_box
if self._selected_widget is None:
self.set_selected_widget(id)
self._logger.info('Adding widget {} w/ ID {} of type {}'.format(title, id, str(type(new_text_box))))
return new_text_box
def add_text_block(self, title, row, column, row_span = 1, column_span = 1, padx = 1, pady = 0, initial_text = '') -> py_cui.widgets.ScrollTextBlock:
"""Function that adds a new text block to the CUI grid
Parameters
----------
title : str
The title of the text block
row : int
The row value, from the top down
column : int
The column value from the top down
row_span=1 : int
The number of rows to span accross
column_span=1 : int
the number of columns to span accross
padx=1 : int
number of padding characters in the x direction
pady=0 : int
number of padding characters in the y direction
initial_text='' : str
Initial text for the text block
Returns
-------
new_text_block : ScrollTextBlock
A reference to the created textblock object.
"""
id = 'Widget{}'.format(len(self._widgets.keys()))
new_text_block = py_cui.widgets.ScrollTextBlock(id,
title,
self._grid,
row,
column,
row_span,
column_span,
padx,
pady,
self._logger,
initial_text)
self._widgets[id] = new_text_block
if self._selected_widget is None:
self.set_selected_widget(id)
self._logger.info('Adding widget {} w/ ID {} of type {}'.format(title, id, str(type(new_text_block))))
return new_text_block
def add_label(self, title, row, column, row_span = 1, column_span = 1, padx = 1, pady = 0) -> py_cui.widgets.Label:
"""Function that adds a new label to the CUI grid
Parameters
----------
title : str
The title of the label
row : int
The row value, from the top down
column : int
The column value from the top down
row_span=1 : int
The number of rows to span accross
column_span=1 : int
the number of columns to span accross
padx=1 : int
number of padding characters in the x direction
pady=0 : int
number of padding characters in the y direction
Returns
-------
new_label : Label
A reference to the created label object.
"""
id = 'Widget{}'.format(len(self._widgets.keys()))
new_label = py_cui.widgets.Label(id,
title,
self._grid,
row,
column,
row_span,
column_span,
padx,
pady,
self._logger)
self._widgets[id] = new_label
self._logger.info('Adding widget {} w/ ID {} of type {}'.format(title, id, str(type(new_label))))
return new_label
def add_block_label(self, title, row, column, row_span = 1, column_span = 1, padx = 1, pady = 0, center=True) -> py_cui.widgets.BlockLabel:
"""Function that adds a new block label to the CUI grid
Parameters
----------
title : str
The title of the block label
row : int
The row value, from the top down
column : int
The column value from the top down
row_span=1 : int
The number of rows to span accross
column_span=1 : int
the number of columns to span accross
padx=1 : int
number of padding characters in the x direction
pady=0 : int
number of padding characters in the y direction
center : bool
flag to tell label to be centered or left-aligned.
Returns
-------
new_label : BlockLabel
A reference to the created block label object.
"""
id = 'Widget{}'.format(len(self._widgets.keys()))
new_label = py_cui.widgets.BlockLabel(id,
title,
self._grid,
row,
column,
row_span,
column_span,
padx,
pady,
center,
self._logger)
self._widgets[id] = new_label
self._logger.info('Adding widget {} w/ ID {} of type {}'.format(title, id, str(type(new_label))))
return new_label
def add_button(self, title, row, column, row_span = 1, column_span = 1, padx = 1, pady = 0, command=None) -> py_cui.widgets.Button:
"""Function that adds a new button to the CUI grid
Parameters
----------
title : str
The title of the button
row : int
The row value, from the top down
column : int
The column value from the top down
row_span=1 : int
The number of rows to span accross
column_span=1 : int
the number of columns to span accross
padx=1 : int
number of padding characters in the x direction
pady=0 : int
number of padding characters in the y direction
command=None : Function
A no-argument or lambda function to fire on button press.
Returns
-------
new_button : Button
A reference to the created button object.
"""
id = 'Widget{}'.format(len(self._widgets.keys()))
new_button = py_cui.widgets.Button(id,
title,
self._grid,
row,
column,
row_span,
column_span,
padx,
pady,
self._logger,
command)
self._widgets[id] = new_button
if self._selected_widget is None:
self.set_selected_widget(id)
self._logger.info('Adding widget {} w/ ID {} of type {}'.format(title, id, str(type(new_button))))
return new_button
def add_slider(self, title, row, column, row_span=1,
column_span=1, padx=1, pady=0,
min_val=0, max_val=100, step=1, init_val=0) -> py_cui.controls.slider.SliderWidget:
"""Function that adds a new label to the CUI grid
Parameters
----------
title : str
The title of the label
row : int
The row value, from the top down
column : int
The column value from the top down
row_span=1 : int
The number of rows to span accross
column_span=1 : int
the number of columns to span accross
padx=1 : int
number of padding characters in the x direction
pady=0 : int
number of padding characters in the y direction
min_val = 0 int
min value of the slider
max_val = 0 int
max value of the slider
step = 0 int
step to incremento or decrement
init_val = 0 int
initial value of the slider
Returns
-------
new_slider : Slider
A reference to the created slider object.
"""
id = 'Widget{}'.format(len(self._widgets.keys()))
new_slider = py_cui.controls.slider.SliderWidget(id,
title,
self._grid,
row,
column,
row_span,
column_span,
padx,
pady,
self._logger,
min_val,
max_val,
step,
init_val)
self._widgets[id] = new_slider
self._logger.info('Adding widget {} w/ ID {} of type {}'
.format(title, id, str(type(new_slider))))
return new_slider
def get_element_at_position(self, x, y):
"""Returns containing widget for character position
Parameters
----------
x : int
Horizontal character position
y : int
Vertical character position, top down
Returns
-------
in_widget : UIElement
Widget or popup that is within the position None if nothing
"""
if self._popup is not None and self._popup._contains_position(x, y):
return self._popup
elif self._popup is None:
for widget_id in self.get_widgets().keys():
if self.get_widgets()[widget_id]._contains_position(x, y):
return self.get_widgets()[widget_id]
return None
def _get_horizontal_neighbors(self, widget, direction):
"""Gets all horizontal (left, right) neighbor widgets
Parameters
----------
widget : py_cui.widgets.Widget
The currently selected widget
direction : py_cui.keys.KEY*
must be an arrow key value
Returns
-------
id_list : list[]
A list of the neighbor widget ids
"""
if not direction in py_cui.keys.ARROW_KEYS:
return None
_, num_cols = self._grid.get_dimensions()
row_start, col_start = widget.get_grid_cell()
row_span, col_span = widget.get_grid_cell_spans()
id_list = []
if direction == py_cui.keys.KEY_LEFT_ARROW:
col_range_start = 0
col_range_stop = col_start
else:
col_range_start = col_start + col_span
col_range_stop = num_cols
for col in range(col_range_start, col_range_stop):
for row in range(row_start, row_start + row_span):
for widget_id in self.get_widgets().keys():
if self.get_widgets()[widget_id]._is_row_col_inside(row, col) and widget_id not in id_list:
id_list.append(widget_id)
if direction == py_cui.keys.KEY_LEFT_ARROW:
id_list.reverse()
self._logger.info('Neighbors with ids {} for cell {},{} span {},{}'.format(id_list,
row_start,
col_start,
row_span,
col_span))
return id_list
def _get_vertical_neighbors(self, widget, direction):
"""Gets all vertical (up, down) neighbor widgets
Parameters
----------
widget : py_cui.widgets.Widget
The currently selected widget
direction : py_cui.keys.KEY*
must be an arrow key value
Returns
-------
id_list : list[]
A list of the neighbor widget ids
"""
if not direction in py_cui.keys.ARROW_KEYS:
return None
num_rows, _ = self._grid.get_dimensions()
row_start, col_start = widget.get_grid_cell()
row_span, col_span = widget.get_grid_cell_spans()
id_list = []
if direction == py_cui.keys.KEY_UP_ARROW:
row_range_start = 0
row_range_stop = row_start
else:
row_range_start = row_start + row_span
row_range_stop = num_rows
for row in range(row_range_start, row_range_stop):
for col in range(col_start, col_start + col_span):
for widget_id in self.get_widgets().keys():
if self.get_widgets()[widget_id]._is_row_col_inside(row, col) and widget_id not in id_list:
id_list.append(widget_id)
if direction == py_cui.keys.KEY_UP_ARROW:
id_list.reverse()
self._logger.info('Neighbors with ids {} for cell {},{} span {},{}'.format(id_list,
row_start,
col_start,
row_span,
col_span))
return id_list
# CUI status functions. Used to switch between widgets, set the mode, and
# identify neighbors for overview mode
def _check_if_neighbor_exists(self, direction):
"""Function that checks if widget has neighbor in specified cell.
Used for navigating CUI, as arrow keys find the immediate neighbor
Parameters
----------
direction : py_cui.keys.KEY_*
The direction in which to search
Returns
-------
widget_id : str
The widget neighbor ID if found, None otherwise
"""
start_widget = self.get_widgets()[self._selected_widget]
# Find all the widgets in the given row or column
neighbors = []
if direction in [py_cui.keys.KEY_DOWN_ARROW, py_cui.keys.KEY_UP_ARROW]:
neighbors = self._get_vertical_neighbors(start_widget, direction)
elif direction in [py_cui.keys.KEY_RIGHT_ARROW, py_cui.keys.KEY_LEFT_ARROW]:
neighbors = self._get_horizontal_neighbors(start_widget, direction)
if len(neighbors) == 0:
return None
# We select the best match to jump to (first neighbor)
return neighbors[0]
def get_selected_widget(self):
"""Function that gets currently selected widget
Returns
-------
selected_widget : py_cui.widgets.Widget
Reference to currently selected widget object
"""
if self._selected_widget is not None and self._selected_widget in self.get_widgets().keys():
return self.get_widgets()[self._selected_widget]
else:
self._logger.warn('Selected widget ID is None or invalid')
return None
def set_selected_widget(self, widget_id):
"""Function that sets the selected widget for the CUI
Parameters
----------
widget_id : str
the id of the widget to select
"""
if widget_id in self.get_widgets().keys():
self._logger.info('Setting selected widget to ID {}'.format(widget_id))
self._selected_widget = widget_id
else:
self._logger.warn('Widget w/ ID {} does not exist among current widgets.'.format(widget_id))
def lose_focus(self):
"""Function that forces py_cui out of focus mode.
After popup is called, focus is lost
"""
if self._in_focused_mode:
self._in_focused_mode = False
self.status_bar.set_text(self._init_status_bar_text)
self.get_widgets()[self._selected_widget].set_selected(False)
else:
self._logger.info('lose_focus: Not currently in focus mode')
def move_focus(self, widget, auto_press_buttons=True):
"""Moves focus mode to different widget
Parameters
----------
widget : Widget
The widget object we want to move focus to.
"""
self.lose_focus()
self.set_selected_widget(widget.get_id())
# If autofocus buttons is selected, we automatically process the button command and reset to overview mode
if self._auto_focus_buttons and auto_press_buttons and isinstance(widget, py_cui.widgets.Button):
widget.command()
self._logger.info('Moved focus to button {} - ran autofocus command'.format(widget.get_title()))
elif self._auto_focus_buttons and isinstance(widget, py_cui.widgets.Button):
self.status_bar.set_text(self._init_status_bar_text)
else:
widget.set_selected(True)
self._in_focused_mode = True
self.status_bar.set_text(widget.get_help_text())
self._logger.info('Moved focus to widget {}'.format(widget.get_title()))
def _cycle_widgets(self, reverse=False):
"""Function that is fired if cycle key is pressed to move to next widget
Parameters
----------
reverse : bool
Default false. If true, cycle widgets in reverse order.
"""
num_widgets = len(self.get_widgets().keys())
current_widget_num = int(self._selected_widget.split('Widget')[1])
if not reverse:
next_widget_num = current_widget_num + 1
if next_widget_num == num_widgets:
next_widget_num = 0
cycle_key = self._forward_cycle_key
else:
next_widget_num = current_widget_num - 1
if next_widget_num < 0:
next_widget_num = num_widgets - 1
cycle_key = self._reverse_cycle_key
current_widget_id = 'Widget{}'.format(current_widget_num)
next_widget_id = 'Widget{}'.format(next_widget_num)
if self._in_focused_mode and cycle_key in self.get_widgets()[current_widget_id]._key_commands.keys():
# In the event that we are focusing on a widget with that key defined, we do not cycle.
pass
else:
self.move_focus(self.get_widgets()[next_widget_id], auto_press_buttons=False)
def add_key_command(self, key, command):
"""Function that adds a keybinding to the CUI when in overview mode
Parameters
----------
key : py_cui.keys.KEY_*
The key bound to the command
command : Function
A no-arg or lambda function to fire on keypress
"""
self._keybindings[key] = command
# Popup functions. Used to display messages, warnings, and errors to the user.
def show_message_popup(self, title, text):
"""Shows a message popup
Parameters
----------
title : str
Message title
text : str
Message text
"""
color = WHITE_ON_BLACK
self._popup = py_cui.popups.MessagePopup(self, title, text, color, self._renderer, self._logger)
self._logger.info('Opened {} popup with title {}'.format(str(type(self._popup)), self._popup.get_title()))
def show_warning_popup(self, title, text):
"""Shows a warning popup
Parameters
----------
title : str
Warning title
text : str
Warning text
"""
color = YELLOW_ON_BLACK
self._popup = py_cui.popups.MessagePopup(self, 'WARNING - ' + title, text, color, self._renderer, self._logger)
self._logger.info('Opened {} popup with title {}'.format(str(type(self._popup)), self._popup.get_title()))
def show_error_popup(self, title, text):
"""Shows an error popup
Parameters
----------
title : str
Error title
text : str
Error text
"""
color = RED_ON_BLACK
self._popup = py_cui.popups.MessagePopup(self, 'ERROR - ' + title, text, color, self._renderer, self._logger)
self._logger.info('Opened {} popup with title {}'.format(str(type(self._popup)), self._popup.get_title()))
def show_yes_no_popup(self, title, command):
"""Shows a yes/no popup.
The 'command' parameter must be a function with a single boolean parameter
Parameters
----------
title : str
Message title
command : function
A function taking in a single boolean parameter. Will be fired with True if yes selected, false otherwise
"""
color = WHITE_ON_BLACK
self._popup = py_cui.popups.YesNoPopup(self, title + '- (y/n)', 'Yes - (y), No - (n)', color, command, self._renderer, self._logger)
self._logger.info('Opened {} popup with title {}'.format(str(type(self._popup)), self._popup.get_title()))
def show_text_box_popup(self, title, command, password=False):
"""Shows a textbox popup.
The 'command' parameter must be a function with a single string parameter
Parameters
----------
title : str
Message title
command : Function
A function with a single string parameter, fired with contents of textbox when enter key pressed
password=False : bool
If true, write characters as '*'
"""
color = WHITE_ON_BLACK
self._popup = py_cui.popups.TextBoxPopup(self, title, color, command, self._renderer, password, self._logger)
self._logger.info('Opened {} popup with title {}'.format(str(type(self._popup)), self._popup.get_title()))
def show_menu_popup(self, title, menu_items, command, run_command_if_none=False):
"""Shows a menu popup.
The 'command' parameter must be a function with a single string parameter
Parameters
----------
title : str
menu title
menu_items : list of str
A list of menu items
command : Function
A function taking in a single string argument. Fired with selected menu item when ENTER pressed.
run_command_if_none=False : bool
If True, will run command passing in None if no menu item selected.
"""
color = WHITE_ON_BLACK
self._popup = py_cui.popups.MenuPopup(self, menu_items, title, color, command, self._renderer, self._logger, run_command_if_none)
self._logger.info('Opened {} popup with title {}'.format(str(type(self._popup)), self._popup.get_title()))
def show_loading_icon_popup(self, title, message, callback=None):
"""Shows a loading icon popup
Parameters
----------
title : str
Message title
message : str
Message text. Will show as '$message...'
callback=None : Function
If not none, fired after loading is completed. Must be a no-arg function
"""
if callback is not None:
self._post_loading_callback = callback
color = WHITE_ON_BLACK
self._loading = True
self._popup = py_cui.popups.LoadingIconPopup(self, title, message, color, self._renderer, self._logger)
self._logger.info('Opened {} popup with title {}'.format(str(type(self._popup)), self._popup.get_title()))
def show_loading_bar_popup(self, title, num_items, callback=None):
"""Shows loading bar popup.
Use 'increment_loading_bar' to show progress
Parameters
----------
title : str
Message title
num_items : int
Number of items to iterate through for loading
callback=None : Function
If not none, fired after loading is completed. Must be a no-arg function
"""
if callback is not None:
self._post_loading_callback = callback
color = WHITE_ON_BLACK
self._loading = True
self._popup = py_cui.popups.LoadingBarPopup(self, title, num_items, color, self._renderer, self._logger)
self._logger.info('Opened {} popup with title {}'.format(str(type(self._popup)), self._popup.get_title()))
def show_form_popup(self, title, fields, passwd_fields=[], required=[], callback=None):
"""Shows form popup.
Used for inputting several fields worth of values
Parameters
---------
title : str
Message title
fields : List[str]
Names of each individual field
passwd_fields : List[str]
Field names that should have characters hidden
required : List[str]
Fields that are required before submission
callback=None : Function
If not none, fired after loading is completed. Must be a no-arg function
"""
self._popup = py_cui.dialogs.form.FormPopup(self, fields, passwd_fields, required, {}, title, py_cui.WHITE_ON_BLACK, self._renderer, self._logger)
if callback is not None:
self._popup.set_on_submit_action(callback)
def show_filedialog_popup(self, popup_type='openfile', initial_dir='.', callback=None, ascii_icons=True, limit_extensions=[]):
"""Shows form popup.
Used for inputting several fields worth of values
Paramters
---------
title : str
Message title
fields : List[str]
Names of each individual field
passwd_fields : List[str]
Field names that should have characters hidden
required : List[str]
Fields that are required before submission
callback=None : Function
If not none, fired after loading is completed. Must be a no-arg function
"""
self._popup = py_cui.dialogs.filedialog.FileDialogPopup(self, callback, initial_dir, popup_type, ascii_icons, limit_extensions, py_cui.WHITE_ON_BLACK, self._renderer, self._logger)
def increment_loading_bar(self):
"""Increments progress bar if loading bar popup is open
"""
if self._popup is not None:
self._popup._increment_counter()
else:
self._logger.warn('No popup is currently opened.')
def stop_loading_popup(self):
"""Leaves loading state, and closes popup.
Must be called by user to escape loading.
"""
self._loading = False
self.close_popup()
self._logger.info('Stopping open loading popup')
def close_popup(self):
"""Closes the popup, and resets focus
"""
self.lose_focus()
self._popup = None
def _refresh_height_width(self, height, width):
"""Function that updates the height and width of the CUI based on terminal window size
Parameters
----------
height : int
Window height in terminal characters
width : int
Window width in terminal characters
"""
self._height = height
self._width = width
self._grid.update_grid_height_width(self._height, self._width)
for widget_id in self._widgets.keys():
self._widgets[widget_id].update_height_width()
if self._popup is not None:
self._popup.update_height_width()
def get_absolute_size(self):
"""Returns dimensions of CUI
Returns
-------
height, width : int
The dimensions of drawable CUI space in characters
"""
return self._height, self._width
# Draw Functions. Function for drawing widgets, status bars, and popups
def _draw_widgets(self):
"""Function that draws all of the widgets to the screen
"""
for widget_key in self.get_widgets().keys():
if widget_key != self._selected_widget:
self.get_widgets()[widget_key]._draw()
# We draw the selected widget last to support cursor location.
if self._selected_widget is not None:
self.get_widgets()[self._selected_widget]._draw()
self._logger.info('Drew widgets')
def _draw_status_bars(self, stdscr, height, width):
"""Draws status bar and title bar
Parameters
----------
stdscr : curses Standard cursor
The cursor used to draw the status bar
height : int
Window height in terminal characters
width : int
Window width in terminal characters
"""
if self.status_bar is not None:
stdscr.attron(curses.color_pair(self.status_bar.get_color()))
stdscr.addstr(height + 3, 0, fit_text(width, self.status_bar.get_text()))
stdscr.attroff(curses.color_pair(self.status_bar.get_color()))
if self.title_bar is not None:
stdscr.attron(curses.color_pair(self.title_bar.get_color()))
stdscr.addstr(0, 0, fit_text(width, self._title, center=True))
stdscr.attroff(curses.color_pair(self.title_bar.get_color()))
def _display_window_warning(self, stdscr, error_info):
"""Function that prints some basic error info if there is an error with the CUI
Parameters
----------
stdscr : curses Standard cursor
The cursor used to draw the warning
error_info : str
The information regarding the error.
"""
stdscr.clear()
stdscr.attron(curses.color_pair(RED_ON_BLACK))
stdscr.addstr(0, 0, 'Error displaying CUI!!!')
stdscr.addstr(1, 0, 'Error Type: {}'.format(error_info))
stdscr.addstr(2, 0, 'Most likely terminal dimensions are too small.')
stdscr.attroff(curses.color_pair(RED_ON_BLACK))
stdscr.refresh()
self._logger.info('Encountered error -> {}'.format(error_info))
def _handle_key_presses(self, key_pressed):
"""Function that handles all main loop key presses.
Parameters
----------
key_pressed : py_cui.keys.KEY_*
The key being pressed
"""
# Selected widget represents which widget is being hovered over, though not necessarily in focus mode
if self._selected_widget is None:
return
selected_widget = self.get_widgets()[self._selected_widget]
# If we are in focus mode, the widget has all of the control of the keyboard except
# for the escape key, which exits focus mode.
if self._in_focused_mode and self._popup is None:
if key_pressed == py_cui.keys.KEY_ESCAPE:
self.status_bar.set_text(self._init_status_bar_text)
self._in_focused_mode = False
selected_widget.set_selected(False)
self._logger.info('Exiting focus mode on widget {}'.format(selected_widget.get_title()))
else:
# widget handles remaining py_cui.keys
self._logger.info('Widget {} handling {} key'.format(selected_widget.get_title(), key_pressed))
selected_widget._handle_key_press(key_pressed)
# Otherwise, barring a popup, we are in overview mode, meaning that arrow py_cui.keys move between widgets, and Enter key starts focus mode
elif self._popup is None:
if key_pressed == py_cui.keys.KEY_ENTER and self._selected_widget is not None and selected_widget.is_selectable():
self.move_focus(selected_widget)
for key in self._keybindings.keys():
if key_pressed == key:
command = self._keybindings[key]
self._logger.info('Detected binding for key {}, running command {}'.format(key_pressed, command.__name__))
command()
# If not in focus mode, use the arrow py_cui.keys to move around the selectable widgets.
neighbor = None
if key_pressed in py_cui.keys.ARROW_KEYS:
neighbor = self._check_if_neighbor_exists(key_pressed)
if neighbor is not None:
self.set_selected_widget(neighbor)
self._logger.info('Navigated to neighbor widget {}'.format(self.get_widgets()[self._selected_widget].get_title()))
# if we have a popup, that takes key control from both overview and focus mode
elif self._popup is not None:
self._logger.info('Popup {} handling key {}'.format(self._popup.get_title(), key_pressed))
self._popup._handle_key_press(key_pressed)
def _draw(self, stdscr):
"""Main CUI draw loop called by start()
Parameters
----------
stdscr : curses Standard screen
The screen buffer used for drawing CUI elements
"""
self._stdscr = stdscr
key_pressed = 0
# Clear and refresh the screen for a blank canvas
stdscr.clear()
stdscr.refresh()
curses.mousemask(curses.ALL_MOUSE_EVENTS)
# stdscr.nodelay(False)
#stdscr.keypad(True)
# Initialization functions. Generates colors and renderer
self._initialize_colors()
self._initialize_widget_renderer()
# If user specified a refresh timeout, apply it here
if self._refresh_timeout > 0:
self._stdscr.timeout(self._refresh_timeout)
# If user sets non-default border characters, update them here
if self._border_characters is not None:
self._renderer._set_border_renderer_chars(self._border_characters)
# Loop where key_pressed is the last character pressed. Wait for exit key while no popup or focus mode
while key_pressed != self._exit_key or self._in_focused_mode or self._popup is not None:
try:
# If we call stop, we want to break out of the main draw loop
if self._stopped:
break
# Initialization and size adjustment
stdscr.erase()
# find height width, adjust if status/title bar added. We decrement the height by 4 to account for status/title bar and padding
if self._simulated_terminal is None:
height, width = stdscr.getmaxyx()
else:
height = self._simulated_terminal[0]
width = self._simulated_terminal[1]
height = height - 4
# If the user defined an update function to fire on each draw call,
# Run it here. This can of course be also handled user-side
# through a separate thread.
if self._on_draw_update_func is not None:
self._on_draw_update_func()
# This is what allows the CUI to be responsive. Adjust grid size based on current terminal size
# Resize the grid and the widgets if there was a resize operation
if key_pressed == curses.KEY_RESIZE:
self._logger.info('Resizing CUI to new dimensions {} by {}'.format(height, width))
try:
self._refresh_height_width(height, width)
except py_cui.errors.PyCUIOutOfBoundsError as e:
self._logger.info('Resized terminal too small')
self._display_window_warning(stdscr, str(e))
# Here we handle mouse click events globally, or pass them to the UI element to handle
elif key_pressed == curses.KEY_MOUSE:
self._logger.info('Detected mouse click')
_, x, y, _, _ = curses.getmouse()
in_element = self.get_element_at_position(x, y)
# In first case, we click inside already selected widget, pass click for processing
if in_element is not None and in_element.is_selected():
in_element._handle_mouse_press(x, y)
# Otherwise, if not a popup, select the clicked on widget
elif in_element is not None and not isinstance(in_element, py_cui.popups.Popup):
self.move_focus(in_element)
in_element._handle_mouse_press(x, y)
# If we have a post_loading_callback, fire it here
if self._post_loading_callback is not None and not self._loading:
self._logger.info('Firing post-loading callback function {}'.format(self._post_loading_callback.__name__))
self._post_loading_callback()
self._post_loading_callback = None
# Handle widget cycling
if key_pressed == self._forward_cycle_key:
self._cycle_widgets()
elif key_pressed == self._reverse_cycle_key:
self._cycle_widgets(reverse=True)
# Handle keypresses
self._handle_key_presses(key_pressed)
try:
# Draw status/title bar, and all widgets. Selected widget will be bolded.
self._draw_status_bars(stdscr, height, width)
self._draw_widgets()
# draw the popup if required
if self._popup is not None:
self._popup._draw()
except curses.error as e:
self._logger.error('Curses error while drawing TUI')
self._display_window_warning(stdscr, str(e))
except py_cui.errors.PyCUIOutOfBoundsError as e:
self._logger.error('Resized terminal too small')
self._display_window_warning(stdscr, str(e))
# Refresh the screen
stdscr.refresh()
# Wait for next input
if self._loading or self._post_loading_callback is not None:
# When loading, refresh screen every quarter second
time.sleep(0.25)
# Need to reset key_pressed, because otherwise the previously pressed key will be used.
key_pressed = 0
elif self._stopped:
key_pressed = self._exit_key
else:
self._logger.info('Waiting for next keypress')
key_pressed = stdscr.getch()
except KeyboardInterrupt:
self._logger.info('Detect Keyboard Interrupt, Exiting...')
self._stopped = True
stdscr.erase()
stdscr.refresh()
curses.endwin()
if self._on_stop is not None:
self._logger.info('Firing onstop function {}'.format(self._on_stop.__name__))
self._on_stop()
def __format__(self, fmt):
"""Override of base format function. Prints list of current widgets.
Parameters
----------
fmt : Format
The format to override
"""
out = ''
for widget in self.get_widgets().keys():
out += '{}\n'.format(self.get_widgets()[widget].get_title())
return out
| [((120, 8, 120, 47), 'os.environ.setdefault', 'os.environ.setdefault', ({(120, 30, 120, 40): '"""ESCDELAY"""', (120, 42, 120, 46): '"""25"""'}, {}), "('ESCDELAY', '25')", False, 'import os\n'), ((141, 25, 141, 80), 'py_cui.statusbar.StatusBar', 'py_cui.statusbar.StatusBar', ({(141, 52, 141, 63): 'self._title', (141, 65, 141, 79): 'BLACK_ON_WHITE'}, {}), '(self._title, BLACK_ON_WHITE)', False, 'import py_cui\n'), ((142, 24, 142, 65), 'py_cui.keys.get_char_from_ascii', 'py_cui.keys.get_char_from_ascii', ({(142, 56, 142, 64): 'exit_key'}, {}), '(exit_key)', False, 'import py_cui\n'), ((146, 26, 147, 68), 'py_cui.statusbar.StatusBar', 'py_cui.statusbar.StatusBar', ({(146, 53, 146, 79): 'self._init_status_bar_text', (147, 53, 147, 67): 'BLACK_ON_WHITE'}, {}), '(self._init_status_bar_text, BLACK_ON_WHITE)', False, 'import py_cui\n'), ((150, 23, 151, 69), 'py_cui.debug._initialize_logger', 'py_cui.debug._initialize_logger', (), '', False, 'import py_cui\n'), ((154, 38, 154, 115), 'py_cui.grid.Grid', 'py_cui.grid.Grid', ({(154, 55, 154, 63): 'num_rows', (154, 65, 154, 73): 'num_cols', (154, 75, 154, 87): 'self._height', (154, 89, 154, 100): 'self._width', (154, 102, 154, 114): 'self._logger'}, {}), '(num_rows, num_cols, self._height, self._width, self._logger)', False, 'import py_cui\n'), ((303, 15, 304, 88), 'py_cui.widget_set.WidgetSet', 'py_cui.widget_set.WidgetSet', (), '', False, 'import py_cui\n'), ((318, 8, 318, 34), 'curses.wrapper', 'curses.wrapper', ({(318, 23, 318, 33): 'self._draw'}, {}), '(self._draw)', False, 'import curses\n'), ((374, 8, 374, 28), 'curses.start_color', 'curses.start_color', ({}, {}), '()', False, 'import curses\n'), ((375, 8, 375, 55), 'curses.init_color', 'curses.init_color', ({(375, 26, 375, 43): 'curses.COLOR_BLUE', (375, 45, 375, 46): '(0)', (375, 48, 375, 49): '(0)', (375, 51, 375, 54): '(500)'}, {}), '(curses.COLOR_BLUE, 0, 0, 500)', False, 'import curses\n'), ((376, 26, 376, 57), 'py_cui.colors._COLOR_MAP.keys', 'py_cui.colors._COLOR_MAP.keys', ({}, {}), '()', False, 'import py_cui\n'), ((474, 30, 483, 62), 'py_cui.widgets.ScrollMenu', 'py_cui.widgets.ScrollMenu', ({(474, 56, 474, 58): 'id', (475, 49, 475, 54): 'title', (476, 49, 476, 59): 'self._grid', (477, 49, 477, 52): 'row', (478, 49, 478, 55): 'column', (479, 49, 479, 57): 'row_span', (480, 49, 480, 60): 'column_span', (481, 49, 481, 53): 'padx', (482, 49, 482, 53): 'pady', (483, 49, 483, 61): 'self._logger'}, {}), '(id, title, self._grid, row, column, row_span,\n column_span, padx, pady, self._logger)', False, 'import py_cui\n'), ((520, 30, 530, 64), 'py_cui.widgets.CheckBoxMenu', 'py_cui.widgets.CheckBoxMenu', ({(520, 58, 520, 60): 'id', (521, 51, 521, 56): 'title', (522, 51, 522, 61): 'self._grid', (523, 51, 523, 54): 'row', (524, 51, 524, 57): 'column', (525, 51, 525, 59): 'row_span', (526, 51, 526, 62): 'column_span', (527, 51, 527, 55): 'padx', (528, 51, 528, 55): 'pady', (529, 51, 529, 63): 'self._logger', (530, 51, 530, 63): 'checked_char'}, {}), '(id, title, self._grid, row, column, row_span,\n column_span, padx, pady, self._logger, checked_char)', False, 'import py_cui\n'), ((569, 30, 578, 55), 'py_cui.widgets.TextBox', 'py_cui.widgets.TextBox', ({(569, 53, 569, 55): 'id', (570, 46, 570, 51): 'title', (571, 46, 571, 56): 'self._grid', (572, 46, 572, 49): 'row', (572, 51, 572, 57): 'column', (573, 46, 573, 54): 'row_span', (574, 46, 574, 57): 'column_span', (575, 46, 575, 50): 'padx', (575, 52, 575, 56): 'pady', (576, 46, 576, 58): 'self._logger', (577, 46, 577, 58): 'initial_text', (578, 46, 578, 54): 'password'}, {}), '(id, title, self._grid, row, column, row_span,\n column_span, padx, pady, self._logger, initial_text, password)', False, 'import py_cui\n'), ((615, 30, 625, 67), 'py_cui.widgets.ScrollTextBlock', 'py_cui.widgets.ScrollTextBlock', ({(615, 61, 615, 63): 'id', (616, 54, 616, 59): 'title', (617, 54, 617, 64): 'self._grid', (618, 54, 618, 57): 'row', (619, 54, 619, 60): 'column', (620, 54, 620, 62): 'row_span', (621, 54, 621, 65): 'column_span', (622, 54, 622, 58): 'padx', (623, 54, 623, 58): 'pady', (624, 54, 624, 66): 'self._logger', (625, 54, 625, 66): 'initial_text'}, {}), '(id, title, self._grid, row, column, row_span,\n column_span, padx, pady, self._logger, initial_text)', False, 'import py_cui\n'), ((660, 30, 669, 57), 'py_cui.widgets.Label', 'py_cui.widgets.Label', ({(660, 51, 660, 53): 'id', (661, 44, 661, 49): 'title', (662, 44, 662, 54): 'self._grid', (663, 44, 663, 47): 'row', (664, 44, 664, 50): 'column', (665, 44, 665, 52): 'row_span', (666, 44, 666, 55): 'column_span', (667, 44, 667, 48): 'padx', (668, 44, 668, 48): 'pady', (669, 44, 669, 56): 'self._logger'}, {}), '(id, title, self._grid, row, column, row_span,\n column_span, padx, pady, self._logger)', False, 'import py_cui\n'), ((704, 30, 714, 62), 'py_cui.widgets.BlockLabel', 'py_cui.widgets.BlockLabel', ({(704, 56, 704, 58): 'id', (705, 49, 705, 54): 'title', (706, 49, 706, 59): 'self._grid', (707, 49, 707, 52): 'row', (708, 49, 708, 55): 'column', (709, 49, 709, 57): 'row_span', (710, 49, 710, 60): 'column_span', (711, 49, 711, 53): 'padx', (712, 49, 712, 53): 'pady', (713, 49, 713, 55): 'center', (714, 49, 714, 61): 'self._logger'}, {}), '(id, title, self._grid, row, column, row_span,\n column_span, padx, pady, center, self._logger)', False, 'import py_cui\n'), ((749, 30, 759, 53), 'py_cui.widgets.Button', 'py_cui.widgets.Button', ({(749, 52, 749, 54): 'id', (750, 45, 750, 50): 'title', (751, 45, 751, 55): 'self._grid', (752, 45, 752, 48): 'row', (753, 45, 753, 51): 'column', (754, 45, 754, 53): 'row_span', (755, 45, 755, 56): 'column_span', (756, 45, 756, 49): 'padx', (757, 45, 757, 49): 'pady', (758, 45, 758, 57): 'self._logger', (759, 45, 759, 52): 'command'}, {}), '(id, title, self._grid, row, column, row_span,\n column_span, padx, pady, self._logger, command)', False, 'import py_cui\n'), ((804, 21, 817, 66), 'py_cui.controls.slider.SliderWidget', 'py_cui.controls.slider.SliderWidget', ({(804, 57, 804, 59): 'id', (805, 57, 805, 62): 'title', (806, 57, 806, 67): 'self._grid', (807, 57, 807, 60): 'row', (808, 57, 808, 63): 'column', (809, 57, 809, 65): 'row_span', (810, 57, 810, 68): 'column_span', (811, 57, 811, 61): 'padx', (812, 57, 812, 61): 'pady', (813, 57, 813, 69): 'self._logger', (814, 57, 814, 64): 'min_val', (815, 57, 815, 64): 'max_val', (816, 57, 816, 61): 'step', (817, 57, 817, 65): 'init_val'}, {}), '(id, title, self._grid, row, column,\n row_span, column_span, padx, pady, self._logger, min_val, max_val, step,\n init_val)', False, 'import py_cui\n'), ((1108, 22, 1108, 104), 'py_cui.popups.MessagePopup', 'py_cui.popups.MessagePopup', ({(1108, 49, 1108, 53): 'self', (1108, 55, 1108, 60): 'title', (1108, 62, 1108, 66): 'text', (1108, 68, 1108, 73): 'color', (1108, 75, 1108, 89): 'self._renderer', (1108, 91, 1108, 103): 'self._logger'}, {}), '(self, title, text, color, self._renderer, self.\n _logger)', False, 'import py_cui\n'), ((1124, 22, 1124, 119), 'py_cui.popups.MessagePopup', 'py_cui.popups.MessagePopup', ({(1124, 49, 1124, 53): 'self', (1124, 55, 1124, 75): "'WARNING - ' + title", (1124, 77, 1124, 81): 'text', (1124, 83, 1124, 88): 'color', (1124, 90, 1124, 104): 'self._renderer', (1124, 106, 1124, 118): 'self._logger'}, {}), "(self, 'WARNING - ' + title, text, color, self.\n _renderer, self._logger)", False, 'import py_cui\n'), ((1140, 22, 1140, 117), 'py_cui.popups.MessagePopup', 'py_cui.popups.MessagePopup', ({(1140, 49, 1140, 53): 'self', (1140, 55, 1140, 73): "'ERROR - ' + title", (1140, 75, 1140, 79): 'text', (1140, 81, 1140, 86): 'color', (1140, 88, 1140, 102): 'self._renderer', (1140, 104, 1140, 116): 'self._logger'}, {}), "(self, 'ERROR - ' + title, text, color, self.\n _renderer, self._logger)", False, 'import py_cui\n'), ((1158, 22, 1158, 140), 'py_cui.popups.YesNoPopup', 'py_cui.popups.YesNoPopup', ({(1158, 47, 1158, 51): 'self', (1158, 53, 1158, 70): "title + '- (y/n)'", (1158, 72, 1158, 93): '"""Yes - (y), No - (n)"""', (1158, 95, 1158, 100): 'color', (1158, 102, 1158, 109): 'command', (1158, 111, 1158, 125): 'self._renderer', (1158, 127, 1158, 139): 'self._logger'}, {}), "(self, title + '- (y/n)', 'Yes - (y), No - (n)',\n color, command, self._renderer, self._logger)", False, 'import py_cui\n'), ((1178, 22, 1178, 117), 'py_cui.popups.TextBoxPopup', 'py_cui.popups.TextBoxPopup', ({(1178, 49, 1178, 53): 'self', (1178, 55, 1178, 60): 'title', (1178, 62, 1178, 67): 'color', (1178, 69, 1178, 76): 'command', (1178, 78, 1178, 92): 'self._renderer', (1178, 94, 1178, 102): 'password', (1178, 104, 1178, 116): 'self._logger'}, {}), '(self, title, color, command, self._renderer,\n password, self._logger)', False, 'import py_cui\n'), ((1200, 22, 1200, 137), 'py_cui.popups.MenuPopup', 'py_cui.popups.MenuPopup', ({(1200, 46, 1200, 50): 'self', (1200, 52, 1200, 62): 'menu_items', (1200, 64, 1200, 69): 'title', (1200, 71, 1200, 76): 'color', (1200, 78, 1200, 85): 'command', (1200, 87, 1200, 101): 'self._renderer', (1200, 103, 1200, 115): 'self._logger', (1200, 117, 1200, 136): 'run_command_if_none'}, {}), '(self, menu_items, title, color, command, self.\n _renderer, self._logger, run_command_if_none)', False, 'import py_cui\n'), ((1222, 24, 1222, 113), 'py_cui.popups.LoadingIconPopup', 'py_cui.popups.LoadingIconPopup', ({(1222, 55, 1222, 59): 'self', (1222, 61, 1222, 66): 'title', (1222, 68, 1222, 75): 'message', (1222, 77, 1222, 82): 'color', (1222, 84, 1222, 98): 'self._renderer', (1222, 100, 1222, 112): 'self._logger'}, {}), '(self, title, message, color, self._renderer,\n self._logger)', False, 'import py_cui\n'), ((1246, 24, 1246, 114), 'py_cui.popups.LoadingBarPopup', 'py_cui.popups.LoadingBarPopup', ({(1246, 54, 1246, 58): 'self', (1246, 60, 1246, 65): 'title', (1246, 67, 1246, 76): 'num_items', (1246, 78, 1246, 83): 'color', (1246, 85, 1246, 99): 'self._renderer', (1246, 101, 1246, 113): 'self._logger'}, {}), '(self, title, num_items, color, self._renderer,\n self._logger)', False, 'import py_cui\n'), ((1269, 22, 1269, 154), 'py_cui.dialogs.form.FormPopup', 'py_cui.dialogs.form.FormPopup', ({(1269, 52, 1269, 56): 'self', (1269, 58, 1269, 64): 'fields', (1269, 66, 1269, 79): 'passwd_fields', (1269, 81, 1269, 89): 'required', (1269, 91, 1269, 93): '{}', (1269, 95, 1269, 100): 'title', (1269, 102, 1269, 123): 'py_cui.WHITE_ON_BLACK', (1269, 125, 1269, 139): 'self._renderer', (1269, 141, 1269, 153): 'self._logger'}, {}), '(self, fields, passwd_fields, required, {},\n title, py_cui.WHITE_ON_BLACK, self._renderer, self._logger)', False, 'import py_cui\n'), ((1293, 22, 1293, 188), 'py_cui.dialogs.filedialog.FileDialogPopup', 'py_cui.dialogs.filedialog.FileDialogPopup', ({(1293, 64, 1293, 68): 'self', (1293, 70, 1293, 78): 'callback', (1293, 80, 1293, 91): 'initial_dir', (1293, 93, 1293, 103): 'popup_type', (1293, 105, 1293, 116): 'ascii_icons', (1293, 118, 1293, 134): 'limit_extensions', (1293, 136, 1293, 157): 'py_cui.WHITE_ON_BLACK', (1293, 159, 1293, 173): 'self._renderer', (1293, 175, 1293, 187): 'self._logger'}, {}), '(self, callback, initial_dir,\n popup_type, ascii_icons, limit_extensions, py_cui.WHITE_ON_BLACK, self.\n _renderer, self._logger)', False, 'import py_cui\n'), ((1486, 8, 1486, 49), 'curses.mousemask', 'curses.mousemask', ({(1486, 25, 1486, 48): 'curses.ALL_MOUSE_EVENTS'}, {}), '(curses.ALL_MOUSE_EVENTS)', False, 'import curses\n'), ((1603, 8, 1603, 23), 'curses.endwin', 'curses.endwin', ({}, {}), '()', False, 'import curses\n'), ((127, 24, 127, 50), 'shutil.get_terminal_size', 'shutil.get_terminal_size', ({}, {}), '()', False, 'import shutil\n'), ((235, 12, 235, 107), 'py_cui.debug._enable_logging', 'py_cui.debug._enable_logging', (), '', False, 'import py_cui\n'), ((378, 12, 378, 60), 'curses.init_pair', 'curses.init_pair', ({(378, 29, 378, 39): 'color_pair', (378, 41, 378, 49): 'fg_color', (378, 51, 378, 59): 'bg_color'}, {}), '(color_pair, fg_color, bg_color)', False, 'import curses\n'), ((386, 29, 386, 87), 'py_cui.renderer.Renderer', 'py_cui.renderer.Renderer', ({(386, 54, 386, 58): 'self', (386, 60, 386, 72): 'self._stdscr', (386, 74, 386, 86): 'self._logger'}, {}), '(self, self._stdscr, self._logger)', False, 'import py_cui\n'), ((1409, 22, 1409, 53), 'curses.color_pair', 'curses.color_pair', ({(1409, 40, 1409, 52): 'RED_ON_BLACK'}, {}), '(RED_ON_BLACK)', False, 'import curses\n'), ((1413, 23, 1413, 54), 'curses.color_pair', 'curses.color_pair', ({(1413, 41, 1413, 53): 'RED_ON_BLACK'}, {}), '(RED_ON_BLACK)', False, 'import curses\n'), ((263, 32, 263, 58), 'shutil.get_terminal_size', 'shutil.get_terminal_size', ({}, {}), '()', False, 'import shutil\n'), ((1587, 20, 1587, 36), 'time.sleep', 'time.sleep', ({(1587, 31, 1587, 35): '(0.25)'}, {}), '(0.25)', False, 'import time\n'), ((1541, 36, 1541, 53), 'curses.getmouse', 'curses.getmouse', ({}, {}), '()', False, 'import curses\n')] |
mohitjain97/isilon_sdk_python | isi_sdk_8_0/isi_sdk_8_0/models/auth_access_access_item_file.py | a371f438f542568edb8cda35e929e6b300b1177c | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 3
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class AuthAccessAccessItemFile(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'group': 'str',
'mode': 'str',
'owner': 'str',
'relevant_mode': 'str'
}
attribute_map = {
'group': 'group',
'mode': 'mode',
'owner': 'owner',
'relevant_mode': 'relevant_mode'
}
def __init__(self, group=None, mode=None, owner=None, relevant_mode=None): # noqa: E501
"""AuthAccessAccessItemFile - a model defined in Swagger""" # noqa: E501
self._group = None
self._mode = None
self._owner = None
self._relevant_mode = None
self.discriminator = None
if group is not None:
self.group = group
if mode is not None:
self.mode = mode
if owner is not None:
self.owner = owner
if relevant_mode is not None:
self.relevant_mode = relevant_mode
@property
def group(self):
"""Gets the group of this AuthAccessAccessItemFile. # noqa: E501
Specifies the group name or ID for the file. # noqa: E501
:return: The group of this AuthAccessAccessItemFile. # noqa: E501
:rtype: str
"""
return self._group
@group.setter
def group(self, group):
"""Sets the group of this AuthAccessAccessItemFile.
Specifies the group name or ID for the file. # noqa: E501
:param group: The group of this AuthAccessAccessItemFile. # noqa: E501
:type: str
"""
self._group = group
@property
def mode(self):
"""Gets the mode of this AuthAccessAccessItemFile. # noqa: E501
Specifies the mode bits on the file. # noqa: E501
:return: The mode of this AuthAccessAccessItemFile. # noqa: E501
:rtype: str
"""
return self._mode
@mode.setter
def mode(self, mode):
"""Sets the mode of this AuthAccessAccessItemFile.
Specifies the mode bits on the file. # noqa: E501
:param mode: The mode of this AuthAccessAccessItemFile. # noqa: E501
:type: str
"""
self._mode = mode
@property
def owner(self):
"""Gets the owner of this AuthAccessAccessItemFile. # noqa: E501
Specifies the name or ID of the file owner. # noqa: E501
:return: The owner of this AuthAccessAccessItemFile. # noqa: E501
:rtype: str
"""
return self._owner
@owner.setter
def owner(self, owner):
"""Sets the owner of this AuthAccessAccessItemFile.
Specifies the name or ID of the file owner. # noqa: E501
:param owner: The owner of this AuthAccessAccessItemFile. # noqa: E501
:type: str
"""
self._owner = owner
@property
def relevant_mode(self):
"""Gets the relevant_mode of this AuthAccessAccessItemFile. # noqa: E501
Specifies the mode bits that are related to the user. # noqa: E501
:return: The relevant_mode of this AuthAccessAccessItemFile. # noqa: E501
:rtype: str
"""
return self._relevant_mode
@relevant_mode.setter
def relevant_mode(self, relevant_mode):
"""Sets the relevant_mode of this AuthAccessAccessItemFile.
Specifies the mode bits that are related to the user. # noqa: E501
:param relevant_mode: The relevant_mode of this AuthAccessAccessItemFile. # noqa: E501
:type: str
"""
self._relevant_mode = relevant_mode
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AuthAccessAccessItemFile):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [((161, 23, 161, 56), 'six.iteritems', 'six.iteritems', ({(161, 37, 161, 55): 'self.swagger_types'}, {}), '(self.swagger_types)', False, 'import six\n')] |
iamthinkking/COMP4217_FinalProject | dataconnector.py | 98cadb013bab52677bffb951b6d173caf4bb22b3 | #!/usr/bin/python3
import pymysql
class Connection:
SQL_HOST = 'localhost'
SQL_USR = ''
SQL_PWD = ''
SQL_DB = 'HOSPITAL'
# initialize database object
def __init__(self, usr, pwd):
self.USR = usr
self.PWD = pwd
# return an database connection
def __enter__(self):
# Open database connection
self.CON = pymysql.connect("localhost", self.USR, self.PWD, "HOSPITAL", autocommit=True)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
# make sure the database connection gets closed
self.CON.close()
def get_doctors(self):
data = ()
try:
# prepare a cursor object using cursor() method
with self.CON.cursor() as cursor:
# execute SQL query using execute() method.
cursor.execute("CALL sp_get_doctors();")
# Fetch all the tuples in a list of lists.
data = cursor.fetchall()
except pymysql.err.OperationalError as e:
return data
finally:
return data
def get_nurses(self):
data = ()
try:
# prepare a cursor object using cursor() method
with self.CON.cursor() as cursor:
# execute SQL query using execute() method.
cursor.execute("CALL sp_get_nurses();")
# Fetch all the tuples in a list of lists.
data = cursor.fetchall()
except pymysql.err.OperationalError as e:
return data
finally:
return data
def GetMedicineAllergyByMostPatients(self):
data = ()
try:
# prepare a cursor object using cursor() method
with self.CON.cursor() as cursor:
# execute SQL query using execute() method.
cursor.execute("CALL GetMedicineAllergyByMostPatients();")
# Fetch all the tuples in a list of lists.
data = cursor.fetchall()
except pymysql.err.OperationalError as e:
return data
finally:
return data
def GetInternsByMostPatient(self):
data = ()
try:
# prepare a cursor object using cursor() method
with self.CON.cursor() as cursor:
# execute SQL query using execute() method.
cursor.execute("CALL GetInternsByMostPatient();")
# Fetch all the tuples in a list of lists.
data = cursor.fetchall()
except pymysql.err.OperationalError as e:
return data
finally:
return data
def GetInternPerformanceData(self):
data = ()
try:
# prepare a cursor object using cursor() method
with self.CON.cursor() as cursor:
# execute SQL query using execute() method.
cursor.execute("CALL GetInternPerformanceData();")
# Fetch all the tuples in a list of lists.
data = cursor.fetchall()
except pymysql.err.OperationalError as e:
return data
finally:
return data
def get_patients(self, q=""):
data = ()
try:
# prepare a cursor object using cursor() method
with self.CON.cursor() as cursor:
# execute SQL query using execute() method.
cursor.execute("CALL get_patients('"+str(q)+"');")
# Fetch all the tuples in a list of lists.
data = cursor.fetchall()
except pymysql.err.OperationalError as e:
print(e)
return data
finally:
return data
def GetPatientByDiagnosisAndDate(self, start_date, end_date, diagnosis=""):
data = ()
# prepare a cursor object using cursor() method
with self.CON.cursor() as cursor:
# execute SQL query using execute method
cursor.execute("CALL GetPatientByDiagnosisAndDate('" + str(start_date) + "', '"
+ str(end_date) + "', '" + str(diagnosis) + "');")
# fetch all the tuples in a list of lists
data = cursor.fetchall()
return data
def get_allergens_of_patient(self, patID):
data = ()
try:
# prepare a cursor object using cursor() method
with self.CON.cursor() as cursor:
# execute SQL query using execute() method.
cursor.execute("CALL get_allergens_of_patient('"+str(patID)+"');")
# Fetch all the tuples in a list of lists.
data = cursor.fetchall()
except pymysql.err.OperationalError as e:
print(e)
return data
finally:
return data
def add_patient(self, fname, lname, dob, address, phone):
data = ()
try:
# prepare a cursor object using cursor() method
with self.CON.cursor() as cursor:
# execute SQL query using execute() method.
cursor.execute("CALL sp_add_patient('" + fname + "', '" + lname + "', '" + str(dob) + "', '" + address +
"', " + str(phone) + ");")
self.CON.commit()
except pymysql.err.OperationalError as e:
return data
finally:
return data
def make_diagnosis(self, docID, patID, icdID, icdDesc, icdname, specifics):
data = ()
try:
# prepare a cursor object using cursor() method
with self.CON.cursor() as cursor:
# execute SQL query using execute() method.
cursor.execute("CALL make_diagnosis(" + str(docID) + ", " + str(patID) + ", " + str(icdID) + ", '" +
icdDesc + "', '" + str(icdname) + "', '" + specifics + "');")
except pymysql.err.OperationalError as e:
return data
finally:
self.CON.commit()
return data
def check_vitals(self, nurseID, patID, temp, pulse_arg, bp, resp):
data = ()
try:
# prepare a cursor object using cursor() method
with self.CON.cursor() as cursor:
# execute SQL query using execute() method.
cursor.execute("CALL check_vitals(" + str(nurseID) + ", " + str(patID) + ", " + str(temp) + ", '" +
str(pulse_arg) + "', '" + str(bp) + "', '" + str(resp) + "');")
except pymysql.err.OperationalError as e:
return data
finally:
self.CON.commit()
return data
def login(self):
data = ()
try:
# prepare a cursor object using cursor() method
with self.CON.cursor() as cursor:
# execute SQL query using execute() method.
cursor.execute("CALL sp_get_currentuser('" + self.USR + "');")
# gets only one tuple from the database's response
data = cursor.fetchone()
except pymysql.err.OperationalError as e:
return data
finally:
return data
def get_role(self):
data = ()
try:
# prepare a cursor object using cursor() method
with self.CON.cursor() as cursor:
# execute SQL query using execute() method.
cursor.execute("CALL sp_get_currentuser('" + self.USR + "');")
# gets only one tuple from the database's response
data = cursor.fetchone()
except pymysql.err.OperationalError as e:
return data
finally:
return data
def GetNursesByPatientAndDate(self, start_date, end_date, pat_ID):
data = ()
# prepare a cursor object using cursor() method
with self.CON.cursor() as cursor:
# execute SQL query using execute method
cursor.execute("CALL GetNursesByPatientAndDate('" + str(start_date) + "', '"
+ str(end_date) + "', '" + str(pat_ID) + "');")
# fetch all the tuples in a list of lists
data = cursor.fetchall()
return data
def get_allergens_of_patient(self,patID):
data = ()
# prepare a cursor object using cursor() method
with self.CON.cursor() as cursor:
# execute SQL query using execute method
cursor.execute("CALL get_allergens_of_patient('" + str(patID) + "');")
# fetch all the tuples in a list of lists
data = cursor.fetchall()
return data
def get_medicine_allergy_by_most_patients(self):
data = ()
# prepare a cursor object using cursor() method
with self.CON.cursor() as cursor:
# execute SQL query using execute method
cursor.execute("CALL get_medicine_allergy_by_most_patients();")
# fetch all the tuples in a list of lists
data = cursor.fetchall()
return data
def GetResultsByPatient(self,patID):
data = ()
# prepare a cursor object using cursor() method
with self.CON.cursor() as cursor:
# execute SQL query using execute method
cursor.execute("CALL GetResultsByPatient('" + str(patID) + "');")
# fetch all the tuples in a list of lists
data = cursor.fetchall()
return data
def get_nurses_by_patient_and_date(self,start_date, end_date, patID):
data = ()
# prepare a cursor object using cursor() method
with self.CON.cursor() as cursor:
# execute SQL query using execute method
cursor.execute("CALL get_nurses_by_patient_and_date('" + str(start_date) + "', '" + str(end_date) + "', '"
+ str(patID) + "');")
# fetch all the tuples in a list of lists
data = cursor.fetchall()
return data
def get_interns_by_most_patients(self):
data = ()
# prepare a cursor object using cursor() method
with self.CON.cursor() as cursor:
# execute SQL query using execute method
cursor.execute("CALL get_interns_by_most_patients();")
# fetch all the tuples in a list of lists
data = cursor.fetchall()
return data
| [((20, 19, 20, 96), 'pymysql.connect', 'pymysql.connect', (), '', False, 'import pymysql\n')] |
AHammoudeh/Flow_AH | flow/visualize/plot_custom_callables.py | 16c5641be3e9e85511756f75efd002478edaee9b | """Generate charts from with .npy files containing custom callables through replay."""
import argparse
from datetime import datetime
import errno
import numpy as np
import matplotlib.pyplot as plt
import os
import pytz
import sys
def make_bar_plot(vals, title):
print(len(vals))
fig = plt.figure()
plt.hist(vals, 10, facecolor='blue', alpha=0.5)
plt.title(title)
plt.xlim(1000,3000)
return fig
def plot_trip_distribution(all_trip_energy_distribution):
non_av_vals = []
figures = []
figure_names = []
for key in all_trip_energy_distribution:
if key != 'av':
non_av_vals.extend(all_trip_energy_distribution[key])
figures.append(make_bar_plot(all_trip_energy_distribution[key], key))
figure_names.append(key)
figure_names.append('All Non-AV')
figures.append(make_bar_plot(non_av_vals, 'All Non-AV'))
figure_names.append('All')
figures.append(make_bar_plot(non_av_vals + all_trip_energy_distribution['av'], 'All'))
return figure_names, figures
def parse_flags(args):
"""Parse training options user can specify in command line.
Returns
-------
argparse.Namespace
the output parser object
"""
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="Parse argument used when running a Flow simulation.",
epilog="python train.py EXP_CONFIG")
parser.add_argument("target_folder", type=str,
help='Folder containing results')
parser.add_argument("--output_folder", type=str, required=False, default=None,
help='Folder to save charts to.')
parser.add_argument("--show_images", action='store_true',
help='Whether to display charts.')
parser.add_argument("--heatmap", type=str, required=False,
help='Make a heatmap of the supplied variable.')
return parser.parse_args(args)
if __name__ == "__main__":
flags = parse_flags(sys.argv[1:])
date = datetime.now(tz=pytz.utc)
date = date.astimezone(pytz.timezone('US/Pacific')).strftime("%m-%d-%Y")
if flags.output_folder:
if not os.path.exists(flags.output_folder):
try:
os.makedirs(flags.output_folder)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
info_dicts = []
custom_callable_names = set()
exp_names = []
for (dirpath, dir_names, file_names) in os.walk(flags.target_folder):
for file_name in file_names:
if file_name[-8:] == "info.npy":
exp_name = os.path.basename(dirpath)
info_dict = np.load(os.path.join(dirpath, file_name), allow_pickle=True).item()
info_dicts.append(info_dict)
print(info_dict.keys())
exp_names.append(exp_name)
custom_callable_names.update(info_dict.keys())
idxs = np.argsort(exp_names)
exp_names = [exp_names[i] for i in idxs]
info_dicts = [info_dicts[i] for i in idxs]
if flags.heatmap is not None:
heatmap = np.zeros((4, 6))
pr_spacing = np.around(np.linspace(0, 0.3, 4), decimals=2)
apr_spacing = np.around(np.linspace(0, 0.5, 6), decimals=2)
for exp_name, info_dict in zip(exp_names, info_dicts):
apr_bucket = int(np.around(float(exp_name.split('_')[1][3:]) / 0.1))
pr_bucket = int(np.around(float(exp_name.split('_')[0][2:]) / 0.1))
if flags.heatmap not in info_dict:
print(exp_name)
continue
else:
val = np.mean(info_dict[flags.heatmap])
print(exp_name, pr_bucket, pr_spacing[pr_bucket], apr_bucket, apr_spacing[apr_bucket], val)
heatmap[pr_bucket, apr_bucket] = val
fig = plt.figure()
plt.imshow(heatmap, interpolation='nearest', cmap='seismic', aspect='equal', vmin=1500, vmax=3000)
plt.title(flags.heatmap)
plt.yticks(ticks=np.arange(len(pr_spacing)), labels=pr_spacing)
plt.ylabel("AV Penetration")
plt.xticks(ticks=np.arange(len(apr_spacing)), labels=apr_spacing)
plt.xlabel("Aggressive Driver Penetration")
plt.colorbar()
plt.show()
plt.close(fig)
else:
for name in custom_callable_names:
y_vals = [np.mean(info_dict[name]) for info_dict in info_dicts]
y_stds = [np.std(info_dict[name]) for info_dict in info_dicts]
x_pos = np.arange(len(exp_names))
plt.bar(x_pos, y_vals, align='center', alpha=0.5)
plt.xticks(x_pos, [exp_name for exp_name in exp_names], rotation=60)
plt.xlabel('Experiment')
plt.title('I210 Replay Result: {}'.format(name))
plt.tight_layout()
if flags.output_folder:
plt.savefig(os.path.join(flags.output_folder, '{}-plot.png'.format(name)))
plt.show()
| [((14, 10, 14, 22), 'matplotlib.pyplot.figure', 'plt.figure', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((15, 4, 15, 51), 'matplotlib.pyplot.hist', 'plt.hist', (), '', True, 'import matplotlib.pyplot as plt\n'), ((16, 4, 16, 20), 'matplotlib.pyplot.title', 'plt.title', ({(16, 14, 16, 19): 'title'}, {}), '(title)', True, 'import matplotlib.pyplot as plt\n'), ((17, 4, 17, 23), 'matplotlib.pyplot.xlim', 'plt.xlim', ({(17, 13, 17, 17): '(1000)', (17, 18, 17, 22): '(3000)'}, {}), '(1000, 3000)', True, 'import matplotlib.pyplot as plt\n'), ((48, 13, 51, 44), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((66, 11, 66, 36), 'datetime.datetime.now', 'datetime.now', (), '', False, 'from datetime import datetime\n'), ((80, 44, 80, 72), 'os.walk', 'os.walk', ({(80, 52, 80, 71): 'flags.target_folder'}, {}), '(flags.target_folder)', False, 'import os\n'), ((91, 11, 91, 32), 'numpy.argsort', 'np.argsort', ({(91, 22, 91, 31): 'exp_names'}, {}), '(exp_names)', True, 'import numpy as np\n'), ((96, 18, 96, 34), 'numpy.zeros', 'np.zeros', ({(96, 27, 96, 33): '(4, 6)'}, {}), '((4, 6))', True, 'import numpy as np\n'), ((111, 14, 111, 26), 'matplotlib.pyplot.figure', 'plt.figure', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((112, 8, 112, 106), 'matplotlib.pyplot.imshow', 'plt.imshow', (), '', True, 'import matplotlib.pyplot as plt\n'), ((113, 8, 113, 32), 'matplotlib.pyplot.title', 'plt.title', ({(113, 18, 113, 31): 'flags.heatmap'}, {}), '(flags.heatmap)', True, 'import matplotlib.pyplot as plt\n'), ((115, 8, 115, 36), 'matplotlib.pyplot.ylabel', 'plt.ylabel', ({(115, 19, 115, 35): '"""AV Penetration"""'}, {}), "('AV Penetration')", True, 'import matplotlib.pyplot as plt\n'), ((117, 8, 117, 51), 'matplotlib.pyplot.xlabel', 'plt.xlabel', ({(117, 19, 117, 50): '"""Aggressive Driver Penetration"""'}, {}), "('Aggressive Driver Penetration')", True, 'import matplotlib.pyplot as plt\n'), ((118, 8, 118, 22), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((119, 8, 119, 18), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((120, 8, 120, 22), 'matplotlib.pyplot.close', 'plt.close', ({(120, 18, 120, 21): 'fig'}, {}), '(fig)', True, 'import matplotlib.pyplot as plt\n'), ((70, 15, 70, 50), 'os.path.exists', 'os.path.exists', ({(70, 30, 70, 49): 'flags.output_folder'}, {}), '(flags.output_folder)', False, 'import os\n'), ((97, 31, 97, 53), 'numpy.linspace', 'np.linspace', ({(97, 43, 97, 44): '0', (97, 46, 97, 49): '0.3', (97, 51, 97, 52): '4'}, {}), '(0, 0.3, 4)', True, 'import numpy as np\n'), ((98, 32, 98, 54), 'numpy.linspace', 'np.linspace', ({(98, 44, 98, 45): '0', (98, 47, 98, 50): '0.5', (98, 52, 98, 53): '6'}, {}), '(0, 0.5, 6)', True, 'import numpy as np\n'), ((128, 12, 128, 61), 'matplotlib.pyplot.bar', 'plt.bar', (), '', True, 'import matplotlib.pyplot as plt\n'), ((129, 12, 129, 80), 'matplotlib.pyplot.xticks', 'plt.xticks', (), '', True, 'import matplotlib.pyplot as plt\n'), ((130, 12, 130, 36), 'matplotlib.pyplot.xlabel', 'plt.xlabel', ({(130, 23, 130, 35): '"""Experiment"""'}, {}), "('Experiment')", True, 'import matplotlib.pyplot as plt\n'), ((132, 12, 132, 30), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((136, 12, 136, 22), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((67, 27, 67, 54), 'pytz.timezone', 'pytz.timezone', ({(67, 41, 67, 53): '"""US/Pacific"""'}, {}), "('US/Pacific')", False, 'import pytz\n'), ((72, 16, 72, 48), 'os.makedirs', 'os.makedirs', ({(72, 28, 72, 47): 'flags.output_folder'}, {}), '(flags.output_folder)', False, 'import os\n'), ((83, 27, 83, 52), 'os.path.basename', 'os.path.basename', ({(83, 44, 83, 51): 'dirpath'}, {}), '(dirpath)', False, 'import os\n'), ((107, 22, 107, 55), 'numpy.mean', 'np.mean', ({(107, 30, 107, 54): 'info_dict[flags.heatmap]'}, {}), '(info_dict[flags.heatmap])', True, 'import numpy as np\n'), ((124, 22, 124, 46), 'numpy.mean', 'np.mean', ({(124, 30, 124, 45): 'info_dict[name]'}, {}), '(info_dict[name])', True, 'import numpy as np\n'), ((125, 22, 125, 45), 'numpy.std', 'np.std', ({(125, 29, 125, 44): 'info_dict[name]'}, {}), '(info_dict[name])', True, 'import numpy as np\n'), ((84, 36, 84, 68), 'os.path.join', 'os.path.join', ({(84, 49, 84, 56): 'dirpath', (84, 58, 84, 67): 'file_name'}, {}), '(dirpath, file_name)', False, 'import os\n')] |
yugabyte/docsearch-scraper | deployer/src/config_manager.py | 8b58d364c7721cbce892843e946834a3ccc5fcd7 | import algoliasearch
from os import environ
from . import algolia_helper
from . import snippeter
from . import emails
from . import helpers
from . import fetchers
from .helpdesk_helper import add_note, get_conversation, \
get_emails_from_conversation, get_conversation_url_from_cuid
from deployer.src.algolia_internal_api import remove_user_from_index
class ConfigManager:
instance = None
def __init__(self):
if not ConfigManager.instance:
ConfigManager.instance = ConfigManager.__ConfigManager()
@staticmethod
def encode_set(to_encode):
encoded = []
for config_name in to_encode:
try:
config_name = config_name.decode()
except AttributeError:
print("Error decoding non string var {}".format(config_name))
pass
encoded.append(config_name)
return encoded
class __ConfigManager:
def __init__(self):
self.public_dir = environ.get('PUBLIC_CONFIG_FOLDER')
self.private_dir = environ.get('PRIVATE_CONFIG_FOLDER')
if self.public_dir is None or self.private_dir is None:
print(
'PUBLIC_CONFIG_FOLDER and PRIVATE_CONFIG_FOLDER must be defined in the environment')
exit()
self.initial_public_nb_stash = None
self.final_nb_public_stash = None
self.initial_private_nb_stash = None
self.final_nb_private_stash = None
self.init()
self.ref_configs = fetchers.get_configs_from_repos()
def init(self):
output = helpers.check_output_decoded(['git', 'stash', 'list'],
cwd=self.public_dir)
self.initial_public_nb_stash = len(output.split('\n'))
helpers.check_output_decoded(
['git', 'stash', '--include-untracked'],
cwd=self.public_dir)
output2 = helpers.check_output_decoded(['git', 'stash', 'list'],
cwd=self.public_dir)
self.final_nb_public_stash = len(output2.split('\n'))
helpers.check_output_decoded(
['git', 'pull', '-r', 'origin', 'master'],
cwd=self.public_dir)
output = helpers.check_output_decoded(['git', 'stash', 'list'],
cwd=self.private_dir)
self.initial_private_nb_stash = len(output.split('\n'))
helpers.check_output_decoded(
['git', 'stash', '--include-untracked'],
cwd=self.private_dir)
output2 = helpers.check_output_decoded(['git', 'stash', 'list'],
cwd=self.private_dir)
self.final_nb_private_stash = len(output2.split('\n'))
helpers.check_output_decoded(
['git', 'pull', '-r', 'origin', 'master'],
cwd=self.private_dir)
def destroy(self):
if self.final_nb_public_stash != self.initial_public_nb_stash:
helpers.check_output_decoded(['git', 'stash', 'pop'],
cwd=self.public_dir)
if self.final_nb_private_stash != self.initial_private_nb_stash:
helpers.check_output_decoded(['git', 'stash', 'pop'],
cwd=self.private_dir)
def add_config(self, config_name):
key = algolia_helper.add_docsearch_key(config_name)
print(config_name + ' (' + key + ')')
config = self.ref_configs[config_name]
print('\n================================\n')
if "conversation_id" in config:
cuid = config["conversation_id"][0]
# Add email(s) to the private config & grant access
conversation = get_conversation(cuid)
emails_from_conv = get_emails_from_conversation(conversation)
analytics_statuses = emails.add(config_name, self.private_dir,
emails_to_add=emails_from_conv)
note_content = snippeter.get_email_for_config(config_name,
analytics_statuses)
add_note(cuid, note_content)
print(
'Email address fetched and stored, conversation updated and available at {}\n'.format(
get_conversation_url_from_cuid(cuid)))
else:
if helpers.confirm(
'\nDo you want to add emails for {}?'.format(
config_name)):
analytics_statuses = emails.add(config_name,
self.private_dir)
print(snippeter.get_email_for_config(config_name,
analytics_statuses))
else:
print(snippeter.get_email_for_config(config_name))
def update_config(self, config_name):
message = config_name
try:
key = algolia_helper.get_docsearch_key(config_name)
message = message + ' (' + key + ')'
except algoliasearch.helpers.AlgoliaException:
pass
print(message)
print('\n================================\n')
print(snippeter.get_email_for_config(config_name))
if helpers.confirm(
'\nDo you want to add emails for {}?'.format(config_name)):
emails.add(config_name, self.private_dir)
def remove_config(self, config_name):
algolia_helper.delete_docsearch_key(config_name)
algolia_helper.delete_docsearch_index(config_name)
algolia_helper.delete_docsearch_index(config_name + '_tmp')
analytics_keys = algolia_helper.list_index_analytics_key(
config_name)
for key in analytics_keys:
description = key['description'].split()
email = description[4]
print(email)
if email is not None:
remove_user_from_index(config_name, email)
emails.delete(config_name, self.private_dir)
| [((37, 30, 37, 65), 'os.environ.get', 'environ.get', ({(37, 42, 37, 64): '"""PUBLIC_CONFIG_FOLDER"""'}, {}), "('PUBLIC_CONFIG_FOLDER')", False, 'from os import environ\n'), ((38, 31, 38, 67), 'os.environ.get', 'environ.get', ({(38, 43, 38, 66): '"""PRIVATE_CONFIG_FOLDER"""'}, {}), "('PRIVATE_CONFIG_FOLDER')", False, 'from os import environ\n'), ((156, 20, 156, 62), 'deployer.src.algolia_internal_api.remove_user_from_index', 'remove_user_from_index', ({(156, 43, 156, 54): 'config_name', (156, 56, 156, 61): 'email'}, {}), '(config_name, email)', False, 'from deployer.src.algolia_internal_api import remove_user_from_index\n')] |
pylover/budgie | Source/budgie/__init__.py | f453cf2fbbf440e8e2314c7fb63f101dbe048e17 |
import sys
from sqlalchemy.exc import DatabaseError
from . import cli
from .configuration import settings, init as init_config
from .observer import HelpdeskObserver, MaximumClientsReached
from .models import init as init_models, metadata, engine, check_db
from .smtp import SMTPConfigurationError
__version__ = '0.1.0-dev.0'
def start_server(cli_arguments):
init_models()
# Checking database
try:
check_db()
except DatabaseError:
print(
'Cannot connect to database. or database objects are not created yet. Please run `budgie setup-db`.',
file=sys.stderr
)
sys.exit(-1)
try:
manager = HelpdeskObserver()
manager.start()
except (
MaximumClientsReached,
SMTPConfigurationError) as ex:
print(ex, file=sys.stderr)
sys.exit(-1)
def main():
arguments = cli.init()
if arguments.version:
print(__version__)
sys.exit(0)
init_config(arguments.config_file if arguments.config_file else None)
if arguments.func is not None:
arguments.func(arguments)
| [((44, 8, 44, 19), 'sys.exit', 'sys.exit', ({(44, 17, 44, 18): '(0)'}, {}), '(0)', False, 'import sys\n'), ((27, 8, 27, 20), 'sys.exit', 'sys.exit', ({(27, 17, 27, 19): '(-1)'}, {}), '(-1)', False, 'import sys\n'), ((37, 8, 37, 20), 'sys.exit', 'sys.exit', ({(37, 17, 37, 19): '(-1)'}, {}), '(-1)', False, 'import sys\n')] |
thismakessand/alltheplaces | locations/spiders/shopnsave.py | b6116199844c9e88bff3a691290f07a7457470ba | # -*- coding: utf-8 -*-
import scrapy
import re
from locations.items import GeojsonPointItem
DAY_DICT = {
'Mon': 'Mo',
'Tue': 'Tu',
'Wed': 'We',
'Thu': 'Th',
'Fri': 'Fr',
'Sat': 'Sa',
'Sun': 'Su',
'Monday': 'Mo',
'Tuesday': 'Tu',
'Wednesday': 'We',
'Thursday': 'Th',
'Thurs': 'Th',
'Thur': 'Th',
'Friday': 'Fr',
'Saturday': 'Sa',
'Sunday': 'Su',
'24 hours/7 days a week': '24/7',
'Please contact store for hours': 'N/A',
}
class ShopnSaveSpider(scrapy.Spider):
name = "shopnsave"
allowed_domains = ["www.shopnsave.com"]
start_urls = (
'https://www.shopnsave.com/stores/view-stores-by-state.html?state=IL&page=1',
'https://www.shopnsave.com/stores/view-stores-by-state.html?state=IL&page=2',
'https://www.shopnsave.com/stores/view-stores-by-state.html?state=MO&page=1',
'https://www.shopnsave.com/stores/view-stores-by-state.html?state=MO&page=2',
'https://www.shopnsave.com/stores/view-stores-by-state.html?state=MO&page=3',
)
def parse(self, response):
stores = response.xpath('//table[@id="store-search-result"]/tbody/tr[@class="" or @class="store-grey"]')
for store in stores:
properties = {
"ref": store.xpath('td[@class="store-result-address"]/text()').extract_first(),
"name": store.xpath('td[@class="store-result-address"]/text()').extract_first(),
"opening_hours": self.store_hours(store.xpath('td[@class="store-result-address"]/text()[last()-1]').extract_first()),
"addr_full": store.xpath('td[@class="store-result-address"]/text()')[1].extract(),
"city": self.city(store.xpath('td[@class="store-result-address"]/text()')[2].extract()),
"state": self.state(store.xpath('td[@class="store-result-address"]/text()')[2].extract()),
"postcode": self.postCode(store.xpath('td[@class="store-result-address"]/text()')[2].extract()),
"phone": self.phone(store.xpath('td[@class="store-result-phone"]/strong/text()')[0].extract()),
}
yield GeojsonPointItem(**properties)
def city(self, data):
str_list = data.split(',')
return str_list[0].strip()
def state(self, data):
str_list = data.split(',')
state = str_list[1].strip()
state = state[:2]
return state
def postCode(self, data):
str_list = data.split(',')
zipCode = str_list[1].strip()
return zipCode[-5:]
def phone(self, data):
return data.replace('— Main', '')
def store_hours(self, store_hours):
if "day" not in store_hours and "-" not in store_hours:
return ""
if "24 Hours, 7 days a week" in store_hours:
return "24/7"
store_hours = store_hours.replace('\r\n\t\t\t\t\t\t', '')
store_hours = store_hours.replace('Midnight', '00:00')
pattern = re.compile(r'\b(' + '|'.join(DAY_DICT.keys()) + r')\b')
store_hours = pattern.sub(lambda x: DAY_DICT[x.group()], ''.join(store_hours))
store_hours = store_hours.replace('am', ':00')
m = re.search('([0-9]{1,2})(\spm)', store_hours)
if m:
h = m.group(1)
new_h = int(h) + 12
store_hours = store_hours.replace(h + ' pm', str(new_h) + ':00')
return store_hours
| [((88, 12, 88, 56), 're.search', 're.search', ({(88, 22, 88, 42): '"""([0-9]{1,2})(\\\\spm)"""', (88, 44, 88, 55): 'store_hours'}, {}), "('([0-9]{1,2})(\\\\spm)', store_hours)", False, 'import re\n'), ((55, 18, 55, 48), 'locations.items.GeojsonPointItem', 'GeojsonPointItem', ({}, {}), '(**properties)', False, 'from locations.items import GeojsonPointItem\n')] |
TovarischSuhov/QR_quest | run.py | d2735a60f9018e59fcef09fd76b40c3a1e9d7412 | #!/usr/bin/env python
from app import app
app.run(debug = True)
| [((4, 0, 4, 21), 'app.app.run', 'app.run', (), '', False, 'from app import app\n')] |
DEV3L/twitter-learning-journal | tests/twitter_learning_journal/dao/test_os_env.py | a51d22a60a3d1249add352d8357975a7f2db585c | from unittest.mock import patch
from app.twitter_learning_journal.dao.os_env import os_environ
@patch('app.twitter_learning_journal.dao.os_env.os')
def test_os_environ(mock_os):
expected_value = 'environment_value'
mock_os.environ.__contains__.return_value = True # patch in statement
mock_os.environ.__getitem__.return_value = expected_value
os_variable = os_environ('a_key')
assert expected_value == os_variable
mock_os.environ.__getitem__.assert_called_with('a_key')
def test_os_environ_key_missing():
expected_value = None
os_variable = os_environ('a_key')
assert expected_value == os_variable
def test_os_environ_key_missing_with_default():
expected_value = 'a_default'
os_variable = os_environ('a_key', default=expected_value)
assert expected_value == os_variable
| [((6, 1, 6, 52), 'unittest.mock.patch', 'patch', ({(6, 7, 6, 51): '"""app.twitter_learning_journal.dao.os_env.os"""'}, {}), "('app.twitter_learning_journal.dao.os_env.os')", False, 'from unittest.mock import patch\n'), ((13, 18, 13, 37), 'app.twitter_learning_journal.dao.os_env.os_environ', 'os_environ', ({(13, 29, 13, 36): '"""a_key"""'}, {}), "('a_key')", False, 'from app.twitter_learning_journal.dao.os_env import os_environ\n'), ((21, 18, 21, 37), 'app.twitter_learning_journal.dao.os_env.os_environ', 'os_environ', ({(21, 29, 21, 36): '"""a_key"""'}, {}), "('a_key')", False, 'from app.twitter_learning_journal.dao.os_env import os_environ\n'), ((28, 18, 28, 61), 'app.twitter_learning_journal.dao.os_env.os_environ', 'os_environ', (), '', False, 'from app.twitter_learning_journal.dao.os_env import os_environ\n')] |
naveenr414/hack-umbc | web-scraper/mongoscraper/populate.py | f5d0fa5b6c3203d54a3c98b8a43b8028229431f8 | import pymongo
myclient = pymongo.MongoClient()
mydb = myclient["mydb"]
hor = mydb["HoR"]
sen = mydb["Senator"]
gov = mydb["Governor"]
def write(fileJSON):
myDoc = fileJSON
if( "hor" in myDoc.values()):
hor.insert_one(myDoc)
elif( "senate" in myDoc.values()):
sen.insert_one(myDoc)
else:
gov.insert_one(myDoc)
def deletes():
for x in sen.find():
sen.delete_one(x)
def prints():
for x in sen.find():
print(x)
| [((3, 11, 3, 32), 'pymongo.MongoClient', 'pymongo.MongoClient', ({}, {}), '()', False, 'import pymongo\n')] |
ZSD-tim/dayu_widgets | tests/test_utils_obj_value.py | 31c2530bdc4161d9311574d9850c2e9471e53072 | """
Test get_obj_value set_obj_value has_obj_value
"""
import pytest
from dayu_widgets import utils
class _HasNameAgeObject(object):
def __init__(self, name, age):
super(_HasNameAgeObject, self).__init__()
self.name = name
self.age = age
@pytest.mark.parametrize('obj', (
{'name': 'xiaoming', 'age': 18},
_HasNameAgeObject('xiaoming', 18)
))
class TestObjValue(object):
"""Test get_obj_value has_obj_value set_obj_value collection."""
@pytest.mark.parametrize('attr, default, result', (
('name', 'hhh', 'xiaoming'),
('age', 0, 18),
('score', 0, 0)
))
def test_get_obj_value(self, obj, attr, default, result):
"""Test get_obj_value with dict/object as arg. """
assert utils.get_obj_value(obj, attr, default) == result
@pytest.mark.parametrize('attr, result', (
('name', True),
('age', True),
('sex', False),
))
def test_has_obj_value(self, obj, attr, result):
"""Test has_obj_value with dict/object as arg. """
assert utils.has_obj_value(obj, attr) == result
@pytest.mark.parametrize('attr, value', (
('name', 'xiaohua'),
('age', 30),
('id', 80),
))
def test_set_obj_value(self, obj, attr, value):
"""Test set_obj_value with dict/object as arg. """
utils.set_obj_value(obj, attr, value)
assert utils.get_obj_value(obj, attr) == value
| [((22, 5, 26, 6), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(22, 29, 22, 52): '"""attr, default, result"""', (22, 54, 26, 5): "(('name', 'hhh', 'xiaoming'), ('age', 0, 18), ('score', 0, 0))"}, {}), "('attr, default, result', (('name', 'hhh',\n 'xiaoming'), ('age', 0, 18), ('score', 0, 0)))", False, 'import pytest\n'), ((31, 5, 35, 6), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(31, 29, 31, 43): '"""attr, result"""', (31, 45, 35, 5): "(('name', True), ('age', True), ('sex', False))"}, {}), "('attr, result', (('name', True), ('age', True), (\n 'sex', False)))", False, 'import pytest\n'), ((40, 5, 44, 6), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(40, 29, 40, 42): '"""attr, value"""', (40, 44, 44, 5): "(('name', 'xiaohua'), ('age', 30), ('id', 80))"}, {}), "('attr, value', (('name', 'xiaohua'), ('age', 30), (\n 'id', 80)))", False, 'import pytest\n'), ((47, 8, 47, 45), 'dayu_widgets.utils.set_obj_value', 'utils.set_obj_value', ({(47, 28, 47, 31): 'obj', (47, 33, 47, 37): 'attr', (47, 39, 47, 44): 'value'}, {}), '(obj, attr, value)', False, 'from dayu_widgets import utils\n'), ((29, 15, 29, 54), 'dayu_widgets.utils.get_obj_value', 'utils.get_obj_value', ({(29, 35, 29, 38): 'obj', (29, 40, 29, 44): 'attr', (29, 46, 29, 53): 'default'}, {}), '(obj, attr, default)', False, 'from dayu_widgets import utils\n'), ((38, 15, 38, 45), 'dayu_widgets.utils.has_obj_value', 'utils.has_obj_value', ({(38, 35, 38, 38): 'obj', (38, 40, 38, 44): 'attr'}, {}), '(obj, attr)', False, 'from dayu_widgets import utils\n'), ((48, 15, 48, 45), 'dayu_widgets.utils.get_obj_value', 'utils.get_obj_value', ({(48, 35, 48, 38): 'obj', (48, 40, 48, 44): 'attr'}, {}), '(obj, attr)', False, 'from dayu_widgets import utils\n')] |
kokosing/hue | desktop/core/ext-py/PyYAML-3.12/tests/lib3/test_all.py | 2307f5379a35aae9be871e836432e6f45138b3d9 |
import sys, yaml, test_appliance
def main(args=None):
collections = []
import test_yaml
collections.append(test_yaml)
if yaml.__with_libyaml__:
import test_yaml_ext
collections.append(test_yaml_ext)
return test_appliance.run(collections, args)
if __name__ == '__main__':
main()
| [((11, 11, 11, 48), 'test_appliance.run', 'test_appliance.run', ({(11, 30, 11, 41): 'collections', (11, 43, 11, 47): 'args'}, {}), '(collections, args)', False, 'import sys, yaml, test_appliance\n')] |
Tim-orius/aidem | tim_camera/oop_detection_webcam.py | 965a71888db72f42223777e890f4bcf88cde7fd3 | """ Webcam Detection with Tensorflow calssifier and object distance calculation """
__version__ = "0.1.0"
__author__ = "Tim Rosenkranz"
__email__ = "[email protected]"
__credits__ = "Special thanks to The Anh Vuong who came up with the original idea." \
"This code is also based off of the code from Evan Juras (see below)"
# This script is based off of a script by Evan Juras (see below).
# I rewrote this script to be object oriented and added the tkinter-ui (removed command
# line functionalities) as well as several functionalities to calculate the distance
# between two detected object
######## Webcam Object Detection Using Tensorflow-trained Classifier #########
#
# Author: Evan Juras
# Date: 10/27/19
# Description:
# This program uses a TensorFlow Lite model to perform object detection on a live webcam
# feed. It draws boxes and scores around the objects of interest in each frame from the
# webcam. To improve FPS, the webcam object runs in a separate thread from the main program.
# This script will work with either a Picamera or regular USB webcam.
#
# This code is based off the TensorFlow Lite image classification example at:
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/examples/python/label_image.py
#
# I [Evan Juras] added my own method of drawing boxes and labels using OpenCV.
# Import packages
import os
import argparse
import cv2
import numpy as np
import sys
import time
from threading import Thread
import importlib.util
import math
# Define VideoStream class to handle streaming of video from webcam in separate processing thread
# Source - Adrian Rosebrock, PyImageSearch: https://www.pyimagesearch.com/2015/12/28/increasing-raspberry-pi-fps-with-python-and-opencv/
class VideoStream:
"""Camera object that controls video streaming from the Picamera"""
def __init__(self,resolution=(640,480),framerate=30):
# Initialize the PiCamera and the camera image stream
self.stream = cv2.VideoCapture(0)
ret = self.stream.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG'))
ret = self.stream.set(3,resolution[0])
ret = self.stream.set(4,resolution[1])
self.width = self.stream.get(3)
# Read first frame from the stream
(self.grabbed, self.frame) = self.stream.read()
# Variable to control when the camera is stopped
self.stopped = False
def start(self):
# Start the thread that reads frames from the video stream
Thread(target=self.update,args=()).start()
return self
def update(self):
# Keep looping indefinitely until the thread is stopped
while True:
# If the camera is stopped, stop the thread
if self.stopped:
# Close camera resources
self.stream.release()
return
# Otherwise, grab the next frame from the stream
(self.grabbed, self.frame) = self.stream.read()
def read(self):
# Return the most recent frame
return self.frame
def stop(self):
# Indicate that the camera and thread should be stopped
self.stopped = True
def continue_video(self):
# Indicate that camera should resume
self.stopped = False
self.start()
class LiveDetection:
"""
"""
def __init__(self):
"""
"""
MODEL_NAME = 'Sample_Model'
GRAPH_NAME = 'detect.tflite'
LABELMAP_NAME = 'labelmap.txt'
self.__min_conf_threshold = 0.5
resW, resH = '1280x720'.split('x')
self.__imW, self.__imH = int(resW), int(resH)
use_TPU = ''
# Import TensorFlow libraries
# If tflite_runtime is installed, import interpreter from tflite_runtime, else import from regular tensorflow
# If using Coral Edge TPU, import the load_delegate library
pkg = importlib.util.find_spec('tflite_runtime')
if pkg:
from tflite_runtime.interpreter import Interpreter
if use_TPU:
from tflite_runtime.interpreter import load_delegate
else:
from tensorflow.lite.python.interpreter import Interpreter
if use_TPU:
from tensorflow.lite.python.interpreter import load_delegate
# If using Edge TPU, assign filename for Edge TPU model
if use_TPU:
# If user has specified the name of the .tflite file, use that name, otherwise use default 'edgetpu.tflite'
if (GRAPH_NAME == 'detect.tflite'):
GRAPH_NAME = 'edgetpu.tflite'
# Get path to current working directory
CWD_PATH = os.getcwd()
# Path to .tflite file, which contains the model that is used for object detection
PATH_TO_CKPT = os.path.join(CWD_PATH,MODEL_NAME,GRAPH_NAME)
# Path to label map file
PATH_TO_LABELS = os.path.join(CWD_PATH,MODEL_NAME,LABELMAP_NAME)
# Load the label map
with open(PATH_TO_LABELS, 'r') as f:
self.__labels = [line.strip() for line in f.readlines()]
# Have to do a weird fix for label map if using the COCO "starter model" from
# https://www.tensorflow.org/lite/models/object_detection/overview
# First label is '???', which has to be removed.
if self.__labels[0] == '???':
del(self.__labels[0])
# Load the Tensorflow Lite model.
# If using Edge TPU, use special load_delegate argument
if use_TPU:
self._interpreter = Interpreter(model_path=PATH_TO_CKPT,
experimental_delegates=[load_delegate('libedgetpu.so.1.0')])
print(PATH_TO_CKPT)
else:
self._interpreter = Interpreter(model_path=PATH_TO_CKPT)
self._interpreter.allocate_tensors()
# Get model details
self.__input_details = self._interpreter.get_input_details()
self.__output_details = self._interpreter.get_output_details()
self.__height = self.__input_details[0]['shape'][1]
self.__width = self.__input_details[0]['shape'][2]
self.__floating_model = (self.__input_details[0]['dtype'] == np.float32)
input_mean = 127.5
input_std = 127.5
# Initialize frame rate calculation
self.__frame_rate_calc = 1
self.__freq = cv2.getTickFrequency()
# Initialize video stream
self._videostream = VideoStream(resolution=(self.__imW,self.__imH),framerate=30).start()
time.sleep(1)
# -----------------------------------------------------------------
# Average parameters
self.avg_width_person = 45+8+4 # +8 due to borders not aligning to body
self.avg_height_person = 172
self.avg_proportion_person = self.avg_width_person / self.avg_height_person
self.test_distance = 216
# Old value:
self.fokal_empir = 1500
# Variable for new calibrated value:
self.focal_value = 0
def calibrate(self,
obj_width_cm:int=0,
obj_dist_cm:int=0,
obj_name:str=""
):
"""
"""
color_variation = 0
foc_meas = 0
for i in range(10):
# Grab frame from video stream
frame1 = self._videostream.read()
# Acquire frame and resize to expected shape [1xHxWx3]
frame = frame1.copy()
frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame_resized = cv2.resize(frame_rgb, (self.__width, self.__height))
input_data = np.expand_dims(frame_resized, axis=0)
# Normalize pixel values if using a floating model (i.e. if model is non-quantized)
if self.__floating_model:
input_data = (np.float32(input_data) - input_mean) / input_std
# Perform the actual detection by running the model with the image as input
self._interpreter.set_tensor(self.__input_details[0]['index'],input_data)
self._interpreter.invoke()
# Retrieve detection results
boxes = self._interpreter.get_tensor(self.__output_details[0]['index'])[0] # Bounding box coordinates of detected objects
classes = self._interpreter.get_tensor(self.__output_details[1]['index'])[0] # Class index of detected objects
scores = self._interpreter.get_tensor(self.__output_details[2]['index'])[0] # Confidence of detected objects
obj_type = []
for i in range(len(scores)):
if ((scores[i] > self.__min_conf_threshold) and (scores[i] <= 1.0)):
# Check for the right object (ensure correct measurement when several objects are detected)
if(self.__labels[int(classes[i])] != obj_name):
continue
else:
obj_type.append(str(self.__labels[int(classes[i])]))
# Get bounding box coordinates and draw box
# Interpreter can return coordinates that are outside of image dimensions, need to force them to be within image using max() and min()
ymin = int(max(1,(boxes[i][0] * self.__imH)))
xmin = int(max(1,(boxes[i][1] * self.__imW)))
ymax = int(min(self.__imH,(boxes[i][2] * self.__imH)))
xmax = int(min(self.__imW,(boxes[i][3] * self.__imW)))
cv2.rectangle(frame, (xmin,ymin), (xmax,ymax), (10, (40+(40*i))%255, (color_variation*40)%255), 2)
# Calculate object width in pixel
obj_width_pixels = xmax - xmin
foc_meas += (obj_width_pixels * obj_dist_cm) / obj_width_cm
# Draw label
object_name = self.__labels[int(classes[i])] # Look up object name from "labels" array using class index
label = '%s: %d%%' % (object_name, int(scores[i]*100)) # Example: 'person: 72%'
labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2) # Get font size
label_ymin = max(ymin, labelSize[1] + 10) # Make sure not to draw label too close to top of window
cv2.rectangle(frame, (xmin, label_ymin-labelSize[1]-10), (xmin+labelSize[0], label_ymin+baseLine-10), (255, 255, 255), cv2.FILLED) # Draw white box to put label text in
cv2.putText(frame, label, (xmin, label_ymin-7), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2) # Draw label text
# Draw framerate in corner of frame
cv2.putText(frame,'FPS: {0:.2f}'.format(self.__frame_rate_calc),(15,35),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,0),2,cv2.LINE_AA)
# All the results have been drawn on the frame, so it's time to display it.
cv2.imshow('Object detector', frame)
self.focal_value = foc_meas / 10
print("Calculated focal value:",self.focal_value)
print("Calibration done")
def detect(self):
"""
"""
color_variation = 0;
#for frame1 in camera.capture_continuous(rawCapture, format="bgr",use_video_port=True):
while True:
# Start timer (for calculating frame rate)
t1 = cv2.getTickCount()
# Grab frame from video stream
frame1 = self._videostream.read()
# Acquire frame and resize to expected shape [1xHxWx3]
frame = frame1.copy()
frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame_resized = cv2.resize(frame_rgb, (self.__width, self.__height))
input_data = np.expand_dims(frame_resized, axis=0)
# Normalize pixel values if using a floating model (i.e. if model is non-quantized)
if self.__floating_model:
input_data = (np.float32(input_data) - input_mean) / input_std
# Perform the actual detection by running the model with the image as input
self._interpreter.set_tensor(self.__input_details[0]['index'],input_data)
self._interpreter.invoke()
# Retrieve detection results
boxes = self._interpreter.get_tensor(self.__output_details[0]['index'])[0] # Bounding box coordinates of detected objects
classes = self._interpreter.get_tensor(self.__output_details[1]['index'])[0] # Class index of detected objects
scores = self._interpreter.get_tensor(self.__output_details[2]['index'])[0] # Confidence of detected objects
#num = self._interpreter.get_tensor(self.__output_details[3]['index'])[0] # Total number of detected objects (inaccurate and not needed)
# --------------------------------------------------------------------------------------------------------
coords = []
proportion_x = []
proportion_y = []
camera_distance = []
obj_type = []
# Loop over all detections and draw detection box if confidence is above minimum threshold
for i in range(len(scores)):
if ((scores[i] > self.__min_conf_threshold) and (scores[i] <= 1.0)):
if(self.__labels[int(classes[i])] != "person" and self.__labels[int(classes[i])] != "teddy bear" and self.__labels[int(classes[i])] != "chair"):
continue
else:
obj_type.append(str(self.__labels[int(classes[i])]))
# Get bounding box coordinates and draw box
# Interpreter can return coordinates that are outside of image dimensions, need to force them to be within image using max() and min()
ymin = int(max(1,(boxes[i][0] * self.__imH)))
xmin = int(max(1,(boxes[i][1] * self.__imW)))
ymax = int(min(self.__imH,(boxes[i][2] * self.__imH)))
xmax = int(min(self.__imW,(boxes[i][3] * self.__imW)))
if (i+1)*40 > 255:
color_variation += 1
cv2.rectangle(frame, (xmin,ymin), (xmax,ymax), (10, (40+(40*i))%255, (color_variation*40)%255), 2)
# Save coordinates of detected person
coords.append([[xmin, ymin],[xmax, ymax]])
# For testing (screen width of camera)
vid_width = int(self._videostream.width)
if(len(coords) > 1):
# preparation
for a in range(len(coords)):
proportion_x.append(0)
proportion_y.append(0)
for i in range(len(coords)):
# Measure height and width of detected person (in pixel)
proportion_x[i] = coords[i][1][0] - coords[i][0][0] # Width
#proportion_y[i] = coords[i][1][1] - coords[i][0][1] # Height
#proportion_x[i] = xmax - xmin
# P = proportion_x[i]
# F = Fokalwert, W = Objektbreite (cm), P = Objektbreite (Pixel), D = Distanz (cm)
# F = (P * D) / W -> D = (F * W) / P
# F = (P * test_distance) / (45+8)
# print(F)
# Calculate object distance to camera
camera_distance.append((self.focal_value * self.avg_width_person) / proportion_x[i])
print("Distance obj "+str(i)+" ("+str(obj_type)+") - camera: "+str(camera_distance[i]), flush=True)
if(i>0):
# Calculate min dist (only horizontal)
if(obj_type[i] == "person"):
min_dist_x = proportion_x[i]/self.avg_width_person * 150
elif(obj_type[i] == "chair"):
min_dist_x = proportion_x[i]/80 * 150
else:
min_dist_x = 500
#min_dist_x = 300
for j in range(i):
min_dist_obj_x_1 = abs(coords[i][1][0] - coords[j][0][0])
min_dist_obj_x_2 = abs(coords[j][1][0] - coords[i][0][0])
dist_obj_z = abs(camera_distance[i] - camera_distance[j])
# Test with distance to borders
#min_dist_obj_x_1 = abs(coords[i][1][0] - vid_width) # To the right
#min_dist_obj_x_2 = abs(coords[i][0][0] - 0) # To the left
print("X-Distanz objekt i -> j: "+str(min_dist_obj_x_1)+" - X-Distanz obj j -> i: "+str(min_dist_obj_x_2)+" - minimale Distanz: "+str(min_dist_x), flush=True)
print("Z-Distanz objekt i - j: "+str(dist_obj_z), flush=True)
# Check for smaller distance
if(min_dist_obj_x_1 < min_dist_obj_x_2):
objects_distance = math.sqrt(min_dist_obj_x_1**2 + dist_obj_z**2)
if(objects_distance < min_dist_x):
print("AAAA "+str(objects_distance)+" j = "+obj_type[j], flush=True)
cv2.line(frame, (coords[i][1][0], coords[i][1][1]), (coords[j][0][0],coords[j][1][1]), (255,10,0), 2)
#cv2.line(frame, (coords[i][1][0], coords[i][1][1]+30), (vid_width,coords[i][1][1]+30), (255,10,0), 2)
dist_label = '%s / %d' % (round(objects_distance, 2), round(min_dist_x, 2))
dist_labelSize, dist_baseLine = cv2.getTextSize(dist_label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2)
dist_label_ymin = max(coords[i][1][1], dist_labelSize[1] + 10)
cv2.rectangle(frame, (coords[i][1][0], dist_label_ymin-dist_labelSize[1]-10), (coords[i][1][0]+dist_labelSize[0], dist_label_ymin+dist_baseLine-10), (255, 255, 255), cv2.FILLED)
cv2.putText(frame, dist_label, (coords[i][1][0], dist_label_ymin-7), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2)
elif(min_dist_obj_x_1 > min_dist_obj_x_2):
objects_distance = math.sqrt(min_dist_obj_x_2**2 + dist_obj_z**2)
if(objects_distance < min_dist_x):
print("BBB "+str(objects_distance)+" j = "+obj_type[j], flush=True)
cv2.line(frame, (coords[j][1][0], coords[j][1][1]), (coords[i][0][0],coords[i][1][1]), (255,10,0), 2)
#cv2.line(frame, (coords[i][0][0], coords[i][0][1]), (0,coords[i][0][1]), (255,10,0), 2)
dist_label = '%s / %d' % (round(objects_distance, 2), round(min_dist_x, 2))
dist_labelSize, dist_baseLine = cv2.getTextSize(dist_label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2)
dist_label_ymin = max(coords[j][1][1], dist_labelSize[1] + 10)
cv2.rectangle(frame, (coords[j][1][0], dist_label_ymin-dist_labelSize[1]-10), (coords[j][1][0]+dist_labelSize[0], dist_label_ymin+dist_baseLine-10), (255, 255, 255), cv2.FILLED)
cv2.putText(frame, dist_label, (coords[j][1][0], dist_label_ymin-7), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2)
else:
# ...
b = 1
else:
# ...
b = 2
else:
# ...
b = 3
# Draw label
object_name = self.__labels[int(classes[i])] # Look up object name from "labels" array using class index
label = '%s: %d%%' % (object_name, int(scores[i]*100)) # Example: 'person: 72%'
labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2) # Get font size
label_ymin = max(ymin, labelSize[1] + 10) # Make sure not to draw label too close to top of window
cv2.rectangle(frame, (xmin, label_ymin-labelSize[1]-10), (xmin+labelSize[0], label_ymin+baseLine-10), (255, 255, 255), cv2.FILLED) # Draw white box to put label text in
cv2.putText(frame, label, (xmin, label_ymin-7), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2) # Draw label text
# Draw framerate in corner of frame
cv2.putText(frame,'FPS: {0:.2f}'.format(self.__frame_rate_calc),(15,35),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,0),2,cv2.LINE_AA)
# All the results have been drawn on the frame, so it's time to display it.
cv2.imshow('Object detector', frame)
# Calculate framerate
t2 = cv2.getTickCount()
time1 = (t2-t1)/self.__freq
self.__frame_rate_calc= 1/time1
# Press 'q' to quit
if cv2.waitKey(1) == ord('q'):
break
def __del__(self):
"""
"""
# Clean up
self._videostream.stop()
cv2.destroyAllWindows()
def main():
det_ob = LiveDetection()
det_ob.detect()
del det_ob
if __name__ == "__main__":
main()
| [((46, 22, 46, 41), 'cv2.VideoCapture', 'cv2.VideoCapture', ({(46, 39, 46, 40): '0'}, {}), '(0)', False, 'import cv2\n'), ((129, 19, 129, 30), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os\n'), ((132, 23, 132, 67), 'os.path.join', 'os.path.join', ({(132, 36, 132, 44): 'CWD_PATH', (132, 45, 132, 55): 'MODEL_NAME', (132, 56, 132, 66): 'GRAPH_NAME'}, {}), '(CWD_PATH, MODEL_NAME, GRAPH_NAME)', False, 'import os\n'), ((135, 25, 135, 72), 'os.path.join', 'os.path.join', ({(135, 38, 135, 46): 'CWD_PATH', (135, 47, 135, 57): 'MODEL_NAME', (135, 58, 135, 71): 'LABELMAP_NAME'}, {}), '(CWD_PATH, MODEL_NAME, LABELMAP_NAME)', False, 'import os\n'), ((172, 22, 172, 44), 'cv2.getTickFrequency', 'cv2.getTickFrequency', ({}, {}), '()', False, 'import cv2\n'), ((176, 8, 176, 21), 'time.sleep', 'time.sleep', ({(176, 19, 176, 20): '(1)'}, {}), '(1)', False, 'import time\n'), ((454, 8, 454, 31), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ({}, {}), '()', False, 'import cv2\n'), ((47, 51, 47, 82), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', ({(47, 74, 47, 81): "*'MJPG'"}, {}), "(*'MJPG')", False, 'import cv2\n'), ((154, 32, 154, 68), 'tensorflow.lite.python.interpreter.Interpreter', 'Interpreter', (), '', False, 'from tensorflow.lite.python.interpreter import Interpreter\n'), ((210, 24, 210, 62), 'cv2.cvtColor', 'cv2.cvtColor', ({(210, 37, 210, 42): 'frame', (210, 44, 210, 61): 'cv2.COLOR_BGR2RGB'}, {}), '(frame, cv2.COLOR_BGR2RGB)', False, 'import cv2\n'), ((211, 28, 211, 80), 'cv2.resize', 'cv2.resize', ({(211, 39, 211, 48): 'frame_rgb', (211, 50, 211, 79): '(self.__width, self.__height)'}, {}), '(frame_rgb, (self.__width, self.__height))', False, 'import cv2\n'), ((212, 25, 212, 62), 'numpy.expand_dims', 'np.expand_dims', (), '', True, 'import numpy as np\n'), ((263, 12, 263, 48), 'cv2.imshow', 'cv2.imshow', ({(263, 23, 263, 40): '"""Object detector"""', (263, 42, 263, 47): 'frame'}, {}), "('Object detector', frame)", False, 'import cv2\n'), ((282, 17, 282, 35), 'cv2.getTickCount', 'cv2.getTickCount', ({}, {}), '()', False, 'import cv2\n'), ((289, 24, 289, 62), 'cv2.cvtColor', 'cv2.cvtColor', ({(289, 37, 289, 42): 'frame', (289, 44, 289, 61): 'cv2.COLOR_BGR2RGB'}, {}), '(frame, cv2.COLOR_BGR2RGB)', False, 'import cv2\n'), ((290, 28, 290, 80), 'cv2.resize', 'cv2.resize', ({(290, 39, 290, 48): 'frame_rgb', (290, 50, 290, 79): '(self.__width, self.__height)'}, {}), '(frame_rgb, (self.__width, self.__height))', False, 'import cv2\n'), ((291, 25, 291, 62), 'numpy.expand_dims', 'np.expand_dims', (), '', True, 'import numpy as np\n'), ((435, 12, 435, 48), 'cv2.imshow', 'cv2.imshow', ({(435, 23, 435, 40): '"""Object detector"""', (435, 42, 435, 47): 'frame'}, {}), "('Object detector', frame)", False, 'import cv2\n'), ((438, 17, 438, 35), 'cv2.getTickCount', 'cv2.getTickCount', ({}, {}), '()', False, 'import cv2\n'), ((61, 8, 61, 42), 'threading.Thread', 'Thread', (), '', False, 'from threading import Thread\n'), ((443, 15, 443, 29), 'cv2.waitKey', 'cv2.waitKey', ({(443, 27, 443, 28): '(1)'}, {}), '(1)', False, 'import cv2\n'), ((245, 20, 245, 118), 'cv2.rectangle', 'cv2.rectangle', ({(245, 34, 245, 39): 'frame', (245, 41, 245, 52): '(xmin, ymin)', (245, 54, 245, 65): '(xmax, ymax)', (245, 67, 245, 114): '(10, (40 + 40 * i) % 255, color_variation * 40 % 255)', (245, 116, 245, 117): '(2)'}, {}), '(frame, (xmin, ymin), (xmax, ymax), (10, (40 + 40 * i) % 255, \n color_variation * 40 % 255), 2)', False, 'import cv2\n'), ((254, 42, 254, 98), 'cv2.getTextSize', 'cv2.getTextSize', ({(254, 58, 254, 63): 'label', (254, 65, 254, 89): 'cv2.FONT_HERSHEY_SIMPLEX', (254, 91, 254, 94): '0.7', (254, 96, 254, 97): '2'}, {}), '(label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2)', False, 'import cv2\n'), ((256, 20, 256, 150), 'cv2.rectangle', 'cv2.rectangle', ({(256, 34, 256, 39): 'frame', (256, 41, 256, 75): '(xmin, label_ymin - labelSize[1] - 10)', (256, 77, 256, 120): '(xmin + labelSize[0], label_ymin + baseLine - 10)', (256, 122, 256, 137): '(255, 255, 255)', (256, 139, 256, 149): 'cv2.FILLED'}, {}), '(frame, (xmin, label_ymin - labelSize[1] - 10), (xmin +\n labelSize[0], label_ymin + baseLine - 10), (255, 255, 255), cv2.FILLED)', False, 'import cv2\n'), ((257, 20, 257, 112), 'cv2.putText', 'cv2.putText', ({(257, 32, 257, 37): 'frame', (257, 39, 257, 44): 'label', (257, 46, 257, 66): '(xmin, label_ymin - 7)', (257, 68, 257, 92): 'cv2.FONT_HERSHEY_SIMPLEX', (257, 94, 257, 97): '(0.7)', (257, 99, 257, 108): '(0, 0, 0)', (257, 110, 257, 111): '(2)'}, {}), '(frame, label, (xmin, label_ymin - 7), cv2.FONT_HERSHEY_SIMPLEX,\n 0.7, (0, 0, 0), 2)', False, 'import cv2\n'), ((332, 20, 332, 118), 'cv2.rectangle', 'cv2.rectangle', ({(332, 34, 332, 39): 'frame', (332, 41, 332, 52): '(xmin, ymin)', (332, 54, 332, 65): '(xmax, ymax)', (332, 67, 332, 114): '(10, (40 + 40 * i) % 255, color_variation * 40 % 255)', (332, 116, 332, 117): '(2)'}, {}), '(frame, (xmin, ymin), (xmax, ymax), (10, (40 + 40 * i) % 255, \n color_variation * 40 % 255), 2)', False, 'import cv2\n'), ((426, 42, 426, 98), 'cv2.getTextSize', 'cv2.getTextSize', ({(426, 58, 426, 63): 'label', (426, 65, 426, 89): 'cv2.FONT_HERSHEY_SIMPLEX', (426, 91, 426, 94): '0.7', (426, 96, 426, 97): '2'}, {}), '(label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2)', False, 'import cv2\n'), ((428, 20, 428, 150), 'cv2.rectangle', 'cv2.rectangle', ({(428, 34, 428, 39): 'frame', (428, 41, 428, 75): '(xmin, label_ymin - labelSize[1] - 10)', (428, 77, 428, 120): '(xmin + labelSize[0], label_ymin + baseLine - 10)', (428, 122, 428, 137): '(255, 255, 255)', (428, 139, 428, 149): 'cv2.FILLED'}, {}), '(frame, (xmin, label_ymin - labelSize[1] - 10), (xmin +\n labelSize[0], label_ymin + baseLine - 10), (255, 255, 255), cv2.FILLED)', False, 'import cv2\n'), ((429, 20, 429, 112), 'cv2.putText', 'cv2.putText', ({(429, 32, 429, 37): 'frame', (429, 39, 429, 44): 'label', (429, 46, 429, 66): '(xmin, label_ymin - 7)', (429, 68, 429, 92): 'cv2.FONT_HERSHEY_SIMPLEX', (429, 94, 429, 97): '(0.7)', (429, 99, 429, 108): '(0, 0, 0)', (429, 110, 429, 111): '(2)'}, {}), '(frame, label, (xmin, label_ymin - 7), cv2.FONT_HERSHEY_SIMPLEX,\n 0.7, (0, 0, 0), 2)', False, 'import cv2\n'), ((151, 62, 151, 96), 'tensorflow.lite.python.interpreter.load_delegate', 'load_delegate', ({(151, 76, 151, 95): '"""libedgetpu.so.1.0"""'}, {}), "('libedgetpu.so.1.0')", False, 'from tensorflow.lite.python.interpreter import load_delegate\n'), ((216, 30, 216, 52), 'numpy.float32', 'np.float32', ({(216, 41, 216, 51): 'input_data'}, {}), '(input_data)', True, 'import numpy as np\n'), ((295, 30, 295, 52), 'numpy.float32', 'np.float32', ({(295, 41, 295, 51): 'input_data'}, {}), '(input_data)', True, 'import numpy as np\n'), ((388, 59, 388, 105), 'math.sqrt', 'math.sqrt', ({(388, 69, 388, 104): 'min_dist_obj_x_1 ** 2 + dist_obj_z ** 2'}, {}), '(min_dist_obj_x_1 ** 2 + dist_obj_z ** 2)', False, 'import math\n'), ((391, 44, 391, 145), 'cv2.line', 'cv2.line', ({(391, 53, 391, 58): 'frame', (391, 60, 391, 94): '(coords[i][1][0], coords[i][1][1])', (391, 96, 391, 129): '(coords[j][0][0], coords[j][1][1])', (391, 131, 391, 141): '(255, 10, 0)', (391, 143, 391, 144): '(2)'}, {}), '(frame, (coords[i][1][0], coords[i][1][1]), (coords[j][0][0],\n coords[j][1][1]), (255, 10, 0), 2)', False, 'import cv2\n'), ((395, 76, 395, 137), 'cv2.getTextSize', 'cv2.getTextSize', ({(395, 92, 395, 102): 'dist_label', (395, 104, 395, 128): 'cv2.FONT_HERSHEY_SIMPLEX', (395, 130, 395, 133): '0.7', (395, 135, 395, 136): '2'}, {}), '(dist_label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2)', False, 'import cv2\n'), ((397, 44, 397, 221), 'cv2.rectangle', 'cv2.rectangle', ({(397, 58, 397, 63): 'frame', (397, 65, 397, 120): '(coords[i][1][0], dist_label_ymin - dist_labelSize[1] - 10)', (397, 122, 397, 191): '(coords[i][1][0] + dist_labelSize[0], dist_label_ymin + dist_baseLine - 10)', (397, 193, 397, 208): '(255, 255, 255)', (397, 210, 397, 220): 'cv2.FILLED'}, {}), '(frame, (coords[i][1][0], dist_label_ymin - dist_labelSize[1] -\n 10), (coords[i][1][0] + dist_labelSize[0], dist_label_ymin +\n dist_baseLine - 10), (255, 255, 255), cv2.FILLED)', False, 'import cv2\n'), ((398, 44, 398, 157), 'cv2.putText', 'cv2.putText', ({(398, 56, 398, 61): 'frame', (398, 63, 398, 73): 'dist_label', (398, 75, 398, 111): '(coords[i][1][0], dist_label_ymin - 7)', (398, 113, 398, 137): 'cv2.FONT_HERSHEY_SIMPLEX', (398, 139, 398, 142): '(0.7)', (398, 144, 398, 153): '(0, 0, 0)', (398, 155, 398, 156): '(2)'}, {}), '(frame, dist_label, (coords[i][1][0], dist_label_ymin - 7), cv2.\n FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2)', False, 'import cv2\n'), ((401, 59, 401, 105), 'math.sqrt', 'math.sqrt', ({(401, 69, 401, 104): 'min_dist_obj_x_2 ** 2 + dist_obj_z ** 2'}, {}), '(min_dist_obj_x_2 ** 2 + dist_obj_z ** 2)', False, 'import math\n'), ((404, 44, 404, 145), 'cv2.line', 'cv2.line', ({(404, 53, 404, 58): 'frame', (404, 60, 404, 94): '(coords[j][1][0], coords[j][1][1])', (404, 96, 404, 129): '(coords[i][0][0], coords[i][1][1])', (404, 131, 404, 141): '(255, 10, 0)', (404, 143, 404, 144): '(2)'}, {}), '(frame, (coords[j][1][0], coords[j][1][1]), (coords[i][0][0],\n coords[i][1][1]), (255, 10, 0), 2)', False, 'import cv2\n'), ((408, 76, 408, 137), 'cv2.getTextSize', 'cv2.getTextSize', ({(408, 92, 408, 102): 'dist_label', (408, 104, 408, 128): 'cv2.FONT_HERSHEY_SIMPLEX', (408, 130, 408, 133): '0.7', (408, 135, 408, 136): '2'}, {}), '(dist_label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2)', False, 'import cv2\n'), ((410, 44, 410, 221), 'cv2.rectangle', 'cv2.rectangle', ({(410, 58, 410, 63): 'frame', (410, 65, 410, 120): '(coords[j][1][0], dist_label_ymin - dist_labelSize[1] - 10)', (410, 122, 410, 191): '(coords[j][1][0] + dist_labelSize[0], dist_label_ymin + dist_baseLine - 10)', (410, 193, 410, 208): '(255, 255, 255)', (410, 210, 410, 220): 'cv2.FILLED'}, {}), '(frame, (coords[j][1][0], dist_label_ymin - dist_labelSize[1] -\n 10), (coords[j][1][0] + dist_labelSize[0], dist_label_ymin +\n dist_baseLine - 10), (255, 255, 255), cv2.FILLED)', False, 'import cv2\n'), ((411, 44, 411, 157), 'cv2.putText', 'cv2.putText', ({(411, 56, 411, 61): 'frame', (411, 63, 411, 73): 'dist_label', (411, 75, 411, 111): '(coords[j][1][0], dist_label_ymin - 7)', (411, 113, 411, 137): 'cv2.FONT_HERSHEY_SIMPLEX', (411, 139, 411, 142): '(0.7)', (411, 144, 411, 153): '(0, 0, 0)', (411, 155, 411, 156): '(2)'}, {}), '(frame, dist_label, (coords[j][1][0], dist_label_ymin - 7), cv2.\n FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2)', False, 'import cv2\n')] |
yakumo-saki/smart_to_zabbix | modules/zabbix_smart.py | 04dd1debe0c831b4ec94962884543c989ad57730 | import json
import logging
import config as cfg
from modules.const import Keys, AttrKey
from modules.zabbix_sender import send_to_zabbix
logger = logging.getLogger(__name__)
SMART_ATTR_KEY = "ata_smart_attributes"
NVME_ATTR_KEY = "nvme_smart_health_information_log"
def send_attribute_discovery(result):
"""
zabbixにS.M.A.R.T Attribute LLDデータを送信します。
Attribute LLDとは要するにSMART値すべて
"""
logger.info("Sending S.M.A.R.T attribute discovery to zabbix")
discovery_result = []
for device in result:
logger.info("Listing S.M.A.R.T attributes: " + device)
detail = result[device]
discovery = {AttrKey.DEV_NAME: device, AttrKey.DISK_NAME: detail["model_name"]}
if (SMART_ATTR_KEY in detail):
discovery_result = create_attribute_list_non_nvme(discovery, detail[SMART_ATTR_KEY])
elif (NVME_ATTR_KEY in detail):
discovery_result = create_attribute_list_nvme(discovery, detail[NVME_ATTR_KEY])
data = {"request": "sender data", "data":[]}
valueStr = json.dumps({"data": discovery_result})
one_data = {"host": cfg.ZABBIX_HOST, "key": AttrKey.KEY, "value": f"{valueStr}"}
data["data"].append(one_data)
send_to_zabbix(data)
return None
def create_attribute_list_non_nvme(discovery_base, smart_attributes):
import copy
result = []
for attr in smart_attributes["table"]:
discovery = copy.deepcopy(discovery_base)
# non NVMeの場合、 Unknown Attributeがあり得るので、SMART ID を名前の先頭につけておく
discovery[AttrKey.ATTR_NAME] = "{0} {1}".format(attr["id"], attr["name"])
discovery[AttrKey.ATTR_ID] = attr["id"]
result.append(discovery)
return result
def create_attribute_list_nvme(discovery_base, nvme_health_info):
import copy
result = []
for key in nvme_health_info:
discovery = copy.deepcopy(discovery_base)
if key == "temperature_sensors":
for idx, _ in enumerate(nvme_health_info[key]):
# temperature_sensorsの名前の通り、複数の温度センサーがあると値が複数入るので
# temperature_sensors1,2 のような名前に展開する
discovery[AttrKey.ATTR_NAME] = f"temperature_sensors{idx}"
discovery[AttrKey.ATTR_ID] = f"temperature_sensors{idx}"
else:
discovery[AttrKey.ATTR_NAME] = key
discovery[AttrKey.ATTR_ID] = key
result.append(discovery)
return result
def send_smart_data(data):
logger.info("Send S.M.A.R.T data to zabbix")
results = []
for dev in data:
logger.info("Listing S.M.A.R.T data: " + dev)
detail = data[dev] # /dev/sda
if ("ata_smart_attributes" in detail):
results = create_value_list_non_nvme(dev, detail["ata_smart_attributes"])
elif ("nvme_smart_health_information_log" in detail):
results = create_value_list_nvme(dev, detail["nvme_smart_health_information_log"])
sender_data = {"request": "sender data", "data": results}
#valueStr = json.dumps({"data": discovery_result})
# print(json.dumps(sender_data, indent=2))
send_to_zabbix(sender_data)
return None
def create_value_list_non_nvme(dev, smart_attributes):
results = []
for attr in smart_attributes["table"]:
keyvalue = {
AttrKey.RAWVALUE_KEY.format(dev, attr["id"]): attr["raw"]["value"],
AttrKey.VALUE_KEY.format(dev, attr["id"]): attr["value"],
AttrKey.WORST_KEY.format(dev, attr["id"]): attr["worst"]
}
if ("thresh" in attr):
keyvalue[AttrKey.THRESH_KEY.format(dev, attr["id"])] = attr["thresh"]
for k,v in keyvalue.items():
results.append({"host": cfg.ZABBIX_HOST, "key": k, "value": v})
return results
def create_value_list_nvme(dev, nvme_health_info):
results = []
for key in nvme_health_info:
# NVMe にはthreshouldやworstはなく、valueだけ
if key == "temperature_sensors":
# temperature_sensorsの複数の値は 末尾に連番をつけて展開されている
for idx, val in enumerate(nvme_health_info[key]):
key = AttrKey.VALUE_KEY.format(dev, f"temperature_sensors{idx}")
results.append({"host": cfg.ZABBIX_HOST, "key": key, "value": val})
else:
val = nvme_health_info[key]
key = AttrKey.VALUE_KEY.format(dev, key)
results.append({"host": cfg.ZABBIX_HOST, "key": key, "value": val})
return results
| [((9, 9, 9, 36), 'logging.getLogger', 'logging.getLogger', ({(9, 27, 9, 35): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((34, 13, 34, 51), 'json.dumps', 'json.dumps', ({(34, 24, 34, 50): "{'data': discovery_result}"}, {}), "({'data': discovery_result})", False, 'import json\n'), ((38, 2, 38, 22), 'modules.zabbix_sender.send_to_zabbix', 'send_to_zabbix', ({(38, 17, 38, 21): 'data'}, {}), '(data)', False, 'from modules.zabbix_sender import send_to_zabbix\n'), ((96, 2, 96, 29), 'modules.zabbix_sender.send_to_zabbix', 'send_to_zabbix', ({(96, 17, 96, 28): 'sender_data'}, {}), '(sender_data)', False, 'from modules.zabbix_sender import send_to_zabbix\n'), ((48, 16, 48, 45), 'copy.deepcopy', 'copy.deepcopy', ({(48, 30, 48, 44): 'discovery_base'}, {}), '(discovery_base)', False, 'import copy\n'), ((63, 16, 63, 45), 'copy.deepcopy', 'copy.deepcopy', ({(63, 30, 63, 44): 'discovery_base'}, {}), '(discovery_base)', False, 'import copy\n'), ((106, 6, 106, 50), 'modules.const.AttrKey.RAWVALUE_KEY.format', 'AttrKey.RAWVALUE_KEY.format', ({(106, 34, 106, 37): 'dev', (106, 39, 106, 49): "attr['id']"}, {}), "(dev, attr['id'])", False, 'from modules.const import Keys, AttrKey\n'), ((107, 6, 107, 47), 'modules.const.AttrKey.VALUE_KEY.format', 'AttrKey.VALUE_KEY.format', ({(107, 31, 107, 34): 'dev', (107, 36, 107, 46): "attr['id']"}, {}), "(dev, attr['id'])", False, 'from modules.const import Keys, AttrKey\n'), ((108, 6, 108, 47), 'modules.const.AttrKey.WORST_KEY.format', 'AttrKey.WORST_KEY.format', ({(108, 31, 108, 34): 'dev', (108, 36, 108, 46): "attr['id']"}, {}), "(dev, attr['id'])", False, 'from modules.const import Keys, AttrKey\n'), ((132, 12, 132, 46), 'modules.const.AttrKey.VALUE_KEY.format', 'AttrKey.VALUE_KEY.format', ({(132, 37, 132, 40): 'dev', (132, 42, 132, 45): 'key'}, {}), '(dev, key)', False, 'from modules.const import Keys, AttrKey\n'), ((128, 14, 128, 72), 'modules.const.AttrKey.VALUE_KEY.format', 'AttrKey.VALUE_KEY.format', ({(128, 39, 128, 42): 'dev', (128, 44, 128, 71): 'f"""temperature_sensors{idx}"""'}, {}), "(dev, f'temperature_sensors{idx}')", False, 'from modules.const import Keys, AttrKey\n'), ((112, 15, 112, 57), 'modules.const.AttrKey.THRESH_KEY.format', 'AttrKey.THRESH_KEY.format', ({(112, 41, 112, 44): 'dev', (112, 46, 112, 56): "attr['id']"}, {}), "(dev, attr['id'])", False, 'from modules.const import Keys, AttrKey\n')] |
kpister/biaxial-rnn-music-composition | data.py | f6feafad0fe1066dd957293803a86d6c584d9952 | import itertools
from midi_to_statematrix import UPPER_BOUND, LOWER_BOUND
def startSentinel():
def noteSentinel(note):
position = note
part_position = [position]
pitchclass = (note + LOWER_BOUND) % 12
part_pitchclass = [int(i == pitchclass) for i in range(12)]
return part_position + part_pitchclass + [0] * 66 + [1]
return [noteSentinel(note) for note in range(UPPER_BOUND - LOWER_BOUND)]
def getOrDefault(l, i, d):
try:
return l[i]
except IndexError:
return d
def buildContext(state):
context = [0] * 12
for note, notestate in enumerate(state):
if notestate[0] == 1:
pitchclass = (note + LOWER_BOUND) % 12
context[pitchclass] += 1
return context
def buildBeat(time):
return [
2 * x - 1 for x in [time % 2, (time // 2) % 2, (time // 4) % 2, (time // 8) % 2]
]
def noteInputForm(note, state, context, beat):
position = note
part_position = [position]
pitchclass = (note + LOWER_BOUND) % 12
part_pitchclass = [int(i == pitchclass) for i in range(12)]
# Concatenate the note states for the previous vicinity
part_prev_vicinity = list(
itertools.chain.from_iterable(
(getOrDefault(state, note + i, [0, 0]) for i in range(-12, 13))
)
)
part_context = context[pitchclass:] + context[:pitchclass]
return (
part_position + part_pitchclass + part_prev_vicinity + part_context + beat + [0]
)
def noteStateSingleToInputForm(state, time):
beat = buildBeat(time)
context = buildContext(state)
return [noteInputForm(note, state, context, beat) for note in range(len(state))]
def noteStateMatrixToInputForm(statematrix):
# NOTE: May have to transpose this or transform it in some way to make Theano like it
# [startSentinel()] +
inputform = [
noteStateSingleToInputForm(state, time)
for time, state in enumerate(statematrix)
]
return inputform
| [] |
eocdb/ocdb-server | ocdb/ws/controllers/datasets.py | 0e28d092e8ecf5f4813878aab43de990cc5fb4ee | # The MIT License (MIT)
# Copyright (c) 2018 by EUMETSAT
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from typing import List, Union
from ..context import WsContext
from ...core.asserts import assert_not_none, assert_one_of, assert_instance
from ...core.models.dataset import Dataset
from ...core.models.dataset_query import DatasetQuery
from ...core.models.dataset_query_result import DatasetQueryResult
from ...core.models.dataset_ref import DatasetRef
from ...core.models.dataset_validation_result import DatasetValidationResult
from ...core.models.qc_info import QcInfo, QC_STATUS_SUBMITTED
from ...core.val import validator
from ...ws.errors import WsResourceNotFoundError, WsBadRequestError, WsNotImplementedError
def validate_dataset(ctx: WsContext, dataset: Dataset) -> DatasetValidationResult:
return validator.validate_dataset(dataset, ctx.config)
def find_datasets(ctx: WsContext,
expr: str = None,
region: List[float] = None,
time: List[str] = None,
wdepth: List[float] = None,
mtype: str = 'all',
wlmode: str = 'all',
shallow: str = 'no',
pmode: str = 'contains',
pgroup: List[str] = None,
status: str = None,
submission_id: str = None,
pname: List[str] = None,
geojson: bool = False,
offset: int = 1,
user_id: str = None,
count: int = 1000) -> DatasetQueryResult:
"""Find datasets."""
assert_one_of(wlmode, ['all', 'multispectral', 'hyperspectral'], name='wlmode')
assert_one_of(shallow, ['no', 'yes', 'exclusively'], name='shallow')
assert_one_of(pmode, ['contains', 'same_cruise', 'dont_apply'], name='pmode')
if pgroup is not None:
assert_instance(pgroup, [])
# Ensuring that the search uses lower case pnames
if pname:
pname = [p.lower() for p in pname]
query = DatasetQuery()
query.expr = expr
query.region = region
query.time = time
query.wdepth = wdepth
query.mtype = mtype
query.wlmode = wlmode
query.shallow = shallow
query.pmode = pmode
query.pgroup = pgroup
query.submission_id = submission_id
query.status = status
query.pname = pname
query.geojson = geojson
query.offset = offset
query.count = count
query.user_id = user_id
result = DatasetQueryResult({}, 0, [], query)
for driver in ctx.db_drivers:
result_part = driver.instance().find_datasets(query)
result.total_count += result_part.total_count
result.datasets += result_part.datasets
result.dataset_ids += result_part.dataset_ids
result.locations.update(result_part.locations)
return result
def add_dataset(ctx: WsContext,
dataset: Dataset) -> DatasetRef:
"""Add a new dataset."""
assert_not_none(dataset)
validation_result = validator.validate_dataset(dataset, ctx.config)
if validation_result.status == "ERROR":
raise WsBadRequestError(f"Invalid dataset.")
dataset_id = ctx.db_driver.instance().add_dataset(dataset)
if not dataset_id:
raise WsBadRequestError(f"Could not add dataset {dataset.path}")
return DatasetRef(dataset_id, dataset.path, dataset.filename)
def update_dataset(ctx: WsContext,
dataset: Dataset):
"""Update an existing dataset."""
assert_not_none(dataset)
validation_result = validator.validate_dataset(dataset, ctx.config)
if validation_result.status == "ERROR":
raise WsBadRequestError(f"Invalid dataset.")
updated = ctx.db_driver.instance().update_dataset(dataset)
if not updated:
raise WsResourceNotFoundError(f"Dataset with ID {dataset.id} not found")
return updated
def delete_dataset(ctx: WsContext,
dataset_id: str):
"""Delete an existing dataset."""
# assert_not_none(api_key, name='api_key')
assert_not_none(dataset_id, name='dataset_id')
deleted = ctx.db_driver.instance().delete_dataset(dataset_id)
if not deleted:
raise WsResourceNotFoundError(f"Dataset with ID {dataset_id} not found")
return deleted
def get_dataset_by_id_strict(ctx: WsContext,
dataset_id: str) -> Dataset:
"""Get dataset by ID."""
assert_not_none(dataset_id, name='dataset_id')
dataset = ctx.db_driver.instance().get_dataset(dataset_id)
if dataset is not None:
return dataset
raise WsResourceNotFoundError(f"Dataset with ID {dataset_id} not found")
def get_dataset_by_id(ctx: WsContext,
dataset_id: Union[dict, str]) -> Dataset:
"""Get dataset by ID."""
assert_not_none(dataset_id, name='dataset_id')
# The dataset_id may be a dataset json object
if isinstance(dataset_id, dict):
dataset_id = dataset_id['id']
dataset = ctx.db_driver.instance().get_dataset(dataset_id)
return dataset
# noinspection PyUnusedLocal,PyTypeChecker
def get_datasets_in_path(ctx: WsContext,
affil: str,
project: str,
cruise: str) -> List[DatasetRef]:
assert_not_none(affil, name='affil')
assert_not_none(project, name='project')
assert_not_none(cruise, name='cruise')
# TODO (generated): implement operation get_datasets_in_bucket()
raise WsNotImplementedError('Operation get_datasets_in_bucket() not yet implemented')
# noinspection PyUnusedLocal,PyTypeChecker
def get_dataset_by_name(ctx: WsContext,
affil: str,
project: str,
cruise: str,
name: str) -> str:
assert_not_none(affil, name='affil')
assert_not_none(project, name='project')
assert_not_none(cruise, name='cruise')
assert_not_none(name, name='name')
# TODO (generated): implement operation get_dataset_by_bucket_and_name()
raise WsNotImplementedError('Operation get_dataset_by_bucket_and_name() not yet implemented')
# noinspection PyUnusedLocal
def get_dataset_qc_info(ctx: WsContext,
dataset_id: str) -> QcInfo:
assert_not_none(dataset_id, name='dataset_id')
dataset = ctx.db_driver.get_dataset(dataset_id)
qc_info_dict = dataset.metadata.get("qc_info")
return QcInfo.from_dict(qc_info_dict) if qc_info_dict else QcInfo(QC_STATUS_SUBMITTED)
# noinspection PyUnusedLocal
def set_dataset_qc_info(ctx: WsContext,
dataset_id: str,
qc_info: QcInfo):
assert_not_none(dataset_id, name='dataset_id')
dataset = ctx.db_driver.get_dataset(dataset_id)
dataset.metadata["qc_info"] = qc_info.to_dict()
ctx.db_driver.update_dataset(dataset)
| [] |
ayanezcasal/AntLibAYC | libAnt/node.py | c266af973f4c32d4baf30130fe51a572478488ec | import threading
from queue import Queue, Empty
from time import sleep
from libAnt.drivers.driver import Driver
from libAnt.message import *
class Network:
def __init__(self, key: bytes = b'\x00' * 8, name: str = None):
self.key = key
self.name = name
self.number = 0
def __str__(self):
return self.name
class Pump(threading.Thread):
def __init__(self, driver: Driver, initMessages, out: Queue, onSucces, onFailure):
super().__init__()
self._stopper = threading.Event()
self._driver = driver
self._out = out
self._initMessages = initMessages
self._waiters = []
self._onSuccess = onSucces
self._onFailure = onFailure
def stop(self):
self._driver.abort()
self._stopper.set()
def stopped(self):
return self._stopper.isSet()
def run(self):
while not self.stopped():
try:
with self._driver as d:
# Startup
rst = SystemResetMessage()
self._waiters.append(rst)
d.write(rst)
for m in self._initMessages:
self._waiters.append(m)
d.write(m)
while not self.stopped():
# Write
try:
outMsg = self._out.get(block=False)
self._waiters.append(outMsg)
d.write(outMsg)
except Empty:
pass
# Read
try:
msg = d.read(timeout=1)
if msg.type == MESSAGE_CHANNEL_EVENT:
# This is a response to our outgoing message
for w in self._waiters:
if w.type == msg.content[1]: # ACK
self._waiters.remove(w)
# TODO: Call waiter callback from tuple (waiter, callback)
break
elif msg.type == MESSAGE_CHANNEL_BROADCAST_DATA:
bmsg = BroadcastMessage(msg.type, msg.content).build(msg.content)
self._onSuccess(bmsg)
except Empty:
pass
except Exception as e:
self._onFailure(e)
except:
pass
self._waiters.clear()
sleep(1)
class Node:
def __init__(self, driver: Driver, name: str = None):
self._driver = driver
self._name = name
self._out = Queue()
self._init = []
self._pump = None
self._configMessages = Queue()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop()
def start(self, onSuccess, onFailure):
if not self.isRunning():
self._pump = Pump(self._driver, self._init, self._out, onSuccess, onFailure)
self._pump.start()
def enableRxScanMode(self, networkKey=ANTPLUS_NETWORK_KEY, channelType=CHANNEL_TYPE_ONEWAY_RECEIVE,
frequency: int = 2457, rxTimestamp: bool = True, rssi: bool = True, channelId: bool = True):
self._init.append(SystemResetMessage())
self._init.append(SetNetworkKeyMessage(0, networkKey))
self._init.append(AssignChannelMessage(0, channelType))
self._init.append(SetChannelIdMessage(0))
self._init.append(SetChannelRfFrequencyMessage(0, frequency))
self._init.append(EnableExtendedMessagesMessage())
self._init.append(LibConfigMessage(rxTimestamp, rssi, channelId))
self._init.append(OpenRxScanModeMessage())
def stop(self):
if self.isRunning():
self._pump.stop()
self._pump.join()
def isRunning(self):
if self._pump is None:
return False
return self._pump.is_alive()
def getCapabilities(self):
pass
| [((22, 24, 22, 41), 'threading.Event', 'threading.Event', ({}, {}), '()', False, 'import threading\n'), ((85, 20, 85, 27), 'queue.Queue', 'Queue', ({}, {}), '()', False, 'from queue import Queue, Empty\n'), ((88, 31, 88, 38), 'queue.Queue', 'Queue', ({}, {}), '()', False, 'from queue import Queue, Empty\n'), ((78, 12, 78, 20), 'time.sleep', 'sleep', ({(78, 18, 78, 19): '(1)'}, {}), '(1)', False, 'from time import sleep\n')] |
OliPerkins1987/Wildfire_Human_Agency_Model | tests/test_seasonality.py | 49ac17c7c2ad5e03d572b6ae22c227e89a944624 | # -*- coding: utf-8 -*-
"""
Created on Thu Sep 30 12:17:04 2021
@author: Oli
"""
import pytest
import pandas as pd
import numpy as np
import netCDF4 as nc
import os
from copy import deepcopy
os.chdir(os.path.dirname(os.path.realpath(__file__)))
wd = os.getcwd().replace('\\', '/')
exec(open("test_setup.py").read())
os.chdir((wd[0:-6] + '/src/data_import'))
exec(open("local_load_up.py").read())
from model_interface.wham import WHAM
from Core_functionality.AFTs.agent_class import AFT
from Core_functionality.AFTs.arable_afts import Swidden, SOSH, MOSH, Intense_arable
from Core_functionality.AFTs.livestock_afts import Pastoralist, Ext_LF_r, Int_LF_r, Ext_LF_p, Int_LF_p
from Core_functionality.AFTs.forestry_afts import Agroforestry, Logger, Managed_forestry, Abandoned_forestry
from Core_functionality.AFTs.nonex_afts import Hunter_gatherer, Recreationalist, SLM, Conservationist
from Core_functionality.AFTs.land_system_class import land_system
from Core_functionality.AFTs.land_systems import Cropland, Pasture, Rangeland, Forestry, Urban, Unoccupied, Nonex
from Core_functionality.top_down_processes.arson import arson
from Core_functionality.top_down_processes.background_ignitions import background_rate
from Core_functionality.top_down_processes.fire_constraints import fuel_ct, dominant_afr_ct
from Core_functionality.Trees.Transfer_tree import define_tree_links, predict_from_tree, update_pars, predict_from_tree_fast
from Core_functionality.prediction_tools.regression_families import regression_link, regression_transformation
#####################################################################
### Run model year then reproduce outputs
#####################################################################
### Run model for 1 year
all_afts = [Swidden, SOSH, MOSH, Intense_arable,
Pastoralist, Ext_LF_r, Int_LF_r, Ext_LF_p, Int_LF_p,
Agroforestry, Logger, Managed_forestry, Abandoned_forestry,
Hunter_gatherer, Recreationalist, SLM, Conservationist]
parameters = {
'xlen': 192,
'ylen': 144,
'AFTs': all_afts,
'LS' : [Cropland, Rangeland, Pasture, Forestry, Nonex, Unoccupied, Urban],
'Fire_types': {'cfp': 'Vegetation', 'crb': 'Arable', 'hg': 'Vegetation',
'pasture': 'Pasture', 'pyrome': 'Vegetation'},
'Observers': {'arson': arson, 'background_rate': background_rate},
'AFT_pars': Core_pars,
'Maps' : Map_data,
'Constraint_pars': {'Soil_threshold': 0.1325,
'Dominant_afr_threshold': 0.5,
'Rangeland_stocking_contstraint': True,
'R_s_c_Positive' : False,
'HG_Market_constraint': 7800,
'Arson_threshold': 0.5},
'timestep': 0,
'end_run' : 0,
'reporters': ['Managed_fire', 'Background_ignitions','Arson'],
'theta' : 0.1,
'bootstrap': False,
'Seasonality': False
}
mod = WHAM(parameters)
### setup
mod.setup()
### go
mod.go()
mod_annual = deepcopy(mod.results['Managed_fire'][0]['Total'])
#######################
### Run model monthly
#######################
all_afts = [Swidden, SOSH, MOSH, Intense_arable,
Pastoralist, Ext_LF_r, Int_LF_r, Ext_LF_p, Int_LF_p,
Agroforestry, Logger, Managed_forestry, Abandoned_forestry,
Hunter_gatherer, Recreationalist, SLM, Conservationist]
parameters = {
'xlen': 192,
'ylen': 144,
'AFTs': all_afts,
'LS' : [Cropland, Rangeland, Pasture, Forestry, Nonex, Unoccupied, Urban],
'Fire_types': {'cfp': 'Vegetation', 'crb': 'Arable', 'hg': 'Vegetation',
'pasture': 'Pasture', 'pyrome': 'Vegetation'},
'Fire_seasonality': Seasonality,
'Observers': {'arson': arson, 'background_rate': background_rate},
'AFT_pars': Core_pars,
'Maps' : Map_data,
'Constraint_pars': {'Soil_threshold': 0.1325,
'Dominant_afr_threshold': 0.5,
'Rangeland_stocking_contstraint': True,
'R_s_c_Positive' : False,
'HG_Market_constraint': 7800,
'Arson_threshold': 0.5},
'timestep': 0,
'end_run' : 0,
'reporters': ['Managed_fire', 'Background_ignitions','Arson'],
'theta' : 0.1,
'bootstrap': False,
'Seasonality': True
}
mod = WHAM(parameters)
### setup
mod.setup()
### go
mod.go()
##################################
### tests
##################################
def test_seasonality_mean():
seasonal = np.nansum(mod.results['Managed_fire'][0]['Total'], axis = 0)
assert pytest.approx(np.nanmean(mod_annual)) == np.nanmean(seasonal)
def test_seasonality_quantiles():
seasonal = np.nansum(mod.results['Managed_fire'][0]['Total'], axis = 0)
quants = [0, 0.2, 0.4, 0.5, 0.6, 0.8, 1]
assert pytest.approx(np.nanquantile(mod_annual, quants)) == np.nanquantile(seasonal, quants)
| [((19, 0, 19, 41), 'os.chdir', 'os.chdir', ({(19, 10, 19, 39): "(wd[0:-6] + '/src/data_import')"}, {}), "(wd[0:-6] + '/src/data_import')", False, 'import os\n'), ((82, 6, 82, 22), 'model_interface.wham.WHAM', 'WHAM', ({(82, 11, 82, 21): 'parameters'}, {}), '(parameters)', False, 'from model_interface.wham import WHAM\n'), ((90, 13, 90, 62), 'copy.deepcopy', 'deepcopy', ({(90, 22, 90, 61): "mod.results['Managed_fire'][0]['Total']"}, {}), "(mod.results['Managed_fire'][0]['Total'])", False, 'from copy import deepcopy\n'), ((129, 6, 129, 22), 'model_interface.wham.WHAM', 'WHAM', ({(129, 11, 129, 21): 'parameters'}, {}), '(parameters)', False, 'from model_interface.wham import WHAM\n'), ((145, 15, 145, 75), 'numpy.nansum', 'np.nansum', (), '', True, 'import numpy as np\n'), ((152, 15, 152, 75), 'numpy.nansum', 'np.nansum', (), '', True, 'import numpy as np\n'), ((15, 25, 15, 51), 'os.path.realpath', 'os.path.realpath', ({(15, 42, 15, 50): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((16, 5, 16, 16), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os\n'), ((147, 52, 147, 72), 'numpy.nanmean', 'np.nanmean', ({(147, 63, 147, 71): 'seasonal'}, {}), '(seasonal)', True, 'import numpy as np\n'), ((156, 64, 156, 96), 'numpy.nanquantile', 'np.nanquantile', ({(156, 79, 156, 87): 'seasonal', (156, 89, 156, 95): 'quants'}, {}), '(seasonal, quants)', True, 'import numpy as np\n'), ((147, 25, 147, 47), 'numpy.nanmean', 'np.nanmean', ({(147, 36, 147, 46): 'mod_annual'}, {}), '(mod_annual)', True, 'import numpy as np\n'), ((156, 25, 156, 59), 'numpy.nanquantile', 'np.nanquantile', ({(156, 40, 156, 50): 'mod_annual', (156, 52, 156, 58): 'quants'}, {}), '(mod_annual, quants)', True, 'import numpy as np\n')] |
unizar-30226-2019-08/Backend | bookalo/funciones_report.py | d14e6fce293330611cd697af033823aa01a2ebfe | from django.shortcuts import render, redirect
from bookalo.pyrebase_settings import db, auth
from bookalo.models import *
from bookalo.serializers import *
#from bookalo.functions import *
from rest_framework import status, permissions
from rest_framework.decorators import api_view, permission_classes
from rest_framework.response import Response
from rest_framework.request import Request
from rest_framework.test import APIRequestFactory
from operator import itemgetter
from django.http import HttpResponse
from datetime import datetime, timedelta, timezone
from django.db.models import Q, Count
from django.contrib.gis.geoip2 import GeoIP2
from math import sin, cos, sqrt, atan2, radians
from decimal import Decimal
from django.core.mail import EmailMessage
from .funciones_chat import *
def CrearReport(reporteduserUid, cause, comment):
reporteduser = Usuario.objects.get(uid=reporteduserUid)
reporte = Report.objects.create(usuario_reportado=reporteduser, causa=cause, comentario=comment)
return reporte
def MandarCorreo(user,reporteduserUid, cause, comment, id_chat, pk_report):
try:
correo = '[email protected]'
reporteduser = Usuario.objects.get(uid=reporteduserUid)
mensaje = 'El usuario ' + reporteduser.nombre + ' con uid ' + reporteduser.uid + ' y una media de valoraciones de ' + str(reporteduser.media_valoraciones) + ', ha sido reportado por el usuario ' + user.nombre + ' con uid ' + user.uid + '\n\nCausa: ' + cause + '\nComentario del usuario: ' + comment + '.'
if id_chat != 'nothing':
chat = Chat.objects.get(id=int(id_chat))
mensaje = mensaje + '\n\nMensajes del chat:'
mensajes_chat = Mensaje.objects.filter(chat_asociado=chat).order_by('hora')
for m in mensajes_chat:
hora_mensaje = str(m.hora.year)+ '-' + str(m.hora.month) + '-' + str(m.hora.day) + ' a las ' + str(m.hora.hour) +':'+ str(m.hora.minute) +':'+ str(m.hora.second)
mensaje = mensaje +'\n' + "[" + m.emisor.nombre +', ' + hora_mensaje + "]" + ': ' + m.texto
mensaje = mensaje + "\nA continuación se te presentan las distintas acciones posibles que tienes como moderador:\n\n"
mensaje = mensaje + "Aceptar reporte: https://bookalo.es/api/accept_report?id=" + str(pk_report) + "\n\n"
mensaje = mensaje + "Rechazar reporte: https://bookalo.es/api/reject_report?id=" + str(pk_report) + "\n"
email = EmailMessage('Reporte de usuario ' + reporteduser.nombre, mensaje,
to=[correo])
email.send()
return True
except:
return False | [((41, 10, 42, 16), 'django.core.mail.EmailMessage', 'EmailMessage', (), '', False, 'from django.core.mail import EmailMessage\n')] |
patvdleer/nefit-client-python | tests/test_client.py | 97f2c1e454b7c0d5829e1a9c285c998980c603e3 | import os
import unittest
from nefit import NefitClient, NefitResponseException
class ClientTest(unittest.TestCase):
def test_exceptions(self):
client = NefitClient(
os.environ.get("NEFIT_SERIAL", 123456789),
os.environ.get("NEFIT_ACCESS_KEY", "abc1abc2abc3abc4"),
"asddasadsasdcx"
)
client.connect()
with self.assertRaises(NefitResponseException):
client.get_display_code()
client.disconnect()
client.force_disconnect()
if __name__ == '__main__':
unittest.main()
| [((21, 4, 21, 19), 'unittest.main', 'unittest.main', ({}, {}), '()', False, 'import unittest\n'), ((9, 12, 9, 53), 'os.environ.get', 'os.environ.get', ({(9, 27, 9, 41): '"""NEFIT_SERIAL"""', (9, 43, 9, 52): '123456789'}, {}), "('NEFIT_SERIAL', 123456789)", False, 'import os\n'), ((10, 12, 10, 66), 'os.environ.get', 'os.environ.get', ({(10, 27, 10, 45): '"""NEFIT_ACCESS_KEY"""', (10, 47, 10, 65): '"""abc1abc2abc3abc4"""'}, {}), "('NEFIT_ACCESS_KEY', 'abc1abc2abc3abc4')", False, 'import os\n')] |
GovHawkDC/python-opencivicdata | opencivicdata/merge.py | 1679a4e5df381c777c3e6c53d7c056321662e99a | import datetime
from django.db import transaction
def compute_diff(obj1, obj2):
"""
Given two objects compute a list of differences.
Each diff dict has the following keys:
field - name of the field
new - the new value for the field
one - value of the field in obj1
two - value of the field in obj2
diff - none|one|two|new
list - true if field is a list of related objects
"""
comparison = []
fields = obj1._meta.get_fields()
exclude = ('created_at', 'updated_at', 'id', 'locked_fields')
if obj1 == obj2:
raise ValueError('cannot merge object with itself')
for field in fields:
if field.name in exclude:
continue
elif not field.is_relation:
piece_one = getattr(obj1, field.name)
piece_two = getattr(obj2, field.name)
if piece_one == piece_two:
diff = 'none'
new = piece_one
elif piece_one:
diff = 'one'
new = piece_one
elif piece_two:
diff = 'two'
new = piece_two
comparison.append({
'field': field.name,
'new': new,
'one': getattr(obj1, field.name),
'two': getattr(obj2, field.name),
'diff': diff,
'list': False,
})
else:
related_name = field.get_accessor_name()
piece_one = list(getattr(obj1, related_name).all())
piece_two = list(getattr(obj2, related_name).all())
# TODO: try and deduplicate the lists?
new = piece_one + piece_two
diff = 'none' if piece_one == piece_two else 'one'
if (field.name == 'other_names' and obj1.name != obj2.name):
new.append(field.related_model(name=obj2.name,
note='from merge w/ ' + obj2.id)
)
diff = 'new'
if field.name == 'identifiers':
new.append(field.related_model(identifier=obj2.id))
diff = 'new'
if field.name == 'memberships':
new = _dedupe_memberships(new)
comparison.append({
'field': related_name,
'new': new,
'one': piece_one,
'two': piece_two,
'diff': diff,
'list': True,
})
comparison.append({'field': 'created_at',
'new': min(obj1.created_at, obj2.created_at),
'one': obj1.created_at,
'two': obj2.created_at,
'diff': 'one' if obj1.created_at < obj2.created_at else 'two',
'list': False,
})
comparison.append({'field': 'updated_at',
'new': datetime.datetime.utcnow(),
'one': obj1.updated_at,
'two': obj2.updated_at,
'diff': 'new',
'list': False,
})
# locked fields are any fields that change that aren't M2M relations
# (ending in _set)
new_locked_fields = obj1.locked_fields + obj2.locked_fields + [
c['field'] for c in comparison if c['diff'] != 'none' and not c['field'].endswith('_set')
]
new_locked_fields = set(new_locked_fields) - {'updated_at', 'created_at'}
comparison.append({'field': 'locked_fields',
'new': list(new_locked_fields),
'one': obj1.locked_fields,
'two': obj2.updated_at,
'diff': 'new',
'list': False,
})
return comparison
@transaction.atomic
def apply_diff(obj1, obj2, diff):
for row in diff:
if row['diff'] != 'none':
if row['list']:
# save items, the ids have been set to obj1
for item in row['new']:
setattr(item,
getattr(obj1, row['field']).field.name,
obj1)
item.save()
else:
setattr(obj1, row['field'], row['new'])
obj1.save()
count, delete_plan = obj2.delete()
if count > 1:
# shouldn't happen, but let's be sure
raise AssertionError('deletion failed due to related objects left unmerged')
def merge(obj1, obj2):
diff = compute_diff(obj1, obj2)
apply_diff(obj1, obj2, diff)
def _dedupe_memberships(memberships):
deduped = []
mset = set()
for membership in memberships:
mkey = (membership.organization_id,
membership.label,
membership.end_date,
membership.post_id)
if mkey not in mset:
deduped.append(membership)
mset.add(mkey)
else:
membership.delete()
return deduped
| [((83, 30, 83, 56), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ({}, {}), '()', False, 'import datetime\n')] |
silverguo/pants | src/python/pants/core/project_info/filedeps.py | 141510d03fbf2b7e1a0b54f66b54088697f6fa51 | # Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import itertools
from pathlib import PurePath
from typing import Iterable
from pants.base.build_root import BuildRoot
from pants.engine.addresses import Address, Addresses, BuildFileAddress
from pants.engine.console import Console
from pants.engine.goal import Goal, GoalSubsystem, LineOriented
from pants.engine.rules import goal_rule
from pants.engine.selectors import Get, MultiGet
from pants.engine.target import (
HydratedSources,
HydrateSourcesRequest,
Sources,
Target,
Targets,
TransitiveTargets,
)
class FiledepsOptions(LineOriented, GoalSubsystem):
"""List all source and BUILD files a target depends on."""
name = "filedeps2"
@classmethod
def register_options(cls, register):
super().register_options(register)
register(
"--absolute",
type=bool,
default=True,
help=(
"If True, output with absolute path; else, output with path relative to the "
"build root."
),
)
register(
"--globs",
type=bool,
default=False,
help=(
"Instead of outputting filenames, output the original globs used in the BUILD "
"file. This will not include exclude globs (i.e. globs that start with `!`)."
),
)
register(
"--transitive",
type=bool,
default=False,
help="If True, include the files used by dependencies in the output.",
)
class Filedeps(Goal):
subsystem_cls = FiledepsOptions
@goal_rule
async def file_deps(
console: Console, options: FiledepsOptions, build_root: BuildRoot, addresses: Addresses,
) -> Filedeps:
targets: Iterable[Target]
if options.values.transitive:
transitive_targets = await Get[TransitiveTargets](Addresses, addresses)
targets = transitive_targets.closure
else:
targets = await Get[Targets](Addresses, addresses)
build_file_addresses = await MultiGet(
Get[BuildFileAddress](Address, tgt.address) for tgt in targets
)
unique_rel_paths = {bfa.rel_path for bfa in build_file_addresses}
if options.values.globs:
unique_rel_paths.update(
itertools.chain.from_iterable(tgt.get(Sources).filespec["globs"] for tgt in targets)
)
else:
all_hydrated_sources = await MultiGet(
Get[HydratedSources](HydrateSourcesRequest, tgt.get(Sources).request) for tgt in targets
)
unique_rel_paths.update(
itertools.chain.from_iterable(
hydrated_sources.snapshot.files for hydrated_sources in all_hydrated_sources
)
)
with options.line_oriented(console) as print_stdout:
for rel_path in sorted(unique_rel_paths):
final_path = (
PurePath(build_root.path, rel_path).as_posix()
if options.values.absolute
else rel_path
)
print_stdout(final_path)
return Filedeps(exit_code=0)
def rules():
return [file_deps]
| [((87, 12, 89, 13), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', ({(87, 41, 89, 13): '(hydrated_sources.snapshot.files for hydrated_sources in all_hydrated_sources)'}, {}), '(hydrated_sources.snapshot.files for\n hydrated_sources in all_hydrated_sources)', False, 'import itertools\n'), ((95, 16, 95, 51), 'pathlib.PurePath', 'PurePath', ({(95, 25, 95, 40): 'build_root.path', (95, 42, 95, 50): 'rel_path'}, {}), '(build_root.path, rel_path)', False, 'from pathlib import PurePath\n')] |
dq922/CloudControlVM | perfkitbenchmarker/providers/rackspace/rackspace_network.py | fae2cf7d2c4388e1dc657bd9245d88f2cb1b9b52 | # Copyright 2014 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing classes related to Rackspace VM networking.
The SecurityGroup class provides a way of opening VM ports. The Network class
allows VMs to communicate via internal IPs.
"""
import json
import os
import threading
from perfkitbenchmarker import flags
from perfkitbenchmarker import network
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers.rackspace import util
from perfkitbenchmarker import providers
FLAGS = flags.FLAGS
SSH_PORT = 22
class RackspaceSecurityGroup(network.BaseFirewall):
"""An object representing the Rackspace Security Group."""
CLOUD = providers.RACKSPACE
def __init__(self):
"""Initialize Rackspace security group class."""
self._lock = threading.Lock()
self.firewall_names = set()
self.sg_counter = 0
def AllowPort(self, vm, port):
"""Opens a port on the firewall.
Args:
vm: The BaseVirtualMachine object to open the port for.
port: The local port to open.
"""
if vm.is_static or not FLAGS.use_security_group or port == SSH_PORT:
return
with self._lock:
firewall_name = ('perfkit-firewall-%s-%d-%d' %
(FLAGS.run_uri, port, self.sg_counter))
self.sg_counter += 1
if firewall_name in self.firewall_names:
return
firewall_env = dict(os.environ.copy(),
**util.GetDefaultRackspaceNeutronEnv(self))
firewall_cmd = [FLAGS.neutron_path]
firewall_cmd.extend(['security-group-create'])
firewall_cmd.append(firewall_name)
vm_util.IssueRetryableCommand(firewall_cmd, env=firewall_env)
self.firewall_names.add(firewall_name)
for protocol in ['tcp', 'udp']:
rule_cmd = []
rule_cmd.extend([FLAGS.neutron_path,
'security-group-rule-create',
'--direction', 'ingress',
'--ethertype', 'IPv4',
'--protocol', protocol,
'--port-range-min', str(port),
'--port-range-max', str(port)])
rule_cmd.append(firewall_name)
vm_util.IssueRetryableCommand(rule_cmd, env=firewall_env)
rule_cmd = []
rule_cmd.extend([FLAGS.neutron_path,
'security-group-rule-create',
'--direction', 'ingress',
'--ethertype', 'IPv4',
'--protocol', 'tcp',
'--port-range-min', str(SSH_PORT),
'--port-range-max', str(SSH_PORT)])
rule_cmd.append(firewall_name)
vm_util.IssueRetryableCommand(rule_cmd, env=firewall_env)
getport_cmd = []
getport_cmd.extend([FLAGS.neutron_path, 'port-list',
'--format', 'table'])
stdout, _ = vm_util.IssueRetryableCommand(getport_cmd,
env=firewall_env)
attrs = stdout.split('\n')
for attr in attrs:
if vm.ip_address in attr or vm.ip_address6 in attr:
port_id = [v.strip() for v in attr.split('|') if v != ''][0]
if port_id != '':
break
if not port_id:
raise ValueError('Could not find port_id from response.')
updateport_cmd = []
updateport_cmd.extend([FLAGS.neutron_path, 'port-update'])
for firewall in self.firewall_names:
updateport_cmd.extend(['--security-group', firewall])
updateport_cmd.append(port_id)
vm_util.IssueRetryableCommand(updateport_cmd, env=firewall_env)
def DisallowAllPorts(self):
"""Closes all ports on the firewall."""
firewall_env = dict(os.environ.copy(),
**util.GetDefaultRackspaceNeutronEnv(self))
for firewall in self.firewall_names:
firewall_cmd = []
firewall_cmd.extend([FLAGS.neutron_path,
'security-group-show',
'--format', 'value'])
firewall_cmd.append(firewall)
stdout, _ = vm_util.IssueRetryableCommand(firewall_cmd,
env=firewall_env)
rules = [v for v in stdout.split('\n') if v != ''][2:-1]
for rule in rules:
rule_id = str(json.loads(rule)['id'])
rule_cmd = []
rule_cmd.extend([FLAGS.neutron_path,
'security-group-rule-delete'])
rule_cmd.append(rule_id)
vm_util.IssueRetryableCommand(rule_cmd, env=firewall_env)
firewall_cmd = [FLAGS.neutron_path]
firewall_cmd.extend(['security-group-delete'])
firewall_cmd.append(firewall)
vm_util.IssueRetryableCommand(firewall_cmd, env=firewall_env)
self.firewall_names.remove(firewall)
| [((41, 21, 41, 37), 'threading.Lock', 'threading.Lock', ({}, {}), '()', False, 'import threading\n'), ((68, 12, 68, 73), 'perfkitbenchmarker.vm_util.IssueRetryableCommand', 'vm_util.IssueRetryableCommand', (), '', False, 'from perfkitbenchmarker import vm_util\n'), ((95, 12, 95, 69), 'perfkitbenchmarker.vm_util.IssueRetryableCommand', 'vm_util.IssueRetryableCommand', (), '', False, 'from perfkitbenchmarker import vm_util\n'), ((101, 24, 102, 71), 'perfkitbenchmarker.vm_util.IssueRetryableCommand', 'vm_util.IssueRetryableCommand', (), '', False, 'from perfkitbenchmarker import vm_util\n'), ((119, 12, 119, 75), 'perfkitbenchmarker.vm_util.IssueRetryableCommand', 'vm_util.IssueRetryableCommand', (), '', False, 'from perfkitbenchmarker import vm_util\n'), ((123, 28, 123, 45), 'os.environ.copy', 'os.environ.copy', ({}, {}), '()', False, 'import os\n'), ((133, 24, 134, 71), 'perfkitbenchmarker.vm_util.IssueRetryableCommand', 'vm_util.IssueRetryableCommand', (), '', False, 'from perfkitbenchmarker import vm_util\n'), ((150, 12, 150, 73), 'perfkitbenchmarker.vm_util.IssueRetryableCommand', 'vm_util.IssueRetryableCommand', (), '', False, 'from perfkitbenchmarker import vm_util\n'), ((61, 32, 61, 49), 'os.environ.copy', 'os.environ.copy', ({}, {}), '()', False, 'import os\n'), ((83, 16, 83, 73), 'perfkitbenchmarker.vm_util.IssueRetryableCommand', 'vm_util.IssueRetryableCommand', (), '', False, 'from perfkitbenchmarker import vm_util\n'), ((124, 30, 124, 70), 'perfkitbenchmarker.providers.rackspace.util.GetDefaultRackspaceNeutronEnv', 'util.GetDefaultRackspaceNeutronEnv', ({(124, 65, 124, 69): 'self'}, {}), '(self)', False, 'from perfkitbenchmarker.providers.rackspace import util\n'), ((144, 16, 144, 73), 'perfkitbenchmarker.vm_util.IssueRetryableCommand', 'vm_util.IssueRetryableCommand', (), '', False, 'from perfkitbenchmarker import vm_util\n'), ((62, 34, 62, 74), 'perfkitbenchmarker.providers.rackspace.util.GetDefaultRackspaceNeutronEnv', 'util.GetDefaultRackspaceNeutronEnv', ({(62, 69, 62, 73): 'self'}, {}), '(self)', False, 'from perfkitbenchmarker.providers.rackspace import util\n'), ((138, 30, 138, 46), 'json.loads', 'json.loads', ({(138, 41, 138, 45): 'rule'}, {}), '(rule)', False, 'import json\n')] |
mjdesrosiers/dota2py | dota2py/proto/dota_usermessages_pb2.py | 744f44ba6993c99932037df15de2c08dbd265674 | # Generated by the protocol buffer compiler. DO NOT EDIT!
from google.protobuf import descriptor
from google.protobuf import message
from google.protobuf import reflection
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
import google.protobuf.descriptor_pb2
import netmessages_pb2
import ai_activity_pb2
import dota_commonmessages_pb2
DESCRIPTOR = descriptor.FileDescriptor(
name='dota_usermessages.proto',
package='',
serialized_pb='\n\x17\x64ota_usermessages.proto\x1a google/protobuf/descriptor.proto\x1a\x11netmessages.proto\x1a\x11\x61i_activity.proto\x1a\x19\x64ota_commonmessages.proto\"+\n\x18\x43\x44OTAUserMsg_AIDebugLine\x12\x0f\n\x07message\x18\x01 \x01(\t\"$\n\x11\x43\x44OTAUserMsg_Ping\x12\x0f\n\x07message\x18\x01 \x01(\t\",\n\x17\x43\x44OTAUserMsg_SwapVerify\x12\x11\n\tplayer_id\x18\x01 \x01(\r\"\xef\x01\n\x16\x43\x44OTAUserMsg_ChatEvent\x12\x36\n\x04type\x18\x01 \x02(\x0e\x32\x12.DOTA_CHAT_MESSAGE:\x14\x43HAT_MESSAGE_INVALID\x12\r\n\x05value\x18\x02 \x01(\r\x12\x16\n\nplayerid_1\x18\x03 \x01(\x11:\x02-1\x12\x16\n\nplayerid_2\x18\x04 \x01(\x11:\x02-1\x12\x16\n\nplayerid_3\x18\x05 \x01(\x11:\x02-1\x12\x16\n\nplayerid_4\x18\x06 \x01(\x11:\x02-1\x12\x16\n\nplayerid_5\x18\x07 \x01(\x11:\x02-1\x12\x16\n\nplayerid_6\x18\x08 \x01(\x11:\x02-1\"\xfd\x01\n\x1a\x43\x44OTAUserMsg_CombatLogData\x12:\n\x04type\x18\x01 \x01(\x0e\x32\x15.DOTA_COMBATLOG_TYPES:\x15\x44OTA_COMBATLOG_DAMAGE\x12\x13\n\x0btarget_name\x18\x02 \x01(\r\x12\x15\n\rattacker_name\x18\x03 \x01(\r\x12\x19\n\x11\x61ttacker_illusion\x18\x04 \x01(\x08\x12\x17\n\x0ftarget_illusion\x18\x05 \x01(\x08\x12\x16\n\x0einflictor_name\x18\x06 \x01(\r\x12\r\n\x05value\x18\x07 \x01(\x05\x12\x0e\n\x06health\x18\x08 \x01(\x05\x12\x0c\n\x04time\x18\t \x01(\x02\"!\n\x1f\x43\x44OTAUserMsg_CombatLogShowDeath\"Z\n\x14\x43\x44OTAUserMsg_BotChat\x12\x11\n\tplayer_id\x18\x01 \x01(\r\x12\x0e\n\x06\x66ormat\x18\x02 \x01(\t\x12\x0f\n\x07message\x18\x03 \x01(\t\x12\x0e\n\x06target\x18\x04 \x01(\t\"q\n CDOTAUserMsg_CombatHeroPositions\x12\r\n\x05index\x18\x01 \x01(\r\x12\x0c\n\x04time\x18\x02 \x01(\x05\x12 \n\tworld_pos\x18\x03 \x01(\x0b\x32\r.CMsgVector2D\x12\x0e\n\x06health\x18\x04 \x01(\x05\"\xfd\x01\n\x1c\x43\x44OTAUserMsg_MiniKillCamInfo\x12\x39\n\tattackers\x18\x01 \x03(\x0b\x32&.CDOTAUserMsg_MiniKillCamInfo.Attacker\x1a\xa1\x01\n\x08\x41ttacker\x12\x10\n\x08\x61ttacker\x18\x01 \x01(\r\x12\x14\n\x0ctotal_damage\x18\x02 \x01(\x05\x12\x41\n\tabilities\x18\x03 \x03(\x0b\x32..CDOTAUserMsg_MiniKillCamInfo.Attacker.Ability\x1a*\n\x07\x41\x62ility\x12\x0f\n\x07\x61\x62ility\x18\x01 \x01(\r\x12\x0e\n\x06\x64\x61mage\x18\x02 \x01(\x05\"@\n\x1d\x43\x44OTAUserMsg_GlobalLightColor\x12\r\n\x05\x63olor\x18\x01 \x01(\r\x12\x10\n\x08\x64uration\x18\x02 \x01(\x02\"U\n!CDOTAUserMsg_GlobalLightDirection\x12\x1e\n\tdirection\x18\x01 \x01(\x0b\x32\x0b.CMsgVector\x12\x10\n\x08\x64uration\x18\x02 \x01(\x02\"]\n\x19\x43\x44OTAUserMsg_LocationPing\x12\x11\n\tplayer_id\x18\x01 \x01(\r\x12-\n\rlocation_ping\x18\x02 \x01(\x0b\x32\x16.CDOTAMsg_LocationPing\"T\n\x16\x43\x44OTAUserMsg_ItemAlert\x12\x11\n\tplayer_id\x18\x01 \x01(\r\x12\'\n\nitem_alert\x18\x02 \x01(\x0b\x32\x13.CDOTAMsg_ItemAlert\"n\n\x19\x43\x44OTAUserMsg_MinimapEvent\x12\x12\n\nevent_type\x18\x01 \x01(\x05\x12\x15\n\rentity_handle\x18\x02 \x01(\x05\x12\t\n\x01x\x18\x03 \x01(\x05\x12\t\n\x01y\x18\x04 \x01(\x05\x12\x10\n\x08\x64uration\x18\x05 \x01(\x05\"M\n\x14\x43\x44OTAUserMsg_MapLine\x12\x11\n\tplayer_id\x18\x01 \x01(\x05\x12\"\n\x07mapline\x18\x02 \x01(\x0b\x32\x11.CDOTAMsg_MapLine\"n\n\x1e\x43\x44OTAUserMsg_MinimapDebugPoint\x12\x1d\n\x08location\x18\x01 \x01(\x0b\x32\x0b.CMsgVector\x12\r\n\x05\x63olor\x18\x02 \x01(\r\x12\x0c\n\x04size\x18\x03 \x01(\x05\x12\x10\n\x08\x64uration\x18\x04 \x01(\x02\"\xae\x01\n#CDOTAUserMsg_CreateLinearProjectile\x12\x1b\n\x06origin\x18\x01 \x01(\x0b\x32\x0b.CMsgVector\x12\x1f\n\x08velocity\x18\x02 \x01(\x0b\x32\r.CMsgVector2D\x12\x0f\n\x07latency\x18\x03 \x01(\x05\x12\x10\n\x08\x65ntindex\x18\x04 \x01(\x05\x12\x16\n\x0eparticle_index\x18\x05 \x01(\x05\x12\x0e\n\x06handle\x18\x06 \x01(\x05\"6\n$CDOTAUserMsg_DestroyLinearProjectile\x12\x0e\n\x06handle\x18\x01 \x01(\x05\"9\n%CDOTAUserMsg_DodgeTrackingProjectiles\x12\x10\n\x08\x65ntindex\x18\x01 \x02(\x05\"_\n!CDOTAUserMsg_SpectatorPlayerClick\x12\x10\n\x08\x65ntindex\x18\x01 \x02(\x05\x12\x12\n\norder_type\x18\x02 \x01(\x05\x12\x14\n\x0ctarget_index\x18\x03 \x01(\x05\"b\n\x1d\x43\x44OTAUserMsg_NevermoreRequiem\x12\x15\n\rentity_handle\x18\x01 \x01(\x05\x12\r\n\x05lines\x18\x02 \x01(\x05\x12\x1b\n\x06origin\x18\x03 \x01(\x0b\x32\x0b.CMsgVector\".\n\x1b\x43\x44OTAUserMsg_InvalidCommand\x12\x0f\n\x07message\x18\x01 \x01(\t\")\n\x15\x43\x44OTAUserMsg_HudError\x12\x10\n\x08order_id\x18\x01 \x01(\x05\"c\n\x1b\x43\x44OTAUserMsg_SharedCooldown\x12\x10\n\x08\x65ntindex\x18\x01 \x01(\x05\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x10\n\x08\x63ooldown\x18\x03 \x01(\x02\x12\x12\n\nname_index\x18\x04 \x01(\x05\"/\n\x1f\x43\x44OTAUserMsg_SetNextAutobuyItem\x12\x0c\n\x04name\x18\x01 \x01(\t\"X\n\x1b\x43\x44OTAUserMsg_HalloweenDrops\x12\x11\n\titem_defs\x18\x01 \x03(\r\x12\x12\n\nplayer_ids\x18\x02 \x03(\r\x12\x12\n\nprize_list\x18\x03 \x01(\r\"\xfe\x01\n\x1c\x43\x44OTAResponseQuerySerialized\x12\x31\n\x05\x66\x61\x63ts\x18\x01 \x03(\x0b\x32\".CDOTAResponseQuerySerialized.Fact\x1a\xaa\x01\n\x04\x46\x61\x63t\x12\x0b\n\x03key\x18\x01 \x02(\x05\x12\x46\n\x07valtype\x18\x02 \x02(\x0e\x32,.CDOTAResponseQuerySerialized.Fact.ValueType:\x07NUMERIC\x12\x13\n\x0bval_numeric\x18\x03 \x01(\x02\x12\x12\n\nval_string\x18\x04 \x01(\t\"$\n\tValueType\x12\x0b\n\x07NUMERIC\x10\x01\x12\n\n\x06STRING\x10\x02\"\x90\x01\n\x18\x43\x44OTASpeechMatchOnClient\x12\x0f\n\x07\x63oncept\x18\x01 \x01(\x05\x12\x16\n\x0erecipient_type\x18\x02 \x01(\x05\x12\x34\n\rresponsequery\x18\x03 \x01(\x0b\x32\x1d.CDOTAResponseQuerySerialized\x12\x15\n\nrandomseed\x18\x04 \x01(\x0f:\x01\x30\"\xb0\x07\n\x16\x43\x44OTAUserMsg_UnitEvent\x12\x38\n\x08msg_type\x18\x01 \x02(\x0e\x32\x14.EDotaEntityMessages:\x10\x44OTA_UNIT_SPEECH\x12\x14\n\x0c\x65ntity_index\x18\x02 \x02(\x05\x12.\n\x06speech\x18\x03 \x01(\x0b\x32\x1e.CDOTAUserMsg_UnitEvent.Speech\x12\x37\n\x0bspeech_mute\x18\x04 \x01(\x0b\x32\".CDOTAUserMsg_UnitEvent.SpeechMute\x12\x37\n\x0b\x61\x64\x64_gesture\x18\x05 \x01(\x0b\x32\".CDOTAUserMsg_UnitEvent.AddGesture\x12=\n\x0eremove_gesture\x18\x06 \x01(\x0b\x32%.CDOTAUserMsg_UnitEvent.RemoveGesture\x12\x39\n\x0c\x62lood_impact\x18\x07 \x01(\x0b\x32#.CDOTAUserMsg_UnitEvent.BloodImpact\x12\x39\n\x0c\x66\x61\x64\x65_gesture\x18\x08 \x01(\x0b\x32#.CDOTAUserMsg_UnitEvent.FadeGesture\x12\x39\n\x16speech_match_on_client\x18\t \x01(\x0b\x32\x19.CDOTASpeechMatchOnClient\x1ak\n\x06Speech\x12\x0f\n\x07\x63oncept\x18\x01 \x01(\x05\x12\x10\n\x08response\x18\x02 \x01(\t\x12\x16\n\x0erecipient_type\x18\x03 \x01(\x05\x12\r\n\x05level\x18\x04 \x01(\x05\x12\x17\n\x08muteable\x18\x05 \x01(\x08:\x05\x66\x61lse\x1a \n\nSpeechMute\x12\x12\n\x05\x64\x65lay\x18\x01 \x01(\x02:\x03\x30.5\x1ao\n\nAddGesture\x12(\n\x08\x61\x63tivity\x18\x01 \x01(\x0e\x32\t.Activity:\x0b\x41\x43T_INVALID\x12\x0c\n\x04slot\x18\x02 \x01(\x05\x12\x12\n\x07\x66\x61\x64\x65_in\x18\x03 \x01(\x02:\x01\x30\x12\x15\n\x08\x66\x61\x64\x65_out\x18\x04 \x01(\x02:\x03\x30.1\x1a\x39\n\rRemoveGesture\x12(\n\x08\x61\x63tivity\x18\x01 \x01(\x0e\x32\t.Activity:\x0b\x41\x43T_INVALID\x1a@\n\x0b\x42loodImpact\x12\r\n\x05scale\x18\x01 \x01(\x05\x12\x10\n\x08x_normal\x18\x02 \x01(\x05\x12\x10\n\x08y_normal\x18\x03 \x01(\x05\x1a\x37\n\x0b\x46\x61\x64\x65Gesture\x12(\n\x08\x61\x63tivity\x18\x01 \x01(\x0e\x32\t.Activity:\x0b\x41\x43T_INVALID\"0\n\x1a\x43\x44OTAUserMsg_ItemPurchased\x12\x12\n\nitem_index\x18\x01 \x01(\x05\"j\n\x16\x43\x44OTAUserMsg_ItemFound\x12\x0e\n\x06player\x18\x01 \x01(\x05\x12\x0f\n\x07quality\x18\x02 \x01(\x05\x12\x0e\n\x06rarity\x18\x03 \x01(\x05\x12\x0e\n\x06method\x18\x04 \x01(\x05\x12\x0f\n\x07itemdef\x18\x05 \x01(\x05\"\xfd\x0f\n\x1c\x43\x44OTAUserMsg_ParticleManager\x12H\n\x04type\x18\x01 \x02(\x0e\x32\x16.DOTA_PARTICLE_MESSAGE:\"DOTA_PARTICLE_MANAGER_EVENT_CREATE\x12\r\n\x05index\x18\x02 \x02(\r\x12R\n\x16release_particle_index\x18\x03 \x01(\x0b\x32\x32.CDOTAUserMsg_ParticleManager.ReleaseParticleIndex\x12\x45\n\x0f\x63reate_particle\x18\x04 \x01(\x0b\x32,.CDOTAUserMsg_ParticleManager.CreateParticle\x12G\n\x10\x64\x65stroy_particle\x18\x05 \x01(\x0b\x32-.CDOTAUserMsg_ParticleManager.DestroyParticle\x12Z\n\x1a\x64\x65stroy_particle_involving\x18\x06 \x01(\x0b\x32\x36.CDOTAUserMsg_ParticleManager.DestroyParticleInvolving\x12\x45\n\x0fupdate_particle\x18\x07 \x01(\x0b\x32,.CDOTAUserMsg_ParticleManager.UpdateParticle\x12L\n\x13update_particle_fwd\x18\x08 \x01(\x0b\x32/.CDOTAUserMsg_ParticleManager.UpdateParticleFwd\x12R\n\x16update_particle_orient\x18\t \x01(\x0b\x32\x32.CDOTAUserMsg_ParticleManager.UpdateParticleOrient\x12V\n\x18update_particle_fallback\x18\n \x01(\x0b\x32\x34.CDOTAUserMsg_ParticleManager.UpdateParticleFallback\x12R\n\x16update_particle_offset\x18\x0b \x01(\x0b\x32\x32.CDOTAUserMsg_ParticleManager.UpdateParticleOffset\x12L\n\x13update_particle_ent\x18\x0c \x01(\x0b\x32/.CDOTAUserMsg_ParticleManager.UpdateParticleEnt\x12T\n\x17update_particle_latency\x18\r \x01(\x0b\x32\x33.CDOTAUserMsg_ParticleManager.UpdateParticleLatency\x12[\n\x1bupdate_particle_should_draw\x18\x0e \x01(\x0b\x32\x36.CDOTAUserMsg_ParticleManager.UpdateParticleShouldDraw\x1a\x16\n\x14ReleaseParticleIndex\x1aY\n\x0e\x43reateParticle\x12\x1b\n\x13particle_name_index\x18\x01 \x01(\x05\x12\x13\n\x0b\x61ttach_type\x18\x02 \x01(\x05\x12\x15\n\rentity_handle\x18\x03 \x01(\x05\x1a.\n\x0f\x44\x65stroyParticle\x12\x1b\n\x13\x64\x65stroy_immediately\x18\x01 \x01(\x08\x1aN\n\x18\x44\x65stroyParticleInvolving\x12\x1b\n\x13\x64\x65stroy_immediately\x18\x01 \x01(\x08\x12\x15\n\rentity_handle\x18\x03 \x01(\x05\x1a\x46\n\x0eUpdateParticle\x12\x15\n\rcontrol_point\x18\x01 \x01(\x05\x12\x1d\n\x08position\x18\x02 \x01(\x0b\x32\x0b.CMsgVector\x1aH\n\x11UpdateParticleFwd\x12\x15\n\rcontrol_point\x18\x01 \x01(\x05\x12\x1c\n\x07\x66orward\x18\x02 \x01(\x0b\x32\x0b.CMsgVector\x1a\x80\x01\n\x14UpdateParticleOrient\x12\x15\n\rcontrol_point\x18\x01 \x01(\x05\x12\x1c\n\x07\x66orward\x18\x02 \x01(\x0b\x32\x0b.CMsgVector\x12\x1a\n\x05right\x18\x03 \x01(\x0b\x32\x0b.CMsgVector\x12\x17\n\x02up\x18\x04 \x01(\x0b\x32\x0b.CMsgVector\x1aN\n\x16UpdateParticleFallback\x12\x15\n\rcontrol_point\x18\x01 \x01(\x05\x12\x1d\n\x08position\x18\x02 \x01(\x0b\x32\x0b.CMsgVector\x1aQ\n\x14UpdateParticleOffset\x12\x15\n\rcontrol_point\x18\x01 \x01(\x05\x12\"\n\rorigin_offset\x18\x02 \x01(\x0b\x32\x0b.CMsgVector\x1a\x92\x01\n\x11UpdateParticleEnt\x12\x15\n\rcontrol_point\x18\x01 \x01(\x05\x12\x15\n\rentity_handle\x18\x02 \x01(\x05\x12\x13\n\x0b\x61ttach_type\x18\x03 \x01(\x05\x12\x12\n\nattachment\x18\x04 \x01(\x05\x12&\n\x11\x66\x61llback_position\x18\x05 \x01(\x0b\x32\x0b.CMsgVector\x1a=\n\x15UpdateParticleLatency\x12\x16\n\x0eplayer_latency\x18\x01 \x01(\x05\x12\x0c\n\x04tick\x18\x02 \x01(\x05\x1a/\n\x18UpdateParticleShouldDraw\x12\x13\n\x0bshould_draw\x18\x01 \x01(\x08\"\xc5\x01\n\x1a\x43\x44OTAUserMsg_OverheadEvent\x12?\n\x0cmessage_type\x18\x01 \x02(\x0e\x32\x14.DOTA_OVERHEAD_ALERT:\x13OVERHEAD_ALERT_GOLD\x12\r\n\x05value\x18\x02 \x01(\x05\x12\x1e\n\x16target_player_entindex\x18\x03 \x01(\x05\x12\x17\n\x0ftarget_entindex\x18\x04 \x01(\x05\x12\x1e\n\x16source_player_entindex\x18\x05 \x01(\x05\">\n\x1c\x43\x44OTAUserMsg_TutorialTipInfo\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x10\n\x08progress\x18\x02 \x01(\x05\"S\n\x16\x43\x44OTAUserMsg_WorldLine\x12\x11\n\tplayer_id\x18\x01 \x01(\x05\x12&\n\tworldline\x18\x02 \x01(\x0b\x32\x13.CDOTAMsg_WorldLine\"F\n\x1b\x43\x44OTAUserMsg_TournamentDrop\x12\x13\n\x0bwinner_name\x18\x01 \x01(\t\x12\x12\n\nevent_type\x18\x02 \x01(\x05\"|\n\x16\x43\x44OTAUserMsg_ChatWheel\x12;\n\x0c\x63hat_message\x18\x01 \x01(\x0e\x32\x16.EDOTAChatWheelMessage:\rk_EDOTA_CW_Ok\x12\x11\n\tplayer_id\x18\x02 \x01(\r\x12\x12\n\naccount_id\x18\x03 \x01(\r\"]\n\x1d\x43\x44OTAUserMsg_ReceivedXmasGift\x12\x11\n\tplayer_id\x18\x01 \x01(\x05\x12\x11\n\titem_name\x18\x02 \x01(\t\x12\x16\n\x0einventory_slot\x18\x03 \x01(\x05*\xbc\x08\n\x11\x45\x44otaUserMessages\x12\x1e\n\x1a\x44OTA_UM_AddUnitToSelection\x10@\x12\x17\n\x13\x44OTA_UM_AIDebugLine\x10\x41\x12\x15\n\x11\x44OTA_UM_ChatEvent\x10\x42\x12\x1f\n\x1b\x44OTA_UM_CombatHeroPositions\x10\x43\x12\x19\n\x15\x44OTA_UM_CombatLogData\x10\x44\x12\x1e\n\x1a\x44OTA_UM_CombatLogShowDeath\x10\x46\x12\"\n\x1e\x44OTA_UM_CreateLinearProjectile\x10G\x12#\n\x1f\x44OTA_UM_DestroyLinearProjectile\x10H\x12$\n DOTA_UM_DodgeTrackingProjectiles\x10I\x12\x1c\n\x18\x44OTA_UM_GlobalLightColor\x10J\x12 \n\x1c\x44OTA_UM_GlobalLightDirection\x10K\x12\x1a\n\x16\x44OTA_UM_InvalidCommand\x10L\x12\x18\n\x14\x44OTA_UM_LocationPing\x10M\x12\x13\n\x0f\x44OTA_UM_MapLine\x10N\x12\x1b\n\x17\x44OTA_UM_MiniKillCamInfo\x10O\x12\x1d\n\x19\x44OTA_UM_MinimapDebugPoint\x10P\x12\x18\n\x14\x44OTA_UM_MinimapEvent\x10Q\x12\x1c\n\x18\x44OTA_UM_NevermoreRequiem\x10R\x12\x19\n\x15\x44OTA_UM_OverheadEvent\x10S\x12\x1e\n\x1a\x44OTA_UM_SetNextAutobuyItem\x10T\x12\x1a\n\x16\x44OTA_UM_SharedCooldown\x10U\x12 \n\x1c\x44OTA_UM_SpectatorPlayerClick\x10V\x12\x1b\n\x17\x44OTA_UM_TutorialTipInfo\x10W\x12\x15\n\x11\x44OTA_UM_UnitEvent\x10X\x12\x1b\n\x17\x44OTA_UM_ParticleManager\x10Y\x12\x13\n\x0f\x44OTA_UM_BotChat\x10Z\x12\x14\n\x10\x44OTA_UM_HudError\x10[\x12\x19\n\x15\x44OTA_UM_ItemPurchased\x10\\\x12\x10\n\x0c\x44OTA_UM_Ping\x10]\x12\x15\n\x11\x44OTA_UM_ItemFound\x10^\x12!\n\x1d\x44OTA_UM_CharacterSpeakConcept\x10_\x12\x16\n\x12\x44OTA_UM_SwapVerify\x10`\x12\x15\n\x11\x44OTA_UM_WorldLine\x10\x61\x12\x1a\n\x16\x44OTA_UM_TournamentDrop\x10\x62\x12\x15\n\x11\x44OTA_UM_ItemAlert\x10\x63\x12\x1a\n\x16\x44OTA_UM_HalloweenDrops\x10\x64\x12\x15\n\x11\x44OTA_UM_ChatWheel\x10\x65\x12\x1c\n\x18\x44OTA_UM_ReceivedXmasGift\x10\x66*\xe3\x0e\n\x11\x44OTA_CHAT_MESSAGE\x12!\n\x14\x43HAT_MESSAGE_INVALID\x10\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\x12\x1a\n\x16\x43HAT_MESSAGE_HERO_KILL\x10\x00\x12\x1a\n\x16\x43HAT_MESSAGE_HERO_DENY\x10\x01\x12\x1e\n\x1a\x43HAT_MESSAGE_BARRACKS_KILL\x10\x02\x12\x1b\n\x17\x43HAT_MESSAGE_TOWER_KILL\x10\x03\x12\x1b\n\x17\x43HAT_MESSAGE_TOWER_DENY\x10\x04\x12\x1b\n\x17\x43HAT_MESSAGE_FIRSTBLOOD\x10\x05\x12\x1c\n\x18\x43HAT_MESSAGE_STREAK_KILL\x10\x06\x12\x18\n\x14\x43HAT_MESSAGE_BUYBACK\x10\x07\x12\x16\n\x12\x43HAT_MESSAGE_AEGIS\x10\x08\x12\x1c\n\x18\x43HAT_MESSAGE_ROSHAN_KILL\x10\t\x12\x1d\n\x19\x43HAT_MESSAGE_COURIER_LOST\x10\n\x12\"\n\x1e\x43HAT_MESSAGE_COURIER_RESPAWNED\x10\x0b\x12\x1b\n\x17\x43HAT_MESSAGE_GLYPH_USED\x10\x0c\x12\x1e\n\x1a\x43HAT_MESSAGE_ITEM_PURCHASE\x10\r\x12\x18\n\x14\x43HAT_MESSAGE_CONNECT\x10\x0e\x12\x1b\n\x17\x43HAT_MESSAGE_DISCONNECT\x10\x0f\x12.\n*CHAT_MESSAGE_DISCONNECT_WAIT_FOR_RECONNECT\x10\x10\x12*\n&CHAT_MESSAGE_DISCONNECT_TIME_REMAINING\x10\x11\x12\x31\n-CHAT_MESSAGE_DISCONNECT_TIME_REMAINING_PLURAL\x10\x12\x12\x1a\n\x16\x43HAT_MESSAGE_RECONNECT\x10\x13\x12\x18\n\x14\x43HAT_MESSAGE_ABANDON\x10\x14\x12\x1e\n\x1a\x43HAT_MESSAGE_SAFE_TO_LEAVE\x10\x15\x12\x1c\n\x18\x43HAT_MESSAGE_RUNE_PICKUP\x10\x16\x12\x1c\n\x18\x43HAT_MESSAGE_RUNE_BOTTLE\x10\x17\x12\x19\n\x15\x43HAT_MESSAGE_INTHEBAG\x10\x18\x12\x1b\n\x17\x43HAT_MESSAGE_SECRETSHOP\x10\x19\x12#\n\x1f\x43HAT_MESSAGE_ITEM_AUTOPURCHASED\x10\x1a\x12\x1f\n\x1b\x43HAT_MESSAGE_ITEMS_COMBINED\x10\x1b\x12\x1d\n\x19\x43HAT_MESSAGE_SUPER_CREEPS\x10\x1c\x12%\n!CHAT_MESSAGE_CANT_USE_ACTION_ITEM\x10\x1d\x12\"\n\x1e\x43HAT_MESSAGE_CHARGES_EXHAUSTED\x10\x1e\x12\x1a\n\x16\x43HAT_MESSAGE_CANTPAUSE\x10\x1f\x12\x1d\n\x19\x43HAT_MESSAGE_NOPAUSESLEFT\x10 \x12\x1d\n\x19\x43HAT_MESSAGE_CANTPAUSEYET\x10!\x12\x17\n\x13\x43HAT_MESSAGE_PAUSED\x10\"\x12\"\n\x1e\x43HAT_MESSAGE_UNPAUSE_COUNTDOWN\x10#\x12\x19\n\x15\x43HAT_MESSAGE_UNPAUSED\x10$\x12\x1e\n\x1a\x43HAT_MESSAGE_AUTO_UNPAUSED\x10%\x12\x1a\n\x16\x43HAT_MESSAGE_YOUPAUSED\x10&\x12 \n\x1c\x43HAT_MESSAGE_CANTUNPAUSETEAM\x10\'\x12(\n$CHAT_MESSAGE_SAFE_TO_LEAVE_ABANDONER\x10(\x12\"\n\x1e\x43HAT_MESSAGE_VOICE_TEXT_BANNED\x10)\x12.\n*CHAT_MESSAGE_SPECTATORS_WATCHING_THIS_GAME\x10*\x12 \n\x1c\x43HAT_MESSAGE_REPORT_REMINDER\x10+\x12\x1a\n\x16\x43HAT_MESSAGE_ECON_ITEM\x10,\x12\x16\n\x12\x43HAT_MESSAGE_TAUNT\x10-\x12\x17\n\x13\x43HAT_MESSAGE_RANDOM\x10.\x12\x18\n\x14\x43HAT_MESSAGE_RD_TURN\x10/\x12.\n*CHAT_MESSAGE_SAFE_TO_LEAVE_ABANDONER_EARLY\x10\x30\x12 \n\x1c\x43HAT_MESSAGE_DROP_RATE_BONUS\x10\x31\x12!\n\x1d\x43HAT_MESSAGE_NO_BATTLE_POINTS\x10\x32\x12\x1d\n\x19\x43HAT_MESSAGE_DENIED_AEGIS\x10\x33\x12\x1e\n\x1a\x43HAT_MESSAGE_INFORMATIONAL\x10\x34\x12\x1d\n\x19\x43HAT_MESSAGE_AEGIS_STOLEN\x10\x35\x12\x1d\n\x19\x43HAT_MESSAGE_ROSHAN_CANDY\x10\x36\x12\x1c\n\x18\x43HAT_MESSAGE_ITEM_GIFTED\x10\x37\x12\'\n#CHAT_MESSAGE_HERO_KILL_WITH_GREEVIL\x10\x38*\xb2\x01\n\x1d\x44OTA_NO_BATTLE_POINTS_REASONS\x12%\n!NO_BATTLE_POINTS_WRONG_LOBBY_TYPE\x10\x01\x12\"\n\x1eNO_BATTLE_POINTS_PRACTICE_BOTS\x10\x02\x12#\n\x1fNO_BATTLE_POINTS_CHEATS_ENABLED\x10\x03\x12!\n\x1dNO_BATTLE_POINTS_LOW_PRIORITY\x10\x04*7\n\x17\x44OTA_CHAT_INFORMATIONAL\x12\x1c\n\x18\x43OOP_BATTLE_POINTS_RULES\x10\x01*\xa9\x01\n\x14\x44OTA_COMBATLOG_TYPES\x12\x19\n\x15\x44OTA_COMBATLOG_DAMAGE\x10\x00\x12\x17\n\x13\x44OTA_COMBATLOG_HEAL\x10\x01\x12\x1f\n\x1b\x44OTA_COMBATLOG_MODIFIER_ADD\x10\x02\x12\"\n\x1e\x44OTA_COMBATLOG_MODIFIER_REMOVE\x10\x03\x12\x18\n\x14\x44OTA_COMBATLOG_DEATH\x10\x04*\xe5\x01\n\x13\x45\x44otaEntityMessages\x12\x14\n\x10\x44OTA_UNIT_SPEECH\x10\x00\x12\x19\n\x15\x44OTA_UNIT_SPEECH_MUTE\x10\x01\x12\x19\n\x15\x44OTA_UNIT_ADD_GESTURE\x10\x02\x12\x1c\n\x18\x44OTA_UNIT_REMOVE_GESTURE\x10\x03\x12!\n\x1d\x44OTA_UNIT_REMOVE_ALL_GESTURES\x10\x04\x12\x1a\n\x16\x44OTA_UNIT_FADE_GESTURE\x10\x06\x12%\n!DOTA_UNIT_SPEECH_CLIENTSIDE_RULES\x10\x07*\xb2\x04\n\x15\x44OTA_PARTICLE_MESSAGE\x12&\n\"DOTA_PARTICLE_MANAGER_EVENT_CREATE\x10\x00\x12&\n\"DOTA_PARTICLE_MANAGER_EVENT_UPDATE\x10\x01\x12.\n*DOTA_PARTICLE_MANAGER_EVENT_UPDATE_FORWARD\x10\x02\x12\x32\n.DOTA_PARTICLE_MANAGER_EVENT_UPDATE_ORIENTATION\x10\x03\x12/\n+DOTA_PARTICLE_MANAGER_EVENT_UPDATE_FALLBACK\x10\x04\x12*\n&DOTA_PARTICLE_MANAGER_EVENT_UPDATE_ENT\x10\x05\x12-\n)DOTA_PARTICLE_MANAGER_EVENT_UPDATE_OFFSET\x10\x06\x12\'\n#DOTA_PARTICLE_MANAGER_EVENT_DESTROY\x10\x07\x12\x31\n-DOTA_PARTICLE_MANAGER_EVENT_DESTROY_INVOLVING\x10\x08\x12\'\n#DOTA_PARTICLE_MANAGER_EVENT_RELEASE\x10\t\x12\'\n#DOTA_PARTICLE_MANAGER_EVENT_LATENCY\x10\n\x12+\n\'DOTA_PARTICLE_MANAGER_EVENT_SHOULD_DRAW\x10\x0b*\x86\x03\n\x13\x44OTA_OVERHEAD_ALERT\x12\x17\n\x13OVERHEAD_ALERT_GOLD\x10\x00\x12\x17\n\x13OVERHEAD_ALERT_DENY\x10\x01\x12\x1b\n\x17OVERHEAD_ALERT_CRITICAL\x10\x02\x12\x15\n\x11OVERHEAD_ALERT_XP\x10\x03\x12%\n!OVERHEAD_ALERT_BONUS_SPELL_DAMAGE\x10\x04\x12\x17\n\x13OVERHEAD_ALERT_MISS\x10\x05\x12\x19\n\x15OVERHEAD_ALERT_DAMAGE\x10\x06\x12\x18\n\x14OVERHEAD_ALERT_EVADE\x10\x07\x12\x18\n\x14OVERHEAD_ALERT_BLOCK\x10\x08\x12&\n\"OVERHEAD_ALERT_BONUS_POISON_DAMAGE\x10\t\x12\x17\n\x13OVERHEAD_ALERT_HEAL\x10\n\x12\x1b\n\x17OVERHEAD_ALERT_MANA_ADD\x10\x0b\x12\x1c\n\x18OVERHEAD_ALERT_MANA_LOSS\x10\x0c')
_EDOTAUSERMESSAGES = descriptor.EnumDescriptor(
name='EDotaUserMessages',
full_name='EDotaUserMessages',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='DOTA_UM_AddUnitToSelection', index=0, number=64,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='DOTA_UM_AIDebugLine', index=1, number=65,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='DOTA_UM_ChatEvent', index=2, number=66,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='DOTA_UM_CombatHeroPositions', index=3, number=67,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='DOTA_UM_CombatLogData', index=4, number=68,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='DOTA_UM_CombatLogShowDeath', index=5, number=70,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='DOTA_UM_CreateLinearProjectile', index=6, number=71,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='DOTA_UM_DestroyLinearProjectile', index=7, number=72,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='DOTA_UM_DodgeTrackingProjectiles', index=8, number=73,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='DOTA_UM_GlobalLightColor', index=9, number=74,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='DOTA_UM_GlobalLightDirection', index=10, number=75,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='DOTA_UM_InvalidCommand', index=11, number=76,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='DOTA_UM_LocationPing', index=12, number=77,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='DOTA_UM_MapLine', index=13, number=78,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='DOTA_UM_MiniKillCamInfo', index=14, number=79,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='DOTA_UM_MinimapDebugPoint', index=15, number=80,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='DOTA_UM_MinimapEvent', index=16, number=81,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='DOTA_UM_NevermoreRequiem', index=17, number=82,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='DOTA_UM_OverheadEvent', index=18, number=83,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='DOTA_UM_SetNextAutobuyItem', index=19, number=84,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='DOTA_UM_SharedCooldown', index=20, number=85,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='DOTA_UM_SpectatorPlayerClick', index=21, number=86,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='DOTA_UM_TutorialTipInfo', index=22, number=87,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='DOTA_UM_UnitEvent', index=23, number=88,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='DOTA_UM_ParticleManager', index=24, number=89,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='DOTA_UM_BotChat', index=25, number=90,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='DOTA_UM_HudError', index=26, number=91,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='DOTA_UM_ItemPurchased', index=27, number=92,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='DOTA_UM_Ping', index=28, number=93,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='DOTA_UM_ItemFound', index=29, number=94,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='DOTA_UM_CharacterSpeakConcept', index=30, number=95,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='DOTA_UM_SwapVerify', index=31, number=96,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='DOTA_UM_WorldLine', index=32, number=97,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='DOTA_UM_TournamentDrop', index=33, number=98,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='DOTA_UM_ItemAlert', index=34, number=99,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='DOTA_UM_HalloweenDrops', index=35, number=100,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='DOTA_UM_ChatWheel', index=36, number=101,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='DOTA_UM_ReceivedXmasGift', index=37, number=102,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=6908,
serialized_end=7992,
)
_DOTA_CHAT_MESSAGE = descriptor.EnumDescriptor(
name='DOTA_CHAT_MESSAGE',
full_name='DOTA_CHAT_MESSAGE',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='CHAT_MESSAGE_INVALID', index=0, number=-1,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CHAT_MESSAGE_HERO_KILL', index=1, number=0,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CHAT_MESSAGE_HERO_DENY', index=2, number=1,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CHAT_MESSAGE_BARRACKS_KILL', index=3, number=2,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CHAT_MESSAGE_TOWER_KILL', index=4, number=3,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CHAT_MESSAGE_TOWER_DENY', index=5, number=4,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CHAT_MESSAGE_FIRSTBLOOD', index=6, number=5,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CHAT_MESSAGE_STREAK_KILL', index=7, number=6,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CHAT_MESSAGE_BUYBACK', index=8, number=7,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CHAT_MESSAGE_AEGIS', index=9, number=8,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CHAT_MESSAGE_ROSHAN_KILL', index=10, number=9,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CHAT_MESSAGE_COURIER_LOST', index=11, number=10,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CHAT_MESSAGE_COURIER_RESPAWNED', index=12, number=11,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CHAT_MESSAGE_GLYPH_USED', index=13, number=12,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CHAT_MESSAGE_ITEM_PURCHASE', index=14, number=13,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CHAT_MESSAGE_CONNECT', index=15, number=14,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CHAT_MESSAGE_DISCONNECT', index=16, number=15,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CHAT_MESSAGE_DISCONNECT_WAIT_FOR_RECONNECT', index=17, number=16,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CHAT_MESSAGE_DISCONNECT_TIME_REMAINING', index=18, number=17,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CHAT_MESSAGE_DISCONNECT_TIME_REMAINING_PLURAL', index=19, number=18,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CHAT_MESSAGE_RECONNECT', index=20, number=19,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CHAT_MESSAGE_ABANDON', index=21, number=20,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CHAT_MESSAGE_SAFE_TO_LEAVE', index=22, number=21,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CHAT_MESSAGE_RUNE_PICKUP', index=23, number=22,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CHAT_MESSAGE_RUNE_BOTTLE', index=24, number=23,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CHAT_MESSAGE_INTHEBAG', index=25, number=24,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CHAT_MESSAGE_SECRETSHOP', index=26, number=25,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CHAT_MESSAGE_ITEM_AUTOPURCHASED', index=27, number=26,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CHAT_MESSAGE_ITEMS_COMBINED', index=28, number=27,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CHAT_MESSAGE_SUPER_CREEPS', index=29, number=28,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CHAT_MESSAGE_CANT_USE_ACTION_ITEM', index=30, number=29,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CHAT_MESSAGE_CHARGES_EXHAUSTED', index=31, number=30,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CHAT_MESSAGE_CANTPAUSE', index=32, number=31,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CHAT_MESSAGE_NOPAUSESLEFT', index=33, number=32,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CHAT_MESSAGE_CANTPAUSEYET', index=34, number=33,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CHAT_MESSAGE_PAUSED', index=35, number=34,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CHAT_MESSAGE_UNPAUSE_COUNTDOWN', index=36, number=35,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CHAT_MESSAGE_UNPAUSED', index=37, number=36,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CHAT_MESSAGE_AUTO_UNPAUSED', index=38, number=37,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CHAT_MESSAGE_YOUPAUSED', index=39, number=38,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CHAT_MESSAGE_CANTUNPAUSETEAM', index=40, number=39,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CHAT_MESSAGE_SAFE_TO_LEAVE_ABANDONER', index=41, number=40,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CHAT_MESSAGE_VOICE_TEXT_BANNED', index=42, number=41,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CHAT_MESSAGE_SPECTATORS_WATCHING_THIS_GAME', index=43, number=42,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CHAT_MESSAGE_REPORT_REMINDER', index=44, number=43,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CHAT_MESSAGE_ECON_ITEM', index=45, number=44,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CHAT_MESSAGE_TAUNT', index=46, number=45,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CHAT_MESSAGE_RANDOM', index=47, number=46,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CHAT_MESSAGE_RD_TURN', index=48, number=47,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CHAT_MESSAGE_SAFE_TO_LEAVE_ABANDONER_EARLY', index=49, number=48,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CHAT_MESSAGE_DROP_RATE_BONUS', index=50, number=49,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CHAT_MESSAGE_NO_BATTLE_POINTS', index=51, number=50,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CHAT_MESSAGE_DENIED_AEGIS', index=52, number=51,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CHAT_MESSAGE_INFORMATIONAL', index=53, number=52,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CHAT_MESSAGE_AEGIS_STOLEN', index=54, number=53,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CHAT_MESSAGE_ROSHAN_CANDY', index=55, number=54,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CHAT_MESSAGE_ITEM_GIFTED', index=56, number=55,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CHAT_MESSAGE_HERO_KILL_WITH_GREEVIL', index=57, number=56,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=7995,
serialized_end=9886,
)
_DOTA_NO_BATTLE_POINTS_REASONS = descriptor.EnumDescriptor(
name='DOTA_NO_BATTLE_POINTS_REASONS',
full_name='DOTA_NO_BATTLE_POINTS_REASONS',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='NO_BATTLE_POINTS_WRONG_LOBBY_TYPE', index=0, number=1,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='NO_BATTLE_POINTS_PRACTICE_BOTS', index=1, number=2,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='NO_BATTLE_POINTS_CHEATS_ENABLED', index=2, number=3,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='NO_BATTLE_POINTS_LOW_PRIORITY', index=3, number=4,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=9889,
serialized_end=10067,
)
_DOTA_CHAT_INFORMATIONAL = descriptor.EnumDescriptor(
name='DOTA_CHAT_INFORMATIONAL',
full_name='DOTA_CHAT_INFORMATIONAL',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='COOP_BATTLE_POINTS_RULES', index=0, number=1,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=10069,
serialized_end=10124,
)
_DOTA_COMBATLOG_TYPES = descriptor.EnumDescriptor(
name='DOTA_COMBATLOG_TYPES',
full_name='DOTA_COMBATLOG_TYPES',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='DOTA_COMBATLOG_DAMAGE', index=0, number=0,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='DOTA_COMBATLOG_HEAL', index=1, number=1,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='DOTA_COMBATLOG_MODIFIER_ADD', index=2, number=2,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='DOTA_COMBATLOG_MODIFIER_REMOVE', index=3, number=3,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='DOTA_COMBATLOG_DEATH', index=4, number=4,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=10127,
serialized_end=10296,
)
_EDOTAENTITYMESSAGES = descriptor.EnumDescriptor(
name='EDotaEntityMessages',
full_name='EDotaEntityMessages',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='DOTA_UNIT_SPEECH', index=0, number=0,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='DOTA_UNIT_SPEECH_MUTE', index=1, number=1,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='DOTA_UNIT_ADD_GESTURE', index=2, number=2,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='DOTA_UNIT_REMOVE_GESTURE', index=3, number=3,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='DOTA_UNIT_REMOVE_ALL_GESTURES', index=4, number=4,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='DOTA_UNIT_FADE_GESTURE', index=5, number=6,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='DOTA_UNIT_SPEECH_CLIENTSIDE_RULES', index=6, number=7,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=10299,
serialized_end=10528,
)
_DOTA_PARTICLE_MESSAGE = descriptor.EnumDescriptor(
name='DOTA_PARTICLE_MESSAGE',
full_name='DOTA_PARTICLE_MESSAGE',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='DOTA_PARTICLE_MANAGER_EVENT_CREATE', index=0, number=0,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='DOTA_PARTICLE_MANAGER_EVENT_UPDATE', index=1, number=1,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='DOTA_PARTICLE_MANAGER_EVENT_UPDATE_FORWARD', index=2, number=2,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='DOTA_PARTICLE_MANAGER_EVENT_UPDATE_ORIENTATION', index=3, number=3,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='DOTA_PARTICLE_MANAGER_EVENT_UPDATE_FALLBACK', index=4, number=4,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='DOTA_PARTICLE_MANAGER_EVENT_UPDATE_ENT', index=5, number=5,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='DOTA_PARTICLE_MANAGER_EVENT_UPDATE_OFFSET', index=6, number=6,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='DOTA_PARTICLE_MANAGER_EVENT_DESTROY', index=7, number=7,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='DOTA_PARTICLE_MANAGER_EVENT_DESTROY_INVOLVING', index=8, number=8,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='DOTA_PARTICLE_MANAGER_EVENT_RELEASE', index=9, number=9,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='DOTA_PARTICLE_MANAGER_EVENT_LATENCY', index=10, number=10,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='DOTA_PARTICLE_MANAGER_EVENT_SHOULD_DRAW', index=11, number=11,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=10531,
serialized_end=11093,
)
_DOTA_OVERHEAD_ALERT = descriptor.EnumDescriptor(
name='DOTA_OVERHEAD_ALERT',
full_name='DOTA_OVERHEAD_ALERT',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='OVERHEAD_ALERT_GOLD', index=0, number=0,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='OVERHEAD_ALERT_DENY', index=1, number=1,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='OVERHEAD_ALERT_CRITICAL', index=2, number=2,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='OVERHEAD_ALERT_XP', index=3, number=3,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='OVERHEAD_ALERT_BONUS_SPELL_DAMAGE', index=4, number=4,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='OVERHEAD_ALERT_MISS', index=5, number=5,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='OVERHEAD_ALERT_DAMAGE', index=6, number=6,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='OVERHEAD_ALERT_EVADE', index=7, number=7,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='OVERHEAD_ALERT_BLOCK', index=8, number=8,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='OVERHEAD_ALERT_BONUS_POISON_DAMAGE', index=9, number=9,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='OVERHEAD_ALERT_HEAL', index=10, number=10,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='OVERHEAD_ALERT_MANA_ADD', index=11, number=11,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='OVERHEAD_ALERT_MANA_LOSS', index=12, number=12,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=11096,
serialized_end=11486,
)
DOTA_UM_AddUnitToSelection = 64
DOTA_UM_AIDebugLine = 65
DOTA_UM_ChatEvent = 66
DOTA_UM_CombatHeroPositions = 67
DOTA_UM_CombatLogData = 68
DOTA_UM_CombatLogShowDeath = 70
DOTA_UM_CreateLinearProjectile = 71
DOTA_UM_DestroyLinearProjectile = 72
DOTA_UM_DodgeTrackingProjectiles = 73
DOTA_UM_GlobalLightColor = 74
DOTA_UM_GlobalLightDirection = 75
DOTA_UM_InvalidCommand = 76
DOTA_UM_LocationPing = 77
DOTA_UM_MapLine = 78
DOTA_UM_MiniKillCamInfo = 79
DOTA_UM_MinimapDebugPoint = 80
DOTA_UM_MinimapEvent = 81
DOTA_UM_NevermoreRequiem = 82
DOTA_UM_OverheadEvent = 83
DOTA_UM_SetNextAutobuyItem = 84
DOTA_UM_SharedCooldown = 85
DOTA_UM_SpectatorPlayerClick = 86
DOTA_UM_TutorialTipInfo = 87
DOTA_UM_UnitEvent = 88
DOTA_UM_ParticleManager = 89
DOTA_UM_BotChat = 90
DOTA_UM_HudError = 91
DOTA_UM_ItemPurchased = 92
DOTA_UM_Ping = 93
DOTA_UM_ItemFound = 94
DOTA_UM_CharacterSpeakConcept = 95
DOTA_UM_SwapVerify = 96
DOTA_UM_WorldLine = 97
DOTA_UM_TournamentDrop = 98
DOTA_UM_ItemAlert = 99
DOTA_UM_HalloweenDrops = 100
DOTA_UM_ChatWheel = 101
DOTA_UM_ReceivedXmasGift = 102
CHAT_MESSAGE_INVALID = -1
CHAT_MESSAGE_HERO_KILL = 0
CHAT_MESSAGE_HERO_DENY = 1
CHAT_MESSAGE_BARRACKS_KILL = 2
CHAT_MESSAGE_TOWER_KILL = 3
CHAT_MESSAGE_TOWER_DENY = 4
CHAT_MESSAGE_FIRSTBLOOD = 5
CHAT_MESSAGE_STREAK_KILL = 6
CHAT_MESSAGE_BUYBACK = 7
CHAT_MESSAGE_AEGIS = 8
CHAT_MESSAGE_ROSHAN_KILL = 9
CHAT_MESSAGE_COURIER_LOST = 10
CHAT_MESSAGE_COURIER_RESPAWNED = 11
CHAT_MESSAGE_GLYPH_USED = 12
CHAT_MESSAGE_ITEM_PURCHASE = 13
CHAT_MESSAGE_CONNECT = 14
CHAT_MESSAGE_DISCONNECT = 15
CHAT_MESSAGE_DISCONNECT_WAIT_FOR_RECONNECT = 16
CHAT_MESSAGE_DISCONNECT_TIME_REMAINING = 17
CHAT_MESSAGE_DISCONNECT_TIME_REMAINING_PLURAL = 18
CHAT_MESSAGE_RECONNECT = 19
CHAT_MESSAGE_ABANDON = 20
CHAT_MESSAGE_SAFE_TO_LEAVE = 21
CHAT_MESSAGE_RUNE_PICKUP = 22
CHAT_MESSAGE_RUNE_BOTTLE = 23
CHAT_MESSAGE_INTHEBAG = 24
CHAT_MESSAGE_SECRETSHOP = 25
CHAT_MESSAGE_ITEM_AUTOPURCHASED = 26
CHAT_MESSAGE_ITEMS_COMBINED = 27
CHAT_MESSAGE_SUPER_CREEPS = 28
CHAT_MESSAGE_CANT_USE_ACTION_ITEM = 29
CHAT_MESSAGE_CHARGES_EXHAUSTED = 30
CHAT_MESSAGE_CANTPAUSE = 31
CHAT_MESSAGE_NOPAUSESLEFT = 32
CHAT_MESSAGE_CANTPAUSEYET = 33
CHAT_MESSAGE_PAUSED = 34
CHAT_MESSAGE_UNPAUSE_COUNTDOWN = 35
CHAT_MESSAGE_UNPAUSED = 36
CHAT_MESSAGE_AUTO_UNPAUSED = 37
CHAT_MESSAGE_YOUPAUSED = 38
CHAT_MESSAGE_CANTUNPAUSETEAM = 39
CHAT_MESSAGE_SAFE_TO_LEAVE_ABANDONER = 40
CHAT_MESSAGE_VOICE_TEXT_BANNED = 41
CHAT_MESSAGE_SPECTATORS_WATCHING_THIS_GAME = 42
CHAT_MESSAGE_REPORT_REMINDER = 43
CHAT_MESSAGE_ECON_ITEM = 44
CHAT_MESSAGE_TAUNT = 45
CHAT_MESSAGE_RANDOM = 46
CHAT_MESSAGE_RD_TURN = 47
CHAT_MESSAGE_SAFE_TO_LEAVE_ABANDONER_EARLY = 48
CHAT_MESSAGE_DROP_RATE_BONUS = 49
CHAT_MESSAGE_NO_BATTLE_POINTS = 50
CHAT_MESSAGE_DENIED_AEGIS = 51
CHAT_MESSAGE_INFORMATIONAL = 52
CHAT_MESSAGE_AEGIS_STOLEN = 53
CHAT_MESSAGE_ROSHAN_CANDY = 54
CHAT_MESSAGE_ITEM_GIFTED = 55
CHAT_MESSAGE_HERO_KILL_WITH_GREEVIL = 56
NO_BATTLE_POINTS_WRONG_LOBBY_TYPE = 1
NO_BATTLE_POINTS_PRACTICE_BOTS = 2
NO_BATTLE_POINTS_CHEATS_ENABLED = 3
NO_BATTLE_POINTS_LOW_PRIORITY = 4
COOP_BATTLE_POINTS_RULES = 1
DOTA_COMBATLOG_DAMAGE = 0
DOTA_COMBATLOG_HEAL = 1
DOTA_COMBATLOG_MODIFIER_ADD = 2
DOTA_COMBATLOG_MODIFIER_REMOVE = 3
DOTA_COMBATLOG_DEATH = 4
DOTA_UNIT_SPEECH = 0
DOTA_UNIT_SPEECH_MUTE = 1
DOTA_UNIT_ADD_GESTURE = 2
DOTA_UNIT_REMOVE_GESTURE = 3
DOTA_UNIT_REMOVE_ALL_GESTURES = 4
DOTA_UNIT_FADE_GESTURE = 6
DOTA_UNIT_SPEECH_CLIENTSIDE_RULES = 7
DOTA_PARTICLE_MANAGER_EVENT_CREATE = 0
DOTA_PARTICLE_MANAGER_EVENT_UPDATE = 1
DOTA_PARTICLE_MANAGER_EVENT_UPDATE_FORWARD = 2
DOTA_PARTICLE_MANAGER_EVENT_UPDATE_ORIENTATION = 3
DOTA_PARTICLE_MANAGER_EVENT_UPDATE_FALLBACK = 4
DOTA_PARTICLE_MANAGER_EVENT_UPDATE_ENT = 5
DOTA_PARTICLE_MANAGER_EVENT_UPDATE_OFFSET = 6
DOTA_PARTICLE_MANAGER_EVENT_DESTROY = 7
DOTA_PARTICLE_MANAGER_EVENT_DESTROY_INVOLVING = 8
DOTA_PARTICLE_MANAGER_EVENT_RELEASE = 9
DOTA_PARTICLE_MANAGER_EVENT_LATENCY = 10
DOTA_PARTICLE_MANAGER_EVENT_SHOULD_DRAW = 11
OVERHEAD_ALERT_GOLD = 0
OVERHEAD_ALERT_DENY = 1
OVERHEAD_ALERT_CRITICAL = 2
OVERHEAD_ALERT_XP = 3
OVERHEAD_ALERT_BONUS_SPELL_DAMAGE = 4
OVERHEAD_ALERT_MISS = 5
OVERHEAD_ALERT_DAMAGE = 6
OVERHEAD_ALERT_EVADE = 7
OVERHEAD_ALERT_BLOCK = 8
OVERHEAD_ALERT_BONUS_POISON_DAMAGE = 9
OVERHEAD_ALERT_HEAL = 10
OVERHEAD_ALERT_MANA_ADD = 11
OVERHEAD_ALERT_MANA_LOSS = 12
_CDOTARESPONSEQUERYSERIALIZED_FACT_VALUETYPE = descriptor.EnumDescriptor(
name='ValueType',
full_name='CDOTAResponseQuerySerialized.Fact.ValueType',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='NUMERIC', index=0, number=1,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='STRING', index=1, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=2927,
serialized_end=2963,
)
_CDOTAUSERMSG_AIDEBUGLINE = descriptor.Descriptor(
name='CDOTAUserMsg_AIDebugLine',
full_name='CDOTAUserMsg_AIDebugLine',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='message', full_name='CDOTAUserMsg_AIDebugLine.message', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=126,
serialized_end=169,
)
_CDOTAUSERMSG_PING = descriptor.Descriptor(
name='CDOTAUserMsg_Ping',
full_name='CDOTAUserMsg_Ping',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='message', full_name='CDOTAUserMsg_Ping.message', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=171,
serialized_end=207,
)
_CDOTAUSERMSG_SWAPVERIFY = descriptor.Descriptor(
name='CDOTAUserMsg_SwapVerify',
full_name='CDOTAUserMsg_SwapVerify',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='player_id', full_name='CDOTAUserMsg_SwapVerify.player_id', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=209,
serialized_end=253,
)
_CDOTAUSERMSG_CHATEVENT = descriptor.Descriptor(
name='CDOTAUserMsg_ChatEvent',
full_name='CDOTAUserMsg_ChatEvent',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='type', full_name='CDOTAUserMsg_ChatEvent.type', index=0,
number=1, type=14, cpp_type=8, label=2,
has_default_value=True, default_value=-1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='value', full_name='CDOTAUserMsg_ChatEvent.value', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='playerid_1', full_name='CDOTAUserMsg_ChatEvent.playerid_1', index=2,
number=3, type=17, cpp_type=1, label=1,
has_default_value=True, default_value=-1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='playerid_2', full_name='CDOTAUserMsg_ChatEvent.playerid_2', index=3,
number=4, type=17, cpp_type=1, label=1,
has_default_value=True, default_value=-1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='playerid_3', full_name='CDOTAUserMsg_ChatEvent.playerid_3', index=4,
number=5, type=17, cpp_type=1, label=1,
has_default_value=True, default_value=-1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='playerid_4', full_name='CDOTAUserMsg_ChatEvent.playerid_4', index=5,
number=6, type=17, cpp_type=1, label=1,
has_default_value=True, default_value=-1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='playerid_5', full_name='CDOTAUserMsg_ChatEvent.playerid_5', index=6,
number=7, type=17, cpp_type=1, label=1,
has_default_value=True, default_value=-1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='playerid_6', full_name='CDOTAUserMsg_ChatEvent.playerid_6', index=7,
number=8, type=17, cpp_type=1, label=1,
has_default_value=True, default_value=-1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=256,
serialized_end=495,
)
_CDOTAUSERMSG_COMBATLOGDATA = descriptor.Descriptor(
name='CDOTAUserMsg_CombatLogData',
full_name='CDOTAUserMsg_CombatLogData',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='type', full_name='CDOTAUserMsg_CombatLogData.type', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='target_name', full_name='CDOTAUserMsg_CombatLogData.target_name', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='attacker_name', full_name='CDOTAUserMsg_CombatLogData.attacker_name', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='attacker_illusion', full_name='CDOTAUserMsg_CombatLogData.attacker_illusion', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='target_illusion', full_name='CDOTAUserMsg_CombatLogData.target_illusion', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='inflictor_name', full_name='CDOTAUserMsg_CombatLogData.inflictor_name', index=5,
number=6, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='value', full_name='CDOTAUserMsg_CombatLogData.value', index=6,
number=7, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='health', full_name='CDOTAUserMsg_CombatLogData.health', index=7,
number=8, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='time', full_name='CDOTAUserMsg_CombatLogData.time', index=8,
number=9, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=498,
serialized_end=751,
)
_CDOTAUSERMSG_COMBATLOGSHOWDEATH = descriptor.Descriptor(
name='CDOTAUserMsg_CombatLogShowDeath',
full_name='CDOTAUserMsg_CombatLogShowDeath',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=753,
serialized_end=786,
)
_CDOTAUSERMSG_BOTCHAT = descriptor.Descriptor(
name='CDOTAUserMsg_BotChat',
full_name='CDOTAUserMsg_BotChat',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='player_id', full_name='CDOTAUserMsg_BotChat.player_id', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='format', full_name='CDOTAUserMsg_BotChat.format', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='message', full_name='CDOTAUserMsg_BotChat.message', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='target', full_name='CDOTAUserMsg_BotChat.target', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=788,
serialized_end=878,
)
_CDOTAUSERMSG_COMBATHEROPOSITIONS = descriptor.Descriptor(
name='CDOTAUserMsg_CombatHeroPositions',
full_name='CDOTAUserMsg_CombatHeroPositions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='index', full_name='CDOTAUserMsg_CombatHeroPositions.index', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='time', full_name='CDOTAUserMsg_CombatHeroPositions.time', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='world_pos', full_name='CDOTAUserMsg_CombatHeroPositions.world_pos', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='health', full_name='CDOTAUserMsg_CombatHeroPositions.health', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=880,
serialized_end=993,
)
_CDOTAUSERMSG_MINIKILLCAMINFO_ATTACKER_ABILITY = descriptor.Descriptor(
name='Ability',
full_name='CDOTAUserMsg_MiniKillCamInfo.Attacker.Ability',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='ability', full_name='CDOTAUserMsg_MiniKillCamInfo.Attacker.Ability.ability', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='damage', full_name='CDOTAUserMsg_MiniKillCamInfo.Attacker.Ability.damage', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1207,
serialized_end=1249,
)
_CDOTAUSERMSG_MINIKILLCAMINFO_ATTACKER = descriptor.Descriptor(
name='Attacker',
full_name='CDOTAUserMsg_MiniKillCamInfo.Attacker',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='attacker', full_name='CDOTAUserMsg_MiniKillCamInfo.Attacker.attacker', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='total_damage', full_name='CDOTAUserMsg_MiniKillCamInfo.Attacker.total_damage', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='abilities', full_name='CDOTAUserMsg_MiniKillCamInfo.Attacker.abilities', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_CDOTAUSERMSG_MINIKILLCAMINFO_ATTACKER_ABILITY, ],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1088,
serialized_end=1249,
)
_CDOTAUSERMSG_MINIKILLCAMINFO = descriptor.Descriptor(
name='CDOTAUserMsg_MiniKillCamInfo',
full_name='CDOTAUserMsg_MiniKillCamInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='attackers', full_name='CDOTAUserMsg_MiniKillCamInfo.attackers', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_CDOTAUSERMSG_MINIKILLCAMINFO_ATTACKER, ],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=996,
serialized_end=1249,
)
_CDOTAUSERMSG_GLOBALLIGHTCOLOR = descriptor.Descriptor(
name='CDOTAUserMsg_GlobalLightColor',
full_name='CDOTAUserMsg_GlobalLightColor',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='color', full_name='CDOTAUserMsg_GlobalLightColor.color', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='duration', full_name='CDOTAUserMsg_GlobalLightColor.duration', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1251,
serialized_end=1315,
)
_CDOTAUSERMSG_GLOBALLIGHTDIRECTION = descriptor.Descriptor(
name='CDOTAUserMsg_GlobalLightDirection',
full_name='CDOTAUserMsg_GlobalLightDirection',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='direction', full_name='CDOTAUserMsg_GlobalLightDirection.direction', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='duration', full_name='CDOTAUserMsg_GlobalLightDirection.duration', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1317,
serialized_end=1402,
)
_CDOTAUSERMSG_LOCATIONPING = descriptor.Descriptor(
name='CDOTAUserMsg_LocationPing',
full_name='CDOTAUserMsg_LocationPing',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='player_id', full_name='CDOTAUserMsg_LocationPing.player_id', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='location_ping', full_name='CDOTAUserMsg_LocationPing.location_ping', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1404,
serialized_end=1497,
)
_CDOTAUSERMSG_ITEMALERT = descriptor.Descriptor(
name='CDOTAUserMsg_ItemAlert',
full_name='CDOTAUserMsg_ItemAlert',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='player_id', full_name='CDOTAUserMsg_ItemAlert.player_id', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='item_alert', full_name='CDOTAUserMsg_ItemAlert.item_alert', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1499,
serialized_end=1583,
)
_CDOTAUSERMSG_MINIMAPEVENT = descriptor.Descriptor(
name='CDOTAUserMsg_MinimapEvent',
full_name='CDOTAUserMsg_MinimapEvent',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='event_type', full_name='CDOTAUserMsg_MinimapEvent.event_type', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='entity_handle', full_name='CDOTAUserMsg_MinimapEvent.entity_handle', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='x', full_name='CDOTAUserMsg_MinimapEvent.x', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='y', full_name='CDOTAUserMsg_MinimapEvent.y', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='duration', full_name='CDOTAUserMsg_MinimapEvent.duration', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1585,
serialized_end=1695,
)
_CDOTAUSERMSG_MAPLINE = descriptor.Descriptor(
name='CDOTAUserMsg_MapLine',
full_name='CDOTAUserMsg_MapLine',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='player_id', full_name='CDOTAUserMsg_MapLine.player_id', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='mapline', full_name='CDOTAUserMsg_MapLine.mapline', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1697,
serialized_end=1774,
)
_CDOTAUSERMSG_MINIMAPDEBUGPOINT = descriptor.Descriptor(
name='CDOTAUserMsg_MinimapDebugPoint',
full_name='CDOTAUserMsg_MinimapDebugPoint',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='location', full_name='CDOTAUserMsg_MinimapDebugPoint.location', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='color', full_name='CDOTAUserMsg_MinimapDebugPoint.color', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='size', full_name='CDOTAUserMsg_MinimapDebugPoint.size', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='duration', full_name='CDOTAUserMsg_MinimapDebugPoint.duration', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1776,
serialized_end=1886,
)
_CDOTAUSERMSG_CREATELINEARPROJECTILE = descriptor.Descriptor(
name='CDOTAUserMsg_CreateLinearProjectile',
full_name='CDOTAUserMsg_CreateLinearProjectile',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='origin', full_name='CDOTAUserMsg_CreateLinearProjectile.origin', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='velocity', full_name='CDOTAUserMsg_CreateLinearProjectile.velocity', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='latency', full_name='CDOTAUserMsg_CreateLinearProjectile.latency', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='entindex', full_name='CDOTAUserMsg_CreateLinearProjectile.entindex', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='particle_index', full_name='CDOTAUserMsg_CreateLinearProjectile.particle_index', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='handle', full_name='CDOTAUserMsg_CreateLinearProjectile.handle', index=5,
number=6, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1889,
serialized_end=2063,
)
_CDOTAUSERMSG_DESTROYLINEARPROJECTILE = descriptor.Descriptor(
name='CDOTAUserMsg_DestroyLinearProjectile',
full_name='CDOTAUserMsg_DestroyLinearProjectile',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='handle', full_name='CDOTAUserMsg_DestroyLinearProjectile.handle', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=2065,
serialized_end=2119,
)
_CDOTAUSERMSG_DODGETRACKINGPROJECTILES = descriptor.Descriptor(
name='CDOTAUserMsg_DodgeTrackingProjectiles',
full_name='CDOTAUserMsg_DodgeTrackingProjectiles',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='entindex', full_name='CDOTAUserMsg_DodgeTrackingProjectiles.entindex', index=0,
number=1, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=2121,
serialized_end=2178,
)
_CDOTAUSERMSG_SPECTATORPLAYERCLICK = descriptor.Descriptor(
name='CDOTAUserMsg_SpectatorPlayerClick',
full_name='CDOTAUserMsg_SpectatorPlayerClick',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='entindex', full_name='CDOTAUserMsg_SpectatorPlayerClick.entindex', index=0,
number=1, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='order_type', full_name='CDOTAUserMsg_SpectatorPlayerClick.order_type', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='target_index', full_name='CDOTAUserMsg_SpectatorPlayerClick.target_index', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=2180,
serialized_end=2275,
)
_CDOTAUSERMSG_NEVERMOREREQUIEM = descriptor.Descriptor(
name='CDOTAUserMsg_NevermoreRequiem',
full_name='CDOTAUserMsg_NevermoreRequiem',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='entity_handle', full_name='CDOTAUserMsg_NevermoreRequiem.entity_handle', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='lines', full_name='CDOTAUserMsg_NevermoreRequiem.lines', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='origin', full_name='CDOTAUserMsg_NevermoreRequiem.origin', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=2277,
serialized_end=2375,
)
_CDOTAUSERMSG_INVALIDCOMMAND = descriptor.Descriptor(
name='CDOTAUserMsg_InvalidCommand',
full_name='CDOTAUserMsg_InvalidCommand',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='message', full_name='CDOTAUserMsg_InvalidCommand.message', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=2377,
serialized_end=2423,
)
_CDOTAUSERMSG_HUDERROR = descriptor.Descriptor(
name='CDOTAUserMsg_HudError',
full_name='CDOTAUserMsg_HudError',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='order_id', full_name='CDOTAUserMsg_HudError.order_id', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=2425,
serialized_end=2466,
)
_CDOTAUSERMSG_SHAREDCOOLDOWN = descriptor.Descriptor(
name='CDOTAUserMsg_SharedCooldown',
full_name='CDOTAUserMsg_SharedCooldown',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='entindex', full_name='CDOTAUserMsg_SharedCooldown.entindex', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='name', full_name='CDOTAUserMsg_SharedCooldown.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='cooldown', full_name='CDOTAUserMsg_SharedCooldown.cooldown', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='name_index', full_name='CDOTAUserMsg_SharedCooldown.name_index', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=2468,
serialized_end=2567,
)
_CDOTAUSERMSG_SETNEXTAUTOBUYITEM = descriptor.Descriptor(
name='CDOTAUserMsg_SetNextAutobuyItem',
full_name='CDOTAUserMsg_SetNextAutobuyItem',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='name', full_name='CDOTAUserMsg_SetNextAutobuyItem.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=2569,
serialized_end=2616,
)
_CDOTAUSERMSG_HALLOWEENDROPS = descriptor.Descriptor(
name='CDOTAUserMsg_HalloweenDrops',
full_name='CDOTAUserMsg_HalloweenDrops',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='item_defs', full_name='CDOTAUserMsg_HalloweenDrops.item_defs', index=0,
number=1, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='player_ids', full_name='CDOTAUserMsg_HalloweenDrops.player_ids', index=1,
number=2, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='prize_list', full_name='CDOTAUserMsg_HalloweenDrops.prize_list', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=2618,
serialized_end=2706,
)
_CDOTARESPONSEQUERYSERIALIZED_FACT = descriptor.Descriptor(
name='Fact',
full_name='CDOTAResponseQuerySerialized.Fact',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='key', full_name='CDOTAResponseQuerySerialized.Fact.key', index=0,
number=1, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='valtype', full_name='CDOTAResponseQuerySerialized.Fact.valtype', index=1,
number=2, type=14, cpp_type=8, label=2,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='val_numeric', full_name='CDOTAResponseQuerySerialized.Fact.val_numeric', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='val_string', full_name='CDOTAResponseQuerySerialized.Fact.val_string', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_CDOTARESPONSEQUERYSERIALIZED_FACT_VALUETYPE,
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=2793,
serialized_end=2963,
)
_CDOTARESPONSEQUERYSERIALIZED = descriptor.Descriptor(
name='CDOTAResponseQuerySerialized',
full_name='CDOTAResponseQuerySerialized',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='facts', full_name='CDOTAResponseQuerySerialized.facts', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_CDOTARESPONSEQUERYSERIALIZED_FACT, ],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=2709,
serialized_end=2963,
)
_CDOTASPEECHMATCHONCLIENT = descriptor.Descriptor(
name='CDOTASpeechMatchOnClient',
full_name='CDOTASpeechMatchOnClient',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='concept', full_name='CDOTASpeechMatchOnClient.concept', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='recipient_type', full_name='CDOTASpeechMatchOnClient.recipient_type', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='responsequery', full_name='CDOTASpeechMatchOnClient.responsequery', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='randomseed', full_name='CDOTASpeechMatchOnClient.randomseed', index=3,
number=4, type=15, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=2966,
serialized_end=3110,
)
_CDOTAUSERMSG_UNITEVENT_SPEECH = descriptor.Descriptor(
name='Speech',
full_name='CDOTAUserMsg_UnitEvent.Speech',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='concept', full_name='CDOTAUserMsg_UnitEvent.Speech.concept', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='response', full_name='CDOTAUserMsg_UnitEvent.Speech.response', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='recipient_type', full_name='CDOTAUserMsg_UnitEvent.Speech.recipient_type', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='level', full_name='CDOTAUserMsg_UnitEvent.Speech.level', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='muteable', full_name='CDOTAUserMsg_UnitEvent.Speech.muteable', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=3621,
serialized_end=3728,
)
_CDOTAUSERMSG_UNITEVENT_SPEECHMUTE = descriptor.Descriptor(
name='SpeechMute',
full_name='CDOTAUserMsg_UnitEvent.SpeechMute',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='delay', full_name='CDOTAUserMsg_UnitEvent.SpeechMute.delay', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=0.5,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=3730,
serialized_end=3762,
)
_CDOTAUSERMSG_UNITEVENT_ADDGESTURE = descriptor.Descriptor(
name='AddGesture',
full_name='CDOTAUserMsg_UnitEvent.AddGesture',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='activity', full_name='CDOTAUserMsg_UnitEvent.AddGesture.activity', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=-1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='slot', full_name='CDOTAUserMsg_UnitEvent.AddGesture.slot', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='fade_in', full_name='CDOTAUserMsg_UnitEvent.AddGesture.fade_in', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='fade_out', full_name='CDOTAUserMsg_UnitEvent.AddGesture.fade_out', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=0.1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=3764,
serialized_end=3875,
)
_CDOTAUSERMSG_UNITEVENT_REMOVEGESTURE = descriptor.Descriptor(
name='RemoveGesture',
full_name='CDOTAUserMsg_UnitEvent.RemoveGesture',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='activity', full_name='CDOTAUserMsg_UnitEvent.RemoveGesture.activity', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=-1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=3877,
serialized_end=3934,
)
_CDOTAUSERMSG_UNITEVENT_BLOODIMPACT = descriptor.Descriptor(
name='BloodImpact',
full_name='CDOTAUserMsg_UnitEvent.BloodImpact',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='scale', full_name='CDOTAUserMsg_UnitEvent.BloodImpact.scale', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='x_normal', full_name='CDOTAUserMsg_UnitEvent.BloodImpact.x_normal', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='y_normal', full_name='CDOTAUserMsg_UnitEvent.BloodImpact.y_normal', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=3936,
serialized_end=4000,
)
_CDOTAUSERMSG_UNITEVENT_FADEGESTURE = descriptor.Descriptor(
name='FadeGesture',
full_name='CDOTAUserMsg_UnitEvent.FadeGesture',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='activity', full_name='CDOTAUserMsg_UnitEvent.FadeGesture.activity', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=-1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=4002,
serialized_end=4057,
)
_CDOTAUSERMSG_UNITEVENT = descriptor.Descriptor(
name='CDOTAUserMsg_UnitEvent',
full_name='CDOTAUserMsg_UnitEvent',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='msg_type', full_name='CDOTAUserMsg_UnitEvent.msg_type', index=0,
number=1, type=14, cpp_type=8, label=2,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='entity_index', full_name='CDOTAUserMsg_UnitEvent.entity_index', index=1,
number=2, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='speech', full_name='CDOTAUserMsg_UnitEvent.speech', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='speech_mute', full_name='CDOTAUserMsg_UnitEvent.speech_mute', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='add_gesture', full_name='CDOTAUserMsg_UnitEvent.add_gesture', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='remove_gesture', full_name='CDOTAUserMsg_UnitEvent.remove_gesture', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='blood_impact', full_name='CDOTAUserMsg_UnitEvent.blood_impact', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='fade_gesture', full_name='CDOTAUserMsg_UnitEvent.fade_gesture', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='speech_match_on_client', full_name='CDOTAUserMsg_UnitEvent.speech_match_on_client', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_CDOTAUSERMSG_UNITEVENT_SPEECH, _CDOTAUSERMSG_UNITEVENT_SPEECHMUTE, _CDOTAUSERMSG_UNITEVENT_ADDGESTURE, _CDOTAUSERMSG_UNITEVENT_REMOVEGESTURE, _CDOTAUSERMSG_UNITEVENT_BLOODIMPACT, _CDOTAUSERMSG_UNITEVENT_FADEGESTURE, ],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=3113,
serialized_end=4057,
)
_CDOTAUSERMSG_ITEMPURCHASED = descriptor.Descriptor(
name='CDOTAUserMsg_ItemPurchased',
full_name='CDOTAUserMsg_ItemPurchased',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='item_index', full_name='CDOTAUserMsg_ItemPurchased.item_index', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=4059,
serialized_end=4107,
)
_CDOTAUSERMSG_ITEMFOUND = descriptor.Descriptor(
name='CDOTAUserMsg_ItemFound',
full_name='CDOTAUserMsg_ItemFound',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='player', full_name='CDOTAUserMsg_ItemFound.player', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='quality', full_name='CDOTAUserMsg_ItemFound.quality', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='rarity', full_name='CDOTAUserMsg_ItemFound.rarity', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='method', full_name='CDOTAUserMsg_ItemFound.method', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='itemdef', full_name='CDOTAUserMsg_ItemFound.itemdef', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=4109,
serialized_end=4215,
)
_CDOTAUSERMSG_PARTICLEMANAGER_RELEASEPARTICLEINDEX = descriptor.Descriptor(
name='ReleaseParticleIndex',
full_name='CDOTAUserMsg_ParticleManager.ReleaseParticleIndex',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=5321,
serialized_end=5343,
)
_CDOTAUSERMSG_PARTICLEMANAGER_CREATEPARTICLE = descriptor.Descriptor(
name='CreateParticle',
full_name='CDOTAUserMsg_ParticleManager.CreateParticle',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='particle_name_index', full_name='CDOTAUserMsg_ParticleManager.CreateParticle.particle_name_index', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='attach_type', full_name='CDOTAUserMsg_ParticleManager.CreateParticle.attach_type', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='entity_handle', full_name='CDOTAUserMsg_ParticleManager.CreateParticle.entity_handle', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=5345,
serialized_end=5434,
)
_CDOTAUSERMSG_PARTICLEMANAGER_DESTROYPARTICLE = descriptor.Descriptor(
name='DestroyParticle',
full_name='CDOTAUserMsg_ParticleManager.DestroyParticle',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='destroy_immediately', full_name='CDOTAUserMsg_ParticleManager.DestroyParticle.destroy_immediately', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=5436,
serialized_end=5482,
)
_CDOTAUSERMSG_PARTICLEMANAGER_DESTROYPARTICLEINVOLVING = descriptor.Descriptor(
name='DestroyParticleInvolving',
full_name='CDOTAUserMsg_ParticleManager.DestroyParticleInvolving',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='destroy_immediately', full_name='CDOTAUserMsg_ParticleManager.DestroyParticleInvolving.destroy_immediately', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='entity_handle', full_name='CDOTAUserMsg_ParticleManager.DestroyParticleInvolving.entity_handle', index=1,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=5484,
serialized_end=5562,
)
_CDOTAUSERMSG_PARTICLEMANAGER_UPDATEPARTICLE = descriptor.Descriptor(
name='UpdateParticle',
full_name='CDOTAUserMsg_ParticleManager.UpdateParticle',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='control_point', full_name='CDOTAUserMsg_ParticleManager.UpdateParticle.control_point', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='position', full_name='CDOTAUserMsg_ParticleManager.UpdateParticle.position', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=5564,
serialized_end=5634,
)
_CDOTAUSERMSG_PARTICLEMANAGER_UPDATEPARTICLEFWD = descriptor.Descriptor(
name='UpdateParticleFwd',
full_name='CDOTAUserMsg_ParticleManager.UpdateParticleFwd',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='control_point', full_name='CDOTAUserMsg_ParticleManager.UpdateParticleFwd.control_point', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='forward', full_name='CDOTAUserMsg_ParticleManager.UpdateParticleFwd.forward', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=5636,
serialized_end=5708,
)
_CDOTAUSERMSG_PARTICLEMANAGER_UPDATEPARTICLEORIENT = descriptor.Descriptor(
name='UpdateParticleOrient',
full_name='CDOTAUserMsg_ParticleManager.UpdateParticleOrient',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='control_point', full_name='CDOTAUserMsg_ParticleManager.UpdateParticleOrient.control_point', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='forward', full_name='CDOTAUserMsg_ParticleManager.UpdateParticleOrient.forward', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='right', full_name='CDOTAUserMsg_ParticleManager.UpdateParticleOrient.right', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='up', full_name='CDOTAUserMsg_ParticleManager.UpdateParticleOrient.up', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=5711,
serialized_end=5839,
)
_CDOTAUSERMSG_PARTICLEMANAGER_UPDATEPARTICLEFALLBACK = descriptor.Descriptor(
name='UpdateParticleFallback',
full_name='CDOTAUserMsg_ParticleManager.UpdateParticleFallback',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='control_point', full_name='CDOTAUserMsg_ParticleManager.UpdateParticleFallback.control_point', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='position', full_name='CDOTAUserMsg_ParticleManager.UpdateParticleFallback.position', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=5841,
serialized_end=5919,
)
_CDOTAUSERMSG_PARTICLEMANAGER_UPDATEPARTICLEOFFSET = descriptor.Descriptor(
name='UpdateParticleOffset',
full_name='CDOTAUserMsg_ParticleManager.UpdateParticleOffset',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='control_point', full_name='CDOTAUserMsg_ParticleManager.UpdateParticleOffset.control_point', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='origin_offset', full_name='CDOTAUserMsg_ParticleManager.UpdateParticleOffset.origin_offset', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=5921,
serialized_end=6002,
)
_CDOTAUSERMSG_PARTICLEMANAGER_UPDATEPARTICLEENT = descriptor.Descriptor(
name='UpdateParticleEnt',
full_name='CDOTAUserMsg_ParticleManager.UpdateParticleEnt',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='control_point', full_name='CDOTAUserMsg_ParticleManager.UpdateParticleEnt.control_point', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='entity_handle', full_name='CDOTAUserMsg_ParticleManager.UpdateParticleEnt.entity_handle', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='attach_type', full_name='CDOTAUserMsg_ParticleManager.UpdateParticleEnt.attach_type', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='attachment', full_name='CDOTAUserMsg_ParticleManager.UpdateParticleEnt.attachment', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='fallback_position', full_name='CDOTAUserMsg_ParticleManager.UpdateParticleEnt.fallback_position', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=6005,
serialized_end=6151,
)
_CDOTAUSERMSG_PARTICLEMANAGER_UPDATEPARTICLELATENCY = descriptor.Descriptor(
name='UpdateParticleLatency',
full_name='CDOTAUserMsg_ParticleManager.UpdateParticleLatency',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='player_latency', full_name='CDOTAUserMsg_ParticleManager.UpdateParticleLatency.player_latency', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='tick', full_name='CDOTAUserMsg_ParticleManager.UpdateParticleLatency.tick', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=6153,
serialized_end=6214,
)
_CDOTAUSERMSG_PARTICLEMANAGER_UPDATEPARTICLESHOULDDRAW = descriptor.Descriptor(
name='UpdateParticleShouldDraw',
full_name='CDOTAUserMsg_ParticleManager.UpdateParticleShouldDraw',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='should_draw', full_name='CDOTAUserMsg_ParticleManager.UpdateParticleShouldDraw.should_draw', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=6216,
serialized_end=6263,
)
_CDOTAUSERMSG_PARTICLEMANAGER = descriptor.Descriptor(
name='CDOTAUserMsg_ParticleManager',
full_name='CDOTAUserMsg_ParticleManager',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='type', full_name='CDOTAUserMsg_ParticleManager.type', index=0,
number=1, type=14, cpp_type=8, label=2,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='index', full_name='CDOTAUserMsg_ParticleManager.index', index=1,
number=2, type=13, cpp_type=3, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='release_particle_index', full_name='CDOTAUserMsg_ParticleManager.release_particle_index', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='create_particle', full_name='CDOTAUserMsg_ParticleManager.create_particle', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='destroy_particle', full_name='CDOTAUserMsg_ParticleManager.destroy_particle', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='destroy_particle_involving', full_name='CDOTAUserMsg_ParticleManager.destroy_particle_involving', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='update_particle', full_name='CDOTAUserMsg_ParticleManager.update_particle', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='update_particle_fwd', full_name='CDOTAUserMsg_ParticleManager.update_particle_fwd', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='update_particle_orient', full_name='CDOTAUserMsg_ParticleManager.update_particle_orient', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='update_particle_fallback', full_name='CDOTAUserMsg_ParticleManager.update_particle_fallback', index=9,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='update_particle_offset', full_name='CDOTAUserMsg_ParticleManager.update_particle_offset', index=10,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='update_particle_ent', full_name='CDOTAUserMsg_ParticleManager.update_particle_ent', index=11,
number=12, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='update_particle_latency', full_name='CDOTAUserMsg_ParticleManager.update_particle_latency', index=12,
number=13, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='update_particle_should_draw', full_name='CDOTAUserMsg_ParticleManager.update_particle_should_draw', index=13,
number=14, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_CDOTAUSERMSG_PARTICLEMANAGER_RELEASEPARTICLEINDEX, _CDOTAUSERMSG_PARTICLEMANAGER_CREATEPARTICLE, _CDOTAUSERMSG_PARTICLEMANAGER_DESTROYPARTICLE, _CDOTAUSERMSG_PARTICLEMANAGER_DESTROYPARTICLEINVOLVING, _CDOTAUSERMSG_PARTICLEMANAGER_UPDATEPARTICLE, _CDOTAUSERMSG_PARTICLEMANAGER_UPDATEPARTICLEFWD, _CDOTAUSERMSG_PARTICLEMANAGER_UPDATEPARTICLEORIENT, _CDOTAUSERMSG_PARTICLEMANAGER_UPDATEPARTICLEFALLBACK, _CDOTAUSERMSG_PARTICLEMANAGER_UPDATEPARTICLEOFFSET, _CDOTAUSERMSG_PARTICLEMANAGER_UPDATEPARTICLEENT, _CDOTAUSERMSG_PARTICLEMANAGER_UPDATEPARTICLELATENCY, _CDOTAUSERMSG_PARTICLEMANAGER_UPDATEPARTICLESHOULDDRAW, ],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=4218,
serialized_end=6263,
)
_CDOTAUSERMSG_OVERHEADEVENT = descriptor.Descriptor(
name='CDOTAUserMsg_OverheadEvent',
full_name='CDOTAUserMsg_OverheadEvent',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='message_type', full_name='CDOTAUserMsg_OverheadEvent.message_type', index=0,
number=1, type=14, cpp_type=8, label=2,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='value', full_name='CDOTAUserMsg_OverheadEvent.value', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='target_player_entindex', full_name='CDOTAUserMsg_OverheadEvent.target_player_entindex', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='target_entindex', full_name='CDOTAUserMsg_OverheadEvent.target_entindex', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='source_player_entindex', full_name='CDOTAUserMsg_OverheadEvent.source_player_entindex', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=6266,
serialized_end=6463,
)
_CDOTAUSERMSG_TUTORIALTIPINFO = descriptor.Descriptor(
name='CDOTAUserMsg_TutorialTipInfo',
full_name='CDOTAUserMsg_TutorialTipInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='name', full_name='CDOTAUserMsg_TutorialTipInfo.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='progress', full_name='CDOTAUserMsg_TutorialTipInfo.progress', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=6465,
serialized_end=6527,
)
_CDOTAUSERMSG_WORLDLINE = descriptor.Descriptor(
name='CDOTAUserMsg_WorldLine',
full_name='CDOTAUserMsg_WorldLine',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='player_id', full_name='CDOTAUserMsg_WorldLine.player_id', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='worldline', full_name='CDOTAUserMsg_WorldLine.worldline', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=6529,
serialized_end=6612,
)
_CDOTAUSERMSG_TOURNAMENTDROP = descriptor.Descriptor(
name='CDOTAUserMsg_TournamentDrop',
full_name='CDOTAUserMsg_TournamentDrop',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='winner_name', full_name='CDOTAUserMsg_TournamentDrop.winner_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='event_type', full_name='CDOTAUserMsg_TournamentDrop.event_type', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=6614,
serialized_end=6684,
)
_CDOTAUSERMSG_CHATWHEEL = descriptor.Descriptor(
name='CDOTAUserMsg_ChatWheel',
full_name='CDOTAUserMsg_ChatWheel',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='chat_message', full_name='CDOTAUserMsg_ChatWheel.chat_message', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='player_id', full_name='CDOTAUserMsg_ChatWheel.player_id', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='account_id', full_name='CDOTAUserMsg_ChatWheel.account_id', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=6686,
serialized_end=6810,
)
_CDOTAUSERMSG_RECEIVEDXMASGIFT = descriptor.Descriptor(
name='CDOTAUserMsg_ReceivedXmasGift',
full_name='CDOTAUserMsg_ReceivedXmasGift',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='player_id', full_name='CDOTAUserMsg_ReceivedXmasGift.player_id', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='item_name', full_name='CDOTAUserMsg_ReceivedXmasGift.item_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='inventory_slot', full_name='CDOTAUserMsg_ReceivedXmasGift.inventory_slot', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=6812,
serialized_end=6905,
)
_CDOTAUSERMSG_CHATEVENT.fields_by_name['type'].enum_type = _DOTA_CHAT_MESSAGE
_CDOTAUSERMSG_COMBATLOGDATA.fields_by_name['type'].enum_type = _DOTA_COMBATLOG_TYPES
_CDOTAUSERMSG_COMBATHEROPOSITIONS.fields_by_name['world_pos'].message_type = netmessages_pb2._CMSGVECTOR2D
_CDOTAUSERMSG_MINIKILLCAMINFO_ATTACKER_ABILITY.containing_type = _CDOTAUSERMSG_MINIKILLCAMINFO_ATTACKER;
_CDOTAUSERMSG_MINIKILLCAMINFO_ATTACKER.fields_by_name['abilities'].message_type = _CDOTAUSERMSG_MINIKILLCAMINFO_ATTACKER_ABILITY
_CDOTAUSERMSG_MINIKILLCAMINFO_ATTACKER.containing_type = _CDOTAUSERMSG_MINIKILLCAMINFO;
_CDOTAUSERMSG_MINIKILLCAMINFO.fields_by_name['attackers'].message_type = _CDOTAUSERMSG_MINIKILLCAMINFO_ATTACKER
_CDOTAUSERMSG_GLOBALLIGHTDIRECTION.fields_by_name['direction'].message_type = netmessages_pb2._CMSGVECTOR
_CDOTAUSERMSG_LOCATIONPING.fields_by_name['location_ping'].message_type = dota_commonmessages_pb2._CDOTAMSG_LOCATIONPING
_CDOTAUSERMSG_ITEMALERT.fields_by_name['item_alert'].message_type = dota_commonmessages_pb2._CDOTAMSG_ITEMALERT
_CDOTAUSERMSG_MAPLINE.fields_by_name['mapline'].message_type = dota_commonmessages_pb2._CDOTAMSG_MAPLINE
_CDOTAUSERMSG_MINIMAPDEBUGPOINT.fields_by_name['location'].message_type = netmessages_pb2._CMSGVECTOR
_CDOTAUSERMSG_CREATELINEARPROJECTILE.fields_by_name['origin'].message_type = netmessages_pb2._CMSGVECTOR
_CDOTAUSERMSG_CREATELINEARPROJECTILE.fields_by_name['velocity'].message_type = netmessages_pb2._CMSGVECTOR2D
_CDOTAUSERMSG_NEVERMOREREQUIEM.fields_by_name['origin'].message_type = netmessages_pb2._CMSGVECTOR
_CDOTARESPONSEQUERYSERIALIZED_FACT.fields_by_name['valtype'].enum_type = _CDOTARESPONSEQUERYSERIALIZED_FACT_VALUETYPE
_CDOTARESPONSEQUERYSERIALIZED_FACT.containing_type = _CDOTARESPONSEQUERYSERIALIZED;
_CDOTARESPONSEQUERYSERIALIZED_FACT_VALUETYPE.containing_type = _CDOTARESPONSEQUERYSERIALIZED_FACT;
_CDOTARESPONSEQUERYSERIALIZED.fields_by_name['facts'].message_type = _CDOTARESPONSEQUERYSERIALIZED_FACT
_CDOTASPEECHMATCHONCLIENT.fields_by_name['responsequery'].message_type = _CDOTARESPONSEQUERYSERIALIZED
_CDOTAUSERMSG_UNITEVENT_SPEECH.containing_type = _CDOTAUSERMSG_UNITEVENT;
_CDOTAUSERMSG_UNITEVENT_SPEECHMUTE.containing_type = _CDOTAUSERMSG_UNITEVENT;
_CDOTAUSERMSG_UNITEVENT_ADDGESTURE.fields_by_name['activity'].enum_type = ai_activity_pb2._ACTIVITY
_CDOTAUSERMSG_UNITEVENT_ADDGESTURE.containing_type = _CDOTAUSERMSG_UNITEVENT;
_CDOTAUSERMSG_UNITEVENT_REMOVEGESTURE.fields_by_name['activity'].enum_type = ai_activity_pb2._ACTIVITY
_CDOTAUSERMSG_UNITEVENT_REMOVEGESTURE.containing_type = _CDOTAUSERMSG_UNITEVENT;
_CDOTAUSERMSG_UNITEVENT_BLOODIMPACT.containing_type = _CDOTAUSERMSG_UNITEVENT;
_CDOTAUSERMSG_UNITEVENT_FADEGESTURE.fields_by_name['activity'].enum_type = ai_activity_pb2._ACTIVITY
_CDOTAUSERMSG_UNITEVENT_FADEGESTURE.containing_type = _CDOTAUSERMSG_UNITEVENT;
_CDOTAUSERMSG_UNITEVENT.fields_by_name['msg_type'].enum_type = _EDOTAENTITYMESSAGES
_CDOTAUSERMSG_UNITEVENT.fields_by_name['speech'].message_type = _CDOTAUSERMSG_UNITEVENT_SPEECH
_CDOTAUSERMSG_UNITEVENT.fields_by_name['speech_mute'].message_type = _CDOTAUSERMSG_UNITEVENT_SPEECHMUTE
_CDOTAUSERMSG_UNITEVENT.fields_by_name['add_gesture'].message_type = _CDOTAUSERMSG_UNITEVENT_ADDGESTURE
_CDOTAUSERMSG_UNITEVENT.fields_by_name['remove_gesture'].message_type = _CDOTAUSERMSG_UNITEVENT_REMOVEGESTURE
_CDOTAUSERMSG_UNITEVENT.fields_by_name['blood_impact'].message_type = _CDOTAUSERMSG_UNITEVENT_BLOODIMPACT
_CDOTAUSERMSG_UNITEVENT.fields_by_name['fade_gesture'].message_type = _CDOTAUSERMSG_UNITEVENT_FADEGESTURE
_CDOTAUSERMSG_UNITEVENT.fields_by_name['speech_match_on_client'].message_type = _CDOTASPEECHMATCHONCLIENT
_CDOTAUSERMSG_PARTICLEMANAGER_RELEASEPARTICLEINDEX.containing_type = _CDOTAUSERMSG_PARTICLEMANAGER;
_CDOTAUSERMSG_PARTICLEMANAGER_CREATEPARTICLE.containing_type = _CDOTAUSERMSG_PARTICLEMANAGER;
_CDOTAUSERMSG_PARTICLEMANAGER_DESTROYPARTICLE.containing_type = _CDOTAUSERMSG_PARTICLEMANAGER;
_CDOTAUSERMSG_PARTICLEMANAGER_DESTROYPARTICLEINVOLVING.containing_type = _CDOTAUSERMSG_PARTICLEMANAGER;
_CDOTAUSERMSG_PARTICLEMANAGER_UPDATEPARTICLE.fields_by_name['position'].message_type = netmessages_pb2._CMSGVECTOR
_CDOTAUSERMSG_PARTICLEMANAGER_UPDATEPARTICLE.containing_type = _CDOTAUSERMSG_PARTICLEMANAGER;
_CDOTAUSERMSG_PARTICLEMANAGER_UPDATEPARTICLEFWD.fields_by_name['forward'].message_type = netmessages_pb2._CMSGVECTOR
_CDOTAUSERMSG_PARTICLEMANAGER_UPDATEPARTICLEFWD.containing_type = _CDOTAUSERMSG_PARTICLEMANAGER;
_CDOTAUSERMSG_PARTICLEMANAGER_UPDATEPARTICLEORIENT.fields_by_name['forward'].message_type = netmessages_pb2._CMSGVECTOR
_CDOTAUSERMSG_PARTICLEMANAGER_UPDATEPARTICLEORIENT.fields_by_name['right'].message_type = netmessages_pb2._CMSGVECTOR
_CDOTAUSERMSG_PARTICLEMANAGER_UPDATEPARTICLEORIENT.fields_by_name['up'].message_type = netmessages_pb2._CMSGVECTOR
_CDOTAUSERMSG_PARTICLEMANAGER_UPDATEPARTICLEORIENT.containing_type = _CDOTAUSERMSG_PARTICLEMANAGER;
_CDOTAUSERMSG_PARTICLEMANAGER_UPDATEPARTICLEFALLBACK.fields_by_name['position'].message_type = netmessages_pb2._CMSGVECTOR
_CDOTAUSERMSG_PARTICLEMANAGER_UPDATEPARTICLEFALLBACK.containing_type = _CDOTAUSERMSG_PARTICLEMANAGER;
_CDOTAUSERMSG_PARTICLEMANAGER_UPDATEPARTICLEOFFSET.fields_by_name['origin_offset'].message_type = netmessages_pb2._CMSGVECTOR
_CDOTAUSERMSG_PARTICLEMANAGER_UPDATEPARTICLEOFFSET.containing_type = _CDOTAUSERMSG_PARTICLEMANAGER;
_CDOTAUSERMSG_PARTICLEMANAGER_UPDATEPARTICLEENT.fields_by_name['fallback_position'].message_type = netmessages_pb2._CMSGVECTOR
_CDOTAUSERMSG_PARTICLEMANAGER_UPDATEPARTICLEENT.containing_type = _CDOTAUSERMSG_PARTICLEMANAGER;
_CDOTAUSERMSG_PARTICLEMANAGER_UPDATEPARTICLELATENCY.containing_type = _CDOTAUSERMSG_PARTICLEMANAGER;
_CDOTAUSERMSG_PARTICLEMANAGER_UPDATEPARTICLESHOULDDRAW.containing_type = _CDOTAUSERMSG_PARTICLEMANAGER;
_CDOTAUSERMSG_PARTICLEMANAGER.fields_by_name['type'].enum_type = _DOTA_PARTICLE_MESSAGE
_CDOTAUSERMSG_PARTICLEMANAGER.fields_by_name['release_particle_index'].message_type = _CDOTAUSERMSG_PARTICLEMANAGER_RELEASEPARTICLEINDEX
_CDOTAUSERMSG_PARTICLEMANAGER.fields_by_name['create_particle'].message_type = _CDOTAUSERMSG_PARTICLEMANAGER_CREATEPARTICLE
_CDOTAUSERMSG_PARTICLEMANAGER.fields_by_name['destroy_particle'].message_type = _CDOTAUSERMSG_PARTICLEMANAGER_DESTROYPARTICLE
_CDOTAUSERMSG_PARTICLEMANAGER.fields_by_name['destroy_particle_involving'].message_type = _CDOTAUSERMSG_PARTICLEMANAGER_DESTROYPARTICLEINVOLVING
_CDOTAUSERMSG_PARTICLEMANAGER.fields_by_name['update_particle'].message_type = _CDOTAUSERMSG_PARTICLEMANAGER_UPDATEPARTICLE
_CDOTAUSERMSG_PARTICLEMANAGER.fields_by_name['update_particle_fwd'].message_type = _CDOTAUSERMSG_PARTICLEMANAGER_UPDATEPARTICLEFWD
_CDOTAUSERMSG_PARTICLEMANAGER.fields_by_name['update_particle_orient'].message_type = _CDOTAUSERMSG_PARTICLEMANAGER_UPDATEPARTICLEORIENT
_CDOTAUSERMSG_PARTICLEMANAGER.fields_by_name['update_particle_fallback'].message_type = _CDOTAUSERMSG_PARTICLEMANAGER_UPDATEPARTICLEFALLBACK
_CDOTAUSERMSG_PARTICLEMANAGER.fields_by_name['update_particle_offset'].message_type = _CDOTAUSERMSG_PARTICLEMANAGER_UPDATEPARTICLEOFFSET
_CDOTAUSERMSG_PARTICLEMANAGER.fields_by_name['update_particle_ent'].message_type = _CDOTAUSERMSG_PARTICLEMANAGER_UPDATEPARTICLEENT
_CDOTAUSERMSG_PARTICLEMANAGER.fields_by_name['update_particle_latency'].message_type = _CDOTAUSERMSG_PARTICLEMANAGER_UPDATEPARTICLELATENCY
_CDOTAUSERMSG_PARTICLEMANAGER.fields_by_name['update_particle_should_draw'].message_type = _CDOTAUSERMSG_PARTICLEMANAGER_UPDATEPARTICLESHOULDDRAW
_CDOTAUSERMSG_OVERHEADEVENT.fields_by_name['message_type'].enum_type = _DOTA_OVERHEAD_ALERT
_CDOTAUSERMSG_WORLDLINE.fields_by_name['worldline'].message_type = dota_commonmessages_pb2._CDOTAMSG_WORLDLINE
_CDOTAUSERMSG_CHATWHEEL.fields_by_name['chat_message'].enum_type = dota_commonmessages_pb2._EDOTACHATWHEELMESSAGE
DESCRIPTOR.message_types_by_name['CDOTAUserMsg_AIDebugLine'] = _CDOTAUSERMSG_AIDEBUGLINE
DESCRIPTOR.message_types_by_name['CDOTAUserMsg_Ping'] = _CDOTAUSERMSG_PING
DESCRIPTOR.message_types_by_name['CDOTAUserMsg_SwapVerify'] = _CDOTAUSERMSG_SWAPVERIFY
DESCRIPTOR.message_types_by_name['CDOTAUserMsg_ChatEvent'] = _CDOTAUSERMSG_CHATEVENT
DESCRIPTOR.message_types_by_name['CDOTAUserMsg_CombatLogData'] = _CDOTAUSERMSG_COMBATLOGDATA
DESCRIPTOR.message_types_by_name['CDOTAUserMsg_CombatLogShowDeath'] = _CDOTAUSERMSG_COMBATLOGSHOWDEATH
DESCRIPTOR.message_types_by_name['CDOTAUserMsg_BotChat'] = _CDOTAUSERMSG_BOTCHAT
DESCRIPTOR.message_types_by_name['CDOTAUserMsg_CombatHeroPositions'] = _CDOTAUSERMSG_COMBATHEROPOSITIONS
DESCRIPTOR.message_types_by_name['CDOTAUserMsg_MiniKillCamInfo'] = _CDOTAUSERMSG_MINIKILLCAMINFO
DESCRIPTOR.message_types_by_name['CDOTAUserMsg_GlobalLightColor'] = _CDOTAUSERMSG_GLOBALLIGHTCOLOR
DESCRIPTOR.message_types_by_name['CDOTAUserMsg_GlobalLightDirection'] = _CDOTAUSERMSG_GLOBALLIGHTDIRECTION
DESCRIPTOR.message_types_by_name['CDOTAUserMsg_LocationPing'] = _CDOTAUSERMSG_LOCATIONPING
DESCRIPTOR.message_types_by_name['CDOTAUserMsg_ItemAlert'] = _CDOTAUSERMSG_ITEMALERT
DESCRIPTOR.message_types_by_name['CDOTAUserMsg_MinimapEvent'] = _CDOTAUSERMSG_MINIMAPEVENT
DESCRIPTOR.message_types_by_name['CDOTAUserMsg_MapLine'] = _CDOTAUSERMSG_MAPLINE
DESCRIPTOR.message_types_by_name['CDOTAUserMsg_MinimapDebugPoint'] = _CDOTAUSERMSG_MINIMAPDEBUGPOINT
DESCRIPTOR.message_types_by_name['CDOTAUserMsg_CreateLinearProjectile'] = _CDOTAUSERMSG_CREATELINEARPROJECTILE
DESCRIPTOR.message_types_by_name['CDOTAUserMsg_DestroyLinearProjectile'] = _CDOTAUSERMSG_DESTROYLINEARPROJECTILE
DESCRIPTOR.message_types_by_name['CDOTAUserMsg_DodgeTrackingProjectiles'] = _CDOTAUSERMSG_DODGETRACKINGPROJECTILES
DESCRIPTOR.message_types_by_name['CDOTAUserMsg_SpectatorPlayerClick'] = _CDOTAUSERMSG_SPECTATORPLAYERCLICK
DESCRIPTOR.message_types_by_name['CDOTAUserMsg_NevermoreRequiem'] = _CDOTAUSERMSG_NEVERMOREREQUIEM
DESCRIPTOR.message_types_by_name['CDOTAUserMsg_InvalidCommand'] = _CDOTAUSERMSG_INVALIDCOMMAND
DESCRIPTOR.message_types_by_name['CDOTAUserMsg_HudError'] = _CDOTAUSERMSG_HUDERROR
DESCRIPTOR.message_types_by_name['CDOTAUserMsg_SharedCooldown'] = _CDOTAUSERMSG_SHAREDCOOLDOWN
DESCRIPTOR.message_types_by_name['CDOTAUserMsg_SetNextAutobuyItem'] = _CDOTAUSERMSG_SETNEXTAUTOBUYITEM
DESCRIPTOR.message_types_by_name['CDOTAUserMsg_HalloweenDrops'] = _CDOTAUSERMSG_HALLOWEENDROPS
DESCRIPTOR.message_types_by_name['CDOTAResponseQuerySerialized'] = _CDOTARESPONSEQUERYSERIALIZED
DESCRIPTOR.message_types_by_name['CDOTASpeechMatchOnClient'] = _CDOTASPEECHMATCHONCLIENT
DESCRIPTOR.message_types_by_name['CDOTAUserMsg_UnitEvent'] = _CDOTAUSERMSG_UNITEVENT
DESCRIPTOR.message_types_by_name['CDOTAUserMsg_ItemPurchased'] = _CDOTAUSERMSG_ITEMPURCHASED
DESCRIPTOR.message_types_by_name['CDOTAUserMsg_ItemFound'] = _CDOTAUSERMSG_ITEMFOUND
DESCRIPTOR.message_types_by_name['CDOTAUserMsg_ParticleManager'] = _CDOTAUSERMSG_PARTICLEMANAGER
DESCRIPTOR.message_types_by_name['CDOTAUserMsg_OverheadEvent'] = _CDOTAUSERMSG_OVERHEADEVENT
DESCRIPTOR.message_types_by_name['CDOTAUserMsg_TutorialTipInfo'] = _CDOTAUSERMSG_TUTORIALTIPINFO
DESCRIPTOR.message_types_by_name['CDOTAUserMsg_WorldLine'] = _CDOTAUSERMSG_WORLDLINE
DESCRIPTOR.message_types_by_name['CDOTAUserMsg_TournamentDrop'] = _CDOTAUSERMSG_TOURNAMENTDROP
DESCRIPTOR.message_types_by_name['CDOTAUserMsg_ChatWheel'] = _CDOTAUSERMSG_CHATWHEEL
DESCRIPTOR.message_types_by_name['CDOTAUserMsg_ReceivedXmasGift'] = _CDOTAUSERMSG_RECEIVEDXMASGIFT
class CDOTAUserMsg_AIDebugLine(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CDOTAUSERMSG_AIDEBUGLINE
# @@protoc_insertion_point(class_scope:CDOTAUserMsg_AIDebugLine)
class CDOTAUserMsg_Ping(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CDOTAUSERMSG_PING
# @@protoc_insertion_point(class_scope:CDOTAUserMsg_Ping)
class CDOTAUserMsg_SwapVerify(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CDOTAUSERMSG_SWAPVERIFY
# @@protoc_insertion_point(class_scope:CDOTAUserMsg_SwapVerify)
class CDOTAUserMsg_ChatEvent(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CDOTAUSERMSG_CHATEVENT
# @@protoc_insertion_point(class_scope:CDOTAUserMsg_ChatEvent)
class CDOTAUserMsg_CombatLogData(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CDOTAUSERMSG_COMBATLOGDATA
# @@protoc_insertion_point(class_scope:CDOTAUserMsg_CombatLogData)
class CDOTAUserMsg_CombatLogShowDeath(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CDOTAUSERMSG_COMBATLOGSHOWDEATH
# @@protoc_insertion_point(class_scope:CDOTAUserMsg_CombatLogShowDeath)
class CDOTAUserMsg_BotChat(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CDOTAUSERMSG_BOTCHAT
# @@protoc_insertion_point(class_scope:CDOTAUserMsg_BotChat)
class CDOTAUserMsg_CombatHeroPositions(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CDOTAUSERMSG_COMBATHEROPOSITIONS
# @@protoc_insertion_point(class_scope:CDOTAUserMsg_CombatHeroPositions)
class CDOTAUserMsg_MiniKillCamInfo(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
class Attacker(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
class Ability(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CDOTAUSERMSG_MINIKILLCAMINFO_ATTACKER_ABILITY
# @@protoc_insertion_point(class_scope:CDOTAUserMsg_MiniKillCamInfo.Attacker.Ability)
DESCRIPTOR = _CDOTAUSERMSG_MINIKILLCAMINFO_ATTACKER
# @@protoc_insertion_point(class_scope:CDOTAUserMsg_MiniKillCamInfo.Attacker)
DESCRIPTOR = _CDOTAUSERMSG_MINIKILLCAMINFO
# @@protoc_insertion_point(class_scope:CDOTAUserMsg_MiniKillCamInfo)
class CDOTAUserMsg_GlobalLightColor(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CDOTAUSERMSG_GLOBALLIGHTCOLOR
# @@protoc_insertion_point(class_scope:CDOTAUserMsg_GlobalLightColor)
class CDOTAUserMsg_GlobalLightDirection(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CDOTAUSERMSG_GLOBALLIGHTDIRECTION
# @@protoc_insertion_point(class_scope:CDOTAUserMsg_GlobalLightDirection)
class CDOTAUserMsg_LocationPing(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CDOTAUSERMSG_LOCATIONPING
# @@protoc_insertion_point(class_scope:CDOTAUserMsg_LocationPing)
class CDOTAUserMsg_ItemAlert(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CDOTAUSERMSG_ITEMALERT
# @@protoc_insertion_point(class_scope:CDOTAUserMsg_ItemAlert)
class CDOTAUserMsg_MinimapEvent(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CDOTAUSERMSG_MINIMAPEVENT
# @@protoc_insertion_point(class_scope:CDOTAUserMsg_MinimapEvent)
class CDOTAUserMsg_MapLine(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CDOTAUSERMSG_MAPLINE
# @@protoc_insertion_point(class_scope:CDOTAUserMsg_MapLine)
class CDOTAUserMsg_MinimapDebugPoint(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CDOTAUSERMSG_MINIMAPDEBUGPOINT
# @@protoc_insertion_point(class_scope:CDOTAUserMsg_MinimapDebugPoint)
class CDOTAUserMsg_CreateLinearProjectile(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CDOTAUSERMSG_CREATELINEARPROJECTILE
# @@protoc_insertion_point(class_scope:CDOTAUserMsg_CreateLinearProjectile)
class CDOTAUserMsg_DestroyLinearProjectile(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CDOTAUSERMSG_DESTROYLINEARPROJECTILE
# @@protoc_insertion_point(class_scope:CDOTAUserMsg_DestroyLinearProjectile)
class CDOTAUserMsg_DodgeTrackingProjectiles(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CDOTAUSERMSG_DODGETRACKINGPROJECTILES
# @@protoc_insertion_point(class_scope:CDOTAUserMsg_DodgeTrackingProjectiles)
class CDOTAUserMsg_SpectatorPlayerClick(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CDOTAUSERMSG_SPECTATORPLAYERCLICK
# @@protoc_insertion_point(class_scope:CDOTAUserMsg_SpectatorPlayerClick)
class CDOTAUserMsg_NevermoreRequiem(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CDOTAUSERMSG_NEVERMOREREQUIEM
# @@protoc_insertion_point(class_scope:CDOTAUserMsg_NevermoreRequiem)
class CDOTAUserMsg_InvalidCommand(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CDOTAUSERMSG_INVALIDCOMMAND
# @@protoc_insertion_point(class_scope:CDOTAUserMsg_InvalidCommand)
class CDOTAUserMsg_HudError(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CDOTAUSERMSG_HUDERROR
# @@protoc_insertion_point(class_scope:CDOTAUserMsg_HudError)
class CDOTAUserMsg_SharedCooldown(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CDOTAUSERMSG_SHAREDCOOLDOWN
# @@protoc_insertion_point(class_scope:CDOTAUserMsg_SharedCooldown)
class CDOTAUserMsg_SetNextAutobuyItem(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CDOTAUSERMSG_SETNEXTAUTOBUYITEM
# @@protoc_insertion_point(class_scope:CDOTAUserMsg_SetNextAutobuyItem)
class CDOTAUserMsg_HalloweenDrops(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CDOTAUSERMSG_HALLOWEENDROPS
# @@protoc_insertion_point(class_scope:CDOTAUserMsg_HalloweenDrops)
class CDOTAResponseQuerySerialized(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
class Fact(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CDOTARESPONSEQUERYSERIALIZED_FACT
# @@protoc_insertion_point(class_scope:CDOTAResponseQuerySerialized.Fact)
DESCRIPTOR = _CDOTARESPONSEQUERYSERIALIZED
# @@protoc_insertion_point(class_scope:CDOTAResponseQuerySerialized)
class CDOTASpeechMatchOnClient(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CDOTASPEECHMATCHONCLIENT
# @@protoc_insertion_point(class_scope:CDOTASpeechMatchOnClient)
class CDOTAUserMsg_UnitEvent(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
class Speech(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CDOTAUSERMSG_UNITEVENT_SPEECH
# @@protoc_insertion_point(class_scope:CDOTAUserMsg_UnitEvent.Speech)
class SpeechMute(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CDOTAUSERMSG_UNITEVENT_SPEECHMUTE
# @@protoc_insertion_point(class_scope:CDOTAUserMsg_UnitEvent.SpeechMute)
class AddGesture(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CDOTAUSERMSG_UNITEVENT_ADDGESTURE
# @@protoc_insertion_point(class_scope:CDOTAUserMsg_UnitEvent.AddGesture)
class RemoveGesture(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CDOTAUSERMSG_UNITEVENT_REMOVEGESTURE
# @@protoc_insertion_point(class_scope:CDOTAUserMsg_UnitEvent.RemoveGesture)
class BloodImpact(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CDOTAUSERMSG_UNITEVENT_BLOODIMPACT
# @@protoc_insertion_point(class_scope:CDOTAUserMsg_UnitEvent.BloodImpact)
class FadeGesture(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CDOTAUSERMSG_UNITEVENT_FADEGESTURE
# @@protoc_insertion_point(class_scope:CDOTAUserMsg_UnitEvent.FadeGesture)
DESCRIPTOR = _CDOTAUSERMSG_UNITEVENT
# @@protoc_insertion_point(class_scope:CDOTAUserMsg_UnitEvent)
class CDOTAUserMsg_ItemPurchased(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CDOTAUSERMSG_ITEMPURCHASED
# @@protoc_insertion_point(class_scope:CDOTAUserMsg_ItemPurchased)
class CDOTAUserMsg_ItemFound(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CDOTAUSERMSG_ITEMFOUND
# @@protoc_insertion_point(class_scope:CDOTAUserMsg_ItemFound)
class CDOTAUserMsg_ParticleManager(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
class ReleaseParticleIndex(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CDOTAUSERMSG_PARTICLEMANAGER_RELEASEPARTICLEINDEX
# @@protoc_insertion_point(class_scope:CDOTAUserMsg_ParticleManager.ReleaseParticleIndex)
class CreateParticle(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CDOTAUSERMSG_PARTICLEMANAGER_CREATEPARTICLE
# @@protoc_insertion_point(class_scope:CDOTAUserMsg_ParticleManager.CreateParticle)
class DestroyParticle(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CDOTAUSERMSG_PARTICLEMANAGER_DESTROYPARTICLE
# @@protoc_insertion_point(class_scope:CDOTAUserMsg_ParticleManager.DestroyParticle)
class DestroyParticleInvolving(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CDOTAUSERMSG_PARTICLEMANAGER_DESTROYPARTICLEINVOLVING
# @@protoc_insertion_point(class_scope:CDOTAUserMsg_ParticleManager.DestroyParticleInvolving)
class UpdateParticle(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CDOTAUSERMSG_PARTICLEMANAGER_UPDATEPARTICLE
# @@protoc_insertion_point(class_scope:CDOTAUserMsg_ParticleManager.UpdateParticle)
class UpdateParticleFwd(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CDOTAUSERMSG_PARTICLEMANAGER_UPDATEPARTICLEFWD
# @@protoc_insertion_point(class_scope:CDOTAUserMsg_ParticleManager.UpdateParticleFwd)
class UpdateParticleOrient(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CDOTAUSERMSG_PARTICLEMANAGER_UPDATEPARTICLEORIENT
# @@protoc_insertion_point(class_scope:CDOTAUserMsg_ParticleManager.UpdateParticleOrient)
class UpdateParticleFallback(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CDOTAUSERMSG_PARTICLEMANAGER_UPDATEPARTICLEFALLBACK
# @@protoc_insertion_point(class_scope:CDOTAUserMsg_ParticleManager.UpdateParticleFallback)
class UpdateParticleOffset(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CDOTAUSERMSG_PARTICLEMANAGER_UPDATEPARTICLEOFFSET
# @@protoc_insertion_point(class_scope:CDOTAUserMsg_ParticleManager.UpdateParticleOffset)
class UpdateParticleEnt(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CDOTAUSERMSG_PARTICLEMANAGER_UPDATEPARTICLEENT
# @@protoc_insertion_point(class_scope:CDOTAUserMsg_ParticleManager.UpdateParticleEnt)
class UpdateParticleLatency(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CDOTAUSERMSG_PARTICLEMANAGER_UPDATEPARTICLELATENCY
# @@protoc_insertion_point(class_scope:CDOTAUserMsg_ParticleManager.UpdateParticleLatency)
class UpdateParticleShouldDraw(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CDOTAUSERMSG_PARTICLEMANAGER_UPDATEPARTICLESHOULDDRAW
# @@protoc_insertion_point(class_scope:CDOTAUserMsg_ParticleManager.UpdateParticleShouldDraw)
DESCRIPTOR = _CDOTAUSERMSG_PARTICLEMANAGER
# @@protoc_insertion_point(class_scope:CDOTAUserMsg_ParticleManager)
class CDOTAUserMsg_OverheadEvent(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CDOTAUSERMSG_OVERHEADEVENT
# @@protoc_insertion_point(class_scope:CDOTAUserMsg_OverheadEvent)
class CDOTAUserMsg_TutorialTipInfo(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CDOTAUSERMSG_TUTORIALTIPINFO
# @@protoc_insertion_point(class_scope:CDOTAUserMsg_TutorialTipInfo)
class CDOTAUserMsg_WorldLine(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CDOTAUSERMSG_WORLDLINE
# @@protoc_insertion_point(class_scope:CDOTAUserMsg_WorldLine)
class CDOTAUserMsg_TournamentDrop(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CDOTAUSERMSG_TOURNAMENTDROP
# @@protoc_insertion_point(class_scope:CDOTAUserMsg_TournamentDrop)
class CDOTAUserMsg_ChatWheel(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CDOTAUSERMSG_CHATWHEEL
# @@protoc_insertion_point(class_scope:CDOTAUserMsg_ChatWheel)
class CDOTAUserMsg_ReceivedXmasGift(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CDOTAUSERMSG_RECEIVEDXMASGIFT
# @@protoc_insertion_point(class_scope:CDOTAUserMsg_ReceivedXmasGift)
# @@protoc_insertion_point(module_scope)
| [((15, 13, 18, 18615), 'google.protobuf.descriptor.FileDescriptor', 'descriptor.FileDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1091, 35, 1109, 1), 'google.protobuf.descriptor.Descriptor', 'descriptor.Descriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2490, 53, 2508, 1), 'google.protobuf.descriptor.Descriptor', 'descriptor.Descriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((26, 4, 29, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((30, 4, 33, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((34, 4, 37, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((38, 4, 41, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((42, 4, 45, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((46, 4, 49, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((50, 4, 53, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((54, 4, 57, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((58, 4, 61, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((62, 4, 65, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((66, 4, 69, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((70, 4, 73, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((74, 4, 77, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((78, 4, 81, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((82, 4, 85, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((86, 4, 89, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((90, 4, 93, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((94, 4, 97, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((98, 4, 101, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((102, 4, 105, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((106, 4, 109, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((110, 4, 113, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((114, 4, 117, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((118, 4, 121, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((122, 4, 125, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((126, 4, 129, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((130, 4, 133, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((134, 4, 137, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((138, 4, 141, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((142, 4, 145, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((146, 4, 149, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((150, 4, 153, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((154, 4, 157, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((158, 4, 161, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((162, 4, 165, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((166, 4, 169, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((170, 4, 173, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((174, 4, 177, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((192, 4, 195, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((196, 4, 199, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((200, 4, 203, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((204, 4, 207, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((208, 4, 211, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((212, 4, 215, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((216, 4, 219, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((220, 4, 223, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((224, 4, 227, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((228, 4, 231, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((232, 4, 235, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((236, 4, 239, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((240, 4, 243, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((244, 4, 247, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((248, 4, 251, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((252, 4, 255, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((256, 4, 259, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((260, 4, 263, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((264, 4, 267, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((268, 4, 271, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((272, 4, 275, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((276, 4, 279, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((280, 4, 283, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((284, 4, 287, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((288, 4, 291, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((292, 4, 295, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((296, 4, 299, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((300, 4, 303, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((304, 4, 307, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((308, 4, 311, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((312, 4, 315, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((316, 4, 319, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((320, 4, 323, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((324, 4, 327, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((328, 4, 331, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((332, 4, 335, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((336, 4, 339, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((340, 4, 343, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((344, 4, 347, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((348, 4, 351, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((352, 4, 355, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((356, 4, 359, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((360, 4, 363, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((364, 4, 367, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((368, 4, 371, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((372, 4, 375, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((376, 4, 379, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((380, 4, 383, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((384, 4, 387, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((388, 4, 391, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((392, 4, 395, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((396, 4, 399, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((400, 4, 403, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((404, 4, 407, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((408, 4, 411, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((412, 4, 415, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((416, 4, 419, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((420, 4, 423, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((438, 4, 441, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((442, 4, 445, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((446, 4, 449, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((450, 4, 453, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((468, 4, 471, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((486, 4, 489, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((490, 4, 493, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((494, 4, 497, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((498, 4, 501, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((502, 4, 505, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((520, 4, 523, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((524, 4, 527, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((528, 4, 531, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((532, 4, 535, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((536, 4, 539, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((540, 4, 543, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((544, 4, 547, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((562, 4, 565, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((566, 4, 569, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((570, 4, 573, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((574, 4, 577, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((578, 4, 581, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((582, 4, 585, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((586, 4, 589, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((590, 4, 593, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((594, 4, 597, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((598, 4, 601, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((602, 4, 605, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((606, 4, 609, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((624, 4, 627, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((628, 4, 631, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((632, 4, 635, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((636, 4, 639, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((640, 4, 643, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((644, 4, 647, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((648, 4, 651, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((652, 4, 655, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((656, 4, 659, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((660, 4, 663, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((664, 4, 667, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((668, 4, 671, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((672, 4, 675, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((830, 4, 833, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((834, 4, 837, 16), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((909, 4, 915, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((937, 4, 943, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((944, 4, 950, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((951, 4, 957, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((958, 4, 964, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((965, 4, 971, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((972, 4, 978, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((979, 4, 985, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((986, 4, 992, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1014, 4, 1020, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1021, 4, 1027, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1028, 4, 1034, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1035, 4, 1041, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1042, 4, 1048, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1049, 4, 1055, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1056, 4, 1062, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1063, 4, 1069, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1070, 4, 1076, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1119, 4, 1125, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1168, 4, 1174, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1175, 4, 1181, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1182, 4, 1188, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1189, 4, 1195, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1217, 4, 1223, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1224, 4, 1230, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1251, 4, 1257, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1258, 4, 1264, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1265, 4, 1271, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1292, 4, 1298, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1320, 4, 1326, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1327, 4, 1333, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1355, 4, 1361, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1362, 4, 1368, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1390, 4, 1396, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1397, 4, 1403, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1425, 4, 1431, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1432, 4, 1438, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1460, 4, 1466, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1467, 4, 1473, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1474, 4, 1480, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1481, 4, 1487, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1488, 4, 1494, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1516, 4, 1522, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1523, 4, 1529, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1551, 4, 1557, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1558, 4, 1564, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1565, 4, 1571, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1572, 4, 1578, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1600, 4, 1606, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1607, 4, 1613, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1614, 4, 1620, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1621, 4, 1627, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1628, 4, 1634, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1635, 4, 1641, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1663, 4, 1669, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1691, 4, 1697, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1719, 4, 1725, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1726, 4, 1732, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1733, 4, 1739, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1761, 4, 1767, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1768, 4, 1774, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1775, 4, 1781, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1831, 4, 1837, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1859, 4, 1865, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1873, 4, 1879, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1880, 4, 1886, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1936, 4, 1942, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1943, 4, 1949, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1950, 4, 1956, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1978, 4, 1984, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1985, 4, 1991, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((1992, 4, 1998, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2027, 4, 2033, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2055, 4, 2061, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2062, 4, 2068, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2069, 4, 2075, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2076, 4, 2082, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2104, 4, 2110, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2118, 4, 2124, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2125, 4, 2131, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2132, 4, 2138, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2159, 4, 2165, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2186, 4, 2192, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2193, 4, 2199, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2200, 4, 2206, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2207, 4, 2213, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2234, 4, 2240, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2261, 4, 2267, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2268, 4, 2274, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2275, 4, 2281, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2302, 4, 2308, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2329, 4, 2335, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2336, 4, 2342, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2343, 4, 2349, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2350, 4, 2356, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2357, 4, 2363, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2364, 4, 2370, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2371, 4, 2377, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2378, 4, 2384, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2385, 4, 2391, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2413, 4, 2419, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2441, 4, 2447, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2448, 4, 2454, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2455, 4, 2461, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2462, 4, 2468, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2469, 4, 2475, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2517, 4, 2523, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2524, 4, 2530, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2531, 4, 2537, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2558, 4, 2564, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2585, 4, 2591, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2592, 4, 2598, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2619, 4, 2625, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2626, 4, 2632, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2653, 4, 2659, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2660, 4, 2666, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2687, 4, 2693, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2694, 4, 2700, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2701, 4, 2707, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2708, 4, 2714, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2735, 4, 2741, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2742, 4, 2748, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2769, 4, 2775, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2776, 4, 2782, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2803, 4, 2809, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2810, 4, 2816, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2817, 4, 2823, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2824, 4, 2830, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2831, 4, 2837, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2858, 4, 2864, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2865, 4, 2871, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2892, 4, 2898, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2919, 4, 2925, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2926, 4, 2932, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2933, 4, 2939, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2940, 4, 2946, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2947, 4, 2953, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2954, 4, 2960, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2961, 4, 2967, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2968, 4, 2974, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2975, 4, 2981, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2982, 4, 2988, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2989, 4, 2995, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((2996, 4, 3002, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((3003, 4, 3009, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((3010, 4, 3016, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((3038, 4, 3044, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((3045, 4, 3051, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((3052, 4, 3058, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((3059, 4, 3065, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((3066, 4, 3072, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((3101, 4, 3107, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((3129, 4, 3135, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((3136, 4, 3142, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((3171, 4, 3177, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((3199, 4, 3205, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((3206, 4, 3212, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((3213, 4, 3219, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((3241, 4, 3247, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n'), ((3255, 4, 3261, 19), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor\n')] |
rubelw/auth0_client | auth0_client/menu/datafiles/scripts/get_active_user_count.py | 51e68239babcf7c40e40491d1aaa3f8547a67f63 | #!/usr/bin/env python
import json
from auth0_client.Auth0Client import Auth0Client
from auth0_client.menu.menu_helper.common import *
from auth0_client.menu.menu_helper.pretty import *
try:
users = {}
client = Auth0Client(auth_config())
results = client.active_users()
print(pretty(results))
except (KeyboardInterrupt, SystemExit):
sys.exit()
| [] |
dark-codr/encryptfinance | encryptfinance/transactions/admin.py | 573a8179c3a7c4b0f68d71bc9d461246f6fdba29 | from __future__ import absolute_import
from django.contrib import admin
from .models import Deposit, Withdrawal, Support
from .forms import DepositForm, WithdrawalForm
# Register your models here.
@admin.register(Deposit)
class DepositAdmin(admin.ModelAdmin):
# form = DepositForm
list_display = ["__str__", "amount", "approval", "deposited", "created"]
list_filter = ["approval", "created"]
list_editable = ["approval", "amount", "deposited"]
class Meta:
model = Deposit
@admin.register(Withdrawal)
class WithdrawalAdmin(admin.ModelAdmin):
form = WithdrawalForm
list_display = ["__str__", "amount", "wallet_id", "approval", "withdrawn", "created"]
list_filter = ["approval", "created"]
list_editable = ["approval", "withdrawn"]
class Meta:
model = Withdrawal
admin.site.register(Support)
| [((9, 1, 9, 24), 'django.contrib.admin.register', 'admin.register', ({(9, 16, 9, 23): 'Deposit'}, {}), '(Deposit)', False, 'from django.contrib import admin\n'), ((18, 1, 18, 27), 'django.contrib.admin.register', 'admin.register', ({(18, 16, 18, 26): 'Withdrawal'}, {}), '(Withdrawal)', False, 'from django.contrib import admin\n'), ((29, 0, 29, 28), 'django.contrib.admin.site.register', 'admin.site.register', ({(29, 20, 29, 27): 'Support'}, {}), '(Support)', False, 'from django.contrib import admin\n')] |
jlin/inventory | vendor-local/src/django-piston/tests/test_project/settings.py | c098c98e570c3bf9fadfd811eb75e1213f6ea428 | import os
DEBUG = True
DATABASES = {
'default':
{
'ENGINE': 'django.db.backends.sqlite3',
'NAME': '/tmp/piston.db'
}
}
DATABASE_ENGINE = 'sqlite3'
DATABASE_NAME = '/tmp/piston.db'
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.admin',
'piston',
'test_project.apps.testapp',
)
TEMPLATE_DIRS = (
os.path.join(os.path.dirname(__file__), 'templates'),
)
SITE_ID = 1
ROOT_URLCONF = 'test_project.urls'
MIDDLEWARE_CLASSES = (
'piston.middleware.ConditionalMiddlewareCompatProxy',
'django.contrib.sessions.middleware.SessionMiddleware',
'piston.middleware.CommonMiddlewareCompatProxy',
'django.contrib.auth.middleware.AuthenticationMiddleware',
)
| [((22, 17, 22, 42), 'os.path.dirname', 'os.path.dirname', ({(22, 33, 22, 41): '__file__'}, {}), '(__file__)', False, 'import os\n')] |
UCD4IDS/sage | src/sage/combinat/combinatorial_map.py | 43474c96d533fd396fe29fe0782d44dc7f5164f7 | """
Combinatorial maps
This module provides a decorator that can be used to add semantic to a
Python method by marking it as implementing a *combinatorial map*,
that is a map between two :class:`enumerated sets <EnumeratedSets>`::
sage: from sage.combinat.combinatorial_map import combinatorial_map
sage: class MyPermutation(object):
....: @combinatorial_map()
....: def reverse(self):
....: '''
....: Reverse the permutation
....: '''
....: # ... code ...
By default, this decorator is a no-op: it returns the decorated method
as is::
sage: MyPermutation.reverse
<function MyPermutation.reverse at ...>
See :func:`combinatorial_map_wrapper` for the various options this
decorator can take.
Projects built on top of Sage are welcome to customize locally this
hook to instrument the Sage code and exploit this semantic
information. Typically, the decorator could be used to populate a
database of maps. For a real-life application, see the project
`FindStat <http://findstat.org/>`. As a basic example, a variant of
the decorator is provided as :func:`combinatorial_map_wrapper`; it
wraps the decorated method, so that one can later use
:func:`combinatorial_maps_in_class` to query an object, or class
thereof, for all the combinatorial maps that apply to it.
.. NOTE::
Since decorators are evaluated upon loading Python modules,
customizing :obj:`combinatorial map` needs to be done before the
modules using it are loaded. In the examples below, where we
illustrate the customized ``combinatorial_map`` decorator on the
:mod:`sage.combinat.permutation` module, we resort to force a
reload of this module after dynamically changing
``sage.combinat.combinatorial_map.combinatorial_map``. This is
good enough for those doctests, but remains fragile.
For real use cases, it is probably best to just edit this source
file statically (see below).
"""
# ****************************************************************************
# Copyright (C) 2011 Christian Stump <christian.stump at gmail.com>
#
# Distributed under the terms of the GNU General Public License (GPL)
# https://www.gnu.org/licenses/
# ****************************************************************************
def combinatorial_map_trivial(f=None, order=None, name=None):
r"""
Combinatorial map decorator
See :ref:`sage.combinat.combinatorial_map` for a description of
this decorator and its purpose. This default implementation does
nothing.
INPUT:
- ``f`` -- (default: ``None``, if combinatorial_map is used as a decorator) a function
- ``name`` -- (default: ``None``) the name for nicer outputs on combinatorial maps
- ``order`` -- (default: ``None``) the order of the combinatorial map, if it is known. Is not used, but might be helpful later
OUTPUT:
- ``f`` unchanged
EXAMPLES::
sage: from sage.combinat.combinatorial_map import combinatorial_map_trivial as combinatorial_map
sage: class MyPermutation(object):
....: @combinatorial_map
....: def reverse(self):
....: '''
....: Reverse the permutation
....: '''
....: # ... code ...
....: @combinatorial_map(name='descent set of permutation')
....: def descent_set(self):
....: '''
....: The descent set of the permutation
....: '''
....: # ... code ...
sage: MyPermutation.reverse
<function MyPermutation.reverse at ...>
sage: MyPermutation.descent_set
<function MyPermutation.descent_set at ...>
"""
if f is None:
return lambda f: f
else:
return f
def combinatorial_map_wrapper(f=None, order=None, name=None):
r"""
Combinatorial map decorator (basic example).
See :ref:`sage.combinat.combinatorial_map` for a description of
the ``combinatorial_map`` decorator and its purpose. This
implementation, together with :func:`combinatorial_maps_in_class`
illustrates how to use this decorator as a hook to instrument the
Sage code.
INPUT:
- ``f`` -- (default: ``None``, if combinatorial_map is used as a decorator) a function
- ``name`` -- (default: ``None``) the name for nicer outputs on combinatorial maps
- ``order`` -- (default: ``None``) the order of the combinatorial map, if it is known. Is not used, but might be helpful later
OUTPUT:
- A combinatorial map. This is an instance of the :class:`CombinatorialMap`.
EXAMPLES:
We define a class illustrating the use of this implementation of
the :obj:`combinatorial_map` decorator with its various arguments::
sage: from sage.combinat.combinatorial_map import combinatorial_map_wrapper as combinatorial_map
sage: class MyPermutation(object):
....: @combinatorial_map()
....: def reverse(self):
....: '''
....: Reverse the permutation
....: '''
....: pass
....: @combinatorial_map(order=2)
....: def inverse(self):
....: '''
....: The inverse of the permutation
....: '''
....: pass
....: @combinatorial_map(name='descent set of permutation')
....: def descent_set(self):
....: '''
....: The descent set of the permutation
....: '''
....: pass
....: def major_index(self):
....: '''
....: The major index of the permutation
....: '''
....: pass
sage: MyPermutation.reverse
Combinatorial map: reverse
sage: MyPermutation.descent_set
Combinatorial map: descent set of permutation
sage: MyPermutation.inverse
Combinatorial map: inverse
One can now determine all the combinatorial maps associated with a
given object as follows::
sage: from sage.combinat.combinatorial_map import combinatorial_maps_in_class
sage: X = combinatorial_maps_in_class(MyPermutation); X # random
[Combinatorial map: reverse,
Combinatorial map: descent set of permutation,
Combinatorial map: inverse]
The method ``major_index`` defined about is not a combinatorial map::
sage: MyPermutation.major_index
<function MyPermutation.major_index at ...>
But one can define a function that turns ``major_index`` into a combinatorial map::
sage: def major_index(p):
....: return p.major_index()
sage: major_index
<function major_index at ...>
sage: combinatorial_map(major_index)
Combinatorial map: major_index
"""
if f is None:
return lambda f: CombinatorialMap(f, order=order, name=name)
else:
return CombinatorialMap(f, order=order, name=name)
##############################################################################
# Edit here to customize the combinatorial_map hook
##############################################################################
combinatorial_map = combinatorial_map_trivial
# combinatorial_map = combinatorial_map_wrapper
class CombinatorialMap(object):
r"""
This is a wrapper class for methods that are *combinatorial maps*.
For further details and doctests, see
:ref:`sage.combinat.combinatorial_map` and
:func:`combinatorial_map_wrapper`.
"""
def __init__(self, f, order=None, name=None):
"""
Constructor for combinatorial maps.
EXAMPLES::
sage: from sage.combinat.combinatorial_map import combinatorial_map_wrapper as combinatorial_map
sage: def f(x):
....: "doc of f"
....: return x
sage: x = combinatorial_map(f); x
Combinatorial map: f
sage: x.__doc__
'doc of f'
sage: x.__name__
'f'
sage: x.__module__
'__main__'
"""
import types
if not isinstance(f, types.FunctionType):
raise ValueError("Only plain functions are supported")
self._f = f
self._order = order
self._name = name
if hasattr(f, "__doc__"):
self.__doc__ = f.__doc__
if hasattr(f, "__name__"):
self.__name__ = f.__name__
else:
self.__name__ = "..."
if hasattr(f, "__module__"):
self.__module__ = f.__module__
def __repr__(self):
"""
EXAMPLES::
sage: sage.combinat.combinatorial_map.combinatorial_map = sage.combinat.combinatorial_map.combinatorial_map_wrapper
sage: from importlib import reload
sage: _ = reload(sage.combinat.permutation)
sage: p = Permutation([1,3,2,4])
sage: p.left_tableau.__repr__()
'Combinatorial map: Robinson-Schensted insertion tableau'
"""
return "Combinatorial map: %s" % self.name()
def _sage_src_lines_(self):
r"""
Return the source code location for the wrapped function.
EXAMPLES::
sage: sage.combinat.combinatorial_map.combinatorial_map = sage.combinat.combinatorial_map.combinatorial_map_wrapper
sage: from importlib import reload
sage: _ = reload(sage.combinat.permutation)
sage: p = Permutation([1,3,2,4])
sage: cm = p.left_tableau; cm
Combinatorial map: Robinson-Schensted insertion tableau
sage: (src, lines) = cm._sage_src_lines_()
sage: src[0]
" @combinatorial_map(name='Robinson-Schensted insertion tableau')\n"
sage: lines # random
2653
"""
from sage.misc.sageinspect import sage_getsourcelines
return sage_getsourcelines(self._f)
def __get__(self, inst, cls=None):
"""
Bounds the method of self to the given instance.
EXAMPLES::
sage: sage.combinat.combinatorial_map.combinatorial_map = sage.combinat.combinatorial_map.combinatorial_map_wrapper
sage: from importlib import reload
sage: _ = reload(sage.combinat.permutation)
sage: p = Permutation([1,3,2,4])
sage: p.left_tableau #indirect doctest
Combinatorial map: Robinson-Schensted insertion tableau
"""
self._inst = inst
return self
def __call__(self, *args, **kwds):
"""
Calls the combinatorial map.
EXAMPLES::
sage: sage.combinat.combinatorial_map.combinatorial_map = sage.combinat.combinatorial_map.combinatorial_map_wrapper
sage: from importlib import reload
sage: _ = reload(sage.combinat.permutation)
sage: p = Permutation([1,3,2,4])
sage: cm = type(p).left_tableau; cm
Combinatorial map: Robinson-Schensted insertion tableau
sage: cm(p)
[[1, 2, 4], [3]]
sage: cm(Permutation([4,3,2,1]))
[[1], [2], [3], [4]]
"""
if self._inst is not None:
return self._f(self._inst, *args, **kwds)
else:
return self._f(*args, **kwds)
def unbounded_map(self):
r"""
Return the unbounded version of ``self``.
You can use this method to return a function which takes as input
an element in the domain of the combinatorial map.
See the example below.
EXAMPLES::
sage: sage.combinat.combinatorial_map.combinatorial_map = sage.combinat.combinatorial_map.combinatorial_map_wrapper
sage: from importlib import reload
sage: _ = reload(sage.combinat.permutation)
sage: from sage.combinat.permutation import Permutation
sage: pi = Permutation([1,3,2])
sage: f = pi.reverse
sage: F = f.unbounded_map()
sage: F(pi)
[2, 3, 1]
"""
return self._f
def order(self):
"""
Returns the order of ``self``, or ``None`` if the order is not known.
EXAMPLES::
sage: from sage.combinat.combinatorial_map import combinatorial_map
sage: class CombinatorialClass:
....: @combinatorial_map(order=2)
....: def to_self_1(): pass
....: @combinatorial_map()
....: def to_self_2(): pass
sage: CombinatorialClass.to_self_1.order()
2
sage: CombinatorialClass.to_self_2.order() is None
True
"""
return self._order
def name(self):
"""
Returns the name of a combinatorial map.
This is used for the string representation of ``self``.
EXAMPLES::
sage: from sage.combinat.combinatorial_map import combinatorial_map
sage: class CombinatorialClass:
....: @combinatorial_map(name='map1')
....: def to_self_1(): pass
....: @combinatorial_map()
....: def to_self_2(): pass
sage: CombinatorialClass.to_self_1.name()
'map1'
sage: CombinatorialClass.to_self_2.name()
'to_self_2'
"""
if self._name is not None:
return self._name
else:
return self._f.__name__
def combinatorial_maps_in_class(cls):
"""
Return the combinatorial maps of the class as a list of combinatorial maps.
For further details and doctests, see
:ref:`sage.combinat.combinatorial_map` and
:func:`combinatorial_map_wrapper`.
EXAMPLES::
sage: sage.combinat.combinatorial_map.combinatorial_map = sage.combinat.combinatorial_map.combinatorial_map_wrapper
sage: from importlib import reload
sage: _ = reload(sage.combinat.permutation)
sage: from sage.combinat.combinatorial_map import combinatorial_maps_in_class
sage: p = Permutation([1,3,2,4])
sage: cmaps = combinatorial_maps_in_class(p)
sage: cmaps # random
[Combinatorial map: Robinson-Schensted insertion tableau,
Combinatorial map: Robinson-Schensted recording tableau,
Combinatorial map: Robinson-Schensted tableau shape,
Combinatorial map: complement,
Combinatorial map: descent composition,
Combinatorial map: inverse, ...]
sage: p.left_tableau in cmaps
True
sage: p.right_tableau in cmaps
True
sage: p.complement in cmaps
True
"""
result = set()
for method in dir(cls):
entry = getattr(cls, method)
if isinstance(entry, CombinatorialMap):
result.add(entry)
return list(result)
| [((274, 15, 274, 43), 'sage.misc.sageinspect.sage_getsourcelines', 'sage_getsourcelines', ({(274, 35, 274, 42): 'self._f'}, {}), '(self._f)', False, 'from sage.misc.sageinspect import sage_getsourcelines\n')] |
xdress/xdress | tests/cppproj/xdressrc.py | eb7f0a02b3edf617d401939ede7f0d713a88917f | import os
from xdress.utils import apiname
package = 'cppproj'
packagedir = 'cppproj'
includes = ['src']
plugins = ('xdress.autoall', 'xdress.pep8names', 'xdress.cythongen',
'xdress.stlwrap', )
extra_types = 'cppproj_extra_types' # non-default value
dtypes = [
('map', 'str', 'int'),
('set', 'int'),
'float32',
('vector', 'int32'),
'ThreeNums',
]
stlcontainers = [
('pair', 'int', ('vector', 'int')),
('pair', 'int', 'str'),
('pair', 'int', 'int'),
('pair', 'int', 'SomeCrazyPairValue'),
('pair', 'ThreeNums', 'int'),
('vector', 'float64'),
('vector', 'str'),
('vector', 'int32'),
('vector', 'complex'),
('vector', ('vector', 'float64')),
('set', 'int'),
('set', 'str'),
('set', 'uint'),
('set', 'char'),
('set', 'ThreeNums'),
('map', 'str', 'str'),
('map', 'str', 'int'),
('map', 'int', 'str'),
('map', 'str', 'uint'),
('map', 'uint', 'str'),
('map', 'uint', 'uint'),
('map', 'str', 'float'),
('map', 'ThreeNums', 'float'),
('map', 'int', 'int'),
('map', 'int', 'bool'),
('map', 'int', 'char'),
('map', 'int', 'float'),
('map', 'uint', 'float'),
('map', 'int', 'complex'),
('map', ('pair', 'int', 'int'), 'float'),
('map', 'int', ('set', 'int')),
('map', 'int', ('set', 'str')),
('map', 'int', ('set', 'uint')),
('map', 'int', ('set', 'char')),
('map', 'int', ('vector', 'str')),
('map', 'int', ('vector', 'int')),
('map', 'int', ('vector', 'uint')),
('map', 'int', ('vector', 'char')),
('map', 'int', ('vector', 'bool')),
('map', 'int', ('vector', 'float')),
('map', 'int', ('vector', ('vector', 'float64'))),
('map', 'int', ('map', 'int', 'bool')),
('map', 'int', ('map', 'int', 'char')),
('map', 'int', ('map', 'int', 'float')),
('map', 'int', ('map', 'int', ('vector', 'bool'))),
('map', 'int', ('map', 'int', ('vector', 'char'))),
('map', 'int', ('map', 'int', ('vector', 'float'))),
('map', 'int', ('vector', ('set', 'int'))),
]
dtypes_module = 'dt'
stlcontainers_module = 'stlc'
_fromsrcdir = lambda x: os.path.join('src', x)
_inbasics = {'srcfiles': _fromsrcdir('basics.[ch]*'),
'incfiles': 'basics.hpp', # trick to get around cython generating *.h
'language': 'c++',
}
_indiscovery = {'srcfiles': _fromsrcdir('discovery*'),
'incfiles': 'discovery.h',
'language': 'c++',
}
variables = [
apiname('PersonID', tarbase='pybasics', **_inbasics),
apiname('*', **_indiscovery),
]
functions = [
apiname('voided', **_inbasics),
apiname('pairs_be_crazy', tarbase='pybasics', **_inbasics),
apiname('call_with_void_fp_struct', **_inbasics),
{'srcname': 'func0',
'tarname': 'a_better_name',
'incfiles': 'basics.h',
'srcfiles': _fromsrcdir('basics.[ch]*')},
apiname('func1', **_inbasics),
apiname('func2', **_inbasics),
apiname('func3', **_inbasics),
apiname('func4', tarbase='pybasics', **_inbasics),
apiname('setfunc', **_inbasics),
apiname(('findmin', 'int32', 'float32',), **_inbasics),
apiname(('findmin', 'float64', 'float32',), **_inbasics),
{'srcname': ('findmin', 'int', 'int',),
'incfiles': 'basics.h',
'tarname': ('regmin', 'int', 'int',),
'srcfiles': _fromsrcdir('basics.[ch]*')},
{'srcname': ('findmin', 'bool', 'bool',),
'tarname': 'sillyBoolMin',
'incfiles': 'basics.h',
'srcfiles': _fromsrcdir('basics.[ch]*')},
apiname(('lessthan', 'int32', 3,), **_inbasics),
apiname('call_threenums_op_from_c', tarbase='pybasics', **_inbasics),
apiname('*', **_indiscovery),
]
classes = [
#apiname('struct0', 'basics', 'pybasics', 'My_Struct_0'), FIXME This needs more work
apiname('Union0', **_inbasics),
apiname('VoidFPStruct', **_inbasics),
apiname('A', **_inbasics),
apiname('B', **_inbasics),
apiname('C', **_inbasics),
apiname('SomeCrazyPairValue', tarbase='pybasics', **_inbasics),
# apiname('SomeCrazyPairValue', **_inbasics),
apiname(('TClass1', 'int32'), **_inbasics),
apiname(('TClass1', 'float64'), **_inbasics),
{'srcname': ('TClass1', 'float32'),
'tarname': 'TC1Floater',
'incfiles': 'basics.h',
'srcfiles': _fromsrcdir('basics.[ch]*')},
apiname(('TClass0', 'int32'), **_inbasics),
apiname(('TClass0', 'float64'), **_inbasics),
{'srcname': ('TClass0', 'bool'),
'tarname': ('TC0Bool', 'bool'),
'incfiles': 'basics.h',
'srcfiles': _fromsrcdir('basics.[ch]*')},
apiname('Untemplated', **_inbasics),
apiname('ThreeNums', tarbase='pybasics', **_inbasics),
apiname('*', **_indiscovery),
apiname(('TClass0', 'float32'), **_inbasics),
apiname(('TClass2', 'float32'), **_inbasics),
apiname('NoDefault', **_inbasics),
apiname('NoDefaultChild', **_inbasics),
apiname(('EnumArg', 'JOAN'), tarbase='pybasics', **_inbasics),
]
del os
del apiname
| [((75, 24, 75, 46), 'os.path.join', 'os.path.join', ({(75, 37, 75, 42): '"""src"""', (75, 44, 75, 45): 'x'}, {}), "('src', x)", False, 'import os\n'), ((86, 4, 86, 56), 'xdress.utils.apiname', 'apiname', (), '', False, 'from xdress.utils import apiname\n'), ((87, 4, 87, 32), 'xdress.utils.apiname', 'apiname', ({(87, 12, 87, 15): '"""*"""'}, {}), "('*', **_indiscovery)", False, 'from xdress.utils import apiname\n'), ((91, 4, 91, 34), 'xdress.utils.apiname', 'apiname', ({(91, 12, 91, 20): '"""voided"""'}, {}), "('voided', **_inbasics)", False, 'from xdress.utils import apiname\n'), ((92, 4, 92, 62), 'xdress.utils.apiname', 'apiname', (), '', False, 'from xdress.utils import apiname\n'), ((93, 4, 93, 52), 'xdress.utils.apiname', 'apiname', ({(93, 12, 93, 38): '"""call_with_void_fp_struct"""'}, {}), "('call_with_void_fp_struct', **_inbasics)", False, 'from xdress.utils import apiname\n'), ((98, 4, 98, 33), 'xdress.utils.apiname', 'apiname', ({(98, 12, 98, 19): '"""func1"""'}, {}), "('func1', **_inbasics)", False, 'from xdress.utils import apiname\n'), ((99, 4, 99, 33), 'xdress.utils.apiname', 'apiname', ({(99, 12, 99, 19): '"""func2"""'}, {}), "('func2', **_inbasics)", False, 'from xdress.utils import apiname\n'), ((100, 4, 100, 33), 'xdress.utils.apiname', 'apiname', ({(100, 12, 100, 19): '"""func3"""'}, {}), "('func3', **_inbasics)", False, 'from xdress.utils import apiname\n'), ((101, 4, 101, 53), 'xdress.utils.apiname', 'apiname', (), '', False, 'from xdress.utils import apiname\n'), ((102, 4, 102, 35), 'xdress.utils.apiname', 'apiname', ({(102, 12, 102, 21): '"""setfunc"""'}, {}), "('setfunc', **_inbasics)", False, 'from xdress.utils import apiname\n'), ((103, 4, 103, 58), 'xdress.utils.apiname', 'apiname', ({(103, 12, 103, 44): "('findmin', 'int32', 'float32')"}, {}), "(('findmin', 'int32', 'float32'), **_inbasics)", False, 'from xdress.utils import apiname\n'), ((104, 4, 104, 60), 'xdress.utils.apiname', 'apiname', ({(104, 12, 104, 46): "('findmin', 'float64', 'float32')"}, {}), "(('findmin', 'float64', 'float32'), **_inbasics)", False, 'from xdress.utils import apiname\n'), ((113, 4, 113, 51), 'xdress.utils.apiname', 'apiname', ({(113, 12, 113, 37): "('lessthan', 'int32', 3)"}, {}), "(('lessthan', 'int32', 3), **_inbasics)", False, 'from xdress.utils import apiname\n'), ((114, 4, 114, 72), 'xdress.utils.apiname', 'apiname', (), '', False, 'from xdress.utils import apiname\n'), ((115, 4, 115, 32), 'xdress.utils.apiname', 'apiname', ({(115, 12, 115, 15): '"""*"""'}, {}), "('*', **_indiscovery)", False, 'from xdress.utils import apiname\n'), ((120, 4, 120, 34), 'xdress.utils.apiname', 'apiname', ({(120, 12, 120, 20): '"""Union0"""'}, {}), "('Union0', **_inbasics)", False, 'from xdress.utils import apiname\n'), ((121, 4, 121, 40), 'xdress.utils.apiname', 'apiname', ({(121, 12, 121, 26): '"""VoidFPStruct"""'}, {}), "('VoidFPStruct', **_inbasics)", False, 'from xdress.utils import apiname\n'), ((122, 4, 122, 29), 'xdress.utils.apiname', 'apiname', ({(122, 12, 122, 15): '"""A"""'}, {}), "('A', **_inbasics)", False, 'from xdress.utils import apiname\n'), ((123, 4, 123, 29), 'xdress.utils.apiname', 'apiname', ({(123, 12, 123, 15): '"""B"""'}, {}), "('B', **_inbasics)", False, 'from xdress.utils import apiname\n'), ((124, 4, 124, 29), 'xdress.utils.apiname', 'apiname', ({(124, 12, 124, 15): '"""C"""'}, {}), "('C', **_inbasics)", False, 'from xdress.utils import apiname\n'), ((125, 4, 125, 66), 'xdress.utils.apiname', 'apiname', (), '', False, 'from xdress.utils import apiname\n'), ((127, 4, 127, 46), 'xdress.utils.apiname', 'apiname', ({(127, 12, 127, 32): "('TClass1', 'int32')"}, {}), "(('TClass1', 'int32'), **_inbasics)", False, 'from xdress.utils import apiname\n'), ((128, 4, 128, 48), 'xdress.utils.apiname', 'apiname', ({(128, 12, 128, 34): "('TClass1', 'float64')"}, {}), "(('TClass1', 'float64'), **_inbasics)", False, 'from xdress.utils import apiname\n'), ((133, 4, 133, 46), 'xdress.utils.apiname', 'apiname', ({(133, 12, 133, 32): "('TClass0', 'int32')"}, {}), "(('TClass0', 'int32'), **_inbasics)", False, 'from xdress.utils import apiname\n'), ((134, 4, 134, 48), 'xdress.utils.apiname', 'apiname', ({(134, 12, 134, 34): "('TClass0', 'float64')"}, {}), "(('TClass0', 'float64'), **_inbasics)", False, 'from xdress.utils import apiname\n'), ((139, 4, 139, 39), 'xdress.utils.apiname', 'apiname', ({(139, 12, 139, 25): '"""Untemplated"""'}, {}), "('Untemplated', **_inbasics)", False, 'from xdress.utils import apiname\n'), ((140, 4, 140, 57), 'xdress.utils.apiname', 'apiname', (), '', False, 'from xdress.utils import apiname\n'), ((141, 4, 141, 32), 'xdress.utils.apiname', 'apiname', ({(141, 12, 141, 15): '"""*"""'}, {}), "('*', **_indiscovery)", False, 'from xdress.utils import apiname\n'), ((142, 4, 142, 48), 'xdress.utils.apiname', 'apiname', ({(142, 12, 142, 34): "('TClass0', 'float32')"}, {}), "(('TClass0', 'float32'), **_inbasics)", False, 'from xdress.utils import apiname\n'), ((143, 4, 143, 48), 'xdress.utils.apiname', 'apiname', ({(143, 12, 143, 34): "('TClass2', 'float32')"}, {}), "(('TClass2', 'float32'), **_inbasics)", False, 'from xdress.utils import apiname\n'), ((144, 4, 144, 37), 'xdress.utils.apiname', 'apiname', ({(144, 12, 144, 23): '"""NoDefault"""'}, {}), "('NoDefault', **_inbasics)", False, 'from xdress.utils import apiname\n'), ((145, 4, 145, 42), 'xdress.utils.apiname', 'apiname', ({(145, 12, 145, 28): '"""NoDefaultChild"""'}, {}), "('NoDefaultChild', **_inbasics)", False, 'from xdress.utils import apiname\n'), ((146, 4, 146, 65), 'xdress.utils.apiname', 'apiname', (), '', False, 'from xdress.utils import apiname\n')] |
henryshunt/c-aws | routines/server.py | 6e15bb18c2243f11a129b01298cb31749033f8d4 | import os
import subprocess
import routines.config as config
import routines.helpers as helpers
def get_static_info():
""" Outputs data concerning the computer in the C-AWS station
"""
startup_time = None
data_drive_space = None
camera_drive_space = None
# Get system startup time
try:
startup_time = (subprocess
.check_output(["uptime", "-s"]).decode().rstrip())
except: pass
# Get data and camera drive space
if config.load() == True:
if os.path.isdir(config.data_directory):
free_space = helpers.remaining_space(config.data_directory)
if free_space != None:
data_drive_space = round(free_space, 2)
if (config.camera_directory != None and os.path.isdir(
config.camera_directory) and os.path.ismount(
config.camera_directory)):
free_space = helpers.remaining_space(config.camera_directory)
if free_space != None:
camera_drive_space = round(free_space, 2)
print(str(helpers.none_to_null(startup_time)) + "\n"
+ str(helpers.none_to_null(data_drive_space)) + "\n"
+ str(helpers.none_to_null(camera_drive_space))) | [((21, 7, 21, 20), 'routines.config.load', 'config.load', ({}, {}), '()', True, 'import routines.config as config\n'), ((22, 11, 22, 47), 'os.path.isdir', 'os.path.isdir', ({(22, 25, 22, 46): 'config.data_directory'}, {}), '(config.data_directory)', False, 'import os\n'), ((23, 25, 23, 71), 'routines.helpers.remaining_space', 'helpers.remaining_space', ({(23, 49, 23, 70): 'config.data_directory'}, {}), '(config.data_directory)', True, 'import routines.helpers as helpers\n'), ((28, 48, 29, 36), 'os.path.isdir', 'os.path.isdir', ({(29, 12, 29, 35): 'config.camera_directory'}, {}), '(config.camera_directory)', False, 'import os\n'), ((29, 41, 30, 36), 'os.path.ismount', 'os.path.ismount', ({(30, 12, 30, 35): 'config.camera_directory'}, {}), '(config.camera_directory)', False, 'import os\n'), ((32, 25, 32, 73), 'routines.helpers.remaining_space', 'helpers.remaining_space', ({(32, 49, 32, 72): 'config.camera_directory'}, {}), '(config.camera_directory)', True, 'import routines.helpers as helpers\n'), ((38, 14, 38, 54), 'routines.helpers.none_to_null', 'helpers.none_to_null', ({(38, 35, 38, 53): 'camera_drive_space'}, {}), '(camera_drive_space)', True, 'import routines.helpers as helpers\n'), ((16, 24, 17, 43), 'subprocess.check_output', 'subprocess.check_output', ({(17, 26, 17, 42): "['uptime', '-s']"}, {}), "(['uptime', '-s'])", False, 'import subprocess\n'), ((37, 14, 37, 52), 'routines.helpers.none_to_null', 'helpers.none_to_null', ({(37, 35, 37, 51): 'data_drive_space'}, {}), '(data_drive_space)', True, 'import routines.helpers as helpers\n'), ((36, 14, 36, 48), 'routines.helpers.none_to_null', 'helpers.none_to_null', ({(36, 35, 36, 47): 'startup_time'}, {}), '(startup_time)', True, 'import routines.helpers as helpers\n')] |
pasmuss/cmssw | DQM/DTMonitorModule/python/dtChamberEfficiencyHI_cfi.py | 566f40c323beef46134485a45ea53349f59ae534 | import FWCore.ParameterSet.Config as cms
from RecoMuon.TrackingTools.MuonServiceProxy_cff import MuonServiceProxy
dtEfficiencyMonitor = cms.EDAnalyzer("DTChamberEfficiency",
MuonServiceProxy,
debug = cms.untracked.bool(True),
TrackCollection = cms.InputTag("standAloneMuons"),
theMaxChi2 = cms.double(1000.),
theNSigma = cms.double(3.),
theMinNrec = cms.double(5.),
dt4DSegments = cms.InputTag("dt4DSegments"),
theRPCRecHits = cms.InputTag("dummy"),
thegemRecHits = cms.InputTag("dummy"),
cscSegments = cms.InputTag("dummy"),
RPCLayers = cms.bool(False),
NavigationType = cms.string("Standard")
)
| [((7, 12, 7, 36), 'FWCore.ParameterSet.Config.untracked.bool', 'cms.untracked.bool', ({(7, 31, 7, 35): 'True'}, {}), '(True)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((8, 22, 8, 53), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', ({(8, 35, 8, 52): '"""standAloneMuons"""'}, {}), "('standAloneMuons')", True, 'import FWCore.ParameterSet.Config as cms\n'), ((9, 17, 9, 34), 'FWCore.ParameterSet.Config.double', 'cms.double', ({(9, 28, 9, 33): '1000.0'}, {}), '(1000.0)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((10, 16, 10, 30), 'FWCore.ParameterSet.Config.double', 'cms.double', ({(10, 27, 10, 29): '3.0'}, {}), '(3.0)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((11, 17, 11, 31), 'FWCore.ParameterSet.Config.double', 'cms.double', ({(11, 28, 11, 30): '5.0'}, {}), '(5.0)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((12, 19, 12, 47), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', ({(12, 32, 12, 46): '"""dt4DSegments"""'}, {}), "('dt4DSegments')", True, 'import FWCore.ParameterSet.Config as cms\n'), ((13, 20, 13, 41), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', ({(13, 33, 13, 40): '"""dummy"""'}, {}), "('dummy')", True, 'import FWCore.ParameterSet.Config as cms\n'), ((14, 20, 14, 41), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', ({(14, 33, 14, 40): '"""dummy"""'}, {}), "('dummy')", True, 'import FWCore.ParameterSet.Config as cms\n'), ((15, 18, 15, 39), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', ({(15, 31, 15, 38): '"""dummy"""'}, {}), "('dummy')", True, 'import FWCore.ParameterSet.Config as cms\n'), ((16, 16, 16, 31), 'FWCore.ParameterSet.Config.bool', 'cms.bool', ({(16, 25, 16, 30): 'False'}, {}), '(False)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((17, 21, 17, 43), 'FWCore.ParameterSet.Config.string', 'cms.string', ({(17, 32, 17, 42): '"""Standard"""'}, {}), "('Standard')", True, 'import FWCore.ParameterSet.Config as cms\n')] |
peterminh227/reinmav-gym | gym_reinmav/envs/mujoco/__init__.py | 518122b16b86d59f744b3116e6187dafd49a3de4 | from gym_reinmav.envs.mujoco.mujoco_quad import MujocoQuadEnv
from gym_reinmav.envs.mujoco.mujoco_quad_hovering import MujocoQuadHoveringEnv
from gym_reinmav.envs.mujoco.mujoco_quad_quat import MujocoQuadQuaternionEnv | [] |
jasonivey/scripts | test.py | 09f9702e5ce62abbb7699aae16b45b33711fe856 | #!/usr/bin/env python3
# vim:softtabstop=4:ts=4:sw=4:expandtab:tw=120
from ansimarkup import AnsiMarkup, parse
import csv
import datetime
import operator
import os
from pathlib import Path
import re
import sys
import traceback
_VERBOSE = False
user_tags = {
'error' : parse('<bold><red>'),
'name' : parse('<bold><cyan>'),
'value' : parse('<bold><white>'),
}
am = AnsiMarkup(tags=user_tags)
def _assert_msg(msg):
return am.ansistring(f'<error>{msg}</error>')
def _print_name_value(name, max_name_len, value, prefix=None, postfix=None):
prefix = prefix if prefix is not None else ''
postfix = postfix if postfix is not None else ''
lh = am.ansistring(f'<name>{name}</name>')
rh = am.ansistring(f'<value>{value}</value>')
print(f'{prefix}{lh:{max_name_len + lh.delta}} {rh}{postfix}')
def _get_name_value_compact(name, max_name_len, value, prefix=None, postfix=None):
prefix = prefix if prefix is not None else ''
postfix = postfix if postfix is not None else ''
return am.ansistring(f'{prefix}<name>{name}</name> <value>{value}</value>{postfix}')
def _get_timezone_info():
return datetime.datetime.now(datetime.timezone.utc).astimezone().tzinfo
def _convert_date_time(dt):
return f'{dt:%d-%b-%Y %I:%M:%S%p %Z}'.replace('AM', 'am').replace('PM', 'pm')
def _parse_datetime(dt_str):
dt = datetime.datetime.strptime(dt_str, '%m/%d/%Y %I:%M %p') # Example '11/08/2011 03:00 PM'
tz = _get_timezone_info()
return dt.replace(tzinfo=tz)
def _parse_datetime_row(row):
return _parse_datetime(' '.join(row[2:4]))
def _parse_appointment_row(row, index):
assert len(row) >= 4, _assert_msg(f'row {index} does not have 4 or more columns as required')
appt_time = _parse_datetime(' '.join(row[2:4]))
appt_type = row[0].title()
doctor = row[1].title()
return appt_time, appt_type, doctor
def parse_doctor_appointments(file_name):
path = Path(os.path.expandvars(file_name))
with path.open(newline='', encoding='utf-8') as handle:
reader = csv.reader(handle)
sorted_rows = sorted(reader, key=lambda x: _parse_datetime_row(x))
for index, row in enumerate(sorted_rows):
yield _parse_appointment_row(row, index)
def get_doctors_appointments():
MAX_WIDTH = len('Appointment:')
file_name = '$HOME/Downloads/crump-visits.csv'
for appt_time, appt_type, doctor in parse_doctor_appointments(file_name):
s = _get_name_value_compact('Appointment:', None, _convert_date_time(appt_time), postfix=', ')
s += _get_name_value_compact('Type:', None, appt_type, postfix=', ')
print(s + _get_name_value_compact('Doctor:', None, doctor))
def main(args):
try:
get_doctors_appointments()
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback, file=sys.stdout)
return 1
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| [((21, 5, 21, 31), 'ansimarkup.AnsiMarkup', 'AnsiMarkup', (), '', False, 'from ansimarkup import AnsiMarkup, parse\n'), ((16, 14, 16, 34), 'ansimarkup.parse', 'parse', ({(16, 20, 16, 33): '"""<bold><red>"""'}, {}), "('<bold><red>')", False, 'from ansimarkup import AnsiMarkup, parse\n'), ((17, 14, 17, 35), 'ansimarkup.parse', 'parse', ({(17, 20, 17, 34): '"""<bold><cyan>"""'}, {}), "('<bold><cyan>')", False, 'from ansimarkup import AnsiMarkup, parse\n'), ((18, 14, 18, 36), 'ansimarkup.parse', 'parse', ({(18, 20, 18, 35): '"""<bold><white>"""'}, {}), "('<bold><white>')", False, 'from ansimarkup import AnsiMarkup, parse\n'), ((45, 9, 45, 64), 'datetime.datetime.strptime', 'datetime.datetime.strptime', ({(45, 36, 45, 42): 'dt_str', (45, 44, 45, 63): '"""%m/%d/%Y %I:%M %p"""'}, {}), "(dt_str, '%m/%d/%Y %I:%M %p')", False, 'import datetime\n'), ((60, 16, 60, 45), 'os.path.expandvars', 'os.path.expandvars', ({(60, 35, 60, 44): 'file_name'}, {}), '(file_name)', False, 'import os\n'), ((62, 17, 62, 35), 'csv.reader', 'csv.reader', ({(62, 28, 62, 34): 'handle'}, {}), '(handle)', False, 'import csv\n'), ((79, 45, 79, 59), 'sys.exc_info', 'sys.exc_info', ({}, {}), '()', False, 'import sys\n'), ((80, 8, 80, 86), 'traceback.print_exception', 'traceback.print_exception', (), '', False, 'import traceback\n'), ((39, 11, 39, 55), 'datetime.datetime.now', 'datetime.datetime.now', ({(39, 33, 39, 54): 'datetime.timezone.utc'}, {}), '(datetime.timezone.utc)', False, 'import datetime\n')] |
nogrady/dynamo | RDyn-master/rdyn/test/rdyn_test.py | 4a94453c810cb6cd0eb976c6db9e379cfb2e1f3b | import unittest
import shutil
from rdyn.alg.RDyn_v2 import RDynV2
class RDynTestCase(unittest.TestCase):
def test_rdyn_simplified(self):
print("1")
rdb = RDynV2(size=500, iterations=100)
rdb.execute(simplified=True)
print("2")
rdb = RDynV2(size=500, iterations=100, max_evts=2)
rdb.execute(simplified=True)
print("3")
rdb = RDynV2(size=500, iterations=100, new_node=0.1, del_node=0.1, max_evts=2, paction=0.8)
rdb.execute(simplified=False)
print("Done")
shutil.rmtree("results")
if __name__ == '__main__':
unittest.main()
| [((25, 4, 25, 19), 'unittest.main', 'unittest.main', ({}, {}), '()', False, 'import unittest\n'), ((11, 14, 11, 46), 'rdyn.alg.RDyn_v2.RDynV2', 'RDynV2', (), '', False, 'from rdyn.alg.RDyn_v2 import RDynV2\n'), ((14, 14, 14, 58), 'rdyn.alg.RDyn_v2.RDynV2', 'RDynV2', (), '', False, 'from rdyn.alg.RDyn_v2 import RDynV2\n'), ((17, 14, 17, 99), 'rdyn.alg.RDyn_v2.RDynV2', 'RDynV2', (), '', False, 'from rdyn.alg.RDyn_v2 import RDynV2\n'), ((21, 8, 21, 32), 'shutil.rmtree', 'shutil.rmtree', ({(21, 22, 21, 31): '"""results"""'}, {}), "('results')", False, 'import shutil\n')] |
Jung-Jun-Uk/UNPG | recognition/datasets/build.py | a6f9c1731a68fc035eb8fe8198f5a5c643825a5b | import os
from .kface import KFace
from .ms1m import MS1M
from .bin_datasets import BIN
from .ijb import IJB
def build_datasets(data_cfg, batch_size, cuda, workers, mode, rank=-1):
assert mode in ['train', 'test']
cfg = data_cfg[mode]
if cfg['dataset'] == 'kface':
dataset = KFace(cfg['data_path'], cfg['test_idx_txt'], cfg['acs'], cfg['lux'], cfg['eps'], cfg['pose'],
cfg['img_size'], batch_size, cuda, workers, mode=mode)
elif cfg['dataset'] == 'ms1m':
dataset = MS1M(cfg['data_path'], cfg['preprocessed_file'], cfg['img_size'], cfg['min_img'],
batch_size, cuda, workers, mode=mode, rank=rank)
elif cfg['dataset'] == 'bin':
root, file_names = cfg['root'], cfg['file_names']
if isinstance(file_names, str):
data_path = os.path.join(root, file_names)
dataset = BIN(data_path, cfg['img_size'], batch_size, cuda, workers)
elif isinstance(file_names, list):
data_path = [os.path.join(root, f) for f in file_names]
dataset = [BIN(dp, cfg['img_size'], batch_size, cuda, workers) for dp in data_path]
elif cfg['dataset'] in ['ijbb', 'ijbc']:
dataset = IJB(cfg['root'], cfg['inf_list'], cfg['img_size'], batch_size, cuda, workers)
return dataset
| [((21, 24, 21, 54), 'os.path.join', 'os.path.join', ({(21, 37, 21, 41): 'root', (21, 43, 21, 53): 'file_names'}, {}), '(root, file_names)', False, 'import os\n'), ((24, 25, 24, 46), 'os.path.join', 'os.path.join', ({(24, 38, 24, 42): 'root', (24, 44, 24, 45): 'f'}, {}), '(root, f)', False, 'import os\n')] |
ppinard/django-cd | django_cd/notifications.py | 1bc9304466ace12867df3b18a8ef7f204b9744b4 | """"""
# Standard library modules.
import abc
# Third party modules.
from django.core.mail import send_mail
from django.template import Engine, Context
# Local modules.
from .models import RunState
# Globals and constants variables.
class Notification(metaclass=abc.ABCMeta):
@classmethod
def notify(self, jobrun):
raise NotImplementedError
class EmailNotification(Notification):
def __init__(self, recipients, on_success=False, on_failure=True):
self.recipients = tuple(recipients)
self.on_success = on_success
self.on_failure = on_failure
def __str__(self):
return "email"
def notify(self, jobrun):
if (jobrun.state in [RunState.ERROR, RunState.FAILED] and self.on_failure) or (
jobrun.state == RunState.SUCCESS and self.on_success
):
engine = Engine.get_default()
template = engine.get_template("django_cd/jobrun_report.html")
context = Context({"jobrun": jobrun})
html_message = template.render(context)
send_mail(
subject=f"Job report - {jobrun.name} - {jobrun.state}",
message="",
from_email=None,
recipient_list=self.recipients,
html_message=html_message,
)
| [((35, 21, 35, 41), 'django.template.Engine.get_default', 'Engine.get_default', ({}, {}), '()', False, 'from django.template import Engine, Context\n'), ((37, 22, 37, 49), 'django.template.Context', 'Context', ({(37, 30, 37, 48): "{'jobrun': jobrun}"}, {}), "({'jobrun': jobrun})", False, 'from django.template import Engine, Context\n'), ((40, 12, 46, 13), 'django.core.mail.send_mail', 'send_mail', (), '', False, 'from django.core.mail import send_mail\n')] |
jpazdera/PazdKaha22 | Experiment/ltpFR3_MTurk/ListGen/ltpFR3_listgen.py | 9b3157cbcc68aafc829dbd38f3271f884caf541d | #!/usr/bin/env python2
import random
import itertools
import numpy
import sys
import json
import copy
def make_bins_ltpFR3(semArray):
"""
Creates four equal-width bins of WAS scores, identical to those used in ltpFR2. Then combine the middle two to give
three bins: low similarity, medium similarity, and high similarity.
A coordinate in semRows[i][j] and semCols[i][j] is the index of the jth word pair in semArray that falls in the ith
similarity bin.
"""
semArray_nondiag = semArray[numpy.where(semArray != 1)]
# Find lowest and highest similarity
min_sim = semArray_nondiag.min()
max_sim = semArray_nondiag.max()
# Split up the semantic space into four equal segments
semBins = list(numpy.linspace(min_sim, max_sim, 4))
# Combine the two middle bins by removing the bin boundary between them
# semBins = semBins[:2] + semBins[3:]
# Create bounds for the bins
semBins = zip(*[semBins[i:] + semBins[-1:i] for i in range(2)])
# For word pairs within the bounds of each bin, append the indices to semRows and semCols
semRows = []
semCols = []
for bin in semBins:
(i, j) = ((semArray > bin[0]) & (semArray < bin[1])).nonzero()
semRows.append(i)
semCols.append(j)
return semRows, semCols
def randomize_conditions_ltpFR3(config):
"""
Randomize the conditions for all sessions.
:param config: The imported configuration file, containing all parameters for the experiment
:return: A list of lists, where sublist n contains the ordering of list conditions for the nth session. cond[x][y][0]
defines the length of session x, list y; cond[x][y][1] defines the presentation rate of session x, list y;
cond[x][y][2] defines whether session x, list y uses visual or auditory presentation; cond[x][y][3] defines the
duration of the pre-list distractor task for session x, list y.
"""
options = [c for c in itertools.product(config.listLength, config.presRate, config.modality, config.distDur)]
cond = []
for i in range(config.nSessions):
sess = []
for j in range(config.reps):
random.shuffle(options)
sess += options[:]
cond.append(sess)
return cond
def choose_pairs_ltpFR3(wp_tot, cond, config, semRows, semCols):
"""
Selects word pairs to use in each list of each session.
:param wp_tot: A list containing all the words of the word pool. The order of the words is expected to correspond to
the indices used by semRows and semCols.
:param cond: A list of lists, where sublist n contains the ordering of list conditions for the nth session.
:param config: The imported configuration file, containing all parameters for the experiment.
:param semRows: See make_bins_ltpFR3()
:param semCols: See make_bins_ltpFR3()
:return: pairs - pairs[x][y][z] is the zth word pair in session x, list y
:return: pair_dicts - a list of dictionaries, where each dictionary contains all word pairs from a given session
:return: practice_lists - A list containing two practice lists, each with 18 words
"""
# pairs[x][y][z] will be the zth pair of words in the yth list on session x
pairs = []
# points to the other word in the pair for a given session
pair_dicts = []
# Deep copy the full word pool into full_wp_allowed, so it can be shuffled for each session without altering wp_tot
full_wp = wp_tot[:]
# Make word pairs for each session
session_num = 0
while session_num < config.nSessions:
#print 'Making session', session_num, ':',
#sys.stdout.flush()
# Shuffle the order of the word pool; I believe this is technically only necessary for the first session, in
# order to randomize which words are selected for the practice lists. All other lists have their items randomly
# chosen anyway
'''
IMPORTANT NOTE!!!:
Lists containing more than 2080 elements should not be randomized with shuffle, as explained here:
http://stackoverflow.com/questions/3062741/maximal-length-of-list-to-shuffle-with-python-random-shuffle
The full word pool contains 1638 words, so this is only a concern if the word pool is ever expanded.
'''
random.shuffle(full_wp)
# The first session has two 18-word practice lists
if session_num == 0:
practice_lists = [full_wp[:18], full_wp[18:36]]
sess_wp_allowed = full_wp[36:]
else:
sess_wp_allowed = full_wp[:]
# sess_pairs[x][y] will be the yth pair in the xth list on the current session
sess_pairs = []
# Track number of attempts to create the lists for the current session
sess_tries = 0
# Track whether the session completed successfully
goodSess = True
# Make word pairs for each list in the current session
list_num = 0
while list_num < len(cond[session_num]):
#print list_num,
#sys.stdout.flush()
# list_pairs[x] will be the xth pair in the current list on the current session
list_pairs = []
# Track number of attempts to create the current list
list_tries = 0
# Track whether the list completed successfully
goodList = True
# Retrieve the list length condition for the current list by looking in cond
listLength = cond[session_num][list_num][0]
# Length 12 lists have 2 pairs per bin, length 24 list have 4 pairs per bin
pairs_per_bin = 2 if listLength == 12 else 4
# Select two or four word pairs from each bin (based on list length)
for sem_i in range(len(semRows)):
# The pair for each semantic bin gets placed twice
pair_i = 0
while pair_i < pairs_per_bin:
# Get the indices (within the full word pool) of the words chosen for the current session
available_indices = [wp_tot.index(word) for word in sess_wp_allowed]
# Randomly choose indices/words from those in the current session until one is found that has one
# or more pairs in the current bin
index_word1 = random.choice(available_indices)
while index_word1 not in semRows[sem_i]:
index_word1 = random.choice(available_indices)
# Get the indices of all words whose pairing with the chosen word falls into the correct bin
good_second_indices = semCols[sem_i][semRows[sem_i] == index_word1]
# Eliminate the words that are not available in the session
good_second_indices = [i for i in good_second_indices if wp_tot[i] in sess_wp_allowed]
# Ensure that a word cannot be accidentally paired with itself
if index_word1 in good_second_indices:
del good_second_indices[good_second_indices.index(index_word1)]
# If there are no good words to choose from, restart
if len(good_second_indices) == 0:
list_tries += 1
if list_tries > 10:
goodList = False
break
else:
continue
# Choose the second word randomly
index_word2 = random.choice(good_second_indices)
# Add the pairs to list_pairs, delete them from the pool of allowed words
list_pairs.append([wp_tot[index_word1], wp_tot[index_word2]])
del sess_wp_allowed[sess_wp_allowed.index(wp_tot[index_word1])]
del sess_wp_allowed[sess_wp_allowed.index(wp_tot[index_word2])]
pair_i += 1
# If the list is bad, add the words back to the pool of allowed words
if not goodList:
sess_wp_allowed.extend([x[0] for x in list_pairs] + [x[1] for x in list_pairs])
break
# If the list is good, add the list_pairs to sess_pairs,
if goodList:
sess_pairs.append(list_pairs)
list_num += 1
else:
# Otherwise, try the session again (up to 50 times), then restart
list_pairs = []
sess_tries += 1
if sess_tries > 50:
goodSess = False
break
# If the whole session went successfully
if goodSess:
# Get the pairs from the lists, add them backwards and forwards to sess_pair_dict
sess_pair_dict = dict(itertools.chain(*sess_pairs))
sess_pair_dict.update(dict(zip(sess_pair_dict.values(), sess_pair_dict.keys())))
pair_dicts.append(sess_pair_dict)
pairs.append(sess_pairs)
session_num += 1
else: # If the session did not go well, try again.
sess_pairs = []
print ''
return pairs, pair_dicts, practice_lists
def place_pairs_ltpFR3(pairs, cond):
"""
:param pairs:
:param cond:
:param config:
:return:
"""
# Load all valid list compositions for 12-item lists (small lists are too restrictive to use trial and error)
with open('valid12.json', 'r') as f:
valid12 = json.load(f)['3bin-valid12']
# Loop through sessions
subj_wo = []
for (n, sess_pairs) in enumerate(pairs):
sess_wo = []
#print '\nPlacing session', n, ':',
#sys.stdout.flush()
# Loop through lists within each session
for (m, list_pairs) in enumerate(sess_pairs):
#print m,
#sys.stdout.flush()
# Create pairs of word pairs from the same bin -- one pair will have adjacent presentation, one distant
grouped_pairs = [list(group) for group in
zip([list_pairs[i] for i in range(len(list_pairs)) if i % 2 == 0],
[list_pairs[i] for i in range(len(list_pairs)) if i % 2 == 1])]
# Retrieve list length for the current list
list_length = cond[n][m][0]
# For 12-item lists, select a random solution template and assign word pairs to the variables in the
# template, such that one pair from each bin has adjacent presentation and one pair from each bin has
# distant presentation
if list_length == 12:
# Randomize the ordering of the grouped pairs, as well as the orderings within each group and each pair
adjacents = ['a', 'b', 'c']
distants = ['d', 'e', 'f']
random.shuffle(adjacents)
random.shuffle(distants)
key = {}
for group in grouped_pairs:
random.shuffle(group)
random.shuffle(group[0])
random.shuffle(group[1])
key[adjacents.pop(0)] = group[0]
key[distants.pop(0)] = group[1]
# Choose a random valid solution
list_wo = copy.deepcopy(random.choice(valid12))
# Each entry in the solution list is a string containing a letter followed by 0 or 1
# The letter corresponds to the word pair and the number corresponds to the item in the pair.
# Letters a, b, and c are adjacent presentation pairs; d, e, and f are distant presentation pairs.
for i in range(len(list_wo)):
w = list_wo[i]
list_wo[i] = key[w[0]][int(w[1])]
# For 24-item lists, create two 12-item lists based on random solution templates and concatenate them.
elif list_length == 24:
# Randomize the ordering of the grouped pairs, as well as the orderings within each group and each pair
adjacents1 = ['a', 'b', 'c']
distants1 = ['d', 'e', 'f']
adjacents2 = ['a', 'b', 'c']
distants2 = ['d', 'e', 'f']
random.shuffle(adjacents1)
random.shuffle(distants1)
random.shuffle(adjacents2)
random.shuffle(distants2)
key1 = {}
key2 = {}
for group_num, group in enumerate(grouped_pairs):
random.shuffle(group)
random.shuffle(group[0])
random.shuffle(group[1])
if group_num % 2 == 0:
key1[adjacents1.pop(0)] = group[0]
key1[distants1.pop(0)] = group[1]
else:
key2[adjacents2.pop(0)] = group[0]
key2[distants2.pop(0)] = group[1]
# Choose a random valid solution
list_wo1 = copy.deepcopy(random.choice(valid12))
list_wo2 = copy.deepcopy(random.choice(valid12))
# Each entry in the solution list is a string containing a letter followed by 0 or 1
# The letter corresponds to the word pair and the number corresponds to the item in the pair.
# Letters a, b, and c are adjacent presentation pairs; d, e, and f are distant presentation pairs.
for i in range(len(list_wo1)):
w = list_wo1[i]
list_wo1[i] = key1[w[0]][int(w[1])]
w = list_wo2[i]
list_wo2[i] = key2[w[0]][int(w[1])]
list_wo = list_wo1 + list_wo2
else:
raise ValueError('Function place_pairs_ltpFR3() can only handle word lists of length 12 or 24!')
# Add finalized list to the session
sess_wo.append(list_wo)
subj_wo.append(sess_wo)
return subj_wo
def listgen_ltpFR3(n):
"""
Generate all lists for a participant, including the conditions, word pairs
and word ordering. This function saves the results to a json file labelled
with the participant's number.
"""
import config
# Read in the semantic association matrix
semMat = []
with open(config.w2vfile) as w2vfile:
for word in w2vfile:
wordVals = []
wordValsString = word.split()
for val in wordValsString:
thisVal = float(val)
wordVals.append(thisVal)
semMat.append(wordVals)
semArray = numpy.array(semMat)
# Create three semantic similarity bins and sort word pairs by bin
semRows, semCols = make_bins_ltpFR3(semArray)
# Read in the word pool
with open(config.wpfile) as wpfile:
wp_tot = [x.strip() for x in wpfile.readlines()]
counts = numpy.zeros(len(wp_tot))
for i in range(n):
print '\nSubject ' + str(i) + '\n'
# Randomize list conditions (list length, presentation rate, modality, distractor duration)
condi = randomize_conditions_ltpFR3(config)
# Choose all of the pairs to be used in the experiment
pairs, pair_dicts, practice_lists = choose_pairs_ltpFR3(wp_tot, condi, config, semRows, semCols)
# Create all lists by placing the word pairs in appropriate positions
subj_wo = place_pairs_ltpFR3(pairs, condi)
# Add practice lists
subj_wo[0] = practice_lists + subj_wo[0]
practice_condi = [[18, 1200, 'a', 18000], [18, 1200, 'v', 18000]]
random.shuffle(practice_condi)
condi[0] = practice_condi + condi[0]
d = {'word_order': subj_wo, 'pairs': pair_dicts, 'conditions': condi}
for sess_dict in pair_dicts:
counts[numpy.array([wp_tot.index(w) for w in sess_dict])] += 1
counts[numpy.array([wp_tot.index(w) for w in practice_lists[0]])] += 1
counts[numpy.array([wp_tot.index(w) for w in practice_lists[1]])] += 1
with open('/Users/jessepazdera/AtomProjects/ltpFR3_MTurk/static/pools/lists/%d.js' % i, 'w') as f:
s = 'var sess_info = ' + json.dumps(d) + ';'
f.write(s)
with open('/Users/jessepazdera/AtomProjects/ltpFR3_MTurk/static/pools/lists/counts.json', 'w') as f:
f.write(str([c for c in counts]))
print max(counts), min(counts), len([wp_tot[i] for i in range(len(counts)) if counts[i] == 0])
return counts
if __name__ == "__main__":
nsess = input('How many sessions would you like to generate? ')
counts = listgen_ltpFR3(nsess)
print counts.mean()
print counts.std()
print counts.max()
print counts.min()
| [] |
DwijayDS/fastestimator | fastestimator/dataset/data/cifar10.py | 9b288cb2bd870f971ec4cee09d0b3205e1316a94 | # Copyright 2019 The FastEstimator Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from typing import Tuple
import tensorflow as tf
from fastestimator.dataset.numpy_dataset import NumpyDataset
def load_data(image_key: str = "x", label_key: str = "y") -> Tuple[NumpyDataset, NumpyDataset]:
"""Load and return the CIFAR10 dataset.
Please consider using the ciFAIR10 dataset instead. CIFAR10 contains duplicates between its train and test sets.
Args:
image_key: The key for image.
label_key: The key for label.
Returns:
(train_data, eval_data)
"""
print("\033[93m {}\033[00m".format("FastEstimator-Warn: Consider using the ciFAIR10 dataset instead."))
(x_train, y_train), (x_eval, y_eval) = tf.keras.datasets.cifar10.load_data()
train_data = NumpyDataset({image_key: x_train, label_key: y_train})
eval_data = NumpyDataset({image_key: x_eval, label_key: y_eval})
return train_data, eval_data
| [((35, 43, 35, 80), 'tensorflow.keras.datasets.cifar10.load_data', 'tf.keras.datasets.cifar10.load_data', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((36, 17, 36, 71), 'fastestimator.dataset.numpy_dataset.NumpyDataset', 'NumpyDataset', ({(36, 30, 36, 70): '{image_key: x_train, label_key: y_train}'}, {}), '({image_key: x_train, label_key: y_train})', False, 'from fastestimator.dataset.numpy_dataset import NumpyDataset\n'), ((37, 16, 37, 68), 'fastestimator.dataset.numpy_dataset.NumpyDataset', 'NumpyDataset', ({(37, 29, 37, 67): '{image_key: x_eval, label_key: y_eval}'}, {}), '({image_key: x_eval, label_key: y_eval})', False, 'from fastestimator.dataset.numpy_dataset import NumpyDataset\n')] |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.