repo_name (string, 7–94 chars) | repo_path (string, 4–237 chars) | repo_head_hexsha (string, 40 chars) | content (string, 10–680k chars) | apis (string, 2–840k chars) |
---|---|---|---|---|
29rj/Fusion | FusionIIIT/applications/placement_cell/api/serializers.py | bc2941a67532e183adeb0bc4042df0b182b9e3aa | from rest_framework.authtoken.models import Token
from rest_framework import serializers
from applications.placement_cell.models import (Achievement, Course, Education,
Experience, Has, Patent,
Project, Publication, Skill,
PlacementStatus, NotifyStudent)
class SkillSerializer(serializers.ModelSerializer):
class Meta:
model = Skill
fields = ('__all__')
class HasSerializer(serializers.ModelSerializer):
skill_id = SkillSerializer()
class Meta:
model = Has
fields = ('skill_id','skill_rating')
def create(self, validated_data):
skill = validated_data.pop('skill_id')
skill_id, created = Skill.objects.get_or_create(**skill)
try:
has_obj = Has.objects.create(skill_id=skill_id,**validated_data)
except:
raise serializers.ValidationError({'skill': 'This skill is already present'})
return has_obj
class EducationSerializer(serializers.ModelSerializer):
class Meta:
model = Education
fields = ('__all__')
class CourseSerializer(serializers.ModelSerializer):
class Meta:
model = Course
fields = ('__all__')
class ExperienceSerializer(serializers.ModelSerializer):
class Meta:
model = Experience
fields = ('__all__')
class ProjectSerializer(serializers.ModelSerializer):
class Meta:
model = Project
fields = ('__all__')
class AchievementSerializer(serializers.ModelSerializer):
class Meta:
model = Achievement
fields = ('__all__')
class PublicationSerializer(serializers.ModelSerializer):
class Meta:
model = Publication
fields = ('__all__')
class PatentSerializer(serializers.ModelSerializer):
class Meta:
model = Patent
fields = ('__all__')
class NotifyStudentSerializer(serializers.ModelSerializer):
class Meta:
model = NotifyStudent
fields = ('__all__')
class PlacementStatusSerializer(serializers.ModelSerializer):
notify_id = NotifyStudentSerializer()
class Meta:
model = PlacementStatus
fields = ('notify_id', 'invitation', 'placed', 'timestamp', 'no_of_days')
| [((24, 28, 24, 64), 'applications.placement_cell.models.Skill.objects.get_or_create', 'Skill.objects.get_or_create', ({}, {}), '(**skill)', False, 'from applications.placement_cell.models import Achievement, Course, Education, Experience, Has, Patent, Project, Publication, Skill, PlacementStatus, NotifyStudent\n'), ((26, 22, 26, 76), 'applications.placement_cell.models.Has.objects.create', 'Has.objects.create', (), '', False, 'from applications.placement_cell.models import Achievement, Course, Education, Experience, Has, Patent, Project, Publication, Skill, PlacementStatus, NotifyStudent\n'), ((28, 18, 28, 89), 'rest_framework.serializers.ValidationError', 'serializers.ValidationError', ({(28, 46, 28, 88): "{'skill': 'This skill is already present'}"}, {}), "({'skill': 'This skill is already present'})", False, 'from rest_framework import serializers\n')] |
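A short usage sketch may clarify how `HasSerializer`'s nested `create()` is driven. This is not part of the repository: it assumes a configured Django project with the `placement_cell` app installed, and `skill_name` is a placeholder for whatever fields the real `Skill` model defines.

```python
# Hedged usage sketch -- assumes a configured Django project; 'skill_name'
# is a placeholder field name, not taken from the real Skill model.
payload = {
    "skill_id": {"skill_name": "Python"},  # consumed by the nested SkillSerializer
    "skill_rating": 4,
}
serializer = HasSerializer(data=payload)
if serializer.is_valid():
    # create() pops the nested skill data, get_or_create()s the Skill row,
    # and converts a duplicate Has row into a ValidationError.
    has_obj = serializer.save()  # any required FKs would be passed to save()
else:
    print(serializer.errors)
```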
thinkAmi-sandbox/django-datatables-view-sample | concat_col_app/factories.py | ac3df721089489e61c09ac75d320be3704c72105 | import factory
from concat_col_app.models import Color, Apple
class ColorFactory(factory.django.DjangoModelFactory):
class Meta:
model = Color
class AppleFactory(factory.django.DjangoModelFactory):
class Meta:
model = Apple
| [] |
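The two factories declare no field defaults, so the only thing worth sketching is the factory_boy strategies they inherit. The sketch below is hypothetical and needs a configured Django project with the `concat_col_app` models and a test database.

```python
# Hedged usage sketch -- requires a configured Django project / test database.
color = ColorFactory()        # default "create" strategy: saves a Color row
apple = AppleFactory.build()  # "build" strategy: unsaved Apple instance, no DB hit
```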
HansolChoe/defects4cpp | defects4cpp/errors/argparser.py | cb9e3db239c50e6ec38127cec117865f0ee7a5cf | from pathlib import Path
from typing import Dict
from errors.common.exception import DppError
class DppArgparseError(DppError):
pass
class DppArgparseTaxonomyNotFoundError(DppArgparseError):
def __init__(self, taxonomy_name: str):
super().__init__(f"taxonomy '{taxonomy_name}' does not exist")
self.taxonomy_name: str = taxonomy_name
class DppArgparseNotProjectDirectory(DppArgparseError):
def __init__(self, path: Path):
super().__init__(f"directory '{str(path)}' is not a defect taxonomy project")
self.path: Path = path
class DppArgparseDefectIndexError(DppArgparseError):
def __init__(self, index: int):
super().__init__(f"invalid index '{index}' of defects")
self.index: int = index
class DppArgparseFileNotFoundError(DppArgparseError, FileNotFoundError):
def __init__(self, path: str):
super().__init__()
self.path: str = path
class DppArgparseInvalidEnvironment(DppArgparseError):
def __init__(self, value: str):
super().__init__(
f"invalid environment variable format '{value}' (should be KEY=VALUE)"
)
self.value: str = value
class DppArgparseInvalidConfigError(DppArgparseError):
def __init__(self):
super().__init__()
class DppArgparseConfigCorruptedError(DppArgparseError):
def __init__(self, data: Dict):
super().__init__(f"config is corrupted: {data}")
self.data = data
class DppArgparseInvalidCaseExpressionError(DppArgparseError):
def __init__(self, index: int, name: str, cases: int, expr: str):
super().__init__(
f"Defect#{index} of {name} has {cases} test cases, but expression was: {expr}"
)
self.index: int = index
self.name: str = name
self.cases: int = cases
self.expr: str = expr
| [] |
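Each of these classes is a small data-carrying subclass of `DppError`, so callers can both report and inspect a failure. A hedged sketch follows; the `lookup_taxonomy` helper is invented for illustration and it assumes the `errors` package is importable.

```python
# Hedged sketch: raising and consuming one of the argparse errors.
def lookup_taxonomy(name: str, known: dict):
    # 'known' maps taxonomy names to metadata; invented for this example.
    try:
        return known[name]
    except KeyError:
        raise DppArgparseTaxonomyNotFoundError(name) from None

try:
    lookup_taxonomy("yara", known={})
except DppArgparseTaxonomyNotFoundError as exc:
    print(exc)                # taxonomy 'yara' does not exist
    print(exc.taxonomy_name)  # the offending name travels with the exception
```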
wang97zh/EVS-Net-1 | utils/__init__.py | 3a8457c2d5281b8805ec523f9ced738ccf49d5f5 |
from .utility import *
from .tricks import *
from .tensorlog import *
from .self_op import *
from .resume import *
from .optims import *
from .metric import *
| [] |
calvinfeng/openvino | model-optimizer/extensions/front/mxnet/arange_ext.py | 11f591c16852637506b1b40d083b450e56d0c8ac | """
Copyright (C) 2018-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from extensions.ops.range import Range
from mo.front.extractor import FrontExtractorOp
from mo.front.mxnet.extractors.utils import get_mxnet_layer_attrs
from mo.graph.graph import Node
class ArangeExt(FrontExtractorOp):
op = '_arange'
enabled = True
@classmethod
def extract(cls, node: Node):
attrs = get_mxnet_layer_attrs(node.symbol_dict)
Range.update_node_stat(node, {
'start': attrs.int('start', 0),
'stop': attrs.int('stop', 0),
'repeat': attrs.int('repeat', 1),
'step': attrs.float('step', 1),
'dtype': np.dtype(attrs.str('dtype ', 'float32'))
})
return cls.enabled
| [((31, 16, 31, 55), 'mo.front.mxnet.extractors.utils.get_mxnet_layer_attrs', 'get_mxnet_layer_attrs', ({(31, 38, 31, 54): 'node.symbol_dict'}, {}), '(node.symbol_dict)', False, 'from mo.front.mxnet.extractors.utils import get_mxnet_layer_attrs\n')] |
lucasforever24/arcface_noonan | fold_cur_trans.py | 9d805a0d4d478e347a9084ad6ce24fe4c8dc5e65 | import cv2
from PIL import Image
import argparse
from pathlib import Path
from multiprocessing import Process, Pipe,Value,Array
import torch
from config import get_config
from mtcnn import MTCNN
from Learner_trans_tf import face_learner
from utils import load_facebank, draw_box_name, prepare_facebank, save_label_score, label_binarize
from sklearn.metrics import roc_curve, auc, precision_recall_curve, average_precision_score
from sklearn.model_selection import KFold
import os
import glob
import shutil
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
import datetime
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='for face verification')
parser.add_argument("-ds", "--dataset_dir", help="where to get data", default="noonan", type=str)
parser.add_argument('-sd','--stored_result_dir',help='where to store data as np arrays',
default="results/trans/", type=str)
parser.add_argument("-k", "--kfold", help="returns the number of splitting iterations in the cross-validator.",
default=10, type=int)
parser.add_argument("-e", "--epochs", help="training epochs", default=20, type=int)
parser.add_argument("-n", "--names_considered", help="names for different types considered, separated by commas",
default="normal,noonan,others", type=str)
parser.add_argument("-g", "--gpu_id", help="gpu id to use", default="", type=str)
parser.add_argument("-s", "--use_shuffled_kfold", help="whether to use shuffled kfold.", action="store_true")
parser.add_argument("-rs", "--random_seed", help="random seed used for k-fold split.", default=6, type=int)
parser.add_argument("-tta", "--tta", help="whether test time augmentation",action="store_true")
parser.add_argument("-a", "--additional_data_dir", help="where to get the additional data",
default="", type=str)
parser.add_argument("-ta", "--additional_test_or_train", help="use additional data in only train, or test, or both",
default="", type=str)
parser.add_argument("-as", "--stylegan_data_dir", help="where to get the additional data",
default="", type=str)
parser.add_argument("-ts", "--stylegan_test_or_train", help="use stylegan data in only train, or test, or both",
default="", type=str)
parser.add_argument("-tf", "--transfer", help="how many layer(s) used for transfer learning, "
"but 0 means retraining the whole network.", default=0, type=int)
parser.add_argument("-ac", "--arch", help="types of model used for encoder", default="mobile", type=str)
args = parser.parse_args()
for arg in vars(args):
print(arg+':', getattr(args, arg))
emore_dir = 'faces_emore'
conf = get_config(True, args)
conf.emore_folder = conf.data_path/emore_dir
mtcnn = MTCNN()
print('mtcnn loaded')
names_considered = args.names_considered.strip().split(',')
exp_name = args.dataset_dir[:4]
if args.additional_data_dir:
if 'LAG' in args.additional_data_dir:
exp_name += '_lag'
elif 'literature' in args.additional_data_dir:
exp_name += '_ltr'
if args.kfold != 10:
exp_name += ('_k' + str(args.kfold))
if args.epochs != 20:
exp_name += ('_e' + str(args.epochs))
if args.transfer != 0 and args.transfer != 1:
exp_name += ('_td' + str(args.transfer))
if args.use_shuffled_kfold:
exp_name += ('_s' + str(args.random_seed))
print(exp_name)
# prepare folders
raw_dir = 'raw_112'
verify_type = 'trans'
if args.use_shuffled_kfold:
verify_type += '_shuffled'
# train_dir = conf.facebank_path/args.dataset_dir/verify_type/'train'
train_dir = conf.emore_folder/'imgs'
test_dir = conf.emore_folder/'test'
conf.facebank_path = train_dir
if os.path.exists(train_dir):
shutil.rmtree(train_dir)
if os.path.exists(test_dir):
shutil.rmtree(test_dir)
os.mkdir(train_dir)
os.mkdir(test_dir)
for name in names_considered:
os.makedirs(str(train_dir) + '/' + name, exist_ok=True)
os.makedirs(str(test_dir) + '/' + name, exist_ok=True)
if args.stylegan_data_dir:
#e.g. smile_refine_mtcnn_112_divi
full_stylegan_dir = str(conf.data_path/'facebank'/'stylegan'/args.stylegan_data_dir)
stylegan_folders = os.listdir(full_stylegan_dir)
if args.additional_data_dir:
full_additional_dir = str(conf.data_path/'facebank'/args.additional_data_dir)
# init kfold
if args.use_shuffled_kfold:
kf = KFold(n_splits=args.kfold, shuffle=True, random_state=args.random_seed)
else:
kf = KFold(n_splits=args.kfold, shuffle=False, random_state=None)
# collect and split raw data
data_dict = {}
idx_gen = {}
for name in names_considered:
tmp_list = glob.glob(str(conf.data_path/'facebank'/args.dataset_dir/raw_dir) +
'/' + name + '*')
if 'innm' in args.stylegan_data_dir:
tmp_list = tmp_list + glob.glob(str(full_stylegan_dir) + '/' + name + '*')
stylegan_folders = []
print(str(conf.data_path/'facebank'/args.dataset_dir/raw_dir))
data_dict[name] = np.array(tmp_list)
idx_gen[name] = kf.split(data_dict[name])
if 'literature' in args.additional_data_dir:
data_dict['ltr'] = np.array(glob.glob(str(full_additional_dir) + '/*'))
idx_gen['ltr'] = kf.split(data_dict['ltr'])
score_names = []
scores = []
wrong_names = []
args.stored_result_path = args.stored_result_dir + os.sep + str(datetime.datetime.now())[:19]
if not os.path.exists(args.stored_result_path):
os.mkdir(args.stored_result_path)
# for fold_idx, (train_index, test_index) in enumerate(kf.split(data_dict[names_considered[0]])):
for fold_idx in range(args.kfold):
train_set = {}
test_set = {}
for name in names_considered:
(train_index, test_index) = next(idx_gen[name])
train_set[name], test_set[name] = data_dict[name][train_index], data_dict[name][test_index]
if 'ltr' in data_dict.keys():
(train_index, test_index) = next(idx_gen['ltr'])
train_set['ltr'], test_set['ltr'] = data_dict['ltr'][train_index], data_dict['ltr'][test_index]
if 'train' in args.additional_test_or_train:
train_set['noonan'] = np.concatenate((train_set['noonan'], train_set['ltr']))
if 'test' in args.additional_test_or_train:
test_set['noonan'] = np.concatenate((test_set['noonan'], test_set['ltr']))
# remove previous data
prev = glob.glob(str(train_dir) + '/*/*')
for p in prev:
os.remove(p)
prev = glob.glob(str(test_dir) + '/*/*')
for p in prev:
os.remove(p)
# save trains to conf.facebank_path/args.dataset_dir/'train' and
# tests to conf.data_path/'facebank'/args.dataset_dir/'test'
# count unbalanced data
train_count = {}
test_count = {}
for name in names_considered:
train_count[name] = 0
for i in range(len(train_set[name])):
img_folder = str(train_set[name][i])
for img in os.listdir(img_folder):
shutil.copy(img_folder + os.sep + str(img),
os.path.join(str(train_dir), name, str(img)))
train_count[name] += 1
# addition data from stylegan
if 'interp' not in data_dict.keys():
folder = os.path.basename(train_set[name][i])
if args.stylegan_data_dir and ('train' in args.stylegan_test_or_train) and (folder in stylegan_folders):
for img in os.listdir(full_stylegan_dir + os.sep + folder):
shutil.copy(os.path.join(full_stylegan_dir, folder, str(img)),
os.path.join(str(train_dir), name, str(img)))
# ('/'.join(train_set[name][i].strip().split('/')[:-2]) +
# '/' + verify_type + '/train/' + name + os.sep + img))
train_count[name] += 1
# test
for i in range(len(test_set[name])):
test_count[name] = 0
img_folder = str(test_set[name][i])
for img in os.listdir(img_folder):
shutil.copy(img_folder + os.sep + str(img),
os.path.join(str(test_dir), name, str(img)))
test_count[name] += 1
# addition data from stylegan
if 'interp' not in data_dict.keys():
folder = os.path.basename(test_set[name][i])
if args.stylegan_data_dir and ('test' in args.stylegan_test_or_train) and (folder in stylegan_folders):
# and
# (folder not in ['noonan7','noonan19','noonan23','normal9','normal20','normal23'])):
for img in os.listdir(full_stylegan_dir + os.sep + folder):
shutil.copy(os.path.join(full_stylegan_dir, folder, str(img)),
os.path.join(str(test_dir), name, str(img)))
test_count[name] += 1
print(train_count, test_count)
# deal with unbalanced data
"""
if train_count['normal'] // train_count['noonan'] > 1:
aug_num = train_count['normal'] // train_count['noonan'] - 1
for img in os.listdir(os.path.join(str(train_dir), 'noonan')):
for aug_idx in range(aug_num):
aug_img = img[:img.rfind('.')] + '_' + str(aug_idx) + img[img.rfind('.'):]
shutil.copy(os.path.join(str(train_dir), 'noonan', img),
os.path.join(str(train_dir), 'noonan', aug_img))
"""
if 'fake' in args.additional_data_dir:
fake_dict = {'noonan':'normal', 'normal':'noonan'}
full_additional_dir = conf.data_path/'facebank'/'noonan+normal'/args.additional_data_dir
add_data = glob.glob(str(full_additional_dir) + os.sep + '*.png')
print('additional:', args.additional_data_dir, len(add_data))
for name in names_considered:
for img_f in add_data:
if name in img_f.strip().split(os.sep)[-1]:
# print('source:', img_f)
# print('copy to:', img_f.replace(str(full_additional_dir),
# str(train_dir) + os.sep + fake_dict[name]))
# print('copy to:', img_f.replace(args.additional_data_dir,
# verify_type + '/train/' + name))
shutil.copy(img_f, os.path.join(str(train_dir), fake_dict[name], os.path.basename(img_f)))
print(fold_idx)
print('datasets ready')
conf_train = get_config(True, args)
conf_train.emore_folder = conf.data_path/emore_dir
conf_train.stored_result_dir = args.stored_result_path
learner = face_learner(conf=conf_train, transfer=args.transfer, ext=exp_name+'_'+str(fold_idx))
# conf, inference=False, transfer=0
if args.transfer != 0:
learner.load_state(conf.save_path, False, True)
print('learner loaded')
learner.train(conf_train, args.epochs)
print('learner retrained.')
learner.save_state()
print('Model is saved')
# prepare_facebank
targets, names, names_idx = prepare_facebank(conf, learner.model, mtcnn, tta = args.tta)
print('names_classes:', names)
noonan_idx = names_idx['noonan']
print('facebank updated')
for path in test_dir.iterdir():
if path.is_file():
continue
# print(path)
for fil in path.iterdir():
# print(fil)
orig_name = ''.join([i for i in fil.name.strip().split('.')[0].split('_')[0] if not i.isdigit()])
for name in names_idx.keys():
if name in orig_name:
score_names.append(names_idx[name])
"""
if orig_name not in names_considered:
print("Un-considered name:", fil.name)
continue
"""
frame = cv2.imread(str(fil))
image = Image.fromarray(frame)
faces = [image,]
distance = learner.binfer(conf, faces, targets, args.tta)
label = score_names[-1]
score = np.exp(distance.dot(-1))
pred = np.argmax(score, 1)
if pred != label:
wrong_names.append(orig_name)
scores.append(score)
score_names = np.array(score_names)
wrong_names = np.array(wrong_names)
score_np = np.squeeze(np.array(scores))
n_classes = score_np.shape[1]
score_names = label_binarize(score_names, classes=range(n_classes))
score_sum = np.zeros([score_np.shape[0], 1])
for i in range(n_classes):
score_sum += score_np[:, i, None] # keep the dimension
relative_scores = (score_np / score_sum)
total_scores = relative_scores.ravel()
total_names = score_names.ravel()
name_path = os.path.join(args.stored_result_path, 'wrong_names.npy')
save_label_score(name_path, wrong_names)
label_path = os.path.join(args.stored_result_path, 'labels_trans.npy')
save_label_score(label_path, score_names)
score_path = os.path.join(args.stored_result_path, 'scores_trans.npy')
save_label_score(score_path, relative_scores)
print('saved!')
# Compute ROC curve and ROC area for noonan
fpr, tpr, _ = roc_curve(total_names, total_scores) #scores_np[:, noonan_idx]
roc_auc = auc(fpr, tpr)
# For PR curve
precision, recall, _ = precision_recall_curve(total_names, total_scores)
average_precision = average_precision_score(total_names, total_scores)
# plots
plt.figure()
colors = list(mcolors.TABLEAU_COLORS)
lw = 2
plt.plot(fpr, tpr, color='darkorange',
lw=lw, label='ROC curve (area = %0.4f)' % roc_auc)
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC_{}'.format(exp_name))
plt.legend(loc="lower right")
plt.savefig(args.stored_result_path + os.sep + '/fp_tp_{}.png'.format(exp_name))
plt.close()
# plt.show()
plt.figure()
plt.step(recall, precision, where='post')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('Average precision score ({}): AP={:0.4f}'.format(exp_name, average_precision))
plt.savefig(args.stored_result_path + os.sep + '/pr_{}.png'.format(exp_name))
plt.close()
| [((23, 13, 23, 73), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((53, 11, 53, 33), 'config.get_config', 'get_config', ({(53, 22, 53, 26): 'True', (53, 28, 53, 32): 'args'}, {}), '(True, args)', False, 'from config import get_config\n'), ((56, 12, 56, 19), 'mtcnn.MTCNN', 'MTCNN', ({}, {}), '()', False, 'from mtcnn import MTCNN\n'), ((89, 7, 89, 32), 'os.path.exists', 'os.path.exists', ({(89, 22, 89, 31): 'train_dir'}, {}), '(train_dir)', False, 'import os\n'), ((91, 7, 91, 31), 'os.path.exists', 'os.path.exists', ({(91, 22, 91, 30): 'test_dir'}, {}), '(test_dir)', False, 'import os\n'), ((93, 4, 93, 23), 'os.mkdir', 'os.mkdir', ({(93, 13, 93, 22): 'train_dir'}, {}), '(train_dir)', False, 'import os\n'), ((94, 4, 94, 22), 'os.mkdir', 'os.mkdir', ({(94, 13, 94, 21): 'test_dir'}, {}), '(test_dir)', False, 'import os\n'), ((289, 18, 289, 39), 'numpy.array', 'np.array', ({(289, 27, 289, 38): 'score_names'}, {}), '(score_names)', True, 'import numpy as np\n'), ((290, 18, 290, 39), 'numpy.array', 'np.array', ({(290, 27, 290, 38): 'wrong_names'}, {}), '(wrong_names)', True, 'import numpy as np\n'), ((295, 16, 295, 48), 'numpy.zeros', 'np.zeros', ({(295, 25, 295, 47): '[score_np.shape[0], 1]'}, {}), '([score_np.shape[0], 1])', True, 'import numpy as np\n'), ((303, 16, 303, 72), 'os.path.join', 'os.path.join', ({(303, 29, 303, 52): 'args.stored_result_path', (303, 54, 303, 71): '"""wrong_names.npy"""'}, {}), "(args.stored_result_path, 'wrong_names.npy')", False, 'import os\n'), ((304, 4, 304, 44), 'utils.save_label_score', 'save_label_score', ({(304, 21, 304, 30): 'name_path', (304, 32, 304, 43): 'wrong_names'}, {}), '(name_path, wrong_names)', False, 'from utils import load_facebank, draw_box_name, prepare_facebank, save_label_score, label_binarize\n'), ((305, 17, 305, 74), 'os.path.join', 'os.path.join', ({(305, 30, 305, 53): 'args.stored_result_path', (305, 55, 305, 73): '"""labels_trans.npy"""'}, {}), "(args.stored_result_path, 'labels_trans.npy')", False, 'import os\n'), ((306, 4, 306, 45), 'utils.save_label_score', 'save_label_score', ({(306, 21, 306, 31): 'label_path', (306, 33, 306, 44): 'score_names'}, {}), '(label_path, score_names)', False, 'from utils import load_facebank, draw_box_name, prepare_facebank, save_label_score, label_binarize\n'), ((307, 17, 307, 74), 'os.path.join', 'os.path.join', ({(307, 30, 307, 53): 'args.stored_result_path', (307, 55, 307, 73): '"""scores_trans.npy"""'}, {}), "(args.stored_result_path, 'scores_trans.npy')", False, 'import os\n'), ((308, 4, 308, 49), 'utils.save_label_score', 'save_label_score', ({(308, 21, 308, 31): 'score_path', (308, 33, 308, 48): 'relative_scores'}, {}), '(score_path, relative_scores)', False, 'from utils import load_facebank, draw_box_name, prepare_facebank, save_label_score, label_binarize\n'), ((312, 18, 312, 54), 'sklearn.metrics.roc_curve', 'roc_curve', ({(312, 28, 312, 39): 'total_names', (312, 41, 312, 53): 'total_scores'}, {}), '(total_names, total_scores)', False, 'from sklearn.metrics import roc_curve, auc, precision_recall_curve, average_precision_score\n'), ((313, 14, 313, 27), 'sklearn.metrics.auc', 'auc', ({(313, 18, 313, 21): 'fpr', (313, 23, 313, 26): 'tpr'}, {}), '(fpr, tpr)', False, 'from sklearn.metrics import roc_curve, auc, precision_recall_curve, average_precision_score\n'), ((316, 27, 316, 76), 'sklearn.metrics.precision_recall_curve', 'precision_recall_curve', ({(316, 50, 316, 61): 'total_names', (316, 63, 316, 75): 'total_scores'}, {}), '(total_names, 
total_scores)', False, 'from sklearn.metrics import roc_curve, auc, precision_recall_curve, average_precision_score\n'), ((317, 24, 317, 74), 'sklearn.metrics.average_precision_score', 'average_precision_score', ({(317, 48, 317, 59): 'total_names', (317, 61, 317, 73): 'total_scores'}, {}), '(total_names, total_scores)', False, 'from sklearn.metrics import roc_curve, auc, precision_recall_curve, average_precision_score\n'), ((320, 4, 320, 16), 'matplotlib.pyplot.figure', 'plt.figure', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((323, 4, 324, 63), 'matplotlib.pyplot.plot', 'plt.plot', (), '', True, 'import matplotlib.pyplot as plt\n'), ((325, 4, 325, 65), 'matplotlib.pyplot.plot', 'plt.plot', (), '', True, 'import matplotlib.pyplot as plt\n'), ((326, 4, 326, 24), 'matplotlib.pyplot.xlim', 'plt.xlim', ({(326, 13, 326, 23): '[0.0, 1.0]'}, {}), '([0.0, 1.0])', True, 'import matplotlib.pyplot as plt\n'), ((327, 4, 327, 25), 'matplotlib.pyplot.ylim', 'plt.ylim', ({(327, 13, 327, 24): '[0.0, 1.05]'}, {}), '([0.0, 1.05])', True, 'import matplotlib.pyplot as plt\n'), ((328, 4, 328, 37), 'matplotlib.pyplot.xlabel', 'plt.xlabel', ({(328, 15, 328, 36): '"""False Positive Rate"""'}, {}), "('False Positive Rate')", True, 'import matplotlib.pyplot as plt\n'), ((329, 4, 329, 36), 'matplotlib.pyplot.ylabel', 'plt.ylabel', ({(329, 15, 329, 35): '"""True Positive Rate"""'}, {}), "('True Positive Rate')", True, 'import matplotlib.pyplot as plt\n'), ((331, 4, 331, 33), 'matplotlib.pyplot.legend', 'plt.legend', (), '', True, 'import matplotlib.pyplot as plt\n'), ((333, 4, 333, 15), 'matplotlib.pyplot.close', 'plt.close', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((336, 4, 336, 16), 'matplotlib.pyplot.figure', 'plt.figure', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((337, 4, 337, 45), 'matplotlib.pyplot.step', 'plt.step', (), '', True, 'import matplotlib.pyplot as plt\n'), ((338, 4, 338, 24), 'matplotlib.pyplot.xlabel', 'plt.xlabel', ({(338, 15, 338, 23): '"""Recall"""'}, {}), "('Recall')", True, 'import matplotlib.pyplot as plt\n'), ((339, 4, 339, 27), 'matplotlib.pyplot.ylabel', 'plt.ylabel', ({(339, 15, 339, 26): '"""Precision"""'}, {}), "('Precision')", True, 'import matplotlib.pyplot as plt\n'), ((340, 4, 340, 25), 'matplotlib.pyplot.ylim', 'plt.ylim', ({(340, 13, 340, 24): '[0.0, 1.05]'}, {}), '([0.0, 1.05])', True, 'import matplotlib.pyplot as plt\n'), ((341, 4, 341, 24), 'matplotlib.pyplot.xlim', 'plt.xlim', ({(341, 13, 341, 23): '[0.0, 1.0]'}, {}), '([0.0, 1.0])', True, 'import matplotlib.pyplot as plt\n'), ((344, 4, 344, 15), 'matplotlib.pyplot.close', 'plt.close', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((90, 8, 90, 32), 'shutil.rmtree', 'shutil.rmtree', ({(90, 22, 90, 31): 'train_dir'}, {}), '(train_dir)', False, 'import shutil\n'), ((92, 8, 92, 31), 'shutil.rmtree', 'shutil.rmtree', ({(92, 22, 92, 30): 'test_dir'}, {}), '(test_dir)', False, 'import shutil\n'), ((103, 27, 103, 56), 'os.listdir', 'os.listdir', ({(103, 38, 103, 55): 'full_stylegan_dir'}, {}), '(full_stylegan_dir)', False, 'import os\n'), ((109, 13, 109, 84), 'sklearn.model_selection.KFold', 'KFold', (), '', False, 'from sklearn.model_selection import KFold\n'), ((111, 13, 111, 73), 'sklearn.model_selection.KFold', 'KFold', (), '', False, 'from sklearn.model_selection import KFold\n'), ((123, 26, 123, 44), 'numpy.array', 'np.array', ({(123, 35, 123, 43): 'tmp_list'}, {}), '(tmp_list)', True, 'import numpy as np\n'), ((136, 11, 136, 50), 'os.path.exists', 
'os.path.exists', ({(136, 26, 136, 49): 'args.stored_result_path'}, {}), '(args.stored_result_path)', False, 'import os\n'), ((137, 8, 137, 41), 'os.mkdir', 'os.mkdir', ({(137, 17, 137, 40): 'args.stored_result_path'}, {}), '(args.stored_result_path)', False, 'import os\n'), ((239, 21, 239, 43), 'config.get_config', 'get_config', ({(239, 32, 239, 36): 'True', (239, 38, 239, 42): 'args'}, {}), '(True, args)', False, 'from config import get_config\n'), ((257, 36, 257, 96), 'utils.prepare_facebank', 'prepare_facebank', (), '', False, 'from utils import load_facebank, draw_box_name, prepare_facebank, save_label_score, label_binarize\n'), ((291, 26, 291, 42), 'numpy.array', 'np.array', ({(291, 35, 291, 41): 'scores'}, {}), '(scores)', True, 'import numpy as np\n'), ((158, 12, 158, 24), 'os.remove', 'os.remove', ({(158, 22, 158, 23): 'p'}, {}), '(p)', False, 'import os\n'), ((161, 12, 161, 24), 'os.remove', 'os.remove', ({(161, 22, 161, 23): 'p'}, {}), '(p)', False, 'import os\n'), ((135, 68, 135, 91), 'datetime.datetime.now', 'datetime.datetime.now', ({}, {}), '()', False, 'import datetime\n'), ((151, 38, 151, 93), 'numpy.concatenate', 'np.concatenate', ({(151, 53, 151, 92): "(train_set['noonan'], train_set['ltr'])"}, {}), "((train_set['noonan'], train_set['ltr']))", True, 'import numpy as np\n'), ((153, 37, 153, 90), 'numpy.concatenate', 'np.concatenate', ({(153, 52, 153, 89): "(test_set['noonan'], test_set['ltr'])"}, {}), "((test_set['noonan'], test_set['ltr']))", True, 'import numpy as np\n'), ((173, 27, 173, 49), 'os.listdir', 'os.listdir', ({(173, 38, 173, 48): 'img_folder'}, {}), '(img_folder)', False, 'import os\n'), ((192, 27, 192, 49), 'os.listdir', 'os.listdir', ({(192, 38, 192, 48): 'img_folder'}, {}), '(img_folder)', False, 'import os\n'), ((279, 24, 279, 46), 'PIL.Image.fromarray', 'Image.fromarray', ({(279, 40, 279, 45): 'frame'}, {}), '(frame)', False, 'from PIL import Image\n'), ((284, 23, 284, 42), 'numpy.argmax', 'np.argmax', ({(284, 33, 284, 38): 'score', (284, 40, 284, 41): '1'}, {}), '(score, 1)', True, 'import numpy as np\n'), ((179, 29, 179, 65), 'os.path.basename', 'os.path.basename', ({(179, 46, 179, 64): 'train_set[name][i]'}, {}), '(train_set[name][i])', False, 'import os\n'), ((198, 29, 198, 64), 'os.path.basename', 'os.path.basename', ({(198, 46, 198, 63): 'test_set[name][i]'}, {}), '(test_set[name][i])', False, 'import os\n'), ((181, 35, 181, 82), 'os.listdir', 'os.listdir', ({(181, 46, 181, 81): '(full_stylegan_dir + os.sep + folder)'}, {}), '(full_stylegan_dir + os.sep + folder)', False, 'import os\n'), ((202, 35, 202, 82), 'os.listdir', 'os.listdir', ({(202, 46, 202, 81): '(full_stylegan_dir + os.sep + folder)'}, {}), '(full_stylegan_dir + os.sep + folder)', False, 'import os\n'), ((233, 89, 233, 112), 'os.path.basename', 'os.path.basename', ({(233, 106, 233, 111): 'img_f'}, {}), '(img_f)', False, 'import os\n')] |
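The evaluation tail of this script flattens one-hot labels and normalised per-class scores, then feeds them to scikit-learn's curve helpers. Below is a self-contained sketch of just that metric step, on synthetic data rather than the face-recognition pipeline, and using scikit-learn's `label_binarize` in place of the project's own helper.

```python
# Minimal sketch of the ROC / precision-recall step, with synthetic scores.
import numpy as np
from sklearn.metrics import roc_curve, auc, precision_recall_curve, average_precision_score
from sklearn.preprocessing import label_binarize

rng = np.random.default_rng(0)
labels = rng.integers(0, 3, size=200)                 # 3 classes, e.g. normal/noonan/others
scores = rng.random((200, 3))
scores = scores / scores.sum(axis=1, keepdims=True)   # like relative_scores above

total_names = label_binarize(labels, classes=[0, 1, 2]).ravel()
total_scores = scores.ravel()

fpr, tpr, _ = roc_curve(total_names, total_scores)
print("micro-averaged AUC:", auc(fpr, tpr))
precision, recall, _ = precision_recall_curve(total_names, total_scores)
print("average precision:", average_precision_score(total_names, total_scores))
```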
manahter/dirio | examples/tryclass.py | c33fcd6c114ffb275d7147156c7041389fab6cfc | import time
class TryClass:
value = 1
valu = 2
val = 3
va = 4
v = 5
def __init__(self, value=4):
print("Created TryClass :", self)
self.value = value
def metod1(self, value, val2=""):
self.value += value
print(f"\t>>> metod 1, add: {value}, now value : {self.value}, val2: {val2}")
time.sleep(2)
return self.value
@classmethod
def metod2(cls, value, val2=""):
cls.value = 2
print(f"\t>>> metod 2, add: {value}, now value : {cls.value}, val2: {val2}")
return cls.value
@staticmethod
def metod3(value, val2=""):
TryClass.value += value
print(f"\t>>> metod 3, add: {value}, now value : {TryClass.value}, val2: {val2}")
return TryClass.value
def event_call(other_arg, kwarg="-", result=None):
"""Call this metod, on returned result"""
print(f"Bind Result, {result}\n"*10)
print("other_arg", other_arg)
print("kwarg", kwarg)
if __name__ == "__main__":
try:
from dirio import Dirio
except:
from ..dirio import Dirio
dr_cls = Dirio(target=TryClass, args=(888,), kwargs={}, worker=False)
print("Starting values :", dr_cls.value, dr_cls)
print("\n"*2)
print("Wait 1 sec for your reply. metod 1 :", dr_cls.metod1(5, val2="1", dr_wait=1))
print("Wait until the reply comes. metod 1 :", dr_cls.metod1(5, val2="1", dr_wait=-1))
code0 = dr_cls.metod1(5, val2="1", dr_code=True)
print("Metod 1, call, via bind to func", dr_cls.dr_bind(code0, event_call, args=("OtHeR aRg", ), kwargs={"kwarg": "KwArG"}))
while True:
#
dr_cls.dr_binds_check()
print("Run the method and give us the response reading code : dr_code=True")
code1 = dr_cls.metod1(5, val2="1", dr_code=True)
print("Is there data in the reading code? : dr_code=43534")
while not dr_cls.metod1(dr_code=code1):
print("We are waiting for the data with this code :", code1)
time.sleep(.5)
print("Returned metod 1 data :", dr_cls.metod1(dr_code=code1))
print("Methods called this way give the last return value : nothing or dr_code=False")
code2 = dr_cls.metod2(10, val2="2", dr_code=True)
print("Search by code only :", dr_cls.dr_code(code2, wait=1))
print("Trying metod 2, called and returned :", dr_cls.metod2(10, val2="2", dr_code=False))
print("Trying metod 3, called and returned :", dr_cls.metod3(15, val2="3"))
print("\n"*2)
time.sleep(3)
dr_cls.dr_terminate()
| [((47, 13, 47, 73), 'dirio.Dirio', 'Dirio', (), '', False, 'from dirio import Dirio\n'), ((18, 8, 18, 21), 'time.sleep', 'time.sleep', ({(18, 19, 18, 20): '(2)'}, {}), '(2)', False, 'import time\n'), ((81, 8, 81, 21), 'time.sleep', 'time.sleep', ({(81, 19, 81, 20): '(3)'}, {}), '(3)', False, 'import time\n'), ((69, 12, 69, 26), 'time.sleep', 'time.sleep', ({(69, 23, 69, 25): '(0.5)'}, {}), '(0.5)', False, 'import time\n')] |
ismaila-at-za-ibm/qiskit-terra | qiskit/providers/basebackend.py | 08303ec98ac7b33fde55266dc3a74466fbdcae95 | # -*- coding: utf-8 -*-
# Copyright 2017, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
"""This module implements the abstract base class for backend modules.
To create add-on backend modules subclass the Backend class in this module.
Doing so requires that the required backend interface is implemented.
"""
from abc import ABC, abstractmethod
from qiskit.version import __version__
from .models import BackendStatus
class BaseBackend(ABC):
"""Base class for backends."""
@abstractmethod
def __init__(self, configuration, provider=None):
"""Base class for backends.
This method should initialize the module and its configuration, and
raise an exception if a component of the module is
not available.
Args:
configuration (BackendConfiguration): backend configuration
provider (BaseProvider): provider responsible for this backend
Raises:
FileNotFoundError if backend executable is not available.
QiskitError: if there is no name in the configuration
"""
self._configuration = configuration
self._provider = provider
@abstractmethod
def run(self, qobj):
"""Run a Qobj on the the backend."""
pass
def configuration(self):
"""Return the backend configuration.
Returns:
BackendConfiguration: the configuration for the backend.
"""
return self._configuration
def properties(self):
"""Return backend properties.
Returns:
BackendProperties: the configuration for the backend. If the backend
does not support properties, it returns ``None``.
"""
return None
def provider(self):
"""Return the backend Provider.
Returns:
BaseProvider: the Provider responsible for the backend.
"""
return self._provider
def status(self):
"""Return backend status.
Returns:
BackendStatus: the status of the backend.
"""
return BackendStatus(backend_name=self.name(),
backend_version=__version__,
operational=True,
pending_jobs=0,
status_msg='')
def name(self):
"""Return backend name.
Returns:
str: the name of the backend.
"""
return self._configuration.backend_name
def __str__(self):
return self.name()
def __repr__(self):
"""Official string representation of a Backend.
Note that, by Qiskit convention, it is consciously *not* a fully valid
Python expression. Subclasses should provide 'a string of the form
<...some useful description...>'. [0]
[0] https://docs.python.org/3/reference/datamodel.html#object.__repr__
"""
return "<{}('{}') from {}()>".format(self.__class__.__name__,
self.name(),
self._provider)
| [] |
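The module docstring says add-on backends subclass this class and implement the required interface; a minimal hedged sketch of such a subclass, with a stand-in configuration object and a placeholder `run()`, could look like this:

```python
# Hedged sketch of a minimal backend; FakeConfiguration stands in for a real
# BackendConfiguration and run() is a placeholder, not a working executor.
class FakeConfiguration:
    backend_name = "fake_backend"

class FakeBackend(BaseBackend):
    def __init__(self, provider=None):
        super().__init__(configuration=FakeConfiguration(), provider=provider)

    def run(self, qobj):
        # A real backend would submit the Qobj and return a job handle here.
        raise NotImplementedError("sketch only")

backend = FakeBackend()
print(backend.name())   # fake_backend
print(repr(backend))    # <FakeBackend('fake_backend') from None()>
```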
shahbagdadi/py-algo-n-ds | arrays/jump2/Solution.py | ff689534b771ddb4869b001b20a0e21b4896bb0a | from typing import List
import sys
class Solution:
def jump(self, nums: List[int]) -> int:
if len(nums) <=1: return 0
l , r , jumps = 0, nums[0] , 1
while r < len(nums)-1 :
jumps += 1
# you can land anywhere between l & r+1 in a jump and then use Num[i] to jump from there
nxt = max( i + nums[i] for i in range(l, r+1))
l , r = r, nxt
return jumps
s = Solution()
ans = s.jump([2,3,1,1,4])  # reachable input; with [3,2,1,0,4] the last index is unreachable and the while loop never terminates
print(ans) | [] |
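Tracing the greedy window on the driver input above makes the `l`/`r` bookkeeping concrete; the extra check below is a hedged addition, not part of the original file.

```python
# Greedy-window trace for [2, 3, 1, 1, 4]:
#   init : l=0, r=2, jumps=1          (everything reachable with one jump)
#   step : nxt = max(i + nums[i] for i in 0..2) = 4 -> l, r, jumps = 2, 4, 2
#   r now covers the last index, so the answer is 2.
print(Solution().jump([1, 1, 1, 1]))  # 3 -- one hop per index
```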
shared-tw/shared-tw | share/tests.py | 90dcf92744b4e0ec9e9aa085026b5543c9c3922c | import unittest
from . import states
class DonationStateTestCase(unittest.TestCase):
def test_approve_pending_state(self):
approve_pending_statue = states.PendingApprovalState()
approved_event = states.DonationApprovedEvent()
self.assertIsInstance(
approve_pending_statue.apply(approved_event),
states.PendingDispatchState,
)
cancelled_event = states.DonationCancelledEvent()
self.assertIsInstance(
approve_pending_statue.apply(cancelled_event), states.CancelledState
)
dispatch_event = states.DonationDispatchedEvent()
self.assertIsInstance(
approve_pending_statue.apply(dispatch_event), states.InvalidState
)
def test_dispatch_pending_state(self):
dispatch_pending_state = states.PendingDispatchState()
donation_dispatched_event = states.DonationDispatchedEvent()
self.assertIsInstance(
dispatch_pending_state.apply(donation_dispatched_event),
states.DoneState,
)
cancelled_event = states.DonationCancelledEvent()
self.assertIsInstance(
dispatch_pending_state.apply(cancelled_event), states.CancelledState
)
approved_event = states.DonationApprovedEvent()
self.assertIsInstance(
dispatch_pending_state.apply(approved_event), states.InvalidState
)
def test_collect_pending_state(self):
collect_pending_state = states.PendingDeliveryState()
collected_event = states.DonationDeliveredEvent()
self.assertIsInstance(
collect_pending_state.apply(collected_event), states.DoneState
)
cancelled_event = states.DonationCancelledEvent()
self.assertIsInstance(
collect_pending_state.apply(cancelled_event), states.InvalidState
)
| [] |
grow/airpress | app/extensions.py | b46e951b27b8216f51f0fade3695049455866825 | from jinja2 import nodes
from jinja2.ext import Extension
class FragmentCacheExtension(Extension):
# a set of names that trigger the extension.
tags = set(['cache'])
def __init__(self, environment):
super(FragmentCacheExtension, self).__init__(environment)
# add the defaults to the environment
environment.extend(
fragment_cache_prefix='fragment',
fragment_cache=None
)
def parse(self, parser):
# the first token is the token that started the tag. In our case
# we only listen to ``'cache'`` so this will be a name token with
# `cache` as value. We get the line number so that we can give
# that line number to the nodes we create by hand.
lineno = next(parser.stream).lineno
# now we parse a single expression that is used as cache key.
args = [parser.parse_expression()]
# if there is a comma, the user provided a timeout. If not use
# None as second parameter.
if parser.stream.skip_if('comma'):
args.append(parser.parse_expression())
else:
args.append(nodes.Const(None))
# now we parse the body of the cache block up to `endcache` and
# drop the needle (which would always be `endcache` in that case)
body = parser.parse_statements(['name:endcache'], drop_needle=True)
# now return a `CallBlock` node that calls our _cache_support
# helper method on this extension.
return nodes.CallBlock(self.call_method('_cache_support', args),
[], [], body).set_lineno(lineno)
def _cache_support(self, name, timeout, caller):
"""Helper callback."""
key = self.environment.fragment_cache_prefix + name
# try to load the block from the cache
# if there is no fragment in the cache, render it and store
# it in the cache.
rv = self.environment.fragment_cache.get(key)
if rv is not None:
return rv
rv = caller()
self.environment.fragment_cache.add(key, rv, timeout)
return rv
| [((33, 24, 33, 41), 'jinja2.nodes.Const', 'nodes.Const', ({(33, 36, 33, 40): 'None'}, {}), '(None)', False, 'from jinja2 import nodes\n')] |
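The extension's comments describe what it expects from the environment: a `fragment_cache` object exposing `get()`/`add()` and an optional key prefix. A hedged, self-contained wiring sketch follows, using a throwaway in-memory cache in place of a real one (e.g. Werkzeug's or a memcached client).

```python
# Hedged wiring sketch with a throwaway in-memory cache.
from jinja2 import Environment

class DictCache:
    def __init__(self):
        self._store = {}
    def get(self, key):
        return self._store.get(key)
    def add(self, key, value, timeout=None):  # timeout ignored in this sketch
        self._store.setdefault(key, value)

env = Environment(extensions=[FragmentCacheExtension])
env.fragment_cache = DictCache()

template = env.from_string(
    "{% cache 'sidebar', 300 %}expensive sidebar here{% endcache %}"
)
print(template.render())  # a second render of the same key is served from DictCache
```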
danielSoler93/modtox | modtox/Helpers/helpers.py | 757234140cc780f57d031b46d9293fc2bf95d18d | import os
def retrieve_molecule_number(pdb, resname):
"""
IDENTIFICATION OF MOLECULE NUMBER BASED
ON THE TER'S
"""
count = 0
with open(pdb, 'r') as x:
lines = x.readlines()
for i in lines:
if i.split()[0] == 'TER': count += 1
if i.split()[3] == resname:
molecule_number = count + 1
break
return molecule_number
class cd:
"""Context manager for changing the current working directory"""
def __init__(self, newPath):
self.newPath = os.path.expanduser(newPath)
def __enter__(self):
self.savedPath = os.getcwd()
os.chdir(self.newPath)
def __exit__(self, etype, value, traceback):
os.chdir(self.savedPath)
| [((24, 23, 24, 50), 'os.path.expanduser', 'os.path.expanduser', ({(24, 42, 24, 49): 'newPath'}, {}), '(newPath)', False, 'import os\n'), ((27, 25, 27, 36), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os\n'), ((28, 8, 28, 30), 'os.chdir', 'os.chdir', ({(28, 17, 28, 29): 'self.newPath'}, {}), '(self.newPath)', False, 'import os\n'), ((31, 8, 31, 32), 'os.chdir', 'os.chdir', ({(31, 17, 31, 31): 'self.savedPath'}, {}), '(self.savedPath)', False, 'import os\n')] |
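`cd` is a plain change-directory context manager; a hedged usage sketch with only the standard library:

```python
# Hedged usage sketch of cd(); os is already imported at the top of this module.
import tempfile

with tempfile.TemporaryDirectory() as tmp:
    with cd(tmp):
        print("inside :", os.getcwd())  # the temporary directory
    print("back in:", os.getcwd())      # previous working directory restored
```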
efargas/PyBBIO | bbio/platform/beaglebone/api.py | b0b15fc52befd56e817dbc5876f738e70ef05541 | # api.py
# Part of PyBBIO
# github.com/alexanderhiam/PyBBIO
# MIT License
#
# Beaglebone platform API file.
from bbio.platform.platform import detect_platform
PLATFORM = detect_platform()
if "3.8" in PLATFORM:
from bone_3_8.adc import analog_init, analog_cleanup
from bone_3_8.pwm import pwm_init, pwm_cleanup
from serial_port import serial_cleanup
elif "3.2" in PLATFORM:
from bone_3_2.adc import analog_init, analog_cleanup
from bone_3_2.pwm import pwm_init, pwm_cleanup
from serial_port import serial_cleanup
def platform_init():
analog_init()
pwm_init()
def platform_cleanup():
analog_cleanup()
pwm_cleanup()
serial_cleanup()
| [((10, 11, 10, 28), 'bbio.platform.platform.detect_platform', 'detect_platform', ({}, {}), '()', False, 'from bbio.platform.platform import detect_platform\n'), ((23, 2, 23, 15), 'bone_3_2.adc.analog_init', 'analog_init', ({}, {}), '()', False, 'from bone_3_2.adc import analog_init, analog_cleanup\n'), ((24, 2, 24, 12), 'bone_3_2.pwm.pwm_init', 'pwm_init', ({}, {}), '()', False, 'from bone_3_2.pwm import pwm_init, pwm_cleanup\n'), ((27, 2, 27, 18), 'bone_3_2.adc.analog_cleanup', 'analog_cleanup', ({}, {}), '()', False, 'from bone_3_2.adc import analog_init, analog_cleanup\n'), ((28, 2, 28, 15), 'bone_3_2.pwm.pwm_cleanup', 'pwm_cleanup', ({}, {}), '()', False, 'from bone_3_2.pwm import pwm_init, pwm_cleanup\n'), ((29, 2, 29, 18), 'serial_port.serial_cleanup', 'serial_cleanup', ({}, {}), '()', False, 'from serial_port import serial_cleanup\n')] |
GnarLito/tryhackme.py | tryhackme/http.py | 20b4dd6a15c13c57e7a7be7f59913b937a992e4b | import re
import sys
from urllib.parse import quote as _uriquote
import requests
from . import __version__, errors, utils
from .converters import _county_types, _leaderboard_types, _vpn_types, _not_none
from . import checks
from .cog import request_cog
GET='get'
POST='post'
class HTTPClient:
__CSRF_token_regex = re.compile("const csrfToken[ ]{0,1}=[ ]{0,1}[\"|'](.{36})[\"|']")
__Username_regex = re.compile("const username[ ]{0,1}=[ ]{0,1}[\"|'](.{1,16})[\"|']")
def __init__(self, session=None):
self._state = None
self.authenticated = False
self.__session = requests.Session()
self.static_session = requests.Session()
self.connect_sid = None
self._CSRF_token = None
self.username = None
self.user_agent = f'Tryhackme: (https://github.com/GnarLito/thm-api-py {__version__}) Python/{sys.version_info[0]}.{sys.version_info[1]} requests/{requests.__version__}'
if session is not None:
self.static_login(session)
def close(self):
if self.__session:
self.__session.close()
def static_login(self, session):
self.connect_sid = session
cookie = requests.cookies.create_cookie('connect.sid', session, domain='tryhackme.com')
self.__session.cookies.set_cookie(cookie)
try:
self.request(RouteList.get_unseen_notifications())
self.authenticated = True
self._CSRF_token = self.retrieve_CSRF_token()
self.username = self.retrieve_username()
except Exception as e:
print("session Issue:", e)
def retrieve_CSRF_token(self):
if not self.authenticated:
return None
try:
page = self.request(RouteList.get_profile_page())
return self._HTTPClient__CSRF_token_regex.search(page).group(1)
except AttributeError:
self.authenticated = False
return None
def retrieve_username(self):
if not self.authenticated:
return None
try:
page = self.request(RouteList.get_profile_page())
return self._HTTPClient__Username_regex.search(page).group(1)
except AttributeError:
self.authenticated = False
return None
def request(self, route, **kwargs):
session = self.__session
endpoint = route.url
method = route.method
settings = kwargs.pop('settings', {})
headers = {
'User-Agent': self.user_agent
}
if 'json' in kwargs:
headers['Content-Type'] = 'application/json'
kwargs['data'] = utils.to_json(kwargs.pop('json'))
if "static" in settings:
session = self.static_session
if "CSRF" in settings:
headers['CSRF-Token'] = self._CSRF_token
kwargs["data"]["_CSRF"] = self._CSRF_token
# TODO: retries, Pagenator
try:
with session.request(method, endpoint, **kwargs) as r:
data = utils.response_to_json_or_text(r)
# * valid return
if 300 > r.status_code >= 200:
# $ if return url is login then no auth
if r.url.split('/')[-1] == "login":
raise errors.Unauthorized(request=r, route=route, data=data)
return data
# $ no auth
if r.status_code in {401, 403}:
raise errors.Unauthorized(request=r, route=route, data=data)
# $ endpoint not found
if 404 == r.status_code:
raise errors.NotFound(request=r, route=route, data=data)
# $ server side issue's
if r.status_code in {500, 502}:
raise errors.ServerError(request=r, route=route, data=data)
except Exception as e:
raise e
class Route:
# TODO: add post payload capabilities
BASE = "https://www.tryhackme.com"
def __init__(self, method=GET, path='', **parameters):
self.method = method
self._path = path
self.path = path
url = self.BASE + self.path
options = parameters.pop("options", None)
if parameters:
try:
self.path = self.path.format(**{k: _uriquote(v) if isinstance(v, str) else v for k, v in parameters.items()})
self.url = self.BASE + self.path
except Exception as e:
raise errors.NotValidUrlParameters(e)
else:
self.url = url
if options:
if "?" not in self.url:
self.url + "?" + "&".join([f"{i}={options[i]}" for i in options.keys() if options[i] != None])
else:
self.url + "&" + "&".join([f"{i}={options[i]}" for i in options.keys() if options[i] != None])
self.bucket = f"{method} {path}"
class RouteList:
def get_profile_page(**parameters): return Route(path="/profile", **parameters)
# * normal site calls
def get_server_time( **parameters): return Route(path="/api/server-time", **parameters)
def get_site_stats( **parameters): return Route(path="/api/site-stats", **parameters)
def get_practise_rooms( **parameters): return Route(path="/api/practice-rooms", **parameters)
def get_series( **parameters): return Route(path="/api/series?show={show}", **parameters)
def get_glossary_terms( **parameters): return Route(path="/glossary/all-terms", **parameters)
# * Leaderboards
def get_leaderboards( **parameters): return Route(path="/api/leaderboards", **parameters)
def get_koth_leaderboards(**parameters): return Route(path="/api/leaderboards/koth", **parameters)
# * networks
def get_networks( **parameters): return Route(path="/api/networks", **parameters)
def get_network( **parameters): return Route(path="/api/room/network?code={network_code}", **parameters)
def get_network_cost( **parameters): return Route(path="/api/room/cost?code={network_code}", **parameters)
# * account
def get_subscription_cost(**parameters): return Route(path="/account/subscription/cost", **parameters)
# * paths
def get_path( **parameters): return Route(path="/paths/single/{path_code}", **parameters)
def get_public_paths( **parameters): return Route(path="/paths/public", **parameters)
def get_path_summary( **parameters): return Route(path="/paths/summary", **parameters)
# * modules
def get_modules_summary(**parameters): return Route(path="/modules/summary", **parameters)
def get_module( **parameters): return Route(path="/modules/data/{module_code}",**parameters)
# * games
def get_machine_pool( **parameters): return Route(path="/games/koth/get/machine-pool", **parameters)
def get_game_detail( **parameters): return Route(path="/games/koth/data/{game_code}", **parameters)
def get_recent_games( **parameters): return Route(path="/games/koth/recent/games", **parameters)
def get_user_games( **parameters): return Route(path="/games/koth/user/games", **parameters)
def get_game_tickets_won(**parameters): return Route(path="/games/tickets/won?username={username}", **parameters)
def post_join_koth( **parameters): return Route(method=POST, path="/games/koth/new", **parameters)
def post_new_koth( **parameters): return Route(method=POST, path="/games/koth/join-public", **parameters) # ? might be different for premium users
# * VPN
def get_available_vpns(**parameters): return Route(path="/vpn/get-available-vpns", **parameters)
def get_vpn_info( **parameters): return Route(path="/vpn/my-data", **parameters)
# * VM
def get_machine_running( **parameters): return Route(path="/api/vm/running", **parameters)
def post_renew_machine( **parameters): return Route(method=POST, path="/api/vm/renew", **parameters)
def post_terminate_machine( **parameters): return Route(method=POST, path="/api/vm/terminate", **parameters)
# * user -badge
def get_own_badges( **parameters): return Route(path="/api/badges/mine", **parameters)
def get_user_badges(**parameters): return Route(path="/api/badges/get/{username}", **parameters)
def get_all_badges( **parameters): return Route(path="/api/badges/get", **parameters)
# * user -team
def get_team_info(**parameters): return Route(path="/api/team/is-member", **parameters)
# * user -notifications
def get_unseen_notifications(**parameters): return Route(path="/notifications/has-unseen", **parameters)
def get_all_notifications( **parameters): return Route(path="/notifications/get", **parameters)
# * user -messages
def get_unseen_messages( **parameters): return Route(path="/message/has-unseen", **parameters)
def get_all_group_messages(**parameters): return Route(path="/message/group/get-all", **parameters)
def get_group_messages( **parameters): return Route(path="/message/group/get/{group_id}", **parameters)
# * user -room
def get_user_completed_rooms_count( **parameters): return Route(path="/api/no-completed-rooms-public/{username}", **parameters)
def get_user_completed_rooms( **parameters): return Route(path="/api/all-completed-rooms?username={username}", **parameters)
def get_user_created_rooms( **parameters): return Route(path="/api/created-rooms/{username}", **parameters)
# * user
def get_user_rank( **parameters): return Route(path="/api/user/rank/{username}", **parameters)
def get_user_activty(**parameters): return Route(path="/api/user/activity-events?username={username}", **parameters)
def get_all_friends( **parameters): return Route(path="/api/friend/all", **parameters)
def get_discord_user(**parameters): return Route(path="/api/discord/user/{username}", **parameters) # ? rename to user profile
def get_user_exist( **parameters): return Route(path="/api/user/exist/{username}", **parameters)
def search_user( **parameters): return Route(path="/api/similar-users/{username}", **parameters)
# * room
def get_new_rooms( **parameters): return Route(path="/api/new-rooms", **parameters)
def get_recommended_rooms( **parameters): return Route(path="/recommend/last-room?type=json", **parameters)
def get_questions_answered( **parameters): return Route(path="/api/questions-answered", **parameters)
def get_joined_rooms( **parameters): return Route(path="/api/my-rooms", **parameters)
def get_room_percetages( **parameters): return Route(method=POST, path="/api/room-percentages", **parameters) # ? is a post but it gets stuff
def get_room_scoreboard( **parameters): return Route(path="/api/room/scoreboard?code={room_code}", **parameters)
def get_room_votes( **parameters): return Route(path="/api/room/votes?code={room_code}", **parameters)
def get_room_details( **parameters): return Route(path="/api/room/details?codes={room_code}", **parameters) # ? list posibility
def get_room_tasks( **parameters): return Route(path="/api/tasks/{room_code}", **parameters)
def post_room_answer( **parameters): return Route(method=POST, path="/api/{room_code}/answer", **parameters)
def post_deploy_machine( **parameters): return Route(method=POST, path="/material/deploy", **parameters)
def post_reset_room_progress(**parameters): return Route(method=POST, path="/api/reset-progress", **parameters)
def post_leave_room( **parameters): return Route(method=POST, path="/api/room/leave", **parameters)
class HTTP(request_cog, HTTPClient):
# * normal site calls
def get_server_time(self, **attrs):
return self.request(RouteList.get_server_time(), **attrs)
def get_site_stats(self, **attrs):
return self.request(RouteList.get_site_stats(), **attrs)
def get_practise_rooms(self, **attrs):
return self.request(RouteList.get_practise_rooms(), **attrs)
def get_serie(self, show, serie_code, **attrs):
return self.request(RouteList.get_series(show=show, options={"name": serie_code}), **attrs)
def get_series(self, show, **attrs):
return self.request(RouteList.get_series(show=show), **attrs)
def get_glossary_terms(self, **attrs):
return self.request(RouteList.get_glossary_terms(), **attrs)
# * Leaderboards
def get_leaderboards(self, country: _county_types, type:_leaderboard_types, **attrs):
return self.request(RouteList.get_leaderboards(country=country.to_lower_case(), type=type), **attrs)
def get_koth_leaderboards(self, country: _county_types, type:_leaderboard_types, **attrs):
return self.request(RouteList.get_koth_leaderboards(country=country.to_lower_case(), type=type), **attrs)
# * networks
def get_network(self, network_code, **attrs):
return self.request(RouteList.get_network(network_code=network_code), **attrs)
def get_networks(self, **attrs):
return self.request(RouteList.get_networks(),**attrs)
def get_network_cost(self, network_code, **attrs):
return self.request(RouteList.get_networks(network_code=network_code), **attrs)
# * account
@checks.is_authenticated()
def get_subscription_cost(self, **attrs):
return self.request(RouteList.get_subscription_cost(), **attrs)
# * paths
def get_path(self, path_code, **attrs):
return self.request(RouteList.get_path(path_code=path_code), **attrs)
def get_public_paths(self, **attrs):
return self.request(RouteList.get_public_paths(), **attrs)
def get_path_summary(self, **attrs):
return self.request(RouteList.get_path_summary(), **attrs)
# * modules
def get_modules_summary(self, **attrs):
return self.request(RouteList.get_modules_summary(), **attrs)
def get_module(self, module_code, **attrs):
return self.request(RouteList.get_module(module_code), **attrs)
# * games
def get_machine_pool(self, **attrs):
return self.request(RouteList.get_machine_pool(), **attrs)
def get_game_detail(self, game_code, **attrs):
return self.request(RouteList.get_game_detail(game_code=game_code), **attrs)
def get_recent_games(self, **attrs):
return self.request(RouteList.get_recent_games(), **attrs)
def get_user_games(self, **attrs):
return self.request(RouteList.get_user_games(), **attrs)
def get_game_tickets_won(self, username, **attrs):
return self.request(RouteList.get_game_tickets_won(username=username), **attrs)
@checks.set_header_CSRF()
def post_join_koth(self, **attrs):
return self.request(RouteList.post_join_koth(), **attrs)
@checks.set_header_CSRF()
def post_new_koth(self, **attrs):
return self.request(RouteList.post_new_koth(), **attrs)
# * VPN
@checks.is_authenticated()
def get_available_vpns(self, type : _vpn_types, **attrs):
return self.request(RouteList.get_available_vpns(options={"type": type}), **attrs)
@checks.is_authenticated()
def get_vpn_info(self, **attrs):
return self.request(RouteList.get_vpn_info(), **attrs)
# * VM
def get_machine_running(self, **attrs):
return self.request(RouteList.get_machine_running(), **attrs)
@checks.set_header_CSRF()
def post_renew_machine(self, room_code, **attrs):
return self.request(RouteList.post_renew_machine(), json={"code": room_code}, **attrs)
@checks.set_header_CSRF()
def post_terminate_machine(self, room_code, **attrs):
return self.request(RouteList.post_terminate_machine(), json={"code": room_code}, **attrs)
# * user -badge
@checks.is_authenticated()
def get_own_badges(self, **attrs):
return self.request(RouteList.get_own_badges(), **attrs)
def get_user_badges(self, username, **attrs):
return self.request(RouteList.get_user_badges(username=username), **attrs)
def get_all_badges(self, **attrs):
return self.request(RouteList.get_all_badges(), **attrs)
# * user -team
@checks.is_authenticated()
def get_team_info(self, **attrs):
return self.request(RouteList.get_team_info(), **attrs)
# * user -notifications
@checks.is_authenticated()
def get_unseen_notifications(self, **attrs):
return self.request(RouteList.get_unseen_notifications(), **attrs)
@checks.is_authenticated()
def get_all_notifications(self, **attrs):
return self.request(RouteList.get_all_notifications(), **attrs)
# * user -messages
@checks.is_authenticated()
def get_unseen_messages(self, **attrs):
return self.request(RouteList.get_unseen_messages(), **attrs)
@checks.is_authenticated()
def get_all_group_messages(self, **attrs):
return self.request(RouteList.get_all_group_messages(), **attrs)
@checks.is_authenticated()
def get_group_messages(self, group_id, **attrs):
return self.request(RouteList.get_group_messages(group_id), **attrs)
# * user -room
def get_user_completed_rooms_count(self, username, **attrs):
return self.request(RouteList.get_user_completed_rooms_count(username=username), **attrs)
def get_user_completed_rooms(self, username, limit:int=10, page:int=1, **attrs):
return self.request(RouteList.get_user_completed_rooms(username=username, options={"limit": limit, "page": page}), **attrs)
def get_user_created_rooms(self, username, limit:int=10, page:int=1, **attrs):
return self.request(RouteList.get_user_created_rooms(username=username, options={"limit": limit, "page": page}), **attrs)
# * user
def get_user_rank(self, username : _not_none, **attrs):
return self.request(RouteList.get_user_rank(username=username), **attrs)
def get_user_activty(self, username : _not_none, **attrs):
return self.request(RouteList.get_user_activty(username=username), **attrs)
@checks.is_authenticated()
def get_all_friends(self, **attrs):
return self.request(RouteList.get_all_friends(), **attrs)
def get_discord_user(self, username : _not_none, **attrs):
return self.request(RouteList.get_discord_user(username=username), **attrs)
def get_user_exist(self, username : _not_none, **attrs):
return self.request(RouteList.get_user_exist(username=username), **attrs)
def search_user(self, username : _not_none, **attrs):
return self.request(RouteList.search_user(username=username), **attrs)
# * room
def get_new_rooms(self, **attrs):
return self.request(RouteList.get_new_rooms(), **attrs)
@checks.is_authenticated()
def get_recommended_rooms(self, **attrs):
return self.request(RouteList.get_recommended_rooms(), **attrs)
def get_questions_answered(self, **attrs):
return self.request(RouteList.get_questions_answered(), **attrs)
@checks.is_authenticated()
def get_joined_rooms(self, **attrs):
return self.request(RouteList.get_joined_rooms(), **attrs)
@checks.is_authenticated()
def get_room_percentages(self, room_codes, **attrs):
return self.request(RouteList.get_room_percetages(), json={"rooms": room_codes}, **attrs)
@checks.is_authenticated()
def get_room_scoreboard(self, room_code, **attrs):
return self.request(RouteList.get_room_scoreboard(room_code=room_code), **attrs)
def get_room_votes(self, room_code, **attrs):
return self.request(RouteList.get_room_votes(room_code=room_code), **attrs)
def get_room_details(self, room_code, loadWriteUps: bool=True, loadCreators: bool=True, loadUser: bool=True, **attrs):
return self.request(RouteList.get_room_details(room_code=room_code, options={"loadWriteUps": loadWriteUps, "loadCreators": loadCreators, "loadUser": loadUser}), **attrs).get(room_code, {})
def get_room_tasks(self, room_code, **attrs):
return self.request(RouteList.get_room_tasks(room_code=room_code), **attrs)
@checks.set_header_CSRF()
@checks.is_authenticated()
def post_room_answer(self, room_code, taskNo: int, questionNo: int, answer: str, **attrs):
return self.request(RouteList.post_room_answer(room_code=room_code), json={"taskNo": taskNo, "questionNo": questionNo, "answer": answer}, **attrs)
@checks.set_header_CSRF()
@checks.is_authenticated()
def post_deploy_machine(self, room_code, uploadId, **attrs):
return self.request(RouteList.post_deploy_machine(), json={"roomCode": room_code, "id": uploadId}, **attrs)
@checks.set_header_CSRF()
@checks.is_authenticated()
def post_reset_room_progress(self, room_code, **attrs):
return self.request(RouteList.post_reset_room_progress(), json={"code": room_code}, **attrs)
@checks.set_header_CSRF()
@checks.is_authenticated()
def post_leave_room(self, room_code, **attrs):
return self.request(RouteList.post_leave_room(), json={"code": room_code}, **attrs)
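    # Illustrative sketch (not part of the original client): assuming the
    # surrounding class is instantiated elsewhere (e.g. as `client`), the
    # wrappers above are called like regular methods:
    #     client.get_user_rank(username="some_user")
    #     client.post_room_answer("some_room", taskNo=1, questionNo=1, answer="THM{...}")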
| [((17, 25, 17, 90), 're.compile', 're.compile', ({(17, 36, 17, 89): '"""const csrfToken[ ]{0,1}=[ ]{0,1}["|\'](.{36})["|\']"""'}, {}), '(\'const csrfToken[ ]{0,1}=[ ]{0,1}["|\\\'](.{36})["|\\\']\')', False, 'import re\n'), ((18, 25, 18, 91), 're.compile', 're.compile', ({(18, 36, 18, 90): '"""const username[ ]{0,1}=[ ]{0,1}["|\'](.{1,16})["|\']"""'}, {}), '(\'const username[ ]{0,1}=[ ]{0,1}["|\\\'](.{1,16})["|\\\']\')', False, 'import re\n'), ((23, 25, 23, 43), 'requests.Session', 'requests.Session', ({}, {}), '()', False, 'import requests\n'), ((24, 30, 24, 48), 'requests.Session', 'requests.Session', ({}, {}), '()', False, 'import requests\n'), ((40, 17, 40, 95), 'requests.cookies.create_cookie', 'requests.cookies.create_cookie', (), '', False, 'import requests\n'), ((133, 51, 133, 63), 'urllib.parse.quote', '_uriquote', ({(133, 61, 133, 62): 'v'}, {}), '(v)', True, 'from urllib.parse import quote as _uriquote\n')] |
wullli/flatlander | flatlander/runner/experiment_runner.py | 2c7fbd3d025f2a05c40895ec735a92d7a6bfb1ad | import os
from argparse import ArgumentParser
from pathlib import Path
import gym
import ray
import ray.tune.result as ray_results
import yaml
from gym.spaces import Tuple
from ray.cluster_utils import Cluster
from ray.rllib.utils import try_import_tf, try_import_torch
from ray.tune import run_experiments, register_env
from ray.tune.logger import TBXLogger
from ray.tune.resources import resources_to_json
from ray.tune.tune import _make_scheduler
from ray.tune.utils import merge_dicts
from flatlander.envs import get_eval_config
from flatlander.envs.flatland_sparse import FlatlandSparse
from flatlander.envs.observations import make_obs
from flatlander.envs.utils.global_gym_env import GlobalFlatlandGymEnv
from flatlander.envs.utils.gym_env_fill_missing import FillingFlatlandGymEnv
from flatlander.logging.custom_metrics import on_episode_end
from flatlander.logging.wandb_logger import WandbLogger
from flatlander.utils.loader import load_envs, load_models
ray_results.DEFAULT_RESULTS_DIR = os.path.join(os.getcwd(), "..", "..", "..", "flatland-challenge-data/results")
class ExperimentRunner:
group_algorithms = ["QMIX", "QMIXApex"]
def __init__(self):
self.tf = try_import_tf()
self.torch, _ = try_import_torch()
load_envs(os.path.dirname(__file__))
load_models(os.path.dirname(__file__))
@staticmethod
def get_experiments(run_args, arg_parser: ArgumentParser = None):
if run_args.config_file:
with open(run_args.config_file) as f:
experiments = yaml.safe_load(f)
else:
experiments = {
run_args.experiment_name: { # i.e. log to ~/ray_results/default
"run": run_args.run,
"checkpoint_freq": run_args.checkpoint_freq,
"keep_checkpoints_num": run_args.keep_checkpoints_num,
"checkpoint_score_attr": run_args.checkpoint_score_attr,
"local_dir": run_args.local_dir,
"resources_per_trial": (
run_args.resources_per_trial and
resources_to_json(run_args.resources_per_trial)),
"stop": run_args.stop,
"config": dict(run_args.config, env=run_args.env),
"restore": run_args.restore,
"num_samples": run_args.num_samples,
"upload_dir": run_args.upload_dir,
}
}
if arg_parser is not None:
for exp in experiments.values():
if not exp.get("run"):
arg_parser.error("the following arguments are required: --run")
if not exp.get("envs") and not exp.get("config", {}).get("envs"):
arg_parser.error("the following arguments are required: --envs")
return experiments
@staticmethod
def setup_grouping(config: dict):
grouping = {
"group_1": list(range(config["env_config"]["max_n_agents"])),
}
obs_space = Tuple([make_obs(config["env_config"]["observation"],
config["env_config"]["observation_config"]).observation_space()
for _ in range(config["env_config"]["max_n_agents"])])
act_space = Tuple([GlobalFlatlandGymEnv.action_space for _ in range(config["env_config"]["max_n_agents"])])
register_env(
"flatland_sparse_grouped",
lambda config: FlatlandSparse(config).with_agent_groups(
grouping, obs_space=obs_space, act_space=act_space))
def setup_policy_map(self, config: dict):
obs_space = make_obs(config["env_config"]["observation"],
config["env_config"]["observation_config"]).observation_space()
config["multiagent"] = {
"policies": {"pol_" + str(i): (None, obs_space, FillingFlatlandGymEnv.action_space, {"agent_id": i})
for i in range(config["env_config"]["observation_config"]["max_n_agents"])},
"policy_mapping_fn": lambda agent_id: "pol_" + str(agent_id)}
def setup_hierarchical_policies(self, config: dict):
obs_space: gym.spaces.Tuple = make_obs(config["env_config"]["observation"],
config["env_config"]["observation_config"]).observation_space()
config["multiagent"] = {
"policies": {"meta": (None, obs_space.spaces[0], gym.spaces.Box(high=1, low=0, shape=(1,)), {}),
"agent": (None, obs_space.spaces[1], FillingFlatlandGymEnv.action_space, {})
},
"policy_mapping_fn": lambda agent_id: "meta" if 'meta' in str(agent_id) else "agent"
}
def apply_args(self, run_args, experiments: dict):
verbose = 1
webui_host = '127.0.0.1'
for exp in experiments.values():
if run_args.eager:
exp["config"]["eager"] = True
if run_args.torch:
exp["config"]["use_pytorch"] = True
if run_args.v:
exp["config"]["log_level"] = "INFO"
verbose = 2
if run_args.vv:
exp["config"]["log_level"] = "DEBUG"
verbose = 3
if run_args.trace:
if not exp["config"].get("eager"):
raise ValueError("Must enable --eager to enable tracing.")
exp["config"]["eager_tracing"] = True
if run_args.bind_all:
webui_host = "0.0.0.0"
if run_args.log_flatland_stats:
exp['config']['callbacks'] = {
'on_episode_end': on_episode_end,
}
return experiments, verbose
@staticmethod
def evaluate(exp):
eval_configs = get_eval_config(exp['config'].get('env_config',
{}).get('eval_generator', "default"))
eval_seed = eval_configs.get('evaluation_config', {}).get('env_config', {}).get('seed')
# add evaluation config to the current config
exp['config'] = merge_dicts(exp['config'], eval_configs)
if exp['config'].get('evaluation_config'):
exp['config']['evaluation_config']['env_config'] = exp['config'].get('env_config')
eval_env_config = exp['config']['evaluation_config'].get('env_config')
if eval_seed and eval_env_config:
# We override the envs seed from the evaluation config
eval_env_config['seed'] = eval_seed
# Remove any wandb related configs
if eval_env_config:
if eval_env_config.get('wandb'):
del eval_env_config['wandb']
# Remove any wandb related configs
if exp['config']['evaluation_config'].get('wandb'):
del exp['config']['evaluation_config']['wandb']
def run(self, experiments: dict, args=None):
verbose = 1
webui_host = "localhost"
for exp in experiments.values():
if exp.get("config", {}).get("input"):
if not isinstance(exp.get("config", {}).get("input"), dict):
if not os.path.exists(exp["config"]["input"]):
rllib_dir = Path(__file__).parent
input_file = rllib_dir.absolute().joinpath(exp["config"]["input"])
exp["config"]["input"] = str(input_file)
if exp["run"] in self.group_algorithms:
self.setup_grouping(exp.get("config"))
if exp["run"] == "contrib/MADDPG" or exp["config"].get("individual_policies", False):
self.setup_policy_map(exp.get("config"))
if exp["config"].get("individual_policies", False):
del exp["config"]["individual_policies"]
if exp["run"] == "contrib/MADDPG":
exp.get("config")["env_config"]["learning_starts"] = 100
exp.get("config")["env_config"]["actions_are_logits"] = True
if exp["env"] == "flatland_sparse_hierarchical":
self.setup_hierarchical_policies(exp.get("config"))
if args is not None:
experiments, verbose = self.apply_args(run_args=args, experiments=experiments)
if args.eval:
self.evaluate(exp)
if args.config_file:
# TODO should be in exp['config'] directly
exp['config']['env_config']['yaml_config'] = args.config_file
exp['loggers'] = [WandbLogger, TBXLogger]
if args.ray_num_nodes:
cluster = Cluster()
for _ in range(args.ray_num_nodes):
cluster.add_node(
num_cpus=args.ray_num_cpus or 1,
num_gpus=args.ray_num_gpus or 1,
object_store_memory=args.ray_object_store_memory,
memory=args.ray_memory,
redis_max_memory=args.ray_redis_max_memory)
ray.init(address=cluster.address)
else:
import multiprocessing
n_cpu = multiprocessing.cpu_count()
import tensorflow as tf
n_gpu = len(tf.config.experimental.list_physical_devices('GPU'))
print("NUM_CPUS AVAILABLE: ", n_cpu)
print("NUM_GPUS AVAILABLE: ", n_gpu)
print("NUM_CPUS ARGS: ", args.ray_num_cpus)
print("NUM_GPUS ARGS: ", args.ray_num_gpus)
ray.init(
local_mode=True if args.local else False,
address=args.ray_address,
object_store_memory=args.ray_object_store_memory,
num_cpus=args.ray_num_cpus if args.ray_num_cpus is not None else n_cpu,
num_gpus=args.ray_num_gpus if args.ray_num_gpus is not None else n_gpu)
run_experiments(
experiments,
scheduler=_make_scheduler(args),
queue_trials=args.queue_trials,
resume=args.resume,
verbose=verbose,
concurrent=True)
ray.shutdown()
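# Illustrative sketch (not part of the original module): the runner is typically
# driven from a CLI entry point, where `args` is an argparse namespace carrying
# the fields referenced above (config_file, eager, resume, ray_num_cpus, ...):
#     runner = ExperimentRunner()
#     experiments = ExperimentRunner.get_experiments(args)
#     runner.run(experiments, args)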
| [((27, 47, 27, 58), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os\n'), ((34, 18, 34, 33), 'ray.rllib.utils.try_import_tf', 'try_import_tf', ({}, {}), '()', False, 'from ray.rllib.utils import try_import_tf, try_import_torch\n'), ((35, 24, 35, 42), 'ray.rllib.utils.try_import_torch', 'try_import_torch', ({}, {}), '()', False, 'from ray.rllib.utils import try_import_tf, try_import_torch\n'), ((140, 24, 140, 64), 'ray.tune.utils.merge_dicts', 'merge_dicts', ({(140, 36, 140, 49): "exp['config']", (140, 51, 140, 63): 'eval_configs'}, {}), "(exp['config'], eval_configs)", False, 'from ray.tune.utils import merge_dicts\n'), ((227, 8, 227, 22), 'ray.shutdown', 'ray.shutdown', ({}, {}), '()', False, 'import ray\n'), ((36, 18, 36, 43), 'os.path.dirname', 'os.path.dirname', ({(36, 34, 36, 42): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((37, 20, 37, 45), 'os.path.dirname', 'os.path.dirname', ({(37, 36, 37, 44): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((194, 22, 194, 31), 'ray.cluster_utils.Cluster', 'Cluster', ({}, {}), '()', False, 'from ray.cluster_utils import Cluster\n'), ((202, 12, 202, 45), 'ray.init', 'ray.init', (), '', False, 'import ray\n'), ((205, 20, 205, 47), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ({}, {}), '()', False, 'import multiprocessing\n'), ((212, 12, 217, 87), 'ray.init', 'ray.init', (), '', False, 'import ray\n'), ((43, 30, 43, 47), 'yaml.safe_load', 'yaml.safe_load', ({(43, 45, 43, 46): 'f'}, {}), '(f)', False, 'import yaml\n'), ((90, 20, 91, 72), 'flatlander.envs.observations.make_obs', 'make_obs', ({(90, 29, 90, 64): "config['env_config']['observation']", (91, 29, 91, 71): "config['env_config']['observation_config']"}, {}), "(config['env_config']['observation'], config['env_config'][\n 'observation_config'])", False, 'from flatlander.envs.observations import make_obs\n'), ((98, 38, 99, 72), 'flatlander.envs.observations.make_obs', 'make_obs', ({(98, 47, 98, 82): "config['env_config']['observation']", (99, 29, 99, 71): "config['env_config']['observation_config']"}, {}), "(config['env_config']['observation'], config['env_config'][\n 'observation_config'])", False, 'from flatlander.envs.observations import make_obs\n'), ((207, 24, 207, 75), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', ({(207, 69, 207, 74): '"""GPU"""'}, {}), "('GPU')", True, 'import tensorflow as tf\n'), ((221, 22, 221, 43), 'ray.tune.tune._make_scheduler', '_make_scheduler', ({(221, 38, 221, 42): 'args'}, {}), '(args)', False, 'from ray.tune.tune import _make_scheduler\n'), ((101, 61, 101, 102), 'gym.spaces.Box', 'gym.spaces.Box', (), '', False, 'import gym\n'), ((54, 28, 54, 75), 'ray.tune.resources.resources_to_json', 'resources_to_json', ({(54, 46, 54, 74): 'run_args.resources_per_trial'}, {}), '(run_args.resources_per_trial)', False, 'from ray.tune.resources import resources_to_json\n'), ((78, 27, 79, 79), 'flatlander.envs.observations.make_obs', 'make_obs', ({(78, 36, 78, 71): "config['env_config']['observation']", (79, 36, 79, 78): "config['env_config']['observation_config']"}, {}), "(config['env_config']['observation'], config['env_config'][\n 'observation_config'])", False, 'from flatlander.envs.observations import make_obs\n'), ((86, 27, 86, 49), 'flatlander.envs.flatland_sparse.FlatlandSparse', 'FlatlandSparse', ({(86, 42, 86, 48): 'config'}, {}), '(config)', False, 'from flatlander.envs.flatland_sparse import FlatlandSparse\n'), ((163, 27, 163, 65), 'os.path.exists', 
'os.path.exists', ({(163, 42, 163, 64): "exp['config']['input']"}, {}), "(exp['config']['input'])", False, 'import os\n'), ((164, 36, 164, 50), 'pathlib.Path', 'Path', ({(164, 41, 164, 49): '__file__'}, {}), '(__file__)', False, 'from pathlib import Path\n')] |
rahulmah/sample-cloud-native-toolchain-tutorial-20170720084529291 | syslib/utils_keywords.py | 08540c0f083a25b5b4e7a4c839080fe54383038c | #!/usr/bin/env python
r"""
This module contains keyword functions to supplement robot's built in
functions and use in test where generic robot keywords don't support.
"""
import time
from robot.libraries.BuiltIn import BuiltIn
from robot.libraries import DateTime
import re
###############################################################################
def run_until_keyword_fails(retry, retry_interval, name, *args):
r"""
Execute a robot keyword repeatedly until it either fails or the timeout
value is exceeded.
Note: Opposite of robot keyword "Wait Until Keyword Succeeds".
Description of argument(s):
retry Max timeout time in hour(s).
retry_interval Time interval in minute(s) for looping.
name Robot keyword to execute.
args Robot keyword arguments.
"""
# Convert the retry time in seconds
retry_seconds = DateTime.convert_time(retry)
timeout = time.time() + int(retry_seconds)
# Convert the interval time in seconds
interval_seconds = DateTime.convert_time(retry_interval)
interval = int(interval_seconds)
BuiltIn().log(timeout)
BuiltIn().log(interval)
while True:
status = BuiltIn().run_keyword_and_return_status(name, *args)
        # Return if the keyword returns a failure.
if status is False:
BuiltIn().log("Failed as expected")
return False
        # Return success if the retry timeout has been reached.
elif time.time() > timeout > 0:
BuiltIn().log("Max retry timeout")
return True
time.sleep(interval)
BuiltIn().log(time.time())
return True
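# Illustrative Robot Framework usage (assumed, not from the original source):
#     ${status}=    Run Until Keyword Fails    1 hour    5 minutes
#     ...           Some Keyword    arg1    arg2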
###############################################################################
###############################################################################
def htx_error_log_to_list(htx_error_log_output):
r"""
    Parse htx error log output string and return a list of error entries, each
    of which is a list of strings in the form "<field name>:<field value>".
The output of this function may be passed to the build_error_dict function.
Description of argument(s):
htx_error_log_output Error entry string containing the stdout
generated by "htxcmdline -geterrlog".
Example of htx_error_log_output contents:
######################## Result Starts Here ###############################
Currently running ECG/MDT : /usr/lpp/htx/mdt/mdt.whit
===========================
---------------------------------------------------------------------
Device id:/dev/nvidia0
Timestamp:Mar 29 19:41:54 2017
err=00000027
sev=1
Exerciser Name:hxenvidia
Serial No:Not Available
Part No:Not Available
Location:Not Available
FRU Number:Not Available
Device:Not Available
Error Text:cudaEventSynchronize for stopEvent returned err = 0039 from file
, line 430.
---------------------------------------------------------------------
---------------------------------------------------------------------
Device id:/dev/nvidia0
Timestamp:Mar 29 19:41:54 2017
err=00000027
sev=1
Exerciser Name:hxenvidia
Serial No:Not Available
Part No:Not Available
Location:Not Available
FRU Number:Not Available
Device:Not Available
Error Text:Hardware Exerciser stopped on error
---------------------------------------------------------------------
######################### Result Ends Here ################################
Example output:
Returns the lists of error string per entry
['Device id:/dev/nvidia0',
'Timestamp:Mar 29 19:41:54 2017',
'err=00000027',
'sev=1',
'Exerciser Name:hxenvidia',
'Serial No:Not Available',
'Part No:Not Available',
'Location:Not Available',
'FRU Number:Not Available',
'Device:Not Available',
'Error Text:cudaEventSynchronize for stopEvent returned err = 0039
from file , line 430.']
"""
# List which will hold all the list of entries.
error_list = []
temp_error_list = []
parse_walk = False
for line in htx_error_log_output.splitlines():
# Skip lines starting with "#"
if line.startswith("#"):
continue
# Mark line starting with "-" and set parse flag.
if line.startswith("-") and parse_walk is False:
parse_walk = True
continue
# Mark line starting with "-" and reset parse flag.
# Set temp error list to EMPTY.
elif line.startswith("-"):
error_list.append(temp_error_list)
parse_walk = False
temp_error_list = []
        # Add entry to list if line is not empty
elif parse_walk:
temp_error_list.append(str(line))
return error_list
###############################################################################
###############################################################################
def build_error_dict(htx_error_log_output):
r"""
    Build the parsed error log output into a dictionary of error entries keyed by index.
    Description of argument(s):
    htx_error_log_output             Error entry string containing the stdout
                                     generated by "htxcmdline -geterrlog".
Example output dictionary:
{
0:
{
'sev': '1',
'err': '00000027',
'Timestamp': 'Mar 29 19:41:54 2017',
'Part No': 'Not Available',
'Serial No': 'Not Available',
'Device': 'Not Available',
'FRU Number': 'Not Available',
'Location': 'Not Available',
'Device id': '/dev/nvidia0',
'Error Text': 'cudaEventSynchronize for stopEvent returned err = 0039
from file , line 430.',
'Exerciser Name': 'hxenvidia'
},
1:
{
'sev': '1',
'err': '00000027',
'Timestamp': 'Mar 29 19:41:54 2017',
'Part No': 'Not Available',
'Serial No': 'Not Available',
'Device': 'Not Available',
'FRU Number': 'Not Available',
'Location': 'Not Available',
'Device id': '/dev/nvidia0',
'Error Text': 'Hardware Exerciser stopped on error',
'Exerciser Name': 'hxenvidia'
}
},
"""
    # Parse the raw output into a list of error entry lists.
    error_list = htx_error_log_to_list(htx_error_log_output)
    # Dictionary which holds the error dictionary entries.
error_dict = {}
temp_error_dict = {}
error_index = 0
# Loop through the error list.
for entry_list in error_list:
# Loop through the first error list entry.
for entry in entry_list:
# Split string into list for key value update.
# Example: 'Device id:/dev/nvidia0'
# Example: 'err=00000027'
parm_split = re.split("[:=]", entry)
# Populate temp dictionary with key value pair data.
temp_error_dict[str(parm_split[0])] = parm_split[1]
# Update the master dictionary per entry index.
error_dict[error_index] = temp_error_dict
# Reset temp dict to EMPTY and increment index count.
temp_error_dict = {}
error_index += 1
return error_dict
###############################################################################
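# Illustrative usage (assumed, not from the original source): converting raw
# "htxcmdline -geterrlog" stdout into structured data.
#     error_dict = build_error_dict(htx_error_log_output)
#     first_error_text = error_dict[0]['Error Text']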
| [((30, 20, 30, 48), 'robot.libraries.DateTime.convert_time', 'DateTime.convert_time', ({(30, 42, 30, 47): 'retry'}, {}), '(retry)', False, 'from robot.libraries import DateTime\n'), ((34, 23, 34, 60), 'robot.libraries.DateTime.convert_time', 'DateTime.convert_time', ({(34, 45, 34, 59): 'retry_interval'}, {}), '(retry_interval)', False, 'from robot.libraries import DateTime\n'), ((31, 14, 31, 25), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((51, 8, 51, 28), 'time.sleep', 'time.sleep', ({(51, 19, 51, 27): 'interval'}, {}), '(interval)', False, 'import time\n'), ((37, 4, 37, 13), 'robot.libraries.BuiltIn.BuiltIn', 'BuiltIn', ({}, {}), '()', False, 'from robot.libraries.BuiltIn import BuiltIn\n'), ((38, 4, 38, 13), 'robot.libraries.BuiltIn.BuiltIn', 'BuiltIn', ({}, {}), '()', False, 'from robot.libraries.BuiltIn import BuiltIn\n'), ((52, 22, 52, 33), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((210, 25, 210, 48), 're.split', 're.split', ({(210, 34, 210, 40): '"""[:=]"""', (210, 42, 210, 47): 'entry'}, {}), "('[:=]', entry)", False, 'import re\n'), ((41, 17, 41, 26), 'robot.libraries.BuiltIn.BuiltIn', 'BuiltIn', ({}, {}), '()', False, 'from robot.libraries.BuiltIn import BuiltIn\n'), ((48, 13, 48, 24), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((52, 8, 52, 17), 'robot.libraries.BuiltIn.BuiltIn', 'BuiltIn', ({}, {}), '()', False, 'from robot.libraries.BuiltIn import BuiltIn\n'), ((45, 12, 45, 21), 'robot.libraries.BuiltIn.BuiltIn', 'BuiltIn', ({}, {}), '()', False, 'from robot.libraries.BuiltIn import BuiltIn\n'), ((49, 12, 49, 21), 'robot.libraries.BuiltIn.BuiltIn', 'BuiltIn', ({}, {}), '()', False, 'from robot.libraries.BuiltIn import BuiltIn\n')] |
ivmtorres/mmpose | tools/webcam/webcam_apis/nodes/__init__.py | 662cb50c639653ae2fc19d3421ce10bd02246b85 | # Copyright (c) OpenMMLab. All rights reserved.
from .builder import NODES
from .faceswap_nodes import FaceSwapNode
from .frame_effect_nodes import (BackgroundNode, BugEyeNode, MoustacheNode,
NoticeBoardNode, PoseVisualizerNode,
SaiyanNode, SunglassesNode)
from .helper_nodes import ModelResultBindingNode, MonitorNode, RecorderNode
from .mmdet_nodes import DetectorNode
from .mmpose_nodes import TopDownPoseEstimatorNode
from .xdwendwen_nodes import XDwenDwenNode
__all__ = [
'NODES', 'PoseVisualizerNode', 'DetectorNode', 'TopDownPoseEstimatorNode',
'MonitorNode', 'BugEyeNode', 'SunglassesNode', 'ModelResultBindingNode',
'NoticeBoardNode', 'RecorderNode', 'FaceSwapNode', 'MoustacheNode',
'SaiyanNode', 'BackgroundNode', 'XDwenDwenNode'
]
| [] |
lelle1234/Db2Utils | DBParser/DBMove.py | 55570a1afbe6d4abe61c31952bc178c2443f4e5b | #!/usr/bin/python3
import ibm_db
import getopt
import sys
import os
from toposort import toposort_flatten
db = None
host = "localhost"
port = "50000"
user = None
pwd = None
outfile = None
targetdb = None
try:
opts, args = getopt.getopt(sys.argv[1:], "h:d:P:u:p:o:t:")
except getopt.GetoptError:
sys.exit(-1)
for o, a in opts:
if o == "-d":
db = a
if o == "-h":
host = a
if o == "-P":
port = a
if o == "-u":
user = a
if o == "-p":
pwd = a
if o == "-t":
targetdb = a
if db is None or user is None or pwd is None or targetdb is None:
print("Usage: DBMove.py [-h <host> -P <port>] -d <db> -u <user> -p <pwd> -t <target>")
sys.exit(1)
db = db.upper()
targetdb = targetdb.upper()
cfg = (db, host, port, user, pwd)
conn = ibm_db.connect("DATABASE=%s; HOSTNAME=%s; PORT=%s; PROTOCOL=TCPIP; UID=%s; PWD=%s" % cfg, "", "")
get_db_type = "values nya.get_db_type()"
find_edges = """
SELECT rtrim(t.tabschema) || '.' || rtrim(t.tabname)
, coalesce(rtrim(r.reftabschema) || '.' || rtrim(r.reftabname), 'dummy')
FROM syscat.tables t
LEFT JOIN syscat.references r
ON (t.tabschema, t.tabname) = (r.tabschema, r.tabname)
WHERE t.tabschema not like 'SYS%'
AND t.type = 'T'
AND rtrim(t.tabschema) not like 'NYA_%'
AND t.tabschema <> 'TMP'
ORDER BY 1
"""
identity_skip = """
select rtrim(tabschema) || '.' || rtrim(tabname) from syscat.columns
where identity = 'Y' and generated = 'D'
"""
stmt = ibm_db.prepare(conn, get_db_type)
ibm_db.execute(stmt, ())
tpl = ibm_db.fetch_tuple(stmt)
db_type = tpl[0]
edges = dict()
stmt = ibm_db.prepare(conn, find_edges)
ibm_db.execute(stmt, ())
tpl = ibm_db.fetch_tuple(stmt)
while tpl:
n1, n2 = tpl
try:
edges[n1].add(n2)
except KeyError:
edges[n1] = set()
edges[n1].add(n2)
tpl = ibm_db.fetch_tuple(stmt)
sorted_nodes = list(toposort_flatten(edges))
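# Note (added for illustration; table names are hypothetical): `edges` maps each
# table to the set of tables it references, e.g. {"APP.ORDERS": {"APP.CUSTOMER"},
# "APP.CUSTOMER": {"dummy"}}. toposort_flatten therefore yields referenced
# (parent) tables before the tables that reference them, which is the order
# needed to export/load data without violating foreign key constraints.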
# print(sorted_nodes)
identity_skip_arr = []
edges = dict()
stmt = ibm_db.prepare(conn, identity_skip)
ibm_db.execute(stmt, ())
tpl = ibm_db.fetch_tuple(stmt)
while tpl:
identity_skip_arr.append(tpl[0])
tpl = ibm_db.fetch_tuple(stmt)
# print(identity_skip)
os.makedirs(db, exist_ok=True)
export_file = open("%s/export.sql" % db, "w")
load_file = open("%s/load.sql" % db, "w")
export_file.write("connect to %s;\n" % db)
load_file.write("connect to %s;\n" % targetdb)
if db_type == "N":
load_file.write("""set integrity for nya.person off;\n""")
load_file.write("""alter table nya.person
alter column EMAIL_UC drop generated
alter column NORMALIZED_FIRSTNAME drop generated
alter column NORMALIZED_LASTNAME drop generated;\n""")
load_file.write("""set integrity for nya.person immediate checked;\n""")
for t in sorted_nodes:
if t == "dummy":
continue
export_file.write("export to %s.ixf of ixf lobs to . modified by codepage=819 messages export_%s.msg select * from %s;\n" % (t,t,t))
identityskip = "identityoverride"
if t in identity_skip_arr:
identityskip = " "
load_file.write("load from %s.ixf of ixf lobs from . modified by generatedoverride %s messages load_%s.msg replace into %s;\n" % (t, identityskip, t, t))
if db_type == "N":
load_file.write("""set integrity for nya.person off;\n""")
load_file.write("""alter table nya.person
alter column EMAIL_UC set generated always as ( upper(email))
alter column NORMALIZED_FIRSTNAME set generated always as ( NYA.REMOVE_DIACRITICS( FIRSTNAME ) )
alter column NORMALIZED_LASTNAME set generated always as ( NYA.REMOVE_DIACRITICS( LASTNAME ) );\n""")
load_file.write("""set integrity for nya.person immediate checked force generated;\n""")
load_file.write("""echo set integrity for all tables;\n""")
export_file.write("connect reset;\n")
load_file.write("connect reset;\n")
export_file.close()
load_file.close()
| [((44, 7, 44, 104), 'ibm_db.connect', 'ibm_db.connect', ({(44, 22, 44, 95): "'DATABASE=%s; HOSTNAME=%s; PORT=%s; PROTOCOL=TCPIP; UID=%s; PWD=%s' % cfg", (44, 97, 44, 99): '""""""', (44, 101, 44, 103): '""""""'}, {}), "(\n 'DATABASE=%s; HOSTNAME=%s; PORT=%s; PROTOCOL=TCPIP; UID=%s; PWD=%s' %\n cfg, '', '')", False, 'import ibm_db\n'), ((66, 7, 66, 40), 'ibm_db.prepare', 'ibm_db.prepare', ({(66, 22, 66, 26): 'conn', (66, 28, 66, 39): 'get_db_type'}, {}), '(conn, get_db_type)', False, 'import ibm_db\n'), ((67, 0, 67, 24), 'ibm_db.execute', 'ibm_db.execute', ({(67, 15, 67, 19): 'stmt', (67, 21, 67, 23): '()'}, {}), '(stmt, ())', False, 'import ibm_db\n'), ((68, 6, 68, 30), 'ibm_db.fetch_tuple', 'ibm_db.fetch_tuple', ({(68, 25, 68, 29): 'stmt'}, {}), '(stmt)', False, 'import ibm_db\n'), ((72, 7, 72, 39), 'ibm_db.prepare', 'ibm_db.prepare', ({(72, 22, 72, 26): 'conn', (72, 28, 72, 38): 'find_edges'}, {}), '(conn, find_edges)', False, 'import ibm_db\n'), ((73, 0, 73, 24), 'ibm_db.execute', 'ibm_db.execute', ({(73, 15, 73, 19): 'stmt', (73, 21, 73, 23): '()'}, {}), '(stmt, ())', False, 'import ibm_db\n'), ((74, 6, 74, 30), 'ibm_db.fetch_tuple', 'ibm_db.fetch_tuple', ({(74, 25, 74, 29): 'stmt'}, {}), '(stmt)', False, 'import ibm_db\n'), ((91, 7, 91, 42), 'ibm_db.prepare', 'ibm_db.prepare', ({(91, 22, 91, 26): 'conn', (91, 28, 91, 41): 'identity_skip'}, {}), '(conn, identity_skip)', False, 'import ibm_db\n'), ((92, 0, 92, 24), 'ibm_db.execute', 'ibm_db.execute', ({(92, 15, 92, 19): 'stmt', (92, 21, 92, 23): '()'}, {}), '(stmt, ())', False, 'import ibm_db\n'), ((93, 6, 93, 30), 'ibm_db.fetch_tuple', 'ibm_db.fetch_tuple', ({(93, 25, 93, 29): 'stmt'}, {}), '(stmt)', False, 'import ibm_db\n'), ((99, 0, 99, 30), 'os.makedirs', 'os.makedirs', (), '', False, 'import os\n'), ((18, 17, 18, 62), 'getopt.getopt', 'getopt.getopt', ({(18, 31, 18, 43): 'sys.argv[1:]', (18, 45, 18, 61): '"""h:d:P:u:p:o:t:"""'}, {}), "(sys.argv[1:], 'h:d:P:u:p:o:t:')", False, 'import getopt\n'), ((38, 4, 38, 15), 'sys.exit', 'sys.exit', ({(38, 13, 38, 14): '(1)'}, {}), '(1)', False, 'import sys\n'), ((83, 10, 83, 34), 'ibm_db.fetch_tuple', 'ibm_db.fetch_tuple', ({(83, 29, 83, 33): 'stmt'}, {}), '(stmt)', False, 'import ibm_db\n'), ((84, 20, 84, 43), 'toposort.toposort_flatten', 'toposort_flatten', ({(84, 37, 84, 42): 'edges'}, {}), '(edges)', False, 'from toposort import toposort_flatten\n'), ((96, 10, 96, 34), 'ibm_db.fetch_tuple', 'ibm_db.fetch_tuple', ({(96, 29, 96, 33): 'stmt'}, {}), '(stmt)', False, 'import ibm_db\n'), ((20, 4, 20, 16), 'sys.exit', 'sys.exit', ({(20, 13, 20, 15): '(-1)'}, {}), '(-1)', False, 'import sys\n')] |
MirunaPislar/Word2vec | utils/glove.py | e9dd01488f081a7b8d7c00a0b21efe0d401d4927 | import numpy as np
DEFAULT_FILE_PATH = "utils/datasets/glove.6B.50d.txt"
def loadWordVectors(tokens, filepath=DEFAULT_FILE_PATH, dimensions=50):
"""Read pretrained GloVe vectors"""
wordVectors = np.zeros((len(tokens), dimensions))
with open(filepath) as ifs:
for line in ifs:
line = line.strip()
if not line:
continue
row = line.split()
token = row[0]
if token not in tokens:
continue
data = [float(x) for x in row[1:]]
if len(data) != dimensions:
raise RuntimeError("wrong number of dimensions")
wordVectors[tokens[token]] = np.asarray(data)
return wordVectors
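if __name__ == "__main__":
    # Minimal usage sketch (added for illustration): indices in `tokens` select
    # rows of the returned matrix; the GloVe file location defaults to
    # DEFAULT_FILE_PATH and is an assumption about the local setup.
    tokens = {"the": 0, "cat": 1, "sat": 2}
    vectors = loadWordVectors(tokens)
    print(vectors.shape)  # expected: (3, 50)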
| [((20, 41, 20, 57), 'numpy.asarray', 'np.asarray', ({(20, 52, 20, 56): 'data'}, {}), '(data)', True, 'import numpy as np\n')] |
stanford-crfm/composer | composer/profiler/__init__.py | 4996fbd818971afd6439961df58b531d9b47a37b | # Copyright 2021 MosaicML. All Rights Reserved.
"""Performance profiling tools.
The profiler gathers performance metrics during a training run that can be used to diagnose bottlenecks and
facilitate model development.
The metrics gathered include:
* Duration of each :class:`.Event` during training
* Time taken by the data loader to return a batch
* Host metrics such as CPU, system memory, disk and network utilization over time
* Execution order, latency and attributes of PyTorch operators and GPU kernels (see :doc:`profiler`)
The following example demonstrates how to set up and perform profiling on a simple training run.
.. literalinclude:: ../../../examples/profiler_demo.py
:language: python
:linenos:
:emphasize-lines: 6, 27-49
It is required to specify an output ``profiler_trace_file`` during :class:`.Trainer` initialization to enable profiling.
The ``profiler_trace_file`` will contain the profiling trace data once the profiling run completes. By default, the :class:`.Profiler`,
:class:`.DataloaderProfiler` and :class:`.SystemProfiler` will be active. The :class:`.TorchProfiler` is **disabled** by default.
To activate the :class:`.TorchProfiler`, the ``torch_profiler_trace_dir`` must be specified *in addition* to the ``profiler_trace_file`` argument.
The ``torch_profiler_trace_dir`` will contain the Torch Profiler traces once the profiling run completes. The :class:`.Profiler` will
automatically merge the Torch traces in the ``torch_profiler_trace_dir`` into the ``profiler_trace_file``, allowing users to view a unified trace.
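A minimal sketch of enabling both trace outputs is shown below (the two trace paths follow the
description above; the remaining :class:`.Trainer` arguments are illustrative only):

.. code-block:: python

    from composer.trainer import Trainer

    trainer = Trainer(
        model=model,
        train_dataloader=train_dataloader,
        max_duration="1ep",
        profiler_trace_file="profiler_trace.json",
        torch_profiler_trace_dir="torch_profiler_traces/",
    )
    trainer.fit()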
The complete traces can be viewed in a Google Chrome browser by navigating to ``chrome://tracing`` and loading the ``profiler_trace_file``.
Here is an example trace file:
.. image:: https://storage.googleapis.com/docs.mosaicml.com/images/profiler/profiler_trace_example.png
:alt: Example Profiler Trace File
:align: center
Additional details can be found in the Profiler Guide.
"""
from composer.profiler._event_handler import ProfilerEventHandler
from composer.profiler._profiler import Marker, Profiler
from composer.profiler._profiler_action import ProfilerAction
# All needs to be defined properly for sphinx autosummary
__all__ = [
"Marker",
"Profiler",
"ProfilerAction",
"ProfilerEventHandler",
]
Marker.__module__ = __name__
Profiler.__module__ = __name__
ProfilerAction.__module__ = __name__
ProfilerEventHandler.__module__ = __name__
| [] |
EvKissle/tinkerpop | gremlin-python/src/main/jython/setup.py | 84195e38fc22a1a089c345fade9c75711e6cfdfe | '''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
'''
import codecs
import os
import sys
import time
from setuptools import setup
# Folder containing the setup.py
root = os.path.dirname(os.path.abspath(__file__))
# Path to __version__ module
version_file = os.path.join(root, 'gremlin_python', '__version__.py')
# Check if this is a source distribution.
# If not create the __version__ module containing the version
if not os.path.exists(os.path.join(root, 'PKG-INFO')):
timestamp = int(os.getenv('TIMESTAMP', time.time() * 1000)) / 1000
fd = codecs.open(version_file, 'w', 'utf-8')
fd.write("'''")
fd.write(__doc__)
fd.write("'''\n")
fd.write('version = %r\n' % os.getenv('VERSION', '?').replace('-SNAPSHOT', '.dev-%d' % timestamp))
fd.write('timestamp = %d\n' % timestamp)
fd.close()
# Load version
from gremlin_python import __version__
version = __version__.version
install_requires = [
'aenum==1.4.5',
'tornado==4.4.1',
'six==1.10.0'
]
if sys.version_info < (3,2):
install_requires += ['futures==3.0.5']
setup(
name='gremlinpython',
version=version,
packages=['gremlin_python', 'gremlin_python.driver',
'gremlin_python.driver.tornado', 'gremlin_python.process',
'gremlin_python.structure', 'gremlin_python.structure.io'],
license='Apache 2',
url='http://tinkerpop.apache.org',
description='Gremlin-Python for Apache TinkerPop',
long_description=codecs.open("README", "r", "UTF-8").read(),
test_suite="tests",
data_files=[("", ["LICENSE", "NOTICE"])],
setup_requires=[
'pytest-runner',
],
tests_require=[
'pytest',
'mock'
],
install_requires=install_requires,
classifiers=[
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Natural Language :: English",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
]
)
| [((29, 15, 29, 69), 'os.path.join', 'os.path.join', ({(29, 28, 29, 32): 'root', (29, 34, 29, 50): '"""gremlin_python"""', (29, 52, 29, 68): '"""__version__.py"""'}, {}), "(root, 'gremlin_python', '__version__.py')", False, 'import os\n'), ((26, 23, 26, 48), 'os.path.abspath', 'os.path.abspath', ({(26, 39, 26, 47): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((35, 9, 35, 48), 'codecs.open', 'codecs.open', ({(35, 21, 35, 33): 'version_file', (35, 35, 35, 38): '"""w"""', (35, 40, 35, 47): '"""utf-8"""'}, {}), "(version_file, 'w', 'utf-8')", False, 'import codecs\n'), ((33, 22, 33, 52), 'os.path.join', 'os.path.join', ({(33, 35, 33, 39): 'root', (33, 41, 33, 51): '"""PKG-INFO"""'}, {}), "(root, 'PKG-INFO')", False, 'import os\n'), ((65, 21, 65, 56), 'codecs.open', 'codecs.open', ({(65, 33, 65, 41): '"""README"""', (65, 43, 65, 46): '"""r"""', (65, 48, 65, 55): '"""UTF-8"""'}, {}), "('README', 'r', 'UTF-8')", False, 'import codecs\n'), ((34, 43, 34, 54), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((39, 34, 39, 59), 'os.getenv', 'os.getenv', ({(39, 44, 39, 53): '"""VERSION"""', (39, 55, 39, 58): '"""?"""'}, {}), "('VERSION', '?')", False, 'import os\n')] |
yoshihikosuzuki/plotly_light | src/_bar.py | cef2465486e9147e27feae1193a1b4487e4fc543 | from typing import Optional, Sequence
import plotly.graph_objects as go
def bar(x: Sequence,
y: Sequence,
text: Optional[Sequence] = None,
width: Optional[int] = None,
col: Optional[str] = None,
opacity: float = 1,
name: Optional[str] = None,
show_legend: bool = False,
show_init: bool = True) -> go.Bar:
"""Create a simple Trace object of a histogram.
positional arguments:
@ x : Coordinates of data on x-axis.
@ y : Coordinates of data on y-axis.
    optional arguments:
      @ text        : Texts shown for each bar.
      @ width       : Width of each bar.
      @ col         : Color of bars.
@ opacity : Opacity of bars.
@ name : Display name of the trace in legend.
@ show_legend : Show this trace in legend.
@ show_init : Show this trace initially.
"""
return go.Bar(x=x,
y=y,
text=text,
width=width,
marker_color=col,
opacity=opacity,
name=name,
showlegend=show_legend,
visible=None if show_init else "legendonly")
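if __name__ == "__main__":
    # Usage sketch (added for illustration; the sample data is arbitrary).
    trace = bar(x=["a", "b", "c"], y=[3, 1, 2], col="steelblue", name="counts")
    fig = go.Figure(data=[trace])
    fig.show()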
| [((26, 11, 34, 62), 'plotly.graph_objects.Bar', 'go.Bar', (), '', True, 'import plotly.graph_objects as go\n')] |
doomhammerhell/pennylane | pennylane/templates/subroutines/arbitrary_unitary.py | f147f22d8d99ba5891edd45a6a1f7dd679c8a23c | # Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
Contains the ArbitraryUnitary template.
"""
import pennylane as qml
from pennylane.operation import Operation, AnyWires
from pennylane.ops import PauliRot
_PAULIS = ["I", "X", "Y", "Z"]
def _tuple_to_word(index_tuple):
"""Convert an integer tuple to the corresponding Pauli word.
The Pauli operators are converted as ``0 -> I``, ``1 -> X``,
``2 -> Y``, ``3 -> Z``.
Args:
index_tuple (Tuple[int]): An integer tuple describing the Pauli word
Returns:
str: The corresponding Pauli word
"""
return "".join([_PAULIS[i] for i in index_tuple])
def _n_k_gray_code(n, k, start=0):
"""Iterates over a full n-ary Gray code with k digits.
Args:
n (int): Base of the Gray code. Needs to be greater than one.
k (int): Number of digits of the Gray code. Needs to be greater than zero.
start (int, optional): Optional start of the Gray code. The generated code
will be shorter as the code does not wrap. Defaults to 0.
"""
for i in range(start, n ** k):
codeword = [0] * k
base_repesentation = []
val = i
for j in range(k):
base_repesentation.append(val % n)
val //= n
shift = 0
for j in reversed(range(k)):
codeword[j] = (base_repesentation[j] + shift) % n
shift += n - codeword[j]
yield codeword
def _all_pauli_words_but_identity(num_wires):
# Start at 1 to ignore identity
yield from (_tuple_to_word(idx_tuple) for idx_tuple in _n_k_gray_code(4, num_wires, start=1))
class ArbitraryUnitary(Operation):
"""Implements an arbitrary unitary on the specified wires.
An arbitrary unitary on :math:`n` wires is parametrized by :math:`4^n - 1`
    independent real parameters. This template uses Pauli word rotations to
parametrize the unitary.
**Example**
ArbitraryUnitary can be used as a building block, e.g. to parametrize arbitrary
two-qubit operations in a circuit:
.. code-block:: python
@qml.template
def arbitrary_nearest_neighbour_interaction(weights, wires):
qml.broadcast(unitary=ArbitraryUnitary, pattern="double", wires=wires, params=weights)
Args:
weights (tensor_like): The angles of the Pauli word rotations, needs to have length :math:`4^n - 1`
where :math:`n` is the number of wires the template acts upon.
wires (Iterable): wires that the template acts on
"""
num_params = 1
num_wires = AnyWires
par_domain = "A"
def __init__(self, weights, wires, do_queue=True, id=None):
shape = qml.math.shape(weights)
if shape != (4 ** len(wires) - 1,):
raise ValueError(
f"Weights tensor must be of shape {(4 ** len(wires) - 1,)}; got {shape}."
)
super().__init__(weights, wires=wires, do_queue=do_queue, id=id)
def expand(self):
weights = self.parameters[0]
with qml.tape.QuantumTape() as tape:
for i, pauli_word in enumerate(_all_pauli_words_but_identity(len(self.wires))):
PauliRot(weights[i], pauli_word, wires=self.wires)
return tape
@staticmethod
def shape(n_wires):
"""Compute the expected shape of the weights tensor.
Args:
n_wires (int): number of wires that template acts on
"""
return (4 ** n_wires - 1,)
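if __name__ == "__main__":
    # Usage sketch (added for illustration; the device and wire count are arbitrary).
    import numpy as np
    n_wires = 2
    weights = np.random.random(ArbitraryUnitary.shape(n_wires))
    dev = qml.device("default.qubit", wires=n_wires)
    @qml.qnode(dev)
    def circuit(weights):
        ArbitraryUnitary(weights, wires=list(range(n_wires)))
        return qml.expval(qml.PauliZ(0))
    print(circuit(weights))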
| [((101, 16, 101, 39), 'pennylane.math.shape', 'qml.math.shape', ({(101, 31, 101, 38): 'weights'}, {}), '(weights)', True, 'import pennylane as qml\n'), ((113, 13, 113, 35), 'pennylane.tape.QuantumTape', 'qml.tape.QuantumTape', ({}, {}), '()', True, 'import pennylane as qml\n'), ((116, 16, 116, 66), 'pennylane.ops.PauliRot', 'PauliRot', (), '', False, 'from pennylane.ops import PauliRot\n')] |
aidiary/generative-models-pytorch | vae_celeba.py | c9ae23a4ecbe4bf8f82dbaf9e4e3e1e61530e6b0 | import pytorch_lightning as pl
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from pytorch_lightning.loggers import TensorBoardLogger
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.datasets import CelebA
class Encoder(nn.Module):
def __init__(self):
super().__init__()
self.conv_layers = nn.Sequential(
nn.Conv2d(3, 32, kernel_size=3, stride=2, padding=1),
nn.BatchNorm2d(32),
nn.LeakyReLU(),
nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=1),
nn.BatchNorm2d(64),
nn.LeakyReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1),
nn.BatchNorm2d(64),
nn.LeakyReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1),
nn.BatchNorm2d(64),
nn.LeakyReLU(),
)
self.mu_layer = nn.Linear(4096, 200)
self.logvar_layer = nn.Linear(4096, 200)
def forward(self, imgs):
out = self.conv_layers(imgs)
out = nn.Flatten()(out)
mu = self.mu_layer(out)
logvar = self.logvar_layer(out)
return mu, logvar
class Decoder(nn.Module):
def __init__(self):
super().__init__()
self.decoder_input = nn.Linear(200, 4096)
self.deconv_layers = nn.Sequential(
nn.ConvTranspose2d(64, 64, kernel_size=3, stride=2, padding=1, output_padding=1),
nn.BatchNorm2d(64),
nn.LeakyReLU(),
nn.ConvTranspose2d(64, 64, kernel_size=3, stride=2, padding=1, output_padding=1),
nn.BatchNorm2d(64),
nn.LeakyReLU(),
nn.ConvTranspose2d(64, 32, kernel_size=3, stride=2, padding=1, output_padding=1),
nn.BatchNorm2d(32),
nn.LeakyReLU(),
nn.ConvTranspose2d(32, 3, kernel_size=3, stride=2, padding=1, output_padding=1),
nn.BatchNorm2d(3),
nn.Sigmoid(),
)
def forward(self, z):
out = self.decoder_input(z)
out = out.view(-1, 64, 8, 8)
recon_img = self.deconv_layers(out)
return recon_img
class VanillaVAE(pl.LightningModule):
def __init__(self):
super().__init__()
self.encoder = Encoder()
self.decoder = Decoder()
def forward(self, img):
mu, logvar = self.encoder(img)
return mu
def reparameterize(self, mu, logvar):
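        # Reparameterization trick: draw eps ~ N(0, I) and return mu + eps * std
        # so the sampling step stays differentiable w.r.t. mu and logvar.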
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return eps * std + mu
def configure_optimizers(self):
optimizer = optim.Adam(self.parameters(), lr=0.005)
return optimizer
def training_step(self, train_batch, batch_idx):
img, labels = train_batch
mu, logvar = self.encoder(img)
z = self.reparameterize(mu, logvar)
recon_img = self.decoder(z)
recon_loss_factor = 10000
recon_loss = F.mse_loss(recon_img, img)
kld_loss = torch.mean(-0.5 * torch.sum(1 + logvar - mu**2 - logvar.exp(), dim=1))
loss = recon_loss_factor * recon_loss + kld_loss
self.log('train/loss', loss)
self.log('train/recon_loss', recon_loss)
self.log('train/kl_loss', kld_loss)
return loss
def validation_step(self, val_batch, batch_idx):
img, labels = val_batch
mu, logvar = self.encoder(img)
z = self.reparameterize(mu, logvar)
recon_img = self.decoder(z)
recon_loss_factor = 10000
recon_loss = F.mse_loss(recon_img, img)
kld_loss = torch.mean(-0.5 * torch.sum(1 + logvar - mu**2 - logvar.exp(), dim=1))
loss = recon_loss_factor * recon_loss + kld_loss
self.log('val/loss', loss)
self.log('val/recon_loss', recon_loss)
self.log('val/kl_loss', kld_loss)
return loss
def reconstruct(self, img):
mu, _ = self.encoder(img)
recon_img = self.decoder(mu)
return recon_img
def sample(self, num_samples=64):
z = torch.randn(num_samples, 200)
samples = self.decoder(z)
return samples
if __name__ == '__main__':
# data
transform = transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.CenterCrop(148),
transforms.Resize(128),
transforms.ToTensor()
])
train_dataset = CelebA(root='data', split='train', transform=transform, download=False)
val_dataset = CelebA(root='data', split='test', transform=transform, download=False)
train_loader = DataLoader(train_dataset,
batch_size=32,
num_workers=8,
shuffle=True,
drop_last=True)
val_loader = DataLoader(val_dataset,
batch_size=32,
num_workers=8,
shuffle=False,
drop_last=True)
# model
model = VanillaVAE()
# training
tb_logger = TensorBoardLogger('lightning_logs', name='vanilla_vae_celeba', default_hp_metric=False)
trainer = pl.Trainer(gpus=[0], max_epochs=200, logger=tb_logger)
trainer.fit(model, train_loader, val_loader)
| [((148, 20, 148, 91), 'torchvision.datasets.CelebA', 'CelebA', (), '', False, 'from torchvision.datasets import CelebA\n'), ((149, 18, 149, 88), 'torchvision.datasets.CelebA', 'CelebA', (), '', False, 'from torchvision.datasets import CelebA\n'), ((151, 19, 155, 45), 'torch.utils.data.DataLoader', 'DataLoader', (), '', False, 'from torch.utils.data import DataLoader\n'), ((156, 17, 160, 43), 'torch.utils.data.DataLoader', 'DataLoader', (), '', False, 'from torch.utils.data import DataLoader\n'), ((166, 16, 166, 103), 'pytorch_lightning.loggers.TensorBoardLogger', 'TensorBoardLogger', (), '', False, 'from pytorch_lightning.loggers import TensorBoardLogger\n'), ((167, 14, 167, 68), 'pytorch_lightning.Trainer', 'pl.Trainer', (), '', True, 'import pytorch_lightning as pl\n'), ((31, 24, 31, 44), 'torch.nn.Linear', 'nn.Linear', ({(31, 34, 31, 38): '4096', (31, 40, 31, 43): '200'}, {}), '(4096, 200)', True, 'import torch.nn as nn\n'), ((32, 28, 32, 48), 'torch.nn.Linear', 'nn.Linear', ({(32, 38, 32, 42): '4096', (32, 44, 32, 47): '200'}, {}), '(4096, 200)', True, 'import torch.nn as nn\n'), ((49, 29, 49, 49), 'torch.nn.Linear', 'nn.Linear', ({(49, 39, 49, 42): '200', (49, 44, 49, 48): '4096'}, {}), '(200, 4096)', True, 'import torch.nn as nn\n'), ((84, 14, 84, 37), 'torch.exp', 'torch.exp', ({(84, 24, 84, 36): '0.5 * logvar'}, {}), '(0.5 * logvar)', False, 'import torch\n'), ((85, 14, 85, 35), 'torch.randn_like', 'torch.randn_like', ({(85, 31, 85, 34): 'std'}, {}), '(std)', False, 'import torch\n'), ((100, 21, 100, 47), 'torch.nn.functional.mse_loss', 'F.mse_loss', ({(100, 32, 100, 41): 'recon_img', (100, 43, 100, 46): 'img'}, {}), '(recon_img, img)', True, 'import torch.nn.functional as F\n'), ((118, 21, 118, 47), 'torch.nn.functional.mse_loss', 'F.mse_loss', ({(118, 32, 118, 41): 'recon_img', (118, 43, 118, 46): 'img'}, {}), '(recon_img, img)', True, 'import torch.nn.functional as F\n'), ((134, 12, 134, 41), 'torch.randn', 'torch.randn', ({(134, 24, 134, 35): 'num_samples', (134, 37, 134, 40): '200'}, {}), '(num_samples, 200)', False, 'import torch\n'), ((17, 12, 17, 64), 'torch.nn.Conv2d', 'nn.Conv2d', (), '', True, 'import torch.nn as nn\n'), ((18, 12, 18, 30), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ({(18, 27, 18, 29): '32'}, {}), '(32)', True, 'import torch.nn as nn\n'), ((19, 12, 19, 26), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((20, 12, 20, 65), 'torch.nn.Conv2d', 'nn.Conv2d', (), '', True, 'import torch.nn as nn\n'), ((21, 12, 21, 30), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ({(21, 27, 21, 29): '64'}, {}), '(64)', True, 'import torch.nn as nn\n'), ((22, 12, 22, 26), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((23, 12, 23, 65), 'torch.nn.Conv2d', 'nn.Conv2d', (), '', True, 'import torch.nn as nn\n'), ((24, 12, 24, 30), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ({(24, 27, 24, 29): '64'}, {}), '(64)', True, 'import torch.nn as nn\n'), ((25, 12, 25, 26), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((26, 12, 26, 65), 'torch.nn.Conv2d', 'nn.Conv2d', (), '', True, 'import torch.nn as nn\n'), ((27, 12, 27, 30), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ({(27, 27, 27, 29): '64'}, {}), '(64)', True, 'import torch.nn as nn\n'), ((28, 12, 28, 26), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((37, 14, 37, 26), 'torch.nn.Flatten', 'nn.Flatten', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((52, 12, 52, 92), 
'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (), '', True, 'import torch.nn as nn\n'), ((53, 12, 53, 30), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ({(53, 27, 53, 29): '64'}, {}), '(64)', True, 'import torch.nn as nn\n'), ((54, 12, 54, 26), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((55, 12, 55, 92), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (), '', True, 'import torch.nn as nn\n'), ((56, 12, 56, 30), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ({(56, 27, 56, 29): '64'}, {}), '(64)', True, 'import torch.nn as nn\n'), ((57, 12, 57, 26), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((58, 12, 58, 92), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (), '', True, 'import torch.nn as nn\n'), ((59, 12, 59, 30), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ({(59, 27, 59, 29): '32'}, {}), '(32)', True, 'import torch.nn as nn\n'), ((60, 12, 60, 26), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((61, 12, 61, 91), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (), '', True, 'import torch.nn as nn\n'), ((62, 12, 62, 29), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ({(62, 27, 62, 28): '3'}, {}), '(3)', True, 'import torch.nn as nn\n'), ((63, 12, 63, 24), 'torch.nn.Sigmoid', 'nn.Sigmoid', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((142, 8, 142, 41), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ({}, {}), '()', False, 'from torchvision import transforms\n'), ((143, 8, 143, 34), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', ({(143, 30, 143, 33): '148'}, {}), '(148)', False, 'from torchvision import transforms\n'), ((144, 8, 144, 30), 'torchvision.transforms.Resize', 'transforms.Resize', ({(144, 26, 144, 29): '128'}, {}), '(128)', False, 'from torchvision import transforms\n'), ((145, 8, 145, 29), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ({}, {}), '()', False, 'from torchvision import transforms\n')] |
julat/DisasterResponse | data/process_data.py | 140489e521a96dc2ff9c9a95f0ce4e99403f03af | # Import libraries
import sys
import pandas as pd
from sqlalchemy import create_engine
def load_data(messages_filepath, categories_filepath):
"""
Load the data from the disaster response csvs
Parameters:
messages_filepath (str): Path to messages csv
categories_filepath (str): Path to categories csv
Returns:
Dataframe: Merged data
"""
messages = pd.read_csv(messages_filepath)
categories = pd.read_csv(categories_filepath)
df = pd.merge(messages,categories,on='id')
return df
def clean_data(df):
"""
Cleans the categories
Parameters:
df (DataFrame): Messy DataFrame
Returns:
Dataframe: Cleaned dataframe
"""
categories = df['categories'].str.split( pat=';', expand=True)
row = categories.iloc[[1]]
category_colnames = row.apply(lambda x : x.values[0].split("-")[0])
categories.columns = category_colnames
for column in categories:
categories[column] = categories[column].astype(str).str[-1:]
categories[column] = categories[column].astype(int)
categories[column] = categories[column].map(lambda x: 1 if x > 1 else x)
df.drop(['categories'], axis=1, inplace=True)
    df = pd.concat([df, categories], axis=1)
df.drop_duplicates(inplace=True)
return df
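# Note (added for illustration; the exact category labels are assumptions): each
# raw `categories` cell is expected to look like "related-1;request-0;offer-0;...",
# so splitting on ';' yields one column per category and the trailing digit
# becomes its binary value.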
def save_data(df, database_filename):
"""
Saves the DataFrame
Parameters:
df (DataFrame): Cleaned DataFrame
database_filename (DataFrame): Path to the SQLite Database
"""
engine = create_engine('sqlite:///' + database_filename + '.db')
df.to_sql(database_filename, engine, index=False, if_exists='replace')
def main():
if len(sys.argv) == 4:
messages_filepath, categories_filepath, database_filepath = sys.argv[1:]
print('Loading data...\n MESSAGES: {}\n CATEGORIES: {}'
.format(messages_filepath, categories_filepath))
df = load_data(messages_filepath, categories_filepath)
print('Cleaning data...')
df = clean_data(df)
print('Saving data...\n DATABASE: {}'.format(database_filepath))
save_data(df, database_filepath)
print('Cleaned data saved to database!')
else:
print('Please provide the filepaths of the messages and categories '\
'datasets as the first and second argument respectively, as '\
'well as the filepath of the database to save the cleaned data '\
'to as the third argument. \n\nExample: python process_data.py '\
'disaster_messages.csv disaster_categories.csv '\
'DisasterResponse.db')
if __name__ == '__main__':
main() | [((19, 15, 19, 45), 'pandas.read_csv', 'pd.read_csv', ({(19, 27, 19, 44): 'messages_filepath'}, {}), '(messages_filepath)', True, 'import pandas as pd\n'), ((20, 17, 20, 49), 'pandas.read_csv', 'pd.read_csv', ({(20, 29, 20, 48): 'categories_filepath'}, {}), '(categories_filepath)', True, 'import pandas as pd\n'), ((21, 9, 21, 46), 'pandas.merge', 'pd.merge', (), '', True, 'import pandas as pd\n'), ((48, 14, 48, 48), 'pandas.concat', 'pd.concat', (), '', True, 'import pandas as pd\n'), ((63, 13, 63, 68), 'sqlalchemy.create_engine', 'create_engine', ({(63, 27, 63, 67): "'sqlite:///' + database_filename + '.db'"}, {}), "('sqlite:///' + database_filename + '.db')", False, 'from sqlalchemy import create_engine\n')] |
atsgen/tf-charms | contrail-controller/files/plugins/check_contrail_status_controller.py | 81110aef700b2f227654d52709614ddb3d62ba17 | #!/usr/bin/env python3
import subprocess
import sys
import json
SERVICES = {
'control': [
'control',
'nodemgr',
'named',
'dns',
],
'config-database': [
'nodemgr',
'zookeeper',
'rabbitmq',
'cassandra',
],
'webui': [
'web',
'job',
],
'config': [
'svc-monitor',
'nodemgr',
'device-manager',
'api',
'schema',
],
}
WARNING = 1
CRITICAL = 2
def get_contrail_status_txt(services):
try:
output = subprocess.check_output("export CONTRAIL_STATUS_CONTAINER_NAME=contrail-status-controller-nrpe ; sudo -E contrail-status", shell=True).decode('UTF-8')
except subprocess.CalledProcessError as err:
message = ('CRITICAL: Could not get contrail-status.'
' return code: {} cmd: {} output: {}'.
format(err.returncode, err.cmd, err.output))
print(message)
sys.exit(CRITICAL)
statuses = dict()
group = None
for line in output.splitlines()[1:]:
words = line.split()
if len(words) == 4 and words[0] == '==' and words[3] == '==':
group = words[2]
continue
if len(words) == 0:
group = None
continue
if group and len(words) >= 2 and group in services:
srv = words[0].split(':')[0]
statuses.setdefault(group, list()).append(
{srv: ' '.join(words[1:])})
return statuses
def get_contrail_status_json(services):
try:
output = json.loads(subprocess.check_output("export CONTRAIL_STATUS_CONTAINER_NAME=contrail-status-controller-nrpe ; sudo -E contrail-status --format json", shell=True).decode('UTF-8'))
except subprocess.CalledProcessError as err:
message = ('CRITICAL: Could not get contrail-status.'
' return code: {} cmd: {} output: {}'.
format(err.returncode, err.cmd, err.output))
print(message)
sys.exit(CRITICAL)
statuses = output["pods"]
return statuses
def check_contrail_status(services, version=None):
if version > 1912:
statuses = get_contrail_status_json(services)
else:
statuses = get_contrail_status_txt(services)
for group in services:
if group not in statuses:
message = ('WARNING: POD {} is absent in the contrail-status'
.format(group))
print(message)
sys.exit(WARNING)
for srv in services[group]:
if not any(srv in key for key in statuses[group]):
message = ('WARNING: {} is absent in the contrail-status'
.format(srv))
print(message)
sys.exit(WARNING)
status = next(stat[srv] for stat in statuses[group] if srv in stat)
if status not in ['active', 'backup']:
message = ('CRITICAL: {} is not ready. Reason: {}'
.format(srv, status))
print(message)
sys.exit(CRITICAL)
print('Contrail status OK')
sys.exit()
if __name__ == '__main__':
cver = sys.argv[1]
if '.' in str(cver):
if cver == '5.0':
version = 500
elif cver == '5.1':
version = 510
else:
print("CRITICAL: invalid version: {}".format(cver))
sys.exit(CRITICAL)
elif not cver.isdigit():
print("CRITICAL: invalid version: {}".format(cver))
sys.exit(CRITICAL)
else:
version = int(cver)
check_contrail_status(SERVICES, version=version)
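# Illustrative invocation (the argument is a Contrail release number, e.g. 2011):
#   ./check_contrail_status_controller.py 2011
# The plugin exits 0 only when every service listed in SERVICES reports active/backup.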
| [((104, 4, 104, 14), 'sys.exit', 'sys.exit', ({}, {}), '()', False, 'import sys\n'), ((45, 8, 45, 26), 'sys.exit', 'sys.exit', ({(45, 17, 45, 25): 'CRITICAL'}, {}), '(CRITICAL)', False, 'import sys\n'), ((73, 8, 73, 26), 'sys.exit', 'sys.exit', ({(73, 17, 73, 25): 'CRITICAL'}, {}), '(CRITICAL)', False, 'import sys\n'), ((90, 12, 90, 29), 'sys.exit', 'sys.exit', ({(90, 21, 90, 28): 'WARNING'}, {}), '(WARNING)', False, 'import sys\n'), ((119, 8, 119, 26), 'sys.exit', 'sys.exit', ({(119, 17, 119, 25): 'CRITICAL'}, {}), '(CRITICAL)', False, 'import sys\n'), ((39, 17, 39, 151), 'subprocess.check_output', 'subprocess.check_output', (), '', False, 'import subprocess\n'), ((96, 16, 96, 33), 'sys.exit', 'sys.exit', ({(96, 25, 96, 32): 'WARNING'}, {}), '(WARNING)', False, 'import sys\n'), ((102, 16, 102, 34), 'sys.exit', 'sys.exit', ({(102, 25, 102, 33): 'CRITICAL'}, {}), '(CRITICAL)', False, 'import sys\n'), ((116, 12, 116, 30), 'sys.exit', 'sys.exit', ({(116, 21, 116, 29): 'CRITICAL'}, {}), '(CRITICAL)', False, 'import sys\n'), ((67, 28, 67, 176), 'subprocess.check_output', 'subprocess.check_output', (), '', False, 'import subprocess\n')] |
harnitsignalfx/skogaming | leaderboard-server/leaderboard-server.py | c860219c89149d686106dfb7a93d27df39830842 | from flask import Flask, jsonify, request
from flask_cors import CORS, cross_origin
import simplejson as json
from leaderboard.leaderboard import Leaderboard
import uwsgidecorators
import signalfx
app = Flask(__name__)
app.config['CORS_HEADERS'] = 'Content-Type'
cors = CORS(app)
highscore_lb_starship = Leaderboard('highscores-starship',host='redis-instance')
sfx = signalfx.SignalFx(ingest_endpoint='http://otelcol:9943').ingest('token-at-collector')
def parseData(row):
metricDump1 = {}
counterArray = []
metricDump1["dimensions"] = {}
metricDump1["dimensions"]["ip"] = row["ip"] # dimension
metricDump1["metric"] = "starship.shots"
metricDump1["value"] = row["shots"]
counterArray.append(metricDump1)
print('Sending data:',counterArray)
sfx.send(counters=counterArray)
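# Note: parseData expects a dict shaped like {"ip": "10.0.0.1", "shots": 3} (values are
# illustrative) and forwards it to SignalFx as a single "starship.shots" counter datapoint.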
@app.route('/health')
def health():
return '{"status":"OK"}', 200
@app.route('/leaders/<game>')
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
def returnLeaders(game):
if game == "starship":
return json.dumps(highscore_lb_starship.all_leaders()), 200
return '{}', 200
@app.route('/submitScores', methods=['POST'])
@cross_origin(origin='localhost',headers=['Content-Type','application/json'])
def submitScores():
content = request.get_json(force=True)
print('Content:',content)
if "game" in content:
if content["game"]=="starship":
highscore_lb_starship.rank_member(content["aduser"], content["score"])
return '{"status":"OK"}', 200
@app.route("/get_my_ip", methods=["GET"])
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
def get_my_ip():
if 'X-Real-Ip' in request.headers:
return jsonify({'ip':request.headers['X-Real-Ip']}), 200
else:
return jsonify({'ip':'-'}), 200
#return json.dumps({k:v for k, v in request.headers.items()}), 200
@app.route('/submitShots', methods=['POST'])
@cross_origin(origin='localhost',headers=['Content-Type','application/json'])
def submitShots():
content = request.get_json(force=True)
print('Content:',content)
shotSubmission = {}
totalShots = 0
if "game" in content:
if content["game"]=="starship":
if "shots" in content:
totalShots = content["shots"]
shotSubmission["shots"] = totalShots
if 'X-Real-Ip' in request.headers:
shotSubmission["ip"] = request.headers['X-Real-Ip']
else:
shotSubmission["ip"] = "-"
parseData(shotSubmission)
return '{"status":"OK"}', 200
if __name__ == '__main__':
app.run(host='0.0.0.0', port=6001)
| [((9, 6, 9, 21), 'flask.Flask', 'Flask', ({(9, 12, 9, 20): '__name__'}, {}), '(__name__)', False, 'from flask import Flask, jsonify, request\n'), ((11, 7, 11, 16), 'flask_cors.CORS', 'CORS', ({(11, 12, 11, 15): 'app'}, {}), '(app)', False, 'from flask_cors import CORS, cross_origin\n'), ((13, 24, 13, 80), 'leaderboard.leaderboard.Leaderboard', 'Leaderboard', (), '', False, 'from leaderboard.leaderboard import Leaderboard\n'), ((42, 1, 42, 74), 'flask_cors.cross_origin', 'cross_origin', (), '', False, 'from flask_cors import CORS, cross_origin\n'), ((49, 1, 49, 77), 'flask_cors.cross_origin', 'cross_origin', (), '', False, 'from flask_cors import CORS, cross_origin\n'), ((62, 1, 62, 74), 'flask_cors.cross_origin', 'cross_origin', (), '', False, 'from flask_cors import CORS, cross_origin\n'), ((71, 1, 71, 77), 'flask_cors.cross_origin', 'cross_origin', (), '', False, 'from flask_cors import CORS, cross_origin\n'), ((51, 14, 51, 42), 'flask.request.get_json', 'request.get_json', (), '', False, 'from flask import Flask, jsonify, request\n'), ((73, 14, 73, 42), 'flask.request.get_json', 'request.get_json', (), '', False, 'from flask import Flask, jsonify, request\n'), ((16, 6, 16, 62), 'signalfx.SignalFx', 'signalfx.SignalFx', (), '', False, 'import signalfx\n'), ((65, 15, 65, 59), 'flask.jsonify', 'jsonify', ({(65, 23, 65, 58): "{'ip': request.headers['X-Real-Ip']}"}, {}), "({'ip': request.headers['X-Real-Ip']})", False, 'from flask import Flask, jsonify, request\n'), ((67, 15, 67, 34), 'flask.jsonify', 'jsonify', ({(67, 23, 67, 33): "{'ip': '-'}"}, {}), "({'ip': '-'})", False, 'from flask import Flask, jsonify, request\n')] |
jorgensd/meshio | meshio/_cli/_info.py | 0600ac9e9e8d1e1a27d5f3f2f4235414f4482cac | import argparse
import numpy as np
from .._helpers import read, reader_map
from ._helpers import _get_version_text
def info(argv=None):
# Parse command line arguments.
parser = _get_info_parser()
args = parser.parse_args(argv)
# read mesh data
mesh = read(args.infile, file_format=args.input_format)
print(mesh)
# check if the cell arrays are consistent with the points
is_consistent = True
for cells in mesh.cells:
if np.any(cells.data > mesh.points.shape[0]):
print("\nATTENTION: Inconsistent mesh. Cells refer to nonexistent points.")
is_consistent = False
break
# check if there are redundant points
if is_consistent:
point_is_used = np.zeros(mesh.points.shape[0], dtype=bool)
for cells in mesh.cells:
point_is_used[cells.data] = True
if np.any(~point_is_used):
print("ATTENTION: Some points are not part of any cell.")
def _get_info_parser():
parser = argparse.ArgumentParser(
description=("Print mesh info."), formatter_class=argparse.RawTextHelpFormatter
)
parser.add_argument("infile", type=str, help="mesh file to be read from")
parser.add_argument(
"--input-format",
"-i",
type=str,
choices=sorted(list(reader_map.keys())),
help="input file format",
default=None,
)
parser.add_argument(
"--version",
"-v",
action="version",
version=_get_version_text(),
help="display version information",
)
return parser
| [((36, 13, 38, 5), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((21, 11, 21, 52), 'numpy.any', 'np.any', ({(21, 18, 21, 51): '(cells.data > mesh.points.shape[0])'}, {}), '(cells.data > mesh.points.shape[0])', True, 'import numpy as np\n'), ((28, 24, 28, 66), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((31, 11, 31, 33), 'numpy.any', 'np.any', ({(31, 18, 31, 32): '(~point_is_used)'}, {}), '(~point_is_used)', True, 'import numpy as np\n')] |
Data-Linkage/ccslink | ccslink/Zip.py | ee1105888d43c6a2b307deb96ddede34d03a965f | import os, shutil
from CCSLink import Spark_Session as SS
def add_zipped_dependency(zip_from, zip_target):
"""
This method creates a zip of the code to be sent to the executors.
It essentially zips the Python packages installed by PIP and
submits them via addPyFile in the current PySpark context
E.g. if we want to submit "metaphone" package so that we
can do use `import metaphone` and use its methods inside UDF,
we run this method with:
- zip_from = /home/cdsw/.local/lib/python3.6/site-packages/
- zip_target = metaphone
"""
# change this to a path in your project
zipped_fpath = f'/home/cdsw/zipped_packages/{zip_target}'
if os.path.exists(zipped_fpath + '.zip'):
os.remove(zipped_fpath + '.zip')
shutil.make_archive(
# path to the resulting zipped file (without the suffix)
base_name=zipped_fpath, # resulting filename
# specifies the format --> implies .zip suffix
format='zip',
# the root dir from where we want to zip
root_dir=zip_from,
# the dir (relative to root dir) which we want to zip
# (all files in the final zip will have this prefix)
base_dir=zip_target,
)
# add the files to the executors
SS.SPARK().sparkContext.addPyFile(f'{zipped_fpath}.zip')
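# Usage sketch, taken from the docstring example above (paths are environment-specific):
#   add_zipped_dependency('/home/cdsw/.local/lib/python3.6/site-packages/', 'metaphone')
# after which executor-side UDF code can simply do `import metaphone`.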
| [((20, 7, 20, 44), 'os.path.exists', 'os.path.exists', ({(20, 22, 20, 43): "(zipped_fpath + '.zip')"}, {}), "(zipped_fpath + '.zip')", False, 'import os, shutil\n'), ((22, 4, 36, 5), 'shutil.make_archive', 'shutil.make_archive', (), '', False, 'import os, shutil\n'), ((21, 8, 21, 40), 'os.remove', 'os.remove', ({(21, 18, 21, 39): "(zipped_fpath + '.zip')"}, {}), "(zipped_fpath + '.zip')", False, 'import os, shutil\n'), ((39, 4, 39, 14), 'CCSLink.Spark_Session.SPARK', 'SS.SPARK', ({}, {}), '()', True, 'from CCSLink import Spark_Session as SS\n')] |
Mopolino8/moltemplate | moltemplate/nbody_Angles.py | 363df364fcb012e8e4beb7bc616a77d696b8b707 | try:
from .nbody_graph_search import Ugraph
except (SystemError, ValueError):
# not installed as a package
from nbody_graph_search import Ugraph
# This file defines how 3-body angle interactions are generated by moltemplate
# by default. It can be overridden by supplying your own custom file.
# To find 3-body "angle" interactions, we would use this subgraph:
#
#
# *---*---* => 1st bond connects atoms 0 and 1
# 0 1 2 2nd bond connects atoms 1 and 2
#
bond_pattern = Ugraph([(0, 1), (1, 2)])
# (Ugraph atom indices begin at 0, not 1)
# The next function eliminates the redundancy between 0-1-2 and 2-1-0:
def canonical_order(match):
"""
Before defining a new interaction, we must check to see if an
interaction between these same 3 atoms has already been created
(perhaps listed in a different, but equivalent order).
    If we don't check for this, we will create many unnecessary redundant
    interactions (which can slow down the simulation).
To avoid this, I define a "canonical_order" function which sorts the atoms
and bonds in a way which is consistent with the symmetry of the interaction
being generated... Later the re-ordered list of atom and bond ids will be
tested against the list of atom/bond ids in the matches-found-so-far,
before it is added to the list of interactions found so far. Note that
    the energy of an angle interaction is a function of the angle between
three consecutively bonded atoms (referred to here as: 0,1,2).
This angle does not change when swapping the atoms at either end (0 and 2).
So it does not make sense to define a separate 3-body angle
interaction between atoms 0,1,2 AS WELL AS an interaction between 2,1,0.
So we sort the atoms and bonds so that the first atom has a always has
a lower atomID than the third atom. (Later we will check to see if we
have already defined an interaction between these 3 atoms. If not then
we create a new one.)
"""
# match[0][0:2] contains the ID numbers for the 3 atoms in the match
atom0 = match[0][0]
atom1 = match[0][1]
atom2 = match[0][2]
# match[1][0:1] contains the ID numbers for the 2 bonds
bond0 = match[1][0]
bond1 = match[1][1]
if atom0 < atom2:
# return ((atom0, atom1, atom2), (bond0, bond1)) same thing as:
return match
else:
return ((atom2, atom1, atom0), (bond1, bond0))
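# Illustrative example (not part of the original module): a match of atoms (5, 3, 1)
# connected by bonds (7, 2) is reordered so the lower-numbered end atom comes first:
#   canonical_order(((5, 3, 1), (7, 2))) -> ((1, 3, 5), (2, 7))
# letting 5-3-1 and 1-3-5 be recognized as the same angle interaction.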
| [((18, 15, 18, 39), 'nbody_graph_search.Ugraph', 'Ugraph', ({(18, 22, 18, 38): '[(0, 1), (1, 2)]'}, {}), '([(0, 1), (1, 2)])', False, 'from nbody_graph_search import Ugraph\n')] |
DougRogers-DigitalFish/USD | extras/usd/examples/usdMakeFileVariantModelAsset/usdMakeFileVariantModelAsset.py | d8a405a1344480f859f025c4f97085143efacb53 | #!/pxrpythonsubst
#
# Copyright 2016 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
'''
Creates a top-level, referenceable asset USD file from one or more
'variant' files, each of which can contain arbitrary scene description.
When supplying multiple files, one must also provide the name for a
variantSet that will be constructed to switch between the files.
The asset file will place the variant files behind a "payload", which will
enable consumers to defer loading and processing of the data when composed
onto a UsdStage.
The names of the created variations will be taken directly from the basename
of their corresponding input file.
'''
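# Illustrative command line (asset and file names are hypothetical):
#   usdMakeFileVariantModelAsset.py Chair chair_modern.usd chair_classic.usd \
#       --variantSet style --kind component
# which writes Chair.usd whose "style" variantSet payloads one of the two files per
# variant, with variant names taken from the file basenames.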
from __future__ import print_function
from pxr import Tf, Kind, Sdf, Usd
# ToDo:
# - handle multiple variantSets
# - layer multiple kinds of files (e.g. shading.usd over geom.usd)
# - allow output filename to be independently specifiable? (Breaks with Pixar
# convention)
# - allow variant names to be specified independently of variant file names
# - Compute and present (per-variant) UsdGeomModelAPI.extentsHint
# - Compute and author UsdModelAPI::SetPayloadAssetDependencies()
def CreateModelStage(assetName,
assetIdentifier=None,
kind=Kind.Tokens.component,
filesToReference=None,
variantSetName=None,
defaultVariantSelection=None):
# Preconditions....
if not Tf.IsValidIdentifier(assetName):
print("assetName '%s' must be a valid identifier. Aborting." %
assetName)
return None
if variantSetName and not Tf.IsValidIdentifier(variantSetName):
print("variantSetName '%s' must be a valid identifier. Aborting." %
variantSetName)
return None
if filesToReference and len(filesToReference) > 1 and not variantSetName:
# For now, we only allow multiple files to reference if we're switching
# them with a variantSet. We can relax this restriction when we can
# make internal payload arcs (bug #119960)
print("Cannot create multiple-file-reference without a variantSet. Aborting")
return None
if not Kind.Registry.IsA(kind, Kind.Tokens.model):
print("kind '%s' is not a valid model kind, which must be one of:" %
kind)
print(Kind.Registry.GetAllKinds())
return None
# Create the root file for the stage, and make it ASCII text.
# We need some nicer sugar for this.
fileName = assetName + ".usd"
rootLayer = Sdf.Layer.CreateNew(fileName, args = {'format':'usda'})
stage = Usd.Stage.Open(rootLayer)
# Name the root prim after the asset. Don't give it a type, since we
# want that to come from referenced files. Make it be the "default prim"
    # so that we can reference the resulting file without specifying a
# prim path
rootPath = Sdf.Path.absoluteRootPath
modelRootPrim = stage.DefinePrim(rootPath.AppendChild(assetName))
stage.SetDefaultPrim(modelRootPrim)
modelAPI = Usd.ModelAPI(modelRootPrim)
modelAPI.SetKind(kind)
# See http://openusd.org/docs/api/class_usd_model_a_p_i.html#details
# for more on assetInfo
modelAPI.SetAssetName(assetName)
modelAPI.SetAssetIdentifier(assetIdentifier or fileName)
# Add a class named after the asset, and make the asset inherit from it.
# This is not necessary for a valid asset, and the class-naming is a Pixar
# convention. But always having a class associated with each asset is
# extremely useful for non-destructively editing many referenced or
# instanced assets of the same type.
classPrim = stage.CreateClassPrim(rootPath.AppendChild("_class_"+assetName))
modelRootPrim.GetInherits().AddInherit(classPrim.GetPath())
if not filesToReference:
# weird edge case... we're done
return stage
elif len(filesToReference) == 1 and not variantSetName:
# The other, more plausible edge case: we're just wrapping
# some other file (e.g. alembic) in order to give it a payload
# and other proper USD trappings - no variants
modelRootPrim.GetPayloads().AddPayload(Sdf.Payload(filesToReference[0]))
return stage
# OK, we're making a variantSet, and we are going to vary the payload
# in each variant
varSet = modelRootPrim.GetVariantSet(variantSetName)
for variantFile in filesToReference:
import os
variantName = os.path.splitext(os.path.basename(variantFile))[0]
# If we didn't specify a default selection, choose the first one
if not defaultVariantSelection:
defaultVariantSelection = variantName
varSet.AddVariant(variantName)
varSet.SetVariantSelection(variantName)
# The context object makes all edits "go inside" the variant we
# just created.
with varSet.GetVariantEditContext():
modelRootPrim.GetPayloads().AddPayload(Sdf.Payload(variantFile))
# Now put the variantSet into the state we want it to be in by default
varSet.SetVariantSelection(defaultVariantSelection)
return stage
if __name__ == "__main__":
import argparse, os, sys
descr = __doc__.strip()
parser = argparse.ArgumentParser(prog=os.path.basename(sys.argv[0]),
description=descr)
parser.add_argument('assetName')
parser.add_argument('variantFiles', nargs='+')
parser.add_argument(
'-k', '--kind', default='component', action='store', metavar='kind',
help="Model kind, one of: component, group, or assembly")
parser.add_argument(
'-v', '--variantSet', default='', action='store', metavar='variantSet',
help="Variantset to create to modulate variantFiles. Can be elided "
"if only one file is supplied")
parser.add_argument(
'-i', '--identifier', default='', action='store', metavar='identifier',
help="The identifier you would expect your Ar asset-resolver plugin "
"to resolve to the (installed) assetName.usd file this script creates. "
" If unspecified, defaults to assetName.usd")
parser.add_argument(
'-d', '--defaultVariantSelection', default='', action='store',
metavar='defaultVariantSelection',
help="This variant will be selected by default when the asset is "
"added to a composition. If unspecified, will be the variant for "
"'variantFile1'")
args = parser.parse_args()
if not args.assetName or args.assetName == '':
parser.error("No assetName specified")
stage = CreateModelStage(args.assetName,
assetIdentifier=args.identifier,
kind=args.kind,
filesToReference=args.variantFiles,
variantSetName=args.variantSet,
defaultVariantSelection=args.defaultVariantSelection)
if stage:
stage.GetRootLayer().Save()
exit(0)
else:
exit(1)
| [((82, 16, 82, 71), 'pxr.Sdf.Layer.CreateNew', 'Sdf.Layer.CreateNew', (), '', False, 'from pxr import Tf, Kind, Sdf, Usd\n'), ((83, 12, 83, 37), 'pxr.Usd.Stage.Open', 'Usd.Stage.Open', ({(83, 27, 83, 36): 'rootLayer'}, {}), '(rootLayer)', False, 'from pxr import Tf, Kind, Sdf, Usd\n'), ((92, 15, 92, 42), 'pxr.Usd.ModelAPI', 'Usd.ModelAPI', ({(92, 28, 92, 41): 'modelRootPrim'}, {}), '(modelRootPrim)', False, 'from pxr import Tf, Kind, Sdf, Usd\n'), ((59, 11, 59, 42), 'pxr.Tf.IsValidIdentifier', 'Tf.IsValidIdentifier', ({(59, 32, 59, 41): 'assetName'}, {}), '(assetName)', False, 'from pxr import Tf, Kind, Sdf, Usd\n'), ((73, 11, 73, 53), 'pxr.Kind.Registry.IsA', 'Kind.Registry.IsA', ({(73, 29, 73, 33): 'kind', (73, 35, 73, 52): 'Kind.Tokens.model'}, {}), '(kind, Kind.Tokens.model)', False, 'from pxr import Tf, Kind, Sdf, Usd\n'), ((63, 30, 63, 66), 'pxr.Tf.IsValidIdentifier', 'Tf.IsValidIdentifier', ({(63, 51, 63, 65): 'variantSetName'}, {}), '(variantSetName)', False, 'from pxr import Tf, Kind, Sdf, Usd\n'), ((76, 14, 76, 41), 'pxr.Kind.Registry.GetAllKinds', 'Kind.Registry.GetAllKinds', ({}, {}), '()', False, 'from pxr import Tf, Kind, Sdf, Usd\n'), ((140, 42, 140, 71), 'os.path.basename', 'os.path.basename', ({(140, 59, 140, 70): 'sys.argv[0]'}, {}), '(sys.argv[0])', False, 'import os\n'), ((114, 47, 114, 79), 'pxr.Sdf.Payload', 'Sdf.Payload', ({(114, 59, 114, 78): 'filesToReference[0]'}, {}), '(filesToReference[0])', False, 'from pxr import Tf, Kind, Sdf, Usd\n'), ((122, 39, 122, 68), 'os.path.basename', 'os.path.basename', ({(122, 56, 122, 67): 'variantFile'}, {}), '(variantFile)', False, 'import os\n'), ((131, 51, 131, 75), 'pxr.Sdf.Payload', 'Sdf.Payload', ({(131, 63, 131, 74): 'variantFile'}, {}), '(variantFile)', False, 'from pxr import Tf, Kind, Sdf, Usd\n')] |
fbdp1202/pyukf_kinect_body_tracking | src/main.py | c44477149cfc22abfe9121c2604dc284c93fbd42 | import sys
import os
sys.path.append('./code/')
from skeleton import Skeleton
from read_data import *
from calibration import Calibration
from ukf_filter import ukf_Filter_Controler
from canvas import Canvas
from regression import *
import time
from functools import wraps
import os
def check_time(function):
@wraps(function)
def measure(*args, **kwargs):
start_time = time.time()
result = function(*args, **kwargs)
end_time = time.time()
print(f"@check_time: {function.__name__} took {end_time - start_time}")
return result
return measure
def get_dir_name(dir):
dir_list = []
for name in os.listdir(dir):
path = dir + '/' + name
if not os.path.isfile(path):
dir_list.append(name)
return dir_list
def scan_dir(dir):
dir_list = []
for name in os.listdir(dir):
path = dir + '/' + name
if os.path.isfile(path):
dir_list.append(path)
return dir_list
@check_time
def merge_skeleton_data(folder_name):
save_file_name = folder_name + '.txt'
dir_list = scan_dir(folder_name)
wf = open(save_file_name, 'w')
for filename in dir_list:
f = open(filename, 'r')
line = f.readline()
wf.write(line)
wf.close()
return save_file_name
@check_time
def init_simul(filename, test_num, cbr_num=50, div_step=1):
data = read_data_skeleton(filename)
# test_num, data = interval_compasation(data, test_num, div_step)
test_num = min(test_num, len(data))
skeletons = []
for i in range(test_num):
skeletons.append(Skeleton(data[i]))
cbr_num = min(test_num, cbr_num)
cal_skeletons = []
for i in range(cbr_num):
cal_skeletons.append(skeletons[i*div_step])
calibration = Calibration(cal_skeletons)
lower_init_mean, upper_init_mean = calibration.get_init_mean(0, filename)
return skeletons, lower_init_mean, upper_init_mean, test_num
@check_time
def make_filter(lower_init_mean, lower_init_cov, upper_init_mean, upper_init_cov, model):
flt = None
if model == 'ukf':
flt = ukf_Filter_Controler(lower_init_mean, lower_init_cov, upper_init_mean, upper_init_cov)
else:
print(model, "is not exist model name")
return flt
@check_time
def run_ukf(ukf, skeletons, test_num):
original_data = []
estimate_data = []
estimate_state = []
test_num = min(len(skeletons), test_num)
print("total test is {}".format(test_num))
print("test_num:", end=' ')
for i in range(test_num):
curr_input = skeletons[i].get_measurement()
original_data.append(curr_input)
state, data = ukf.update(curr_input)
estimate_data.append(data)
estimate_state.append(state)
if i % 10 == 0:
print(i, end=' ')
print('')
return original_data, estimate_data, estimate_state
def make_folder(folder_name):
if not os.path.isdir(folder_name):
os.mkdir(folder_name)
return folder_name
def get_save_skeleton_data_folder_name(person_name, pos_mode, model):
folder_name = make_folder('result')
folder_name = make_folder(folder_name + '/' + person_name)
folder_name = make_folder(folder_name + '/' + pos_mode)
folder_name = make_folder(folder_name + '/' + model)
return folder_name + '/'
def save_sk_data_to_csv(folder_name, filename, data):
filename = folder_name + filename
f = open(filename, "w", encoding="UTF-8")
for i in range(len(data)):
for j in range(len(data[i])):
for k in range(3):
f.write(str(data[i][j][k]))
if j == (len(data[i])-1) and k == 2:
f.write('\n')
else:
f.write(',')
def save_sk_state_to_csv(folder_name, filename, data):
filename = folder_name + filename
f = open(filename, 'w', encoding="UTF-8")
for i in range(len(data)):
for j in range(len(data[i])):
f.write(str(data[i][j]))
if j == (len(data[i])-1):
f.write('\n')
else:
f.write(',')
@check_time
def save_skeleton_data_to_csv(person_name, pos_mode, original_data, estimate_data, estimate_state, model):
csv_folder_name = get_save_skeleton_data_folder_name(person_name, pos_mode, model)
save_sk_data_to_csv(csv_folder_name, 'original_data.csv', original_data)
save_sk_data_to_csv(csv_folder_name, 'estimate_data.csv', estimate_data)
save_sk_state_to_csv(csv_folder_name, 'estimate_state.csv', estimate_state)
def read_csv(filename):
data = []
with open(filename, 'r') as reader:
for line in reader:
fields = line.split(',')
fields[len(fields)-1] = fields[len(fields)-1].replace('\n', '')
for i in range(len(fields)):
data.append(float(fields[i]))
data = np.array(data).reshape((int)(len(data)/32/3), 32, 3)
skeletons = []
for d in data:
skeletons.append(Skeleton(d))
return skeletons
@check_time
def read_skeleton_data_from_csv(person_name, pos_mode, model):
csv_folder_name = get_save_skeleton_data_folder_name(person_name, pos_mode, model)
original_data = read_csv(csv_folder_name + 'original_data.csv')
estimate_data = read_csv(csv_folder_name + 'estimate_data.csv')
return original_data, estimate_data
def get_save_image_file_name(person_name, pos_mode, model, plot_mode):
folder_name = make_folder('result')
folder_name = make_folder(folder_name + '/' + person_name)
folder_name = make_folder(folder_name + '/' + pos_mode)
folder_name = make_folder(folder_name + '/' + model)
folder_name = make_folder(folder_name + '/' + plot_mode)
return folder_name + '/'
@check_time
def skeleton_draw(person_name, pos_mode, model, original_data, estimate_data, sleep_t=100):
canvas = Canvas()
img_name_point = get_save_image_file_name(person_name, pos_mode, model, 'point')
img_name_length = get_save_image_file_name(person_name, pos_mode, model, 'length')
img_name_3D = get_save_image_file_name(person_name, pos_mode, model, 'plot_3D')
# canvas.skeleton_3D_plot(original_data, estimate_data)
canvas.skeleton_3D_animation_save(original_data, estimate_data, sleep_t, img_name_3D)
canvas.skeleton_point_plot(original_data, estimate_data, img_name_point)
canvas.skeleton_length_plot(original_data, estimate_data, img_name_length)
def set_lower_init_cov(value_cov=1e-6, velo_cov_0=1e-4, velo_cov_1=1e-2, len_cov=1e-10, obs_cov_factor=1e-4, trans_factor=100):
return [value_cov, velo_cov_0,value_cov, velo_cov_0,value_cov, velo_cov_1,value_cov, velo_cov_1,value_cov, velo_cov_0, len_cov,obs_cov_factor, trans_factor]
def set_upper_init_cov(value_cov=1e-6, velo_cov=1e-4, len_cov=1e-10, obs_cov_factor=1e-4, trans_factor=100):
return [value_cov,velo_cov,value_cov,velo_cov,value_cov,velo_cov,value_cov,velo_cov,value_cov,velo_cov,value_cov,velo_cov,value_cov,velo_cov,value_cov,velo_cov,value_cov,velo_cov,value_cov,velo_cov,value_cov,velo_cov,value_cov,velo_cov,value_cov,velo_cov,value_cov,velo_cov,value_cov,velo_cov,value_cov,velo_cov,value_cov,velo_cov,len_cov,obs_cov_factor,trans_factor]
@check_time
def simulation_ukf(filename, test_num, cbr_num, model):
skeletons, lower_init_mean, upper_init_mean, test_num = init_simul(filename, test_num, cbr_num)
lower_init_cov = set_lower_init_cov()
upper_init_cov = set_upper_init_cov()
flt = make_filter(lower_init_mean, lower_init_cov, upper_init_mean, upper_init_cov, model)
original_data, estimate_data, estimate_state = run_ukf(flt, skeletons, test_num)
return original_data, estimate_data, estimate_state
| [((3, 0, 3, 26), 'sys.path.append', 'sys.path.append', ({(3, 16, 3, 25): '"""./code/"""'}, {}), "('./code/')", False, 'import sys\n'), ((18, 2, 18, 17), 'functools.wraps', 'wraps', ({(18, 8, 18, 16): 'function'}, {}), '(function)', False, 'from functools import wraps\n'), ((30, 13, 30, 28), 'os.listdir', 'os.listdir', ({(30, 24, 30, 27): 'dir'}, {}), '(dir)', False, 'import os\n'), ((38, 13, 38, 28), 'os.listdir', 'os.listdir', ({(38, 24, 38, 27): 'dir'}, {}), '(dir)', False, 'import os\n'), ((72, 15, 72, 41), 'calibration.Calibration', 'Calibration', ({(72, 27, 72, 40): 'cal_skeletons'}, {}), '(cal_skeletons)', False, 'from calibration import Calibration\n'), ((181, 10, 181, 18), 'canvas.Canvas', 'Canvas', ({}, {}), '()', False, 'from canvas import Canvas\n'), ((20, 15, 20, 26), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((22, 13, 22, 24), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((40, 5, 40, 25), 'os.path.isfile', 'os.path.isfile', ({(40, 20, 40, 24): 'path'}, {}), '(path)', False, 'import os\n'), ((81, 8, 81, 94), 'ukf_filter.ukf_Filter_Controler', 'ukf_Filter_Controler', ({(81, 29, 81, 44): 'lower_init_mean', (81, 46, 81, 60): 'lower_init_cov', (81, 62, 81, 77): 'upper_init_mean', (81, 79, 81, 93): 'upper_init_cov'}, {}), '(lower_init_mean, lower_init_cov, upper_init_mean,\n upper_init_cov)', False, 'from ukf_filter import ukf_Filter_Controler\n'), ((108, 8, 108, 34), 'os.path.isdir', 'os.path.isdir', ({(108, 22, 108, 33): 'folder_name'}, {}), '(folder_name)', False, 'import os\n'), ((109, 2, 109, 23), 'os.mkdir', 'os.mkdir', ({(109, 11, 109, 22): 'folder_name'}, {}), '(folder_name)', False, 'import os\n'), ((32, 9, 32, 29), 'os.path.isfile', 'os.path.isfile', ({(32, 24, 32, 28): 'path'}, {}), '(path)', False, 'import os\n'), ((65, 19, 65, 36), 'skeleton.Skeleton', 'Skeleton', ({(65, 28, 65, 35): 'data[i]'}, {}), '(data[i])', False, 'from skeleton import Skeleton\n'), ((161, 19, 161, 30), 'skeleton.Skeleton', 'Skeleton', ({(161, 28, 161, 29): 'd'}, {}), '(d)', False, 'from skeleton import Skeleton\n')] |
Mario-Kart-Felix/cfgov-refresh | cfgov/scripts/initial_data.py | 7978fedeb7aaf4d96a87720e6545567085e056a9 | from __future__ import print_function
import json
import os
from django.conf import settings
from django.contrib.auth.hashers import make_password
from django.contrib.auth.models import User
from wagtail.wagtailcore.models import Page, Site
from v1.models import HomePage, BrowseFilterablePage
def run():
print('Running script \'scripts.initial_data\' ...')
admin_user = None
site_root = None
events = None
admin_user = User.objects.filter(username='admin')
if not admin_user:
admin_user = User(username='admin',
password=make_password(os.environ.get('WAGTAIL_ADMIN_PW')),
is_superuser=True, is_active=True, is_staff=True)
admin_user.save()
else:
admin_user = admin_user[0]
# Creates a new site root `CFGov`
site_root = HomePage.objects.filter(title='CFGOV')
if not site_root:
root = Page.objects.first()
site_root = HomePage(title='CFGOV', slug='home-page', depth=2, owner=admin_user)
site_root.live = True
root.add_child(instance=site_root)
latest = site_root.save_revision(user=admin_user, submitted_for_moderation=False)
latest.save()
else:
site_root = site_root[0]
# Setting new site root
if not Site.objects.filter(hostname='content.localhost').exists():
site = Site.objects.first()
site.port = 8000
site.root_page_id = site_root.id
site.save()
content_site = Site(hostname='content.localhost', port=8000, root_page_id=site_root.id)
content_site.save()
# Clean Up
old_site_root = Page.objects.filter(id=2)[0]
if old_site_root:
old_site_root.delete()
# Events Browse Page required for event `import-data` command
if not BrowseFilterablePage.objects.filter(title='Events').exists():
events = BrowseFilterablePage(title='Events', slug='events', owner=admin_user)
site_root.add_child(instance=events)
revision = events.save_revision(
user=admin_user,
submitted_for_moderation=False,
)
revision.publish()
# Archived Events Browse Filterable Page
if not BrowseFilterablePage.objects.filter(title='Archive').exists():
archived_events = BrowseFilterablePage(title='Archive', slug='archive', owner=admin_user)
if not events:
events = BrowseFilterablePage.objects.get(title='Events')
events.add_child(instance=archived_events)
revision = archived_events.save_revision(
user=admin_user,
submitted_for_moderation=False,
)
revision.publish()
| [((20, 17, 20, 54), 'django.contrib.auth.models.User.objects.filter', 'User.objects.filter', (), '', False, 'from django.contrib.auth.models import User\n'), ((30, 16, 30, 54), 'v1.models.HomePage.objects.filter', 'HomePage.objects.filter', (), '', False, 'from v1.models import HomePage, BrowseFilterablePage\n'), ((32, 15, 32, 35), 'wagtail.wagtailcore.models.Page.objects.first', 'Page.objects.first', ({}, {}), '()', False, 'from wagtail.wagtailcore.models import Page, Site\n'), ((33, 20, 33, 88), 'v1.models.HomePage', 'HomePage', (), '', False, 'from v1.models import HomePage, BrowseFilterablePage\n'), ((43, 15, 43, 35), 'wagtail.wagtailcore.models.Site.objects.first', 'Site.objects.first', ({}, {}), '()', False, 'from wagtail.wagtailcore.models import Page, Site\n'), ((47, 23, 47, 95), 'wagtail.wagtailcore.models.Site', 'Site', (), '', False, 'from wagtail.wagtailcore.models import Page, Site\n'), ((57, 17, 57, 86), 'v1.models.BrowseFilterablePage', 'BrowseFilterablePage', (), '', False, 'from v1.models import HomePage, BrowseFilterablePage\n'), ((67, 26, 67, 97), 'v1.models.BrowseFilterablePage', 'BrowseFilterablePage', (), '', False, 'from v1.models import HomePage, BrowseFilterablePage\n'), ((51, 24, 51, 49), 'wagtail.wagtailcore.models.Page.objects.filter', 'Page.objects.filter', (), '', False, 'from wagtail.wagtailcore.models import Page, Site\n'), ((69, 21, 69, 69), 'v1.models.BrowseFilterablePage.objects.get', 'BrowseFilterablePage.objects.get', (), '', False, 'from v1.models import HomePage, BrowseFilterablePage\n'), ((42, 11, 42, 60), 'wagtail.wagtailcore.models.Site.objects.filter', 'Site.objects.filter', (), '', False, 'from wagtail.wagtailcore.models import Page, Site\n'), ((56, 11, 56, 62), 'v1.models.BrowseFilterablePage.objects.filter', 'BrowseFilterablePage.objects.filter', (), '', False, 'from v1.models import HomePage, BrowseFilterablePage\n'), ((66, 11, 66, 63), 'v1.models.BrowseFilterablePage.objects.filter', 'BrowseFilterablePage.objects.filter', (), '', False, 'from v1.models import HomePage, BrowseFilterablePage\n'), ((23, 51, 23, 85), 'os.environ.get', 'os.environ.get', ({(23, 66, 23, 84): '"""WAGTAIL_ADMIN_PW"""'}, {}), "('WAGTAIL_ADMIN_PW')", False, 'import os\n')] |
harmim/vut-avs-project1 | Scripts/compareOutputs.py | d36e6b5cdebce748d2bdf2afc43950968ecf0a91 | # Simple python3 script to compare output with a reference output.
# Usage: python3 compareOutputs.py testOutput.h5 testRefOutput.h5
import sys
import h5py
import numpy as np
if len(sys.argv) != 3:
print("Expected two arguments. Output and reference output file.")
sys.exit(1)
filename = sys.argv[1]
ref_filename = sys.argv[2]
f = h5py.File(filename, 'r')
ref_f = h5py.File(ref_filename, 'r')
out = np.array(f['output_data'])
out_ref = np.array(ref_f['output_data'])
if out.shape != out_ref.shape:
print("The files do not contain the same number of outputs.")
print("The output size: {0}.".format(out.shape[0]))
print("The reference size: {0}.".format(out_ref.shape[0]))
sys.exit(1)
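# Relative error with respect to the reference; zeros in the reference are replaced
# by 1.0 before dividing so the comparison below cannot divide by zero.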
ref_value = np.copy(out_ref)
ref_value[ref_value == 0.0] = 1.0
error = (out_ref - out) / ref_value
maximal_error = np.amax(error)
print("Maximal error between the output and the reference is {0}.".format(maximal_error))
if maximal_error < 10**(-6):
print("OK:Output seems to match the reference.")
sys.exit(0)
print("Failure:Output does not match the reference.")
maximal_error = np.amax(error, axis=1)
print(maximal_error.shape)
for i in range(0, 5):
print("Image", i)
print("Expected:", end="")
for j in range(0, 10):
print(out_ref[i, j], end = " ")
print("\nGot:", end="")
for j in range(0, 10):
print(out[i, j], end=" ")
print("\nMaximal error:", maximal_error[i], "\n")
sys.exit(1)
| [((15, 4, 15, 28), 'h5py.File', 'h5py.File', ({(15, 14, 15, 22): 'filename', (15, 24, 15, 27): '"""r"""'}, {}), "(filename, 'r')", False, 'import h5py\n'), ((16, 8, 16, 36), 'h5py.File', 'h5py.File', ({(16, 18, 16, 30): 'ref_filename', (16, 32, 16, 35): '"""r"""'}, {}), "(ref_filename, 'r')", False, 'import h5py\n'), ((18, 6, 18, 32), 'numpy.array', 'np.array', ({(18, 15, 18, 31): "f['output_data']"}, {}), "(f['output_data'])", True, 'import numpy as np\n'), ((19, 10, 19, 40), 'numpy.array', 'np.array', ({(19, 19, 19, 39): "ref_f['output_data']"}, {}), "(ref_f['output_data'])", True, 'import numpy as np\n'), ((28, 12, 28, 28), 'numpy.copy', 'np.copy', ({(28, 20, 28, 27): 'out_ref'}, {}), '(out_ref)', True, 'import numpy as np\n'), ((33, 16, 33, 30), 'numpy.amax', 'np.amax', ({(33, 24, 33, 29): 'error'}, {}), '(error)', True, 'import numpy as np\n'), ((41, 16, 41, 38), 'numpy.amax', 'np.amax', (), '', True, 'import numpy as np\n'), ((54, 0, 54, 11), 'sys.exit', 'sys.exit', ({(54, 9, 54, 10): '(1)'}, {}), '(1)', False, 'import sys\n'), ((10, 4, 10, 15), 'sys.exit', 'sys.exit', ({(10, 13, 10, 14): '(1)'}, {}), '(1)', False, 'import sys\n'), ((26, 4, 26, 15), 'sys.exit', 'sys.exit', ({(26, 13, 26, 14): '(1)'}, {}), '(1)', False, 'import sys\n'), ((38, 4, 38, 15), 'sys.exit', 'sys.exit', ({(38, 13, 38, 14): '(0)'}, {}), '(0)', False, 'import sys\n')] |
20CM/Sanctuary | sanctuary/tag/serializers.py | 14694d9bd6376bdc05248741a91df778400e9f66 | # -*- coding: utf-8 -*-
from rest_framework import serializers
from .models import Tag
class TagSerializer(serializers.ModelSerializer):
class Meta:
model = Tag
read_only_fields = ('topics_count',)
| [] |
cloudamqp/amqpstorm | examples/management_api/aliveness_test.py | 35eb8edc5f0c2ea3839e93940bf9d0e5f8f4242e | from amqpstorm.management import ApiConnectionError
from amqpstorm.management import ApiError
from amqpstorm.management import ManagementApi
if __name__ == '__main__':
API = ManagementApi('http://127.0.0.1:15672', 'guest', 'guest')
try:
result = API.aliveness_test('/')
if result['status'] == 'ok':
print("RabbitMQ is alive!")
else:
print("RabbitMQ is not alive! :(")
except ApiConnectionError as why:
print('Connection Error: %s' % why)
except ApiError as why:
print('ApiError: %s' % why)
| [((6, 10, 6, 67), 'amqpstorm.management.ManagementApi', 'ManagementApi', ({(6, 24, 6, 48): '"""http://127.0.0.1:15672"""', (6, 50, 6, 57): '"""guest"""', (6, 59, 6, 66): '"""guest"""'}, {}), "('http://127.0.0.1:15672', 'guest', 'guest')", False, 'from amqpstorm.management import ManagementApi\n')] |
vishalbelsare/zvt | src/zvt/recorders/em/meta/em_stockhk_meta_recorder.py | d55051147274c0a4157f08ec60908c781a323c8f | # -*- coding: utf-8 -*-
from zvt.contract.api import df_to_db
from zvt.contract.recorder import Recorder
from zvt.domain.meta.stockhk_meta import Stockhk
from zvt.recorders.em import em_api
class EMStockhkRecorder(Recorder):
provider = "em"
data_schema = Stockhk
def run(self):
df_south = em_api.get_tradable_list(entity_type="stockhk", hk_south=True)
df_south = df_south.set_index("code", drop=False)
df_south["south"] = True
df = em_api.get_tradable_list(entity_type="stockhk")
df = df.set_index("code", drop=False)
df_other = df.loc[~df.index.isin(df_south.index)].copy()
df_other["south"] = False
df_to_db(df=df_south, data_schema=self.data_schema, provider=self.provider, force_update=self.force_update)
df_to_db(df=df_other, data_schema=self.data_schema, provider=self.provider, force_update=self.force_update)
if __name__ == "__main__":
recorder = EMStockhkRecorder()
recorder.run()
# the __all__ is generated
__all__ = ["EMStockhkRecorder"]
| [((14, 19, 14, 81), 'zvt.recorders.em.em_api.get_tradable_list', 'em_api.get_tradable_list', (), '', False, 'from zvt.recorders.em import em_api\n'), ((18, 13, 18, 60), 'zvt.recorders.em.em_api.get_tradable_list', 'em_api.get_tradable_list', (), '', False, 'from zvt.recorders.em import em_api\n'), ((22, 8, 22, 115), 'zvt.contract.api.df_to_db', 'df_to_db', (), '', False, 'from zvt.contract.api import df_to_db\n'), ((23, 8, 23, 115), 'zvt.contract.api.df_to_db', 'df_to_db', (), '', False, 'from zvt.contract.api import df_to_db\n')] |
yanwunhao/auto-mshts | src/main.py | 7a4b690bbb6ae55e2f6fad77d176c2c0822db7a0 | from util.io import read_setting_json, read_0h_data, read_24h_data, draw_single_curve
from util.convert import split_array_into_samples, calculate_avg_of_sample, convert_to_percentage
from util.calculus import calculate_summary_of_sample, fit_sigmoid_curve
import matplotlib.pyplot as plt
import numpy as np
import csv
setting = read_setting_json()
setting = setting["rule"]
# load experiment parameter
# experiment parameter is stored in file of ./data/setting.json
initial_filename = setting["0h_datafile"]
final_filename = setting["24h_datafile"]
# sample width and height are the size of each sample area
sample_width = setting["sample_width"]
sample_height = setting["sample_height"]
dilution_protocol = setting["dilution_protocol"]
# width of each dilution
basic_width = setting["basic_width"]
# number of each control group
control_number_list = setting["control_number"]
# output directory
output_directory = setting["output_directory"]
# import initial concentration and calculate x_data
initial_concentration = setting["initial_concentration"]
repeat_times = int(sample_width / basic_width)
x_data = []
current_concentration = initial_concentration
for i in range(repeat_times):
x_data.append(current_concentration)
current_concentration /= dilution_protocol
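# Illustrative result: with initial_concentration=100 and dilution_protocol=10,
# x_data becomes [100, 10, 1, ...] with one entry per dilution step across the sample width.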
# load raw data
initial_sd_data = read_0h_data()
final_sd_data = read_24h_data()
# reshape data into the size of board
rebuild_0h_data = initial_sd_data.reshape((32, -1))
rebuild_24h_data = final_sd_data.reshape((32, -1))
# reshape data into a 2-dimensional array contains each group data
sample_divided_list_0h = split_array_into_samples(rebuild_0h_data, sample_width, sample_height)
sample_divided_list_24h = split_array_into_samples(rebuild_24h_data, sample_width, sample_height)
# handle data of control groups
control_0h_summary = 0
for number in control_number_list:
number = number - 1
sample = sample_divided_list_0h[number]
control_0h_summary = control_0h_summary + calculate_summary_of_sample(sample)
control_0h_average = control_0h_summary / (sample_width * sample_height * len(control_number_list))
control_24h_summary = 0
for number in control_number_list:
number = number - 1
sample = sample_divided_list_24h[number]
control_24h_summary = control_24h_summary + calculate_summary_of_sample(sample)
control_24h_average = control_24h_summary / (sample_width * sample_height * len(control_number_list))
# calculate standard deviation of each grid
sd_matrix = []
for line in rebuild_24h_data:
new_line = []
for element in line:
sd_data = (float(element) - control_0h_average.item()) \
/ (control_24h_average.item() - control_0h_average.item())
new_line.append(sd_data)
sd_matrix.append(new_line)
sd_matrix = np.array(sd_matrix)
# split array into different samples
sd_groups = split_array_into_samples(sd_matrix, sample_width, sample_height)
sd_groups = np.array(sd_groups, dtype=float)
RESULT_LIST = []
for sample in sd_groups:
result = calculate_avg_of_sample(sample, sample_width, basic_width)
RESULT_LIST.append(result)
RESULT_LIST = np.array(RESULT_LIST)
FULL_RESULT_LIST = []
for group in sd_groups:
x_index = 0
y_index = 0
sample_buffer = []
data_buffer = []
while y_index < sample_height:
while x_index < basic_width:
x = x_index
while x < sample_width:
data_buffer.append(group[y_index][x])
x += basic_width
sample_buffer.append(data_buffer)
data_buffer = []
x_index += 1
y_index += 1
x_index = 0
FULL_RESULT_LIST.append(sample_buffer)
FULL_RESULT_LIST = np.array(FULL_RESULT_LIST, dtype=float)
optional_color = ['red', 'orange', 'yellow', 'green', 'cyan', 'blue', 'purple']
EC50_LIST = []
EC50_AVG_LIST = []
sample_num = 0
for SAMPLE in FULL_RESULT_LIST:
sample_num += 1
fig, ax = plt.subplots()
index = 0
ax.set_title('Sample '+str(sample_num))
x_buffer = []
x_sampling_buffer = []
y_sampling_buffer = []
for repeat in SAMPLE:
x, y, x_sampling, y_sampling = fit_sigmoid_curve(x_data, repeat)
x_buffer.append(x)
x_sampling_buffer.append(x_sampling)
y_sampling_buffer.append(y_sampling)
draw_single_curve(ax, x, y, x_sampling, y_sampling, optional_color[index])
index += 1
EC50_LIST.append(x_buffer)
# draw the average result
avg = np.mean(x_buffer)
EC50_AVG_LIST.append(avg)
# draw the average curve
x_sampling_buffer = np.array(x_sampling_buffer).T
y_sampling_buffer = np.array(y_sampling_buffer).T
x_sampling_avg = []
y_sampling_avg = []
for line in x_sampling_buffer:
x_sampling_avg.append(np.mean(line))
for line in y_sampling_buffer:
y_sampling_avg.append(np.mean(line))
ax.plot(avg, 0.5, 'o', color='black')
ax.plot(x_sampling_avg, y_sampling_avg, color='black')
plt.savefig("./output/" + output_directory + "/figs" + "/Sample " + str(sample_num))
plt.cla()
plt.close(fig)
# output grouped result
output_f_grouped = open("./output/" + output_directory + "/result_grouped.csv", "w")
csv_writer_grouped = csv.writer(output_f_grouped)
csv_writer_grouped.writerow(["initial concentration: " + str(initial_concentration), "dilution protocol: " + str(dilution_protocol)])
csv_writer_grouped.writerow("")
sample_num = 0
for SAMPLE in FULL_RESULT_LIST:
SAMPLE = SAMPLE.T
sample_num += 1
csv_writer_grouped.writerow(["Sample " + str(sample_num)])
for repeat in SAMPLE:
csv_writer_grouped.writerow(repeat)
csv_writer_grouped.writerow("")
ec50_result_list = []
for ec50_index in EC50_LIST[sample_num-1]:
ec50_result_list.append(10**ec50_index)
csv_writer_grouped.writerow(ec50_result_list)
average_ec50 = np.power(10, EC50_AVG_LIST[sample_num-1])
csv_writer_grouped.writerow([])
csv_writer_grouped.writerow(["Average EC50", "Std"])
csv_writer_grouped.writerow([average_ec50, np.std(ec50_result_list)])
csv_writer_grouped.writerow("")
output_f_grouped.close()
output_f_full = open("./output/" + output_directory + "/result_full.csv", "w")
csv_writer_full = csv.writer(output_f_full)
for line in sd_matrix:
csv_writer_full.writerow(line)
output_f_full.close()
print("Finished")
| [((9, 10, 9, 29), 'util.io.read_setting_json', 'read_setting_json', ({}, {}), '()', False, 'from util.io import read_setting_json, read_0h_data, read_24h_data, draw_single_curve\n'), ((45, 18, 45, 32), 'util.io.read_0h_data', 'read_0h_data', ({}, {}), '()', False, 'from util.io import read_setting_json, read_0h_data, read_24h_data, draw_single_curve\n'), ((46, 16, 46, 31), 'util.io.read_24h_data', 'read_24h_data', ({}, {}), '()', False, 'from util.io import read_setting_json, read_0h_data, read_24h_data, draw_single_curve\n'), ((53, 25, 53, 95), 'util.convert.split_array_into_samples', 'split_array_into_samples', ({(53, 50, 53, 65): 'rebuild_0h_data', (53, 67, 53, 79): 'sample_width', (53, 81, 53, 94): 'sample_height'}, {}), '(rebuild_0h_data, sample_width, sample_height)', False, 'from util.convert import split_array_into_samples, calculate_avg_of_sample, convert_to_percentage\n'), ((54, 26, 54, 97), 'util.convert.split_array_into_samples', 'split_array_into_samples', ({(54, 51, 54, 67): 'rebuild_24h_data', (54, 69, 54, 81): 'sample_width', (54, 83, 54, 96): 'sample_height'}, {}), '(rebuild_24h_data, sample_width, sample_height)', False, 'from util.convert import split_array_into_samples, calculate_avg_of_sample, convert_to_percentage\n'), ((83, 12, 83, 31), 'numpy.array', 'np.array', ({(83, 21, 83, 30): 'sd_matrix'}, {}), '(sd_matrix)', True, 'import numpy as np\n'), ((86, 12, 86, 76), 'util.convert.split_array_into_samples', 'split_array_into_samples', ({(86, 37, 86, 46): 'sd_matrix', (86, 48, 86, 60): 'sample_width', (86, 62, 86, 75): 'sample_height'}, {}), '(sd_matrix, sample_width, sample_height)', False, 'from util.convert import split_array_into_samples, calculate_avg_of_sample, convert_to_percentage\n'), ((87, 12, 87, 44), 'numpy.array', 'np.array', (), '', True, 'import numpy as np\n'), ((94, 14, 94, 35), 'numpy.array', 'np.array', ({(94, 23, 94, 34): 'RESULT_LIST'}, {}), '(RESULT_LIST)', True, 'import numpy as np\n'), ((116, 19, 116, 58), 'numpy.array', 'np.array', (), '', True, 'import numpy as np\n'), ((172, 21, 172, 49), 'csv.writer', 'csv.writer', ({(172, 32, 172, 48): 'output_f_grouped'}, {}), '(output_f_grouped)', False, 'import csv\n'), ((195, 18, 195, 43), 'csv.writer', 'csv.writer', ({(195, 29, 195, 42): 'output_f_full'}, {}), '(output_f_full)', False, 'import csv\n'), ((91, 13, 91, 71), 'util.convert.calculate_avg_of_sample', 'calculate_avg_of_sample', ({(91, 37, 91, 43): 'sample', (91, 45, 91, 57): 'sample_width', (91, 59, 91, 70): 'basic_width'}, {}), '(sample, sample_width, basic_width)', False, 'from util.convert import split_array_into_samples, calculate_avg_of_sample, convert_to_percentage\n'), ((127, 14, 127, 28), 'matplotlib.pyplot.subplots', 'plt.subplots', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((146, 10, 146, 27), 'numpy.mean', 'np.mean', ({(146, 18, 146, 26): 'x_buffer'}, {}), '(x_buffer)', True, 'import numpy as np\n'), ((166, 4, 166, 13), 'matplotlib.pyplot.cla', 'plt.cla', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((167, 4, 167, 18), 'matplotlib.pyplot.close', 'plt.close', ({(167, 14, 167, 17): 'fig'}, {}), '(fig)', True, 'import matplotlib.pyplot as plt\n'), ((187, 19, 187, 60), 'numpy.power', 'np.power', ({(187, 28, 187, 30): '10', (187, 32, 187, 59): 'EC50_AVG_LIST[sample_num - 1]'}, {}), '(10, EC50_AVG_LIST[sample_num - 1])', True, 'import numpy as np\n'), ((61, 46, 61, 81), 'util.calculus.calculate_summary_of_sample', 'calculate_summary_of_sample', ({(61, 74, 61, 80): 'sample'}, {}), '(sample)', False, 'from 
util.calculus import calculate_summary_of_sample, fit_sigmoid_curve\n'), ((69, 48, 69, 83), 'util.calculus.calculate_summary_of_sample', 'calculate_summary_of_sample', ({(69, 76, 69, 82): 'sample'}, {}), '(sample)', False, 'from util.calculus import calculate_summary_of_sample, fit_sigmoid_curve\n'), ((136, 39, 136, 72), 'util.calculus.fit_sigmoid_curve', 'fit_sigmoid_curve', ({(136, 57, 136, 63): 'x_data', (136, 65, 136, 71): 'repeat'}, {}), '(x_data, repeat)', False, 'from util.calculus import calculate_summary_of_sample, fit_sigmoid_curve\n'), ((140, 8, 140, 82), 'util.io.draw_single_curve', 'draw_single_curve', ({(140, 26, 140, 28): 'ax', (140, 30, 140, 31): 'x', (140, 33, 140, 34): 'y', (140, 36, 140, 46): 'x_sampling', (140, 48, 140, 58): 'y_sampling', (140, 60, 140, 81): 'optional_color[index]'}, {}), '(ax, x, y, x_sampling, y_sampling, optional_color[index])', False, 'from util.io import read_setting_json, read_0h_data, read_24h_data, draw_single_curve\n'), ((151, 24, 151, 51), 'numpy.array', 'np.array', ({(151, 33, 151, 50): 'x_sampling_buffer'}, {}), '(x_sampling_buffer)', True, 'import numpy as np\n'), ((152, 24, 152, 51), 'numpy.array', 'np.array', ({(152, 33, 152, 50): 'y_sampling_buffer'}, {}), '(y_sampling_buffer)', True, 'import numpy as np\n'), ((158, 30, 158, 43), 'numpy.mean', 'np.mean', ({(158, 38, 158, 42): 'line'}, {}), '(line)', True, 'import numpy as np\n'), ((161, 30, 161, 43), 'numpy.mean', 'np.mean', ({(161, 38, 161, 42): 'line'}, {}), '(line)', True, 'import numpy as np\n'), ((190, 47, 190, 71), 'numpy.std', 'np.std', ({(190, 54, 190, 70): 'ec50_result_list'}, {}), '(ec50_result_list)', True, 'import numpy as np\n')] |
twonds/twisted | twisted/names/root.py | d6e270a465d371c3bed01bf369af497b77eb9f1e | # -*- test-case-name: twisted.names.test.test_rootresolve -*-
# Copyright (c) 2001-2009 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Resolver implementation for querying successive authoritative servers to
look up a record, starting from the root nameservers.
@author: Jp Calderone
todo::
robustify it
break discoverAuthority into several smaller functions
documentation
"""
from twisted.internet import defer
from twisted.names import dns
from twisted.names import common
def retry(t, p, *args):
assert t, "Timeout is required"
t = list(t)
def errback(failure):
failure.trap(defer.TimeoutError)
if not t:
return failure
return p.query(timeout=t.pop(0), *args
).addErrback(errback
)
return p.query(timeout=t.pop(0), *args
).addErrback(errback
)
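# Example of how retry is used below (see lookupNameservers): the query is first sent
# with a 1 second timeout and, on defer.TimeoutError, re-sent with the remaining
# timeouts (3, 11, 45) until the list is exhausted, after which the failure propagates.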
class _DummyController:
def messageReceived(self, *args):
pass
class Resolver(common.ResolverBase):
def __init__(self, hints):
common.ResolverBase.__init__(self)
self.hints = hints
def _lookup(self, name, cls, type, timeout):
d = discoverAuthority(name, self.hints
).addCallback(self.discoveredAuthority, name, cls, type, timeout
)
return d
def discoveredAuthority(self, auth, name, cls, type, timeout):
from twisted.names import client
q = dns.Query(name, type, cls)
r = client.Resolver(servers=[(auth, dns.PORT)])
d = r.queryUDP([q], timeout)
d.addCallback(r.filterAnswers)
return d
def lookupNameservers(host, atServer, p=None):
# print 'Nameserver lookup for', host, 'at', atServer, 'with', p
if p is None:
p = dns.DNSDatagramProtocol(_DummyController())
p.noisy = False
return retry(
(1, 3, 11, 45), # Timeouts
p, # Protocol instance
(atServer, dns.PORT), # Server to query
[dns.Query(host, dns.NS, dns.IN)] # Question to ask
)
def lookupAddress(host, atServer, p=None):
# print 'Address lookup for', host, 'at', atServer, 'with', p
if p is None:
p = dns.DNSDatagramProtocol(_DummyController())
p.noisy = False
return retry(
(1, 3, 11, 45), # Timeouts
p, # Protocol instance
(atServer, dns.PORT), # Server to query
[dns.Query(host, dns.A, dns.IN)] # Question to ask
)
def extractAuthority(msg, cache):
records = msg.answers + msg.authority + msg.additional
nameservers = [r for r in records if r.type == dns.NS]
# print 'Records for', soFar, ':', records
# print 'NS for', soFar, ':', nameservers
if not nameservers:
return None, nameservers
if not records:
raise IOError("No records")
for r in records:
if r.type == dns.A:
cache[str(r.name)] = r.payload.dottedQuad()
for r in records:
if r.type == dns.NS:
if str(r.payload.name) in cache:
return cache[str(r.payload.name)], nameservers
for addr in records:
if addr.type == dns.A and addr.name == r.name:
return addr.payload.dottedQuad(), nameservers
return None, nameservers
def discoverAuthority(host, roots, cache=None, p=None):
if cache is None:
cache = {}
rootAuths = list(roots)
parts = host.rstrip('.').split('.')
parts.reverse()
authority = rootAuths.pop()
soFar = ''
for part in parts:
soFar = part + '.' + soFar
# print '///////', soFar, authority, p
msg = defer.waitForDeferred(lookupNameservers(soFar, authority, p))
yield msg
msg = msg.getResult()
newAuth, nameservers = extractAuthority(msg, cache)
if newAuth is not None:
# print "newAuth is not None"
authority = newAuth
else:
if nameservers:
r = str(nameservers[0].payload.name)
# print 'Recursively discovering authority for', r
authority = defer.waitForDeferred(discoverAuthority(r, roots, cache, p))
yield authority
authority = authority.getResult()
# print 'Discovered to be', authority, 'for', r
## else:
## # print 'Doing address lookup for', soFar, 'at', authority
## msg = defer.waitForDeferred(lookupAddress(soFar, authority, p))
## yield msg
## msg = msg.getResult()
## records = msg.answers + msg.authority + msg.additional
## addresses = [r for r in records if r.type == dns.A]
## if addresses:
## authority = addresses[0].payload.dottedQuad()
## else:
## raise IOError("Resolution error")
# print "Yielding authority", authority
yield authority
discoverAuthority = defer.deferredGenerator(discoverAuthority)
def makePlaceholder(deferred, name):
def placeholder(*args, **kw):
deferred.addCallback(lambda r: getattr(r, name)(*args, **kw))
return deferred
return placeholder
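# Placeholder resolver that queues lookup calls until the real resolver arrives, then takes on its class and state.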
class DeferredResolver:
def __init__(self, resolverDeferred):
self.waiting = []
resolverDeferred.addCallback(self.gotRealResolver)
def gotRealResolver(self, resolver):
w = self.waiting
self.__dict__ = resolver.__dict__
self.__class__ = resolver.__class__
for d in w:
d.callback(resolver)
def __getattr__(self, name):
if name.startswith('lookup') or name in ('getHostByName', 'query'):
self.waiting.append(defer.Deferred())
return makePlaceholder(self.waiting[-1], name)
raise AttributeError(name)
def bootstrap(resolver):
"""Lookup the root nameserver addresses using the given resolver
Return a Resolver which will eventually become a C{root.Resolver}
instance that has references to all the root servers that we were able
to look up.
"""
domains = [chr(ord('a') + i) for i in range(13)]
# f = lambda r: (log.msg('Root server address: ' + str(r)), r)[1]
f = lambda r: r
L = [resolver.getHostByName('%s.root-servers.net' % d).addCallback(f) for d in domains]
d = defer.DeferredList(L)
d.addCallback(lambda r: Resolver([e[1] for e in r if e[0]]))
return DeferredResolver(d)
| [((151, 20, 151, 62), 'twisted.internet.defer.deferredGenerator', 'defer.deferredGenerator', ({(151, 44, 151, 61): 'discoverAuthority'}, {}), '(discoverAuthority)', False, 'from twisted.internet import defer\n'), ((188, 8, 188, 29), 'twisted.internet.defer.DeferredList', 'defer.DeferredList', ({(188, 27, 188, 28): 'L'}, {}), '(L)', False, 'from twisted.internet import defer\n'), ((41, 8, 41, 42), 'twisted.names.common.ResolverBase.__init__', 'common.ResolverBase.__init__', ({(41, 37, 41, 41): 'self'}, {}), '(self)', False, 'from twisted.names import common\n'), ((52, 12, 52, 38), 'twisted.names.dns.Query', 'dns.Query', ({(52, 22, 52, 26): 'name', (52, 28, 52, 32): 'type', (52, 34, 52, 37): 'cls'}, {}), '(name, type, cls)', False, 'from twisted.names import dns\n'), ((53, 12, 53, 55), 'twisted.names.client.Resolver', 'client.Resolver', (), '', False, 'from twisted.names import client\n'), ((67, 9, 67, 40), 'twisted.names.dns.Query', 'dns.Query', ({(67, 19, 67, 23): 'host', (67, 25, 67, 31): 'dns.NS', (67, 33, 67, 39): 'dns.IN'}, {}), '(host, dns.NS, dns.IN)', False, 'from twisted.names import dns\n'), ((79, 9, 79, 39), 'twisted.names.dns.Query', 'dns.Query', ({(79, 19, 79, 23): 'host', (79, 25, 79, 30): 'dns.A', (79, 32, 79, 38): 'dns.IN'}, {}), '(host, dns.A, dns.IN)', False, 'from twisted.names import dns\n'), ((173, 32, 173, 48), 'twisted.internet.defer.Deferred', 'defer.Deferred', ({}, {}), '()', False, 'from twisted.internet import defer\n')] |
edwardyehuang/iDS | tools/apply_colormap_dir.py | 36bde3a9e887eb7e1a8d88956cf041909ee84da4 | # ================================================================
# MIT License
# Copyright (c) 2021 edwardyehuang (https://github.com/edwardyehuang)
# ================================================================
import os, sys
rootpath = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))
sys.path.insert(1, rootpath)
import tensorflow as tf
import numpy as np
from PIL import Image
from absl import app
from absl import flags
from common_flags import FLAGS
from ids.voc2012 import get_colormap as get_voc2012_colormap
from ids.cityscapes_fine import get_colormap as get_cityscapes_colormap
flags.DEFINE_string("input_dir", None, "input dir path")
flags.DEFINE_string("output_dir", None, "output dir path")
flags.DEFINE_string("colormap", "voc2012", "colormap name")
flags.DEFINE_integer("ignore_label", 255, "ignore label")
def apply_colormap_to_dir(input_dir, output_dir=None, colormap=None):
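    # Re-save every grayscale (L) or palette (P) mode image in input_dir as a palettized PNG rendered with the supplied colormap.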
colormap = colormap.astype(np.uint8)
counter = 0
if not os.path.exists(output_dir):
os.mkdir(output_dir)
for filename in tf.io.gfile.listdir(input_dir):
input_path = os.path.join(input_dir, filename)
output_path = os.path.join(output_dir, filename)
img = Image.open(input_path)
if img.mode != "L" and img.mode != "P":
continue
img = img.convert("P")
img.putpalette(colormap)
img.save(output_path, format="PNG")
counter += 1
tf.print("Processed {}".format(counter))
def main(argv):
colormap_name = FLAGS.colormap
colormap_name = colormap_name.lower()
if colormap_name == "voc2012":
colormap = get_voc2012_colormap()
elif colormap_name == "cityscapes":
colormap = get_cityscapes_colormap()
else:
raise ValueError(f"Not support colormap = {colormap_name}")
if FLAGS.ignore_label == 0:
colormap = colormap[1:]
apply_colormap_to_dir(FLAGS.input_dir, FLAGS.output_dir, colormap=colormap)
if __name__ == "__main__":
app.run(main)
| [((10, 0, 10, 28), 'sys.path.insert', 'sys.path.insert', ({(10, 16, 10, 17): '(1)', (10, 19, 10, 27): 'rootpath'}, {}), '(1, rootpath)', False, 'import os, sys\n'), ((25, 0, 25, 56), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', ({(25, 20, 25, 31): '"""input_dir"""', (25, 33, 25, 37): 'None', (25, 39, 25, 55): '"""input dir path"""'}, {}), "('input_dir', None, 'input dir path')", False, 'from absl import flags\n'), ((26, 0, 26, 58), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', ({(26, 20, 26, 32): '"""output_dir"""', (26, 34, 26, 38): 'None', (26, 40, 26, 57): '"""output dir path"""'}, {}), "('output_dir', None, 'output dir path')", False, 'from absl import flags\n'), ((27, 0, 27, 59), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', ({(27, 20, 27, 30): '"""colormap"""', (27, 32, 27, 41): '"""voc2012"""', (27, 43, 27, 58): '"""colormap name"""'}, {}), "('colormap', 'voc2012', 'colormap name')", False, 'from absl import flags\n'), ((28, 0, 28, 57), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', ({(28, 21, 28, 35): '"""ignore_label"""', (28, 37, 28, 40): '(255)', (28, 42, 28, 56): '"""ignore label"""'}, {}), "('ignore_label', 255, 'ignore label')", False, 'from absl import flags\n'), ((40, 20, 40, 50), 'tensorflow.io.gfile.listdir', 'tf.io.gfile.listdir', ({(40, 40, 40, 49): 'input_dir'}, {}), '(input_dir)', True, 'import tensorflow as tf\n'), ((81, 4, 81, 17), 'absl.app.run', 'app.run', ({(81, 12, 81, 16): 'main'}, {}), '(main)', False, 'from absl import app\n'), ((8, 40, 8, 65), 'os.path.dirname', 'os.path.dirname', ({(8, 56, 8, 64): '__file__'}, {}), '(__file__)', False, 'import os, sys\n'), ((37, 11, 37, 37), 'os.path.exists', 'os.path.exists', ({(37, 26, 37, 36): 'output_dir'}, {}), '(output_dir)', False, 'import os, sys\n'), ((38, 8, 38, 28), 'os.mkdir', 'os.mkdir', ({(38, 17, 38, 27): 'output_dir'}, {}), '(output_dir)', False, 'import os, sys\n'), ((42, 21, 42, 54), 'os.path.join', 'os.path.join', ({(42, 34, 42, 43): 'input_dir', (42, 45, 42, 53): 'filename'}, {}), '(input_dir, filename)', False, 'import os, sys\n'), ((43, 22, 43, 56), 'os.path.join', 'os.path.join', ({(43, 35, 43, 45): 'output_dir', (43, 47, 43, 55): 'filename'}, {}), '(output_dir, filename)', False, 'import os, sys\n'), ((45, 14, 45, 36), 'PIL.Image.open', 'Image.open', ({(45, 25, 45, 35): 'input_path'}, {}), '(input_path)', False, 'from PIL import Image\n'), ((67, 19, 67, 41), 'ids.voc2012.get_colormap', 'get_voc2012_colormap', ({}, {}), '()', True, 'from ids.voc2012 import get_colormap as get_voc2012_colormap\n'), ((69, 19, 69, 44), 'ids.cityscapes_fine.get_colormap', 'get_cityscapes_colormap', ({}, {}), '()', True, 'from ids.cityscapes_fine import get_colormap as get_cityscapes_colormap\n')] |
rit1200/kairon | kairon/shared/sso/base.py | 674a491f6deeae4800825ca93e0726e4fb6e0866 | class BaseSSO:
async def get_redirect_url(self):
"""Returns redirect url for facebook."""
raise NotImplementedError("Provider not implemented")
async def verify(self, request):
"""
Fetches user details using code received in the request.
:param request: starlette request object
"""
raise NotImplementedError("Provider not implemented")
| [] |
bal6765/ed-scout | EDScoutCore/JournalInterface.py | 0c2ee6141a5cd86a660c2319d7c4be61614b13fb | from inspect import signature
import json
import time
import os
import glob
import logging
from pathlib import Path
from watchdog.observers import Observer
from watchdog.observers.polling import PollingObserver
from watchdog.events import PatternMatchingEventHandler
from EDScoutCore.FileSystemUpdatePrompter import FileSystemUpdatePrompter
default_journal_path = os.path.join(str(Path.home()), "Saved Games\\Frontier Developments\\Elite Dangerous")
journal_file_pattern = "journal.*.log"
logger = logging.getLogger('JournalInterface')
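# Tracks the size of each journal file and yields any newly appended JSON entries whenever a journal changes.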
class JournalChangeIdentifier:
def __init__(self, journal_path=default_journal_path):
pass
self.journals = {}
self.journal_path = journal_path
logger.debug(f"watching for journal changes in {self.journal_path}")
self._init_journal_lists()
self._new_journal_entry_callback = None
self.latest_journal = self.identify_latest_journal()
# Prompter is required to force the file system to do updates on some systems so we get regular updates from the
# journal watcher.
self.prompter = FileSystemUpdatePrompter(self.latest_journal)
def identify_latest_journal(self):
if len(self.journals.keys()) == 0:
return None
keys = sorted(self.journals.keys())
return keys[-1]
def process_journal_change(self, changed_file):
if changed_file != self.latest_journal:
self.latest_journal = changed_file
self.prompter.set_watch_file(self.latest_journal)
new_size = os.stat(changed_file).st_size
new_data = None
# If the game was loaded after the scout it will start a new journal which we need to treat as unscanned.
if changed_file not in self.journals:
self.journals[changed_file] = 0
logger.debug(f'{changed_file} - Size change: {self.journals[changed_file]} to {new_size}')
if new_size > 0: # Don't try and read it if this is the first notification (we seem to get two; one from the file being cleared).
# Check how much it has grown and read the excess
size_diff = new_size - self.journals[changed_file]
if size_diff > 0:
with open(changed_file, 'rb') as f:
f.seek(-size_diff, os.SEEK_END) # Note minus sign
new_data = f.read()
entries = []
if new_data:
new_journal_lines = JournalChangeIdentifier.binary_file_data_to_lines(new_data)
try:
for line in new_journal_lines:
logger.debug(f'New journal entry detected: {line}')
entry = json.loads(line)
                    entry['type'] = "JournalEntry" # Add an identifier that's common to everything we shove down the outgoing pipe so the receiver can distinguish.
entries.append(entry)
logger.debug(f'Found {len(entries)} new entries')
for entry in entries:
yield entry
self.journals[changed_file] = new_size
except json.decoder.JSONDecodeError as e:
logger.exception(e)
@staticmethod
def binary_file_data_to_lines(binary_data):
as_ascii = binary_data.decode('UTF-8')
all_lines = as_ascii.split("\r\n")
all_lines.pop() # Drop the last empty line
return all_lines
def _init_journal_lists(self):
journal_files = glob.glob(os.path.join(self.journal_path, journal_file_pattern))
for journal_file in journal_files:
self.journals[journal_file] = os.stat(journal_file).st_size
class JournalWatcher:
def __init__(self, path=default_journal_path, force_polling=False):
self.path = path
self.force_polling = force_polling
self._configure_watchers()
def set_callback(self, on_journal_change):
self.event_handler.set_callback(on_journal_change)
def stop(self):
self.observer.stop()
self.observer.join()
class _EntriesChangeHandler(PatternMatchingEventHandler):
def __init__(self):
super(JournalWatcher._EntriesChangeHandler, self).__init__(
patterns=['*Journal*.log'],
ignore_patterns=[],
ignore_directories=True)
self.on_journal_change = None
def set_callback(self, on_new_journal_entry):
self.on_journal_change = on_new_journal_entry
def on_modified(self, event):
changed_file = str(event.src_path)
logger.debug("Journal change: " + changed_file)
self.on_journal_change(changed_file)
def on_created(self, event):
file = str(event.src_path)
logger.debug("Journal created: " + file)
def on_deleted(self, event):
file = str(event.src_path)
logger.debug("Journal deleted: " + file)
def on_moved(self, event):
file = str(event.src_path)
logger.debug("Journal moved: " + file)
def _configure_watchers(self):
self.event_handler = JournalWatcher._EntriesChangeHandler()
if self.force_polling:
self.observer = PollingObserver(0.25) # Poll every quarter of a second
else:
self.observer = Observer()
self.observer.schedule(self.event_handler, self.path, recursive=False)
self.observer.start()
if __name__ == '__main__':
    def ReportJournalChange(journal_change):
        print('New route detected:' + str(journal_change))
journalWatcher = JournalWatcher()
journalWatcher.set_callback(ReportJournalChange)
print('running')
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
print('done')
journalWatcher.stop()
| [((17, 9, 17, 46), 'logging.getLogger', 'logging.getLogger', ({(17, 27, 17, 45): '"""JournalInterface"""'}, {}), "('JournalInterface')", False, 'import logging\n'), ((14, 40, 14, 51), 'pathlib.Path.home', 'Path.home', ({}, {}), '()', False, 'from pathlib import Path\n'), ((36, 24, 36, 69), 'EDScoutCore.FileSystemUpdatePrompter.FileSystemUpdatePrompter', 'FileSystemUpdatePrompter', ({(36, 49, 36, 68): 'self.latest_journal'}, {}), '(self.latest_journal)', False, 'from EDScoutCore.FileSystemUpdatePrompter import FileSystemUpdatePrompter\n'), ((50, 19, 50, 40), 'os.stat', 'os.stat', ({(50, 27, 50, 39): 'changed_file'}, {}), '(changed_file)', False, 'import os\n'), ((97, 34, 97, 87), 'os.path.join', 'os.path.join', ({(97, 47, 97, 64): 'self.journal_path', (97, 66, 97, 86): 'journal_file_pattern'}, {}), '(self.journal_path, journal_file_pattern)', False, 'import os\n'), ((150, 28, 150, 49), 'watchdog.observers.polling.PollingObserver', 'PollingObserver', ({(150, 44, 150, 48): '0.25'}, {}), '(0.25)', False, 'from watchdog.observers.polling import PollingObserver\n'), ((152, 28, 152, 38), 'watchdog.observers.Observer', 'Observer', ({}, {}), '()', False, 'from watchdog.observers import Observer\n'), ((169, 12, 169, 25), 'time.sleep', 'time.sleep', ({(169, 23, 169, 24): '(1)'}, {}), '(1)', False, 'import time\n'), ((99, 42, 99, 63), 'os.stat', 'os.stat', ({(99, 50, 99, 62): 'journal_file'}, {}), '(journal_file)', False, 'import os\n'), ((75, 28, 75, 44), 'json.loads', 'json.loads', ({(75, 39, 75, 43): 'line'}, {}), '(line)', False, 'import json\n')] |
xR86/ml-stuff | labs-python/lab9/add_files.py | 2a1b79408897171b78032ff2531ab6f8b18be6c4 | import sqlite3
conn = sqlite3.connect('example.db')
c = conn.cursor()
import os
import hashlib
import time
def get_file_md5(filePath):
h = hashlib.md5()
h.update(open(filePath,"rb").read())
return h.hexdigest()
def get_file_sha256(filePath):
h = hashlib.sha256()
h.update(open(filePath,"rb").read())
return h.hexdigest()
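# Walk the top level of dir_path and record each file's path, name, size, creation time and MD5 hash in the database tables.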
def get_dir_data(dir_path):
dir_path = os.path.realpath(dir_path)
#print next(os.walk(dir_path))[2]
#print os.path.basename(dir_path)
id_location = 0
id_file = 0
for dir_file in next(os.walk(dir_path))[2]:
file_name = dir_file
file_md5 = get_file_md5(dir_file)
file_sha256 = get_file_sha256(dir_file)
file_size = os.path.getsize(dir_file)
file_time = time.gmtime(os.path.getctime(dir_file))
file_formatted_time = time.strftime("%Y-%m-%d %I:%M:%S %p", file_time)
file_path = os.path.realpath(dir_file)
location_values = (id_location, file_path)
c.execute("INSERT INTO location VALUES (?, ?)", location_values)
files_values = (id_location, id_file)
c.execute("INSERT INTO files VALUES (?, ?)", files_values)
file_info_values = (id_file, file_name, file_size, file_formatted_time, file_md5)
c.execute("INSERT INTO file_info VALUES (?, ?, ?, ?, ?)", file_info_values)
id_location += 1
id_file += 1
get_dir_data('./')
# Save (commit) the changes
conn.commit()
conn.close() | [((2, 7, 2, 36), 'sqlite3.connect', 'sqlite3.connect', ({(2, 23, 2, 35): '"""example.db"""'}, {}), "('example.db')", False, 'import sqlite3\n'), ((12, 5, 12, 18), 'hashlib.md5', 'hashlib.md5', ({}, {}), '()', False, 'import hashlib\n'), ((17, 5, 17, 21), 'hashlib.sha256', 'hashlib.sha256', ({}, {}), '()', False, 'import hashlib\n'), ((23, 12, 23, 38), 'os.path.realpath', 'os.path.realpath', ({(23, 29, 23, 37): 'dir_path'}, {}), '(dir_path)', False, 'import os\n'), ((34, 14, 34, 39), 'os.path.getsize', 'os.path.getsize', ({(34, 30, 34, 38): 'dir_file'}, {}), '(dir_file)', False, 'import os\n'), ((37, 24, 37, 72), 'time.strftime', 'time.strftime', ({(37, 38, 37, 60): '"""%Y-%m-%d %I:%M:%S %p"""', (37, 62, 37, 71): 'file_time'}, {}), "('%Y-%m-%d %I:%M:%S %p', file_time)", False, 'import time\n'), ((38, 14, 38, 40), 'os.path.realpath', 'os.path.realpath', ({(38, 31, 38, 39): 'dir_file'}, {}), '(dir_file)', False, 'import os\n'), ((30, 22, 30, 39), 'os.walk', 'os.walk', ({(30, 30, 30, 38): 'dir_path'}, {}), '(dir_path)', False, 'import os\n'), ((36, 26, 36, 52), 'os.path.getctime', 'os.path.getctime', ({(36, 43, 36, 51): 'dir_file'}, {}), '(dir_file)', False, 'import os\n')] |
blankenberg/galaxy-data-resource | lib/galaxy/model/migrate/versions/0073_add_ldda_to_implicit_conversion_table.py | ca32a1aafd64948f489a4e5cf88096f32391b1d9 | """
Migration script to add 'ldda_parent_id' column to the implicitly_converted_dataset_association table.
"""
from sqlalchemy import *
from sqlalchemy.orm import *
from migrate import *
from migrate.changeset import *
import logging
log = logging.getLogger( __name__ )
metadata = MetaData()
def upgrade(migrate_engine):
metadata.bind = migrate_engine
print __doc__
metadata.reflect()
try:
Implicitly_converted_table = Table( "implicitly_converted_dataset_association", metadata, autoload=True )
if migrate_engine.name != 'sqlite':
c = Column( "ldda_parent_id", Integer, ForeignKey( "library_dataset_dataset_association.id" ), index=True, nullable=True )
else:
#Can't use the ForeignKey in sqlite.
c = Column( "ldda_parent_id", Integer, index=True, nullable=True )
c.create( Implicitly_converted_table, index_name="ix_implicitly_converted_dataset_assoc_ldda_parent_id")
assert c is Implicitly_converted_table.c.ldda_parent_id
except Exception, e:
print "Adding ldda_parent_id column to implicitly_converted_dataset_association table failed: %s" % str( e )
log.debug( "Adding ldda_parent_id column to implicitly_converted_dataset_association table failed: %s" % str( e ) )
def downgrade(migrate_engine):
metadata.bind = migrate_engine
metadata.reflect()
try:
Implicitly_converted_table = Table( "implicitly_converted_dataset_association", metadata, autoload=True )
Implicitly_converted_table.c.ldda_parent_id.drop()
except Exception, e:
print "Dropping ldda_parent_id column from implicitly_converted_dataset_association table failed: %s" % str( e )
log.debug( "Dropping ldda_parent_id column from implicitly_converted_dataset_association table failed: %s" % str( e ) )
| [] |
tzuliu/Contrastive-Multiple-Correspondence-Analysis-cMCA | Replication Python and R Codes/Figure_6/cMCA_ESS2018_LABCON_org.py | a59a5c36dd5d4ac04205627827e792322742462d | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import prince
from sklearn import utils
from sklearn.cluster import DBSCAN
import itertools
from cmca import CMCA
from ccmca import CCMCA
from matplotlib import rc
plt.style.use('ggplot')
df = pd.read_csv("./uk2018.csv")
df["prtclcgb"].replace({5: 8, 9: 8, 10:8, 11:8, 12:8, 13:8, 15:8, 19:8}, inplace=True)
df["prtclcgb"].replace({6: 5}, inplace=True)
df["prtclcgb"].replace({7: 6}, inplace=True)
df["prtclcgb"].replace({8: 7}, inplace=True)
alpha = r'$ \alpha $'
tableau10 = {
'teal': '#78B7B2',
'blue': '#507AA6',
'orange': '#F08E39',
'red': '#DF585C',
'green': '#5BA053',
'purple': '#AF7BA1',
'yellow': '#ECC854',
'brown': '#9A7460',
'pink': '#FD9EA9',
'gray': '#BAB0AC',
7: '#9A7460',
1: '#507AA6',
2: '#F08E39',
3: '#DF585C',
4: '#5BA053',
0: '#78B7B2',
6: '#ECC854',
5: '#AF7BA1',
8: '#FD9EA9',
9: '#BAB0AC',
-1: '#BAB0AC',
99: '#BAB0AC',
'LDP': '#507AA6',
'DPJ': '#F08E39'
}
def fillna_based_on_dtype(df):
for key in dict(df.dtypes).keys():
if df.dtypes[key] == np.object:
df[key] = df[key].fillna('na')
else:
df[key] = df[key].fillna(99)
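# Split respondents by party identification (prtclcgb), report per-party missingness, and impute the missing values.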
def df_to_mat(df):
X = df.iloc[:,np.r_[1:(df.shape[1])]]
X_con = X[X["prtclcgb"] == 1]
X_lab = X[X["prtclcgb"] == 2]
X_ldp = X[X["prtclcgb"] == 3]
X_snp = X[X["prtclcgb"] == 4]
X_gre = X[X["prtclcgb"] == 5]
X_uip = X[X["prtclcgb"] == 6]
X_oth = X[X["prtclcgb"] == 7]
print("missing value ratio (CON)", X_con.isna().sum().sum() / (X_con.shape[0] * X_con.shape[1]))
print("missing value ratio (LAB)", X_lab.isna().sum().sum() / (X_lab.shape[0] * X_lab.shape[1]))
print("missing value ratio (LDP)", X_ldp.isna().sum().sum() / (X_ldp.shape[0] * X_ldp.shape[1]))
print("missing value ratio (SNP)", X_snp.isna().sum().sum() / (X_snp.shape[0] * X_snp.shape[1]))
print("missing value ratio (GRE)", X_gre.isna().sum().sum() / (X_gre.shape[0] * X_gre.shape[1]))
print("missing value ratio (UIP)", X_uip.isna().sum().sum() / (X_uip.shape[0] * X_uip.shape[1]))
print("missing value ratio (OTH)", X_oth.isna().sum().sum() / (X_oth.shape[0] * X_oth.shape[1]))
fillna_based_on_dtype(X_con)
fillna_based_on_dtype(X_lab)
fillna_based_on_dtype(X_ldp)
fillna_based_on_dtype(X_snp)
fillna_based_on_dtype(X_gre)
fillna_based_on_dtype(X_uip)
fillna_based_on_dtype(X_oth)
return(X_con, X_lab, X_ldp, X_snp, X_gre, X_uip, X_oth)
X_con, X_lab, X_ldp, X_snp, X_gre, X_uip, X_oth = df_to_mat(df)
X = pd.concat([X_con, X_lab, X_ldp, X_snp, X_gre, X_uip, X_oth])
print(X_con.shape, X_lab.shape, X_ldp.shape, X_snp.shape, X_gre.shape, X_uip.shape, X_oth.shape, X.shape)
##Dictionary for Level and Party
party = {1:"Con", 2:"Lab", 3:"LD", 4:"SNP", 5:"Green", 6:"UKIP", 7:"Other"}
##Fitting cMCA and export plots
cmca = CMCA(n_components=2, copy=True, check_input=True)
cmca = cmca.fit(fg=X_lab.iloc[:,0:(X_lab.shape[1]-3)], bg=X_con.iloc[:,0:(X_con.shape[1]-3)], alpha=1.5)
Y_fg = np.array(cmca.transform(X_lab.iloc[:,0:(X.shape[1]-3)]))
Y_bg = np.array(cmca.transform(X_con.iloc[:,0:(X.shape[1]-3)]))
Y_fg_col = np.array(cmca.transform(X_lab.iloc[:,0:(X.shape[1]-3)], axis='col'))
prefix_to_info = cmca.gen_prefix_to_info()
f_6 = plt.figure()
plt.xlim([-2.5, 2.5])
plt.ylim([-2.5, 2.5])
plt.scatter(Y_fg[:, 0], Y_fg[:, 1], c=tableau10[X_lab["prtclcgb"].iloc[0]], label=party[X_lab["prtclcgb"].iloc[0]], alpha=0.3, linewidths=0)
plt.scatter(Y_bg[:, 0], Y_bg[:, 1], c=tableau10[X_con["prtclcgb"].iloc[0]], label=party[X_con["prtclcgb"].iloc[0]], alpha=0.3, linewidths=0)
handles, labels = plt.gca().get_legend_handles_labels()
handles = [handles[1],handles[0]]
labels = ["Con","Lab"]
plt.legend(handles, labels, loc="lower right", shadow=False, scatterpoints=1, fontsize=8)
plt.xlabel('cPC1')
plt.ylabel('cPC2')
plt.title("cMCA (tg: LAB, bg: CON, " + str(alpha) + ": 1.5)")
plt.show()
f_6.savefig("cMCA_ESS2018_labcon_org.pdf", bbox_inches='tight')
| [((11, 0, 11, 23), 'matplotlib.pyplot.style.use', 'plt.style.use', ({(11, 14, 11, 22): '"""ggplot"""'}, {}), "('ggplot')", True, 'import matplotlib.pyplot as plt\n'), ((13, 5, 13, 32), 'pandas.read_csv', 'pd.read_csv', ({(13, 17, 13, 31): '"""./uk2018.csv"""'}, {}), "('./uk2018.csv')", True, 'import pandas as pd\n'), ((87, 4, 87, 64), 'pandas.concat', 'pd.concat', ({(87, 14, 87, 63): '[X_con, X_lab, X_ldp, X_snp, X_gre, X_uip, X_oth]'}, {}), '([X_con, X_lab, X_ldp, X_snp, X_gre, X_uip, X_oth])', True, 'import pandas as pd\n'), ((94, 7, 94, 56), 'cmca.CMCA', 'CMCA', (), '', False, 'from cmca import CMCA\n'), ((102, 6, 102, 18), 'matplotlib.pyplot.figure', 'plt.figure', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((103, 0, 103, 21), 'matplotlib.pyplot.xlim', 'plt.xlim', ({(103, 9, 103, 20): '[-2.5, 2.5]'}, {}), '([-2.5, 2.5])', True, 'import matplotlib.pyplot as plt\n'), ((104, 0, 104, 21), 'matplotlib.pyplot.ylim', 'plt.ylim', ({(104, 9, 104, 20): '[-2.5, 2.5]'}, {}), '([-2.5, 2.5])', True, 'import matplotlib.pyplot as plt\n'), ((105, 0, 105, 140), 'matplotlib.pyplot.scatter', 'plt.scatter', (), '', True, 'import matplotlib.pyplot as plt\n'), ((106, 0, 106, 140), 'matplotlib.pyplot.scatter', 'plt.scatter', (), '', True, 'import matplotlib.pyplot as plt\n'), ((110, 0, 110, 89), 'matplotlib.pyplot.legend', 'plt.legend', (), '', True, 'import matplotlib.pyplot as plt\n'), ((111, 0, 111, 18), 'matplotlib.pyplot.xlabel', 'plt.xlabel', ({(111, 11, 111, 17): '"""cPC1"""'}, {}), "('cPC1')", True, 'import matplotlib.pyplot as plt\n'), ((112, 0, 112, 18), 'matplotlib.pyplot.ylabel', 'plt.ylabel', ({(112, 11, 112, 17): '"""cPC2"""'}, {}), "('cPC2')", True, 'import matplotlib.pyplot as plt\n'), ((114, 0, 114, 10), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((107, 18, 107, 27), 'matplotlib.pyplot.gca', 'plt.gca', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n')] |
ruppysuppy/Daily-Coding-Problem-Solutions | Solutions/077.py | 37d061215a9af2ce39c51f8816c83039914c0d0b | """
Problem:
Given a list of possibly overlapping intervals, return a new list of intervals where
all overlapping intervals have been merged.
The input list is not necessarily ordered in any way.
For example, given [(1, 3), (5, 8), (4, 10), (20, 25)], you should return
[(1, 3), (4, 10), (20, 25)].
"""
from typing import List, Tuple
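# Approach: sort the intervals by start, then sweep once, extending the current end while intervals overlap and emitting the merged interval when a gap appears.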
def merge_intervals(intervals: List[Tuple[int, int]]) -> List[Tuple[int, int]]:
intervals.sort(key=lambda x: x[0])
merged_intervals = []
start = intervals[0][0]
end = intervals[0][1]
# generating the merged intervals
for interval in intervals[1:]:
curr_start, curr_end = interval
if end < curr_start:
merged_intervals.append((start, end))
start = curr_start
end = curr_end
        elif end < curr_end:
            # the intervals overlap or touch (curr_start <= end here), so extend the running end
            end = curr_end
# adding the last interval
merged_intervals.append((start, end))
return merged_intervals
if __name__ == "__main__":
print(merge_intervals([(1, 3), (5, 8), (4, 10), (20, 25)]))
print(merge_intervals([(1, 3), (5, 8), (4, 10), (20, 25), (6, 12)]))
"""
SPECS:
TIME COMPLEXITY: O(n log n) (dominated by the initial sort)
SPACE COMPLEXITY: O(n)
"""
| [] |
wray/wems | slackbot_wems/chris/slacklib.py | 69caedfb8906f04175196d610a1ca516db01f72a | import time
import emoji
# Put your commands here
COMMAND1 = "testing testing"
COMMAND2 = "roger roger"
BLUEON = str("blue on")
BLUEOFF = str("blue off")
REDON = str("red on")
REDOFF = str("red off")
GREENON = str("green on")
GREENOFF = str("green off")
YELLOWON = str("yellow on")
YELLOWOFF = str("yellow off")
CLOCK = str("update clock")
SCRAMBLE = str('scramble the 7')
HACKER = str('hack the 7')
SINGLEREADING = str('light')
def setup_gpio():
    # Import lazily and publish as module globals so handle_command() can use them after setup.
    global GPIO, lite, segment
    import RPi.GPIO as GPIO
    import slackbot_wems.chris.light as lite
    import slackbot_wems.chris.segment7 as segment
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
# Pin Setup
GPIO.setup(17, GPIO.OUT) # BLUE LED
GPIO.setup(27, GPIO.OUT) # RED LED
GPIO.setup(5, GPIO.OUT) # GREEN LED
GPIO.setup(22, GPIO.OUT) # YELLOW LED
GPIO.setup(12, GPIO.OUT) # LDR
setup = False
# Your handling code goes in this function
def handle_command(command):
"""
Determine if the command is valid. If so, take action and return
a response, if necessary.
"""
    global setup
    if not setup:
        setup_gpio()
        setup = True
response = ""
if command.find(COMMAND1) >= 0:
response = str("Surprise!")
elif command.find(COMMAND2) >= 0:
response = (emoji.emojize('Python\n is\n :thumbs_up: :thumbs_up: :thumbs_up:'))
# Blue LED Commands
elif command.find(BLUEON) >= 0:
GPIO.output(17, True)
response = emoji.emojize("" + "Turning :radio_button: ON...")
elif command.find(BLUEOFF) >= 0:
GPIO.output(17, False)
response = emoji.emojize("" + "Turning :radio_button: OFF...")
# Red LED Commands
elif command.find(REDON) >= 0:
GPIO.output(27, True)
response = emoji.emojize("" + "Turning :red_circle: ON...")
elif command.find(REDOFF) >= 0:
GPIO.output(27, False)
response = emoji.emojize("" + "Turning :red_circle: OFF...")
# Green LED Commands
elif command.find(GREENON) >= 0:
GPIO.output(5, True)
response = emoji.emojize("" + "Turning :green_apple: ON...")
elif command.find(GREENOFF) >= 0:
GPIO.output(5, False)
response = emoji.emojize("" + "Turning :green_apple: OFF...")
# Yellow LED Commands
elif command.find(YELLOWON) >= 0:
GPIO.output(22, True)
response = emoji.emojize("" + "Turning :sunny: ON...")
elif command.find(YELLOWOFF) >= 0:
GPIO.output(22, False)
response = emoji.emojize("" + "Turning :sunny: OFF...")
# 7 Segment Commands
elif command.find(CLOCK) >= 0:
print('Updating the clock!')
response = segment.updateClock()
elif command.find(SCRAMBLE) >= 0:
print(emoji.emojize(":egg: There is nothing better than scrambled eggs! :egg:"))
response = segment.scramble()
elif command.find(HACKER) >= 0:
print('Message')
response = segment.hacker()
elif command.find(SINGLEREADING) >= 0:
a = lite.printReading()
a = int(a)
time.sleep(1)
print(a)
response = ('Here is what the LDR Sensor said to me: ' + str(a))
return response
| [((32, 4, 32, 26), 'RPi.GPIO.setmode', 'GPIO.setmode', ({(32, 17, 32, 25): 'GPIO.BCM'}, {}), '(GPIO.BCM)', True, 'import RPi.GPIO as GPIO\n'), ((33, 4, 33, 27), 'RPi.GPIO.setwarnings', 'GPIO.setwarnings', ({(33, 21, 33, 26): '(False)'}, {}), '(False)', True, 'import RPi.GPIO as GPIO\n'), ((36, 4, 36, 28), 'RPi.GPIO.setup', 'GPIO.setup', ({(36, 15, 36, 17): '(17)', (36, 19, 36, 27): 'GPIO.OUT'}, {}), '(17, GPIO.OUT)', True, 'import RPi.GPIO as GPIO\n'), ((37, 4, 37, 28), 'RPi.GPIO.setup', 'GPIO.setup', ({(37, 15, 37, 17): '(27)', (37, 19, 37, 27): 'GPIO.OUT'}, {}), '(27, GPIO.OUT)', True, 'import RPi.GPIO as GPIO\n'), ((38, 4, 38, 27), 'RPi.GPIO.setup', 'GPIO.setup', ({(38, 15, 38, 16): '(5)', (38, 18, 38, 26): 'GPIO.OUT'}, {}), '(5, GPIO.OUT)', True, 'import RPi.GPIO as GPIO\n'), ((39, 4, 39, 28), 'RPi.GPIO.setup', 'GPIO.setup', ({(39, 15, 39, 17): '(22)', (39, 19, 39, 27): 'GPIO.OUT'}, {}), '(22, GPIO.OUT)', True, 'import RPi.GPIO as GPIO\n'), ((41, 4, 41, 28), 'RPi.GPIO.setup', 'GPIO.setup', ({(41, 15, 41, 17): '(12)', (41, 19, 41, 27): 'GPIO.OUT'}, {}), '(12, GPIO.OUT)', True, 'import RPi.GPIO as GPIO\n'), ((61, 20, 61, 86), 'emoji.emojize', 'emoji.emojize', ({(61, 34, 61, 85): '"""Python\n is\n :thumbs_up: :thumbs_up: :thumbs_up:"""'}, {}), '("""Python\n is\n :thumbs_up: :thumbs_up: :thumbs_up:""")', False, 'import emoji\n'), ((65, 8, 65, 29), 'RPi.GPIO.output', 'GPIO.output', ({(65, 20, 65, 22): '(17)', (65, 24, 65, 28): '(True)'}, {}), '(17, True)', True, 'import RPi.GPIO as GPIO\n'), ((66, 19, 66, 69), 'emoji.emojize', 'emoji.emojize', ({(66, 33, 66, 68): "'' + 'Turning :radio_button: ON...'"}, {}), "('' + 'Turning :radio_button: ON...')", False, 'import emoji\n'), ((69, 8, 69, 30), 'RPi.GPIO.output', 'GPIO.output', ({(69, 20, 69, 22): '(17)', (69, 24, 69, 29): '(False)'}, {}), '(17, False)', True, 'import RPi.GPIO as GPIO\n'), ((70, 19, 70, 70), 'emoji.emojize', 'emoji.emojize', ({(70, 33, 70, 69): "'' + 'Turning :radio_button: OFF...'"}, {}), "('' + 'Turning :radio_button: OFF...')", False, 'import emoji\n'), ((74, 8, 74, 29), 'RPi.GPIO.output', 'GPIO.output', ({(74, 20, 74, 22): '(27)', (74, 24, 74, 28): '(True)'}, {}), '(27, True)', True, 'import RPi.GPIO as GPIO\n'), ((75, 19, 75, 67), 'emoji.emojize', 'emoji.emojize', ({(75, 33, 75, 66): "'' + 'Turning :red_circle: ON...'"}, {}), "('' + 'Turning :red_circle: ON...')", False, 'import emoji\n'), ((78, 8, 78, 30), 'RPi.GPIO.output', 'GPIO.output', ({(78, 20, 78, 22): '(27)', (78, 24, 78, 29): '(False)'}, {}), '(27, False)', True, 'import RPi.GPIO as GPIO\n'), ((79, 19, 79, 68), 'emoji.emojize', 'emoji.emojize', ({(79, 33, 79, 67): "'' + 'Turning :red_circle: OFF...'"}, {}), "('' + 'Turning :red_circle: OFF...')", False, 'import emoji\n'), ((83, 8, 83, 28), 'RPi.GPIO.output', 'GPIO.output', ({(83, 20, 83, 21): '(5)', (83, 23, 83, 27): '(True)'}, {}), '(5, True)', True, 'import RPi.GPIO as GPIO\n'), ((84, 19, 84, 68), 'emoji.emojize', 'emoji.emojize', ({(84, 33, 84, 67): "'' + 'Turning :green_apple: ON...'"}, {}), "('' + 'Turning :green_apple: ON...')", False, 'import emoji\n'), ((87, 8, 87, 29), 'RPi.GPIO.output', 'GPIO.output', ({(87, 20, 87, 21): '(5)', (87, 23, 87, 28): '(False)'}, {}), '(5, False)', True, 'import RPi.GPIO as GPIO\n'), ((88, 19, 88, 69), 'emoji.emojize', 'emoji.emojize', ({(88, 33, 88, 68): "'' + 'Turning :green_apple: OFF...'"}, {}), "('' + 'Turning :green_apple: OFF...')", False, 'import emoji\n'), ((92, 8, 92, 29), 'RPi.GPIO.output', 'GPIO.output', ({(92, 20, 92, 22): '(22)', (92, 24, 92, 28): '(True)'}, {}), '(22, True)', True, 'import RPi.GPIO as GPIO\n'), ((93, 19, 93, 62), 'emoji.emojize', 'emoji.emojize', ({(93, 33, 93, 61): "'' + 'Turning :sunny: ON...'"}, {}), "('' + 'Turning :sunny: ON...')", False, 'import emoji\n'), ((96, 8, 96, 30), 'RPi.GPIO.output', 'GPIO.output', ({(96, 20, 96, 22): '(22)', (96, 24, 96, 29): '(False)'}, {}), '(22, False)', True, 'import RPi.GPIO as GPIO\n'), ((97, 19, 97, 63), 'emoji.emojize', 'emoji.emojize', ({(97, 33, 97, 62): "'' + 'Turning :sunny: OFF...'"}, {}), "('' + 'Turning :sunny: OFF...')", False, 'import emoji\n'), ((102, 19, 102, 40), 'slackbot_wems.chris.segment7.updateClock', 'segment.updateClock', ({}, {}), '()', True, 'import slackbot_wems.chris.segment7 as segment\n'), ((106, 19, 106, 37), 'slackbot_wems.chris.segment7.scramble', 'segment.scramble', ({}, {}), '()', True, 'import slackbot_wems.chris.segment7 as segment\n'), ((105, 14, 105, 87), 'emoji.emojize', 'emoji.emojize', ({(105, 28, 105, 86): '""":egg: There is nothing better than scrambled eggs! :egg:"""'}, {}), "(':egg: There is nothing better than scrambled eggs! :egg:')", False, 'import emoji\n'), ((110, 19, 110, 35), 'slackbot_wems.chris.segment7.hacker', 'segment.hacker', ({}, {}), '()', True, 'import slackbot_wems.chris.segment7 as segment\n'), ((113, 12, 113, 31), 'slackbot_wems.chris.light.printReading', 'lite.printReading', ({}, {}), '()', True, 'import slackbot_wems.chris.light as lite\n'), ((115, 8, 115, 21), 'time.sleep', 'time.sleep', ({(115, 19, 115, 20): '(1)'}, {}), '(1)', False, 'import time\n')]
iScrE4m/RSES | rses/__init__.py | 88299f105ded8838243eab8b25ab1626c97d1179 | # coding=utf-8
"""RSES :)"""
| [] |
adewaleo/azure-sdk-for-python | sdk/cosmos/azure-mgmt-cosmosdb/azure/mgmt/cosmosdb/operations/_gremlin_resources_operations.py | 169457edbea5e3c5557246cfcf8bd635d528bae4 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class GremlinResourcesOperations(object):
"""GremlinResourcesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.cosmosdb.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_gremlin_databases(
self,
resource_group_name, # type: str
account_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["models.GremlinDatabaseListResult"]
"""Lists the Gremlin databases under an existing Azure Cosmos DB database account.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either GremlinDatabaseListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.cosmosdb.models.GremlinDatabaseListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.GremlinDatabaseListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_gremlin_databases.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('GremlinDatabaseListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_gremlin_databases.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases'} # type: ignore
def get_gremlin_database(
self,
resource_group_name, # type: str
account_name, # type: str
database_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.GremlinDatabaseGetResults"
"""Gets the Gremlin databases under an existing Azure Cosmos DB database account with the provided
name.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: GremlinDatabaseGetResults, or the result of cls(response)
:rtype: ~azure.mgmt.cosmosdb.models.GremlinDatabaseGetResults
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.GremlinDatabaseGetResults"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
# Construct URL
url = self.get_gremlin_database.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
'databaseName': self._serialize.url("database_name", database_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('GremlinDatabaseGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_gremlin_database.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}'} # type: ignore
def _create_update_gremlin_database_initial(
self,
resource_group_name, # type: str
account_name, # type: str
database_name, # type: str
create_update_gremlin_database_parameters, # type: "models.GremlinDatabaseCreateUpdateParameters"
**kwargs # type: Any
):
# type: (...) -> Optional["models.GremlinDatabaseGetResults"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["models.GremlinDatabaseGetResults"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_update_gremlin_database_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
'databaseName': self._serialize.url("database_name", database_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(create_update_gremlin_database_parameters, 'GremlinDatabaseCreateUpdateParameters')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('GremlinDatabaseGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_update_gremlin_database_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}'} # type: ignore
def begin_create_update_gremlin_database(
self,
resource_group_name, # type: str
account_name, # type: str
database_name, # type: str
create_update_gremlin_database_parameters, # type: "models.GremlinDatabaseCreateUpdateParameters"
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.GremlinDatabaseGetResults"]
"""Create or update an Azure Cosmos DB Gremlin database.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:param create_update_gremlin_database_parameters: The parameters to provide for the current
Gremlin database.
:type create_update_gremlin_database_parameters: ~azure.mgmt.cosmosdb.models.GremlinDatabaseCreateUpdateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either GremlinDatabaseGetResults or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.GremlinDatabaseGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.GremlinDatabaseGetResults"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_update_gremlin_database_initial(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
create_update_gremlin_database_parameters=create_update_gremlin_database_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('GremlinDatabaseGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_update_gremlin_database.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}'} # type: ignore
def _delete_gremlin_database_initial(
self,
resource_group_name, # type: str
account_name, # type: str
database_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
# Construct URL
url = self._delete_gremlin_database_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
'databaseName': self._serialize.url("database_name", database_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_gremlin_database_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}'} # type: ignore
def begin_delete_gremlin_database(
self,
resource_group_name, # type: str
account_name, # type: str
database_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes an existing Azure Cosmos DB Gremlin database.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_gremlin_database_initial(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete_gremlin_database.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}'} # type: ignore
def get_gremlin_database_throughput(
self,
resource_group_name, # type: str
account_name, # type: str
database_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.ThroughputSettingsGetResults"
"""Gets the RUs per second of the Gremlin database under an existing Azure Cosmos DB database
account with the provided name.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ThroughputSettingsGetResults, or the result of cls(response)
:rtype: ~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ThroughputSettingsGetResults"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
# Construct URL
url = self.get_gremlin_database_throughput.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
'databaseName': self._serialize.url("database_name", database_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ThroughputSettingsGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_gremlin_database_throughput.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}/throughputSettings/default'} # type: ignore
def _update_gremlin_database_throughput_initial(
self,
resource_group_name, # type: str
account_name, # type: str
database_name, # type: str
update_throughput_parameters, # type: "models.ThroughputSettingsUpdateParameters"
**kwargs # type: Any
):
# type: (...) -> Optional["models.ThroughputSettingsGetResults"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["models.ThroughputSettingsGetResults"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_gremlin_database_throughput_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
'databaseName': self._serialize.url("database_name", database_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(update_throughput_parameters, 'ThroughputSettingsUpdateParameters')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ThroughputSettingsGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_gremlin_database_throughput_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}/throughputSettings/default'} # type: ignore
def begin_update_gremlin_database_throughput(
self,
resource_group_name, # type: str
account_name, # type: str
database_name, # type: str
update_throughput_parameters, # type: "models.ThroughputSettingsUpdateParameters"
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.ThroughputSettingsGetResults"]
"""Update RUs per second of an Azure Cosmos DB Gremlin database.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:param update_throughput_parameters: The parameters to provide for the RUs per second of the
current Gremlin database.
:type update_throughput_parameters: ~azure.mgmt.cosmosdb.models.ThroughputSettingsUpdateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ThroughputSettingsGetResults or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.ThroughputSettingsGetResults"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_gremlin_database_throughput_initial(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
update_throughput_parameters=update_throughput_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ThroughputSettingsGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_gremlin_database_throughput.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}/throughputSettings/default'} # type: ignore
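# Illustrative usage sketch (not part of the generated client): updating database RUs through
# the long-running-operation poller returned by begin_update_gremlin_database_throughput. The
# ThroughputSettingsResource model name is assumed from azure.mgmt.cosmosdb.models and is not
# defined in this file; resource names are hypothetical.
#
#   from azure.mgmt.cosmosdb.models import (
#       ThroughputSettingsResource, ThroughputSettingsUpdateParameters)
#
#   poller = client.gremlin_resources.begin_update_gremlin_database_throughput(
#       resource_group_name="my-rg",
#       account_name="my-cosmos-account",
#       database_name="my-gremlin-db",
#       update_throughput_parameters=ThroughputSettingsUpdateParameters(
#           resource=ThroughputSettingsResource(throughput=500)),
#   )
#   result = poller.result()  # blocks until the ARM operation completes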
def _migrate_gremlin_database_to_autoscale_initial(
self,
resource_group_name, # type: str
account_name, # type: str
database_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Optional["models.ThroughputSettingsGetResults"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["models.ThroughputSettingsGetResults"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
# Construct URL
url = self._migrate_gremlin_database_to_autoscale_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
'databaseName': self._serialize.url("database_name", database_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ThroughputSettingsGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_migrate_gremlin_database_to_autoscale_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}/throughputSettings/default/migrateToAutoscale'} # type: ignore
def begin_migrate_gremlin_database_to_autoscale(
self,
resource_group_name, # type: str
account_name, # type: str
database_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.ThroughputSettingsGetResults"]
"""Migrate an Azure Cosmos DB Gremlin database from manual throughput to autoscale.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ThroughputSettingsGetResults or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.ThroughputSettingsGetResults"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._migrate_gremlin_database_to_autoscale_initial(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ThroughputSettingsGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_migrate_gremlin_database_to_autoscale.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}/throughputSettings/default/migrateToAutoscale'} # type: ignore
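# Illustrative usage sketch (not part of the generated client): the migrate operations take no
# request body, so switching a database between autoscale and manual throughput is a single
# poller call in each direction. `client` is a hypothetical CosmosDBManagementClient instance.
#
#   client.gremlin_resources.begin_migrate_gremlin_database_to_autoscale(
#       "my-rg", "my-cosmos-account", "my-gremlin-db").result()
#   # the reverse direction uses begin_migrate_gremlin_database_to_manual_throughput (below)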
def _migrate_gremlin_database_to_manual_throughput_initial(
self,
resource_group_name, # type: str
account_name, # type: str
database_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Optional["models.ThroughputSettingsGetResults"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["models.ThroughputSettingsGetResults"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
# Construct URL
url = self._migrate_gremlin_database_to_manual_throughput_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
'databaseName': self._serialize.url("database_name", database_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ThroughputSettingsGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_migrate_gremlin_database_to_manual_throughput_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}/throughputSettings/default/migrateToManualThroughput'} # type: ignore
def begin_migrate_gremlin_database_to_manual_throughput(
self,
resource_group_name, # type: str
account_name, # type: str
database_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.ThroughputSettingsGetResults"]
"""Migrate an Azure Cosmos DB Gremlin database from autoscale to manual throughput.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ThroughputSettingsGetResults or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.ThroughputSettingsGetResults"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._migrate_gremlin_database_to_manual_throughput_initial(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ThroughputSettingsGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_migrate_gremlin_database_to_manual_throughput.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}/throughputSettings/default/migrateToManualThroughput'} # type: ignore
def list_gremlin_graphs(
self,
resource_group_name, # type: str
account_name, # type: str
database_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["models.GremlinGraphListResult"]
"""Lists the Gremlin graph under an existing Azure Cosmos DB database account.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either GremlinGraphListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.cosmosdb.models.GremlinGraphListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.GremlinGraphListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_gremlin_graphs.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
'databaseName': self._serialize.url("database_name", database_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('GremlinGraphListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_gremlin_graphs.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}/graphs'} # type: ignore
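# Illustrative usage sketch (not part of the generated client): list_gremlin_graphs returns an
# ItemPaged iterable, so callers simply iterate and the next_link handling in
# prepare_request/get_next above runs transparently. Variable and resource names are
# hypothetical; `graph.name` assumes the usual ARM resource name property.
#
#   for graph in client.gremlin_resources.list_gremlin_graphs(
#           "my-rg", "my-cosmos-account", "my-gremlin-db"):
#       print(graph.name)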
def get_gremlin_graph(
self,
resource_group_name, # type: str
account_name, # type: str
database_name, # type: str
graph_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.GremlinGraphGetResults"
"""Gets the Gremlin graph under an existing Azure Cosmos DB database account.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:param graph_name: Cosmos DB graph name.
:type graph_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: GremlinGraphGetResults, or the result of cls(response)
:rtype: ~azure.mgmt.cosmosdb.models.GremlinGraphGetResults
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.GremlinGraphGetResults"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
# Construct URL
url = self.get_gremlin_graph.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
'databaseName': self._serialize.url("database_name", database_name, 'str'),
'graphName': self._serialize.url("graph_name", graph_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('GremlinGraphGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_gremlin_graph.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}/graphs/{graphName}'} # type: ignore
def _create_update_gremlin_graph_initial(
self,
resource_group_name, # type: str
account_name, # type: str
database_name, # type: str
graph_name, # type: str
create_update_gremlin_graph_parameters, # type: "models.GremlinGraphCreateUpdateParameters"
**kwargs # type: Any
):
# type: (...) -> Optional["models.GremlinGraphGetResults"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["models.GremlinGraphGetResults"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_update_gremlin_graph_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
'databaseName': self._serialize.url("database_name", database_name, 'str'),
'graphName': self._serialize.url("graph_name", graph_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(create_update_gremlin_graph_parameters, 'GremlinGraphCreateUpdateParameters')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('GremlinGraphGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_update_gremlin_graph_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}/graphs/{graphName}'} # type: ignore
def begin_create_update_gremlin_graph(
self,
resource_group_name, # type: str
account_name, # type: str
database_name, # type: str
graph_name, # type: str
create_update_gremlin_graph_parameters, # type: "models.GremlinGraphCreateUpdateParameters"
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.GremlinGraphGetResults"]
"""Create or update an Azure Cosmos DB Gremlin graph.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:param graph_name: Cosmos DB graph name.
:type graph_name: str
:param create_update_gremlin_graph_parameters: The parameters to provide for the current
Gremlin graph.
:type create_update_gremlin_graph_parameters: ~azure.mgmt.cosmosdb.models.GremlinGraphCreateUpdateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either GremlinGraphGetResults or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.GremlinGraphGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.GremlinGraphGetResults"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_update_gremlin_graph_initial(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
graph_name=graph_name,
create_update_gremlin_graph_parameters=create_update_gremlin_graph_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('GremlinGraphGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_update_gremlin_graph.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}/graphs/{graphName}'} # type: ignore
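# Illustrative usage sketch (not part of the generated client): creating or updating a graph
# with begin_create_update_gremlin_graph. GremlinGraphResource is assumed from
# azure.mgmt.cosmosdb.models and is not defined in this file; the service may require more
# fields (for example a partition key), so this only shows the call shape.
#
#   from azure.mgmt.cosmosdb.models import (
#       GremlinGraphCreateUpdateParameters, GremlinGraphResource)
#
#   poller = client.gremlin_resources.begin_create_update_gremlin_graph(
#       resource_group_name="my-rg",
#       account_name="my-cosmos-account",
#       database_name="my-gremlin-db",
#       graph_name="my-graph",
#       create_update_gremlin_graph_parameters=GremlinGraphCreateUpdateParameters(
#           resource=GremlinGraphResource(id="my-graph")),
#   )
#   graph = poller.result()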
def _delete_gremlin_graph_initial(
self,
resource_group_name, # type: str
account_name, # type: str
database_name, # type: str
graph_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
# Construct URL
url = self._delete_gremlin_graph_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
'databaseName': self._serialize.url("database_name", database_name, 'str'),
'graphName': self._serialize.url("graph_name", graph_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_gremlin_graph_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}/graphs/{graphName}'} # type: ignore
def begin_delete_gremlin_graph(
self,
resource_group_name, # type: str
account_name, # type: str
database_name, # type: str
graph_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes an existing Azure Cosmos DB Gremlin graph.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:param graph_name: Cosmos DB graph name.
:type graph_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_gremlin_graph_initial(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
graph_name=graph_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete_gremlin_graph.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}/graphs/{graphName}'} # type: ignore
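# Illustrative usage sketch (not part of the generated client): deletion returns
# LROPoller[None], so there is no payload to read; callers just wait for completion.
# `client` is a hypothetical CosmosDBManagementClient instance.
#
#   delete_poller = client.gremlin_resources.begin_delete_gremlin_graph(
#       "my-rg", "my-cosmos-account", "my-gremlin-db", "my-graph")
#   delete_poller.wait()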
def get_gremlin_graph_throughput(
self,
resource_group_name, # type: str
account_name, # type: str
database_name, # type: str
graph_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.ThroughputSettingsGetResults"
"""Gets the Gremlin graph throughput under an existing Azure Cosmos DB database account with the
provided name.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:param graph_name: Cosmos DB graph name.
:type graph_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ThroughputSettingsGetResults, or the result of cls(response)
:rtype: ~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ThroughputSettingsGetResults"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
# Construct URL
url = self.get_gremlin_graph_throughput.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
'databaseName': self._serialize.url("database_name", database_name, 'str'),
'graphName': self._serialize.url("graph_name", graph_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ThroughputSettingsGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_gremlin_graph_throughput.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}/graphs/{graphName}/throughputSettings/default'} # type: ignore
def _update_gremlin_graph_throughput_initial(
self,
resource_group_name, # type: str
account_name, # type: str
database_name, # type: str
graph_name, # type: str
update_throughput_parameters, # type: "models.ThroughputSettingsUpdateParameters"
**kwargs # type: Any
):
# type: (...) -> Optional["models.ThroughputSettingsGetResults"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["models.ThroughputSettingsGetResults"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_gremlin_graph_throughput_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
'databaseName': self._serialize.url("database_name", database_name, 'str'),
'graphName': self._serialize.url("graph_name", graph_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(update_throughput_parameters, 'ThroughputSettingsUpdateParameters')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ThroughputSettingsGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_gremlin_graph_throughput_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}/graphs/{graphName}/throughputSettings/default'} # type: ignore
def begin_update_gremlin_graph_throughput(
self,
resource_group_name, # type: str
account_name, # type: str
database_name, # type: str
graph_name, # type: str
update_throughput_parameters, # type: "models.ThroughputSettingsUpdateParameters"
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.ThroughputSettingsGetResults"]
"""Update RUs per second of an Azure Cosmos DB Gremlin graph.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:param graph_name: Cosmos DB graph name.
:type graph_name: str
:param update_throughput_parameters: The parameters to provide for the RUs per second of the
current Gremlin graph.
:type update_throughput_parameters: ~azure.mgmt.cosmosdb.models.ThroughputSettingsUpdateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ThroughputSettingsGetResults or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.ThroughputSettingsGetResults"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_gremlin_graph_throughput_initial(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
graph_name=graph_name,
update_throughput_parameters=update_throughput_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ThroughputSettingsGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_gremlin_graph_throughput.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}/graphs/{graphName}/throughputSettings/default'} # type: ignore
def _migrate_gremlin_graph_to_autoscale_initial(
self,
resource_group_name, # type: str
account_name, # type: str
database_name, # type: str
graph_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Optional["models.ThroughputSettingsGetResults"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["models.ThroughputSettingsGetResults"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
# Construct URL
url = self._migrate_gremlin_graph_to_autoscale_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
'databaseName': self._serialize.url("database_name", database_name, 'str'),
'graphName': self._serialize.url("graph_name", graph_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ThroughputSettingsGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_migrate_gremlin_graph_to_autoscale_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}/graphs/{graphName}/throughputSettings/default/migrateToAutoscale'} # type: ignore
def begin_migrate_gremlin_graph_to_autoscale(
self,
resource_group_name, # type: str
account_name, # type: str
database_name, # type: str
graph_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.ThroughputSettingsGetResults"]
"""Migrate an Azure Cosmos DB Gremlin graph from manual throughput to autoscale.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:param graph_name: Cosmos DB graph name.
:type graph_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ThroughputSettingsGetResults or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.ThroughputSettingsGetResults"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._migrate_gremlin_graph_to_autoscale_initial(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
graph_name=graph_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ThroughputSettingsGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_migrate_gremlin_graph_to_autoscale.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}/graphs/{graphName}/throughputSettings/default/migrateToAutoscale'} # type: ignore
def _migrate_gremlin_graph_to_manual_throughput_initial(
self,
resource_group_name, # type: str
account_name, # type: str
database_name, # type: str
graph_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Optional["models.ThroughputSettingsGetResults"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["models.ThroughputSettingsGetResults"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
# Construct URL
url = self._migrate_gremlin_graph_to_manual_throughput_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
'databaseName': self._serialize.url("database_name", database_name, 'str'),
'graphName': self._serialize.url("graph_name", graph_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ThroughputSettingsGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_migrate_gremlin_graph_to_manual_throughput_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}/graphs/{graphName}/throughputSettings/default/migrateToManualThroughput'} # type: ignore
def begin_migrate_gremlin_graph_to_manual_throughput(
self,
resource_group_name, # type: str
account_name, # type: str
database_name, # type: str
graph_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.ThroughputSettingsGetResults"]
"""Migrate an Azure Cosmos DB Gremlin graph from autoscale to manual throughput.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:param graph_name: Cosmos DB graph name.
:type graph_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ThroughputSettingsGetResults or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.ThroughputSettingsGetResults"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._migrate_gremlin_graph_to_manual_throughput_initial(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
graph_name=graph_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ThroughputSettingsGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_migrate_gremlin_graph_to_manual_throughput.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}/graphs/{graphName}/throughputSettings/default/migrateToManualThroughput'} # type: ignore
| [((25, 8, 25, 20), 'typing.TypeVar', 'TypeVar', ({(25, 16, 25, 19): '"""T"""'}, {}), "('T')", False, 'from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union\n'), ((120, 15, 122, 9), 'azure.core.paging.ItemPaged', 'ItemPaged', ({(121, 12, 121, 20): 'get_next', (121, 22, 121, 34): 'extract_data'}, {}), '(get_next, extract_data)', False, 'from azure.core.paging import ItemPaged\n'), ((929, 15, 931, 9), 'azure.core.paging.ItemPaged', 'ItemPaged', ({(930, 12, 930, 20): 'get_next', (930, 22, 930, 34): 'extract_data'}, {}), '(get_next, extract_data)', False, 'from azure.core.paging import ItemPaged\n'), ((178, 12, 178, 95), 'azure.core.exceptions.map_error', 'map_error', (), '', False, 'from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\n'), ((179, 18, 179, 83), 'azure.core.exceptions.HttpResponseError', 'HttpResponseError', (), '', False, 'from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\n'), ((234, 12, 234, 95), 'azure.core.exceptions.map_error', 'map_error', (), '', False, 'from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\n'), ((235, 18, 235, 83), 'azure.core.exceptions.HttpResponseError', 'HttpResponseError', (), '', False, 'from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\n'), ((304, 45, 304, 77), 'azure.mgmt.core.polling.arm_polling.ARMPolling', 'ARMPolling', ({(304, 56, 304, 65): 'lro_delay'}, {}), '(lro_delay, **kwargs)', False, 'from azure.mgmt.core.polling.arm_polling import ARMPolling\n'), ((308, 19, 313, 13), 'azure.core.polling.LROPoller.from_continuation_token', 'LROPoller.from_continuation_token', (), '', False, 'from azure.core.polling import LROPoller, NoPolling, PollingMethod\n'), ((315, 19, 315, 95), 'azure.core.polling.LROPoller', 'LROPoller', ({(315, 29, 315, 41): 'self._client', (315, 43, 315, 53): 'raw_result', (315, 55, 315, 78): 'get_long_running_output', (315, 80, 315, 94): 'polling_method'}, {}), '(self._client, raw_result, get_long_running_output, polling_method)', False, 'from azure.core.polling import LROPoller, NoPolling, PollingMethod\n'), ((355, 12, 355, 95), 'azure.core.exceptions.map_error', 'map_error', (), '', False, 'from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\n'), ((356, 18, 356, 83), 'azure.core.exceptions.HttpResponseError', 'HttpResponseError', (), '', False, 'from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\n'), ((412, 45, 412, 77), 'azure.mgmt.core.polling.arm_polling.ARMPolling', 'ARMPolling', ({(412, 56, 412, 65): 'lro_delay'}, {}), '(lro_delay, **kwargs)', False, 'from azure.mgmt.core.polling.arm_polling import ARMPolling\n'), ((416, 19, 421, 13), 'azure.core.polling.LROPoller.from_continuation_token', 'LROPoller.from_continuation_token', (), '', False, 'from azure.core.polling import LROPoller, NoPolling, PollingMethod\n'), ((423, 19, 423, 95), 'azure.core.polling.LROPoller', 'LROPoller', ({(423, 29, 423, 41): 'self._client', (423, 43, 423, 53): 'raw_result', (423, 55, 423, 78): 'get_long_running_output', (423, 80, 423, 94): 'polling_method'}, {}), '(self._client, raw_result, get_long_running_output, 
polling_method)', False, 'from azure.core.polling import LROPoller, NoPolling, PollingMethod\n'), ((479, 12, 479, 95), 'azure.core.exceptions.map_error', 'map_error', (), '', False, 'from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\n'), ((480, 18, 480, 83), 'azure.core.exceptions.HttpResponseError', 'HttpResponseError', (), '', False, 'from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\n'), ((535, 12, 535, 95), 'azure.core.exceptions.map_error', 'map_error', (), '', False, 'from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\n'), ((536, 18, 536, 83), 'azure.core.exceptions.HttpResponseError', 'HttpResponseError', (), '', False, 'from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\n'), ((605, 45, 605, 77), 'azure.mgmt.core.polling.arm_polling.ARMPolling', 'ARMPolling', ({(605, 56, 605, 65): 'lro_delay'}, {}), '(lro_delay, **kwargs)', False, 'from azure.mgmt.core.polling.arm_polling import ARMPolling\n'), ((609, 19, 614, 13), 'azure.core.polling.LROPoller.from_continuation_token', 'LROPoller.from_continuation_token', (), '', False, 'from azure.core.polling import LROPoller, NoPolling, PollingMethod\n'), ((616, 19, 616, 95), 'azure.core.polling.LROPoller', 'LROPoller', ({(616, 29, 616, 41): 'self._client', (616, 43, 616, 53): 'raw_result', (616, 55, 616, 78): 'get_long_running_output', (616, 80, 616, 94): 'polling_method'}, {}), '(self._client, raw_result, get_long_running_output, polling_method)', False, 'from azure.core.polling import LROPoller, NoPolling, PollingMethod\n'), ((658, 12, 658, 95), 'azure.core.exceptions.map_error', 'map_error', (), '', False, 'from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\n'), ((659, 18, 659, 83), 'azure.core.exceptions.HttpResponseError', 'HttpResponseError', (), '', False, 'from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\n'), ((723, 45, 723, 77), 'azure.mgmt.core.polling.arm_polling.ARMPolling', 'ARMPolling', ({(723, 56, 723, 65): 'lro_delay'}, {}), '(lro_delay, **kwargs)', False, 'from azure.mgmt.core.polling.arm_polling import ARMPolling\n'), ((727, 19, 732, 13), 'azure.core.polling.LROPoller.from_continuation_token', 'LROPoller.from_continuation_token', (), '', False, 'from azure.core.polling import LROPoller, NoPolling, PollingMethod\n'), ((734, 19, 734, 95), 'azure.core.polling.LROPoller', 'LROPoller', ({(734, 29, 734, 41): 'self._client', (734, 43, 734, 53): 'raw_result', (734, 55, 734, 78): 'get_long_running_output', (734, 80, 734, 94): 'polling_method'}, {}), '(self._client, raw_result, get_long_running_output, polling_method)', False, 'from azure.core.polling import LROPoller, NoPolling, PollingMethod\n'), ((776, 12, 776, 95), 'azure.core.exceptions.map_error', 'map_error', (), '', False, 'from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\n'), ((777, 18, 777, 83), 'azure.core.exceptions.HttpResponseError', 'HttpResponseError', (), '', False, 'from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, 
map_error\n'), ((841, 45, 841, 77), 'azure.mgmt.core.polling.arm_polling.ARMPolling', 'ARMPolling', ({(841, 56, 841, 65): 'lro_delay'}, {}), '(lro_delay, **kwargs)', False, 'from azure.mgmt.core.polling.arm_polling import ARMPolling\n'), ((845, 19, 850, 13), 'azure.core.polling.LROPoller.from_continuation_token', 'LROPoller.from_continuation_token', (), '', False, 'from azure.core.polling import LROPoller, NoPolling, PollingMethod\n'), ((852, 19, 852, 95), 'azure.core.polling.LROPoller', 'LROPoller', ({(852, 29, 852, 41): 'self._client', (852, 43, 852, 53): 'raw_result', (852, 55, 852, 78): 'get_long_running_output', (852, 80, 852, 94): 'polling_method'}, {}), '(self._client, raw_result, get_long_running_output, polling_method)', False, 'from azure.core.polling import LROPoller, NoPolling, PollingMethod\n'), ((990, 12, 990, 95), 'azure.core.exceptions.map_error', 'map_error', (), '', False, 'from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\n'), ((991, 18, 991, 83), 'azure.core.exceptions.HttpResponseError', 'HttpResponseError', (), '', False, 'from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\n'), ((1048, 12, 1048, 95), 'azure.core.exceptions.map_error', 'map_error', (), '', False, 'from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\n'), ((1049, 18, 1049, 83), 'azure.core.exceptions.HttpResponseError', 'HttpResponseError', (), '', False, 'from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\n'), ((1122, 45, 1122, 77), 'azure.mgmt.core.polling.arm_polling.ARMPolling', 'ARMPolling', ({(1122, 56, 1122, 65): 'lro_delay'}, {}), '(lro_delay, **kwargs)', False, 'from azure.mgmt.core.polling.arm_polling import ARMPolling\n'), ((1126, 19, 1131, 13), 'azure.core.polling.LROPoller.from_continuation_token', 'LROPoller.from_continuation_token', (), '', False, 'from azure.core.polling import LROPoller, NoPolling, PollingMethod\n'), ((1133, 19, 1133, 95), 'azure.core.polling.LROPoller', 'LROPoller', ({(1133, 29, 1133, 41): 'self._client', (1133, 43, 1133, 53): 'raw_result', (1133, 55, 1133, 78): 'get_long_running_output', (1133, 80, 1133, 94): 'polling_method'}, {}), '(self._client, raw_result, get_long_running_output, polling_method)', False, 'from azure.core.polling import LROPoller, NoPolling, PollingMethod\n'), ((1175, 12, 1175, 95), 'azure.core.exceptions.map_error', 'map_error', (), '', False, 'from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\n'), ((1176, 18, 1176, 83), 'azure.core.exceptions.HttpResponseError', 'HttpResponseError', (), '', False, 'from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\n'), ((1236, 45, 1236, 77), 'azure.mgmt.core.polling.arm_polling.ARMPolling', 'ARMPolling', ({(1236, 56, 1236, 65): 'lro_delay'}, {}), '(lro_delay, **kwargs)', False, 'from azure.mgmt.core.polling.arm_polling import ARMPolling\n'), ((1240, 19, 1245, 13), 'azure.core.polling.LROPoller.from_continuation_token', 'LROPoller.from_continuation_token', (), '', False, 'from azure.core.polling import LROPoller, NoPolling, PollingMethod\n'), ((1247, 19, 1247, 95), 'azure.core.polling.LROPoller', 'LROPoller', ({(1247, 29, 
1247, 41): 'self._client', (1247, 43, 1247, 53): 'raw_result', (1247, 55, 1247, 78): 'get_long_running_output', (1247, 80, 1247, 94): 'polling_method'}, {}), '(self._client, raw_result, get_long_running_output, polling_method)', False, 'from azure.core.polling import LROPoller, NoPolling, PollingMethod\n'), ((1307, 12, 1307, 95), 'azure.core.exceptions.map_error', 'map_error', (), '', False, 'from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\n'), ((1308, 18, 1308, 83), 'azure.core.exceptions.HttpResponseError', 'HttpResponseError', (), '', False, 'from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\n'), ((1365, 12, 1365, 95), 'azure.core.exceptions.map_error', 'map_error', (), '', False, 'from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\n'), ((1366, 18, 1366, 83), 'azure.core.exceptions.HttpResponseError', 'HttpResponseError', (), '', False, 'from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\n'), ((1439, 45, 1439, 77), 'azure.mgmt.core.polling.arm_polling.ARMPolling', 'ARMPolling', ({(1439, 56, 1439, 65): 'lro_delay'}, {}), '(lro_delay, **kwargs)', False, 'from azure.mgmt.core.polling.arm_polling import ARMPolling\n'), ((1443, 19, 1448, 13), 'azure.core.polling.LROPoller.from_continuation_token', 'LROPoller.from_continuation_token', (), '', False, 'from azure.core.polling import LROPoller, NoPolling, PollingMethod\n'), ((1450, 19, 1450, 95), 'azure.core.polling.LROPoller', 'LROPoller', ({(1450, 29, 1450, 41): 'self._client', (1450, 43, 1450, 53): 'raw_result', (1450, 55, 1450, 78): 'get_long_running_output', (1450, 80, 1450, 94): 'polling_method'}, {}), '(self._client, raw_result, get_long_running_output, polling_method)', False, 'from azure.core.polling import LROPoller, NoPolling, PollingMethod\n'), ((1494, 12, 1494, 95), 'azure.core.exceptions.map_error', 'map_error', (), '', False, 'from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\n'), ((1495, 18, 1495, 83), 'azure.core.exceptions.HttpResponseError', 'HttpResponseError', (), '', False, 'from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\n'), ((1563, 45, 1563, 77), 'azure.mgmt.core.polling.arm_polling.ARMPolling', 'ARMPolling', ({(1563, 56, 1563, 65): 'lro_delay'}, {}), '(lro_delay, **kwargs)', False, 'from azure.mgmt.core.polling.arm_polling import ARMPolling\n'), ((1567, 19, 1572, 13), 'azure.core.polling.LROPoller.from_continuation_token', 'LROPoller.from_continuation_token', (), '', False, 'from azure.core.polling import LROPoller, NoPolling, PollingMethod\n'), ((1574, 19, 1574, 95), 'azure.core.polling.LROPoller', 'LROPoller', ({(1574, 29, 1574, 41): 'self._client', (1574, 43, 1574, 53): 'raw_result', (1574, 55, 1574, 78): 'get_long_running_output', (1574, 80, 1574, 94): 'polling_method'}, {}), '(self._client, raw_result, get_long_running_output, polling_method)', False, 'from azure.core.polling import LROPoller, NoPolling, PollingMethod\n'), ((1618, 12, 1618, 95), 'azure.core.exceptions.map_error', 'map_error', (), '', False, 'from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, 
ResourceNotFoundError, map_error\n'), ((1619, 18, 1619, 83), 'azure.core.exceptions.HttpResponseError', 'HttpResponseError', (), '', False, 'from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\n'), ((1687, 45, 1687, 77), 'azure.mgmt.core.polling.arm_polling.ARMPolling', 'ARMPolling', ({(1687, 56, 1687, 65): 'lro_delay'}, {}), '(lro_delay, **kwargs)', False, 'from azure.mgmt.core.polling.arm_polling import ARMPolling\n'), ((1691, 19, 1696, 13), 'azure.core.polling.LROPoller.from_continuation_token', 'LROPoller.from_continuation_token', (), '', False, 'from azure.core.polling import LROPoller, NoPolling, PollingMethod\n'), ((1698, 19, 1698, 95), 'azure.core.polling.LROPoller', 'LROPoller', ({(1698, 29, 1698, 41): 'self._client', (1698, 43, 1698, 53): 'raw_result', (1698, 55, 1698, 78): 'get_long_running_output', (1698, 80, 1698, 94): 'polling_method'}, {}), '(self._client, raw_result, get_long_running_output, polling_method)', False, 'from azure.core.polling import LROPoller, NoPolling, PollingMethod\n'), ((115, 16, 115, 99), 'azure.core.exceptions.map_error', 'map_error', (), '', False, 'from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\n'), ((116, 22, 116, 87), 'azure.core.exceptions.HttpResponseError', 'HttpResponseError', (), '', False, 'from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\n'), ((305, 48, 305, 59), 'azure.core.polling.NoPolling', 'NoPolling', ({}, {}), '()', False, 'from azure.core.polling import LROPoller, NoPolling, PollingMethod\n'), ((413, 48, 413, 59), 'azure.core.polling.NoPolling', 'NoPolling', ({}, {}), '()', False, 'from azure.core.polling import LROPoller, NoPolling, PollingMethod\n'), ((606, 48, 606, 59), 'azure.core.polling.NoPolling', 'NoPolling', ({}, {}), '()', False, 'from azure.core.polling import LROPoller, NoPolling, PollingMethod\n'), ((724, 48, 724, 59), 'azure.core.polling.NoPolling', 'NoPolling', ({}, {}), '()', False, 'from azure.core.polling import LROPoller, NoPolling, PollingMethod\n'), ((842, 48, 842, 59), 'azure.core.polling.NoPolling', 'NoPolling', ({}, {}), '()', False, 'from azure.core.polling import LROPoller, NoPolling, PollingMethod\n'), ((924, 16, 924, 99), 'azure.core.exceptions.map_error', 'map_error', (), '', False, 'from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\n'), ((925, 22, 925, 87), 'azure.core.exceptions.HttpResponseError', 'HttpResponseError', (), '', False, 'from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\n'), ((1123, 48, 1123, 59), 'azure.core.polling.NoPolling', 'NoPolling', ({}, {}), '()', False, 'from azure.core.polling import LROPoller, NoPolling, PollingMethod\n'), ((1237, 48, 1237, 59), 'azure.core.polling.NoPolling', 'NoPolling', ({}, {}), '()', False, 'from azure.core.polling import LROPoller, NoPolling, PollingMethod\n'), ((1440, 48, 1440, 59), 'azure.core.polling.NoPolling', 'NoPolling', ({}, {}), '()', False, 'from azure.core.polling import LROPoller, NoPolling, PollingMethod\n'), ((1564, 48, 1564, 59), 'azure.core.polling.NoPolling', 'NoPolling', ({}, {}), '()', False, 'from azure.core.polling import LROPoller, NoPolling, PollingMethod\n'), ((1688, 48, 1688, 59), 'azure.core.polling.NoPolling', 
'NoPolling', ({}, {}), '()', False, 'from azure.core.polling import LROPoller, NoPolling, PollingMethod\n')] |
yura505/core | homeassistant/components/tasmota/discovery.py | 0fc5f4b0421c6c5204d3ccb562153ac3836441a9 | """Support for MQTT discovery."""
import asyncio
import logging
from hatasmota.discovery import (
TasmotaDiscovery,
get_device_config as tasmota_get_device_config,
get_entities_for_platform as tasmota_get_entities_for_platform,
get_entity as tasmota_get_entity,
has_entities_with_platform as tasmota_has_entities_with_platform,
unique_id_from_hash,
)
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.typing import HomeAssistantType
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
SUPPORTED_PLATFORMS = [
"switch",
]
ALREADY_DISCOVERED = "tasmota_discovered_components"
CONFIG_ENTRY_IS_SETUP = "tasmota_config_entry_is_setup"
DATA_CONFIG_ENTRY_LOCK = "tasmota_config_entry_lock"
TASMOTA_DISCOVERY_DEVICE = "tasmota_discovery_device"
TASMOTA_DISCOVERY_ENTITY_NEW = "tasmota_discovery_entity_new_{}"
TASMOTA_DISCOVERY_ENTITY_UPDATED = "tasmota_discovery_entity_updated_{}_{}_{}_{}"
def clear_discovery_hash(hass, discovery_hash):
"""Clear entry in ALREADY_DISCOVERED list."""
del hass.data[ALREADY_DISCOVERED][discovery_hash]
def set_discovery_hash(hass, discovery_hash):
"""Set entry in ALREADY_DISCOVERED list."""
hass.data[ALREADY_DISCOVERED][discovery_hash] = {}
async def async_start(
hass: HomeAssistantType, discovery_topic, config_entry, tasmota_mqtt
) -> bool:
"""Start MQTT Discovery."""
async def _load_platform(platform):
"""Load a Tasmota platform if not already done."""
async with hass.data[DATA_CONFIG_ENTRY_LOCK]:
config_entries_key = f"{platform}.tasmota"
if config_entries_key not in hass.data[CONFIG_ENTRY_IS_SETUP]:
await hass.config_entries.async_forward_entry_setup(
config_entry, platform
)
hass.data[CONFIG_ENTRY_IS_SETUP].add(config_entries_key)
async def _discover_entity(tasmota_entity_config, discovery_hash, platform):
"""Handle adding or updating a discovered entity."""
if not tasmota_entity_config:
# Entity disabled, clean up entity registry
entity_registry = await hass.helpers.entity_registry.async_get_registry()
unique_id = unique_id_from_hash(discovery_hash)
entity_id = entity_registry.async_get_entity_id(platform, DOMAIN, unique_id)
if entity_id:
_LOGGER.debug("Removing entity: %s %s", platform, discovery_hash)
entity_registry.async_remove(entity_id)
return
if discovery_hash in hass.data[ALREADY_DISCOVERED]:
_LOGGER.debug(
"Entity already added, sending update: %s %s",
platform,
discovery_hash,
)
async_dispatcher_send(
hass,
TASMOTA_DISCOVERY_ENTITY_UPDATED.format(*discovery_hash),
tasmota_entity_config,
)
else:
_LOGGER.debug("Adding new entity: %s %s", platform, discovery_hash)
tasmota_entity = tasmota_get_entity(tasmota_entity_config, tasmota_mqtt)
hass.data[ALREADY_DISCOVERED][discovery_hash] = None
async_dispatcher_send(
hass,
TASMOTA_DISCOVERY_ENTITY_NEW.format(platform),
tasmota_entity,
discovery_hash,
)
async def async_device_discovered(payload, mac):
"""Process the received message."""
if ALREADY_DISCOVERED not in hass.data:
hass.data[ALREADY_DISCOVERED] = {}
_LOGGER.debug("Received discovery data for tasmota device: %s", mac)
tasmota_device_config = tasmota_get_device_config(payload)
async_dispatcher_send(
hass, TASMOTA_DISCOVERY_DEVICE, tasmota_device_config, mac
)
if not payload:
return
for platform in SUPPORTED_PLATFORMS:
if not tasmota_has_entities_with_platform(payload, platform):
continue
await _load_platform(platform)
for platform in SUPPORTED_PLATFORMS:
tasmota_entities = tasmota_get_entities_for_platform(payload, platform)
for (tasmota_entity_config, discovery_hash) in tasmota_entities:
await _discover_entity(tasmota_entity_config, discovery_hash, platform)
hass.data[DATA_CONFIG_ENTRY_LOCK] = asyncio.Lock()
hass.data[CONFIG_ENTRY_IS_SETUP] = set()
tasmota_discovery = TasmotaDiscovery(discovery_topic, tasmota_mqtt)
await tasmota_discovery.start_discovery(async_device_discovered, None)
| [((19, 10, 19, 37), 'logging.getLogger', 'logging.getLogger', ({(19, 28, 19, 36): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((119, 40, 119, 54), 'asyncio.Lock', 'asyncio.Lock', ({}, {}), '()', False, 'import asyncio\n'), ((122, 24, 122, 71), 'hatasmota.discovery.TasmotaDiscovery', 'TasmotaDiscovery', ({(122, 41, 122, 56): 'discovery_topic', (122, 58, 122, 70): 'tasmota_mqtt'}, {}), '(discovery_topic, tasmota_mqtt)', False, 'from hatasmota.discovery import TasmotaDiscovery, get_device_config as tasmota_get_device_config, get_entities_for_platform as tasmota_get_entities_for_platform, get_entity as tasmota_get_entity, has_entities_with_platform as tasmota_has_entities_with_platform, unique_id_from_hash\n'), ((101, 32, 101, 66), 'hatasmota.discovery.get_device_config', 'tasmota_get_device_config', ({(101, 58, 101, 65): 'payload'}, {}), '(payload)', True, 'from hatasmota.discovery import TasmotaDiscovery, get_device_config as tasmota_get_device_config, get_entities_for_platform as tasmota_get_entities_for_platform, get_entity as tasmota_get_entity, has_entities_with_platform as tasmota_has_entities_with_platform, unique_id_from_hash\n'), ((102, 8, 104, 9), 'homeassistant.helpers.dispatcher.async_dispatcher_send', 'async_dispatcher_send', ({(103, 12, 103, 16): 'hass', (103, 18, 103, 42): 'TASMOTA_DISCOVERY_DEVICE', (103, 44, 103, 65): 'tasmota_device_config', (103, 67, 103, 70): 'mac'}, {}), '(hass, TASMOTA_DISCOVERY_DEVICE, tasmota_device_config,\n mac)', False, 'from homeassistant.helpers.dispatcher import async_dispatcher_send\n'), ((63, 24, 63, 59), 'hatasmota.discovery.unique_id_from_hash', 'unique_id_from_hash', ({(63, 44, 63, 58): 'discovery_hash'}, {}), '(discovery_hash)', False, 'from hatasmota.discovery import TasmotaDiscovery, get_device_config as tasmota_get_device_config, get_entities_for_platform as tasmota_get_entities_for_platform, get_entity as tasmota_get_entity, has_entities_with_platform as tasmota_has_entities_with_platform, unique_id_from_hash\n'), ((83, 29, 83, 84), 'hatasmota.discovery.get_entity', 'tasmota_get_entity', ({(83, 48, 83, 69): 'tasmota_entity_config', (83, 71, 83, 83): 'tasmota_mqtt'}, {}), '(tasmota_entity_config, tasmota_mqtt)', True, 'from hatasmota.discovery import TasmotaDiscovery, get_device_config as tasmota_get_device_config, get_entities_for_platform as tasmota_get_entities_for_platform, get_entity as tasmota_get_entity, has_entities_with_platform as tasmota_has_entities_with_platform, unique_id_from_hash\n'), ((115, 31, 115, 83), 'hatasmota.discovery.get_entities_for_platform', 'tasmota_get_entities_for_platform', ({(115, 65, 115, 72): 'payload', (115, 74, 115, 82): 'platform'}, {}), '(payload, platform)', True, 'from hatasmota.discovery import TasmotaDiscovery, get_device_config as tasmota_get_device_config, get_entities_for_platform as tasmota_get_entities_for_platform, get_entity as tasmota_get_entity, has_entities_with_platform as tasmota_has_entities_with_platform, unique_id_from_hash\n'), ((110, 19, 110, 72), 'hatasmota.discovery.has_entities_with_platform', 'tasmota_has_entities_with_platform', ({(110, 54, 110, 61): 'payload', (110, 63, 110, 71): 'platform'}, {}), '(payload, platform)', True, 'from hatasmota.discovery import TasmotaDiscovery, get_device_config as tasmota_get_device_config, get_entities_for_platform as tasmota_get_entities_for_platform, get_entity as tasmota_get_entity, has_entities_with_platform as tasmota_has_entities_with_platform, unique_id_from_hash\n')] |
TimoKerr/tfx | tfx/components/infra_validator/executor.py | 10d13d57eeac21514fed73118cb43464dada67f1 | # Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TFX InfraValidator executor definition."""
import contextlib
import functools
import os
import signal
import threading
import time
from typing import Any, Dict, List, Optional
from absl import logging
from tfx import types
from tfx.components.infra_validator import error_types
from tfx.components.infra_validator import request_builder
from tfx.components.infra_validator import serving_bins
from tfx.components.infra_validator import types as iv_types
from tfx.components.infra_validator.model_server_runners import kubernetes_runner
from tfx.components.infra_validator.model_server_runners import local_docker_runner
from tfx.dsl.components.base import base_executor
from tfx.proto import infra_validator_pb2
from tfx.types import artifact_utils
from tfx.types.standard_component_specs import BLESSING_KEY
from tfx.types.standard_component_specs import EXAMPLES_KEY
from tfx.types.standard_component_specs import MODEL_KEY
from tfx.types.standard_component_specs import REQUEST_SPEC_KEY
from tfx.types.standard_component_specs import SERVING_SPEC_KEY
from tfx.types.standard_component_specs import VALIDATION_SPEC_KEY
from tfx.utils import io_utils
from tfx.utils import path_utils
from tfx.utils import proto_utils
from tfx.utils.model_paths import tf_serving_flavor
from tensorflow_serving.apis import classification_pb2
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_log_pb2
from tensorflow_serving.apis import regression_pb2
_DEFAULT_NUM_TRIES = 5
_DEFAULT_POLLING_INTERVAL_SEC = 1
_DEFAULT_MAX_LOADING_TIME_SEC = 300
_DEFAULT_MODEL_NAME = 'infra-validation-model'
# Proto message keys for oneof block.
_TENSORFLOW_SERVING = 'tensorflow_serving'
_LOCAL_DOCKER = 'local_docker'
_KUBERNETES = 'kubernetes'
# Artifact property keys
_BLESSED_KEY = 'blessed'
_MODEL_FLAG_KEY = 'has_model'
# Filename of infra blessing artifact on succeed.
_BLESSED_FILENAME = 'INFRA_BLESSED'
# Filename of infra blessing artifact on fail.
_NOT_BLESSED_FILENAME = 'INFRA_NOT_BLESSED'
def _create_model_server_runner(
model_path: str,
serving_binary: serving_bins.ServingBinary,
serving_spec: infra_validator_pb2.ServingSpec):
"""Create a ModelServerRunner from a model, a ServingBinary and a ServingSpec.
Args:
model_path: An IV-flavored model path. (See model_path_utils.py)
serving_binary: One of ServingBinary instances parsed from the
`serving_spec`.
serving_spec: A ServingSpec instance of this infra validation.
Returns:
A ModelServerRunner.
"""
platform = serving_spec.WhichOneof('serving_platform')
if platform == 'local_docker':
return local_docker_runner.LocalDockerRunner(
model_path=model_path,
serving_binary=serving_binary,
serving_spec=serving_spec
)
elif platform == 'kubernetes':
return kubernetes_runner.KubernetesRunner(
model_path=model_path,
serving_binary=serving_binary,
serving_spec=serving_spec
)
else:
raise NotImplementedError('Invalid serving_platform {}'.format(platform))
def _convert_to_prediction_log(request: iv_types.Request):
"""Try convert infra validation request to TF-Serving PredictionLog."""
if isinstance(request, classification_pb2.ClassificationRequest):
return prediction_log_pb2.PredictionLog(
classify_log=prediction_log_pb2.ClassifyLog(request=request))
elif isinstance(request, regression_pb2.RegressionRequest):
return prediction_log_pb2.PredictionLog(
regress_log=prediction_log_pb2.RegressLog(request=request))
elif isinstance(request, predict_pb2.PredictRequest):
return prediction_log_pb2.PredictionLog(
predict_log=prediction_log_pb2.PredictLog(request=request))
else:
raise NotImplementedError(
f'Cannot convert {type(request)} to PredictionLog')
def _mark_blessed(blessing: types.Artifact) -> None:
logging.info('Model passed infra validation.')
io_utils.write_string_file(
os.path.join(blessing.uri, _BLESSED_FILENAME), '')
blessing.set_int_custom_property(_BLESSED_KEY, 1)
def _mark_not_blessed(blessing: types.Artifact) -> None:
logging.info('Model failed infra validation.')
io_utils.write_string_file(
os.path.join(blessing.uri, _NOT_BLESSED_FILENAME), '')
blessing.set_int_custom_property(_BLESSED_KEY, 0)
class Executor(base_executor.BaseExecutor):
"""TFX infra validator executor."""
def __init__(self,
context: Optional[base_executor.BaseExecutor.Context] = None):
super(Executor, self).__init__(context)
self._cleanups = []
def _AddCleanup(self, function, *args, **kwargs):
self._cleanups.append(functools.partial(function, *args, **kwargs))
def _Cleanup(self):
for cleanup in self._cleanups:
try:
cleanup()
except: # pylint: disable=broad-except, bare-except
logging.warning('Error occurred during cleanup.', exc_info=True)
def Do(self, input_dict: Dict[str, List[types.Artifact]],
output_dict: Dict[str, List[types.Artifact]],
exec_properties: Dict[str, Any]) -> None:
"""Contract for running InfraValidator Executor.
Args:
input_dict:
- `model`: Single `Model` artifact that we're validating.
- `examples`: `Examples` artifacts to be used for test requests.
output_dict:
- `blessing`: Single `InfraBlessing` artifact containing the validated
          result and optionally validated model if warmup requests are appended.
Artifact URI includes an empty file with the name either of
INFRA_BLESSED or INFRA_NOT_BLESSED.
exec_properties:
- `serving_spec`: Serialized `ServingSpec` configuration.
- `validation_spec`: Serialized `ValidationSpec` configuration.
- `request_spec`: Serialized `RequestSpec` configuration.
"""
self._log_startup(input_dict, output_dict, exec_properties)
model = artifact_utils.get_single_instance(input_dict[MODEL_KEY])
blessing = artifact_utils.get_single_instance(output_dict[BLESSING_KEY])
if input_dict.get(EXAMPLES_KEY):
examples = artifact_utils.get_single_instance(input_dict[EXAMPLES_KEY])
else:
examples = None
serving_spec = infra_validator_pb2.ServingSpec()
proto_utils.json_to_proto(exec_properties[SERVING_SPEC_KEY], serving_spec)
if not serving_spec.model_name:
serving_spec.model_name = _DEFAULT_MODEL_NAME
validation_spec = infra_validator_pb2.ValidationSpec()
if exec_properties.get(VALIDATION_SPEC_KEY):
proto_utils.json_to_proto(exec_properties[VALIDATION_SPEC_KEY],
validation_spec)
if not validation_spec.num_tries:
validation_spec.num_tries = _DEFAULT_NUM_TRIES
if not validation_spec.max_loading_time_seconds:
validation_spec.max_loading_time_seconds = _DEFAULT_MAX_LOADING_TIME_SEC
if exec_properties.get(REQUEST_SPEC_KEY):
request_spec = infra_validator_pb2.RequestSpec()
proto_utils.json_to_proto(exec_properties[REQUEST_SPEC_KEY],
request_spec)
else:
request_spec = None
with self._InstallGracefulShutdownHandler():
self._Do(
model=model,
examples=examples,
blessing=blessing,
serving_spec=serving_spec,
validation_spec=validation_spec,
request_spec=request_spec,
)
@contextlib.contextmanager
def _InstallGracefulShutdownHandler(self):
# pylint: disable=g-doc-return-or-yield
"""Install graceful shutdown behavior.
Caveat: InfraValidator currently only recognizes SIGTERM signal as a
graceful shutdown. Furthermore, SIGTERM can be handled only if the executor
is running on the MainThread (the thread that runs the python interpreter)
due to the limitation of Python API.
When the executor is running on Kubernetes, SIGTERM is a standard way to
signal the graceful shutdown. Python default behavior for receiving SIGTERM
is to terminate the process without raising any exception. By registering a
handler that raises on signal, we can effectively transform the signal to an
exception, and we can reuse our cleanup code inside "except" or "finally"
block during the grace period.
When the executor is run by the local Beam DirectRunner, the executor thread
is one of the worker threads (not a MainThread) therefore SIGTERM cannot
be recognized. If either of MainThread or worker thread receives SIGTERM,
executor will die immediately without grace period.
    Even if the executor fails to shut down gracefully, external resources that
    are created by the model server runner can be cleaned up if the platform
    supports such a mechanism (e.g. activeDeadlineSeconds in Kubernetes).
"""
def _handler(signum, frame):
del frame # Unused.
raise error_types.GracefulShutdown('Got signal {}.'.format(signum))
try:
old_handler = signal.signal(signal.SIGTERM, _handler)
except ValueError:
# If current thread is not a MainThread, it is not allowed to register
# the signal handler (ValueError raised).
logging.info('Unable to register signal handler for non-MainThread '
'(name=%s). SIGTERM will not be handled.',
threading.current_thread().name)
old_handler = None
try:
yield
finally:
self._Cleanup()
if old_handler:
signal.signal(signal.SIGTERM, old_handler)
def _Do(
self,
model: types.Artifact,
examples: Optional[types.Artifact],
blessing: types.Artifact,
serving_spec: infra_validator_pb2.ServingSpec,
validation_spec: infra_validator_pb2.ValidationSpec,
request_spec: Optional[infra_validator_pb2.RequestSpec],
):
if examples and request_spec:
logging.info('InfraValidator will be run in LOAD_AND_QUERY mode.')
requests = request_builder.build_requests(
model_name=serving_spec.model_name,
model=model,
examples=examples,
request_spec=request_spec)
else:
logging.info('InfraValidator will be run in LOAD_ONLY mode.')
requests = []
model_path = self._PrepareModelPath(model, serving_spec)
# TODO(jjong): Make logic parallel.
all_passed = True
for serving_binary in serving_bins.parse_serving_binaries(serving_spec):
all_passed &= self._ValidateWithRetry(
model_path=model_path,
serving_binary=serving_binary,
serving_spec=serving_spec,
validation_spec=validation_spec,
requests=requests)
if all_passed:
_mark_blessed(blessing)
if requests and request_spec.make_warmup:
self._CreateWarmupModel(blessing, model_path, warmup_requests=requests)
else:
_mark_not_blessed(blessing)
def _CreateWarmupModel(self, blessing: types.Artifact, model_path: str,
warmup_requests: List[iv_types.Request]):
output_model_path = path_utils.stamped_model_path(blessing.uri)
io_utils.copy_dir(src=model_path, dst=output_model_path)
io_utils.write_tfrecord_file(
path_utils.warmup_file_path(output_model_path),
*[_convert_to_prediction_log(r) for r in warmup_requests])
blessing.set_int_custom_property(_MODEL_FLAG_KEY, 1)
def _PrepareModelPath(self, model: types.Artifact,
serving_spec: infra_validator_pb2.ServingSpec) -> str:
model_path = path_utils.serving_model_path(
model.uri, path_utils.is_old_model_artifact(model))
serving_binary = serving_spec.WhichOneof('serving_binary')
if serving_binary == _TENSORFLOW_SERVING:
# TensorFlow Serving requires model to be stored in its own directory
# structure flavor. If current model_path does not conform to the flavor,
# we need to make a copy to the temporary path.
try:
# Check whether current model_path conforms to the tensorflow serving
# model path flavor. (Parsed without exception)
tf_serving_flavor.parse_model_path(
model_path,
expected_model_name=serving_spec.model_name)
except ValueError:
# Copy the model to comply with the tensorflow serving model path
# flavor.
temp_model_path = tf_serving_flavor.make_model_path(
model_base_path=self._get_tmp_dir(),
model_name=serving_spec.model_name,
version=int(time.time()))
io_utils.copy_dir(src=model_path, dst=temp_model_path)
self._AddCleanup(io_utils.delete_dir, self._context.get_tmp_path())
return temp_model_path
return model_path
def _ValidateWithRetry(
self, model_path: str,
serving_binary: serving_bins.ServingBinary,
serving_spec: infra_validator_pb2.ServingSpec,
validation_spec: infra_validator_pb2.ValidationSpec,
requests: List[iv_types.Request]):
for i in range(validation_spec.num_tries):
logging.info('Starting infra validation (attempt %d/%d).', i + 1,
validation_spec.num_tries)
try:
self._ValidateOnce(
model_path=model_path,
serving_binary=serving_binary,
serving_spec=serving_spec,
validation_spec=validation_spec,
requests=requests)
except error_types.GracefulShutdown:
# GracefulShutdown means infra validation aborted. No more retry and
# escalate the error.
raise
except Exception as e: # pylint: disable=broad-except
# Other exceptions indicates validation failure. Log the error and
# retry.
logging.exception('Infra validation (attempt %d/%d) failed.', i + 1,
validation_spec.num_tries)
if isinstance(e, error_types.DeadlineExceeded):
logging.info('Consider increasing the value of '
'ValidationSpec.max_loading_time_seconds.')
else:
# If validation has passed without any exception, succeeded.
return True
# Every trial has failed. Marking model as not blessed.
return False
def _ValidateOnce(
self, model_path: str,
serving_binary: serving_bins.ServingBinary,
serving_spec: infra_validator_pb2.ServingSpec,
validation_spec: infra_validator_pb2.ValidationSpec,
requests: List[iv_types.Request]):
deadline = time.time() + validation_spec.max_loading_time_seconds
runner = _create_model_server_runner(
model_path=model_path,
serving_binary=serving_binary,
serving_spec=serving_spec)
try:
logging.info('Starting %r.', runner)
runner.Start()
# Check model is successfully loaded.
runner.WaitUntilRunning(deadline)
client = serving_binary.MakeClient(runner.GetEndpoint())
client.WaitUntilModelLoaded(
deadline, polling_interval_sec=_DEFAULT_POLLING_INTERVAL_SEC)
# Check model can be successfully queried.
if requests:
client.SendRequests(requests)
finally:
logging.info('Stopping %r.', runner)
runner.Stop()
| [((120, 2, 120, 48), 'absl.logging.info', 'logging.info', ({(120, 15, 120, 47): '"""Model passed infra validation."""'}, {}), "('Model passed infra validation.')", False, 'from absl import logging\n'), ((127, 2, 127, 48), 'absl.logging.info', 'logging.info', ({(127, 15, 127, 47): '"""Model failed infra validation."""'}, {}), "('Model failed infra validation.')", False, 'from absl import logging\n'), ((88, 11, 92, 5), 'tfx.components.infra_validator.model_server_runners.local_docker_runner.LocalDockerRunner', 'local_docker_runner.LocalDockerRunner', (), '', False, 'from tfx.components.infra_validator.model_server_runners import local_docker_runner\n'), ((122, 6, 122, 51), 'os.path.join', 'os.path.join', ({(122, 19, 122, 31): 'blessing.uri', (122, 33, 122, 50): '_BLESSED_FILENAME'}, {}), '(blessing.uri, _BLESSED_FILENAME)', False, 'import os\n'), ((129, 6, 129, 55), 'os.path.join', 'os.path.join', ({(129, 19, 129, 31): 'blessing.uri', (129, 33, 129, 54): '_NOT_BLESSED_FILENAME'}, {}), '(blessing.uri, _NOT_BLESSED_FILENAME)', False, 'import os\n'), ((172, 12, 172, 69), 'tfx.types.artifact_utils.get_single_instance', 'artifact_utils.get_single_instance', ({(172, 47, 172, 68): 'input_dict[MODEL_KEY]'}, {}), '(input_dict[MODEL_KEY])', False, 'from tfx.types import artifact_utils\n'), ((173, 15, 173, 76), 'tfx.types.artifact_utils.get_single_instance', 'artifact_utils.get_single_instance', ({(173, 50, 173, 75): 'output_dict[BLESSING_KEY]'}, {}), '(output_dict[BLESSING_KEY])', False, 'from tfx.types import artifact_utils\n'), ((180, 19, 180, 52), 'tfx.proto.infra_validator_pb2.ServingSpec', 'infra_validator_pb2.ServingSpec', ({}, {}), '()', False, 'from tfx.proto import infra_validator_pb2\n'), ((181, 4, 181, 78), 'tfx.utils.proto_utils.json_to_proto', 'proto_utils.json_to_proto', ({(181, 30, 181, 63): 'exec_properties[SERVING_SPEC_KEY]', (181, 65, 181, 77): 'serving_spec'}, {}), '(exec_properties[SERVING_SPEC_KEY], serving_spec)', False, 'from tfx.utils import proto_utils\n'), ((185, 22, 185, 58), 'tfx.proto.infra_validator_pb2.ValidationSpec', 'infra_validator_pb2.ValidationSpec', ({}, {}), '()', False, 'from tfx.proto import infra_validator_pb2\n'), ((283, 26, 283, 75), 'tfx.components.infra_validator.serving_bins.parse_serving_binaries', 'serving_bins.parse_serving_binaries', ({(283, 62, 283, 74): 'serving_spec'}, {}), '(serving_spec)', False, 'from tfx.components.infra_validator import serving_bins\n'), ((300, 24, 300, 67), 'tfx.utils.path_utils.stamped_model_path', 'path_utils.stamped_model_path', ({(300, 54, 300, 66): 'blessing.uri'}, {}), '(blessing.uri)', False, 'from tfx.utils import path_utils\n'), ((301, 4, 301, 60), 'tfx.utils.io_utils.copy_dir', 'io_utils.copy_dir', (), '', False, 'from tfx.utils import io_utils\n'), ((94, 11, 98, 5), 'tfx.components.infra_validator.model_server_runners.kubernetes_runner.KubernetesRunner', 'kubernetes_runner.KubernetesRunner', (), '', False, 'from tfx.components.infra_validator.model_server_runners import kubernetes_runner\n'), ((142, 26, 142, 70), 'functools.partial', 'functools.partial', ({(142, 44, 142, 52): 'function', (142, 54, 142, 59): '*args'}, {}), '(function, *args, **kwargs)', False, 'import functools\n'), ((176, 17, 176, 77), 'tfx.types.artifact_utils.get_single_instance', 'artifact_utils.get_single_instance', ({(176, 52, 176, 76): 'input_dict[EXAMPLES_KEY]'}, {}), '(input_dict[EXAMPLES_KEY])', False, 'from tfx.types import artifact_utils\n'), ((187, 6, 188, 48), 'tfx.utils.proto_utils.json_to_proto', 'proto_utils.json_to_proto', 
({(187, 32, 187, 68): 'exec_properties[VALIDATION_SPEC_KEY]', (188, 32, 188, 47): 'validation_spec'}, {}), '(exec_properties[VALIDATION_SPEC_KEY], validation_spec\n )', False, 'from tfx.utils import proto_utils\n'), ((195, 21, 195, 54), 'tfx.proto.infra_validator_pb2.RequestSpec', 'infra_validator_pb2.RequestSpec', ({}, {}), '()', False, 'from tfx.proto import infra_validator_pb2\n'), ((196, 6, 197, 45), 'tfx.utils.proto_utils.json_to_proto', 'proto_utils.json_to_proto', ({(196, 32, 196, 65): 'exec_properties[REQUEST_SPEC_KEY]', (197, 32, 197, 44): 'request_spec'}, {}), '(exec_properties[REQUEST_SPEC_KEY], request_spec)', False, 'from tfx.utils import proto_utils\n'), ((243, 20, 243, 59), 'signal.signal', 'signal.signal', ({(243, 34, 243, 48): 'signal.SIGTERM', (243, 50, 243, 58): '_handler'}, {}), '(signal.SIGTERM, _handler)', False, 'import signal\n'), ((270, 6, 270, 72), 'absl.logging.info', 'logging.info', ({(270, 19, 270, 71): '"""InfraValidator will be run in LOAD_AND_QUERY mode."""'}, {}), "('InfraValidator will be run in LOAD_AND_QUERY mode.')", False, 'from absl import logging\n'), ((271, 17, 275, 36), 'tfx.components.infra_validator.request_builder.build_requests', 'request_builder.build_requests', (), '', False, 'from tfx.components.infra_validator import request_builder\n'), ((277, 6, 277, 67), 'absl.logging.info', 'logging.info', ({(277, 19, 277, 66): '"""InfraValidator will be run in LOAD_ONLY mode."""'}, {}), "('InfraValidator will be run in LOAD_ONLY mode.')", False, 'from absl import logging\n'), ((303, 8, 303, 54), 'tfx.utils.path_utils.warmup_file_path', 'path_utils.warmup_file_path', ({(303, 36, 303, 53): 'output_model_path'}, {}), '(output_model_path)', False, 'from tfx.utils import path_utils\n'), ((310, 19, 310, 58), 'tfx.utils.path_utils.is_old_model_artifact', 'path_utils.is_old_model_artifact', ({(310, 52, 310, 57): 'model'}, {}), '(model)', False, 'from tfx.utils import path_utils\n'), ((343, 6, 344, 45), 'absl.logging.info', 'logging.info', ({(343, 19, 343, 63): '"""Starting infra validation (attempt %d/%d)."""', (343, 65, 343, 70): '(i + 1)', (344, 19, 344, 44): 'validation_spec.num_tries'}, {}), "('Starting infra validation (attempt %d/%d).', i + 1,\n validation_spec.num_tries)", False, 'from absl import logging\n'), ((378, 15, 378, 26), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((385, 6, 385, 42), 'absl.logging.info', 'logging.info', ({(385, 19, 385, 33): '"""Starting %r."""', (385, 35, 385, 41): 'runner'}, {}), "('Starting %r.', runner)", False, 'from absl import logging\n'), ((398, 6, 398, 42), 'absl.logging.info', 'logging.info', ({(398, 19, 398, 33): '"""Stopping %r."""', (398, 35, 398, 41): 'runner'}, {}), "('Stopping %r.', runner)", False, 'from absl import logging\n'), ((107, 21, 107, 68), 'tensorflow_serving.apis.prediction_log_pb2.ClassifyLog', 'prediction_log_pb2.ClassifyLog', (), '', False, 'from tensorflow_serving.apis import prediction_log_pb2\n'), ((257, 8, 257, 50), 'signal.signal', 'signal.signal', ({(257, 22, 257, 36): 'signal.SIGTERM', (257, 38, 257, 49): 'old_handler'}, {}), '(signal.SIGTERM, old_handler)', False, 'import signal\n'), ((319, 8, 321, 56), 'tfx.utils.model_paths.tf_serving_flavor.parse_model_path', 'tf_serving_flavor.parse_model_path', (), '', False, 'from tfx.utils.model_paths import tf_serving_flavor\n'), ((110, 20, 110, 66), 'tensorflow_serving.apis.prediction_log_pb2.RegressLog', 'prediction_log_pb2.RegressLog', (), '', False, 'from tensorflow_serving.apis import prediction_log_pb2\n'), ((149, 8, 
149, 72), 'absl.logging.warning', 'logging.warning', (), '', False, 'from absl import logging\n'), ((329, 8, 329, 62), 'tfx.utils.io_utils.copy_dir', 'io_utils.copy_dir', (), '', False, 'from tfx.utils import io_utils\n'), ((359, 8, 360, 52), 'absl.logging.exception', 'logging.exception', ({(359, 26, 359, 68): '"""Infra validation (attempt %d/%d) failed."""', (359, 70, 359, 75): '(i + 1)', (360, 26, 360, 51): 'validation_spec.num_tries'}, {}), "('Infra validation (attempt %d/%d) failed.', i + 1,\n validation_spec.num_tries)", False, 'from absl import logging\n'), ((113, 20, 113, 66), 'tensorflow_serving.apis.prediction_log_pb2.PredictLog', 'prediction_log_pb2.PredictLog', (), '', False, 'from tensorflow_serving.apis import prediction_log_pb2\n'), ((249, 19, 249, 45), 'threading.current_thread', 'threading.current_thread', ({}, {}), '()', False, 'import threading\n'), ((362, 10, 363, 66), 'absl.logging.info', 'logging.info', ({(362, 23, 363, 65): '"""Consider increasing the value of ValidationSpec.max_loading_time_seconds."""'}, {}), "(\n 'Consider increasing the value of ValidationSpec.max_loading_time_seconds.'\n )", False, 'from absl import logging\n'), ((328, 24, 328, 35), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n')] |
ykyang/org.allnix.python | learning_python/org/allnix/util.py | f9d74db2db026b20e925ac40dbca7d21b3ac0b0f | def write(message: str):
print("org.allnix", message)
def read() -> str:
"""Returns a string"""
return "org.allnix"
| [] |
happys2333/DL-2021-fall | metr-la/model/Double_C_STTN.py | e110d737d1a70c8238f2de3278e6aebce07c7a66 | # from folder workMETRLA
# MODEL CODE
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 28 10:28:06 2020
@author: wb
"""
import torch
import torch.nn as nn
import math
# from GCN_models import GCN
# from One_hot_encoder import One_hot_encoder
import torch.nn.functional as F
import numpy as np
from scipy.sparse.linalg import eigs
from Param import *
from torchsummary import summary
DEVICE = 'cuda:1'
class One_hot_encoder(nn.Module):
def __init__(self, embed_size, time_num=288):
super(One_hot_encoder, self).__init__()
self.time_num = time_num
self.I = nn.Parameter(torch.eye(time_num, time_num, requires_grad=True))
        self.onehot_Linear = nn.Linear(time_num, embed_size)  # linear layer that maps the one-hot encoding to the embedding dimension
def forward(self, i, N=25, T=12):
if i % self.time_num + T > self.time_num:
o1 = self.I[i % self.time_num:, :]
o2 = self.I[0: (i + T) % self.time_num, :]
onehot = torch.cat((o1, o2), 0)
else:
onehot = self.I[i % self.time_num: i % self.time_num + T, :]
# onehot = onehot.repeat(N, 1, 1)
onehot = onehot.expand(N, T, self.time_num)
onehot = self.onehot_Linear(onehot)
return onehot
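# Illustrative usage of One_hot_encoder showing the wrap-around of the daily time index
# (not part of the original model code; the numbers below are hypothetical).
def _one_hot_encoder_example():
    enc = One_hot_encoder(embed_size=32, time_num=288)
    onehot = enc(280, N=25, T=12)  # 280 + 12 > 288, so rows 280..287 and 0..3 are concatenated
    assert onehot.shape == (25, 12, 32)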
'''
Basic attention building blocks.
ScaledDotProductAttention is generic and is shared by the spatial and the temporal attention.
About d_k:
The input arrives as [B, N, T, C]. With C = 1, attention cannot represent the data well in a high-dimensional
space, so C is first projected to an embedded size such as 32 or 64. If d_k = 32, each head is 32-dimensional;
with multi-head attention, e.g. 8 heads, that gives 32 * 8 = 256. To match the NIPS'17 Transformer paper
exactly, use d_k = 64, heads = 8, total embedded size = 512.
'''
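# Dimension sketch for the discussion above (illustrative only; uses the values from main() below):
#   embed_size = 32, heads = 4  ->  d_k = head_dim = 32 // 4 = 8
#   Spatial attention scores:  [B, h, T, N, d_k] @ [B, h, T, d_k, N] -> [B, h, T, N, N]
#   Temporal attention scores: [B, h, N, T, d_k] @ [B, h, N, d_k, T] -> [B, h, N, T, T]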
class ScaledDotProductAttention(nn.Module):
def __init__(self):
super(ScaledDotProductAttention, self).__init__()
def forward(self, Q, K, V):
'''
Q: [batch_size, n_heads, T(Spatial) or N(Temporal), N(Spatial) or T(Temporal), d_k]
K: [batch_size, n_heads, T(Spatial) or N(Temporal), N(Spatial) or T(Temporal), d_k]
V: [batch_size, n_heads, T(Spatial) or N(Temporal), N(Spatial) or T(Temporal), d_k]
        attn_mask: [batch_size, n_heads, seq_len, seq_len]  (may be omitted)
'''
B, n_heads, len1, len2, d_k = Q.shape
scores = torch.matmul(Q, K.transpose(-1, -2)) / np.sqrt(d_k)
# scores : [batch_size, n_heads, T(Spatial) or N(Temporal), N(Spatial) or T(Temporal), N(Spatial) or T(Temporal)]
# scores.masked_fill_(attn_mask, -1e9) # Fills elements of self tensor with value where mask is True.
attn = nn.Softmax(dim=-1)(scores)
context = torch.matmul(attn,
V) # [batch_size, n_heads, T(Spatial) or N(Temporal), N(Spatial) or T(Temporal), d_k]]
return context
'''
'S' stands for spatial; MultiHeadAttention is the multi-head attention mechanism (a brief shape check follows this class).
'''
class SMultiHeadAttention(nn.Module):
def __init__(self, embed_size, heads):
super(SMultiHeadAttention, self).__init__()
self.embed_size = embed_size
self.heads = heads
self.head_dim = embed_size // heads
assert (
self.head_dim * heads == embed_size
), "Embedding size needs to be divisible by heads"
        # Use Linear layers as the projection matrices.
        # If we have multiple heads, do we need separate matrices??? (a single Linear of size heads * head_dim covers all heads)
self.W_V = nn.Linear(self.embed_size, self.head_dim * self.heads, bias=False)
self.W_K = nn.Linear(self.embed_size, self.head_dim * self.heads, bias=False)
self.W_Q = nn.Linear(self.embed_size, self.head_dim * self.heads, bias=False)
self.fc_out = nn.Linear(heads * self.head_dim, embed_size)
def forward(self, input_Q, input_K, input_V):
'''
input_Q: [batch_size, N, T, C]
input_K: [batch_size, N, T, C]
input_V: [batch_size, N, T, C]
attn_mask: [batch_size, seq_len, seq_len]
'''
B, N, T, C = input_Q.shape
# [B, N, T, C] --> [B, N, T, h * d_k] --> [B, N, T, h, d_k] --> [B, h, T, N, d_k]
        # The (N, d_k) dims below are the ones fed into ScaledDotProductAttention, i.e. this is spatial attention.
        Q = self.W_Q(input_Q).view(B, N, T, self.heads, self.head_dim).transpose(1, 3)  # Q: [B, h, T, N, d_k]
K = self.W_K(input_K).view(B, N, T, self.heads, self.head_dim).transpose(1, 3) # K: [B, h, T, N, d_k]
V = self.W_V(input_V).view(B, N, T, self.heads, self.head_dim).transpose(1, 3) # V: [B, h, T, N, d_k]
# attn_mask = attn_mask.unsqueeze(1).repeat(1, n_heads, 1, 1) # attn_mask : [batch_size, n_heads, seq_len, seq_len] seq_len = N
# context: [batch_size, n_heads, len_q, d_v], attn: [batch_size, n_heads, len_q, len_k]
context = ScaledDotProductAttention()(Q, K, V) # [B, h, T, N, d_k]
context = context.permute(0, 3, 2, 1, 4) # [B, N, T, h, d_k]
context = context.reshape(B, N, T, self.heads * self.head_dim) # [B, N, T, C]
# context = context.transpose(1, 2).reshape(batch_size, -1, n_heads * d_v) # context: [batch_size, len_q, n_heads * d_v]
output = self.fc_out(context) # [batch_size, len_q, d_model]
return output
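# Illustrative shape check for SMultiHeadAttention (not part of the original model code;
# the tensor sizes below are hypothetical).
def _smultihead_shape_check():
    sattn = SMultiHeadAttention(embed_size=32, heads=4)
    x = torch.randn(2, 25, 12, 32)  # [B, N, T, C]
    y = sattn(x, x, x)              # attention over the N (node) axis
    assert y.shape == (2, 25, 12, 32)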
'''
'T' stands for temporal; MultiHeadAttention is the multi-head attention mechanism (a brief shape check follows this class).
'''
class TMultiHeadAttention(nn.Module):
def __init__(self, embed_size, heads):
super(TMultiHeadAttention, self).__init__()
self.embed_size = embed_size
self.heads = heads
self.head_dim = embed_size // heads
assert (
self.head_dim * heads == embed_size
), "Embedding size needs to be divisible by heads"
        # Use Linear layers as the projection matrices.
        # If we have multiple heads, do we need separate matrices??? (a single Linear of size heads * head_dim covers all heads)
self.W_V = nn.Linear(self.embed_size, self.head_dim * self.heads, bias=False)
self.W_K = nn.Linear(self.embed_size, self.head_dim * self.heads, bias=False)
self.W_Q = nn.Linear(self.embed_size, self.head_dim * self.heads, bias=False)
self.fc_out = nn.Linear(heads * self.head_dim, embed_size)
def forward(self, input_Q, input_K, input_V):
'''
input_Q: [batch_size, N, T, C]
input_K: [batch_size, N, T, C]
input_V: [batch_size, N, T, C]
attn_mask: [batch_size, seq_len, seq_len]
'''
B, N, T, C = input_Q.shape
# [B, N, T, C] --> [B, N, T, h * d_k] --> [B, N, T, h, d_k] --> [B, h, N, T, d_k]
        # The (T, d_k) dims below are the ones fed into ScaledDotProductAttention, i.e. this is temporal attention.
        Q = self.W_Q(input_Q).view(B, N, T, self.heads, self.head_dim).permute(0, 3, 1, 2, 4)  # Q: [B, h, N, T, d_k]
K = self.W_K(input_K).view(B, N, T, self.heads, self.head_dim).permute(0, 3, 1, 2, 4) # K: [B, h, N, T, d_k]
V = self.W_V(input_V).view(B, N, T, self.heads, self.head_dim).permute(0, 3, 1, 2, 4) # V: [B, h, N, T, d_k]
# attn_mask = attn_mask.unsqueeze(1).repeat(1, n_heads, 1, 1) # attn_mask : [batch_size, n_heads, seq_len, seq_len]
# context: [batch_size, n_heads, len_q, d_v], attn: [batch_size, n_heads, len_q, len_k]
context = ScaledDotProductAttention()(Q, K, V) # [B, h, N, T, d_k]
context = context.permute(0, 2, 3, 1, 4) # [B, N, T, h, d_k]
context = context.reshape(B, N, T, self.heads * self.head_dim) # [B, N, T, C]
# context = context.transpose(1, 2).reshape(batch_size, -1, n_heads * d_v) # context: [batch_size, len_q, n_heads * d_v]
output = self.fc_out(context) # [batch_size, len_q, d_model]
return output
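# TMultiHeadAttention mirrors the spatial module above but attends over the T (time) axis;
# an equally hypothetical shape check (not part of the original model code).
def _tmultihead_shape_check():
    tattn = TMultiHeadAttention(embed_size=32, heads=4)
    x = torch.randn(2, 25, 12, 32)  # [B, N, T, C]
    y = tattn(x, x, x)              # attention over the T (time) axis
    assert y.shape == (2, 25, 12, 32)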
class STransformer(nn.Module):
def __init__(self, embed_size, heads, adj, cheb_K, dropout, forward_expansion):
super(STransformer, self).__init__()
# Spatial Embedding
self.adj = adj
self.D_S = adj.to(DEVICE)
self.embed_liner = nn.Linear(adj.shape[0], embed_size)
self.attention = SMultiHeadAttention(embed_size, heads)
self.norm1 = nn.LayerNorm(embed_size)
self.norm2 = nn.LayerNorm(embed_size)
self.feed_forward = nn.Sequential(
nn.Linear(embed_size, forward_expansion * embed_size),
nn.ReLU(),
nn.Linear(forward_expansion * embed_size, embed_size),
)
        # GCN-related layer (the GCN branch itself is not instantiated in this version)
        self.norm_adj = nn.InstanceNorm2d(1)  # normalize the adjacency matrix
self.dropout = nn.Dropout(dropout)
self.fs = nn.Linear(embed_size, embed_size)
self.fg = nn.Linear(embed_size, embed_size)
def forward(self, value, key, query):
# value, key, query: [N, T, C] [B, N, T, C]
        # Spatial embedding part
# N, T, C = query.shape
# D_S = self.embed_liner(self.D_S) # [N, C]
        # D_S = D_S.expand(T, N, C) # [T, N, C], i.e. replicate T copies along the first dim
# D_S = D_S.permute(1, 0, 2) #[N, T, C]
B, N, T, C = query.shape
D_S = self.embed_liner(self.D_S) # [N, C] ---position encoding
        D_S = D_S.expand(B, T, N, C)  # [B, T, N, C]; replicate T copies along dim 1 and B copies along dim 0
D_S = D_S.permute(0, 2, 1, 3) # [B, N, T, C]
        # Spatial Transformer part
query = query + D_S
attention = self.attention(query, query, query) # (B, N, T, C)
# Add skip connection, run through normalization and finally dropout
x = self.dropout(self.norm1(attention + query))
forward = self.feed_forward(x)
U_S = self.dropout(self.norm2(forward + x))
        # Fuse STransformer and GCN (Eqs. 7-8); the GCN output is omitted here, so (1 - g) enters directly
g = torch.sigmoid(self.fs(U_S)) # (7)
out = g * U_S + (1 - g) # (8)
return out # (B, N, T, C)
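# Gating sketch for Eqs. (7)-(8) above (illustrative note, not original code): self.fs maps C -> C, so
# g = sigmoid(fs(U_S)) has the same [B, N, T, C] shape as U_S, and out = g * U_S + (1 - g) blends U_S
# with a constant 1 in place of the omitted GCN output.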
class TTransformer(nn.Module):
def __init__(self, embed_size, heads, time_num, dropout, forward_expansion):
super(TTransformer, self).__init__()
# Temporal embedding One hot
self.time_num = time_num
        # self.one_hot = One_hot_encoder(embed_size, time_num)  # one-hot temporal embedding, or alternatively
        self.temporal_embedding = nn.Embedding(time_num, embed_size)  # temporal embedding via nn.Embedding
self.attention = TMultiHeadAttention(embed_size, heads)
self.norm1 = nn.LayerNorm(embed_size)
self.norm2 = nn.LayerNorm(embed_size)
self.feed_forward = nn.Sequential(
nn.Linear(embed_size, forward_expansion * embed_size),
nn.ReLU(),
nn.Linear(forward_expansion * embed_size, embed_size),
)
self.dropout = nn.Dropout(dropout)
def forward(self, value, key, query, t):
B, N, T, C = query.shape
        # D_T = self.one_hot(t, N, T)  # one-hot temporal embedding, or alternatively
        D_T = self.temporal_embedding(torch.arange(0, T).to(DEVICE))  # temporal embedding via nn.Embedding
D_T = D_T.expand(B, N, T, C)
        # Add the temporal embedding to the query; the original paper uses concatenation instead.
query = query + D_T
attention = self.attention(query, query, query)
# Add skip connection, run through normalization and finally dropout
x = self.dropout(self.norm1(attention + query))
forward = self.feed_forward(x)
out = self.dropout(self.norm2(forward + x))
return out
### STBlock
class STTransformerBlock(nn.Module):
def __init__(self, embed_size, heads, adj, time_num, cheb_K, dropout, forward_expansion):
super(STTransformerBlock, self).__init__()
self.STransformer = STransformer(embed_size, heads, adj, cheb_K, dropout, forward_expansion)
self.TTransformer = TTransformer(embed_size, heads, time_num, dropout, forward_expansion)
self.norm1 = nn.LayerNorm(embed_size)
self.norm2 = nn.LayerNorm(embed_size)
self.dropout = nn.Dropout(dropout)
def forward(self, value, key, query, t):
# value, key, query: [N, T, C] [B, N, T, C]
        # Add skip connection, run through normalization and finally dropout
x1 = self.norm1(self.STransformer(value, key, query) + query) # (B, N, T, C)
x2 = self.dropout(self.norm2(self.TTransformer(x1, x1, x1, t) + x1))
return x2
### Encoder
class Encoder(nn.Module):
    # Stack multiple ST-Transformer blocks
def __init__(
self,
embed_size,
num_layers,
heads,
adj,
time_num,
device,
forward_expansion,
cheb_K,
dropout,
):
super(Encoder, self).__init__()
self.embed_size = embed_size
self.device = device
self.layers = nn.ModuleList(
[
STTransformerBlock(
embed_size,
heads,
adj,
time_num,
cheb_K,
dropout=dropout,
forward_expansion=forward_expansion
)
for _ in range(num_layers)
]
)
self.dropout = nn.Dropout(dropout)
def forward(self, x, t):
# x: [N, T, C] [B, N, T, C]
out = self.dropout(x)
# In the Encoder the query, key, value are all the same.
for layer in self.layers:
out = layer(out, out, out, t)
return out
### Transformer
class Transformer(nn.Module):
def __init__(
self,
adj,
embed_size,
num_layers,
heads,
time_num,
forward_expansion, ##?
cheb_K,
dropout,
device=DEVICE
):
super(Transformer, self).__init__()
self.encoder = Encoder(
embed_size,
num_layers,
heads,
adj,
time_num,
device,
forward_expansion,
cheb_K,
dropout
)
self.device = device
def forward(self, src, t):
        ## src: [N, T, C]  [B, N, T, C]
enc_src = self.encoder(src, t)
return enc_src # [B, N, T, C]
### ST Transformer: Total Model
class STTransformer(nn.Module):
def __init__(
self,
adj,
in_channels,
embed_size,
time_num,
num_layers,
T_dim,
output_T_dim,
heads,
cheb_K,
forward_expansion,
dropout=0
):
super(STTransformer, self).__init__()
        self.forward_expansion = forward_expansion  # embedded size of the feed-forward layer: 8, 16, 32, ..., 1024
        # The first 1x1 convolution expands the channel dimension
        self.conv1 = nn.Conv2d(in_channels, embed_size, 1)  # expand the input channels up to the embedded size
self.Transformer = Transformer(
adj,
embed_size,
num_layers,
heads,
time_num,
forward_expansion,
cheb_K,
dropout=0
)
        # Shrink the time dimension, e.g. from T_dim=12 down to output_T_dim=3 (12-step input, 3-step output), or keep 12-in / 12-out
self.conv2 = nn.Conv2d(T_dim, output_T_dim, 1)
        # Reduce the channel count back down to the input channel dimension.
self.conv3 = nn.Conv2d(embed_size, in_channels, 1)
        self.relu = nn.ReLU()  # works together with the normalization layers to help prevent exploding/vanishing gradients.
def forward(self, x):
# platform: (CHANNEL, TIMESTEP_IN, N_NODE)
# input x shape[ C, N, T]
        # C: number of channels. N: number of sensors. T: number of time steps
# x = x.unsqueeze(0)
# x = np.transpose(x,(0,2,1)).to(DEVICE)
        input_Transformer = self.conv1(x)  # conv expects the channel dim in position 1, i.e. the layout must be [B, C, ...]
# input_Transformer = input_Transformer.squeeze(0)
# input_Transformer = input_Transformer.permute(1, 2, 0)
input_Transformer = input_Transformer.permute(0, 2, 3, 1)
# input_Transformer shape[N, T, C] [B, N, T, C]
output_Transformer = self.Transformer(input_Transformer, self.forward_expansion) # [B, N, T, C]
output_Transformer = output_Transformer.permute(0, 2, 1, 3)
# output_Transformer shape[B, T, N, C]
# output_Transformer = output_Transformer.unsqueeze(0)
        out = self.relu(self.conv2(output_Transformer))  # out shape here: [B, output_T_dim, N, C]
        out = out.permute(0, 3, 2, 1)  # out shape here: [B, C, N, output_T_dim]
        out = self.conv3(out)  # out shape here: [B, in_channels, N, output_T_dim]
# out = out.squeeze(1)
out = out.permute(0, 1, 3, 2)
# print('out: ',out.shape)
        return out  # return shape: [B, in_channels, output_T_dim, N]
def print_params(model_name, model):
param_count = 0
for name, param in model.named_parameters():
if param.requires_grad:
param_count += param.numel()
print(f'{model_name}, {param_count} trainable parameters in total.')
return
import sys
import pandas as pd
def main():
GPU = sys.argv[-1] if len(sys.argv) == 2 else '1'
device = torch.device("cuda:{}".format(GPU)) if torch.cuda.is_available() else torch.device("cpu")
in_channels = 2 # Channels of input
embed_size = 32 # Dimension of hidden embedding features
time_num = 288
num_layers = 2 # Number of ST Block
T_dim = 12 # Input length, should be the same as prepareData.py
output_T_dim = 12 # Output Expected length
heads = 4 # Number of Heads in MultiHeadAttention
cheb_K = 2 # Order for Chebyshev Polynomials (Eq 2)
forward_expansion = 32 # Dimension of Feed Forward Network: embed_size --> embed_size * forward_expansion --> embed_size
dropout = 0
A = pd.read_csv(ADJPATH).values
A = torch.Tensor(A)
### Construct Network
model = STTransformer(
A,
in_channels,
embed_size,
time_num,
num_layers,
T_dim,
output_T_dim,
heads,
cheb_K,
forward_expansion,
dropout).to(DEVICE)
summary(model, (2, N_NODE, TIMESTEP_IN), device=device)
print_params('STTransformer', model)
if __name__ == '__main__':
main()
'''
Assignments:
1. Design an only-Spatial-Transformer version and produce PEMS-BAY results, 12 steps in, 12 steps out.
2. Design an only-Temporal-Transformer version and produce PEMS-BAY results, 12 steps in, 12 steps out.
3. Design a Temporal-Spatial Transformer version and produce PEMS-BAY results, 12 steps in, 12 steps out.
4. Once the versions above are done, upgrade all of them so that the C dimension grows from 1 to 2, the extra
channel being the timestamp (see the reference for how to encode the timestamp).
That is, the input changes from B, N, T, C=1 to B, N, T, C=2; then rerun 1, 2, 3 with the upgraded inputs,
12 steps in, 12 steps out, on the PEMS-BAY dataset.
A sketch of such variants is given right after this block.
'''
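# --------------------------------------------------------------------------------------
# Illustrative sketch for the assignments above (not part of the original authors' code).
# The class names are hypothetical; the sketch only reuses STransformer / TTransformer
# defined earlier. To actually use a variant, swap the block class constructed inside
# Encoder.__init__ (which currently hard-codes STTransformerBlock).
# --------------------------------------------------------------------------------------
class SpatialOnlyBlock(nn.Module):
    """Assignment 1: keep only the spatial transformer."""

    def __init__(self, embed_size, heads, adj, time_num, cheb_K, dropout, forward_expansion):
        super(SpatialOnlyBlock, self).__init__()
        self.STransformer = STransformer(embed_size, heads, adj, cheb_K, dropout, forward_expansion)
        self.norm = nn.LayerNorm(embed_size)
        self.dropout = nn.Dropout(dropout)

    def forward(self, value, key, query, t):  # same interface as STTransformerBlock
        return self.dropout(self.norm(self.STransformer(value, key, query) + query))


class TemporalOnlyBlock(nn.Module):
    """Assignment 2: keep only the temporal transformer."""

    def __init__(self, embed_size, heads, adj, time_num, cheb_K, dropout, forward_expansion):
        super(TemporalOnlyBlock, self).__init__()
        self.TTransformer = TTransformer(embed_size, heads, time_num, dropout, forward_expansion)
        self.norm = nn.LayerNorm(embed_size)
        self.dropout = nn.Dropout(dropout)

    def forward(self, value, key, query, t):
        return self.dropout(self.norm(self.TTransformer(value, key, query, t) + query))

# Assignment 4: the existing STTransformer already accepts a 2-channel input; construct it with
# in_channels=2 (as main() above does) and feed tensors shaped [B, 2, N_NODE, TIMESTEP_IN], where
# the second channel carries the timestamp feature.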
| [((469, 8, 469, 23), 'torch.Tensor', 'torch.Tensor', ({(469, 21, 469, 22): 'A'}, {}), '(A)', False, 'import torch\n'), ((484, 4, 484, 59), 'torchsummary.summary', 'summary', (), '', False, 'from torchsummary import summary\n'), ((32, 29, 32, 60), 'torch.nn.Linear', 'nn.Linear', ({(32, 39, 32, 47): 'time_num', (32, 49, 32, 59): 'embed_size'}, {}), '(time_num, embed_size)', True, 'import torch.nn as nn\n'), ((78, 18, 79, 33), 'torch.matmul', 'torch.matmul', ({(78, 31, 78, 35): 'attn', (79, 31, 79, 32): 'V'}, {}), '(attn, V)', False, 'import torch\n'), ((103, 19, 103, 85), 'torch.nn.Linear', 'nn.Linear', (), '', True, 'import torch.nn as nn\n'), ((104, 19, 104, 85), 'torch.nn.Linear', 'nn.Linear', (), '', True, 'import torch.nn as nn\n'), ((105, 19, 105, 85), 'torch.nn.Linear', 'nn.Linear', (), '', True, 'import torch.nn as nn\n'), ((106, 22, 106, 66), 'torch.nn.Linear', 'nn.Linear', ({(106, 32, 106, 53): 'heads * self.head_dim', (106, 55, 106, 65): 'embed_size'}, {}), '(heads * self.head_dim, embed_size)', True, 'import torch.nn as nn\n'), ((153, 19, 153, 85), 'torch.nn.Linear', 'nn.Linear', (), '', True, 'import torch.nn as nn\n'), ((154, 19, 154, 85), 'torch.nn.Linear', 'nn.Linear', (), '', True, 'import torch.nn as nn\n'), ((155, 19, 155, 85), 'torch.nn.Linear', 'nn.Linear', (), '', True, 'import torch.nn as nn\n'), ((156, 22, 156, 66), 'torch.nn.Linear', 'nn.Linear', ({(156, 32, 156, 53): 'heads * self.head_dim', (156, 55, 156, 65): 'embed_size'}, {}), '(heads * self.head_dim, embed_size)', True, 'import torch.nn as nn\n'), ((189, 27, 189, 62), 'torch.nn.Linear', 'nn.Linear', ({(189, 37, 189, 49): 'adj.shape[0]', (189, 51, 189, 61): 'embed_size'}, {}), '(adj.shape[0], embed_size)', True, 'import torch.nn as nn\n'), ((192, 21, 192, 45), 'torch.nn.LayerNorm', 'nn.LayerNorm', ({(192, 34, 192, 44): 'embed_size'}, {}), '(embed_size)', True, 'import torch.nn as nn\n'), ((193, 21, 193, 45), 'torch.nn.LayerNorm', 'nn.LayerNorm', ({(193, 34, 193, 44): 'embed_size'}, {}), '(embed_size)', True, 'import torch.nn as nn\n'), ((202, 24, 202, 44), 'torch.nn.InstanceNorm2d', 'nn.InstanceNorm2d', ({(202, 42, 202, 43): '1'}, {}), '(1)', True, 'import torch.nn as nn\n'), ((204, 23, 204, 42), 'torch.nn.Dropout', 'nn.Dropout', ({(204, 34, 204, 41): 'dropout'}, {}), '(dropout)', True, 'import torch.nn as nn\n'), ((205, 18, 205, 51), 'torch.nn.Linear', 'nn.Linear', ({(205, 28, 205, 38): 'embed_size', (205, 40, 205, 50): 'embed_size'}, {}), '(embed_size, embed_size)', True, 'import torch.nn as nn\n'), ((206, 18, 206, 51), 'torch.nn.Linear', 'nn.Linear', ({(206, 28, 206, 38): 'embed_size', (206, 40, 206, 50): 'embed_size'}, {}), '(embed_size, embed_size)', True, 'import torch.nn as nn\n'), ((242, 34, 242, 68), 'torch.nn.Embedding', 'nn.Embedding', ({(242, 47, 242, 55): 'time_num', (242, 57, 242, 67): 'embed_size'}, {}), '(time_num, embed_size)', True, 'import torch.nn as nn\n'), ((245, 21, 245, 45), 'torch.nn.LayerNorm', 'nn.LayerNorm', ({(245, 34, 245, 44): 'embed_size'}, {}), '(embed_size)', True, 'import torch.nn as nn\n'), ((246, 21, 246, 45), 'torch.nn.LayerNorm', 'nn.LayerNorm', ({(246, 34, 246, 44): 'embed_size'}, {}), '(embed_size)', True, 'import torch.nn as nn\n'), ((253, 23, 253, 42), 'torch.nn.Dropout', 'nn.Dropout', ({(253, 34, 253, 41): 'dropout'}, {}), '(dropout)', True, 'import torch.nn as nn\n'), ((281, 21, 281, 45), 'torch.nn.LayerNorm', 'nn.LayerNorm', ({(281, 34, 281, 44): 'embed_size'}, {}), '(embed_size)', True, 'import torch.nn as nn\n'), ((282, 21, 282, 45), 'torch.nn.LayerNorm', 
'nn.LayerNorm', ({(282, 34, 282, 44): 'embed_size'}, {}), '(embed_size)', True, 'import torch.nn as nn\n'), ((283, 23, 283, 42), 'torch.nn.Dropout', 'nn.Dropout', ({(283, 34, 283, 41): 'dropout'}, {}), '(dropout)', True, 'import torch.nn as nn\n'), ((326, 23, 326, 42), 'torch.nn.Dropout', 'nn.Dropout', ({(326, 34, 326, 41): 'dropout'}, {}), '(dropout)', True, 'import torch.nn as nn\n'), ((394, 21, 394, 58), 'torch.nn.Conv2d', 'nn.Conv2d', ({(394, 31, 394, 42): 'in_channels', (394, 44, 394, 54): 'embed_size', (394, 56, 394, 57): '1'}, {}), '(in_channels, embed_size, 1)', True, 'import torch.nn as nn\n'), ((407, 21, 407, 54), 'torch.nn.Conv2d', 'nn.Conv2d', ({(407, 31, 407, 36): 'T_dim', (407, 38, 407, 50): 'output_T_dim', (407, 52, 407, 53): '1'}, {}), '(T_dim, output_T_dim, 1)', True, 'import torch.nn as nn\n'), ((409, 21, 409, 58), 'torch.nn.Conv2d', 'nn.Conv2d', ({(409, 31, 409, 41): 'embed_size', (409, 43, 409, 54): 'in_channels', (409, 56, 409, 57): '1'}, {}), '(embed_size, in_channels, 1)', True, 'import torch.nn as nn\n'), ((410, 20, 410, 29), 'torch.nn.ReLU', 'nn.ReLU', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((457, 52, 457, 77), 'torch.cuda.is_available', 'torch.cuda.is_available', ({}, {}), '()', False, 'import torch\n'), ((457, 83, 457, 102), 'torch.device', 'torch.device', ({(457, 96, 457, 101): '"""cpu"""'}, {}), "('cpu')", False, 'import torch\n'), ((468, 8, 468, 28), 'pandas.read_csv', 'pd.read_csv', ({(468, 20, 468, 27): 'ADJPATH'}, {}), '(ADJPATH)', True, 'import pandas as pd\n'), ((31, 30, 31, 79), 'torch.eye', 'torch.eye', (), '', False, 'import torch\n'), ((39, 21, 39, 43), 'torch.cat', 'torch.cat', ({(39, 31, 39, 39): '(o1, o2)', (39, 41, 39, 42): '0'}, {}), '((o1, o2), 0)', False, 'import torch\n'), ((73, 56, 73, 68), 'numpy.sqrt', 'np.sqrt', ({(73, 64, 73, 67): 'd_k'}, {}), '(d_k)', True, 'import numpy as np\n'), ((77, 15, 77, 33), 'torch.nn.Softmax', 'nn.Softmax', (), '', True, 'import torch.nn as nn\n'), ((196, 12, 196, 65), 'torch.nn.Linear', 'nn.Linear', ({(196, 22, 196, 32): 'embed_size', (196, 34, 196, 64): 'forward_expansion * embed_size'}, {}), '(embed_size, forward_expansion * embed_size)', True, 'import torch.nn as nn\n'), ((197, 12, 197, 21), 'torch.nn.ReLU', 'nn.ReLU', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((198, 12, 198, 65), 'torch.nn.Linear', 'nn.Linear', ({(198, 22, 198, 52): 'forward_expansion * embed_size', (198, 54, 198, 64): 'embed_size'}, {}), '(forward_expansion * embed_size, embed_size)', True, 'import torch.nn as nn\n'), ((249, 12, 249, 65), 'torch.nn.Linear', 'nn.Linear', ({(249, 22, 249, 32): 'embed_size', (249, 34, 249, 64): 'forward_expansion * embed_size'}, {}), '(embed_size, forward_expansion * embed_size)', True, 'import torch.nn as nn\n'), ((250, 12, 250, 21), 'torch.nn.ReLU', 'nn.ReLU', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((251, 12, 251, 65), 'torch.nn.Linear', 'nn.Linear', ({(251, 22, 251, 52): 'forward_expansion * embed_size', (251, 54, 251, 64): 'embed_size'}, {}), '(forward_expansion * embed_size, embed_size)', True, 'import torch.nn as nn\n'), ((259, 38, 259, 56), 'torch.arange', 'torch.arange', ({(259, 51, 259, 52): '0', (259, 54, 259, 55): 'T'}, {}), '(0, T)', False, 'import torch\n')] |
daniel-chuang/tetris | tetrisanim3.py | 518bd7b1fd80babc34a1da323b2f50d88c31ed4a | # animation for medium article
from termcolor import colored
import time
import imageio
import pyautogui
pyautogui.FAILSAFE = True
matrix = [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 2, 2, 2, 0, 0, 0],
[0, 0, 0, 0, 0, 2, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 1, 1, 1, 1, 1, 1, 1, 0],
[1, 0, 1, 0, 0, 0, 0, 1, 0, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 1, 0, 1, 0, 1, 0, 0, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
lst = set()
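# lst collects the indices of rows that are completely filled (no zeroes left)
# i sweeps down the board and z sweeps across it; each (i, z) step reprints the
# board with the current cell highlighted in green and screenshots it as a frame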
for i in range(21):
for z in range(10):
for row in range(len(matrix)):
if 0 not in matrix[row]:
lst.add(row)
if (i == 20 or i > row) and row in lst:
print(colored("1 " * 10, "green"))
else:
for element in range(len(matrix[row])):
if i == row and z == element:
print(colored(matrix[row][element], "green"), end=" ", flush=False)
elif matrix[row][element] == 1:
print(colored(matrix[row][element], "red"), end=" ", flush=False)
elif matrix[row][element] == 2:
print(colored(matrix[row][element], "blue"), end=" ", flush=False)
else:
print(matrix[row][element], end=" ", flush=False)
print("")
print("")
        # capture this frame: macOS region-screenshot shortcut (Cmd+Shift+4), then drag to select the board area
pyautogui.moveTo(338, 580, duration = 0)
pyautogui.hotkey('command', 'shift', '4')
pyautogui.dragTo(547, 1000, duration = 0, button = 'left')
| [((52, 8, 52, 48), 'pyautogui.moveTo', 'pyautogui.moveTo', (), '', False, 'import pyautogui\n'), ((53, 8, 53, 49), 'pyautogui.hotkey', 'pyautogui.hotkey', ({(53, 25, 53, 34): '"""command"""', (53, 36, 53, 43): '"""shift"""', (53, 45, 53, 48): '"""4"""'}, {}), "('command', 'shift', '4')", False, 'import pyautogui\n'), ((54, 8, 54, 66), 'pyautogui.dragTo', 'pyautogui.dragTo', (), '', False, 'import pyautogui\n'), ((38, 22, 38, 49), 'termcolor.colored', 'colored', ({(38, 30, 38, 39): "('1 ' * 10)", (38, 41, 38, 48): '"""green"""'}, {}), "('1 ' * 10, 'green')", False, 'from termcolor import colored\n'), ((42, 30, 42, 68), 'termcolor.colored', 'colored', ({(42, 38, 42, 58): 'matrix[row][element]', (42, 60, 42, 67): '"""green"""'}, {}), "(matrix[row][element], 'green')", False, 'from termcolor import colored\n'), ((44, 30, 44, 66), 'termcolor.colored', 'colored', ({(44, 38, 44, 58): 'matrix[row][element]', (44, 60, 44, 65): '"""red"""'}, {}), "(matrix[row][element], 'red')", False, 'from termcolor import colored\n'), ((46, 30, 46, 67), 'termcolor.colored', 'colored', ({(46, 38, 46, 58): 'matrix[row][element]', (46, 60, 46, 66): '"""blue"""'}, {}), "(matrix[row][element], 'blue')", False, 'from termcolor import colored\n')] |
destodasoftware/kately_api | inventories/models.py | 89e4e80a93ebf8e5d2f2981d108ce5efde75d0dd | from django.db import models
from products.models import Product
from utils.models import Utility
class Inventory(Utility):
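    """One inventory record (a stock receipt or adjustment) grouping InventoryItem rows."""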
inventory_number = models.CharField(unique=True, max_length=100, blank=True, null=True)
supplier = models.CharField(max_length=100, blank=True, null=True)
user = models.ForeignKey('auth.User', on_delete=models.SET_NULL, blank=True, null=True)
is_adjusment = models.BooleanField(default=False)
def __str__(self):
return self.inventory_number
class InventoryItem(Utility):
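    """A single product line within an Inventory record, with its quantity."""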
inventory = models.ForeignKey(Inventory, on_delete=models.CASCADE)
product = models.ForeignKey(Product, on_delete=models.CASCADE)
quantity = models.PositiveIntegerField(default=1)
def __str__(self):
return self.product.name
| [((8, 23, 8, 91), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((9, 15, 9, 70), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((10, 11, 10, 91), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import models\n'), ((11, 19, 11, 53), 'django.db.models.BooleanField', 'models.BooleanField', (), '', False, 'from django.db import models\n'), ((18, 16, 18, 70), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import models\n'), ((19, 14, 19, 66), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import models\n'), ((20, 15, 20, 53), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', (), '', False, 'from django.db import models\n')] |
stephken/Hierarchical_assessment | hierarchical_app/views.py | 537219903357d97d1354a8f262badba9729fb5e0 | from django.shortcuts import render
from hierarchical_app.models import Folder
# Create your views here.
def index_view(request):
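    """Render the landing page with a welcome message and all Folder objects."""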
return render(request, 'index.html', {'welcome': "Welcome to Kens Hierarchical Data and You assessment", 'folders': Folder.objects.all()})
| [((7, 120, 7, 140), 'hierarchical_app.models.Folder.objects.all', 'Folder.objects.all', ({}, {}), '()', False, 'from hierarchical_app.models import Folder\n')] |
ramizdundar/Chexpert | bin/train_vit.py | 6a5f005f1df421538182ad8497725b78e6de29be | import sys
import os
import argparse
import logging
import json
import time
import subprocess
from shutil import copyfile
import numpy as np
from sklearn import metrics
from easydict import EasyDict as edict
import torch
from torch.utils.data import DataLoader
import torch.nn.functional as F
from torch.nn import DataParallel
from vit_pytorch import ViT
from tensorboardX import SummaryWriter
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../')
torch.manual_seed(0)
torch.cuda.manual_seed_all(0)
from data.dataset import ImageDataset # noqa
from model.classifier import Classifier # noqa
from utils.misc import lr_schedule # noqa
from model.utils import get_optimizer # noqa
parser = argparse.ArgumentParser(description='Train model')
parser.add_argument('cfg_path', default=None, metavar='CFG_PATH', type=str,
help="Path to the config file in yaml format")
parser.add_argument('save_path', default=None, metavar='SAVE_PATH', type=str,
help="Path to the saved models")
parser.add_argument('--num_workers', default=8, type=int, help="Number of "
"workers for each data loader")
parser.add_argument('--device_ids', default='0,1,2,3', type=str,
help="GPU indices ""comma separated, e.g. '0,1' ")
parser.add_argument('--pre_train', default=None, type=str, help="If set, load "
                    "parameters from this pretrained model")
parser.add_argument('--resume', default=0, type=int, help="If resume from "
"previous run")
parser.add_argument('--logtofile', default=False, type=bool, help="Save log "
"in save_path/log.txt if set True")
parser.add_argument('--verbose', default=False, type=bool, help="Detail info")
def get_loss(output, target, index, device, cfg):
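    """Compute the binary cross-entropy loss and accuracy for the task at `index`."""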
if cfg.criterion == 'BCE':
for num_class in cfg.num_classes:
assert num_class == 1
target = target[:, index].view(-1)
pos_weight = torch.from_numpy(
np.array(cfg.pos_weight,
dtype=np.float32)).to(device).type_as(target)
if cfg.batch_weight:
if target.sum() == 0:
loss = torch.tensor(0., requires_grad=True).to(device)
else:
weight = (target.size()[0] - target.sum()) / target.sum()
loss = F.binary_cross_entropy_with_logits(
output[index].view(-1), target, pos_weight=weight)
else:
loss = F.binary_cross_entropy_with_logits(
output[index].view(-1), target, pos_weight=pos_weight[index])
label = torch.sigmoid(output[index].view(-1)).ge(0.5).float()
acc = (target == label).float().sum() / len(label)
else:
raise Exception('Unknown criterion : {}'.format(cfg.criterion))
return (loss, acc)
def train_epoch(summary, summary_dev, cfg, args, model, dataloader,
dataloader_dev, optimizer, summary_writer, best_dict,
dev_header):
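    """Train for one epoch; every cfg.test_every steps evaluate on dev and checkpoint the best models."""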
torch.set_grad_enabled(True)
model.train()
device_ids = list(map(int, args.device_ids.split(',')))
device = torch.device('cuda:{}'.format(device_ids[0]))
steps = len(dataloader)
dataiter = iter(dataloader)
label_header = dataloader.dataset._label_header
num_tasks = len(cfg.num_classes)
time_now = time.time()
loss_sum = np.zeros(num_tasks)
acc_sum = np.zeros(num_tasks)
for step in range(steps):
image, target = next(dataiter)
image = image.to(device)
target = target.to(device)
# output, logit_map = model(image)
output = model(image)
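        # split the (batch, num_tasks) logit matrix into one (batch, 1) tensor per task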
output = [torch.unsqueeze(i, 1) for i in output.T]
        # accumulate the loss over all classification tasks (one binary output per task)
loss = 0
for t in range(num_tasks):
loss_t, acc_t = get_loss(output, target, t, device, cfg)
loss += loss_t
loss_sum[t] += loss_t.item()
acc_sum[t] += acc_t.item()
optimizer.zero_grad()
loss.backward()
optimizer.step()
summary['step'] += 1
if summary['step'] % cfg.log_every == 0:
time_spent = time.time() - time_now
time_now = time.time()
loss_sum /= cfg.log_every
acc_sum /= cfg.log_every
loss_str = ' '.join(map(lambda x: '{:.5f}'.format(x), loss_sum))
acc_str = ' '.join(map(lambda x: '{:.3f}'.format(x), acc_sum))
logging.info(
'{}, Train, Epoch : {}, Step : {}, Loss : {}, '
'Acc : {}, Run Time : {:.2f} sec'
.format(time.strftime("%Y-%m-%d %H:%M:%S"),
summary['epoch'] + 1, summary['step'], loss_str,
acc_str, time_spent))
for t in range(num_tasks):
summary_writer.add_scalar(
'train/loss_{}'.format(label_header[t]), loss_sum[t],
summary['step'])
summary_writer.add_scalar(
'train/acc_{}'.format(label_header[t]), acc_sum[t],
summary['step'])
loss_sum = np.zeros(num_tasks)
acc_sum = np.zeros(num_tasks)
if summary['step'] % cfg.test_every == 0:
time_now = time.time()
summary_dev, predlist, true_list = test_epoch(
summary_dev, cfg, args, model, dataloader_dev)
time_spent = time.time() - time_now
auclist = []
for i in range(len(cfg.num_classes)):
y_pred = predlist[i]
y_true = true_list[i]
fpr, tpr, thresholds = metrics.roc_curve(
y_true, y_pred, pos_label=1)
auc = metrics.auc(fpr, tpr)
auclist.append(auc)
summary_dev['auc'] = np.array(auclist)
loss_dev_str = ' '.join(map(lambda x: '{:.5f}'.format(x),
summary_dev['loss']))
acc_dev_str = ' '.join(map(lambda x: '{:.3f}'.format(x),
summary_dev['acc']))
auc_dev_str = ' '.join(map(lambda x: '{:.3f}'.format(x),
summary_dev['auc']))
logging.info(
'{}, Dev, Step : {}, Loss : {}, Acc : {}, Auc : {},'
'Mean auc: {:.3f} ''Run Time : {:.2f} sec' .format(
time.strftime("%Y-%m-%d %H:%M:%S"),
summary['step'],
loss_dev_str,
acc_dev_str,
auc_dev_str,
summary_dev['auc'].mean(),
time_spent))
for t in range(len(cfg.num_classes)):
summary_writer.add_scalar(
'dev/loss_{}'.format(dev_header[t]),
summary_dev['loss'][t], summary['step'])
summary_writer.add_scalar(
'dev/acc_{}'.format(dev_header[t]), summary_dev['acc'][t],
summary['step'])
summary_writer.add_scalar(
'dev/auc_{}'.format(dev_header[t]), summary_dev['auc'][t],
summary['step'])
save_best = False
mean_acc = summary_dev['acc'][cfg.save_index].mean()
if mean_acc >= best_dict['acc_dev_best']:
best_dict['acc_dev_best'] = mean_acc
if cfg.best_target == 'acc':
save_best = True
mean_auc = summary_dev['auc'][cfg.save_index].mean()
if mean_auc >= best_dict['auc_dev_best']:
best_dict['auc_dev_best'] = mean_auc
if cfg.best_target == 'auc':
save_best = True
mean_loss = summary_dev['loss'][cfg.save_index].mean()
if mean_loss <= best_dict['loss_dev_best']:
best_dict['loss_dev_best'] = mean_loss
if cfg.best_target == 'loss':
save_best = True
if save_best:
torch.save(
{'epoch': summary['epoch'],
'step': summary['step'],
'acc_dev_best': best_dict['acc_dev_best'],
'auc_dev_best': best_dict['auc_dev_best'],
'loss_dev_best': best_dict['loss_dev_best'],
'state_dict': model.module.state_dict()},
os.path.join(args.save_path, 'best{}.ckpt'.format(
best_dict['best_idx']))
)
best_dict['best_idx'] += 1
if best_dict['best_idx'] > cfg.save_top_k:
best_dict['best_idx'] = 1
logging.info(
'{}, Best, Step : {}, Loss : {}, Acc : {},Auc :{},'
'Best Auc : {:.3f}' .format(
time.strftime("%Y-%m-%d %H:%M:%S"),
summary['step'],
loss_dev_str,
acc_dev_str,
auc_dev_str,
best_dict['auc_dev_best']))
model.train()
torch.set_grad_enabled(True)
summary['epoch'] += 1
return summary, best_dict
def test_epoch(summary, cfg, args, model, dataloader):
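    """Evaluate on the dev set and return the summary plus per-task predictions and labels."""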
torch.set_grad_enabled(False)
model.eval()
device_ids = list(map(int, args.device_ids.split(',')))
device = torch.device('cuda:{}'.format(device_ids[0]))
steps = len(dataloader)
dataiter = iter(dataloader)
num_tasks = len(cfg.num_classes)
loss_sum = np.zeros(num_tasks)
acc_sum = np.zeros(num_tasks)
predlist = list(x for x in range(len(cfg.num_classes)))
true_list = list(x for x in range(len(cfg.num_classes)))
for step in range(steps):
image, target = next(dataiter)
image = image.to(device)
target = target.to(device)
output = model(image)
output = [torch.unsqueeze(i, 1) for i in output.T]
        # compute loss/accuracy and collect predictions for each classification task
for t in range(len(cfg.num_classes)):
loss_t, acc_t = get_loss(output, target, t, device, cfg)
# AUC
output_tensor = torch.sigmoid(
output[t].view(-1)).cpu().detach().numpy()
target_tensor = target[:, t].view(-1).cpu().detach().numpy()
if step == 0:
predlist[t] = output_tensor
true_list[t] = target_tensor
else:
predlist[t] = np.append(predlist[t], output_tensor)
true_list[t] = np.append(true_list[t], target_tensor)
loss_sum[t] += loss_t.item()
acc_sum[t] += acc_t.item()
summary['loss'] = loss_sum / steps
summary['acc'] = acc_sum / steps
return summary, predlist, true_list
def run(args):
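    """Load the config, build the ViT model, dataloaders and optimizer, then run training."""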
with open(args.cfg_path) as f:
cfg = edict(json.load(f))
if args.verbose is True:
print(json.dumps(cfg, indent=4))
if not os.path.exists(args.save_path):
os.mkdir(args.save_path)
if args.logtofile is True:
logging.basicConfig(filename=args.save_path + '/log.txt',
filemode="w", level=logging.INFO)
else:
logging.basicConfig(level=logging.INFO)
if not args.resume:
with open(os.path.join(args.save_path, 'cfg.json'), 'w') as f:
json.dump(cfg, f, indent=1)
device_ids = list(map(int, args.device_ids.split(',')))
num_devices = torch.cuda.device_count()
if num_devices < len(device_ids):
raise Exception(
'#available gpu : {} < --device_ids : {}'
.format(num_devices, len(device_ids)))
device = torch.device('cuda:{}'.format(device_ids[0]))
# model = Classifier(cfg)
model = ViT(
cfg = cfg,
image_size=cfg.width,
patch_size=32,
num_classes=5,
dim=1024,
depth=6,
heads=8,
mlp_dim=512,
dropout=0.3,
emb_dropout=0.3,
channels=3
)
if args.verbose is True:
from torchsummary import summary
if cfg.fix_ratio:
h, w = cfg.long_side, cfg.long_side
else:
h, w = cfg.height, cfg.width
summary(model.to(device), (3, h, w))
model = DataParallel(model, device_ids=device_ids).to(device).train()
if args.pre_train is not None:
if os.path.exists(args.pre_train):
ckpt = torch.load(args.pre_train, map_location=device)
model.module.load_state_dict(ckpt)
optimizer = get_optimizer(model.parameters(), cfg)
src_folder = os.path.dirname(os.path.abspath(__file__)) + '/../'
dst_folder = os.path.join(args.save_path, 'classification')
rc, size = subprocess.getstatusoutput('du --max-depth=0 %s | cut -f1'
% src_folder)
if rc != 0:
raise Exception('Copy folder error : {}'.format(rc))
rc, err_msg = subprocess.getstatusoutput('cp -R %s %s' % (src_folder,
dst_folder))
if rc != 0:
raise Exception('copy folder error : {}'.format(err_msg))
copyfile(cfg.train_csv, os.path.join(args.save_path, 'train.csv'))
copyfile(cfg.dev_csv, os.path.join(args.save_path, 'dev.csv'))
dataloader_train = DataLoader(
ImageDataset(cfg.train_csv, cfg, mode='train'),
batch_size=cfg.train_batch_size, num_workers=args.num_workers,
drop_last=True, shuffle=True)
dataloader_dev = DataLoader(
ImageDataset(cfg.dev_csv, cfg, mode='dev'),
batch_size=cfg.dev_batch_size, num_workers=args.num_workers,
drop_last=False, shuffle=False)
dev_header = dataloader_dev.dataset._label_header
summary_train = {'epoch': 0, 'step': 0}
summary_dev = {'loss': float('inf'), 'acc': 0.0}
summary_writer = SummaryWriter(args.save_path)
epoch_start = 0
best_dict = {
"acc_dev_best": 0.0,
"auc_dev_best": 0.0,
"loss_dev_best": float('inf'),
"fused_dev_best": 0.0,
"best_idx": 1}
if args.resume:
ckpt_path = os.path.join(args.save_path, 'train.ckpt')
ckpt = torch.load(ckpt_path, map_location=device)
model.module.load_state_dict(ckpt['state_dict'])
summary_train = {'epoch': ckpt['epoch'], 'step': ckpt['step']}
best_dict['acc_dev_best'] = ckpt['acc_dev_best']
best_dict['loss_dev_best'] = ckpt['loss_dev_best']
best_dict['auc_dev_best'] = ckpt['auc_dev_best']
epoch_start = ckpt['epoch']
for epoch in range(epoch_start, cfg.epoch):
lr = lr_schedule(cfg.lr, cfg.lr_factor, summary_train['epoch'],
cfg.lr_epochs)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
summary_train, best_dict = train_epoch(
summary_train, summary_dev, cfg, args, model,
dataloader_train, dataloader_dev, optimizer,
summary_writer, best_dict, dev_header)
time_now = time.time()
summary_dev, predlist, true_list = test_epoch(
summary_dev, cfg, args, model, dataloader_dev)
time_spent = time.time() - time_now
auclist = []
for i in range(len(cfg.num_classes)):
y_pred = predlist[i]
y_true = true_list[i]
fpr, tpr, thresholds = metrics.roc_curve(
y_true, y_pred, pos_label=1)
auc = metrics.auc(fpr, tpr)
auclist.append(auc)
summary_dev['auc'] = np.array(auclist)
loss_dev_str = ' '.join(map(lambda x: '{:.5f}'.format(x),
summary_dev['loss']))
acc_dev_str = ' '.join(map(lambda x: '{:.3f}'.format(x),
summary_dev['acc']))
auc_dev_str = ' '.join(map(lambda x: '{:.3f}'.format(x),
summary_dev['auc']))
logging.info(
'{}, Dev, Step : {}, Loss : {}, Acc : {}, Auc : {},'
'Mean auc: {:.3f} ''Run Time : {:.2f} sec' .format(
time.strftime("%Y-%m-%d %H:%M:%S"),
summary_train['step'],
loss_dev_str,
acc_dev_str,
auc_dev_str,
summary_dev['auc'].mean(),
time_spent))
for t in range(len(cfg.num_classes)):
summary_writer.add_scalar(
'dev/loss_{}'.format(dev_header[t]), summary_dev['loss'][t],
summary_train['step'])
summary_writer.add_scalar(
'dev/acc_{}'.format(dev_header[t]), summary_dev['acc'][t],
summary_train['step'])
summary_writer.add_scalar(
'dev/auc_{}'.format(dev_header[t]), summary_dev['auc'][t],
summary_train['step'])
save_best = False
mean_acc = summary_dev['acc'][cfg.save_index].mean()
if mean_acc >= best_dict['acc_dev_best']:
best_dict['acc_dev_best'] = mean_acc
if cfg.best_target == 'acc':
save_best = True
mean_auc = summary_dev['auc'][cfg.save_index].mean()
if mean_auc >= best_dict['auc_dev_best']:
best_dict['auc_dev_best'] = mean_auc
if cfg.best_target == 'auc':
save_best = True
mean_loss = summary_dev['loss'][cfg.save_index].mean()
if mean_loss <= best_dict['loss_dev_best']:
best_dict['loss_dev_best'] = mean_loss
if cfg.best_target == 'loss':
save_best = True
if save_best:
torch.save(
{'epoch': summary_train['epoch'],
'step': summary_train['step'],
'acc_dev_best': best_dict['acc_dev_best'],
'auc_dev_best': best_dict['auc_dev_best'],
'loss_dev_best': best_dict['loss_dev_best'],
'state_dict': model.module.state_dict()},
os.path.join(args.save_path,
'best{}.ckpt'.format(best_dict['best_idx']))
)
best_dict['best_idx'] += 1
if best_dict['best_idx'] > cfg.save_top_k:
best_dict['best_idx'] = 1
logging.info(
'{}, Best, Step : {}, Loss : {}, Acc : {},'
'Auc :{},Best Auc : {:.3f}' .format(
time.strftime("%Y-%m-%d %H:%M:%S"),
summary_train['step'],
loss_dev_str,
acc_dev_str,
auc_dev_str,
best_dict['auc_dev_best']))
torch.save({'epoch': summary_train['epoch'],
'step': summary_train['step'],
'acc_dev_best': best_dict['acc_dev_best'],
'auc_dev_best': best_dict['auc_dev_best'],
'loss_dev_best': best_dict['loss_dev_best'],
'state_dict': model.module.state_dict()},
os.path.join(args.save_path, 'train.ckpt'))
summary_writer.close()
def main():
args = parser.parse_args()
if args.verbose is True:
print('Using the specified args:')
print(args)
run(args)
if __name__ == '__main__':
main()
| [((22, 0, 22, 20), 'torch.manual_seed', 'torch.manual_seed', ({(22, 18, 22, 19): '(0)'}, {}), '(0)', False, 'import torch\n'), ((23, 0, 23, 29), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', ({(23, 27, 23, 28): '(0)'}, {}), '(0)', False, 'import torch\n'), ((30, 9, 30, 59), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((78, 4, 78, 32), 'torch.set_grad_enabled', 'torch.set_grad_enabled', ({(78, 27, 78, 31): '(True)'}, {}), '(True)', False, 'import torch\n'), ((87, 15, 87, 26), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((88, 15, 88, 34), 'numpy.zeros', 'np.zeros', ({(88, 24, 88, 33): 'num_tasks'}, {}), '(num_tasks)', True, 'import numpy as np\n'), ((89, 14, 89, 33), 'numpy.zeros', 'np.zeros', ({(89, 23, 89, 32): 'num_tasks'}, {}), '(num_tasks)', True, 'import numpy as np\n'), ((234, 4, 234, 33), 'torch.set_grad_enabled', 'torch.set_grad_enabled', ({(234, 27, 234, 32): '(False)'}, {}), '(False)', False, 'import torch\n'), ((242, 15, 242, 34), 'numpy.zeros', 'np.zeros', ({(242, 24, 242, 33): 'num_tasks'}, {}), '(num_tasks)', True, 'import numpy as np\n'), ((243, 14, 243, 33), 'numpy.zeros', 'np.zeros', ({(243, 23, 243, 32): 'num_tasks'}, {}), '(num_tasks)', True, 'import numpy as np\n'), ((295, 18, 295, 43), 'torch.cuda.device_count', 'torch.cuda.device_count', ({}, {}), '()', False, 'import torch\n'), ((303, 12, 315, 5), 'vit_pytorch.ViT', 'ViT', (), '', False, 'from vit_pytorch import ViT\n'), ((331, 17, 331, 63), 'os.path.join', 'os.path.join', ({(331, 30, 331, 44): 'args.save_path', (331, 46, 331, 62): '"""classification"""'}, {}), "(args.save_path, 'classification')", False, 'import os\n'), ((332, 15, 333, 55), 'subprocess.getstatusoutput', 'subprocess.getstatusoutput', ({(332, 42, 333, 54): "'du --max-depth=0 %s | cut -f1' % src_folder"}, {}), "('du --max-depth=0 %s | cut -f1' % src_folder)", False, 'import subprocess\n'), ((336, 18, 337, 74), 'subprocess.getstatusoutput', 'subprocess.getstatusoutput', ({(336, 45, 337, 73): "'cp -R %s %s' % (src_folder, dst_folder)"}, {}), "('cp -R %s %s' % (src_folder, dst_folder))", False, 'import subprocess\n'), ((356, 21, 356, 50), 'tensorboardX.SummaryWriter', 'SummaryWriter', ({(356, 35, 356, 49): 'args.save_path'}, {}), '(args.save_path)', False, 'from tensorboardX import SummaryWriter\n'), ((227, 8, 227, 36), 'torch.set_grad_enabled', 'torch.set_grad_enabled', ({(227, 31, 227, 35): '(True)'}, {}), '(True)', False, 'import torch\n'), ((282, 11, 282, 41), 'os.path.exists', 'os.path.exists', ({(282, 26, 282, 40): 'args.save_path'}, {}), '(args.save_path)', False, 'import os\n'), ((283, 8, 283, 32), 'os.mkdir', 'os.mkdir', ({(283, 17, 283, 31): 'args.save_path'}, {}), '(args.save_path)', False, 'import os\n'), ((285, 8, 286, 61), 'logging.basicConfig', 'logging.basicConfig', (), '', False, 'import logging\n'), ((288, 8, 288, 47), 'logging.basicConfig', 'logging.basicConfig', (), '', False, 'import logging\n'), ((325, 11, 325, 41), 'os.path.exists', 'os.path.exists', ({(325, 26, 325, 40): 'args.pre_train'}, {}), '(args.pre_train)', False, 'import os\n'), ((341, 28, 341, 69), 'os.path.join', 'os.path.join', ({(341, 41, 341, 55): 'args.save_path', (341, 57, 341, 68): '"""train.csv"""'}, {}), "(args.save_path, 'train.csv')", False, 'import os\n'), ((342, 26, 342, 65), 'os.path.join', 'os.path.join', ({(342, 39, 342, 53): 'args.save_path', (342, 55, 342, 64): '"""dev.csv"""'}, {}), "(args.save_path, 'dev.csv')", False, 'import os\n'), ((345, 8, 345, 54), 
'data.dataset.ImageDataset', 'ImageDataset', (), '', False, 'from data.dataset import ImageDataset\n'), ((349, 8, 349, 50), 'data.dataset.ImageDataset', 'ImageDataset', (), '', False, 'from data.dataset import ImageDataset\n'), ((366, 20, 366, 62), 'os.path.join', 'os.path.join', ({(366, 33, 366, 47): 'args.save_path', (366, 49, 366, 61): '"""train.ckpt"""'}, {}), "(args.save_path, 'train.ckpt')", False, 'import os\n'), ((367, 15, 367, 57), 'torch.load', 'torch.load', (), '', False, 'import torch\n'), ((376, 13, 377, 39), 'utils.misc.lr_schedule', 'lr_schedule', ({(376, 25, 376, 31): 'cfg.lr', (376, 33, 376, 46): 'cfg.lr_factor', (376, 48, 376, 70): "summary_train['epoch']", (377, 25, 377, 38): 'cfg.lr_epochs'}, {}), "(cfg.lr, cfg.lr_factor, summary_train['epoch'], cfg.lr_epochs)", False, 'from utils.misc import lr_schedule\n'), ((386, 19, 386, 30), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((399, 29, 399, 46), 'numpy.array', 'np.array', ({(399, 38, 399, 45): 'auclist'}, {}), '(auclist)', True, 'import numpy as np\n'), ((20, 32, 20, 57), 'os.path.abspath', 'os.path.abspath', ({(20, 48, 20, 56): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((96, 18, 96, 39), 'torch.unsqueeze', 'torch.unsqueeze', ({(96, 34, 96, 35): 'i', (96, 37, 96, 38): '(1)'}, {}), '(i, 1)', False, 'import torch\n'), ((114, 23, 114, 34), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((136, 23, 136, 42), 'numpy.zeros', 'np.zeros', ({(136, 32, 136, 41): 'num_tasks'}, {}), '(num_tasks)', True, 'import numpy as np\n'), ((137, 22, 137, 41), 'numpy.zeros', 'np.zeros', ({(137, 31, 137, 40): 'num_tasks'}, {}), '(num_tasks)', True, 'import numpy as np\n'), ((140, 23, 140, 34), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((153, 33, 153, 50), 'numpy.array', 'np.array', ({(153, 42, 153, 49): 'auclist'}, {}), '(auclist)', True, 'import numpy as np\n'), ((252, 18, 252, 39), 'torch.unsqueeze', 'torch.unsqueeze', ({(252, 34, 252, 35): 'i', (252, 37, 252, 38): '(1)'}, {}), '(i, 1)', False, 'import torch\n'), ((278, 20, 278, 32), 'json.load', 'json.load', ({(278, 30, 278, 31): 'f'}, {}), '(f)', False, 'import json\n'), ((292, 12, 292, 39), 'json.dump', 'json.dump', (), '', False, 'import json\n'), ((326, 19, 326, 66), 'torch.load', 'torch.load', (), '', False, 'import torch\n'), ((330, 33, 330, 58), 'os.path.abspath', 'os.path.abspath', ({(330, 49, 330, 57): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((389, 21, 389, 32), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((395, 35, 396, 44), 'sklearn.metrics.roc_curve', 'metrics.roc_curve', (), '', False, 'from sklearn import metrics\n'), ((397, 18, 397, 39), 'sklearn.metrics.auc', 'metrics.auc', ({(397, 30, 397, 33): 'fpr', (397, 35, 397, 38): 'tpr'}, {}), '(fpr, tpr)', False, 'from sklearn import metrics\n'), ((479, 19, 479, 61), 'os.path.join', 'os.path.join', ({(479, 32, 479, 46): 'args.save_path', (479, 48, 479, 60): '"""train.ckpt"""'}, {}), "(args.save_path, 'train.ckpt')", False, 'import os\n'), ((113, 25, 113, 36), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((143, 25, 143, 36), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((149, 39, 150, 48), 'sklearn.metrics.roc_curve', 'metrics.roc_curve', (), '', False, 'from sklearn import metrics\n'), ((151, 22, 151, 43), 'sklearn.metrics.auc', 'metrics.auc', ({(151, 34, 151, 37): 'fpr', (151, 39, 151, 42): 'tpr'}, {}), '(fpr, tpr)', False, 'from sklearn import metrics\n'), ((265, 30, 265, 
67), 'numpy.append', 'np.append', ({(265, 40, 265, 51): 'predlist[t]', (265, 53, 265, 66): 'output_tensor'}, {}), '(predlist[t], output_tensor)', True, 'import numpy as np\n'), ((266, 31, 266, 69), 'numpy.append', 'np.append', ({(266, 41, 266, 53): 'true_list[t]', (266, 55, 266, 68): 'target_tensor'}, {}), '(true_list[t], target_tensor)', True, 'import numpy as np\n'), ((280, 18, 280, 43), 'json.dumps', 'json.dumps', (), '', False, 'import json\n'), ((291, 18, 291, 58), 'os.path.join', 'os.path.join', ({(291, 31, 291, 45): 'args.save_path', (291, 47, 291, 57): '"""cfg.json"""'}, {}), "(args.save_path, 'cfg.json')", False, 'import os\n'), ((411, 16, 411, 50), 'time.strftime', 'time.strftime', ({(411, 30, 411, 49): '"""%Y-%m-%d %H:%M:%S"""'}, {}), "('%Y-%m-%d %H:%M:%S')", False, 'import time\n'), ((124, 24, 124, 58), 'time.strftime', 'time.strftime', ({(124, 38, 124, 57): '"""%Y-%m-%d %H:%M:%S"""'}, {}), "('%Y-%m-%d %H:%M:%S')", False, 'import time\n'), ((165, 20, 165, 54), 'time.strftime', 'time.strftime', ({(165, 34, 165, 53): '"""%Y-%m-%d %H:%M:%S"""'}, {}), "('%Y-%m-%d %H:%M:%S')", False, 'import time\n'), ((323, 12, 323, 54), 'torch.nn.DataParallel', 'DataParallel', (), '', False, 'from torch.nn import DataParallel\n'), ((467, 20, 467, 54), 'time.strftime', 'time.strftime', ({(467, 34, 467, 53): '"""%Y-%m-%d %H:%M:%S"""'}, {}), "('%Y-%m-%d %H:%M:%S')", False, 'import time\n'), ((58, 23, 58, 59), 'torch.tensor', 'torch.tensor', (), '', False, 'import torch\n'), ((220, 24, 220, 58), 'time.strftime', 'time.strftime', ({(220, 38, 220, 57): '"""%Y-%m-%d %H:%M:%S"""'}, {}), "('%Y-%m-%d %H:%M:%S')", False, 'import time\n'), ((54, 12, 55, 38), 'numpy.array', 'np.array', (), '', True, 'import numpy as np\n')] |
AndreasGeiger/hackerrank-python | Sets/the capaint s room.py | a436c207e62b32f70a6b4279bb641a3c4d90e112 | groupSize = input()
groups = list(map(int,input().split(' ')))
tmpArray1 = set()
tmpArray2 = set()
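# tmpArray1 remembers every room number seen so far; tmpArray2 keeps only the
# numbers seen exactly once, so after the loop it holds just the captain's room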
for i in groups:
if i in tmpArray1:
tmpArray2.discard(i)
else:
tmpArray1.add(i)
tmpArray2.add(i)
for i in tmpArray2:
print(i)
| [] |
gtmadureira/Python | tests/testsoma.py | 38de6c56fec1d22662f30c1ff4d4f4f411678484 | import unittest
from hf_src.main import soma
class TestSoma(unittest.TestCase):
def test_retorno_soma_15_30(self):
self.assertEqual(soma(15, 30), 45)
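

# optional: allows running this test module directly (python testsoma.py)
if __name__ == '__main__':
    unittest.main()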
| [((7, 25, 7, 37), 'hf_src.main.soma', 'soma', ({(7, 30, 7, 32): '(15)', (7, 34, 7, 36): '(30)'}, {}), '(15, 30)', False, 'from hf_src.main import soma\n')] |
rohe/oictest | src/oictest/setup.py | f6f0800220befd5983b8cb34a5c984f98855d089 | import copy
import json
from oic.utils.authn.client import CLIENT_AUTHN_METHOD
from oic.utils.keyio import KeyJar
from oic.utils.keyio import KeyBundle
__author__ = 'roland'
import logging
logger = logging.getLogger(__name__)
class OIDCError(Exception):
pass
def flow2sequence(operations, item):
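    """Expand a flow name into its ordered list of request/response phases."""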
flow = operations.FLOWS[item]
return [operations.PHASES[phase] for phase in flow["sequence"]]
class OIDCTestSetup(object):
def __init__(self, client_cls, config, test_defs):
"""
:param config: Imported configuration module
:return:
"""
self.client_cls = client_cls
self.config = config
self.test_features = []
self.client = self.create_client(**config.CLIENT)
self.test_defs = test_defs
def create_client(self, **kwargs):
"""
Instantiate a _client instance
:param: Keyword arguments
Keys are ["srv_discovery_url", "client_info", "client_registration",
"provider_info". "keys]
:return: _client instance
"""
_key_set = set(kwargs.keys())
args = {}
_client = self.client_cls(client_authn_method=CLIENT_AUTHN_METHOD,
behaviour=kwargs["behaviour"],
verify_ssl=self.config.VERIFY_SSL, **args)
# The behaviour parameter is not significant for the election process
_key_set.discard("behaviour")
try:
setattr(_client, "allow", kwargs["allow"])
except KeyError:
pass
else:
_key_set.discard("allow")
try:
jwks = self.construct_jwks(_client, kwargs["keys"])
except KeyError:
pass
else:
# export JWKS
f = open("export/jwk.json", "w")
f.write(json.dumps(jwks))
f.close()
_client.jwks_uri = self.config.CLIENT["key_export_url"]
self.test_features = _key_set
try:
_client.client_prefs = copy.copy(kwargs["preferences"])
except KeyError:
pass
else:
_key_set.discard("preferences")
if "client_info" in _key_set:
_client.redirect_uris = self.config.CLIENT[
"client_info"]["redirect_uris"]
elif "client_registration" in _key_set:
reg_info = self.config.CLIENT["client_registration"]
_client.redirect_uris = reg_info["redirect_uris"]
_client.client_id = reg_info["client_id"]
_client.client_secret = reg_info["client_secret"]
return _client
@staticmethod
def construct_jwks(_client, key_conf):
"""
Construct the jwks
"""
if _client.keyjar is None:
_client.keyjar = KeyJar()
kbl = []
kid_template = "a%d"
kid = 0
for typ, info in key_conf.items():
kb = KeyBundle(source="file://%s" % info["key"], fileformat="der",
keytype=typ)
for k in kb.keys():
k.serialize()
k.kid = kid_template % kid
kid += 1
_client.kid[k.use][k.kty] = k.kid
_client.keyjar.add_kb("", kb)
kbl.append(kb)
jwks = {"keys": []}
for kb in kbl:
# ignore simple keys
jwks["keys"].extend([k.to_dict()
for k in kb.keys() if k.kty != 'oct'])
return jwks
def make_sequence(self, flow):
"""
Translate a flow name into a sequence of request/responses.
:param flow: Which test flow to use
:return: test sequence and test definitions
"""
sequence = flow2sequence(self.test_defs, flow)
res = {"sequence": sequence,
"tests": {"pre": [], "post": []},
"flow": [flow],
"block": [],
"mode": "",
"expect_exception": False}
_flow = self.test_defs.FLOWS[flow]
for param in ["tests", "block", "mode", "expect_exception"]:
try:
res[param] = _flow[param]
except KeyError:
pass
return res
def add_init(self, test_spec):
"""
Add _client registration and provider info gathering if necessary
:param test_spec:
:return:
"""
_seq = test_spec["sequence"]
_flow = test_spec["flow"]
if "client_info" in self.test_features and \
"registration" not in test_spec["block"]:
_register = True
# May not be the first item in the sequence
for sq in _seq:
try:
if sq[0].request == "RegistrationRequest":
_register = False
except TypeError:
pass
if _register:
_ext = self.test_defs.PHASES["oic-registration"]
_seq.insert(0, _ext)
_flow.insert(0, "oic-registration")
if "srv_discovery_url" in self.test_features:
op_spec = self.test_defs.PHASES["provider-discovery"]
if op_spec not in _seq:
_seq.insert(0, op_spec)
_flow.insert(0, "provider-discovery")
return test_spec
def request_and_return(conv, url, response=None, method="GET", body=None,
body_type="json", state="", http_args=None,
**kwargs):
"""
:param url: The URL to which the request should be sent
:param response: Response type
:param method: Which HTTP method to use
:param body: A message body if any
:param body_type: The format of the body of the return message
:param http_args: Arguments for the HTTP _client
:return: A cls or ErrorResponse instance or the HTTP response
instance if no response body was expected.
"""
if http_args is None:
http_args = {}
_cli = conv._client
try:
_resp = _cli.http_request(url, method, data=body, **http_args)
except Exception:
raise
conv.position = url
conv.last_response = _resp
conv.last_content = _resp.content
if not "keyjar" in kwargs:
kwargs["keyjar"] = conv.keyjar
_response = _cli.parse_request_response(_resp, response, body_type, state,
**kwargs)
conv.protocol_response.append((_response, _resp.content))
return _response
def test_summation(conv, sid):
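    """Collapse a conversation's per-test output into a single status record for test `sid`."""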
status = 0
for item in conv.test_output:
if item["status"] > status:
status = item["status"]
if status == 0:
status = 1
info = {
"id": sid,
"status": status,
"tests": conv.test_output
}
return info | [((12, 9, 12, 36), 'logging.getLogger', 'logging.getLogger', ({(12, 27, 12, 35): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((77, 35, 77, 67), 'copy.copy', 'copy.copy', ({(77, 45, 77, 66): "kwargs['preferences']"}, {}), "(kwargs['preferences'])", False, 'import copy\n'), ((100, 29, 100, 37), 'oic.utils.keyio.KeyJar', 'KeyJar', ({}, {}), '()', False, 'from oic.utils.keyio import KeyJar\n'), ((106, 17, 107, 39), 'oic.utils.keyio.KeyBundle', 'KeyBundle', (), '', False, 'from oic.utils.keyio import KeyBundle\n'), ((70, 20, 70, 36), 'json.dumps', 'json.dumps', ({(70, 31, 70, 35): 'jwks'}, {}), '(jwks)', False, 'import json\n')] |
PKUfudawei/cmssw | HLTrigger/Configuration/python/HLT_75e33/modules/hltPFPuppiNoLep_cfi.py | 8fbb5ce74398269c8a32956d7c7943766770c093 | import FWCore.ParameterSet.Config as cms
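# PuppiProducer configured with puppiNoLep = True, i.e. PUPPI weights are computed ignoring leptons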
hltPFPuppiNoLep = cms.EDProducer("PuppiProducer",
DeltaZCut = cms.double(0.1),
DeltaZCutForChargedFromPUVtxs = cms.double(0.2),
EtaMaxCharged = cms.double(99999.0),
EtaMaxPhotons = cms.double(2.5),
EtaMinUseDeltaZ = cms.double(-1.0),
MinPuppiWeight = cms.double(0.01),
NumOfPUVtxsForCharged = cms.uint32(0),
PUProxyValue = cms.InputTag("hltPixelClustersMultiplicity"),
PtMaxCharged = cms.double(-1.0),
PtMaxNeutrals = cms.double(200.0),
PtMaxNeutralsStartSlope = cms.double(0.0),
PtMaxPhotons = cms.double(20.0),
UseDeltaZCut = cms.bool(True),
UseFromPVLooseTight = cms.bool(False),
algos = cms.VPSet(
cms.PSet(
EtaMaxExtrap = cms.double(2.0),
MedEtaSF = cms.vdouble(1.0, 1.0),
MinNeutralPt = cms.vdouble(0.5105, 0.821),
MinNeutralPtSlope = cms.vdouble(9.51e-06, 1.902e-05),
RMSEtaSF = cms.vdouble(1.0, 1.0),
etaMax = cms.vdouble(2.5, 3.5),
etaMin = cms.vdouble(0.0, 2.5),
ptMin = cms.vdouble(0.0, 0.0),
puppiAlgos = cms.VPSet(cms.PSet(
algoId = cms.int32(5),
applyLowPUCorr = cms.bool(True),
combOpt = cms.int32(0),
cone = cms.double(0.4),
rmsPtMin = cms.double(0.1),
rmsScaleFactor = cms.double(1.0),
useCharged = cms.bool(True)
))
),
cms.PSet(
EtaMaxExtrap = cms.double(2.0),
MedEtaSF = cms.vdouble(0.75),
MinNeutralPt = cms.vdouble(3.656),
MinNeutralPtSlope = cms.vdouble(5.072e-05),
RMSEtaSF = cms.vdouble(1.0),
etaMax = cms.vdouble(10.0),
etaMin = cms.vdouble(3.5),
ptMin = cms.vdouble(0.0),
puppiAlgos = cms.VPSet(cms.PSet(
algoId = cms.int32(5),
applyLowPUCorr = cms.bool(True),
combOpt = cms.int32(0),
cone = cms.double(0.4),
rmsPtMin = cms.double(0.5),
rmsScaleFactor = cms.double(1.0),
useCharged = cms.bool(False)
))
)
),
applyCHS = cms.bool(True),
candName = cms.InputTag("particleFlowTmp"),
clonePackedCands = cms.bool(False),
invertPuppi = cms.bool(False),
puppiDiagnostics = cms.bool(False),
puppiNoLep = cms.bool(True),
useExistingWeights = cms.bool(False),
useExp = cms.bool(False),
usePUProxyValue = cms.bool(True),
vertexName = cms.InputTag("goodOfflinePrimaryVertices"),
vtxNdofCut = cms.int32(4),
vtxZCut = cms.double(24)
)
| [((4, 16, 4, 31), 'FWCore.ParameterSet.Config.double', 'cms.double', ({(4, 27, 4, 30): '0.1'}, {}), '(0.1)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((5, 36, 5, 51), 'FWCore.ParameterSet.Config.double', 'cms.double', ({(5, 47, 5, 50): '0.2'}, {}), '(0.2)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((6, 20, 6, 39), 'FWCore.ParameterSet.Config.double', 'cms.double', ({(6, 31, 6, 38): '99999.0'}, {}), '(99999.0)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((7, 20, 7, 35), 'FWCore.ParameterSet.Config.double', 'cms.double', ({(7, 31, 7, 34): '2.5'}, {}), '(2.5)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((8, 22, 8, 38), 'FWCore.ParameterSet.Config.double', 'cms.double', ({(8, 33, 8, 37): '-1.0'}, {}), '(-1.0)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((9, 21, 9, 37), 'FWCore.ParameterSet.Config.double', 'cms.double', ({(9, 32, 9, 36): '0.01'}, {}), '(0.01)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((10, 28, 10, 41), 'FWCore.ParameterSet.Config.uint32', 'cms.uint32', ({(10, 39, 10, 40): '0'}, {}), '(0)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((11, 19, 11, 63), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', ({(11, 32, 11, 62): '"""hltPixelClustersMultiplicity"""'}, {}), "('hltPixelClustersMultiplicity')", True, 'import FWCore.ParameterSet.Config as cms\n'), ((12, 19, 12, 35), 'FWCore.ParameterSet.Config.double', 'cms.double', ({(12, 30, 12, 34): '-1.0'}, {}), '(-1.0)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((13, 20, 13, 37), 'FWCore.ParameterSet.Config.double', 'cms.double', ({(13, 31, 13, 36): '200.0'}, {}), '(200.0)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((14, 30, 14, 45), 'FWCore.ParameterSet.Config.double', 'cms.double', ({(14, 41, 14, 44): '0.0'}, {}), '(0.0)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((15, 19, 15, 35), 'FWCore.ParameterSet.Config.double', 'cms.double', ({(15, 30, 15, 34): '20.0'}, {}), '(20.0)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((16, 19, 16, 33), 'FWCore.ParameterSet.Config.bool', 'cms.bool', ({(16, 28, 16, 32): 'True'}, {}), '(True)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((17, 26, 17, 41), 'FWCore.ParameterSet.Config.bool', 'cms.bool', ({(17, 35, 17, 40): 'False'}, {}), '(False)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((58, 15, 58, 29), 'FWCore.ParameterSet.Config.bool', 'cms.bool', ({(58, 24, 58, 28): 'True'}, {}), '(True)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((59, 15, 59, 46), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', ({(59, 28, 59, 45): '"""particleFlowTmp"""'}, {}), "('particleFlowTmp')", True, 'import FWCore.ParameterSet.Config as cms\n'), ((60, 23, 60, 38), 'FWCore.ParameterSet.Config.bool', 'cms.bool', ({(60, 32, 60, 37): 'False'}, {}), '(False)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((61, 18, 61, 33), 'FWCore.ParameterSet.Config.bool', 'cms.bool', ({(61, 27, 61, 32): 'False'}, {}), '(False)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((62, 23, 62, 38), 'FWCore.ParameterSet.Config.bool', 'cms.bool', ({(62, 32, 62, 37): 'False'}, {}), '(False)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((63, 17, 63, 31), 'FWCore.ParameterSet.Config.bool', 'cms.bool', ({(63, 26, 63, 30): 'True'}, {}), '(True)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((64, 25, 64, 40), 'FWCore.ParameterSet.Config.bool', 'cms.bool', ({(64, 34, 64, 39): 'False'}, {}), '(False)', True, 'import FWCore.ParameterSet.Config as cms\n'), 
((65, 13, 65, 28), 'FWCore.ParameterSet.Config.bool', 'cms.bool', ({(65, 22, 65, 27): 'False'}, {}), '(False)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((66, 22, 66, 36), 'FWCore.ParameterSet.Config.bool', 'cms.bool', ({(66, 31, 66, 35): 'True'}, {}), '(True)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((67, 17, 67, 59), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', ({(67, 30, 67, 58): '"""goodOfflinePrimaryVertices"""'}, {}), "('goodOfflinePrimaryVertices')", True, 'import FWCore.ParameterSet.Config as cms\n'), ((68, 17, 68, 29), 'FWCore.ParameterSet.Config.int32', 'cms.int32', ({(68, 27, 68, 28): '4'}, {}), '(4)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((69, 14, 69, 28), 'FWCore.ParameterSet.Config.double', 'cms.double', ({(69, 25, 69, 27): '24'}, {}), '(24)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((20, 27, 20, 42), 'FWCore.ParameterSet.Config.double', 'cms.double', ({(20, 38, 20, 41): '2.0'}, {}), '(2.0)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((21, 23, 21, 44), 'FWCore.ParameterSet.Config.vdouble', 'cms.vdouble', ({(21, 35, 21, 38): '1.0', (21, 40, 21, 43): '1.0'}, {}), '(1.0, 1.0)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((22, 27, 22, 53), 'FWCore.ParameterSet.Config.vdouble', 'cms.vdouble', ({(22, 39, 22, 45): '0.5105', (22, 47, 22, 52): '0.821'}, {}), '(0.5105, 0.821)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((23, 32, 23, 64), 'FWCore.ParameterSet.Config.vdouble', 'cms.vdouble', ({(23, 44, 23, 52): '9.51e-06', (23, 54, 23, 63): '1.902e-05'}, {}), '(9.51e-06, 1.902e-05)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((24, 23, 24, 44), 'FWCore.ParameterSet.Config.vdouble', 'cms.vdouble', ({(24, 35, 24, 38): '1.0', (24, 40, 24, 43): '1.0'}, {}), '(1.0, 1.0)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((25, 21, 25, 42), 'FWCore.ParameterSet.Config.vdouble', 'cms.vdouble', ({(25, 33, 25, 36): '2.5', (25, 38, 25, 41): '3.5'}, {}), '(2.5, 3.5)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((26, 21, 26, 42), 'FWCore.ParameterSet.Config.vdouble', 'cms.vdouble', ({(26, 33, 26, 36): '0.0', (26, 38, 26, 41): '2.5'}, {}), '(0.0, 2.5)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((27, 20, 27, 41), 'FWCore.ParameterSet.Config.vdouble', 'cms.vdouble', ({(27, 32, 27, 35): '0.0', (27, 37, 27, 40): '0.0'}, {}), '(0.0, 0.0)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((39, 27, 39, 42), 'FWCore.ParameterSet.Config.double', 'cms.double', ({(39, 38, 39, 41): '2.0'}, {}), '(2.0)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((40, 23, 40, 40), 'FWCore.ParameterSet.Config.vdouble', 'cms.vdouble', ({(40, 35, 40, 39): '0.75'}, {}), '(0.75)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((41, 27, 41, 45), 'FWCore.ParameterSet.Config.vdouble', 'cms.vdouble', ({(41, 39, 41, 44): '3.656'}, {}), '(3.656)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((42, 32, 42, 54), 'FWCore.ParameterSet.Config.vdouble', 'cms.vdouble', ({(42, 44, 42, 53): '5.072e-05'}, {}), '(5.072e-05)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((43, 23, 43, 39), 'FWCore.ParameterSet.Config.vdouble', 'cms.vdouble', ({(43, 35, 43, 38): '1.0'}, {}), '(1.0)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((44, 21, 44, 38), 'FWCore.ParameterSet.Config.vdouble', 'cms.vdouble', ({(44, 33, 44, 37): '10.0'}, {}), '(10.0)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((45, 21, 45, 37), 'FWCore.ParameterSet.Config.vdouble', 'cms.vdouble', ({(45, 33, 
45, 36): '3.5'}, {}), '(3.5)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((46, 20, 46, 36), 'FWCore.ParameterSet.Config.vdouble', 'cms.vdouble', ({(46, 32, 46, 35): '0.0'}, {}), '(0.0)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((29, 25, 29, 37), 'FWCore.ParameterSet.Config.int32', 'cms.int32', ({(29, 35, 29, 36): '5'}, {}), '(5)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((30, 33, 30, 47), 'FWCore.ParameterSet.Config.bool', 'cms.bool', ({(30, 42, 30, 46): 'True'}, {}), '(True)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((31, 26, 31, 38), 'FWCore.ParameterSet.Config.int32', 'cms.int32', ({(31, 36, 31, 37): '0'}, {}), '(0)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((32, 23, 32, 38), 'FWCore.ParameterSet.Config.double', 'cms.double', ({(32, 34, 32, 37): '0.4'}, {}), '(0.4)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((33, 27, 33, 42), 'FWCore.ParameterSet.Config.double', 'cms.double', ({(33, 38, 33, 41): '0.1'}, {}), '(0.1)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((34, 33, 34, 48), 'FWCore.ParameterSet.Config.double', 'cms.double', ({(34, 44, 34, 47): '1.0'}, {}), '(1.0)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((35, 29, 35, 43), 'FWCore.ParameterSet.Config.bool', 'cms.bool', ({(35, 38, 35, 42): 'True'}, {}), '(True)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((48, 25, 48, 37), 'FWCore.ParameterSet.Config.int32', 'cms.int32', ({(48, 35, 48, 36): '5'}, {}), '(5)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((49, 33, 49, 47), 'FWCore.ParameterSet.Config.bool', 'cms.bool', ({(49, 42, 49, 46): 'True'}, {}), '(True)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((50, 26, 50, 38), 'FWCore.ParameterSet.Config.int32', 'cms.int32', ({(50, 36, 50, 37): '0'}, {}), '(0)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((51, 23, 51, 38), 'FWCore.ParameterSet.Config.double', 'cms.double', ({(51, 34, 51, 37): '0.4'}, {}), '(0.4)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((52, 27, 52, 42), 'FWCore.ParameterSet.Config.double', 'cms.double', ({(52, 38, 52, 41): '0.5'}, {}), '(0.5)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((53, 33, 53, 48), 'FWCore.ParameterSet.Config.double', 'cms.double', ({(53, 44, 53, 47): '1.0'}, {}), '(1.0)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((54, 29, 54, 44), 'FWCore.ParameterSet.Config.bool', 'cms.bool', ({(54, 38, 54, 43): 'False'}, {}), '(False)', True, 'import FWCore.ParameterSet.Config as cms\n')] |
RogueScholar/debreate | wizbin/build.py | 0abc168c51336b31ff87c61f84bc7bb6000e88f4 | # -*- coding: utf-8 -*-
## \package wizbin.build
# MIT licensing
# See: docs/LICENSE.txt
import commands, os, shutil, subprocess, traceback, wx
from dbr.functions import FileUnstripped
from dbr.language import GT
from dbr.log import DebugEnabled
from dbr.log import Logger
from dbr.md5 import WriteMD5
from fileio.fileio import ReadFile
from fileio.fileio import WriteFile
from globals.bitmaps import ICON_EXCLAMATION
from globals.bitmaps import ICON_INFORMATION
from globals.errorcodes import dbrerrno
from globals.execute import ExecuteCommand
from globals.execute import GetExecutable
from globals.execute import GetSystemInstaller
from globals.ident import btnid
from globals.ident import chkid
from globals.ident import inputid
from globals.ident import pgid
from globals.paths import ConcatPaths
from globals.paths import PATH_app
from globals.strings import GS
from globals.strings import RemoveEmptyLines
from globals.strings import TextIsEmpty
from globals.system import PY_VER_MAJ
from globals.tooltips import SetPageToolTips
from input.toggle import CheckBox
from input.toggle import CheckBoxESS
from startup.tests import UsingTest
from ui.button import CreateButton
from ui.checklist import CheckListDialog
from ui.dialog import DetailedMessageDialog
from ui.dialog import ShowErrorDialog
from ui.layout import BoxSizer
from ui.output import OutputLog
from ui.panel import BorderedPanel
from ui.progress import PD_DEFAULT_STYLE
from ui.progress import ProgressDialog
from ui.progress import TimedProgressDialog
from ui.style import layout as lyt
from wiz.helper import FieldEnabled
from wiz.helper import GetField
from wiz.helper import GetMainWindow
from wiz.helper import GetPage
from wiz.wizard import WizardPage
## Build page
class Page(WizardPage):
## Constructor
#
# \param parent
# Parent <b><i>wx.Window</i></b> instance
def __init__(self, parent):
WizardPage.__init__(self, parent, pgid.BUILD)
# ----- Extra Options
pnl_options = BorderedPanel(self)
self.chk_md5 = CheckBoxESS(pnl_options, chkid.MD5, GT(u'Create md5sums file'),
name=u'MD5', defaultValue=True, commands=u'md5sum')
# The » character denotes that an alternate tooltip should be shown if the control is disabled
self.chk_md5.tt_name = u'md5»'
self.chk_md5.col = 0
# Option to strip binaries
self.chk_strip = CheckBoxESS(pnl_options, chkid.STRIP, GT(u'Strip binaries'),
name=u'strip»', defaultValue=True, commands=u'strip')
self.chk_strip.col = 0
# Deletes the temporary build tree
self.chk_rmstage = CheckBoxESS(pnl_options, chkid.DELETE, GT(u'Delete staged directory'),
name=u'RMSTAGE', defaultValue=True)
self.chk_rmstage.col = 0
# Checks the output .deb for errors
self.chk_lint = CheckBoxESS(pnl_options, chkid.LINT, GT(u'Check package for errors with lintian'),
name=u'LINTIAN', defaultValue=True, commands=u'lintian')
self.chk_lint.tt_name = u'lintian»'
self.chk_lint.col = 0
# Installs the deb on the system
self.chk_install = CheckBox(pnl_options, chkid.INSTALL, GT(u'Install package after build'),
name=u'INSTALL', commands=(u'gdebi-gtk', u'gdebi-kde',))
self.chk_install.tt_name = u'install»'
self.chk_install.col = 0
# *** Lintian Overrides *** #
if UsingTest(u'alpha'):
# FIXME: Move next to lintian check box
Logger.Info(__name__, u'Enabling alpha feature "lintian overrides" option')
self.lint_overrides = []
btn_lint_overrides = CreateButton(self, label=GT(u'Lintian overrides'))
btn_lint_overrides.Bind(wx.EVT_BUTTON, self.OnSetLintOverrides)
btn_build = CreateButton(self, btnid.BUILD, GT(u'Build'), u'build', 64)
# Display log
dsp_log = OutputLog(self)
SetPageToolTips(self)
# *** Event Handling *** #
btn_build.Bind(wx.EVT_BUTTON, self.OnBuild)
# *** Layout *** #
lyt_options = wx.GridBagSizer()
next_row = 0
prev_row = next_row
for CHK in pnl_options.Children:
row = next_row
FLAGS = lyt.PAD_LR
if CHK.col:
row = prev_row
FLAGS = wx.RIGHT
lyt_options.Add(CHK, (row, CHK.col), flag=FLAGS, border=5)
if not CHK.col:
prev_row = next_row
next_row += 1
pnl_options.SetSizer(lyt_options)
pnl_options.SetAutoLayout(True)
pnl_options.Layout()
lyt_buttons = BoxSizer(wx.HORIZONTAL)
lyt_buttons.Add(btn_build, 1)
lyt_main = BoxSizer(wx.VERTICAL)
lyt_main.AddSpacer(10)
lyt_main.Add(wx.StaticText(self, label=GT(u'Extra Options')), 0,
lyt.ALGN_LB|wx.LEFT, 5)
lyt_main.Add(pnl_options, 0, wx.LEFT, 5)
lyt_main.AddSpacer(5)
if UsingTest(u'alpha'):
#lyt_main.Add(wx.StaticText(self, label=GT(u'Lintian overrides')), 0, wx.LEFT, 5)
lyt_main.Add(btn_lint_overrides, 0, wx.LEFT, 5)
lyt_main.AddSpacer(5)
lyt_main.Add(lyt_buttons, 0, lyt.ALGN_C)
lyt_main.Add(dsp_log, 2, wx.EXPAND|lyt.PAD_LRB, 5)
self.SetAutoLayout(True)
self.SetSizer(lyt_main)
self.Layout()
## Method that builds the actual Debian package
#
# \param task_list
# \b \e dict : Task string IDs & page data
# \param build_path
# \b \e unicode|str : Directory where .deb will be output
# \param filename
# \b \e unicode|str : Basename of output file without .deb extension
# \return
# \b \e dbrerror : SUCCESS if build completed successfully
def Build(self, task_list, build_path, filename):
# Declare this here in case of error before progress dialog created
build_progress = None
try:
# Other mandatory tasks that will be processed
mandatory_tasks = (
u'stage',
u'install_size',
u'control',
u'build',
)
# Add other mandatory tasks
for T in mandatory_tasks:
task_list[T] = None
task_count = len(task_list)
# Add each file for updating progress dialog
if u'files' in task_list:
task_count += len(task_list[u'files'])
# Add each script for updating progress dialog
if u'scripts' in task_list:
task_count += len(task_list[u'scripts'])
if DebugEnabled():
task_msg = GT(u'Total tasks: {}').format(task_count)
print(u'DEBUG: [{}] {}'.format(__name__, task_msg))
for T in task_list:
print(u'\t{}'.format(T))
create_changelog = u'changelog' in task_list
create_copyright = u'copyright' in task_list
pg_control = GetPage(pgid.CONTROL)
pg_menu = GetPage(pgid.MENU)
stage_dir = u'{}/{}__dbp__'.format(build_path, filename)
if os.path.isdir(u'{}/DEBIAN'.format(stage_dir)):
try:
shutil.rmtree(stage_dir)
except OSError:
ShowErrorDialog(GT(u'Could not free stage directory: {}').format(stage_dir),
title=GT(u'Cannot Continue'))
return (dbrerrno.EEXIST, None)
# Actual path to new .deb
deb = u'"{}/{}.deb"'.format(build_path, filename)
progress = 0
task_msg = GT(u'Preparing build tree')
Logger.Debug(__name__, task_msg)
wx.Yield()
build_progress = ProgressDialog(GetMainWindow(), GT(u'Building'), task_msg,
maximum=task_count,
style=PD_DEFAULT_STYLE|wx.PD_ELAPSED_TIME|wx.PD_ESTIMATED_TIME|wx.PD_CAN_ABORT)
DIR_debian = ConcatPaths((stage_dir, u'DEBIAN'))
# Make a fresh build tree
os.makedirs(DIR_debian)
progress += 1
if build_progress.WasCancelled():
build_progress.Destroy()
return (dbrerrno.ECNCLD, None)
def UpdateProgress(current_task, message=None):
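                """Advance the build progress dialog, optionally logging the current task message."""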
task_eval = u'{} / {}'.format(current_task, task_count)
if message:
Logger.Debug(__name__, u'{} ({})'.format(message, task_eval))
wx.Yield()
build_progress.Update(current_task, message)
return
wx.Yield()
build_progress.Update(current_task)
# *** Files *** #
if u'files' in task_list:
UpdateProgress(progress, GT(u'Copying files'))
no_follow_link = GetField(GetPage(pgid.FILES), chkid.SYMLINK).IsChecked()
# TODO: move this into a file functions module
def _copy(f_src, f_tgt, exe=False):
# NOTE: Python 3 appears to have follow_symlinks option for shutil.copy
# FIXME: copying nested symbolic link may not work
if os.path.isdir(f_src):
if os.path.islink(f_src) and no_follow_link:
Logger.Debug(__name__, u'Adding directory symbolic link to stage: {}'.format(f_tgt))
os.symlink(os.readlink(f_src), f_tgt)
else:
Logger.Debug(__name__, u'Adding directory to stage: {}'.format(f_tgt))
shutil.copytree(f_src, f_tgt)
os.chmod(f_tgt, 0o0755)
elif os.path.isfile(f_src):
if os.path.islink(f_src) and no_follow_link:
Logger.Debug(__name__, u'Adding file symbolic link to stage: {}'.format(f_tgt))
os.symlink(os.readlink(f_src), f_tgt)
else:
if exe:
Logger.Debug(__name__, u'Adding executable to stage: {}'.format(f_tgt))
else:
Logger.Debug(__name__, u'Adding file to stage: {}'.format(f_tgt))
shutil.copy(f_src, f_tgt)
# Set FILE permissions
if exe:
os.chmod(f_tgt, 0o0755)
else:
os.chmod(f_tgt, 0o0644)
files_data = task_list[u'files']
for FILE in files_data:
file_defs = FILE.split(u' -> ')
source_file = file_defs[0]
target_file = u'{}{}/{}'.format(stage_dir, file_defs[2], file_defs[1])
target_dir = os.path.dirname(target_file)
if not os.path.isdir(target_dir):
os.makedirs(target_dir)
# Remove asterisks from executables
exe = False
if source_file[-1] == u'*':
exe = True
source_file = source_file[:-1]
_copy(source_file, u'{}/{}'.format(target_dir, os.path.basename(source_file)), exe)
# Individual files
progress += 1
UpdateProgress(progress)
# Entire file task
progress += 1
if build_progress.WasCancelled():
build_progress.Destroy()
return (dbrerrno.ECNCLD, None)
# *** Strip files ***#
# FIXME: Only needs to be run if the 'files' step is used
if u'strip' in task_list:
UpdateProgress(progress, GT(u'Stripping binaries'))
for ROOT, DIRS, FILES in os.walk(stage_dir): #@UnusedVariable
for F in FILES:
# Don't check files in DEBIAN directory
if ROOT != DIR_debian:
F = ConcatPaths((ROOT, F))
if FileUnstripped(F):
Logger.Debug(__name__, u'Unstripped file: {}'.format(F))
# FIXME: Strip command should be set as class member?
ExecuteCommand(GetExecutable(u'strip'), F)
progress += 1
if build_progress.WasCancelled():
build_progress.Destroy()
return (dbrerrno.ECNCLD, None)
package = GetField(pg_control, inputid.PACKAGE).GetValue()
# Make sure that the directory is available in which to place documentation
if create_changelog or create_copyright:
doc_dir = u'{}/usr/share/doc/{}'.format(stage_dir, package)
if not os.path.isdir(doc_dir):
os.makedirs(doc_dir)
# *** Changelog *** #
if create_changelog:
UpdateProgress(progress, GT(u'Creating changelog'))
# If changelog will be installed to default directory
changelog_target = task_list[u'changelog'][0]
if changelog_target == u'STANDARD':
changelog_target = ConcatPaths((u'{}/usr/share/doc'.format(stage_dir), package))
else:
changelog_target = ConcatPaths((stage_dir, changelog_target))
if not os.path.isdir(changelog_target):
os.makedirs(changelog_target)
WriteFile(u'{}/changelog'.format(changelog_target), task_list[u'changelog'][1])
CMD_gzip = GetExecutable(u'gzip')
if CMD_gzip:
UpdateProgress(progress, GT(u'Compressing changelog'))
c = u'{} -n --best "{}/changelog"'.format(CMD_gzip, changelog_target)
clog_status = commands.getstatusoutput(c.encode(u'utf-8'))
if clog_status[0]:
ShowErrorDialog(GT(u'Could not compress changelog'), clog_status[1], warn=True, title=GT(u'Warning'))
progress += 1
if build_progress.WasCancelled():
build_progress.Destroy()
return (dbrerrno.ECNCLD, None)
# *** Copyright *** #
if create_copyright:
UpdateProgress(progress, GT(u'Creating copyright'))
WriteFile(u'{}/usr/share/doc/{}/copyright'.format(stage_dir, package), task_list[u'copyright'])
progress += 1
if build_progress.WasCancelled():
build_progress.Destroy()
return (dbrerrno.ECNCLD, None)
# Characters that should not be in filenames
invalid_chars = (u' ', u'/')
# *** Menu launcher *** #
if u'launcher' in task_list:
UpdateProgress(progress, GT(u'Creating menu launcher'))
# This might be changed later to set a custom directory
menu_dir = u'{}/usr/share/applications'.format(stage_dir)
menu_filename = pg_menu.GetOutputFilename()
# Remove invalid characters from filename
for char in invalid_chars:
menu_filename = menu_filename.replace(char, u'_')
if not os.path.isdir(menu_dir):
os.makedirs(menu_dir)
WriteFile(u'{}/{}.desktop'.format(menu_dir, menu_filename), task_list[u'launcher'])
progress += 1
if build_progress.WasCancelled():
build_progress.Destroy()
return (dbrerrno.ECNCLD, None)
# *** md5sums file *** #
# Good practice to create hashes before populating DEBIAN directory
if u'md5sums' in task_list:
UpdateProgress(progress, GT(u'Creating md5sums'))
if not WriteMD5(stage_dir, parent=build_progress):
# Couldn't call md5sum command
build_progress.Cancel()
progress += 1
if build_progress.WasCancelled():
build_progress.Destroy()
return (dbrerrno.ECNCLD, None)
# *** Scripts *** #
if u'scripts' in task_list:
UpdateProgress(progress, GT(u'Creating scripts'))
scripts = task_list[u'scripts']
for SCRIPT in scripts:
script_name = SCRIPT
script_text = scripts[SCRIPT]
script_filename = ConcatPaths((stage_dir, u'DEBIAN', script_name))
WriteFile(script_filename, script_text)
# Make sure script path is wrapped in quotes to avoid whitespace errors
os.chmod(script_filename, 0o0755)
os.system((u'chmod +x "{}"'.format(script_filename)))
# Individual scripts
progress += 1
UpdateProgress(progress)
# Entire script task
progress += 1
if build_progress.WasCancelled():
build_progress.Destroy()
return (dbrerrno.ECNCLD, None)
# *** Control file *** #
UpdateProgress(progress, GT(u'Getting installed size'))
# Get installed-size
installed_size = os.popen((u'du -hsk "{}"'.format(stage_dir))).readlines()
installed_size = installed_size[0].split(u'\t')
installed_size = installed_size[0]
# Insert Installed-Size into control file
control_data = pg_control.Get().split(u'\n')
control_data.insert(2, u'Installed-Size: {}'.format(installed_size))
progress += 1
if build_progress.WasCancelled():
build_progress.Destroy()
return (dbrerrno.ECNCLD, None)
# Create final control file
UpdateProgress(progress, GT(u'Creating control file'))
# dpkg fails if there is no newline at end of file
control_data = u'\n'.join(control_data).strip(u'\n')
# Ensure there is only one empty trailing newline
# Two '\n' to show physical empty line, but not required
# Perhaps because string is not null terminated???
control_data = u'{}\n\n'.format(control_data)
WriteFile(u'{}/DEBIAN/control'.format(stage_dir), control_data, noStrip=u'\n')
progress += 1
if build_progress.WasCancelled():
build_progress.Destroy()
return (dbrerrno.ECNCLD, None)
# *** Final build *** #
UpdateProgress(progress, GT(u'Running dpkg'))
working_dir = os.path.split(stage_dir)[0]
c_tree = os.path.split(stage_dir)[1]
deb_package = u'{}.deb'.format(filename)
# Change to the working directory because dpkg seems to have problems with spaces in the path
os.chdir(working_dir)
# HACK to fix file/dir permissions
for ROOT, DIRS, FILES in os.walk(stage_dir):
for D in DIRS:
D = u'{}/{}'.format(ROOT, D)
os.chmod(D, 0o0755)
for F in FILES:
F = u'{}/{}'.format(ROOT, F)
if os.access(F, os.X_OK):
os.chmod(F, 0o0755)
else:
os.chmod(F, 0o0644)
# FIXME: Should check for working fakeroot & dpkg-deb executables
build_status = commands.getstatusoutput((u'{} {} -b "{}" "{}"'.format(GetExecutable(u'fakeroot'), GetExecutable(u'dpkg-deb'), c_tree, deb_package)))
progress += 1
if build_progress.WasCancelled():
build_progress.Destroy()
return (dbrerrno.ECNCLD, None)
# *** Delete staged directory *** #
if u'rmstage' in task_list:
UpdateProgress(progress, GT(u'Removing temp directory'))
try:
shutil.rmtree(stage_dir)
except OSError:
ShowErrorDialog(GT(u'An error occurred when trying to delete the build tree'),
parent=build_progress)
progress += 1
if build_progress.WasCancelled():
build_progress.Destroy()
return (dbrerrno.ECNCLD, None)
# *** ERROR CHECK
if u'lintian' in task_list:
UpdateProgress(progress, GT(u'Checking package for errors'))
# FIXME: Should be set as class member?
CMD_lintian = GetExecutable(u'lintian')
errors = commands.getoutput((u'{} {}'.format(CMD_lintian, deb)))
if errors != wx.EmptyString:
e1 = GT(u'Lintian found some issues with the package.')
e2 = GT(u'Details saved to {}').format(filename)
WriteFile(u'{}/{}.lintian'.format(build_path, filename), errors)
DetailedMessageDialog(build_progress, GT(u'Lintian Errors'),
ICON_INFORMATION, u'{}\n{}.lintian'.format(e1, e2), errors).ShowModal()
progress += 1
# Close progress dialog
wx.Yield()
build_progress.Update(progress)
build_progress.Destroy()
# Build completed successfully
if not build_status[0]:
return (dbrerrno.SUCCESS, deb_package)
if PY_VER_MAJ <= 2:
# Unicode decoder has trouble with certain characters. Replace any
# non-decodable characters with � (0xFFFD).
build_output = list(build_status[1])
# String & unicode string incompatibilities
index = 0
for C in build_output:
try:
GS(C)
except UnicodeDecodeError:
build_output[index] = u'�'
index += 1
build_status = (build_status[0], u''.join(build_output))
# Build failed
return (build_status[0], build_status[1])
except:
if build_progress:
build_progress.Destroy()
return (dbrerrno.EUNKNOWN, traceback.format_exc())
## TODO: Doxygen
#
# \return
# \b \e tuple containing Return code & build details
def BuildPrep(self):
# Declare these here in case of error before dialogs created
save_dia = None
prebuild_progress = None
try:
# List of tasks for build process
# 'stage' should be very first task
task_list = {}
# Control page
pg_control = GetPage(pgid.CONTROL)
fld_package = GetField(pg_control, inputid.PACKAGE)
fld_version = GetField(pg_control, inputid.VERSION)
fld_maint = GetField(pg_control, inputid.MAINTAINER)
fld_email = GetField(pg_control, inputid.EMAIL)
fields_control = (
fld_package,
fld_version,
fld_maint,
fld_email,
)
# Menu launcher page
pg_launcher = GetPage(pgid.MENU)
# Check to make sure that all required fields have values
required = list(fields_control)
if pg_launcher.IsOkay():
task_list[u'launcher'] = pg_launcher.Get()
required.append(GetField(pg_launcher, inputid.NAME))
if not GetField(pg_launcher, chkid.FNAME).GetValue():
required.append(GetField(pg_launcher, inputid.FNAME))
for item in required:
if TextIsEmpty(item.GetValue()):
field_name = GT(item.GetName().title())
page_name = pg_control.GetName()
if item not in fields_control:
page_name = pg_launcher.GetName()
return (dbrerrno.FEMPTY, u'{} ➜ {}'.format(page_name, field_name))
# Get information from control page for default filename
package = fld_package.GetValue()
# Remove whitespace
package = package.strip(u' \t')
package = u'-'.join(package.split(u' '))
version = fld_version.GetValue()
# Remove whitespace
version = version.strip(u' \t')
version = u''.join(version.split())
arch = GetField(pg_control, inputid.ARCH).GetStringSelection()
# Dialog for save destination
ttype = GT(u'Debian packages')
save_dia = wx.FileDialog(self, GT(u'Save'), os.getcwd(), wx.EmptyString, u'{}|*.deb'.format(ttype),
wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT|wx.FD_CHANGE_DIR)
save_dia.SetFilename(u'{}_{}_{}.deb'.format(package, version, arch))
if not save_dia.ShowModal() == wx.ID_OK:
return (dbrerrno.ECNCLD, None)
build_path = os.path.split(save_dia.GetPath())[0]
filename = os.path.split(save_dia.GetPath())[1].split(u'.deb')[0]
# Control, menu, & build pages not added to this list
page_checks = (
(pgid.FILES, u'files'),
(pgid.SCRIPTS, u'scripts'),
(pgid.CHANGELOG, u'changelog'),
(pgid.COPYRIGHT, u'copyright'),
)
# Install step is not added to this list
# 'control' should be after 'md5sums'
# 'build' should be after 'control'
other_checks = (
(self.chk_md5, u'md5sums'),
(self.chk_strip, u'strip'),
(self.chk_rmstage, u'rmstage'),
(self.chk_lint, u'lintian'),
)
prep_task_count = len(page_checks) + len(other_checks)
progress = 0
wx.Yield()
prebuild_progress = ProgressDialog(GetMainWindow(), GT(u'Preparing to build'),
maximum=prep_task_count)
if wx.MAJOR_VERSION < 3:
# Resize dialog for better fit
pb_size = prebuild_progress.GetSizeTuple()
pb_size = (pb_size[0]+200, pb_size[1])
prebuild_progress.SetSize(pb_size)
prebuild_progress.CenterOnParent()
for PID, id_string in page_checks:
wx.Yield()
prebuild_progress.Update(progress, GT(u'Checking {}').format(id_string))
wizard_page = GetPage(PID)
if wizard_page.IsOkay():
task_list[id_string] = wizard_page.Get()
progress += 1
for task_check, id_string in other_checks:
wx.Yield()
prebuild_progress.Update(progress, GT(u'Testing for: {}').format(task_check.GetLabel()))
if task_check.GetValue():
task_list[id_string] = None
progress += 1
# Close progress dialog
wx.Yield()
prebuild_progress.Update(progress)
prebuild_progress.Destroy()
return (dbrerrno.SUCCESS, (task_list, build_path, filename))
except:
if save_dia:
save_dia.Destroy()
if prebuild_progress:
prebuild_progress.Destroy()
return (dbrerrno.EUNKNOWN, traceback.format_exc())
## TODO: Doxygen
def GetSaveData(self):
build_list = []
options = (
self.chk_md5,
self.chk_rmstage,
self.chk_lint,
)
for O in options:
if O.GetValue():
build_list.append(u'1')
else:
build_list.append(u'0')
if self.chk_strip.GetValue():
build_list.append(u'strip')
return u'<<BUILD>>\n{}\n<</BUILD>>'.format(u'\n'.join(build_list))
## Installs the built .deb package onto the system
#
# Uses the system's package installer:
# gdebi if available or dpkg
#
# Shows a success dialog if installed. Otherwise shows an
# error dialog.
# \param package
# \b \e unicode|str : Path to package to be installed
def InstallPackage(self, package):
system_installer = GetSystemInstaller()
if not system_installer:
ShowErrorDialog(
GT(u'Cannot install package'),
GT(u'A compatible package manager could not be found on the system'),
__name__,
warn=True
)
return
Logger.Info(__name__, GT(u'Attempting to install package: {}').format(package))
Logger.Info(__name__, GT(u'Installing with {}').format(system_installer))
install_cmd = (system_installer, package,)
wx.Yield()
# FIXME: Use ExecuteCommand here
install_output = subprocess.Popen(install_cmd)
# Command appears to not have been executed correctly
if install_output == None:
ShowErrorDialog(
GT(u'Could not install package: {}'),
GT(u'An unknown error occurred'),
__name__
)
return
# Command executed but did not return success code
if install_output.returncode:
err_details = (
GT(u'Process returned code {}').format(install_output.returncode),
GT(u'Command executed: {}').format(u' '.join(install_cmd)),
)
ShowErrorDialog(
GT(u'An error occurred during installation'),
u'\n'.join(err_details),
__name__
)
return
## TODO: Doxygen
def OnBuild(self, event=None):
# Build preparation
ret_code, build_prep = self.BuildPrep()
if ret_code == dbrerrno.ECNCLD:
return
if ret_code == dbrerrno.FEMPTY:
err_dia = DetailedMessageDialog(GetMainWindow(), GT(u'Cannot Continue'), ICON_EXCLAMATION,
text=u'{}\n{}'.format(GT(u'One of the required fields is empty:'), build_prep))
err_dia.ShowModal()
err_dia.Destroy()
return
if ret_code == dbrerrno.SUCCESS:
task_list, build_path, filename = build_prep
# Actual build
ret_code, result = self.Build(task_list, build_path, filename)
# FIXME: Check .deb package timestamp to confirm build success
if ret_code == dbrerrno.SUCCESS:
DetailedMessageDialog(GetMainWindow(), GT(u'Success'), ICON_INFORMATION,
text=GT(u'Package created successfully')).ShowModal()
# Installing the package
if FieldEnabled(self.chk_install) and self.chk_install.GetValue():
self.InstallPackage(result)
return
if result:
ShowErrorDialog(GT(u'Package build failed'), result)
else:
ShowErrorDialog(GT(u'Package build failed with unknown error'))
return
if build_prep:
ShowErrorDialog(GT(u'Build preparation failed'), build_prep)
else:
ShowErrorDialog(GT(u'Build preparation failed with unknown error'))
## TODO: Doxygen
#
# TODO: Show warning dialog that this could take a while
# TODO: Add cancel option to progress dialog
# FIXME: List should be cached so no need for re-scanning
def OnSetLintOverrides(self, event=None):
Logger.Debug(__name__, GT(u'Setting Lintian overrides...'))
lintian_tags_file = u'{}/data/lintian/tags'.format(PATH_app)
if not os.path.isfile(lintian_tags_file):
Logger.Error(__name__, u'Lintian tags file is missing: {}'.format(lintian_tags_file))
return False
lint_tags = RemoveEmptyLines(ReadFile(lintian_tags_file, split=True))
if lint_tags:
Logger.Debug(__name__, u'Lintian tags set')
# DEBUG: Start
if DebugEnabled() and len(lint_tags) > 50:
print(u'  Reducing tag count to 50 ...')
lint_tags = lint_tags[:50]
Logger.Debug(__name__, u'Processing {} tags'.format(len(lint_tags)))
# DEBUG: End
tag_count = len(lint_tags)
def GetProgressMessage(message, count=tag_count):
return u'{} ({} {})'.format(message, count, GT(u'tags'))
progress = TimedProgressDialog(GetMainWindow(), GT(u'Building Tag List'),
GetProgressMessage(GT(u'Scanning default tags')))
progress.Start()
wx.Yield()
# Create the dialog
overrides_dialog = CheckListDialog(GetMainWindow(), title=GT(u'Lintian Overrides'),
allow_custom=True)
# FIXME: Needs progress dialog
overrides_dialog.InitCheckList(tuple(lint_tags))
progress.SetMessage(GetProgressMessage(GT(u'Setting selected overrides')))
for T in lint_tags:
if T in self.lint_overrides:
overrides_dialog.SetItemCheckedByLabel(T)
self.lint_overrides.remove(T)
progress.SetMessage(GetProgressMessage(GT(u'Adding custom tags'), len(self.lint_overrides)))
# Remaining tags should be custom entries
# FIXME:
if self.lint_overrides:
for T in self.lint_overrides:
overrides_dialog.AddItem(T, True)
progress.Stop()
if overrides_dialog.ShowModal() == wx.ID_OK:
# Remove old overrides
self.lint_overrides = []
for L in overrides_dialog.GetCheckedLabels():
Logger.Debug(__name__, GT(u'Adding Lintian override: {}').format(L))
self.lint_overrides.append(L)
return True
else:
Logger.Debug(__name__, u'Setting lintian tags failed')
return False
## TODO: Doxygen
#
# TODO: Use string names in project file but retain
# compatibility with older projects that use
# integer values.
def Set(self, data):
# ???: Redundant
self.Reset()
build_data = data.split(u'\n')
if GetExecutable(u'md5sum'):
try:
self.chk_md5.SetValue(int(build_data[0]))
except IndexError:
pass
try:
self.chk_rmstage.SetValue(int(build_data[1]))
except IndexError:
pass
if GetExecutable(u'lintian'):
try:
self.chk_lint.SetValue(int(build_data[2]))
except IndexError:
pass
self.chk_strip.SetValue(GetExecutable(u'strip') and u'strip' in build_data)
## TODO: Doxygen
def SetSummary(self, event=None):
pg_scripts = GetPage(pgid.SCRIPTS)
# Make sure the page is not destroyed so no error is thrown
if self:
# Set summary when "Build" page is shown
# Get the file count
files_total = GetPage(pgid.FILES).GetFileCount()
f = GT(u'File Count')
file_count = u'{}: {}'.format(f, files_total)
# Scripts to make
scripts_to_make = []
scripts = ((u'preinst', pg_scripts.chk_preinst),
(u'postinst', pg_scripts.chk_postinst),
(u'prerm', pg_scripts.chk_prerm),
(u'postrm', pg_scripts.chk_postrm))
for script in scripts:
if script[1].IsChecked():
scripts_to_make.append(script[0])
s = GT(u'Scripts')
if len(scripts_to_make):
scripts_to_make = u'{}: {}'.format(s, u', '.join(scripts_to_make))
else:
scripts_to_make = u'{}: 0'.format(s)
self.summary.SetValue(u'\n'.join((file_count, scripts_to_make)))
| [] |
maelstromdat/YOSHI | __main__.py | 67e5176f24ff12e598025d4250b408da564f53d1 | from YoshiViz import Gui
if __name__ == '__main__':
# file directory
gui = Gui.Gui()
"""
report_generator.\
generate_pdf_report(fileDirectory, repositoryName, tempCommunityType)
"""
print('the type of', repositoryName, 'is', tempCommunityType, '\n"check .\YoshiViz\output"')
| [((6, 10, 6, 19), 'YoshiViz.Gui.Gui', 'Gui.Gui', ({}, {}), '()', False, 'from YoshiViz import Gui\n')] |
LarsenClose/dr.hpotter | hpotter/src/lazy_init.py | ef6199ab563a92f3e4916277dbde9217126f36a9 | ''' Wrap an __init__ function so that I don't have to assign all the
parameters to a self. variable. '''
# https://stackoverflow.com/questions/5048329/python-decorator-for-automatic-binding-init-arguments
import inspect
from functools import wraps
def lazy_init(init):
''' Create a decorator to assign all the parameters to a self.
variable. '''
arg_names = inspect.getfullargspec(init)[0]
# pylint: disable=E1101
@wraps(init)
def new_init(self, *args):
for name, value in zip(arg_names[1:], args):
setattr(self, name, value)
init(self, *args)
return new_init
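# Illustrative usage (hypothetical class, not part of the original module):
# the decorated __init__ only has to declare its parameters.
class _Connection:
    @lazy_init
    def __init__(self, host, port):
        pass
# _Connection('localhost', 8080).port == 8080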
| [((15, 5, 15, 16), 'functools.wraps', 'wraps', ({(15, 11, 15, 15): 'init'}, {}), '(init)', False, 'from functools import wraps\n'), ((12, 16, 12, 44), 'inspect.getfullargspec', 'inspect.getfullargspec', ({(12, 39, 12, 43): 'init'}, {}), '(init)', False, 'import inspect\n')] |
technojam/MLian | main.py | 7632c5c7d4c44b1d87de9ab23c1ed7293962ca49 | # def register_feed():
import os
import cv2
path = '/UserImage'
cam = cv2.VideoCapture(0)
name=input("Name: ")
cv2.namedWindow("test")
img_counter = 0
while True:
ret, frame = cam.read()
if not ret:
print("failed to grab frame")
break
else:
cv2.imshow("test", frame)
k = cv2.waitKey(1)
if k%256 == 27:
# ESC pressed
print("Escape hit, closing...")
break
elif k%256 == 32:
# SPACE pressed
# img_name = "opencv_frame_{}.png".format(img_counter)
cv2.imwrite(name + ".jpg", frame)
# print("{} written!".format(img_name))
print("Image Captured! Proceed...")
img_counter += 1
cam.release()
cv2.destroyAllWindows() | [((5, 6, 5, 25), 'cv2.VideoCapture', 'cv2.VideoCapture', ({(5, 23, 5, 24): '0'}, {}), '(0)', False, 'import cv2\n'), ((8, 0, 8, 23), 'cv2.namedWindow', 'cv2.namedWindow', ({(8, 16, 8, 22): '"""test"""'}, {}), "('test')", False, 'import cv2\n'), ((35, 0, 35, 23), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ({}, {}), '()', False, 'import cv2\n'), ((18, 8, 18, 33), 'cv2.imshow', 'cv2.imshow', ({(18, 19, 18, 25): '"""test"""', (18, 27, 18, 32): 'frame'}, {}), "('test', frame)", False, 'import cv2\n'), ((20, 12, 20, 26), 'cv2.waitKey', 'cv2.waitKey', ({(20, 24, 20, 25): '1'}, {}), '(1)', False, 'import cv2\n'), ((28, 12, 28, 45), 'cv2.imwrite', 'cv2.imwrite', ({(28, 24, 28, 37): "(name + '.jpg')", (28, 39, 28, 44): 'frame'}, {}), "(name + '.jpg', frame)", False, 'import cv2\n')] |
Hiwyl/keras_cnn_finetune | models/train.py | f424302a72c8d05056a9af6f9b293003acb8398d | # -*- encoding: utf-8 -*-
'''
@Author : lance
@Email : [email protected]
'''
import time
from model_cx.inceptionresnet import inceptionresnet
from model_cx.vgg19two import vgg19_all_lr
from model_cx.inceptionv3 import inceptionv3
from model_cx.densenet import densenet
from model_cx.nasnet import nasnet
from model_cx.merge import merge
from model_cx.bcnn import bilinearnet
from model_cx.resnet import ResNet50
from model_cx.mobilenetv2 import mobilenetv2
from model_cx.senet import senet
if __name__=="__main__":
classes = 1
epochs = 100
steps_per_epoch = 113
validation_steps = 48
shape=(224,224)
print("开始训练...")
start = time.time()
#
# try:
# print("densenet")
# densenet(classes, epochs, steps_per_epoch, validation_steps, shape)
# except Exception as e:
# print(e)
# try:
# print("bcnn")
# bilinearnet(classes, epochs, steps_per_epoch, validation_steps, shape)
#
# except Exception as e:
# print(e)
# try:
# print("resnet")
# ResNet50(classes, epochs, steps_per_epoch, validation_steps, shape)
# except Exception as e:
# print(e)
try:
print("merge")
merge(classes, epochs, steps_per_epoch, validation_steps, shape)
except Exception as e:
print(e)
# try:
# print("ince_res")
# inceptionresnet(classes, epochs, steps_per_epoch, validation_steps, (299, 299))
# # inceptionresnet(classes, epochs, steps_per_epoch, validation_steps, shape)
# except Exception as e:
# print(e)
# try:
# print("mobilenetv2")
# mobilenetv2(classes, epochs, steps_per_epoch, validation_steps, shape)
# except Exception as e:
# print(e)
# try:
# print("inceptionv3")
# inceptionv3(classes, epochs, steps_per_epoch, validation_steps, (299, 299))
# # inceptionv3(classes, epochs, steps_per_epoch, validation_steps, shape)
# except Exception as e:
# print(e)
try:
print("nasnet")
nasnet(classes, epochs, steps_per_epoch, validation_steps, shape)
except Exception as e:
print(e)
try:
print("vgg19two")
vgg19_all_lr(classes, epochs, steps_per_epoch, validation_steps, shape)
except Exception as e:
print(e)
try:
print("senet")
vgg19_all_lr(classes, epochs, steps_per_epoch, validation_steps, (100,100))
except Exception as e:
print(e)
end = time.time()
print("ETA:", (end - start) / 3600) | [((29, 12, 29, 23), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((84, 10, 84, 21), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((49, 8, 49, 72), 'model_cx.merge.merge', 'merge', ({(49, 14, 49, 21): 'classes', (49, 23, 49, 29): 'epochs', (49, 31, 49, 46): 'steps_per_epoch', (49, 48, 49, 64): 'validation_steps', (49, 66, 49, 71): 'shape'}, {}), '(classes, epochs, steps_per_epoch, validation_steps, shape)', False, 'from model_cx.merge import merge\n'), ((71, 8, 71, 73), 'model_cx.nasnet.nasnet', 'nasnet', ({(71, 15, 71, 22): 'classes', (71, 24, 71, 30): 'epochs', (71, 32, 71, 47): 'steps_per_epoch', (71, 49, 71, 65): 'validation_steps', (71, 67, 71, 72): 'shape'}, {}), '(classes, epochs, steps_per_epoch, validation_steps, shape)', False, 'from model_cx.nasnet import nasnet\n'), ((76, 8, 76, 79), 'model_cx.vgg19two.vgg19_all_lr', 'vgg19_all_lr', ({(76, 21, 76, 28): 'classes', (76, 30, 76, 36): 'epochs', (76, 38, 76, 53): 'steps_per_epoch', (76, 55, 76, 71): 'validation_steps', (76, 73, 76, 78): 'shape'}, {}), '(classes, epochs, steps_per_epoch, validation_steps, shape)', False, 'from model_cx.vgg19two import vgg19_all_lr\n'), ((81, 8, 81, 83), 'model_cx.vgg19two.vgg19_all_lr', 'vgg19_all_lr', ({(81, 21, 81, 28): 'classes', (81, 30, 81, 36): 'epochs', (81, 38, 81, 53): 'steps_per_epoch', (81, 55, 81, 71): 'validation_steps', (81, 73, 81, 82): '(100, 100)'}, {}), '(classes, epochs, steps_per_epoch, validation_steps, (100, 100))', False, 'from model_cx.vgg19two import vgg19_all_lr\n')] |
alpiges/probnum | src/probnum/randprocs/markov/integrator/_preconditioner.py | 2e4153cb0df559984e09ec74487ef6c9d3f6d464 | """Coordinate changes in state space models."""
import abc
try:
# cached_property is only available in Python >=3.8
from functools import cached_property
except ImportError:
from cached_property import cached_property
import numpy as np
import scipy.special # for vectorised factorial
from probnum import config, linops, randvars
def apply_precon(precon, rv):
# public (because it is needed in some integrator implementations),
# but not exposed to the 'randprocs' namespace
# (i.e. not imported in any __init__.py).
# There is no way of checking whether `rv` has its Cholesky factor computed already or not.
# Therefore, since we need to update the Cholesky factor for square-root filtering,
# we also update the Cholesky factor for non-square-root algorithms here,
# which implies additional cost.
# See Issues #319 and #329.
# When they are resolved, this function here will hopefully be superfluous.
new_mean = precon @ rv.mean
new_cov_cholesky = precon @ rv.cov_cholesky # precon is diagonal, so this is valid
new_cov = new_cov_cholesky @ new_cov_cholesky.T
return randvars.Normal(new_mean, new_cov, cov_cholesky=new_cov_cholesky)
class Preconditioner(abc.ABC):
"""Coordinate change transformations as preconditioners in state space models.
For some models, this makes the filtering and smoothing steps more numerically
stable.
"""
@abc.abstractmethod
def __call__(self, step) -> np.ndarray:
# if more than step is needed, add them into the signature in the future
raise NotImplementedError
@cached_property
def inverse(self) -> "Preconditioner":
raise NotImplementedError
class NordsieckLikeCoordinates(Preconditioner):
"""Nordsieck-like coordinates.
Similar to Nordsieck coordinates (which store the Taylor coefficients instead of the
derivatives), but better for ODE filtering and smoothing. Used in integrator-transitions, e.g. in
:class:`IntegratedWienerTransition`.
"""
def __init__(self, powers, scales, dimension):
# Clean way of assembling these coordinates cheaply,
# because the powers and scales of the inverse
# are better read off than inverted
self.powers = powers
self.scales = scales
self.dimension = dimension
@classmethod
def from_order(cls, order, dimension):
# used to conveniently initialise in the beginning
powers = np.arange(order, -1, -1)
scales = scipy.special.factorial(powers)
return cls(
powers=powers + 0.5,
scales=scales,
dimension=dimension,
)
def __call__(self, step):
scaling_vector = np.abs(step) ** self.powers / self.scales
if config.matrix_free:
return linops.Kronecker(
A=linops.Identity(self.dimension),
B=linops.Scaling(factors=scaling_vector),
)
return np.kron(np.eye(self.dimension), np.diag(scaling_vector))
@cached_property
def inverse(self) -> "NordsieckLikeCoordinates":
return NordsieckLikeCoordinates(
powers=-self.powers,
scales=1.0 / self.scales,
dimension=self.dimension,
)
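# Illustrative example (not part of the original module): for
# NordsieckLikeCoordinates.from_order(2, 1), powers == [2.5, 1.5, 0.5] and
# scales == [2., 1., 1.], so calling the preconditioner with a step h gives
# (in the dense code path) the diagonal matrix
# diag(|h|**2.5 / 2, |h|**1.5, |h|**0.5).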
| [((33, 11, 33, 76), 'probnum.randvars.Normal', 'randvars.Normal', (), '', False, 'from probnum import config, linops, randvars\n'), ((72, 17, 72, 41), 'numpy.arange', 'np.arange', ({(72, 27, 72, 32): 'order', (72, 34, 72, 36): '-1', (72, 38, 72, 40): '-1'}, {}), '(order, -1, -1)', True, 'import numpy as np\n'), ((87, 23, 87, 45), 'numpy.eye', 'np.eye', ({(87, 30, 87, 44): 'self.dimension'}, {}), '(self.dimension)', True, 'import numpy as np\n'), ((87, 47, 87, 70), 'numpy.diag', 'np.diag', ({(87, 55, 87, 69): 'scaling_vector'}, {}), '(scaling_vector)', True, 'import numpy as np\n'), ((81, 25, 81, 37), 'numpy.abs', 'np.abs', ({(81, 32, 81, 36): 'step'}, {}), '(step)', True, 'import numpy as np\n'), ((84, 18, 84, 49), 'probnum.linops.Identity', 'linops.Identity', ({(84, 34, 84, 48): 'self.dimension'}, {}), '(self.dimension)', False, 'from probnum import config, linops, randvars\n'), ((85, 18, 85, 56), 'probnum.linops.Scaling', 'linops.Scaling', (), '', False, 'from probnum import config, linops, randvars\n')] |
mina-gaid/scp | allauth/socialaccount/providers/linkedin/provider.py | 38e1cd303d4728a987df117f666ce194e241ed1a | from allauth.socialaccount import providers
from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.oauth.provider import OAuthProvider
from allauth.socialaccount import app_settings
class LinkedInAccount(ProviderAccount):
def get_profile_url(self):
return self.account.extra_data.get('public-profile-url')
def get_avatar_url(self):
# try to return the higher res picture-urls::(original) first
try:
if self.account.extra_data.get('picture-urls', {}).get(
'picture-url'):
return self.account.extra_data.get('picture-urls', {}).get(
'picture-url')
except:
# if we can't get higher res for any reason, we'll just return the
# low res
pass
return self.account.extra_data.get('picture-url')
def to_str(self):
dflt = super(LinkedInAccount, self).to_str()
name = self.account.extra_data.get('name', dflt)
first_name = self.account.extra_data.get('first-name', None)
last_name = self.account.extra_data.get('last-name', None)
if first_name and last_name:
name = first_name + ' ' + last_name
return name
class LinkedInProvider(OAuthProvider):
id = 'linkedin'
name = 'LinkedIn'
account_class = LinkedInAccount
def get_default_scope(self):
scope = []
if app_settings.QUERY_EMAIL:
scope.append('r_emailaddress')
return scope
def get_profile_fields(self):
default_fields = ['id',
'first-name',
'last-name',
'email-address',
'picture-url',
'picture-urls::(original)',
# picture-urls::(original) is higher res
'public-profile-url']
fields = self.get_settings().get('PROFILE_FIELDS', default_fields)
return fields
def extract_uid(self, data):
return data['id']
def extract_common_fields(self, data):
return dict(email=data.get('email-address'),
first_name=data.get('first-name'),
last_name=data.get('last-name'))
providers.registry.register(LinkedInProvider)
| [((67, 0, 67, 45), 'allauth.socialaccount.providers.registry.register', 'providers.registry.register', ({(67, 28, 67, 44): 'LinkedInProvider'}, {}), '(LinkedInProvider)', False, 'from allauth.socialaccount import providers\n')] |
CCTQL/2048-api | game2048/myNew.py | a75316a90e9a7c8c9171e39e1d1fc24cbac3ba1a | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets
from torch.autograd import Variable
from sklearn.model_selection import train_test_split
import time
import pandas as pd
import numpy as np
import csv
batch_size = 128
NUM_EPOCHS = 30
LR = 0.001
TIME_STEP = 4
class CCRNN(nn.Module):
def __init__(self):
# Inherit RNN (initialize the parent class)
super(CCRNN, self).__init__()
self.ccLSTM = nn.LSTM(
input_size=4,
hidden_size=128,
num_layers=4,
bidirectional=True,
batch_first=True
)
self.ccCNN22 = nn.Conv2d(
in_channels=1,
out_channels=1,
kernel_size=2,
stride=2,
padding=0
)
self.ccCNN14 = nn.Conv2d(
in_channels=1,
out_channels=1,
kernel_size=(1, 4),
stride=1,
padding=0
)
self.ccCNN41 = nn.Conv2d(
in_channels=1,
out_channels=1,
kernel_size=(4, 1),
stride=1,
padding=0
)
self.CNN22toFC = nn.Linear(4, 64)
self.CNN41toFC = nn.Linear(4, 32)
self.CNN14toFC = nn.Linear(4, 32)
self.LSTMtoFC = nn.Linear(256, 128)
self.FCtoOut = nn.Linear(256, 4)
def forward(self, x):
LSTM_out, (h_n, c_n) = self.ccLSTM(x, None)
CNN_in = torch.unsqueeze(x[:, 0:4, :], 1)
CNN_out22 = self.ccCNN22(CNN_in)
CNN_out41 = self.ccCNN41(CNN_in)
CNN_out14 = self.ccCNN14(CNN_in)
CNN22_reshape = CNN_out22.view(-1, 4)
CNN14_reshape = CNN_out41.view(-1, 4)
CNN41_reshape = CNN_out14.view(-1, 4)
CNN22toFC = self.CNN22toFC(CNN22_reshape)
CNN14toFC = self.CNN14toFC(CNN14_reshape)
CNN41toFC = self.CNN41toFC(CNN41_reshape)
LSTMtoFC = self.LSTMtoFC(LSTM_out[:, -1, :])
CNNandLSTM = torch.cat((CNN22toFC, CNN41toFC, CNN14toFC, LSTMtoFC), 1)
out = self.FCtoOut(CNNandLSTM)
return out
#------------------ Load data -----------------------------
csv_data = pd.read_csv('./drive/My Drive/DATA.csv')
csv_data = csv_data.values
A = csv_data.shape[0]
board_data = csv_data[:,0:16]
# X = np.log2(X)
X = torch.FloatTensor(board_data)
X = np.int64(board_data)
# Concatenate with the transpose
X = np.reshape(X, (-1,4,4))
XT = X.transpose(0,2,1)
X = np.concatenate((X,XT),axis=1)
print(X.shape)
direction_data = csv_data[:,16]
Y = np.int64(direction_data)
#-------------------------------------------------------
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2,shuffle=False)
X_train = torch.FloatTensor(X_train)
X_test = torch.FloatTensor(X_test)
Y_train = torch.LongTensor(Y_train)
Y_test = torch.LongTensor(Y_test)
train_dataset = torch.utils.data.TensorDataset(X_train,Y_train)
# test_dataset = torch.utils.data.TensorDataset(X_test,Y_test)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
batch_size=batch_size,
shuffle=True
)
# test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
# batch_size=batch_size,
# shuffle=False
# )
batch_size = 128
NUM_EPOCHS = 30
LR = 0.001
TIME_STEP = 4
#------------------ Load data -----------------------------
csv_data = pd.read_csv('./drive/My Drive/DATA.csv')
csv_data = csv_data.values
A = csv_data.shape[0]
board_data = csv_data[:,0:16]
# X = np.log2(X)
X = torch.FloatTensor(board_data)
X = np.int64(board_data)
# Concatenate with the transpose
X = np.reshape(X, (-1,4,4))
XT = X.transpose(0,2,1)
X = np.concatenate((X,XT),axis=1)
print(X.shape)
direction_data = csv_data[:,16]
Y = np.int64(direction_data)
model = CCRNN()
model = model.cuda()
optimizer = optim.Adam(model.parameters(), lr = 0.001)
def train(epoch):
for batch_idx, (data, target) in enumerate(train_loader):
data, target = Variable(data).cuda(), Variable(target).cuda()
data = data/11.0
optimizer.zero_grad()
output = model(data)
loss = F.cross_entropy(output, target)
loss.backward()
optimizer.step()
if batch_idx % 50 == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\t Loss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
torch.save(model, 'rnn_model_' + str(epoch) + '.pkl')
if __name__ == '__main__':
for epoch in range(0, NUM_EPOCHS):
train(epoch) | [((83, 11, 83, 51), 'pandas.read_csv', 'pd.read_csv', ({(83, 23, 83, 50): '"""./drive/My Drive/DATA.csv"""'}, {}), "('./drive/My Drive/DATA.csv')", True, 'import pandas as pd\n'), ((88, 4, 88, 33), 'torch.FloatTensor', 'torch.FloatTensor', ({(88, 22, 88, 32): 'board_data'}, {}), '(board_data)', False, 'import torch\n'), ((89, 4, 89, 24), 'numpy.int64', 'np.int64', ({(89, 13, 89, 23): 'board_data'}, {}), '(board_data)', True, 'import numpy as np\n'), ((92, 4, 92, 27), 'numpy.reshape', 'np.reshape', ({(92, 15, 92, 16): 'X', (92, 18, 92, 26): '(-1, 4, 4)'}, {}), '(X, (-1, 4, 4))', True, 'import numpy as np\n'), ((95, 4, 95, 33), 'numpy.concatenate', 'np.concatenate', (), '', True, 'import numpy as np\n'), ((99, 4, 99, 28), 'numpy.int64', 'np.int64', ({(99, 13, 99, 27): 'direction_data'}, {}), '(direction_data)', True, 'import numpy as np\n'), ((105, 35, 105, 86), 'sklearn.model_selection.train_test_split', 'train_test_split', (), '', False, 'from sklearn.model_selection import train_test_split\n'), ((106, 10, 106, 36), 'torch.FloatTensor', 'torch.FloatTensor', ({(106, 28, 106, 35): 'X_train'}, {}), '(X_train)', False, 'import torch\n'), ((107, 9, 107, 34), 'torch.FloatTensor', 'torch.FloatTensor', ({(107, 27, 107, 33): 'X_test'}, {}), '(X_test)', False, 'import torch\n'), ((108, 10, 108, 35), 'torch.LongTensor', 'torch.LongTensor', ({(108, 27, 108, 34): 'Y_train'}, {}), '(Y_train)', False, 'import torch\n'), ((109, 9, 109, 33), 'torch.LongTensor', 'torch.LongTensor', ({(109, 26, 109, 32): 'Y_test'}, {}), '(Y_test)', False, 'import torch\n'), ((111, 16, 111, 63), 'torch.utils.data.TensorDataset', 'torch.utils.data.TensorDataset', ({(111, 47, 111, 54): 'X_train', (111, 55, 111, 62): 'Y_train'}, {}), '(X_train, Y_train)', False, 'import torch\n'), ((114, 15, 117, 1), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (), '', False, 'import torch\n'), ((131, 11, 131, 51), 'pandas.read_csv', 'pd.read_csv', ({(131, 23, 131, 50): '"""./drive/My Drive/DATA.csv"""'}, {}), "('./drive/My Drive/DATA.csv')", True, 'import pandas as pd\n'), ((136, 4, 136, 33), 'torch.FloatTensor', 'torch.FloatTensor', ({(136, 22, 136, 32): 'board_data'}, {}), '(board_data)', False, 'import torch\n'), ((137, 4, 137, 24), 'numpy.int64', 'np.int64', ({(137, 13, 137, 23): 'board_data'}, {}), '(board_data)', True, 'import numpy as np\n'), ((140, 4, 140, 27), 'numpy.reshape', 'np.reshape', ({(140, 15, 140, 16): 'X', (140, 18, 140, 26): '(-1, 4, 4)'}, {}), '(X, (-1, 4, 4))', True, 'import numpy as np\n'), ((143, 4, 143, 33), 'numpy.concatenate', 'np.concatenate', (), '', True, 'import numpy as np\n'), ((147, 4, 147, 28), 'numpy.int64', 'np.int64', ({(147, 13, 147, 27): 'direction_data'}, {}), '(direction_data)', True, 'import numpy as np\n'), ((25, 22, 32, 9), 'torch.nn.LSTM', 'nn.LSTM', (), '', True, 'import torch.nn as nn\n'), ((34, 23, 40, 9), 'torch.nn.Conv2d', 'nn.Conv2d', (), '', True, 'import torch.nn as nn\n'), ((42, 23, 48, 9), 'torch.nn.Conv2d', 'nn.Conv2d', (), '', True, 'import torch.nn as nn\n'), ((50, 23, 56, 9), 'torch.nn.Conv2d', 'nn.Conv2d', (), '', True, 'import torch.nn as nn\n'), ((58, 25, 58, 41), 'torch.nn.Linear', 'nn.Linear', ({(58, 35, 58, 36): '4', (58, 38, 58, 40): '64'}, {}), '(4, 64)', True, 'import torch.nn as nn\n'), ((59, 25, 59, 41), 'torch.nn.Linear', 'nn.Linear', ({(59, 35, 59, 36): '4', (59, 38, 59, 40): '32'}, {}), '(4, 32)', True, 'import torch.nn as nn\n'), ((60, 25, 60, 41), 'torch.nn.Linear', 'nn.Linear', ({(60, 35, 60, 36): '4', (60, 38, 60, 40): '32'}, {}), '(4, 
32)', True, 'import torch.nn as nn\n'), ((61, 24, 61, 43), 'torch.nn.Linear', 'nn.Linear', ({(61, 34, 61, 37): '256', (61, 39, 61, 42): '128'}, {}), '(256, 128)', True, 'import torch.nn as nn\n'), ((62, 23, 62, 40), 'torch.nn.Linear', 'nn.Linear', ({(62, 33, 62, 36): '256', (62, 38, 62, 39): '4'}, {}), '(256, 4)', True, 'import torch.nn as nn\n'), ((66, 17, 66, 49), 'torch.unsqueeze', 'torch.unsqueeze', ({(66, 33, 66, 45): 'x[:, 0:4, :]', (66, 47, 66, 48): '1'}, {}), '(x[:, 0:4, :], 1)', False, 'import torch\n'), ((78, 21, 78, 78), 'torch.cat', 'torch.cat', ({(78, 31, 78, 74): '(CNN22toFC, CNN41toFC, CNN14toFC, LSTMtoFC)', (78, 76, 78, 77): '1'}, {}), '((CNN22toFC, CNN41toFC, CNN14toFC, LSTMtoFC), 1)', False, 'import torch\n'), ((161, 15, 161, 46), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', ({(161, 31, 161, 37): 'output', (161, 39, 161, 45): 'target'}, {}), '(output, target)', True, 'import torch.nn.functional as F\n'), ((157, 23, 157, 37), 'torch.autograd.Variable', 'Variable', ({(157, 32, 157, 36): 'data'}, {}), '(data)', False, 'from torch.autograd import Variable\n'), ((157, 46, 157, 62), 'torch.autograd.Variable', 'Variable', ({(157, 55, 157, 61): 'target'}, {}), '(target)', False, 'from torch.autograd import Variable\n')] |
yliu120/dbsystem | HW2/dbsys-hw2/Database.py | d1b008f411929058a34a1dd2c44c9ee2cf899865 | import json, io, os, os.path
from Catalog.Schema import DBSchema, DBSchemaEncoder, DBSchemaDecoder
from Query.Plan import PlanBuilder
from Storage.StorageEngine import StorageEngine
class Database:
"""
A top-level database engine class.
For now, this primarily maintains a simple catalog,
mapping relation names to schema objects.
Also, it provides the ability to construct query
plan objects, as well as wrapping the storage layer methods.
"""
checkpointEncoding = "latin1"
checkpointFile = "db.catalog"
def __init__(self, **kwargs):
other = kwargs.get("other", None)
if other:
self.fromOther(other)
else:
storageArgs = {k:v for (k,v) in kwargs.items() \
if k in ["pageSize", "poolSize", "dataDir", "indexDir"]}
self.relationMap = kwargs.get("relations", {})
self.defaultPageSize = kwargs.get("pageSize", io.DEFAULT_BUFFER_SIZE)
self.storage = kwargs.get("storage", StorageEngine(**storageArgs))
checkpointFound = os.path.exists(os.path.join(self.storage.fileMgr.dataDir, Database.checkpointFile))
restoring = "restore" in kwargs
if not restoring and checkpointFound:
self.restore()
def fromOther(self, other):
self.relationMap = other.relationMap
self.defaultPageSize = other.defaultPageSize
self.storage = other.storage
def close(self):
if self.storage:
self.storage.close()
# Database internal components
def storageEngine(self):
return self.storage
def bufferPool(self):
return self.storage.bufferPool if self.storage else None
def fileManager(self):
return self.storage.fileMgr if self.storage else None
# User API
# Catalog methods
def relations(self):
return self.relationMap.keys()
def hasRelation(self, relationName):
return relationName in self.relationMap
def relationSchema(self, relationName):
if relationName in self.relationMap:
return self.relationMap[relationName]
# DDL statements
def createRelation(self, relationName, relationFields):
if relationName not in self.relationMap:
schema = DBSchema(relationName, relationFields)
self.relationMap[relationName] = schema
self.storage.createRelation(relationName, schema)
self.checkpoint()
else:
raise ValueError("Relation '" + relationName + "' already exists")
def removeRelation(self, relationName):
if relationName in self.relationMap:
del self.relationMap[relationName]
self.storage.removeRelation(relationName)
self.checkpoint()
else:
raise ValueError("No relation '" + relationName + "' found in database")
# DML statements
# Returns a tuple id for the newly inserted data.
def insertTuple(self, relationName, tupleData):
if relationName in self.relationMap:
return self.storage.insertTuple(relationName, tupleData)
else:
raise ValueError("Unknown relation '" + relationName + "' while inserting a tuple")
def deleteTuple(self, tupleId):
self.storage.deleteTuple(tupleId)
def updateTuple(self, tupleId, tupleData):
self.storage.updateTuple(tupleId, tupleData)
# Queries
# Returns an empty query builder that can access the current database.
def query(self):
return PlanBuilder(db=self)
# Returns an iterable for query results, after initializing the given plan.
def processQuery(self, queryPlan):
return queryPlan.prepare(self)
# Save the database internals to the data directory.
def checkpoint(self):
if self.storage:
dbcPath = os.path.join(self.storage.fileMgr.dataDir, Database.checkpointFile)
with open(dbcPath, 'w', encoding=Database.checkpointEncoding) as f:
f.write(self.pack())
# Load relations and schema from an existing data directory.
def restore(self):
if self.storage:
dbcPath = os.path.join(self.storage.fileMgr.dataDir, Database.checkpointFile)
with open(dbcPath, 'r', encoding=Database.checkpointEncoding) as f:
other = Database.unpack(f.read(), self.storage)
self.fromOther(other)
# Database schema catalog serialization
def pack(self):
if self.relationMap is not None:
return json.dumps([self.relationMap, self.defaultPageSize], cls=DBSchemaEncoder)
@classmethod
def unpack(cls, buffer, storageEngine):
(relationMap, pageSize) = json.loads(buffer, cls=DBSchemaDecoder)
return cls(relations=relationMap, pageSize=pageSize, storage=storageEngine, restore=True)
if __name__ == "__main__":
import doctest
doctest.testmod()
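# Illustrative usage sketch (not in the original file; the field-list format and
# the tuple data are assumptions based on the DBSchema/StorageEngine API):
#   db = Database(dataDir='./data')
#   db.createRelation('employee', [('id', 'int'), ('age', 'int')])
#   tid = db.insertTuple('employee', ...)  # tupleData must match the schema
#   db.checkpoint()
#   db.close()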
| [((144, 4, 144, 21), 'doctest.testmod', 'doctest.testmod', ({}, {}), '()', False, 'import doctest\n'), ((110, 11, 110, 31), 'Query.Plan.PlanBuilder', 'PlanBuilder', (), '', False, 'from Query.Plan import PlanBuilder\n'), ((139, 30, 139, 69), 'json.loads', 'json.loads', (), '', False, 'import json, io, os, os.path\n'), ((76, 15, 76, 53), 'Catalog.Schema.DBSchema', 'DBSchema', ({(76, 24, 76, 36): 'relationName', (76, 38, 76, 52): 'relationFields'}, {}), '(relationName, relationFields)', False, 'from Catalog.Schema import DBSchema, DBSchemaEncoder, DBSchemaDecoder\n'), ((120, 16, 120, 83), 'os.path.join', 'os.path.join', ({(120, 29, 120, 57): 'self.storage.fileMgr.dataDir', (120, 59, 120, 82): 'Database.checkpointFile'}, {}), '(self.storage.fileMgr.dataDir, Database.checkpointFile)', False, 'import json, io, os, os.path\n'), ((127, 16, 127, 83), 'os.path.join', 'os.path.join', ({(127, 29, 127, 57): 'self.storage.fileMgr.dataDir', (127, 59, 127, 82): 'Database.checkpointFile'}, {}), '(self.storage.fileMgr.dataDir, Database.checkpointFile)', False, 'import json, io, os, os.path\n'), ((135, 13, 135, 86), 'json.dumps', 'json.dumps', (), '', False, 'import json, io, os, os.path\n'), ((32, 51, 32, 79), 'Storage.StorageEngine.StorageEngine', 'StorageEngine', ({}, {}), '(**storageArgs)', False, 'from Storage.StorageEngine import StorageEngine\n'), ((34, 39, 34, 106), 'os.path.join', 'os.path.join', ({(34, 52, 34, 80): 'self.storage.fileMgr.dataDir', (34, 82, 34, 105): 'Database.checkpointFile'}, {}), '(self.storage.fileMgr.dataDir, Database.checkpointFile)', False, 'import json, io, os, os.path\n')] |
dboyliao/TaipeiPy-pybind11-buffer-array | tests/test_arr_add_value.py | 22e764d9fbf605950c0de10e3a341de36bc9bf89 | import numpy as np
import mylib
def test_arr_add_value():
for _ in range(10):
shape = np.random.randint(1, 10, size=np.random.randint(3, 10)).tolist()
in_arr = np.random.rand(*shape).astype(np.double)
ok = np.allclose(mylib.array_add_value(in_arr, np.pi), in_arr + np.pi)
if not ok:
raise ValueError("incorrect result")
| [((10, 25, 10, 61), 'mylib.array_add_value', 'mylib.array_add_value', ({(10, 47, 10, 53): 'in_arr', (10, 55, 10, 60): 'np.pi'}, {}), '(in_arr, np.pi)', False, 'import mylib\n'), ((9, 17, 9, 39), 'numpy.random.rand', 'np.random.rand', ({(9, 32, 9, 38): '*shape'}, {}), '(*shape)', True, 'import numpy as np\n'), ((8, 46, 8, 70), 'numpy.random.randint', 'np.random.randint', ({(8, 64, 8, 65): '3', (8, 67, 8, 69): '10'}, {}), '(3, 10)', True, 'import numpy as np\n')] |
DavideRuzza/moderngl-window | moderngl_window/resources/data.py | e9debc6ed4a1899aa83c0da2320e03b0c2922b80 | """
Registry general data files
"""
from typing import Any
from moderngl_window.resources.base import BaseRegistry
from moderngl_window.meta import DataDescription
class DataFiles(BaseRegistry):
"""Registry for requested data files"""
settings_attr = "DATA_LOADERS"
def load(self, meta: DataDescription) -> Any:
"""Load data file with the configured loaders.
Args:
meta (:py:class:`~moderngl_window.meta.data.DataDescription`): the resource description
Returns:
Any: The loaded resource
"""
return super().load(meta)
data = DataFiles()
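# Illustrative usage (hypothetical path/kind, not part of the original module):
#   description = DataDescription(path="data/config.json", kind="json")
#   config = data.load(description)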
| [] |
frewsxcv/routes | tests/test_units/test_mapper_str.py | 7690fc1016e56739855435fb54c96acccfa29009 | import unittest
from routes import Mapper
class TestMapperStr(unittest.TestCase):
def test_str(self):
m = Mapper()
m.connect('/{controller}/{action}')
m.connect('entries', '/entries', controller='entry', action='index')
m.connect('entry', '/entries/{id}', controller='entry', action='show')
expected = """\
Route name Methods Path
/{controller}/{action}
entries /entries
entry /entries/{id}"""
for expected_line, actual_line in zip(expected.splitlines(), str(m).splitlines()):
assert expected_line == actual_line.rstrip()
| [((6, 12, 6, 20), 'routes.Mapper', 'Mapper', ({}, {}), '()', False, 'from routes import Mapper\n')] |
HAOYUatHZ/pyquarkchain | quarkchain/tools/config_slave.py | b2c7c02e4415aa26917c2cbb5e7571c9fef16c5b | """
python config_slave.py 127.0.0.1 38000 38006 127.0.0.2 18999 18002
will generate 4 slave server configs accordingly. will be used in deployment automation to configure a cluster.
usage: python config_slave.py <host1> <port1> <port2> <host2> <port3> ...
"""
import argparse
import collections
import json
import os
FILE = "../../testnet/2/cluster_config_template.json"
if "QKC_CONFIG" in os.environ:
FILE = os.environ["QKC_CONFIG"]
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"hostports",
nargs="+",
metavar="hostports",
help="Host and ports for slave config",
)
args = parser.parse_args()
abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
os.chdir(dname)
###############
# parse hosts and ports to form a slave list
###############
host_port_mapping = collections.defaultdict(list)
last_host = None
for host_or_port in args.hostports: # type: str
if not host_or_port.isdigit(): # host
last_host = host_or_port
else: # port
host_port_mapping[last_host].append(host_or_port)
assert None not in host_port_mapping
slave_num = sum(len(port_list) for port_list in host_port_mapping.values())
# make sure number of slaves is power of 2
assert slave_num > 0 and (slave_num & (slave_num - 1) == 0)
slave_servers, i = [], 0
for host, port_list in host_port_mapping.items():
for port in port_list:
s = {
"HOST": host,
"PORT": int(port),
"ID": "S%d" % i,
"CHAIN_MASK_LIST": [i | slave_num],
}
slave_servers.append(s)
i += 1
###############
# read config file and substitute with updated slave config
###############
with open(FILE, "r+") as f:
parsed_config = json.load(f)
parsed_config["SLAVE_LIST"] = slave_servers
f.seek(0)
f.truncate()
f.write(json.dumps(parsed_config, indent=4))
if __name__ == "__main__":
main()
| [((18, 13, 18, 38), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ({}, {}), '()', False, 'import argparse\n'), ((27, 14, 27, 39), 'os.path.abspath', 'os.path.abspath', ({(27, 30, 27, 38): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((28, 12, 28, 36), 'os.path.dirname', 'os.path.dirname', ({(28, 28, 28, 35): 'abspath'}, {}), '(abspath)', False, 'import os\n'), ((29, 4, 29, 19), 'os.chdir', 'os.chdir', ({(29, 13, 29, 18): 'dname'}, {}), '(dname)', False, 'import os\n'), ((35, 24, 35, 53), 'collections.defaultdict', 'collections.defaultdict', ({(35, 48, 35, 52): 'list'}, {}), '(list)', False, 'import collections\n'), ((65, 24, 65, 36), 'json.load', 'json.load', ({(65, 34, 65, 35): 'f'}, {}), '(f)', False, 'import json\n'), ((69, 16, 69, 51), 'json.dumps', 'json.dumps', (), '', False, 'import json\n')] |
MauMendes/python3-programming-specialization | python-function-files-dictionaries/week4-assignment1.py | 8bd259f0ac559c6004baa0e759b6ec4bc25e1320 | #1) Write a function, sublist, that takes in a list of numbers as the parameter. In the function, use a while loop to return a sublist of the input list.
# The sublist should contain the same values of the original list up until it reaches the number 5 (it should not contain the number 5).
def sublist(input_lst):
out_lst = list()
number = 0
i = 0
print(input_lst)
print(len(input_lst))
length = len(input_lst)
while i<length:
number = input_lst[i]
i+=1
if number==5: break
else : out_lst.append(number)
print(out_lst)
return out_lst
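# Quick illustrative check: sublist([3, 4, 5, 6]) returns [3, 4] -- the loop
# stops before appending the 5.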
#2) Write a function called check_nums that takes a list as its parameter, and contains a while loop that only stops once the element of the
# list is the number 7. What is returned is a list of all of the numbers up until it reaches 7.def check_nums(input_lst):
def check_nums(input_lst):
out_lst = list()
number = 0
i = 0
print(input_lst)
print(len(input_lst))
length = len(input_lst)
while i<length:
number = input_lst[i]
i+=1
if number==7: break
else : out_lst.append(number)
print(out_lst)
return out_lst
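# Quick illustrative check: check_nums([1, 7, 2]) returns [1], since the loop
# stops as soon as it reaches the 7.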
#3) Write a function, sublist, that takes in a list of strings as the parameter. In the function, use a while loop to return a sublist of the input list.
# The sublist should contain the same values of the original list up until it reaches the string “STOP” (it should not contain the string “STOP”).
def sublist(in_lst):
out_list = list()
str = ""
i = 0
while str!="STOP":
str = in_lst[i]
i+=1
if str=="STOP": break
else: out_list.append(str)
return out_list
#4) Write a function called stop_at_z that iterates through a list of strings. Using a while loop, append each string to a new list until the string that
# appears is “z”. The function should return the new list.
def stop_at_z(in_lst):
out_list = list()
str = ""
i = 0
while str!="z":
str = in_lst[i]
i+=1
if str=="z": break
else: out_list.append(str)
return out_list
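# Quick illustrative check: stop_at_z(['a', 'z', 'b']) returns ['a']; note that
# the loop assumes 'z' appears in the list, otherwise the index runs past the end.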
#5) Below is a for loop that works. Underneath the for loop, rewrite the problem so that it does the same thing, but using a while loop instead of a for loop.
# Assign the accumulated total in the while loop code to the variable sum2. Once complete, sum2 should equal sum1.
lst = [65, 78, 21, 33]
lenght = len(lst)
i = 0
sum2 = 0
while i<lenght:
sum2 += lst[i]
i+=1
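# The while loop accumulates 65 + 78 + 21 + 33, so sum2 ends up as 197.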
#6) Challenge: Write a function called beginning that takes a list as input and contains a while loop that only stops once the element of the list is the string ‘bye’.
# What is returned is a list that contains up to the first 10 strings, regardless of where the loop stops. (i.e., if it stops on the 32nd element, the first 10 are
# returned. If “bye” is the 5th element, the first 4 are returned.) If you want to make this even more of a challenge, do this without slicing
def beginning(in_list):
length = len(in_list)
out_lst = list()
i = 0
str = ""
while i<length:
str = in_list[i]
i+=1
if str=="bye" or i>10:
break
out_lst.append(str)
return out_lst
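# Quick illustrative checks: beginning(['x'] * 15) returns ['x'] * 10, and
# beginning(['a', 'bye', 'c']) returns ['a'].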
| [] |
Canway-shiisa/bk-iam-saas | saas/backend/apps/group/views.py | 73c3770d9647c9cc8d515427cd1d053d8af9d071 | # -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-权限中心(BlueKing-IAM) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import logging
from functools import wraps
from typing import List
from django.shortcuts import get_object_or_404
from django.utils.translation import gettext as _
from drf_yasg.utils import swagger_auto_schema
from pydantic.tools import parse_obj_as
from rest_framework import serializers, status, views
from rest_framework.pagination import LimitOffsetPagination
from rest_framework.response import Response
from rest_framework.viewsets import GenericViewSet, mixins
from backend.account.permissions import RolePermission, role_perm_class
from backend.apps.application.serializers import ConditionCompareSLZ, ConditionTagSLZ
from backend.apps.group import tasks # noqa
from backend.apps.group.models import Group
from backend.apps.policy.serializers import PolicyDeleteSLZ, PolicySLZ, PolicySystemSLZ
from backend.apps.template.models import PermTemplatePolicyAuthorized
from backend.audit.audit import audit_context_setter, view_audit_decorator
from backend.biz.group import GroupBiz, GroupCheckBiz, GroupMemberExpiredAtBean
from backend.biz.policy import PolicyBean, PolicyOperationBiz, PolicyQueryBiz
from backend.biz.policy_tag import ConditionTagBean, ConditionTagBiz
from backend.biz.role import RoleBiz, RoleListQuery, RoleObjectRelationChecker
from backend.biz.template import TemplateBiz
from backend.common.error_codes import error_codes
from backend.common.filters import NoCheckModelFilterBackend
from backend.common.serializers import SystemQuerySLZ
from backend.common.time import PERMANENT_SECONDS
from backend.service.constants import PermissionCodeEnum, RoleType, SubjectType
from backend.service.models import Subject
from backend.trans.group import GroupTrans
from .audit import (
GroupCreateAuditProvider,
GroupDeleteAuditProvider,
GroupMemberCreateAuditProvider,
GroupMemberDeleteAuditProvider,
GroupMemberRenewAuditProvider,
GroupPolicyDeleteAuditProvider,
GroupPolicyUpdateAuditProvider,
GroupTemplateCreateAuditProvider,
GroupTransferAuditProvider,
GroupUpdateAuditProvider,
)
from .constants import OperateEnum
from .filters import GroupFilter, GroupTemplateSystemFilter
from .serializers import (
GroupAddMemberSLZ,
GroupAuthoriedConditionSLZ,
GroupAuthorizationSLZ,
GroupCreateSLZ,
GroupDeleteMemberSLZ,
GroupIdSLZ,
GroupMemberUpdateExpiredAtSLZ,
GroupPolicyUpdateSLZ,
GroupSLZ,
GroupTemplateDetailSchemaSLZ,
GroupTemplateDetailSLZ,
GroupTemplateSchemaSLZ,
GroupTemplateSLZ,
GroupTransferSLZ,
GroupUpdateSLZ,
MemberSLZ,
SearchMemberSLZ,
)
permission_logger = logging.getLogger("permission")
def check_readonly_group(operation):
    """Check that the user group is not read-only before allowing the operation."""
def decorate(func):
@wraps(func)
def wrapper(view, request, *args, **kwargs):
group = view.get_object()
readonly = group.readonly
if readonly:
raise error_codes.FORBIDDEN.format(
message=_("只读用户组({})无法进行({})操作!").format(group.id, operation), replace=True
)
response = func(view, request, *args, **kwargs)
return response
return wrapper
return decorate
class GroupQueryMixin:
def get_queryset(self):
request = self.request
return RoleListQuery(request.role, request.user).query_group()
class GroupPermissionMixin:
def check_object_permissions(self, request, obj):
if not RoleObjectRelationChecker(request.role).check_group(obj):
self.permission_denied(request, message=f"{request.role.type} role can not access group {obj.id}")
class GroupViewSet(mixins.RetrieveModelMixin, mixins.ListModelMixin, GenericViewSet):
permission_classes = [RolePermission]
action_permission = {
"create": PermissionCodeEnum.MANAGE_GROUP.value,
"update": PermissionCodeEnum.MANAGE_GROUP.value,
"destroy": PermissionCodeEnum.MANAGE_GROUP.value,
}
queryset = Group.objects.all()
serializer_class = GroupSLZ
filterset_class = GroupFilter
lookup_field = "id"
group_biz = GroupBiz()
group_check_biz = GroupCheckBiz()
role_biz = RoleBiz()
group_trans = GroupTrans()
@swagger_auto_schema(
operation_description="创建用户组",
request_body=GroupCreateSLZ(label="用户组"),
responses={status.HTTP_201_CREATED: GroupIdSLZ(label="用户组ID")},
tags=["group"],
)
@view_audit_decorator(GroupCreateAuditProvider)
    def create(self, request, *args, **kwargs):
        """
        Create a user group
        """
serializer = GroupCreateSLZ(data=request.data)
serializer.is_valid(raise_exception=True)
user_id = request.user.username
data = serializer.validated_data
        # The group name must be unique within the role
self.group_check_biz.check_role_group_name_unique(request.role.id, data["name"])
        # Check whether the number of groups within the role exceeds the limit
        number_of_new_group = 1  # This API only creates one group at a time (no batch), so the number of new groups is 1
self.group_check_biz.check_role_group_limit(request.role, number_of_new_group)
        # Check that the members are within the role's authorization scope
members = parse_obj_as(List[Subject], data["members"])
self.group_check_biz.check_role_subject_scope(request.role, members)
group = self.group_biz.create_and_add_members(
request.role.id, data["name"], data["description"], user_id, members, data["expired_at"]
)
        # Use a long-running task to grant multiple templates at the same time
if data["templates"]:
templates = self.group_trans.from_group_grant_data(data["templates"])
self.group_biz.grant(request.role, group, templates)
        # Write the audit context
audit_context_setter(group=group)
return Response({"id": group.id}, status=status.HTTP_201_CREATED)
def get_queryset(self):
request = self.request
role = request.role
username = request.user.username
filter_role_id = request.query_params.get("role_id")
        # If the current role is staff and a filter role_id is provided
if role.type == RoleType.STAFF.value and filter_role_id:
            # Check whether the user is within the role's authorization scope
filter_role = self.role_biz.get_role_scope_include_user(filter_role_id, username)
if not filter_role:
return Group.objects.none()
            # Return the list of groups for that role
return RoleListQuery(filter_role, request.user).query_group()
return RoleListQuery(role, request.user).query_group()
@swagger_auto_schema(
operation_description="用户组列表",
responses={status.HTTP_200_OK: GroupSLZ(label="用户组", many=True)},
tags=["group"],
)
def list(self, request, *args, **kwargs):
return super().list(request, *args, **kwargs)
@swagger_auto_schema(
operation_description="用户组详情",
responses={status.HTTP_200_OK: GroupSLZ(label="用户组")},
tags=["group"],
)
def retrieve(self, request, *args, **kwargs):
return super().retrieve(request, *args, **kwargs)
@swagger_auto_schema(
operation_description="修改用户组",
request_body=GroupUpdateSLZ(label="用户组"),
responses={status.HTTP_200_OK: GroupUpdateSLZ(label="用户组")},
tags=["group"],
)
@view_audit_decorator(GroupUpdateAuditProvider)
@check_readonly_group(operation=OperateEnum.GROUP_UPDATE.label)
def update(self, request, *args, **kwargs):
group = self.get_object()
serializer = GroupUpdateSLZ(group, data=request.data)
serializer.is_valid(raise_exception=True)
user_id = request.user.username
data = serializer.validated_data
        # The group name must be unique within the role
self.group_check_biz.check_role_group_name_unique(request.role.id, data["name"], group.id)
group = self.group_biz.update(group, data["name"], data["description"], user_id)
        # Write the audit context
audit_context_setter(group=group)
return Response(serializer.data)
@swagger_auto_schema(
operation_description="删除用户组",
responses={status.HTTP_200_OK: serializers.Serializer()},
tags=["group"],
)
@view_audit_decorator(GroupDeleteAuditProvider)
@check_readonly_group(operation=OperateEnum.GROUP_DELETE.label)
def destroy(self, request, *args, **kwargs):
group = self.get_object()
self.group_biz.delete(group.id)
        # Write the audit context
audit_context_setter(group=group)
return Response({})
class GroupMemberViewSet(GroupPermissionMixin, GenericViewSet):
permission_classes = [RolePermission]
action_permission = {
"list": PermissionCodeEnum.MANAGE_GROUP.value,
"create": PermissionCodeEnum.MANAGE_GROUP.value,
"destroy": PermissionCodeEnum.MANAGE_GROUP.value,
}
queryset = Group.objects.all()
lookup_field = "id"
biz = GroupBiz()
group_check_biz = GroupCheckBiz()
@swagger_auto_schema(
operation_description="用户组成员列表",
query_serializer=SearchMemberSLZ(label="keyword"),
responses={status.HTTP_200_OK: MemberSLZ(label="成员")},
tags=["group"],
)
def list(self, request, *args, **kwargs):
group = get_object_or_404(self.queryset, pk=kwargs["id"])
        # Verify permissions
checker = RoleObjectRelationChecker(request.role)
if not checker.check_group(group):
raise error_codes.FORBIDDEN.format(message=_("用户组({})不在当前用户身份可访问的范围内").format(group.id), replace=True)
if request.query_params.get("keyword"):
slz = SearchMemberSLZ(data=request.query_params)
slz.is_valid(raise_exception=True)
keyword = slz.validated_data["keyword"].lower()
group_members = self.biz.search_member_by_keyword(group.id, keyword)
return Response({"results": [one.dict() for one in group_members]})
pagination = LimitOffsetPagination()
limit = pagination.get_limit(request)
offset = pagination.get_offset(request)
count, group_members = self.biz.list_paging_group_member(group.id, limit, offset)
return Response({"count": count, "results": [one.dict() for one in group_members]})
@swagger_auto_schema(
operation_description="用户组添加成员",
request_body=GroupAddMemberSLZ(label="成员"),
responses={status.HTTP_200_OK: serializers.Serializer()},
tags=["group"],
)
@view_audit_decorator(GroupMemberCreateAuditProvider)
@check_readonly_group(operation=OperateEnum.GROUP_MEMBER_CREATE.label)
def create(self, request, *args, **kwargs):
serializer = GroupAddMemberSLZ(data=request.data)
serializer.is_valid(raise_exception=True)
group = self.get_object()
data = serializer.validated_data
members_data = data["members"]
expired_at = data["expired_at"]
        # Convert member dicts to Subject objects and deduplicate
members = list(set(parse_obj_as(List[Subject], members_data)))
        # Check that the members are within the role's authorization scope
self.group_check_biz.check_role_subject_scope(request.role, members)
self.group_check_biz.check_member_count(group.id, len(members))
permission_logger.info("group %s add members %s by user %s", group.id, members, request.user.username)
        # Add the members
self.biz.add_members(group.id, members, expired_at)
        # Write the audit context
audit_context_setter(group=group, members=[m.dict() for m in members])
return Response({}, status=status.HTTP_201_CREATED)
@swagger_auto_schema(
operation_description="用户组删除成员",
request_body=GroupDeleteMemberSLZ(label="成员"),
responses={status.HTTP_200_OK: serializers.Serializer()},
tags=["group"],
)
@view_audit_decorator(GroupMemberDeleteAuditProvider)
@check_readonly_group(operation=OperateEnum.GROUP_MEMBER_DELETE.label)
def destroy(self, request, *args, **kwargs):
serializer = GroupDeleteMemberSLZ(data=request.data)
serializer.is_valid(raise_exception=True)
group = self.get_object()
data = serializer.validated_data
permission_logger.info(
"group %s delete members %s by user %s", group.id, data["members"], request.user.username
)
self.biz.remove_members(str(group.id), parse_obj_as(List[Subject], data["members"]))
        # Write the audit context
audit_context_setter(group=group, members=data["members"])
return Response({})
class GroupMemberUpdateExpiredAtViewSet(GroupPermissionMixin, GenericViewSet):
permission_classes = [role_perm_class(PermissionCodeEnum.MANAGE_GROUP.value)]
queryset = Group.objects.all()
lookup_field = "id"
# service
group_biz = GroupBiz()
@swagger_auto_schema(
operation_description="用户组成员续期",
request_body=GroupMemberUpdateExpiredAtSLZ(label="成员"),
responses={status.HTTP_200_OK: serializers.Serializer()},
tags=["group"],
)
@view_audit_decorator(GroupMemberRenewAuditProvider)
@check_readonly_group(operation=OperateEnum.GROUP_MEMBER_RENEW.label)
def create(self, request, *args, **kwargs):
serializer = GroupMemberUpdateExpiredAtSLZ(data=request.data)
serializer.is_valid(raise_exception=True)
group = self.get_object()
data = serializer.validated_data
permission_logger.info(
"group %s update members %s expired_at by user %s", group.id, data["members"], request.user.username
)
for m in data["members"]:
m["policy_expired_at"] = m.pop("expired_at")
self.group_biz.update_members_expired_at(
group.id, parse_obj_as(List[GroupMemberExpiredAtBean], data["members"])
)
        # Write the audit context
audit_context_setter(group=group, members=data["members"])
return Response({})
class GroupTemplateViewSet(GroupPermissionMixin, GenericViewSet):
permission_classes = [RolePermission]
action_permission = {"create": PermissionCodeEnum.MANAGE_GROUP.value}
    pagination_class = None  # Drop the limit/offset params from the swagger docs
queryset = Group.objects.all()
filterset_class = GroupTemplateSystemFilter
filter_backends = [NoCheckModelFilterBackend]
lookup_field = "id"
template_biz = TemplateBiz()
@swagger_auto_schema(
operation_description="用户组拥有的权限模板列表",
responses={status.HTTP_200_OK: GroupTemplateSchemaSLZ(label="权限模板", many=True)},
tags=["group"],
)
def list(self, request, *args, **kwargs):
group = get_object_or_404(self.queryset, pk=kwargs["id"])
subject = Subject(type=SubjectType.GROUP.value, id=str(group.id))
queryset = PermTemplatePolicyAuthorized.objects.filter_by_subject(subject).defer("_data")
queryset = self.filter_queryset(queryset)
return Response(GroupTemplateSLZ(queryset, many=True).data)
@swagger_auto_schema(
operation_description="用户组权限模板授权信息",
responses={status.HTTP_200_OK: GroupTemplateDetailSchemaSLZ(label="授权信息")},
tags=["group"],
)
def retrieve(self, request, *args, **kwargs):
group = get_object_or_404(self.queryset, pk=kwargs["id"])
template_id = kwargs["template_id"]
subject = Subject(type=SubjectType.GROUP.value, id=str(group.id))
authorized_template = PermTemplatePolicyAuthorized.objects.get_by_subject_template(subject, int(template_id))
return Response(GroupTemplateDetailSLZ(authorized_template).data)
class GroupPolicyViewSet(GroupPermissionMixin, GenericViewSet):
permission_classes = [RolePermission]
action_permission = {
"create": PermissionCodeEnum.MANAGE_GROUP.value,
"destroy": PermissionCodeEnum.MANAGE_GROUP.value,
"update": PermissionCodeEnum.MANAGE_GROUP.value,
}
    pagination_class = None  # Drop the limit/offset params from the swagger docs
queryset = Group.objects.all()
lookup_field = "id"
policy_query_biz = PolicyQueryBiz()
policy_operation_biz = PolicyOperationBiz()
group_biz = GroupBiz()
group_trans = GroupTrans()
@swagger_auto_schema(
operation_description="用户组添加权限",
request_body=GroupAuthorizationSLZ(label="授权信息"),
responses={status.HTTP_201_CREATED: serializers.Serializer()},
tags=["group"],
)
@view_audit_decorator(GroupTemplateCreateAuditProvider)
@check_readonly_group(operation=OperateEnum.GROUP_POLICY_CREATE.label)
def create(self, request, *args, **kwargs):
serializer = GroupAuthorizationSLZ(data=request.data)
serializer.is_valid(raise_exception=True)
group = self.get_object()
data = serializer.validated_data
templates = self.group_trans.from_group_grant_data(data["templates"])
self.group_biz.grant(request.role, group, templates)
        # Write the audit context
audit_context_setter(
group=group,
templates=[{"system_id": t["system_id"], "template_id": t["template_id"]} for t in data["templates"]],
)
return Response({}, status=status.HTTP_201_CREATED)
@swagger_auto_schema(
operation_description="用户组自定义权限列表",
query_serializer=SystemQuerySLZ,
responses={status.HTTP_200_OK: PolicySLZ(label="策略", many=True)},
tags=["group"],
)
def list(self, request, *args, **kwargs):
slz = SystemQuerySLZ(data=request.query_params)
slz.is_valid(raise_exception=True)
system_id = slz.validated_data["system_id"]
group = get_object_or_404(self.queryset, pk=kwargs["id"])
subject = Subject(type=SubjectType.GROUP.value, id=str(group.id))
policies = self.policy_query_biz.list_by_subject(system_id, subject)
# ResourceNameAutoUpdate
updated_policies = self.policy_operation_biz.update_due_to_renamed_resource(system_id, subject, policies)
return Response([p.dict() for p in updated_policies])
@swagger_auto_schema(
operation_description="用户组删除自定义权限",
request_body=PolicyDeleteSLZ(label="ids"),
responses={status.HTTP_200_OK: serializers.Serializer()},
tags=["group"],
)
@view_audit_decorator(GroupPolicyDeleteAuditProvider)
@check_readonly_group(operation=OperateEnum.GROUP_POLICY_DELETE.label)
def destroy(self, request, *args, **kwargs):
slz = PolicyDeleteSLZ(data=request.data)
slz.is_valid(raise_exception=True)
system_id = slz.validated_data["system_id"]
ids = slz.validated_data["ids"]
group = self.get_object()
subject = Subject(type=SubjectType.GROUP.value, id=str(group.id))
permission_logger.info(
"subject type=%s, id=%s policy deleted by user %s", subject.type, subject.id, request.user.username
)
policy_list = self.policy_query_biz.query_policy_list_by_policy_ids(system_id, subject, ids)
        # Delete the policies
self.policy_operation_biz.delete_by_ids(system_id, subject, ids)
        # Write the audit context
audit_context_setter(group=group, system_id=system_id, policies=policy_list.policies)
return Response()
@swagger_auto_schema(
operation_description="用户组权限修改",
request_body=GroupPolicyUpdateSLZ(label="修改策略"),
responses={status.HTTP_200_OK: serializers.Serializer()},
tags=["group"],
)
@view_audit_decorator(GroupPolicyUpdateAuditProvider)
@check_readonly_group(operation=OperateEnum.GROUP_POLICY_UPDATE.label)
def update(self, request, *args, **kwargs):
group = self.get_object()
slz = GroupPolicyUpdateSLZ(data=request.data)
slz.is_valid(raise_exception=True)
data = slz.validated_data
system_id = data["system_id"]
template_id = data["template_id"]
policies = [PolicyBean(expired_at=PERMANENT_SECONDS, **action) for action in data["actions"]]
self.group_biz.update_policies(request.role, group.id, system_id, template_id, policies)
        # Write the audit context
audit_context_setter(group=group, system_id=system_id, template_id=template_id, policies=policies)
return Response({})
class GroupSystemViewSet(GenericViewSet):
    pagination_class = None  # Drop the limit/offset params from the swagger docs
queryset = Group.objects.all()
lookup_field = "id"
biz = GroupBiz()
@swagger_auto_schema(
operation_description="用户组有权限的所有系统列表",
responses={status.HTTP_200_OK: PolicySystemSLZ(label="系统", many=True)},
tags=["group"],
)
def list(self, request, *args, **kwargs):
group = self.get_object()
data = self.biz.list_system_counter(group.id)
return Response([one.dict() for one in data])
class GroupTransferView(views.APIView):
    """
    Transfer user groups to another role
    """
permission_classes = [role_perm_class(PermissionCodeEnum.TRANSFER_GROUP.value)]
role_biz = RoleBiz()
@swagger_auto_schema(
operation_description="用户组批量转出",
request_body=GroupTransferSLZ(label="用户转移"),
responses={status.HTTP_200_OK: serializers.Serializer()},
tags=["group"],
)
@view_audit_decorator(GroupTransferAuditProvider)
def post(self, request, *args, **kwargs):
slz = GroupTransferSLZ(data=request.data, context={"role": request.role})
slz.is_valid(raise_exception=True)
group_ids = slz.validated_data["group_ids"]
role_id = slz.validated_data["role_id"]
self.role_biz.transfer_groups_role(group_ids, role_id)
audit_context_setter(group_ids=group_ids, role_id=role_id)
return Response({})
class GroupTemplateConditionCompareView(GroupPermissionMixin, GenericViewSet):
condition_biz = ConditionTagBiz()
template_biz = TemplateBiz()
queryset = Group.objects.all()
lookup_field = "id"
@swagger_auto_schema(
operation_description="权限模板操作条件对比",
request_body=GroupAuthoriedConditionSLZ(label="操作条件"),
responses={status.HTTP_200_OK: ConditionTagSLZ(label="条件差异", many=True)},
tags=["group"],
)
def create(self, request, *args, **kwargs):
serializer = GroupAuthoriedConditionSLZ(data=request.data)
serializer.is_valid(raise_exception=True)
data = serializer.validated_data
group = self.get_object()
action_id = data["action_id"]
resource_group_id = data["resource_group_id"]
related_resource_type = data["related_resource_type"]
new_condition = parse_obj_as(List[ConditionTagBean], related_resource_type["condition"])
        # Find the matching action and resource-type condition in the template data
template_id = kwargs["template_id"]
subject = Subject(type=SubjectType.GROUP.value, id=str(group.id))
authorized_template = PermTemplatePolicyAuthorized.objects.get_by_subject_template(subject, int(template_id))
for action in authorized_template.data["actions"]:
policy = PolicyBean.parse_obj(action)
            # Find the corresponding action
if policy.action_id == action_id:
                # From the action, get the part corresponding to this resource type
related_resource_type = policy.get_related_resource_type(
resource_group_id, related_resource_type["system_id"], related_resource_type["type"]
)
old_condition = related_resource_type.condition if related_resource_type else []
                # Compare the group's existing conditions with the user-submitted conditions
conditions = self.condition_biz.compare_and_tag(
new_condition, parse_obj_as(List[ConditionTagBean], old_condition), is_template=True
)
return Response([c.dict() for c in conditions])
raise error_codes.VALIDATE_ERROR.format(_("模板: {} 没有操作: {} 的权限").format(template_id, action_id))
class GroupCustomPolicyConditionCompareView(GroupPermissionMixin, GenericViewSet):
policy_biz = PolicyQueryBiz()
condition_biz = ConditionTagBiz()
queryset = Group.objects.all()
lookup_field = "id"
@swagger_auto_schema(
operation_description="条件差异对比",
request_body=ConditionCompareSLZ(label="资源条件"),
responses={status.HTTP_200_OK: ConditionTagSLZ(label="条件差异", many=True)},
tags=["group"],
)
def create(self, request, *args, **kwargs):
serializer = ConditionCompareSLZ(data=request.data)
serializer.is_valid(raise_exception=True)
data = serializer.validated_data
group = self.get_object()
subject = Subject(type=SubjectType.GROUP.value, id=str(group.id))
        # 1. Query the policy's conditions
related_resource_type = data["related_resource_type"]
old_condition = self.policy_biz.get_policy_resource_type_conditions(
subject,
data["policy_id"],
data["resource_group_id"],
related_resource_type["system_id"],
related_resource_type["type"],
)
        # 2. Compare and merge the differences
conditions = self.condition_biz.compare_and_tag(
parse_obj_as(List[ConditionTagBean], related_resource_type["condition"]),
parse_obj_as(List[ConditionTagBean], old_condition),
is_template=True,
)
return Response([c.dict() for c in conditions])
| [((78, 20, 78, 51), 'logging.getLogger', 'logging.getLogger', ({(78, 38, 78, 50): '"""permission"""'}, {}), "('permission')", False, 'import logging\n'), ((125, 15, 125, 34), 'backend.apps.group.models.Group.objects.all', 'Group.objects.all', ({}, {}), '()', False, 'from backend.apps.group.models import Group\n'), ((130, 16, 130, 26), 'backend.biz.group.GroupBiz', 'GroupBiz', ({}, {}), '()', False, 'from backend.biz.group import GroupBiz, GroupCheckBiz, GroupMemberExpiredAtBean\n'), ((131, 22, 131, 37), 'backend.biz.group.GroupCheckBiz', 'GroupCheckBiz', ({}, {}), '()', False, 'from backend.biz.group import GroupBiz, GroupCheckBiz, GroupMemberExpiredAtBean\n'), ((132, 15, 132, 24), 'backend.biz.role.RoleBiz', 'RoleBiz', ({}, {}), '()', False, 'from backend.biz.role import RoleBiz, RoleListQuery, RoleObjectRelationChecker\n'), ((134, 18, 134, 30), 'backend.trans.group.GroupTrans', 'GroupTrans', ({}, {}), '()', False, 'from backend.trans.group import GroupTrans\n'), ((142, 5, 142, 51), 'backend.audit.audit.view_audit_decorator', 'view_audit_decorator', ({(142, 26, 142, 50): 'GroupCreateAuditProvider'}, {}), '(GroupCreateAuditProvider)', False, 'from backend.audit.audit import audit_context_setter, view_audit_decorator\n'), ((217, 5, 217, 51), 'backend.audit.audit.view_audit_decorator', 'view_audit_decorator', ({(217, 26, 217, 50): 'GroupUpdateAuditProvider'}, {}), '(GroupUpdateAuditProvider)', False, 'from backend.audit.audit import audit_context_setter, view_audit_decorator\n'), ((242, 5, 242, 51), 'backend.audit.audit.view_audit_decorator', 'view_audit_decorator', ({(242, 26, 242, 50): 'GroupDeleteAuditProvider'}, {}), '(GroupDeleteAuditProvider)', False, 'from backend.audit.audit import audit_context_setter, view_audit_decorator\n'), ((264, 15, 264, 34), 'backend.apps.group.models.Group.objects.all', 'Group.objects.all', ({}, {}), '()', False, 'from backend.apps.group.models import Group\n'), ((267, 10, 267, 20), 'backend.biz.group.GroupBiz', 'GroupBiz', ({}, {}), '()', False, 'from backend.biz.group import GroupBiz, GroupCheckBiz, GroupMemberExpiredAtBean\n'), ((268, 22, 268, 37), 'backend.biz.group.GroupCheckBiz', 'GroupCheckBiz', ({}, {}), '()', False, 'from backend.biz.group import GroupBiz, GroupCheckBiz, GroupMemberExpiredAtBean\n'), ((306, 5, 306, 57), 'backend.audit.audit.view_audit_decorator', 'view_audit_decorator', ({(306, 26, 306, 56): 'GroupMemberCreateAuditProvider'}, {}), '(GroupMemberCreateAuditProvider)', False, 'from backend.audit.audit import audit_context_setter, view_audit_decorator\n'), ((340, 5, 340, 57), 'backend.audit.audit.view_audit_decorator', 'view_audit_decorator', ({(340, 26, 340, 56): 'GroupMemberDeleteAuditProvider'}, {}), '(GroupMemberDeleteAuditProvider)', False, 'from backend.audit.audit import audit_context_setter, view_audit_decorator\n'), ((365, 15, 365, 34), 'backend.apps.group.models.Group.objects.all', 'Group.objects.all', ({}, {}), '()', False, 'from backend.apps.group.models import Group\n'), ((369, 16, 369, 26), 'backend.biz.group.GroupBiz', 'GroupBiz', ({}, {}), '()', False, 'from backend.biz.group import GroupBiz, GroupCheckBiz, GroupMemberExpiredAtBean\n'), ((377, 5, 377, 56), 'backend.audit.audit.view_audit_decorator', 'view_audit_decorator', ({(377, 26, 377, 55): 'GroupMemberRenewAuditProvider'}, {}), '(GroupMemberRenewAuditProvider)', False, 'from backend.audit.audit import audit_context_setter, view_audit_decorator\n'), ((409, 15, 409, 34), 'backend.apps.group.models.Group.objects.all', 'Group.objects.all', ({}, {}), '()', False, 'from 
backend.apps.group.models import Group\n'), ((414, 19, 414, 32), 'backend.biz.template.TemplateBiz', 'TemplateBiz', ({}, {}), '()', False, 'from backend.biz.template import TemplateBiz\n'), ((453, 15, 453, 34), 'backend.apps.group.models.Group.objects.all', 'Group.objects.all', ({}, {}), '()', False, 'from backend.apps.group.models import Group\n'), ((456, 23, 456, 39), 'backend.biz.policy.PolicyQueryBiz', 'PolicyQueryBiz', ({}, {}), '()', False, 'from backend.biz.policy import PolicyBean, PolicyOperationBiz, PolicyQueryBiz\n'), ((457, 27, 457, 47), 'backend.biz.policy.PolicyOperationBiz', 'PolicyOperationBiz', ({}, {}), '()', False, 'from backend.biz.policy import PolicyBean, PolicyOperationBiz, PolicyQueryBiz\n'), ((458, 16, 458, 26), 'backend.biz.group.GroupBiz', 'GroupBiz', ({}, {}), '()', False, 'from backend.biz.group import GroupBiz, GroupCheckBiz, GroupMemberExpiredAtBean\n'), ((460, 18, 460, 30), 'backend.trans.group.GroupTrans', 'GroupTrans', ({}, {}), '()', False, 'from backend.trans.group import GroupTrans\n'), ((468, 5, 468, 59), 'backend.audit.audit.view_audit_decorator', 'view_audit_decorator', ({(468, 26, 468, 58): 'GroupTemplateCreateAuditProvider'}, {}), '(GroupTemplateCreateAuditProvider)', False, 'from backend.audit.audit import audit_context_setter, view_audit_decorator\n'), ((516, 5, 516, 57), 'backend.audit.audit.view_audit_decorator', 'view_audit_decorator', ({(516, 26, 516, 56): 'GroupPolicyDeleteAuditProvider'}, {}), '(GroupPolicyDeleteAuditProvider)', False, 'from backend.audit.audit import audit_context_setter, view_audit_decorator\n'), ((547, 5, 547, 57), 'backend.audit.audit.view_audit_decorator', 'view_audit_decorator', ({(547, 26, 547, 56): 'GroupPolicyUpdateAuditProvider'}, {}), '(GroupPolicyUpdateAuditProvider)', False, 'from backend.audit.audit import audit_context_setter, view_audit_decorator\n'), ((571, 15, 571, 34), 'backend.apps.group.models.Group.objects.all', 'Group.objects.all', ({}, {}), '()', False, 'from backend.apps.group.models import Group\n'), ((574, 10, 574, 20), 'backend.biz.group.GroupBiz', 'GroupBiz', ({}, {}), '()', False, 'from backend.biz.group import GroupBiz, GroupCheckBiz, GroupMemberExpiredAtBean\n'), ((594, 15, 594, 24), 'backend.biz.role.RoleBiz', 'RoleBiz', ({}, {}), '()', False, 'from backend.biz.role import RoleBiz, RoleListQuery, RoleObjectRelationChecker\n'), ((602, 5, 602, 53), 'backend.audit.audit.view_audit_decorator', 'view_audit_decorator', ({(602, 26, 602, 52): 'GroupTransferAuditProvider'}, {}), '(GroupTransferAuditProvider)', False, 'from backend.audit.audit import audit_context_setter, view_audit_decorator\n'), ((618, 20, 618, 37), 'backend.biz.policy_tag.ConditionTagBiz', 'ConditionTagBiz', ({}, {}), '()', False, 'from backend.biz.policy_tag import ConditionTagBean, ConditionTagBiz\n'), ((619, 19, 619, 32), 'backend.biz.template.TemplateBiz', 'TemplateBiz', ({}, {}), '()', False, 'from backend.biz.template import TemplateBiz\n'), ((621, 15, 621, 34), 'backend.apps.group.models.Group.objects.all', 'Group.objects.all', ({}, {}), '()', False, 'from backend.apps.group.models import Group\n'), ((668, 17, 668, 33), 'backend.biz.policy.PolicyQueryBiz', 'PolicyQueryBiz', ({}, {}), '()', False, 'from backend.biz.policy import PolicyBean, PolicyOperationBiz, PolicyQueryBiz\n'), ((669, 20, 669, 37), 'backend.biz.policy_tag.ConditionTagBiz', 'ConditionTagBiz', ({}, {}), '()', False, 'from backend.biz.policy_tag import ConditionTagBean, ConditionTagBiz\n'), ((671, 15, 671, 34), 'backend.apps.group.models.Group.objects.all', 
'Group.objects.all', ({}, {}), '()', False, 'from backend.apps.group.models import Group\n'), ((85, 9, 85, 20), 'functools.wraps', 'wraps', ({(85, 15, 85, 19): 'func'}, {}), '(func)', False, 'from functools import wraps\n'), ((160, 18, 160, 62), 'pydantic.tools.parse_obj_as', 'parse_obj_as', ({(160, 31, 160, 44): 'List[Subject]', (160, 46, 160, 61): "data['members']"}, {}), "(List[Subject], data['members'])", False, 'from pydantic.tools import parse_obj_as\n'), ((173, 8, 173, 41), 'backend.audit.audit.audit_context_setter', 'audit_context_setter', (), '', False, 'from backend.audit.audit import audit_context_setter, view_audit_decorator\n'), ((175, 15, 175, 73), 'rest_framework.response.Response', 'Response', (), '', False, 'from rest_framework.response import Response\n'), ((233, 8, 233, 41), 'backend.audit.audit.audit_context_setter', 'audit_context_setter', (), '', False, 'from backend.audit.audit import audit_context_setter, view_audit_decorator\n'), ((235, 15, 235, 40), 'rest_framework.response.Response', 'Response', ({(235, 24, 235, 39): 'serializer.data'}, {}), '(serializer.data)', False, 'from rest_framework.response import Response\n'), ((250, 8, 250, 41), 'backend.audit.audit.audit_context_setter', 'audit_context_setter', (), '', False, 'from backend.audit.audit import audit_context_setter, view_audit_decorator\n'), ((252, 15, 252, 27), 'rest_framework.response.Response', 'Response', ({(252, 24, 252, 26): '{}'}, {}), '({})', False, 'from rest_framework.response import Response\n'), ((277, 16, 277, 65), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (), '', False, 'from django.shortcuts import get_object_or_404\n'), ((280, 18, 280, 57), 'backend.biz.role.RoleObjectRelationChecker', 'RoleObjectRelationChecker', ({(280, 44, 280, 56): 'request.role'}, {}), '(request.role)', False, 'from backend.biz.role import RoleBiz, RoleListQuery, RoleObjectRelationChecker\n'), ((293, 21, 293, 44), 'rest_framework.pagination.LimitOffsetPagination', 'LimitOffsetPagination', ({}, {}), '()', False, 'from rest_framework.pagination import LimitOffsetPagination\n'), ((332, 15, 332, 59), 'rest_framework.response.Response', 'Response', (), '', False, 'from rest_framework.response import Response\n'), ((356, 8, 356, 66), 'backend.audit.audit.audit_context_setter', 'audit_context_setter', (), '', False, 'from backend.audit.audit import audit_context_setter, view_audit_decorator\n'), ((358, 15, 358, 27), 'rest_framework.response.Response', 'Response', ({(358, 24, 358, 26): '{}'}, {}), '({})', False, 'from rest_framework.response import Response\n'), ((363, 26, 363, 80), 'backend.account.permissions.role_perm_class', 'role_perm_class', ({(363, 42, 363, 79): 'PermissionCodeEnum.MANAGE_GROUP.value'}, {}), '(PermissionCodeEnum.MANAGE_GROUP.value)', False, 'from backend.account.permissions import RolePermission, role_perm_class\n'), ((398, 8, 398, 66), 'backend.audit.audit.audit_context_setter', 'audit_context_setter', (), '', False, 'from backend.audit.audit import audit_context_setter, view_audit_decorator\n'), ((400, 15, 400, 27), 'rest_framework.response.Response', 'Response', ({(400, 24, 400, 26): '{}'}, {}), '({})', False, 'from rest_framework.response import Response\n'), ((422, 16, 422, 65), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (), '', False, 'from django.shortcuts import get_object_or_404\n'), ((435, 16, 435, 65), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (), '', False, 'from django.shortcuts import get_object_or_404\n'), ((481, 8, 484, 9), 
'backend.audit.audit.audit_context_setter', 'audit_context_setter', (), '', False, 'from backend.audit.audit import audit_context_setter, view_audit_decorator\n'), ((486, 15, 486, 59), 'rest_framework.response.Response', 'Response', (), '', False, 'from rest_framework.response import Response\n'), ((495, 14, 495, 55), 'backend.common.serializers.SystemQuerySLZ', 'SystemQuerySLZ', (), '', False, 'from backend.common.serializers import SystemQuerySLZ\n'), ((499, 16, 499, 65), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (), '', False, 'from django.shortcuts import get_object_or_404\n'), ((519, 14, 519, 48), 'backend.apps.policy.serializers.PolicyDeleteSLZ', 'PolicyDeleteSLZ', (), '', False, 'from backend.apps.policy.serializers import PolicyDeleteSLZ, PolicySLZ, PolicySystemSLZ\n'), ((537, 8, 537, 93), 'backend.audit.audit.audit_context_setter', 'audit_context_setter', (), '', False, 'from backend.audit.audit import audit_context_setter, view_audit_decorator\n'), ((539, 15, 539, 25), 'rest_framework.response.Response', 'Response', ({}, {}), '()', False, 'from rest_framework.response import Response\n'), ((563, 8, 563, 106), 'backend.audit.audit.audit_context_setter', 'audit_context_setter', (), '', False, 'from backend.audit.audit import audit_context_setter, view_audit_decorator\n'), ((565, 15, 565, 27), 'rest_framework.response.Response', 'Response', ({(565, 24, 565, 26): '{}'}, {}), '({})', False, 'from rest_framework.response import Response\n'), ((592, 26, 592, 82), 'backend.account.permissions.role_perm_class', 'role_perm_class', ({(592, 42, 592, 81): 'PermissionCodeEnum.TRANSFER_GROUP.value'}, {}), '(PermissionCodeEnum.TRANSFER_GROUP.value)', False, 'from backend.account.permissions import RolePermission, role_perm_class\n'), ((612, 8, 612, 66), 'backend.audit.audit.audit_context_setter', 'audit_context_setter', (), '', False, 'from backend.audit.audit import audit_context_setter, view_audit_decorator\n'), ((614, 15, 614, 27), 'rest_framework.response.Response', 'Response', ({(614, 24, 614, 26): '{}'}, {}), '({})', False, 'from rest_framework.response import Response\n'), ((641, 24, 641, 96), 'pydantic.tools.parse_obj_as', 'parse_obj_as', ({(641, 37, 641, 59): 'List[ConditionTagBean]', (641, 61, 641, 95): "related_resource_type['condition']"}, {}), "(List[ConditionTagBean], related_resource_type['condition'])", False, 'from pydantic.tools import parse_obj_as\n'), ((681, 21, 681, 59), 'backend.apps.application.serializers.ConditionCompareSLZ', 'ConditionCompareSLZ', (), '', False, 'from backend.apps.application.serializers import ConditionCompareSLZ, ConditionTagSLZ\n'), ((353, 47, 353, 91), 'pydantic.tools.parse_obj_as', 'parse_obj_as', ({(353, 60, 353, 73): 'List[Subject]', (353, 75, 353, 90): "data['members']"}, {}), "(List[Subject], data['members'])", False, 'from pydantic.tools import parse_obj_as\n'), ((394, 22, 394, 83), 'pydantic.tools.parse_obj_as', 'parse_obj_as', ({(394, 35, 394, 65): 'List[GroupMemberExpiredAtBean]', (394, 67, 394, 82): "data['members']"}, {}), "(List[GroupMemberExpiredAtBean], data['members'])", False, 'from pydantic.tools import parse_obj_as\n'), ((512, 21, 512, 49), 'backend.apps.policy.serializers.PolicyDeleteSLZ', 'PolicyDeleteSLZ', (), '', False, 'from backend.apps.policy.serializers import PolicyDeleteSLZ, PolicySLZ, PolicySystemSLZ\n'), ((559, 20, 559, 70), 'backend.biz.policy.PolicyBean', 'PolicyBean', (), '', False, 'from backend.biz.policy import PolicyBean, PolicyOperationBiz, PolicyQueryBiz\n'), ((648, 21, 648, 49), 
'backend.biz.policy.PolicyBean.parse_obj', 'PolicyBean.parse_obj', ({(648, 42, 648, 48): 'action'}, {}), '(action)', False, 'from backend.biz.policy import PolicyBean, PolicyOperationBiz, PolicyQueryBiz\n'), ((701, 12, 701, 84), 'pydantic.tools.parse_obj_as', 'parse_obj_as', ({(701, 25, 701, 47): 'List[ConditionTagBean]', (701, 49, 701, 83): "related_resource_type['condition']"}, {}), "(List[ConditionTagBean], related_resource_type['condition'])", False, 'from pydantic.tools import parse_obj_as\n'), ((702, 12, 702, 63), 'pydantic.tools.parse_obj_as', 'parse_obj_as', ({(702, 25, 702, 47): 'List[ConditionTagBean]', (702, 49, 702, 62): 'old_condition'}, {}), '(List[ConditionTagBean], old_condition)', False, 'from pydantic.tools import parse_obj_as\n'), ((676, 21, 676, 62), 'backend.apps.application.serializers.ConditionCompareSLZ', 'ConditionCompareSLZ', (), '', False, 'from backend.apps.application.serializers import ConditionCompareSLZ, ConditionTagSLZ\n'), ((107, 15, 107, 56), 'backend.biz.role.RoleListQuery', 'RoleListQuery', ({(107, 29, 107, 41): 'request.role', (107, 43, 107, 55): 'request.user'}, {}), '(request.role, request.user)', False, 'from backend.biz.role import RoleBiz, RoleListQuery, RoleObjectRelationChecker\n'), ((188, 23, 188, 43), 'backend.apps.group.models.Group.objects.none', 'Group.objects.none', ({}, {}), '()', False, 'from backend.apps.group.models import Group\n'), ((193, 15, 193, 48), 'backend.biz.role.RoleListQuery', 'RoleListQuery', ({(193, 29, 193, 33): 'role', (193, 35, 193, 47): 'request.user'}, {}), '(role, request.user)', False, 'from backend.biz.role import RoleBiz, RoleListQuery, RoleObjectRelationChecker\n'), ((239, 39, 239, 63), 'rest_framework.serializers.Serializer', 'serializers.Serializer', ({}, {}), '()', False, 'from rest_framework import serializers, status, views\n'), ((319, 27, 319, 68), 'pydantic.tools.parse_obj_as', 'parse_obj_as', ({(319, 40, 319, 53): 'List[Subject]', (319, 55, 319, 67): 'members_data'}, {}), '(List[Subject], members_data)', False, 'from pydantic.tools import parse_obj_as\n'), ((303, 39, 303, 63), 'rest_framework.serializers.Serializer', 'serializers.Serializer', ({}, {}), '()', False, 'from rest_framework import serializers, status, views\n'), ((337, 39, 337, 63), 'rest_framework.serializers.Serializer', 'serializers.Serializer', ({}, {}), '()', False, 'from rest_framework import serializers, status, views\n'), ((374, 39, 374, 63), 'rest_framework.serializers.Serializer', 'serializers.Serializer', ({}, {}), '()', False, 'from rest_framework import serializers, status, views\n'), ((424, 19, 424, 82), 'backend.apps.template.models.PermTemplatePolicyAuthorized.objects.filter_by_subject', 'PermTemplatePolicyAuthorized.objects.filter_by_subject', ({(424, 74, 424, 81): 'subject'}, {}), '(subject)', False, 'from backend.apps.template.models import PermTemplatePolicyAuthorized\n'), ((465, 44, 465, 68), 'rest_framework.serializers.Serializer', 'serializers.Serializer', ({}, {}), '()', False, 'from rest_framework import serializers, status, views\n'), ((491, 39, 491, 75), 'backend.apps.policy.serializers.PolicySLZ', 'PolicySLZ', (), '', False, 'from backend.apps.policy.serializers import PolicyDeleteSLZ, PolicySLZ, PolicySystemSLZ\n'), ((513, 39, 513, 63), 'rest_framework.serializers.Serializer', 'serializers.Serializer', ({}, {}), '()', False, 'from rest_framework import serializers, status, views\n'), ((544, 39, 544, 63), 'rest_framework.serializers.Serializer', 'serializers.Serializer', ({}, {}), '()', False, 'from rest_framework 
import serializers, status, views\n'), ((578, 39, 578, 81), 'backend.apps.policy.serializers.PolicySystemSLZ', 'PolicySystemSLZ', (), '', False, 'from backend.apps.policy.serializers import PolicyDeleteSLZ, PolicySLZ, PolicySystemSLZ\n'), ((599, 39, 599, 63), 'rest_framework.serializers.Serializer', 'serializers.Serializer', ({}, {}), '()', False, 'from rest_framework import serializers, status, views\n'), ((627, 39, 627, 87), 'backend.apps.application.serializers.ConditionTagSLZ', 'ConditionTagSLZ', (), '', False, 'from backend.apps.application.serializers import ConditionCompareSLZ, ConditionTagSLZ\n'), ((677, 39, 677, 87), 'backend.apps.application.serializers.ConditionTagSLZ', 'ConditionTagSLZ', (), '', False, 'from backend.apps.application.serializers import ConditionCompareSLZ, ConditionTagSLZ\n'), ((112, 15, 112, 54), 'backend.biz.role.RoleObjectRelationChecker', 'RoleObjectRelationChecker', ({(112, 41, 112, 53): 'request.role'}, {}), '(request.role)', False, 'from backend.biz.role import RoleBiz, RoleListQuery, RoleObjectRelationChecker\n'), ((191, 19, 191, 59), 'backend.biz.role.RoleListQuery', 'RoleListQuery', ({(191, 33, 191, 44): 'filter_role', (191, 46, 191, 58): 'request.user'}, {}), '(filter_role, request.user)', False, 'from backend.biz.role import RoleBiz, RoleListQuery, RoleObjectRelationChecker\n'), ((659, 35, 659, 86), 'pydantic.tools.parse_obj_as', 'parse_obj_as', ({(659, 48, 659, 70): 'List[ConditionTagBean]', (659, 72, 659, 85): 'old_condition'}, {}), '(List[ConditionTagBean], old_condition)', False, 'from pydantic.tools import parse_obj_as\n'), ((664, 48, 664, 90), 'django.utils.translation.gettext', '_', ({(664, 50, 664, 89): '"""模板: {} 没有操作: {} 的权限"""'}, {}), "('模板: {} 没有操作: {} 的权限')", True, 'from django.utils.translation import gettext as _\n'), ((282, 55, 282, 118), 'django.utils.translation.gettext', '_', ({(282, 57, 282, 117): '"""用户组({})不在当前用户身份可访问的范围内"""'}, {}), "('用户组({})不在当前用户身份可访问的范围内')", True, 'from django.utils.translation import gettext as _\n'), ((92, 28, 92, 77), 'django.utils.translation.gettext', '_', ({(92, 30, 92, 76): '"""只读用户组({})无法进行({})操作!"""'}, {}), "('只读用户组({})无法进行({})操作!')", True, 'from django.utils.translation import gettext as _\n')] |
fillest/7drl2013 | towers.py | 96d291dce08a85d3871713c99f3a036de482d6ca | import util
import libtcodpy as tcod
import enemies
import operator
class Missile (util.Entity):
sym = '*'
color = tcod.white
class BasicMissile (Missile):
color = tcod.yellow
class IceMissile (Missile):
color = tcod.light_blue
class AoeMissile (Missile):
color = tcod.red
class Building (util.Entity):
sym = '@'
max_hp = 1
cost = 0
def __init__ (self, *args):
super(Building, self).__init__(*args)
self.hp = self.max_hp
def hurt (self, hp):
self.hp -= hp
if self.hp < 1:
self.die()
def hit (self, e):
if e in self.state.entities:
e.hurt(self.damage)
def die (self):
if self in self.state.entities:
self.delete()
def put (self):
assert self.state.energy > 0
self.state.entities.append(self)
self.state.energy -= self.cost
return self
def delete (self):
self.state.entities.remove(self)
self.state.energy += self.cost
return self
class Heart (Building):
sym = '&'
color = tcod.darker_red
max_hp = 20
def delete (self):
self.state.is_paused = True
return super(Heart, self).delete()
class Bait (Building):
sym = Heart.sym
color = tcod.pink
max_hp = 10
class Tower (Building):
radius = 15
max_hp = 10
damage = 1
missile = None
def __init__ (self, *args):
super(Tower, self).__init__(*args)
self.cooldown = False
def update (self):
if not self.cooldown:
# dist_min = None
# target = None
# for e in self.state.entities.enemies():
# d = util.dist(self.x, self.y, e.x, e.y)
# if d < (self.radius + 1) and ((dist_min is None) or (d < dist_min)):
# dist_min = d
# target = e
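			# Prefer enemies in range that are not already covered by enough incoming damage
			# from other towers; fall back to "overkilled" targets only if nothing else is available.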
preferred_targets = []
other_targets = []
for e in self.state.entities.enemies():
d = util.dist(self.x, self.y, e.x, e.y)
if d < (self.radius + 1):
if e in self.state.targets_towers:
total_damage = sum([t.damage for t in self.state.targets_towers[e]])
if total_damage < e.hp:
preferred_targets.append((d, e))
else:
other_targets.append((d, e))
else:
preferred_targets.append((d, e))
target = None
if preferred_targets:
_d, target = sorted(preferred_targets, key = operator.itemgetter(0))[0]
elif other_targets:
_d, target = sorted(other_targets, key = operator.itemgetter(0))[0]
if target:
self.state.targets_towers[target].append(self)
self._shoot(target)
def render (self):
super(Tower, self).render()
if self.mouse_over:
# if True:
for x in range(self.x - (self.radius + 1), self.x + (self.radius + 1)):
for y in range(self.y - (self.radius + 1), self.y + (self.radius + 1)):
if util.dist(self.x, self.y, x, y) < (self.radius + 1):
tcod.console_set_char_background(0, x, y, tcod.Color(*[15]*3), flag = tcod.BKGND_SET)
def _shoot (self, e):
self.cooldown = True
def clear_cd ():
self.cooldown = False
self.state.timers.start_run_once(1000, clear_cd)
m = self.missile(self.state, self.x, self.y)
self.state.entities.append(m)
missile_speed = 20
self.state.timers.start(missile_speed, self.update_missile, [m, e])
def update_missile (self, m, e):
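		# Advance the missile one cell along a line toward the enemy on each timer tick;
		# when tcod.line_step() is exhausted (x is None), the missile has arrived and the hit is applied.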
tcod.line_init(m.x, m.y, e.x, e.y)
x, y = tcod.line_step()
if x is None:
self.state.entities.remove(m)
self.hit(e)
return util.STOP
else:
m.x = x
m.y = y
class BasicTower (Tower):
color = tcod.dark_green
missile = BasicMissile
cost = 1
class ResearchBuilding (Building):
color = tcod.dark_sepia
cost = 1
def __init__ (self, *args):
super(ResearchBuilding, self).__init__(*args)
self.timer = self.state.timers.start(1000, self._research)
def _research (self):
pass
class AoeExplosion (util.Entity):
sym = '*'
color = tcod.dark_red
def __init__ (self, radius, *args):
super(AoeExplosion, self).__init__(*args)
self.radius = radius
def render (self):
for x in range(self.x - self.radius, self.x + self.radius):
for y in range(self.y - self.radius, self.y + self.radius):
tcod.console_put_char(0, x, y, self.sym, tcod.BKGND_NONE)
tcod.console_set_char_foreground(0, x, y, self.color)
class AoeTower (Tower):
color = tcod.dark_orange
missile = AoeMissile
cost = 2
def hit (self, target):
radius = 2
for x in range(target.x - radius, target.x + radius):
for y in range(target.y - radius, target.y + radius):
for e in self.state.entities.enemies():
if (e.x, e.y) == (x, y):
if e in self.state.entities: #TODO copypaste
e.hurt(self.damage)
e = AoeExplosion(radius, self.state, target.x, target.y)
self.state.entities.append(e)
self.state.timers.start_run_once(70, lambda: self.state.entities.remove(e))
class IceTower (Tower):
damage = 0.2
color = tcod.dark_blue
missile = IceMissile
cost = 1
def hit (self, target):
target.hurt(self.damage)
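		# Slow the target to 1/3 speed; the debuff rolls back after 1000 time units,
		# and hits landing while already slowed just reset that rollback timer.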
if not getattr(target, 'is_debuffed', False):
old_speed = target.timer.interval
target.timer.interval *= 3
target.timer.time_buf *= 3
target.is_debuffed = True
def rollback ():
target.timer.interval = old_speed
target.timer.time_buf /= 3
target.is_debuffed = False
self.rollback_timer = self.state.timers.start_run_once(1000, rollback)
elif getattr(self, 'rollback_timer', False):
self.rollback_timer.reset()
| [((135, 2, 135, 36), 'libtcodpy.line_init', 'tcod.line_init', ({(135, 17, 135, 20): 'm.x', (135, 22, 135, 25): 'm.y', (135, 27, 135, 30): 'e.x', (135, 32, 135, 35): 'e.y'}, {}), '(m.x, m.y, e.x, e.y)', True, 'import libtcodpy as tcod\n'), ((136, 9, 136, 25), 'libtcodpy.line_step', 'tcod.line_step', ({}, {}), '()', True, 'import libtcodpy as tcod\n'), ((92, 8, 92, 43), 'util.dist', 'util.dist', ({(92, 18, 92, 24): 'self.x', (92, 26, 92, 32): 'self.y', (92, 34, 92, 37): 'e.x', (92, 39, 92, 42): 'e.y'}, {}), '(self.x, self.y, e.x, e.y)', False, 'import util\n'), ((174, 4, 174, 61), 'libtcodpy.console_put_char', 'tcod.console_put_char', ({(174, 26, 174, 27): '(0)', (174, 29, 174, 30): 'x', (174, 32, 174, 33): 'y', (174, 35, 174, 43): 'self.sym', (174, 45, 174, 60): 'tcod.BKGND_NONE'}, {}), '(0, x, y, self.sym, tcod.BKGND_NONE)', True, 'import libtcodpy as tcod\n'), ((175, 4, 175, 57), 'libtcodpy.console_set_char_foreground', 'tcod.console_set_char_foreground', ({(175, 37, 175, 38): '(0)', (175, 40, 175, 41): 'x', (175, 43, 175, 44): 'y', (175, 46, 175, 56): 'self.color'}, {}), '(0, x, y, self.color)', True, 'import libtcodpy as tcod\n'), ((119, 8, 119, 39), 'util.dist', 'util.dist', ({(119, 18, 119, 24): 'self.x', (119, 26, 119, 32): 'self.y', (119, 34, 119, 35): 'x', (119, 37, 119, 38): 'y'}, {}), '(self.x, self.y, x, y)', False, 'import util\n'), ((105, 49, 105, 71), 'operator.itemgetter', 'operator.itemgetter', ({(105, 69, 105, 70): '(0)'}, {}), '(0)', False, 'import operator\n'), ((120, 48, 120, 67), 'libtcodpy.Color', 'tcod.Color', ({(120, 59, 120, 66): '*([15] * 3)'}, {}), '(*([15] * 3))', True, 'import libtcodpy as tcod\n'), ((107, 45, 107, 67), 'operator.itemgetter', 'operator.itemgetter', ({(107, 65, 107, 66): '(0)'}, {}), '(0)', False, 'import operator\n')] |
lukasjoc/random | python/mandelbrot.py | 5be080b424f02491fb219634902fc0cc192aff6c | #!/usr/bin/python3
from PIL import Image
from numpy import array  # the builtin complex() is used below; numpy's deprecated complex alias is not needed
from tqdm import tqdm
import colorsys
W=512
#W=142
def mandelbrot(x, y):
def get_colors(i):
color = 255 * array(colorsys.hsv_to_rgb(i / 255.0, 1.0, 0.5))
return tuple(color.astype(int))
c, cc = 0, complex(x, y)
for i in range(1, 1000):
if abs(c) > 2:
return get_colors(i)
c = c * c + cc
return 0,0,0
if __name__ == "__main__":
img = Image.new("RGB", (W, int(W / 2)))
pixels = img.load()
for x in tqdm(range(img.size[0])):
for y in tqdm(range(img.size[1])):
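            # Map pixel (x, y) onto the complex plane: xx spans roughly [-3, 1] (real axis)
            # and yy spans [-1, 1] (imaginary axis), keeping the set centred in the image.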
xx = (x - (0.75 * W)) / (W / 4)
yy = (y - (W / 4)) / (W / 4)
pixels[x, y] = mandelbrot(xx, yy)
img.show()
img.save("mandelbrot.jpg")
| [((17, 15, 17, 28), 'numpy.complex', 'complex', ({(17, 23, 17, 24): 'x', (17, 26, 17, 27): 'y'}, {}), '(x, y)', False, 'from numpy import complex, array\n'), ((14, 28, 14, 68), 'colorsys.hsv_to_rgb', 'colorsys.hsv_to_rgb', ({(14, 48, 14, 57): '(i / 255.0)', (14, 59, 14, 62): '(1.0)', (14, 64, 14, 67): '(0.5)'}, {}), '(i / 255.0, 1.0, 0.5)', False, 'import colorsys\n')] |
tonyreina/mlt | tests/unit/commands/test_deploy.py | ee490ebdeb5aa6924dbfc0a067a0653754c470f4 | #
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: EPL-2.0
#
from __future__ import print_function
import uuid
import pytest
from mock import call, MagicMock
from mlt.commands.deploy import DeployCommand
from test_utils.io import catch_stdout
@pytest.fixture
def sleep(patch):
return patch('time.sleep')
@pytest.fixture
def fetch_action_arg(patch):
return patch('files.fetch_action_arg', MagicMock(return_value='output'))
@pytest.fixture
def kube_helpers(patch):
return patch('kubernetes_helpers')
@pytest.fixture
def json_mock(patch):
return patch('json')
@pytest.fixture
def open_mock(patch):
return patch('open')
@pytest.fixture
def popen_mock(patch):
popen_mock = MagicMock()
popen_mock.return_value.poll.return_value = 0
return patch('Popen', popen_mock)
@pytest.fixture
def process_helpers(patch):
return patch('process_helpers')
@pytest.fixture
def progress_bar(patch):
progress_mock = MagicMock()
progress_mock.duration_progress.side_effect = lambda x, y, z: print(
'Pushing ')
return patch('progress_bar', progress_mock)
@pytest.fixture
def template(patch):
return patch('Template')
@pytest.fixture
def verify_build(patch):
return patch('build_helpers.verify_build')
@pytest.fixture
def verify_init(patch):
return patch('config_helpers.load_config')
@pytest.fixture
def walk_mock(patch):
return patch('os.walk', MagicMock(return_value=['foo', 'bar']))
@pytest.fixture
def yaml(patch):
return patch('yaml.load')
def deploy(no_push, skip_crd_check, interactive, extra_config_args, retries=5):
deploy = DeployCommand(
{'deploy': True, '--no-push': no_push,
'--skip-crd-check': skip_crd_check,
'--interactive': interactive, '--retries': retries,
'--logs':False})
deploy.config = {'name': 'app', 'namespace': 'namespace'}
deploy.config.update(extra_config_args)
with catch_stdout() as caught_output:
deploy.action()
output = caught_output.getvalue()
return output
def verify_successful_deploy(output, did_push=True, interactive=False):
"""assert pushing, deploying, then objs created, then pushed"""
pushing = output.find('Pushing ')
push_skip = output.find('Skipping image push')
deploying = output.find('Deploying ')
inspecting = output.find('Inspect created objects by running:\n')
pushed = output.find('Pushed to ')
pod_connect = output.find('Connecting to pod...')
if did_push:
assert all(var >= 0 for var in (
deploying, inspecting, pushing, pushed))
assert deploying < inspecting, pushing < pushed
else:
assert all(var == -1 for var in (pushing, pushed))
assert all(var >= 0 for var in (deploying, inspecting, push_skip))
assert push_skip < deploying, deploying < inspecting
if interactive:
assert pod_connect > inspecting
def test_deploy_gce(walk_mock, progress_bar, popen_mock, open_mock,
template, kube_helpers, process_helpers, verify_build,
verify_init, fetch_action_arg, json_mock):
json_mock.load.return_value = {
'last_remote_container': 'gcr.io/app_name:container_id',
'last_push_duration': 0.18889}
output = deploy(
no_push=False, skip_crd_check=True,
interactive=False,
extra_config_args={'gceProject': 'gcr://projectfoo'})
verify_successful_deploy(output)
def test_deploy_docker(walk_mock, progress_bar, popen_mock, open_mock,
template, kube_helpers, process_helpers, verify_build,
verify_init, fetch_action_arg, json_mock):
json_mock.load.return_value = {
'last_remote_container': 'gcr.io/app_name:container_id',
'last_push_duration': 0.18889}
output = deploy(
no_push=False, skip_crd_check=True,
interactive=False,
extra_config_args={'registry': 'dockerhub'})
verify_successful_deploy(output)
def test_deploy_without_push(walk_mock, progress_bar, popen_mock, open_mock,
template, kube_helpers, process_helpers,
verify_build, verify_init, fetch_action_arg, json_mock):
json_mock.load.return_value = {
'last_remote_container': 'gcr.io/app_name:container_id',
'last_push_duration': 0.18889}
output = deploy(
no_push=True, skip_crd_check=True,
interactive=False,
extra_config_args={'gceProject': 'gcr://projectfoo'})
verify_successful_deploy(output, did_push=False)
def test_deploy_interactive_one_file(walk_mock, progress_bar, popen_mock,
open_mock, template, kube_helpers,
process_helpers, verify_build,
verify_init, fetch_action_arg, sleep,
yaml, json_mock):
walk_mock.return_value = ['foo']
yaml.return_value = {
'template': {'foo': 'bar'}, 'containers': [{'foo': 'bar'}]}
json_mock.loads.return_value = {'status': {'phase': 'Running'}}
output = deploy(
no_push=False, skip_crd_check=True,
interactive=True,
extra_config_args={'registry': 'dockerhub'})
verify_successful_deploy(output, interactive=True)
# verify that kubectl commands are specifying namespace
for call_args in process_helpers.run_popen.call_args_list:
assert isinstance(call_args, type(call))
assert isinstance(call_args[0], tuple)
assert len(call_args[0]) > 0
command = call_args[0][0]
if command[0] == "kubectl":
assert "--namespace" in command
def test_deploy_interactive_two_files(walk_mock, progress_bar, popen_mock,
open_mock, template, kube_helpers,
process_helpers, verify_build,
verify_init, fetch_action_arg, sleep,
yaml, json_mock):
json_mock.loads.return_value = {'status': {'phase': 'Running'}}
yaml.return_value = {
'template': {'foo': 'bar'}, 'containers': [{'foo': 'bar'}]}
output = deploy(
no_push=False, skip_crd_check=True,
interactive=True,
extra_config_args={'registry': 'dockerhub', '<kube_spec>': 'r'})
verify_successful_deploy(output, interactive=True)
def test_deploy_interactive_pod_not_run(walk_mock, progress_bar, popen_mock,
open_mock, template, kube_helpers,
process_helpers, verify_build,
verify_init, fetch_action_arg, sleep,
yaml, json_mock):
json_mock.loads.return_value = {'status': {'phase': 'Error'}}
yaml.return_value = {
'template': {'foo': 'bar'}, 'containers': [{'foo': 'bar'}]}
with pytest.raises(ValueError):
output = deploy(
no_push=False, skip_crd_check=True,
interactive=True,
extra_config_args={'registry': 'dockerhub', '<kube_spec>': 'r'})
def test_deploy_update_app_run_id(open_mock, json_mock):
run_id = str(uuid.uuid4())
json_mock_data = {
'last_remote_container': 'gcr.io/app_name:container_id',
'last_push_duration': 0.18889}
json_mock.load.return_value = json_mock_data
DeployCommand._update_app_run_id(run_id)
assert json_mock_data['app_run_id'] == run_id
def test_image_push_error(walk_mock, progress_bar, popen_mock, open_mock,
template, kube_helpers, process_helpers, verify_build,
verify_init, fetch_action_arg, json_mock):
json_mock.load.return_value = {
'last_remote_container': 'gcr.io/app_name:container_id',
'last_push_duration': 0.18889}
# setup mock to induce and error during the deploy
popen_mock.return_value.poll.return_value = 1
output_str = "normal output..."
error_str = "error message..."
build_output = MagicMock()
build_output.decode.return_value = output_str
error_output = MagicMock()
error_output.decode.return_value = error_str
popen_mock.return_value.communicate.return_value = (build_output,
error_output)
deploy_cmd = DeployCommand({'deploy': True,
'--skip-crd-check': True,
'--no-push': False})
deploy_cmd.config = {'name': 'app', 'namespace': 'namespace'}
deploy_cmd.config.update({'gceProject': 'gcr://projectfoo'})
with catch_stdout() as caught_output:
with pytest.raises(SystemExit):
deploy_cmd.action()
output = caught_output.getvalue()
# assert that we got the normal output, followed by the error message
output_location = output.find(output_str)
error_location = output.find(error_str)
assert all(var >= 0 for var in (output_location, error_location))
assert output_location < error_location
| [((58, 17, 58, 28), 'mock.MagicMock', 'MagicMock', ({}, {}), '()', False, 'from mock import call, MagicMock\n'), ((70, 20, 70, 31), 'mock.MagicMock', 'MagicMock', ({}, {}), '()', False, 'from mock import call, MagicMock\n'), ((102, 13, 106, 25), 'mlt.commands.deploy.DeployCommand', 'DeployCommand', ({(103, 8, 106, 24): "{'deploy': True, '--no-push': no_push, '--skip-crd-check': skip_crd_check,\n '--interactive': interactive, '--retries': retries, '--logs': False}"}, {}), "({'deploy': True, '--no-push': no_push, '--skip-crd-check':\n skip_crd_check, '--interactive': interactive, '--retries': retries,\n '--logs': False})", False, 'from mlt.commands.deploy import DeployCommand\n'), ((239, 4, 239, 44), 'mlt.commands.deploy.DeployCommand._update_app_run_id', 'DeployCommand._update_app_run_id', ({(239, 37, 239, 43): 'run_id'}, {}), '(run_id)', False, 'from mlt.commands.deploy import DeployCommand\n'), ((255, 19, 255, 30), 'mock.MagicMock', 'MagicMock', ({}, {}), '()', False, 'from mock import call, MagicMock\n'), ((257, 19, 257, 30), 'mock.MagicMock', 'MagicMock', ({}, {}), '()', False, 'from mock import call, MagicMock\n'), ((262, 17, 264, 52), 'mlt.commands.deploy.DeployCommand', 'DeployCommand', ({(262, 31, 264, 51): "{'deploy': True, '--skip-crd-check': True, '--no-push': False}"}, {}), "({'deploy': True, '--skip-crd-check': True, '--no-push': False})", False, 'from mlt.commands.deploy import DeployCommand\n'), ((38, 43, 38, 75), 'mock.MagicMock', 'MagicMock', (), '', False, 'from mock import call, MagicMock\n'), ((93, 28, 93, 66), 'mock.MagicMock', 'MagicMock', (), '', False, 'from mock import call, MagicMock\n'), ((110, 9, 110, 23), 'test_utils.io.catch_stdout', 'catch_stdout', ({}, {}), '()', False, 'from test_utils.io import catch_stdout\n'), ((225, 9, 225, 34), 'pytest.raises', 'pytest.raises', ({(225, 23, 225, 33): 'ValueError'}, {}), '(ValueError)', False, 'import pytest\n'), ((233, 17, 233, 29), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((268, 9, 268, 23), 'test_utils.io.catch_stdout', 'catch_stdout', ({}, {}), '()', False, 'from test_utils.io import catch_stdout\n'), ((269, 13, 269, 38), 'pytest.raises', 'pytest.raises', ({(269, 27, 269, 37): 'SystemExit'}, {}), '(SystemExit)', False, 'import pytest\n')] |
mcvine/mcvine | packages/mccomponents/tests/mccomponentsbpmodule/sample/Broadened_E_Q_Kernel_TestCase.py | 42232534b0c6af729628009bed165cd7d833789d | #!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Jiao Lin
# California Institute of Technology
# (C) 2006-2010 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
standalone = True
import unittestX as unittest
import journal
debug = journal.debug( "Broadened_E_Q_Kernel_TestCase" )
warning = journal.warning( "Broadened_E_Q_Kernel_TestCase" )
import mcni
from mccomposite import mccompositebp
from mccomponents import mccomponentsbp
class TestCase(unittest.TestCase):
def test(self):
E_Q = "Q*Q/3."
S_Q = "1"
sigma_Q = "Q/2."
Qmin = 0; Qmax = 10
absorption_coefficient = scattering_coefficient = 1.
kernel = mccomponentsbp.create_Broadened_E_Q_Kernel(
E_Q, S_Q, sigma_Q,
Qmin, Qmax,
absorption_coefficient,
scattering_coefficient,
)
ei = 500 # meV
from mcni.utils import conversion
vil = conversion.e2v(ei)
vi = (0,0,vil)
import numpy.linalg as nl
import numpy as np
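        # scatter a handful of neutrons and compute the energy and momentum transfer for each event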
for i in range(10):
event = mcni.neutron(
r = (0,0,0), v = vi,
prob = 1, time = 0 )
kernel.scatter( event );
vf = np.array(event.state.velocity)
diffv = vi - vf
Q = conversion.v2k(nl.norm(diffv))
ef = conversion.v2e(nl.norm(vf))
E = ei - ef
# print E, Q, event
E1 = eval(E_Q)
continue
return
pass # end of TestCase
def main():
unittest.main()
return
if __name__ == "__main__":
main()
# version
__id__ = "$Id: TestCase.py 696 2010-11-09 06:23:06Z linjiao $"
# End of file
| [((21, 8, 21, 56), 'journal.debug', 'journal.debug', ({(21, 23, 21, 54): '"""Broadened_E_Q_Kernel_TestCase"""'}, {}), "('Broadened_E_Q_Kernel_TestCase')", False, 'import journal\n'), ((22, 10, 22, 60), 'journal.warning', 'journal.warning', ({(22, 27, 22, 58): '"""Broadened_E_Q_Kernel_TestCase"""'}, {}), "('Broadened_E_Q_Kernel_TestCase')", False, 'import journal\n'), ((72, 4, 72, 19), 'unittestX.main', 'unittest.main', ({}, {}), '()', True, 'import unittestX as unittest\n'), ((37, 17, 42, 13), 'mccomponents.mccomponentsbp.create_Broadened_E_Q_Kernel', 'mccomponentsbp.create_Broadened_E_Q_Kernel', ({(38, 12, 38, 15): 'E_Q', (38, 17, 38, 20): 'S_Q', (38, 22, 38, 29): 'sigma_Q', (39, 12, 39, 16): 'Qmin', (39, 18, 39, 22): 'Qmax', (40, 12, 40, 34): 'absorption_coefficient', (41, 12, 41, 34): 'scattering_coefficient'}, {}), '(E_Q, S_Q, sigma_Q, Qmin, Qmax,\n absorption_coefficient, scattering_coefficient)', False, 'from mccomponents import mccomponentsbp\n'), ((46, 14, 46, 32), 'mcni.utils.conversion.e2v', 'conversion.e2v', ({(46, 29, 46, 31): 'ei'}, {}), '(ei)', False, 'from mcni.utils import conversion\n'), ((52, 20, 54, 36), 'mcni.neutron', 'mcni.neutron', (), '', False, 'import mcni\n'), ((56, 17, 56, 47), 'numpy.array', 'np.array', ({(56, 26, 56, 46): 'event.state.velocity'}, {}), '(event.state.velocity)', True, 'import numpy as np\n'), ((58, 31, 58, 45), 'numpy.linalg.norm', 'nl.norm', ({(58, 39, 58, 44): 'diffv'}, {}), '(diffv)', True, 'import numpy.linalg as nl\n'), ((59, 32, 59, 43), 'numpy.linalg.norm', 'nl.norm', ({(59, 40, 59, 42): 'vf'}, {}), '(vf)', True, 'import numpy.linalg as nl\n')] |
robinzixuan/Video-Question-Answering-HRI | baseline/ns-vqa/reason/options/test_options.py | ae68ffee1e6fc1eb13229e457e3b8e3bc3a11579 | from .base_options import BaseOptions
class TestOptions(BaseOptions):
"""Test Option Class"""
def __init__(self):
super(TestOptions, self).__init__()
self.parser.add_argument('--load_checkpoint_path', required=True, type=str, help='checkpoint path')
self.parser.add_argument('--save_result_path', required=True, type=str, help='save result path')
self.parser.add_argument('--max_val_samples', default=None, type=int, help='max val data')
self.parser.add_argument('--batch_size', default=256, type=int, help='batch_size')
self.is_train = False | [] |
keremakdemir/ISONE_UCED | Model_setup/NEISO_data_file/downsampling_generators_v1.py | 11ce34c5ac5d34dcab771640f41c0d2ce4ab21f9 | # -*- coding: utf-8 -*-
"""
Created on Fri Apr 24 18:45:34 2020
@author: kakdemi
"""
import pandas as pd
#importing generators
all_generators = pd.read_excel('generators2.xlsx', sheet_name='NEISO generators (dispatch)')
#getting all oil generators
all_oil = all_generators[all_generators['typ']=='oil'].copy()
#getting all generators in every zone
CT_oil = all_oil[all_oil['zone']=='CT'].copy()
ME_oil = all_oil[all_oil['zone']=='ME'].copy()
NEMA_oil = all_oil[all_oil['zone']=='NEMA'].copy()
NH_oil = all_oil[all_oil['zone']=='NH'].copy()
RI_oil = all_oil[all_oil['zone']=='RI'].copy()
SEMA_oil = all_oil[all_oil['zone']=='SEMA'].copy()
VT_oil = all_oil[all_oil['zone']=='VT'].copy()
WCMA_oil = all_oil[all_oil['zone']=='WCMA'].copy()
#defining zones
zones = ['CT','ME','NEMA','NH','RI','SEMA','VT','WCMA']
#getting all slack generators
all_slack = all_generators[all_generators['typ']=='slack'].copy()
#getting generators other than slack and oil
all_other = all_generators[(all_generators['typ']!='oil') & (all_generators['typ']!='slack')].copy()
#defining a function to downsample oil generators
def oil_downsampler(zone):
#copying the oil generators in that zone and sorting wrt to their seg1 heat rate
Selected_line_oil = globals()[zone+'_oil'].copy()
sorted_df = Selected_line_oil.sort_values(by=['seg1'])
sorted_df_reset = sorted_df.reset_index(drop=True)
#creating 3 chunks wrt their heatrates
heat_rate = list(sorted_df_reset.loc[:,'seg1'])
num = int(len(heat_rate)/3)
First_plant = sorted_df_reset.iloc[:num,:].copy()
Second_plant = sorted_df_reset.iloc[num:num*2,:].copy()
Third_plant = sorted_df_reset.iloc[num*2:,:].copy()
#finding the relevant parameters for the downsampled oil plants
First_cap = First_plant.loc[:,'netcap'].sum()
Second_cap = Second_plant.loc[:,'netcap'].sum()
Third_cap = Third_plant.loc[:,'netcap'].sum()
netcap = [First_cap, Second_cap, Third_cap]
ramp_1 = First_cap
ramp_2 = Second_cap
ramp_3 = Third_cap
ramp = [ramp_1, ramp_2, ramp_3]
First_min_cap = First_cap*0.35
Second_min_cap = Second_cap*0.35
Third_min_cap = Third_cap*0.35
min_cap = [First_min_cap, Second_min_cap, Third_min_cap]
Min_u = [1, 1, 1]
Min_d = [1, 1, 1]
zones = [zone, zone, zone]
types = ['oil', 'oil', 'oil']
seg_1_1 = First_plant.loc[:,'netcap'] * First_plant.loc[:,'seg1']
seg_1_1_new = seg_1_1.sum()/First_plant.loc[:,'netcap'].sum()
seg_1_2 = First_plant.loc[:,'netcap'] * First_plant.loc[:,'seg2']
seg_1_2_new = seg_1_2.sum()/First_plant.loc[:,'netcap'].sum()
seg_1_3 = First_plant.loc[:,'netcap'] * First_plant.loc[:,'seg3']
seg_1_3_new = seg_1_3.sum()/First_plant.loc[:,'netcap'].sum()
seg_2_1 = Second_plant.loc[:,'netcap'] * Second_plant.loc[:,'seg1']
seg_2_1_new = seg_2_1.sum()/Second_plant.loc[:,'netcap'].sum()
seg_2_2 = Second_plant.loc[:,'netcap'] * Second_plant.loc[:,'seg2']
seg_2_2_new = seg_2_2.sum()/Second_plant.loc[:,'netcap'].sum()
seg_2_3 = Second_plant.loc[:,'netcap'] * Second_plant.loc[:,'seg3']
seg_2_3_new = seg_2_3.sum()/Second_plant.loc[:,'netcap'].sum()
seg_3_1 = Third_plant.loc[:,'netcap'] * Third_plant.loc[:,'seg1']
seg_3_1_new = seg_3_1.sum()/Third_plant.loc[:,'netcap'].sum()
seg_3_2 = Third_plant.loc[:,'netcap'] * Third_plant.loc[:,'seg2']
seg_3_2_new = seg_3_2.sum()/Third_plant.loc[:,'netcap'].sum()
seg_3_3 = Third_plant.loc[:,'netcap'] * Third_plant.loc[:,'seg3']
seg_3_3_new = seg_3_3.sum()/Third_plant.loc[:,'netcap'].sum()
seg_1 = [seg_1_1_new, seg_2_1_new, seg_3_1_new]
seg_2 = [seg_1_2_new, seg_2_2_new, seg_3_2_new]
seg_3 = [seg_1_3_new, seg_2_3_new, seg_3_3_new]
var_om_1 = First_plant.loc[:,'netcap'] * First_plant.loc[:,'var_om']
var_om_1_new = var_om_1.sum()/First_plant.loc[:,'netcap'].sum()
var_om_2 = Second_plant.loc[:,'netcap'] * Second_plant.loc[:,'var_om']
var_om_2_new = var_om_2.sum()/Second_plant.loc[:,'netcap'].sum()
var_om_3 = Third_plant.loc[:,'netcap'] * Third_plant.loc[:,'var_om']
var_om_3_new = var_om_3.sum()/Third_plant.loc[:,'netcap'].sum()
var_om = [var_om_1_new, var_om_2_new, var_om_3_new]
no_load_1 = First_plant.loc[:,'netcap'] * First_plant.loc[:,'no_load']
no_load_1_new = no_load_1.sum()/First_plant.loc[:,'netcap'].sum()
no_load_2 = Second_plant.loc[:,'netcap'] * Second_plant.loc[:,'no_load']
no_load_2_new = no_load_2.sum()/Second_plant.loc[:,'netcap'].sum()
no_load_3 = Third_plant.loc[:,'netcap'] * Third_plant.loc[:,'no_load']
no_load_3_new = no_load_3.sum()/Third_plant.loc[:,'netcap'].sum()
no_load = [no_load_1_new, no_load_2_new, no_load_3_new]
st_cost_1 = First_plant.loc[:,'netcap'] * First_plant.loc[:,'st_cost']
st_cost_1_new = st_cost_1.sum()/First_plant.loc[:,'netcap'].sum()
st_cost_2 = Second_plant.loc[:,'netcap'] * Second_plant.loc[:,'st_cost']
st_cost_2_new = st_cost_2.sum()/Second_plant.loc[:,'netcap'].sum()
st_cost_3 = Third_plant.loc[:,'netcap'] * Third_plant.loc[:,'st_cost']
st_cost_3_new = st_cost_3.sum()/Third_plant.loc[:,'netcap'].sum()
st_cost = [st_cost_1_new, st_cost_2_new, st_cost_3_new]
name = [zone+'_agg_oil_1', zone+'_agg_oil_2', zone+'_agg_oil_3']
#creating a dataframe that includes downsampled oil generators
list_labels = list(WCMA_oil.columns)
list_columns = [name, types, zones, netcap, seg_1, seg_2, seg_3, min_cap, ramp, Min_u,
Min_d, var_om, no_load, st_cost]
zipped_list = list(zip(list_labels, list_columns))
gen_df = dict(zipped_list)
df_oils = pd.DataFrame(gen_df)
return df_oils
#downsampling oil generators in every zone by using the defined function
for z in zones:
globals()[z+'_agg_oil_df'] = oil_downsampler(z)
#adding downsampled oil generators to create a complete list of generators
final_generators = pd.concat([all_other, CT_agg_oil_df, ME_agg_oil_df, NEMA_agg_oil_df,
NH_agg_oil_df, RI_agg_oil_df, SEMA_agg_oil_df, VT_agg_oil_df,
WCMA_agg_oil_df, all_slack], ignore_index=True)
#exporting the generators as an Excel file
final_generators.to_excel('generators.xlsx', sheet_name='NEISO generators (dispatch)', index=False)
| [((11, 17, 11, 92), 'pandas.read_excel', 'pd.read_excel', (), '', True, 'import pandas as pd\n'), ((127, 19, 129, 77), 'pandas.concat', 'pd.concat', (), '', True, 'import pandas as pd\n'), ((117, 14, 117, 34), 'pandas.DataFrame', 'pd.DataFrame', ({(117, 27, 117, 33): 'gen_df'}, {}), '(gen_df)', True, 'import pandas as pd\n')] |
otmanabdoun/IHM-Python | GUI1.py | 624e961c2f6966b98bf2c1bc4dd276b812954ba1 | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 16 19:47:41 2021
@author: User
"""
import tkinter as tk
racine = tk . Tk ()
label = tk . Label ( racine , text ="J ' adore Python !")
bouton = tk . Button ( racine , text =" Quitter ", command = racine . destroy )
label . pack ()
bouton . pack () | [((9, 9, 9, 19), 'tkinter.Tk', 'tk.Tk', ({}, {}), '()', True, 'import tkinter as tk\n'), ((10, 8, 10, 57), 'tkinter.Label', 'tk.Label', (), '', True, 'import tkinter as tk\n'), ((11, 9, 11, 79), 'tkinter.Button', 'tk.Button', (), '', True, 'import tkinter as tk\n')] |
ertyurk/bugme | app/routes/v1/endpoints/clickup.py | 5a3ef3e089e0089055074c1c896c3fdc76600e93 | from fastapi import APIRouter, status, Body, HTTPException
from fastapi.encoders import jsonable_encoder
from starlette.responses import JSONResponse
from app.models.common import *
from app.models.clickup import *
from app.database.crud.clickup import *
router = APIRouter()
@router.get("/", response_description="Clickup integrations are retrieved.")
async def get_clickup_integrations():
clickups = await retrieve_clickups()
return (
ResponseModel(clickups, "Clickup integrations data retrieved successfully")
if len(clickups) > 0
else ResponseModel(clickups, "Empty list returned")
)
@router.post(
"/", response_description="Clickup integrations data added into the database."
)
async def add_clickup_a_integration(clickup: ClickupModel = Body(...)):
clickup = jsonable_encoder(clickup)
new_clickup = await add_new_clickup(clickup)
return ResponseModel(
new_clickup,
"clickup integration created successfully.",
status.HTTP_201_CREATED,
)
@router.get("/{id}/", response_description="Clickup data retrieved.")
async def find_clickup_integration(id):
clickup = await retrieve_clickup(id)
return (
ResponseModel(clickup, "Clickup integrations data retrieved successfully")
if clickup
else ErrorResponseModel(
"An error occured.", status.HTTP_404_NOT_FOUND, "Integration doesn't exist."
)
)
@router.put(
"/{id}/", response_description="Clickup integrations data updated in the database."
)
async def update_a_clickup_integration(
id: str, clickup: UpdateClickupModel = Body(...)
):
clickup = jsonable_encoder(clickup)
updated_clickup = await update_clickup_data(id, clickup)
return (
ResponseModel({"id": id}, "Clickup integration updated successfully")
if updated_clickup
else ErrorResponseModel(
"An error occurred",
status.HTTP_404_NOT_FOUND,
"There was an error updating the Clickup integration.",
)
)
@router.delete("/{id}/", response_description="Delete the integration")
async def delete_clickup_integration(id: str):
deleted_clickup = await delete_integration(id)
return (
ResponseModel(
"Integration with ID: {} removed".format(id),
"Integration deleted successfully",
)
if deleted_clickup
else ErrorResponseModel(
"An error occured",
status.HTTP_404_NOT_FOUND,
"Integration with id {0} doesn't exist".format(id),
)
)
| [((9, 9, 9, 20), 'fastapi.APIRouter', 'APIRouter', ({}, {}), '()', False, 'from fastapi import APIRouter, status, Body, HTTPException\n'), ((25, 60, 25, 69), 'fastapi.Body', 'Body', ({(25, 65, 25, 68): '...'}, {}), '(...)', False, 'from fastapi import APIRouter, status, Body, HTTPException\n'), ((26, 14, 26, 39), 'fastapi.encoders.jsonable_encoder', 'jsonable_encoder', ({(26, 31, 26, 38): 'clickup'}, {}), '(clickup)', False, 'from fastapi.encoders import jsonable_encoder\n'), ((51, 43, 51, 52), 'fastapi.Body', 'Body', ({(51, 48, 51, 51): '...'}, {}), '(...)', False, 'from fastapi import APIRouter, status, Body, HTTPException\n'), ((53, 14, 53, 39), 'fastapi.encoders.jsonable_encoder', 'jsonable_encoder', ({(53, 31, 53, 38): 'clickup'}, {}), '(clickup)', False, 'from fastapi.encoders import jsonable_encoder\n')] |
npeschke/cellfinder-core | cellfinder_core/main.py | 7a86a7d2c879c94da529ec6140f7e5c3f02bf288 | """
N.B. imports are within functions to prevent tensorflow from being imported before
its warnings are silenced
"""
import os
import logging
from imlib.general.logging import suppress_specific_logs
tf_suppress_log_messages = [
"multiprocessing can interact badly with TensorFlow"
]
def main(
signal_array,
background_array,
voxel_sizes,
start_plane=0,
end_plane=-1,
trained_model=None,
model_weights=None,
model="resnet50_tv",
batch_size=32,
n_free_cpus=2,
network_voxel_sizes=[5, 1, 1],
soma_diameter=16,
ball_xy_size=6,
ball_z_size=15,
ball_overlap_fraction=0.6,
log_sigma_size=0.2,
n_sds_above_mean_thresh=10,
soma_spread_factor=1.4,
max_cluster_size=100000,
cube_width=50,
cube_height=50,
cube_depth=20,
network_depth="50",
):
suppress_tf_logging(tf_suppress_log_messages)
from cellfinder_core.detect import detect
from cellfinder_core.classify import classify
from cellfinder_core.tools import prep
from pathlib import Path
home = Path.home()
install_path = home / ".cellfinder"
logging.info("Detecting cell candidates")
points = detect.main(
signal_array,
start_plane,
end_plane,
voxel_sizes,
soma_diameter,
max_cluster_size,
ball_xy_size,
ball_z_size,
ball_overlap_fraction,
soma_spread_factor,
n_free_cpus,
log_sigma_size,
n_sds_above_mean_thresh,
)
model_weights = prep.prep_classification(
trained_model, model_weights, install_path, model, n_free_cpus
)
if len(points) > 0:
logging.info("Running classification")
points = classify.main(
points,
signal_array,
background_array,
n_free_cpus,
voxel_sizes,
network_voxel_sizes,
batch_size,
cube_height,
cube_width,
cube_depth,
trained_model,
model_weights,
network_depth,
)
else:
logging.info("No candidates, skipping classification")
return points
# logging.info("Saving classified cells")
# save_cells(points, classified_points_path)
def suppress_tf_logging(tf_suppress_log_messages):
"""
Prevents many lines of logs such as:
"2019-10-24 16:54:41.363978: I tensorflow/stream_executor/platform/default
/dso_loader.cc:44] Successfully opened dynamic library libcuda.so.1"
"""
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
for message in tf_suppress_log_messages:
suppress_specific_logs("tensorflow", message)
| [((47, 11, 47, 22), 'pathlib.Path.home', 'Path.home', ({}, {}), '()', False, 'from pathlib import Path\n'), ((49, 4, 49, 45), 'logging.info', 'logging.info', ({(49, 17, 49, 44): '"""Detecting cell candidates"""'}, {}), "('Detecting cell candidates')", False, 'import logging\n'), ((51, 13, 65, 5), 'cellfinder_core.detect.detect.main', 'detect.main', ({(52, 8, 52, 20): 'signal_array', (53, 8, 53, 19): 'start_plane', (54, 8, 54, 17): 'end_plane', (55, 8, 55, 19): 'voxel_sizes', (56, 8, 56, 21): 'soma_diameter', (57, 8, 57, 24): 'max_cluster_size', (58, 8, 58, 20): 'ball_xy_size', (59, 8, 59, 19): 'ball_z_size', (60, 8, 60, 29): 'ball_overlap_fraction', (61, 8, 61, 26): 'soma_spread_factor', (62, 8, 62, 19): 'n_free_cpus', (63, 8, 63, 22): 'log_sigma_size', (64, 8, 64, 31): 'n_sds_above_mean_thresh'}, {}), '(signal_array, start_plane, end_plane, voxel_sizes,\n soma_diameter, max_cluster_size, ball_xy_size, ball_z_size,\n ball_overlap_fraction, soma_spread_factor, n_free_cpus, log_sigma_size,\n n_sds_above_mean_thresh)', False, 'from cellfinder_core.detect import detect\n'), ((67, 20, 69, 5), 'cellfinder_core.tools.prep.prep_classification', 'prep.prep_classification', ({(68, 8, 68, 21): 'trained_model', (68, 23, 68, 36): 'model_weights', (68, 38, 68, 50): 'install_path', (68, 52, 68, 57): 'model', (68, 59, 68, 70): 'n_free_cpus'}, {}), '(trained_model, model_weights, install_path, model,\n n_free_cpus)', False, 'from cellfinder_core.tools import prep\n'), ((71, 8, 71, 46), 'logging.info', 'logging.info', ({(71, 21, 71, 45): '"""Running classification"""'}, {}), "('Running classification')", False, 'import logging\n'), ((72, 17, 86, 9), 'cellfinder_core.classify.classify.main', 'classify.main', ({(73, 12, 73, 18): 'points', (74, 12, 74, 24): 'signal_array', (75, 12, 75, 28): 'background_array', (76, 12, 76, 23): 'n_free_cpus', (77, 12, 77, 23): 'voxel_sizes', (78, 12, 78, 31): 'network_voxel_sizes', (79, 12, 79, 22): 'batch_size', (80, 12, 80, 23): 'cube_height', (81, 12, 81, 22): 'cube_width', (82, 12, 82, 22): 'cube_depth', (83, 12, 83, 25): 'trained_model', (84, 12, 84, 25): 'model_weights', (85, 12, 85, 25): 'network_depth'}, {}), '(points, signal_array, background_array, n_free_cpus,\n voxel_sizes, network_voxel_sizes, batch_size, cube_height, cube_width,\n cube_depth, trained_model, model_weights, network_depth)', False, 'from cellfinder_core.classify import classify\n'), ((88, 8, 88, 62), 'logging.info', 'logging.info', ({(88, 21, 88, 61): '"""No candidates, skipping classification"""'}, {}), "('No candidates, skipping classification')", False, 'import logging\n'), ((103, 8, 103, 53), 'imlib.general.logging.suppress_specific_logs', 'suppress_specific_logs', ({(103, 31, 103, 43): '"""tensorflow"""', (103, 45, 103, 52): 'message'}, {}), "('tensorflow', message)", False, 'from imlib.general.logging import suppress_specific_logs\n')] |
rezist-ro/rezistenta.tv | server.py | 0c0dfa4842061baf2b575688588c5d77cfdba427 | # coding=utf-8
import dateutil.parser
import flask
import json
import os
import time
import urllib
import yaml
EPISODES = yaml.load(open("episodes.yaml").read())
app = flask.Flask(__name__,
static_path="/assets",
static_folder="assets")
app.jinja_env.filters["strftime"] = \
lambda str, fmt: dateutil.parser.parse(str).strftime(fmt)
app.jinja_env.filters["quote_plus"] = lambda u: urllib.quote_plus(u)
ASSETS = os.path.join(app.root_path, "assets")
@app.route("/favicon.ico")
def favicon():
return flask.send_from_directory(
ASSETS,
"favicon.ico",
mimetype="image/icon")
@app.route("/")
def home():
return flask.render_template("pages/home.html",
playlist=os.environ["PLAYLIST"],
episodes=EPISODES,
autoplay=not app.debug)
@app.route("/episod/<int:number>")
def episode(number):
if number < 1:
return "not found"
elif number > len(EPISODES):
return "coming soon"
else:
episode = EPISODES[len(EPISODES) - number]
template = "pages/episode/%s.html" % (
"youtube" if "yt" in episode else "facebook"
)
return flask.render_template(template,
number=number,
episode=episode,
episodes=EPISODES)
| [((14, 6, 16, 41), 'flask.Flask', 'flask.Flask', (), '', False, 'import flask\n'), ((23, 9, 23, 46), 'os.path.join', 'os.path.join', ({(23, 22, 23, 35): 'app.root_path', (23, 37, 23, 45): '"""assets"""'}, {}), "(app.root_path, 'assets')", False, 'import os\n'), ((20, 48, 20, 68), 'urllib.quote_plus', 'urllib.quote_plus', ({(20, 66, 20, 67): 'u'}, {}), '(u)', False, 'import urllib\n'), ((26, 11, 29, 30), 'flask.send_from_directory', 'flask.send_from_directory', (), '', False, 'import flask\n'), ((34, 11, 37, 31), 'flask.render_template', 'flask.render_template', (), '', False, 'import flask\n'), ((51, 15, 54, 55), 'flask.render_template', 'flask.render_template', (), '', False, 'import flask\n')] |
mazayus/ProjectEuler | problem020.py | 64aebd5d80031fab2f0ef3c44c3a1118212ab613 | #!/usr/bin/env python3
from functools import *
import operator
def factorial(number):
assert number >= 1
return reduce(operator.mul, range(1, number+1))
def digits(number):
yield from (int(digit) for digit in str(number))
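# Project Euler problem 20: sum of the digits of 100!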
print(sum(digits(factorial(100))))
| [] |
ghafran/KerasPersonLab | transformer.py | fcd80b62247aee8bd1d41ff91e31c822950f561e | import numpy as np
from math import cos, sin, pi
import cv2
import random
from config import config, TransformationParams
from data_prep import map_coco_to_personlab
class AugmentSelection:
def __init__(self, flip=False, degree = 0., crop = (0,0), scale = 1.):
self.flip = flip
self.degree = degree #rotate
self.crop = crop #shift actually
self.scale = scale
@staticmethod
def random():
flip = random.uniform(0.,1.) > TransformationParams.flip_prob
degree = random.uniform(-1.,1.) * TransformationParams.max_rotate_degree
scale = (TransformationParams.scale_max - TransformationParams.scale_min)*random.uniform(0.,1.)+TransformationParams.scale_min \
if random.uniform(0.,1.) < TransformationParams.scale_prob else 1.
x_offset = int(random.uniform(-1.,1.) * TransformationParams.center_perterb_max);
y_offset = int(random.uniform(-1.,1.) * TransformationParams.center_perterb_max);
return AugmentSelection(flip, degree, (x_offset,y_offset), scale)
@staticmethod
def unrandom():
flip = False
degree = 0.
scale = 1.
x_offset = 0
y_offset = 0
return AugmentSelection(flip, degree, (x_offset,y_offset), scale)
def affine(self, center=(config.IMAGE_SHAPE[1]//2, config.IMAGE_SHAPE[0]//2) , scale_self=1.):
# the main idea: we will do all image transformations with one affine matrix.
# this saves lot of cpu and make code significantly shorter
# same affine matrix could be used to transform joint coordinates afterwards
A = self.scale * cos(self.degree / 180. * pi )
B = self.scale * sin(self.degree / 180. * pi )
# scale_size = TransformationParams.target_dist / scale_self * self.scale
scale_size = TransformationParams.target_dist / self.scale
(width, height) = center
center_x = width + self.crop[0]
center_y = height + self.crop[1]
center2zero = np.array( [[ 1., 0., -center_x],
[ 0., 1., -center_y ],
[ 0., 0., 1. ]] )
rotate = np.array( [[ A, B, 0 ],
[ -B, A, 0 ],
[ 0, 0, 1. ] ])
scale = np.array( [[ scale_size, 0, 0 ],
[ 0, scale_size, 0 ],
[ 0, 0, 1. ] ])
flip = np.array( [[ -1 if self.flip else 1., 0., 0. ],
[ 0., 1., 0. ],
[ 0., 0., 1. ]] )
center2center = np.array( [[ 1., 0., config.IMAGE_SHAPE[1]//2],
[ 0., 1., config.IMAGE_SHAPE[0]//2 ],
[ 0., 0., 1. ]] )
# order of combination is reversed
combined = center2center.dot(flip).dot(scale).dot(rotate).dot(center2zero)
return combined[0:2]
class Transformer:
@staticmethod
def transform(img, masks, keypoints, aug=AugmentSelection.random()):
# warp picture and mask
M = aug.affine(center=(img.shape[1]//2, img.shape[0]//2))
cv_shape = (config.IMAGE_SHAPE[1], config.IMAGE_SHAPE[0])
        # TODO: need to understand this, scale_provided[0] is height of main person divided by 368, calculated in generate_hdf5.py
# print(img.shape)
# for i, img in enumerate(input_transform_targets):
img = cv2.warpAffine(img, M, cv_shape, flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_CONSTANT, borderValue=(127,127,127))
# concat = np.stack(output_transform_targets, axis=-1)
# fix from https://github.com/octiapp/KerasPersonLab/issues/2
# masks = cv2.warpAffine(masks, M, cv_shape, flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_CONSTANT, borderValue=0)
out_masks = np.zeros(cv_shape[::-1]+(masks.shape[-1],))
for i in range(masks.shape[-1]):
out_masks[:,:,i] = cv2.warpAffine(masks[:,:,i], M, cv_shape, flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_CONSTANT, borderValue=0)
masks = out_masks
# warp key points
        # TODO: a joint could be cropped out by the augmentation; in this case we should mark it as invisible.
        # update: maybe we don't need it actually; the original code removed parts sliced by more than half, maybe we should keep it
keypoints = map_coco_to_personlab(keypoints)
original_points = keypoints.copy()
# print keypoints
original_points[:,:,2]=1 # we reuse 3rd column in completely different way here, it is hack
converted_points = np.matmul(M, original_points.transpose([0,2,1])).transpose([0,2,1])
keypoints[:,:,0:2]=converted_points
cropped_kp = keypoints[:,:,0] >= config.IMAGE_SHAPE[1]
cropped_kp = np.logical_or(cropped_kp, keypoints[:,:,1] >= config.IMAGE_SHAPE[0])
cropped_kp = np.logical_or(cropped_kp, keypoints[:,:,0] < 0)
cropped_kp = np.logical_or(cropped_kp, keypoints[:,:,1] < 0)
keypoints[cropped_kp, 2] = 0
# we just made image flip, i.e. right leg just became left leg, and vice versa
if aug.flip:
tmpLeft = keypoints[:, config.LEFT_KP, :]
tmpRight = keypoints[:, config.RIGHT_KP, :]
keypoints[:, config.LEFT_KP, :] = tmpRight
keypoints[:, config.RIGHT_KP, :] = tmpLeft
# print keypoints
return img, masks, keypoints
| [((54, 22, 56, 50), 'numpy.array', 'np.array', ({(54, 32, 56, 48): '[[1.0, 0.0, -center_x], [0.0, 1.0, -center_y], [0.0, 0.0, 1.0]]'}, {}), '([[1.0, 0.0, -center_x], [0.0, 1.0, -center_y], [0.0, 0.0, 1.0]])', True, 'import numpy as np\n'), ((58, 17, 60, 43), 'numpy.array', 'np.array', ({(58, 27, 60, 42): '[[A, B, 0], [-B, A, 0], [0, 0, 1.0]]'}, {}), '([[A, B, 0], [-B, A, 0], [0, 0, 1.0]])', True, 'import numpy as np\n'), ((62, 16, 64, 43), 'numpy.array', 'np.array', ({(62, 26, 64, 42): '[[scale_size, 0, 0], [0, scale_size, 0], [0, 0, 1.0]]'}, {}), '([[scale_size, 0, 0], [0, scale_size, 0], [0, 0, 1.0]])', True, 'import numpy as np\n'), ((66, 15, 68, 43), 'numpy.array', 'np.array', ({(66, 25, 68, 41): '[[-1 if self.flip else 1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]'}, {}), '([[-1 if self.flip else 1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0,\n 1.0]])', True, 'import numpy as np\n'), ((70, 24, 72, 52), 'numpy.array', 'np.array', ({(70, 34, 72, 50): '[[1.0, 0.0, config.IMAGE_SHAPE[1] // 2], [0.0, 1.0, config.IMAGE_SHAPE[0] //\n 2], [0.0, 0.0, 1.0]]'}, {}), '([[1.0, 0.0, config.IMAGE_SHAPE[1] // 2], [0.0, 1.0, config.\n IMAGE_SHAPE[0] // 2], [0.0, 0.0, 1.0]])', True, 'import numpy as np\n'), ((91, 14, 91, 128), 'cv2.warpAffine', 'cv2.warpAffine', (), '', False, 'import cv2\n'), ((96, 20, 96, 63), 'numpy.zeros', 'np.zeros', ({(96, 29, 96, 62): 'cv_shape[::-1] + (masks.shape[-1],)'}, {}), '(cv_shape[::-1] + (masks.shape[-1],))', True, 'import numpy as np\n'), ((104, 20, 104, 52), 'data_prep.map_coco_to_personlab', 'map_coco_to_personlab', ({(104, 42, 104, 51): 'keypoints'}, {}), '(keypoints)', False, 'from data_prep import map_coco_to_personlab\n'), ((112, 21, 112, 89), 'numpy.logical_or', 'np.logical_or', ({(112, 35, 112, 45): 'cropped_kp', (112, 47, 112, 88): 'keypoints[:, :, (1)] >= config.IMAGE_SHAPE[0]'}, {}), '(cropped_kp, keypoints[:, :, (1)] >= config.IMAGE_SHAPE[0])', True, 'import numpy as np\n'), ((113, 21, 113, 68), 'numpy.logical_or', 'np.logical_or', ({(113, 35, 113, 45): 'cropped_kp', (113, 47, 113, 67): 'keypoints[:, :, (0)] < 0'}, {}), '(cropped_kp, keypoints[:, :, (0)] < 0)', True, 'import numpy as np\n'), ((114, 21, 114, 68), 'numpy.logical_or', 'np.logical_or', ({(114, 35, 114, 45): 'cropped_kp', (114, 47, 114, 67): 'keypoints[:, :, (1)] < 0'}, {}), '(cropped_kp, keypoints[:, :, (1)] < 0)', True, 'import numpy as np\n'), ((19, 15, 19, 36), 'random.uniform', 'random.uniform', ({(19, 30, 19, 32): '(0.0)', (19, 33, 19, 35): '(1.0)'}, {}), '(0.0, 1.0)', False, 'import random\n'), ((20, 17, 20, 39), 'random.uniform', 'random.uniform', ({(20, 32, 20, 35): '(-1.0)', (20, 36, 20, 38): '(1.0)'}, {}), '(-1.0, 1.0)', False, 'import random\n'), ((44, 25, 44, 54), 'math.cos', 'cos', ({(44, 29, 44, 52): '(self.degree / 180.0 * pi)'}, {}), '(self.degree / 180.0 * pi)', False, 'from math import cos, sin, pi\n'), ((45, 25, 45, 54), 'math.sin', 'sin', ({(45, 29, 45, 52): '(self.degree / 180.0 * pi)'}, {}), '(self.degree / 180.0 * pi)', False, 'from math import cos, sin, pi\n'), ((98, 31, 98, 142), 'cv2.warpAffine', 'cv2.warpAffine', (), '', False, 'import cv2\n'), ((22, 15, 22, 36), 'random.uniform', 'random.uniform', ({(22, 30, 22, 32): '(0.0)', (22, 33, 22, 35): '(1.0)'}, {}), '(0.0, 1.0)', False, 'import random\n'), ((23, 23, 23, 45), 'random.uniform', 'random.uniform', ({(23, 38, 23, 41): '-1.0', (23, 42, 23, 44): '1.0'}, {}), '(-1.0, 1.0)', False, 'import random\n'), ((24, 23, 24, 45), 'random.uniform', 'random.uniform', ({(24, 38, 24, 41): '-1.0', (24, 42, 24, 44): '1.0'}, {}), 
'(-1.0, 1.0)', False, 'import random\n'), ((21, 82, 21, 103), 'random.uniform', 'random.uniform', ({(21, 97, 21, 99): '(0.0)', (21, 100, 21, 102): '(1.0)'}, {}), '(0.0, 1.0)', False, 'import random\n')] |
mizuno-group/enan | enan/__init__.py | 3c9dbe60bebf98e384e858db56980928b5897775 | # -*- coding: utf-8 -*-
"""
Created on Wed Dec 25 15:46:32 2019
@author: tadahaya
"""
from .binom import BT
from .connect import Connect
from .fet import FET
from .gsea import GSEA
from .ssgsea import ssGSEA
__copyright__ = 'Copyright (C) 2020 MIZUNO Tadahaya'
__version__ = '1.0.3'
__license__ = 'MIT'
__author__ = 'MIZUNO Tadahaya'
__author_email__ = '[email protected]' | [] |
Hacker-1202/Selfium | app/helpers/__init__.py | 7e798c23c9f24aacab6f6a485d6355f1045bc65c |
"""
Selfium Helper Files
~~~~~~~~~~~~~~~~~~~
All Helper Files used in Selfium project;
:copyright: (c) 2021 - Caillou and ZeusHay;
:license: MIT, see LICENSE for more details.
"""
from .getUser import *
from .getGuild import *
from .params import *
from .notify import *
from .sendEmbed import *
from .isStaff import * | [] |
zhangyimi/Research | NLP/UNIMO/src/finetune/visual_entailment.py | 866f91d9774a38d205d6e9a3b1ee6293748261b3 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model for visual_entailment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import glob
import time
import numpy as np
import paddle.fluid as fluid
from model.unimo_finetune import UNIMOModel
from eval import glue_eval
from collections import OrderedDict
from utils.utils import print_eval_log
def kl_divergence_with_logits(q_logits, p_logits):
"""
symmetric KL-divergence (See SMART, Sec 3.1)
q_logits: logits
p_logits: delta_logits
"""
q = fluid.layers.softmax(input=q_logits)
p = fluid.layers.softmax(input=p_logits)
kl_qp = fluid.layers.reduce_sum(q * (fluid.layers.log(q) - fluid.layers.log(p)), -1)
kl_pq = fluid.layers.reduce_sum(p * (fluid.layers.log(p) - fluid.layers.log(q)), -1)
vat_loss = fluid.layers.mean(x=kl_qp+kl_pq)
return vat_loss
def create_model(args, config, pyreader_name="train_reader", is_train=True):
"""create_model"""
shapes = [[-1, args.max_seq_len, 1], # src_ids
[-1, args.max_seq_len, 1], # pos_ids
[-1, args.max_seq_len, 1], # sent_ids
[-1, args.max_img_len + args.max_seq_len, args.max_img_len + args.max_seq_len], # input_mask
[-1, args.max_img_len, 1], # v_mask
[-1, args.max_seq_len, 1], # t_mask
[-1, args.max_img_len, config["image_embedding_size"]], # image_embedding
[-1, args.max_img_len, 5], # image_loc
[-1, 1] # labels
]
dtypes = ['int64', 'int64', 'int64', 'float32', 'float32', 'float32', 'float32','float32', 'int64']
lod_levels = [0, 0, 0, 0, 0, 0, 0, 0, 0]
pyreader = fluid.layers.py_reader(
capacity=70,
shapes=shapes,
dtypes=dtypes,
lod_levels=lod_levels,
name=pyreader_name,
use_double_buffer=True)
(src_ids, pos_ids, sent_ids, input_mask, v_mask, t_mask, image_embedding, image_loc, labels) \
= fluid.layers.read_file(pyreader)
emb_ids = {"word_embedding": src_ids, "sent_embedding": sent_ids, "pos_embedding": pos_ids}
image_input = {"image_embedding": image_embedding, "loc_embedding": image_loc}
adv_step, adv_lr, norm_type, adv_max_norm, adv_init_mag = \
args.adv_step, args.adv_lr, args.norm_type, args.adv_max_norm, args.adv_init_mag
assert adv_step > 0 and adv_init_mag > 0
def get_loss_and_logits(text_feats, image_feats):
feats = text_feats + image_feats
cls_params_name = ["cls_out_w_0", "cls_out_b_0"]
feats = fluid.layers.fc(
input=feats,
size=2048,
param_attr=fluid.ParamAttr(
name=cls_params_name[0],
initializer=fluid.initializer.TruncatedNormal(scale=0.02)),
bias_attr=fluid.ParamAttr(
name=cls_params_name[1], initializer=fluid.initializer.Constant(0.)))
feats = fluid.layers.dropout(
x=feats,
dropout_prob=0.1,
dropout_implementation="upscale_in_train")
cls_params_name = ["cls_out_w_1", "cls_out_b_1"]
logits = fluid.layers.fc(
input=feats,
size=args.num_labels,
param_attr=fluid.ParamAttr(
name=cls_params_name[0],
initializer=fluid.initializer.TruncatedNormal(scale=0.02)),
bias_attr=fluid.ParamAttr(
name=cls_params_name[1], initializer=fluid.initializer.Constant(0.)))
ce_loss, probs = fluid.layers.softmax_with_cross_entropy(
logits=logits, label=labels, return_softmax=True)
loss = fluid.layers.mean(x=ce_loss) / adv_step
return loss, logits, probs
def init_delta(input, mask, shape, name='text'):
real_seq_len = fluid.layers.shape(input)[1]
fake = fluid.layers.data(name=name+"_fake", shape=shape, dtype='float32')
mask_slice = fluid.layers.slice(mask, axes=[1], starts=[0], ends=fluid.layers.shape(mask)[1])
length = fluid.layers.reduce_sum(mask_slice, dim=1, keep_dim=True) * shape[-1]
# l2 norm
delta = fluid.layers.uniform_random_batch_size_like(mask, shape=fake.shape, min=-1.0, max=1.0)
delta = fluid.layers.slice(delta, axes=[1], starts=[0], ends=real_seq_len)
delta = delta * mask_slice
mag = adv_init_mag / fluid.layers.sqrt(length)
delta = delta * mag
return delta
if is_train:
text_emb_shape = [-1, args.max_seq_len, config['hidden_size']]
text_delta = init_delta(src_ids, t_mask, text_emb_shape, name='text')
image_emb_shape = [-1, args.max_img_len, config['image_embedding_size']]
image_delta = init_delta(image_embedding, v_mask, image_emb_shape, name='img')
else:
text_delta, image_delta = None, None
def pgd_with_l2(loss, delta):
# grad
delta_grad = fluid.backward.gradients(loss, delta)[0]
# l2 norm
delta_norm = fluid.layers.sqrt(fluid.layers.reduce_sum(fluid.layers.pow(fluid.layers.reshape(delta_grad, \
[fluid.layers.shape(delta_grad)[0], -1]), factor=2), dim=1, keep_dim=True))
delta_norm = fluid.layers.clamp(delta_norm, min=float(1e-8))
# pgd
delta = delta + adv_lr * delta_grad / delta_norm
# projection
if adv_max_norm > 0:
exceed_mask = (delta_norm > adv_max_norm).astype('float32')
reweights = (adv_max_norm / delta_norm) * exceed_mask + (1 - exceed_mask)
delta = delta * reweights
delta_grad.stop_gradient=True
return delta
loss = None
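    # adversarial training loop: run clean, text-perturbed and image-perturbed forward passes,
    # accumulate their classification losses plus symmetric KL consistency terms, and refine the
    # perturbations with projected gradient steps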
for iter in range(adv_step):
vl_pure = UNIMOModel(
emb_ids=emb_ids,
input_mask=input_mask,
config=config,
image_input=image_input,
weight_sharing=args.weight_sharing
)
vl_text = UNIMOModel(
text_adv_delta=text_delta,
emb_ids=emb_ids,
input_mask=input_mask,
config=config,
image_input=image_input,
weight_sharing=args.weight_sharing
)
vl_image = UNIMOModel(
image_adv_delta=image_delta,
emb_ids=emb_ids,
input_mask=input_mask,
config=config,
image_input=image_input,
weight_sharing=args.weight_sharing
)
h_pure_text, h_pure_image = vl_pure.get_pooled_output()
h_text_text, h_text_image = vl_text.get_pooled_output()
h_image_text, h_image_image = vl_image.get_pooled_output()
loss_pure, logit_pure, probs_pure = get_loss_and_logits(h_pure_text, h_pure_image)
loss_text, logit_text, probs_text = get_loss_and_logits(h_text_text, h_text_image)
loss_image, logit_image, probs_image = get_loss_and_logits(h_image_text, h_image_image)
if is_train:
text_delta = pgd_with_l2(loss_text, text_delta)
image_delta = pgd_with_l2(loss_image, image_delta)
kl_adv_text_loss = kl_divergence_with_logits(logit_pure, logit_text)
kl_adv_image_loss = kl_divergence_with_logits(logit_pure, logit_image)
cur_loss = loss_pure + loss_text + loss_image + kl_adv_text_loss + kl_adv_image_loss
loss = cur_loss if loss is None else loss + cur_loss
num_seqs = fluid.layers.create_tensor(dtype='int64')
accuracy = fluid.layers.accuracy(input=probs_pure, label=labels, total=num_seqs)
graph_vars = {
"loss": loss,
"probs": probs_pure,
"accuracy": accuracy,
"labels": labels,
"num_seqs": num_seqs
}
for k, v in graph_vars.items():
v.persistable = False
return pyreader, graph_vars
def evaluate(args, exe, test_pyreader, graph_vars, eval_phase, dev_count=1, gpu_id=0):
"""evaluate"""
all_mat = []
test_pyreader.start()
time_begin = time.time()
fetch_list = [graph_vars["probs"].name, graph_vars["labels"].name]
while True:
try:
np_probs, np_labels = exe.run(fetch_list=fetch_list)
np_preds = np.argmax(np_probs, axis=1).reshape((-1, 1))
np_labels = np_labels.reshape((-1, 1))
mat = np.concatenate([np_preds, np_labels], axis=1)
all_mat.extend(mat.tolist())
except fluid.core.EOFException:
test_pyreader.reset()
break
all_mat = np.array(all_mat)
time_end = time.time()
save_file = "%s/%s.trainers_%d.part_%d.npy" % (args.eval_dir, eval_phase, dev_count, gpu_id)
np.save(save_file, all_mat)
tmp_file = "%s/%s.trainers_%d.part_%d.finish" % (args.eval_dir, eval_phase, dev_count, gpu_id)
tmp_writer = open(tmp_file, "w")
tmp_writer.close()
if gpu_id == 0:
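        # rank 0 waits until every worker has written its .finish file, then merges all partial results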
while True:
ret = os.popen('find %s -maxdepth 1 -name "%s.trainers_%d.part_*.finish"' %
(args.eval_dir, eval_phase, dev_count)).readlines()
if len(ret) != dev_count:
time.sleep(1)
continue
else:
break
all_mats = []
save_files = glob.glob("%s/%s.trainers_%d.part_*.npy" % (args.eval_dir, eval_phase, dev_count))
for cur_save_file in save_files:
mat = np.load(cur_save_file).tolist()
all_mats.extend(mat)
all_mats = np.array(all_mats)
cur_time = str(int(time.time()))
os.system("mkdir %s/%s" % (args.eval_dir, cur_time))
os.system("mv %s/%s.trainers_%d.* %s/%s" % (args.eval_dir, eval_phase, dev_count, args.eval_dir, cur_time))
ret = OrderedDict()
ret['phase'] = eval_phase
ret['loss'] = -1
ret['data_num'] = all_mats.shape[0]
ret['used_time'] = round(time_end - time_begin, 4)
metrics = OrderedDict()
metrics["simple_accuracy"] = glue_eval.simple_accuracy
if args.eval_mertrics in metrics:
ret_metric = metrics[args.eval_mertrics](all_mats[:, 0], all_mats[:, 1])
ret.update(ret_metric)
print_eval_log(ret)
else:
raise ValueError('unsupported metric {}'.format(args.eval_mertrics))
return ret
else:
return None
| [((38, 8, 38, 44), 'paddle.fluid.layers.softmax', 'fluid.layers.softmax', (), '', True, 'import paddle.fluid as fluid\n'), ((39, 8, 39, 44), 'paddle.fluid.layers.softmax', 'fluid.layers.softmax', (), '', True, 'import paddle.fluid as fluid\n'), ((42, 15, 42, 47), 'paddle.fluid.layers.mean', 'fluid.layers.mean', (), '', True, 'import paddle.fluid as fluid\n'), ((62, 15, 68, 31), 'paddle.fluid.layers.py_reader', 'fluid.layers.py_reader', (), '', True, 'import paddle.fluid as fluid\n'), ((71, 10, 71, 42), 'paddle.fluid.layers.read_file', 'fluid.layers.read_file', ({(71, 33, 71, 41): 'pyreader'}, {}), '(pyreader)', True, 'import paddle.fluid as fluid\n'), ((195, 15, 195, 56), 'paddle.fluid.layers.create_tensor', 'fluid.layers.create_tensor', (), '', True, 'import paddle.fluid as fluid\n'), ((196, 15, 196, 84), 'paddle.fluid.layers.accuracy', 'fluid.layers.accuracy', (), '', True, 'import paddle.fluid as fluid\n'), ((217, 17, 217, 28), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((230, 14, 230, 31), 'numpy.array', 'np.array', ({(230, 23, 230, 30): 'all_mat'}, {}), '(all_mat)', True, 'import numpy as np\n'), ((231, 15, 231, 26), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((234, 4, 234, 31), 'numpy.save', 'np.save', ({(234, 12, 234, 21): 'save_file', (234, 23, 234, 30): 'all_mat'}, {}), '(save_file, all_mat)', True, 'import numpy as np\n'), ((91, 16, 94, 54), 'paddle.fluid.layers.dropout', 'fluid.layers.dropout', (), '', True, 'import paddle.fluid as fluid\n'), ((104, 25, 105, 61), 'paddle.fluid.layers.softmax_with_cross_entropy', 'fluid.layers.softmax_with_cross_entropy', (), '', True, 'import paddle.fluid as fluid\n'), ((112, 15, 112, 81), 'paddle.fluid.layers.data', 'fluid.layers.data', (), '', True, 'import paddle.fluid as fluid\n'), ((117, 16, 117, 102), 'paddle.fluid.layers.uniform_random_batch_size_like', 'fluid.layers.uniform_random_batch_size_like', (), '', True, 'import paddle.fluid as fluid\n'), ((118, 16, 118, 82), 'paddle.fluid.layers.slice', 'fluid.layers.slice', (), '', True, 'import paddle.fluid as fluid\n'), ((154, 18, 160, 9), 'model.unimo_finetune.UNIMOModel', 'UNIMOModel', (), '', False, 'from model.unimo_finetune import UNIMOModel\n'), ((161, 18, 168, 9), 'model.unimo_finetune.UNIMOModel', 'UNIMOModel', (), '', False, 'from model.unimo_finetune import UNIMOModel\n'), ((169, 19, 176, 9), 'model.unimo_finetune.UNIMOModel', 'UNIMOModel', (), '', False, 'from model.unimo_finetune import UNIMOModel\n'), ((251, 21, 251, 103), 'glob.glob', 'glob.glob', ({(251, 31, 251, 102): "'%s/%s.trainers_%d.part_*.npy' % (args.eval_dir, eval_phase, dev_count)"}, {}), "('%s/%s.trainers_%d.part_*.npy' % (args.eval_dir, eval_phase,\n dev_count))", False, 'import glob\n'), ((255, 19, 255, 37), 'numpy.array', 'np.array', ({(255, 28, 255, 36): 'all_mats'}, {}), '(all_mats)', True, 'import numpy as np\n'), ((258, 8, 258, 60), 'os.system', 'os.system', ({(258, 18, 258, 59): "('mkdir %s/%s' % (args.eval_dir, cur_time))"}, {}), "('mkdir %s/%s' % (args.eval_dir, cur_time))", False, 'import os\n'), ((259, 8, 259, 115), 'os.system', 'os.system', ({(259, 18, 259, 114): "('mv %s/%s.trainers_%d.* %s/%s' % (args.eval_dir, eval_phase, dev_count,\n args.eval_dir, cur_time))"}, {}), "('mv %s/%s.trainers_%d.* %s/%s' % (args.eval_dir, eval_phase,\n dev_count, args.eval_dir, cur_time))", False, 'import os\n'), ((261, 14, 261, 27), 'collections.OrderedDict', 'OrderedDict', ({}, {}), '()', False, 'from collections import OrderedDict\n'), ((267, 18, 267, 31), 
'collections.OrderedDict', 'OrderedDict', ({}, {}), '()', False, 'from collections import OrderedDict\n'), ((106, 15, 106, 43), 'paddle.fluid.layers.mean', 'fluid.layers.mean', (), '', True, 'import paddle.fluid as fluid\n'), ((110, 23, 110, 48), 'paddle.fluid.layers.shape', 'fluid.layers.shape', ({(110, 42, 110, 47): 'input'}, {}), '(input)', True, 'import paddle.fluid as fluid\n'), ((114, 17, 114, 74), 'paddle.fluid.layers.reduce_sum', 'fluid.layers.reduce_sum', (), '', True, 'import paddle.fluid as fluid\n'), ((120, 29, 120, 54), 'paddle.fluid.layers.sqrt', 'fluid.layers.sqrt', ({(120, 47, 120, 53): 'length'}, {}), '(length)', True, 'import paddle.fluid as fluid\n'), ((134, 21, 134, 58), 'paddle.fluid.backward.gradients', 'fluid.backward.gradients', ({(134, 46, 134, 50): 'loss', (134, 52, 134, 57): 'delta'}, {}), '(loss, delta)', True, 'import paddle.fluid as fluid\n'), ((225, 18, 225, 63), 'numpy.concatenate', 'np.concatenate', (), '', True, 'import numpy as np\n'), ((273, 12, 273, 31), 'utils.utils.print_eval_log', 'print_eval_log', ({(273, 27, 273, 30): 'ret'}, {}), '(ret)', False, 'from utils.utils import print_eval_log\n'), ((40, 41, 40, 60), 'paddle.fluid.layers.log', 'fluid.layers.log', ({(40, 58, 40, 59): 'q'}, {}), '(q)', True, 'import paddle.fluid as fluid\n'), ((40, 63, 40, 82), 'paddle.fluid.layers.log', 'fluid.layers.log', ({(40, 80, 40, 81): 'p'}, {}), '(p)', True, 'import paddle.fluid as fluid\n'), ((41, 41, 41, 60), 'paddle.fluid.layers.log', 'fluid.layers.log', ({(41, 58, 41, 59): 'p'}, {}), '(p)', True, 'import paddle.fluid as fluid\n'), ((41, 63, 41, 82), 'paddle.fluid.layers.log', 'fluid.layers.log', ({(41, 80, 41, 81): 'q'}, {}), '(q)', True, 'import paddle.fluid as fluid\n'), ((245, 16, 245, 29), 'time.sleep', 'time.sleep', ({(245, 27, 245, 28): '(1)'}, {}), '(1)', False, 'import time\n'), ((257, 27, 257, 38), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((113, 73, 113, 97), 'paddle.fluid.layers.shape', 'fluid.layers.shape', ({(113, 92, 113, 96): 'mask'}, {}), '(mask)', True, 'import paddle.fluid as fluid\n'), ((223, 23, 223, 50), 'numpy.argmax', 'np.argmax', (), '', True, 'import numpy as np\n'), ((242, 18, 243, 66), 'os.popen', 'os.popen', ({(242, 27, 243, 65): '\'find %s -maxdepth 1 -name "%s.trainers_%d.part_*.finish"\' % (args.eval_dir,\n eval_phase, dev_count)'}, {}), '(\'find %s -maxdepth 1 -name "%s.trainers_%d.part_*.finish"\' % (args\n .eval_dir, eval_phase, dev_count))', False, 'import os\n'), ((253, 18, 253, 40), 'numpy.load', 'np.load', ({(253, 26, 253, 39): 'cur_save_file'}, {}), '(cur_save_file)', True, 'import numpy as np\n'), ((88, 28, 88, 73), 'paddle.fluid.initializer.TruncatedNormal', 'fluid.initializer.TruncatedNormal', (), '', True, 'import paddle.fluid as fluid\n'), ((90, 53, 90, 83), 'paddle.fluid.initializer.Constant', 'fluid.initializer.Constant', ({(90, 80, 90, 82): '0.0'}, {}), '(0.0)', True, 'import paddle.fluid as fluid\n'), ((101, 28, 101, 73), 'paddle.fluid.initializer.TruncatedNormal', 'fluid.initializer.TruncatedNormal', (), '', True, 'import paddle.fluid as fluid\n'), ((103, 53, 103, 83), 'paddle.fluid.initializer.Constant', 'fluid.initializer.Constant', ({(103, 80, 103, 82): '0.0'}, {}), '(0.0)', True, 'import paddle.fluid as fluid\n'), ((138, 17, 138, 47), 'paddle.fluid.layers.shape', 'fluid.layers.shape', ({(138, 36, 138, 46): 'delta_grad'}, {}), '(delta_grad)', True, 'import paddle.fluid as fluid\n')] |
oth-datapipeline/ingestion-scripts | src/records.py | 48eecf63b0bf06200aa59be63de6839599ec51df | from faust import Record
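# Faust record schemas for the RSS, Twitter, and Reddit ingestion pipelines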
class RssFeed(Record, serializer='json'):
feed_source: str
title: str
link: str
published: str = None
author: str = None
summary: str = None
published_parsed: list = None
authors: list = None
tags: list = None
comments: str = None
content: list = None
source: dict = None
class TwitterTrend(Record, serializer='json'):
pass
class Tweet(Record, serializer="json"):
tweet_id: str
text: str
created_at: str
metrics: dict
author: dict
trend: str
place: str = None
hashtags: list = None
class RedditPost(Record, serializer='json'):
id: str
title: str
author: dict
created: str
score: int
upvote_ratio: float
reddit: dict
domain: str = None
url: str = None
comments: list = None
keywords: list = None
| [] |
vaishali-bariwal/Practice-Coding-Questions | leetcode/102-Medium-Binary-Tree-Level-Order-Traversal/answer.py | 747bfcb1cb2be5340daa745f2b9938f0ee87c9ac | #!/usr/bin/python3
#------------------------------------------------------------------------------
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def levelOrder(self, root):
"""
:type root: TreeNode
:rtype: List[List[int]]
"""
if not root:
return []
stack = [(root, 0)]
result = []
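        # BFS over (node, level) pairs; start a new sublist whenever a new level is reached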
while stack:
(node, level) = stack.pop(0)
if level == len(result):
result.append([])
result[level].append(node.val)
if node.left: stack.append((node.left, level+1))
if node.right: stack.append((node.right, level+1))
return result
#------------------------------------------------------------------------------
#Testing
| [] |
shirayu/fitbit-dumper | setup.py | 21cee614e294d84204ad06d81dae9adf9853a135 | #!/usr/bin/env python3
from setuptools import setup, find_packages
setup(
name="",
version="0.01",
packages=find_packages(),
install_requires=[
"fitbit"
],
dependency_links=[
],
extras_require={
"tests": [
"flake8",
"autopep8",
]
}
)
| [((8, 13, 8, 28), 'setuptools.find_packages', 'find_packages', ({}, {}), '()', False, 'from setuptools import setup, find_packages\n')] |
mtnmunuklu/SigmaToExcel | src/main.py | 7d11fda19c0075122928ff5f1dbaab7775d30fe9 | import sys
sys.path.append("../")
from src.app.sigma import SigmaConverter
if __name__ == "__main__":
sigmaconverter = SigmaConverter()
sigmaconverter.read_from_file()
sigmaconverter.write_to_excel()
| [((2, 0, 2, 22), 'sys.path.append', 'sys.path.append', ({(2, 16, 2, 21): '"""../"""'}, {}), "('../')", False, 'import sys\n'), ((7, 21, 7, 37), 'src.app.sigma.SigmaConverter', 'SigmaConverter', ({}, {}), '()', False, 'from src.app.sigma import SigmaConverter\n')] |
CloudReactor/task_manager | server/processes/migrations/0132_auto_20201108_0540.py | 464ca74371064fabb9a21b1f5bacba30360932ab | # Generated by Django 2.2.14 on 2020-11-08 05:40
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('processes', '0131_auto_20201107_2316'),
]
operations = [
migrations.RunSQL(
"UPDATE processes_workflow SET run_environment_id = scheduling_run_environment_id WHERE run_environment_id IS NULL;",
reverse_sql='',
),
]
| [((13, 8, 16, 9), 'django.db.migrations.RunSQL', 'migrations.RunSQL', (), '', False, 'from django.db import migrations\n')] |
pengkangzaia/usad | sparsely_lstmvae_main.py | 937a29c24632cfa31e0c626cd5b058b3af74ef94 | from model.sparsely_lstm_vae import *
import torch.utils.data as data_utils
from sklearn import preprocessing
from utils.eval_methods import *
device = get_default_device()
# Read data
# normal = pd.read_csv("data/SWaT_Dataset_Normal_v1.csv") # , nrows=1000)
normal = pd.read_csv("data/SWaT/SWaT_Dataset_Normal_v1.csv", nrows=10000) # , nrows=1000)
normal = normal.drop(["Timestamp", "Normal/Attack"], axis=1)
# normal.shape
# Transform all columns into float64
for i in list(normal):
normal[i] = normal[i].apply(lambda x: str(x).replace(",", "."))
normal = normal.astype(float)
# Data preprocessing
min_max_scaler = preprocessing.MinMaxScaler()
x = normal.values
x_scaled = min_max_scaler.fit_transform(x)
normal = pd.DataFrame(x_scaled)
# Read data
# attack = pd.read_csv("data/SWaT_Dataset_Attack_v0.csv", sep=";") # , nrows=1000)
attack = pd.read_csv("data/SWaT/SWaT_Dataset_Attack_v0.csv", sep=";", nrows=10000) # , nrows=1000)
labels = [float(label != 'Normal') for label in attack["Normal/Attack"].values]
attack = attack.drop(["Timestamp", "Normal/Attack"], axis=1)
# Transform all columns into float64
for i in list(attack):
attack[i] = attack[i].apply(lambda x: str(x).replace(",", "."))
attack = attack.astype(float)
x = attack.values
x_scaled = min_max_scaler.transform(x)
attack = pd.DataFrame(x_scaled)
############## windows ###################
window_size = 12
# np.arange(window_size)[None, :] is 1*12 (0,1,2,3,4,5,6,7,8,9,10,11), a single row of 12 columns
# np.arange(normal.shape[0] - window_size)[:, None] is (1000-12)*1 (0,1,2,3,4,5...), a column of 988 increasing indices
# np.arange(window_size)[None, :] + np.arange(normal.shape[0] - window_size)[:, None] (1000-12)*12
windows_normal = normal.values[np.arange(window_size)[None, :] + np.arange(normal.shape[0] - window_size)[:, None]]
windows_attack = attack.values[np.arange(window_size)[None, :] + np.arange(attack.shape[0] - window_size)[:, None]]
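# each windows_* array has shape (num_windows, window_size, num_features)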
############## training ###################
# BATCH_SIZE = 7919
BATCH_SIZE = 200
N_EPOCHS = 100
hidden_size = 100
latent_size = 40
# w_size = windows_normal.shape[1] * windows_normal.shape[2] # window_size * feature_size
# z_size = windows_normal.shape[1] * hidden_size # window_size * hidden_size
windows_normal_train = windows_normal[:int(np.floor(.8 * windows_normal.shape[0]))]
windows_normal_val = windows_normal[int(np.floor(.8 * windows_normal.shape[0])):int(np.floor(windows_normal.shape[0]))]
train_loader = torch.utils.data.DataLoader(data_utils.TensorDataset(
torch.from_numpy(windows_normal_train).float().view(([windows_normal_train.shape[0], windows_normal_train.shape[1], windows_normal_train.shape[2]]))
), batch_size=BATCH_SIZE, shuffle=False, num_workers=0)
val_loader = torch.utils.data.DataLoader(data_utils.TensorDataset(
torch.from_numpy(windows_normal_val).float().view(([windows_normal_val.shape[0], windows_normal_train.shape[1], windows_normal_train.shape[2]]))
), batch_size=BATCH_SIZE, shuffle=False, num_workers=0)
test_loader = torch.utils.data.DataLoader(data_utils.TensorDataset(
torch.from_numpy(windows_attack).float().view(([windows_attack.shape[0], windows_attack.shape[1], windows_attack.shape[2]]))
), batch_size=BATCH_SIZE, shuffle=False, num_workers=0)
model = SparselyLstmVae(BATCH_SIZE, window_size, windows_normal.shape[2], hidden_size, latent_size, former_step=3)
model = to_device(model, device)
val_loss, train_loss = training(N_EPOCHS, model, train_loader, val_loader)
plot_simple_history(val_loss)
plot_train_loss(train_loss)
torch.save({'ae': model.state_dict()}, "saved_model/model.pth")
############ testing #################
checkpoint = torch.load("model.pth")
model.load_state_dict(checkpoint['ae'])
# each batch produces one result; collect them all into the results list
results = testing(model, test_loader)
windows_labels = []
for i in range(len(labels) - window_size):
windows_labels.append(list(np.int_(labels[i:i + window_size])))
# a window that contains any anomalous point is labeled as anomalous (1)
y_test = [1.0 if (np.sum(window) > 0) else 0 for window in windows_labels]
# with too few samples the error can be very large
y_pred = np.concatenate(
[torch.stack(results[:-1]).flatten().detach().cpu().numpy(),
results[-1].flatten().detach().cpu().numpy()])
y_pred = (y_pred - y_pred.min()) / (y_pred.max() - y_pred.min())
threshold = ROC(y_test, y_pred)
t, th = bf_search(y_pred, y_test, start=0, end=1, step_num=1000, display_freq=50)
| [((20, 17, 20, 45), 'sklearn.preprocessing.MinMaxScaler', 'preprocessing.MinMaxScaler', ({}, {}), '()', False, 'from sklearn import preprocessing\n')] |
MexsonFernandes/AsynchronousTasks-Django-Celery-RabbitMQ-Redis | src/demo/tasks.py | b64b31cec4ccf8e0dca2cfe9faba40da647b94f7 | from __future__ import absolute_import, unicode_literals
from dcs.celeryconf import app
import time
from django.core.mail import EmailMessage
@app.task(bind=True, ignore_result=False, max_retries=3)
def demo_task1(self):
result = {
'val1': 1,
'val2': 2,
'val3': 3,
}
print("hellp")
from_email = '[email protected]'
to_list = ['[email protected]',]
sendemail = EmailMessage("Message received!!!", "Hello test", str(from_email), to_list)
sendemail.send()
return result
| [((7, 1, 7, 56), 'dcs.celeryconf.app.task', 'app.task', (), '', False, 'from dcs.celeryconf import app\n')] |
Ayansam1152/translate | pytorch_translate/models/__init__.py | 33d397fc25fb1072abd2975c77c602a2d031c6c4 | #!/usr/bin/env python3
import importlib
import os
# automatically import any Python files in the models/ directory
for file in sorted(os.listdir(os.path.dirname(__file__))):
if file.endswith(".py") and not file.startswith("_"):
model_name = file[: file.find(".py")]
importlib.import_module("pytorch_translate.models." + model_name)
| [((8, 30, 8, 55), 'os.path.dirname', 'os.path.dirname', ({(8, 46, 8, 54): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((11, 8, 11, 73), 'importlib.import_module', 'importlib.import_module', ({(11, 32, 11, 72): "('pytorch_translate.models.' + model_name)"}, {}), "('pytorch_translate.models.' + model_name)", False, 'import importlib\n')] |
mapeimapei/awesome-flask-webapp | app/config/secure.py | d0474f447a41e9432a14f9110989166c6595f0fa | # -*- coding: utf-8 -*-
__author__ = '带土'
SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://root:[email protected]:3306/awesome'
SECRET_KEY = '\x88D\xf09\x91\x07\x98\x89\x87\x96\xa0A\xc68\xf9\xecJ:U\x17\xc5V\xbe\x8b\xef\xd7\xd8\xd3\xe6\x98*4'
# Email 配置
MAIL_SERVER = 'smtp.exmail.qq.com'
MAIL_PORT = 465
MAIL_USE_SSL = True
MAIL_USE_TSL = False
MAIL_USERNAME = '[email protected]'
MAIL_PASSWORD = 'Bmwzy1314520'
MAIL_SUBJECT_PREFIX = '[鱼书]'
MAIL_SENDER = '鱼书 <[email protected]>'
# 开启数据库查询性能测试
SQLALCHEMY_RECORD_QUERIES = True
# 性能测试的阀值
DATABASE_QUERY_TIMEOUT = 0.5
SQLALCHEMY_TRACK_MODIFICATIONS = True
WTF_CSRF_CHECK_DEFAULT = False
SQLALCHEMY_ECHO = True
from datetime import timedelta
REMEMBER_COOKIE_DURATION = timedelta(days=30)
PROXY_API = 'http://ip.yushu.im/get'
# PERMANENT_SESSION_LIFETIME = 3600
| [((31, 27, 31, 45), 'datetime.timedelta', 'timedelta', (), '', False, 'from datetime import timedelta\n')] |