Dataset schema (column name, type, min/max value length):

    repo_name         string  (length 7 to 94)
    repo_path         string  (length 4 to 237)
    repo_head_hexsha  string  (length 40 to 40)
    content           string  (length 10 to 680k)
    apis              string  (length 2 to 840k)
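For illustration, a minimal sketch of reading records with this schema. It assumes the rows are exported one JSON object per line; the file name "records.jsonl" and the JSON-lines export format are assumptions for the example, not something this dump specifies.

    # Minimal sketch (assumptions: one JSON object per line with the five string
    # fields listed above; the file name "records.jsonl" is hypothetical).
    import json

    with open("records.jsonl", encoding="utf-8") as f:
        for line in f:
            record = json.loads(line)
            # 'content' holds the raw source file; 'apis' holds the serialized
            # list of extracted API call sites.
            print(record["repo_name"], record["repo_path"], record["repo_head_hexsha"])
            print("content:", len(record["content"]), "chars;",
                  "apis:", len(record["apis"]), "chars")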
KATO-Hiro/AtCoder
Others/code_festival/code-festival-2015-final-open/a.py
cbbdb18e95110b604728a54aed83a6ed6b993fde
# -*- coding: utf-8 -*-


def main():
    s, t, u = map(str, input().split())

    if len(s) == 5 and len(t) == 7 and len(u) == 5:
        print('valid')
    else:
        print('invalid')


if __name__ == '__main__':
    main()
[]
Zzz-ww/Python-prac
python_Project/Day_16-20/test_2.py
c97f2c16b74a2c1df117f377a072811cc596f98b
""" 嵌套的列表的坑 """ names = ['关羽', '张飞', '赵云', '马超', '黄忠'] courses = ['语文', '数学', '英语'] # 录入五个学生三门课程的成绩 scores = [[None] * len(courses) for _ in range(len(names))] for row, name in enumerate(names): for col, course in enumerate(courses): scores[row][col] = float(input(f'请输入{name}的{course}的成绩:')) print(scores)
[]
Z-yq/audioSamples.github.io
asr/dataloaders/am_dataloader.py
53c474288f0db1a3acfe40ba57a4cd5f2aecbcd3
import logging
import random

import numpy as np
import pypinyin
import tensorflow as tf

from augmentations.augments import Augmentation
from utils.speech_featurizers import SpeechFeaturizer
from utils.text_featurizers import TextFeaturizer

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
import time


class AM_DataLoader():

    def __init__(self, config_dict, training=True):
        self.speech_config = config_dict['speech_config']
        self.phone_config = config_dict['inp_config']
        self.text_config = config_dict['tar_config']
        self.running_config = config_dict['running_config']
        self.augment_config = config_dict['augments_config']

        self.streaming = self.speech_config['streaming']
        self.chunk = self.speech_config['sample_rate'] * self.speech_config['streaming_bucket']
        self.batch = config_dict['running_config']['batch_size']

        self.speech_featurizer = SpeechFeaturizer(self.speech_config)
        self.phone_featurizer = TextFeaturizer(self.phone_config)
        self.text_featurizer = TextFeaturizer(self.text_config)

        self.make_file_list(training)
        self.augment = Augmentation(self.augment_config)
        self.init_text_to_vocab()
        self.epochs = 1
        self.steps = 0

    def return_data_types(self):
        return (tf.float32, tf.int32, tf.int32, tf.int32, tf.int32)

    def return_data_shape(self):
        return (
            tf.TensorShape([self.batch, None, 1]),
            tf.TensorShape([self.batch, ]),
            tf.TensorShape([self.batch, None]),
            tf.TensorShape([self.batch, ]),
            tf.TensorShape([self.batch, None]),
        )

    def get_per_epoch_steps(self):
        return len(self.train_list) // self.batch

    def eval_per_epoch_steps(self):
        return len(self.test_list) // self.batch

    def init_text_to_vocab(self):
        pypinyin.load_phrases_dict({'调大': [['tiáo'], ['dà']],
                                    '调小': [['tiáo'], ['xiǎo']],
                                    '调亮': [['tiáo'], ['liàng']],
                                    '调暗': [['tiáo'], ['àn']],
                                    '肖': [['xiāo']],
                                    '英雄传': [['yīng'], ['xióng'], ['zhuàn']],
                                    '新传': [['xīn'], ['zhuàn']],
                                    '外传': [['wài'], ['zhuàn']],
                                    '正传': [['zhèng'], ['zhuàn']],
                                    '水浒传': [['shuǐ'], ['hǔ'], ['zhuàn']]
                                    })

        def text_to_vocab_func(txt):
            pins = pypinyin.pinyin(txt)
            pins = [i[0] for i in pins]
            phones = []
            for pin in pins:
                if pin in self.phone_featurizer.vocab_array:
                    phones += [pin]
                else:
                    phones += list(pin)
            # print(phones)
            return phones

        self.text_to_vocab = text_to_vocab_func

    def make_file_list(self, training=True):
        train_list = self.speech_config['train_list']
        test_list = self.speech_config['eval_list']
        if training:
            with open(train_list, encoding='utf-8') as f:
                train_list = f.readlines()
            train_list = [i.strip() for i in train_list if i != '']

            self.train_list = train_list

            np.random.shuffle(self.train_list)
            with open(test_list, encoding='utf-8') as f:
                data = f.readlines()
            data = [i.strip() for i in data if i != '']
            self.test_list = data
            self.train_offset = 0
            self.test_offset = 0
            logging.info('load train list {} test list {}'.format(len(self.train_list), len(self.test_list)))
        else:
            with open(test_list, encoding='utf-8') as f:
                data = f.readlines()
            data = [i.strip() for i in data if i != '']
            self.test_list = data
            self.test_offset = 0

    def only_chinese(self, word):
        txt = ''
        for ch in word:
            if '\u4e00' <= ch <= '\u9fff':
                txt += ch
            else:
                continue
        return txt

    def eval_data_generator(self):
        sample = []
        speech_features = []
        input_length = []
        phones = []
        phones_length = []
        txts = []

        max_input = 0
        batch = self.batch
        for i in range(batch * 10):
            line = self.test_list[self.test_offset]
            self.test_offset += 1
            if self.test_offset > len(self.test_list) - 1:
                self.test_offset = 0
            wp, txt = line.strip().split('\t')
            try:
                data = self.speech_featurizer.load_wav(wp)
            except:
                logging.info('{} load data failed,skip'.format(wp))
                continue
            if len(data) < 400:
                continue
            elif len(data) > self.speech_featurizer.sample_rate * self.speech_config['wav_max_duration']:
                logging.info(
                    '{} duration out of wav_max_duration({}),skip'.format(wp, self.speech_config['wav_max_duration']))
                continue
            if self.speech_config['only_chinese']:
                txt = self.only_chinese(txt)
            if not self.streaming:
                speech_feature = data / np.abs(data).max()
                speech_feature = np.expand_dims(speech_feature, -1)
                in_len = len(speech_feature) // (
                    self.speech_config['reduction_factor'] * (self.speech_featurizer.sample_rate / 1000) *
                    self.speech_config['stride_ms'])
            else:
                speech_feature = data
                speech_feature = np.expand_dims(speech_feature, -1)
                reduce = self.speech_config['reduction_factor'] * (self.speech_featurizer.sample_rate / 1000) * \
                    self.speech_config['stride_ms']
                in_len = len(speech_feature) // self.chunk
                if len(speech_feature) % self.chunk != 0:
                    in_len += 1
                chunk_times = self.chunk // reduce
                if self.chunk % reduce != 0:
                    chunk_times += 1
                in_len *= chunk_times

            py = self.text_to_vocab(txt)
            if self.check_valid(py, self.phone_featurizer.vocab_array) is not True:
                logging.info(' {} txt phone {} not all in tokens,continue'.format(
                    txt, self.check_valid(py, self.phone_featurizer.vocab_array)))
                continue
            if self.check_valid(txt, self.text_featurizer.vocab_array) is not True:
                logging.info(' {} txt phone {} not all in tokens,continue'.format(
                    txt, self.check_valid(py, self.text_featurizer.vocab_array)))
                continue
            txt = list(txt)
            phone_feature = self.phone_featurizer.extract(py)
            text_feature = self.text_featurizer.extract(txt) + [self.text_featurizer.endid()]

            if in_len < len(phone_feature):
                logging.info('{} feature length < phone length,continue'.format(wp))
                continue
            max_input = max(max_input, len(speech_feature))

            speech_features.append(speech_feature)
            input_length.append(in_len)
            phones.append(np.array(phone_feature))
            txts.append(np.array(text_feature))
            phones_length.append(len(phone_feature))
            sample.append(line)
            if len(sample) == batch:
                break

        if self.streaming:
            max_input = max_input // self.chunk * self.chunk + self.chunk

        speech_features = self.speech_featurizer.pad_signal(speech_features, max_input)

        if self.streaming:
            reduce = self.speech_config['reduction_factor'] * (self.speech_featurizer.sample_rate / 1000) * \
                self.speech_config['stride_ms']
            max_input = max_input // self.chunk * self.chunk + self.chunk
            max_in_len = max_input // self.chunk
            chunk_times = self.chunk // reduce
            if self.chunk % reduce != 0:
                chunk_times += 1
            max_in_len *= chunk_times
            input_length = np.clip(input_length, 0, max_in_len)
            speech_features = self.speech_featurizer.pad_signal(speech_features, max_input)

        phones = tf.keras.preprocessing.sequence.pad_sequences(phones, maxlen=max([len(i) for i in phones]),
                                                               padding='post', value=self.phone_featurizer.pad)
        txts = tf.keras.preprocessing.sequence.pad_sequences(txts, maxlen=max([len(i) for i in txts]),
                                                             padding='post', value=self.text_featurizer.pad)
        x = np.array(speech_features, 'float32')
        phones = np.array(phones, 'int32')
        txts = np.array(txts, 'int32')

        input_length = np.array(input_length, 'int32')
        phones_length = np.array(phones_length, 'int32')

        return x, input_length, phones, phones_length, txts

    def check_valid(self, txt, vocab_list):
        if len(txt) == 0:
            return False
        for n in txt:
            if n in vocab_list:
                pass
            else:
                return n
        return True

    def generate(self, train=True):
        sample = []
        speech_features = []
        input_length = []
        phones = []
        phones_length = []
        txts = []

        max_input = 0
        if train:
            batch = self.batch * 3 // 4 if self.augment.available() else self.batch
        else:
            batch = self.batch

        for i in range(batch * 10):
            if train:
                line = self.train_list[self.train_offset]
                self.train_offset += 1
                if self.train_offset > len(self.train_list) - 1:
                    self.train_offset = 0
                    np.random.shuffle(self.train_list)
                    self.epochs += 1
            else:
                line = self.test_list[self.test_offset]
                self.test_offset += 1
                if self.test_offset > len(self.test_list) - 1:
                    self.test_offset = 0
            wp, txt = line.strip().split('\t')
            try:
                data = self.speech_featurizer.load_wav(wp)
            except:
                logging.info('{} load data failed,skip'.format(wp))
                continue
            if len(data) < 400:
                continue
            elif len(data) > self.speech_featurizer.sample_rate * self.speech_config['wav_max_duration']:
                logging.info(
                    '{} duration out of wav_max_duration({}),skip'.format(wp, self.speech_config['wav_max_duration']))
                continue
            if self.speech_config['only_chinese']:
                txt = self.only_chinese(txt)
            if not self.streaming:
                speech_feature = data / np.abs(data).max()
                speech_feature = np.expand_dims(speech_feature, -1)
                in_len = len(speech_feature) // (
                    self.speech_config['reduction_factor'] * (self.speech_featurizer.sample_rate / 1000) *
                    self.speech_config['stride_ms'])
            else:
                speech_feature = data
                speech_feature = np.expand_dims(speech_feature, -1)
                reduce = self.speech_config['reduction_factor'] * (self.speech_featurizer.sample_rate / 1000) * \
                    self.speech_config['stride_ms']
                in_len = len(speech_feature) // self.chunk
                if len(speech_feature) % self.chunk != 0:
                    in_len += 1
                chunk_times = self.chunk // reduce
                if self.chunk % reduce != 0:
                    chunk_times += 1
                in_len *= chunk_times

            py = self.text_to_vocab(txt)
            if self.check_valid(py, self.phone_featurizer.vocab_array) is not True:
                logging.info(' {} txt phone {} not all in tokens,continue'.format(
                    txt, self.check_valid(py, self.phone_featurizer.vocab_array)))
                continue
            if self.check_valid(txt, self.text_featurizer.vocab_array) is not True:
                logging.info(' {} txt {} not all in tokens,continue'.format(
                    txt, self.check_valid(txt, self.text_featurizer.vocab_array)))
                continue
            txt = list(txt)
            phone_feature = self.phone_featurizer.extract(py)
            text_feature = self.text_featurizer.extract(txt) + [self.text_featurizer.endid()]

            if in_len < len(phone_feature):
                logging.info('{} feature length < phone length,continue'.format(wp))
                continue
            max_input = max(max_input, len(speech_feature))

            speech_features.append(speech_feature)
            input_length.append(in_len)
            phones.append(np.array(phone_feature))
            txts.append(np.array(text_feature))
            phones_length.append(len(phone_feature))
            sample.append(line)
            if len(sample) == batch:
                break

        if train and self.augment.available():
            sample = random.sample(sample, self.batch // 4)
            for i in sample:
                wp, txt = i.strip().split('\t')
                try:
                    data = self.speech_featurizer.load_wav(wp)
                except:
                    continue
                if len(data) < 400:
                    logging.info('{} wav too short < 25ms,skip'.format(wp))
                    continue
                elif len(data) > self.speech_featurizer.sample_rate * self.speech_config['wav_max_duration']:
                    continue
                data = self.augment.process(data)
                if self.speech_config['only_chinese']:
                    txt = self.only_chinese(txt)
                if not self.streaming:
                    speech_feature = data / np.abs(data).max()
                    speech_feature = np.expand_dims(speech_feature, -1)
                    in_len = len(speech_feature) // (
                        self.speech_config['reduction_factor'] * (self.speech_featurizer.sample_rate / 1000) *
                        self.speech_config['stride_ms'])
                else:
                    speech_feature = data
                    speech_feature = np.expand_dims(speech_feature, -1)
                    reduce = self.speech_config['reduction_factor'] * (self.speech_featurizer.sample_rate / 1000) * \
                        self.speech_config['stride_ms']
                    in_len = len(speech_feature) // self.chunk
                    if len(speech_feature) % self.chunk != 0:
                        in_len += 1
                    chunk_times = self.chunk // reduce
                    if self.chunk % reduce != 0:
                        chunk_times += 1
                    in_len *= chunk_times

                py = self.text_to_vocab(txt)
                if self.check_valid(py, self.phone_featurizer.vocab_array) is not True:
                    logging.info(' {} txt phone {} not all in tokens,continue'.format(
                        txt, self.check_valid(py, self.phone_featurizer.vocab_array)))
                    continue
                if self.check_valid(txt, self.text_featurizer.vocab_array) is not True:
                    logging.info(' {} txt {} not all in tokens,continue'.format(
                        txt, self.check_valid(txt, self.text_featurizer.vocab_array)))
                    continue
                txt = list(txt)
                phone_feature = self.phone_featurizer.extract(py)
                text_feature = self.text_featurizer.extract(txt) + [self.text_featurizer.endid()]

                if in_len < len(phone_feature):
                    logging.info('{} feature length < phone length,continue'.format(wp))
                    continue
                max_input = max(max_input, len(speech_feature))

                speech_features.append(speech_feature)
                input_length.append(in_len)
                phones.append(np.array(phone_feature))
                txts.append(np.array(text_feature))
                phones_length.append(len(phone_feature))

        if self.streaming:
            reduce = self.speech_config['reduction_factor'] * (self.speech_featurizer.sample_rate / 1000) * \
                self.speech_config['stride_ms']
            max_input = max_input // self.chunk * self.chunk + self.chunk
            max_in_len = max_input // self.chunk
            chunk_times = self.chunk // reduce
            if self.chunk % reduce != 0:
                chunk_times += 1
            max_in_len *= chunk_times
            input_length = np.clip(input_length, 0, max_in_len)

        speech_features = self.speech_featurizer.pad_signal(speech_features, max_input)
        phones = tf.keras.preprocessing.sequence.pad_sequences(phones, maxlen=max([len(i) for i in phones]),
                                                               padding='post', value=self.phone_featurizer.pad)
        txts = tf.keras.preprocessing.sequence.pad_sequences(txts, maxlen=max([len(i) for i in txts]),
                                                             padding='post', value=self.text_featurizer.pad)
        x = np.array(speech_features, 'float32')
        phones = np.array(phones, 'int32')
        txts = np.array(txts, 'int32')

        input_length = np.array(input_length, 'int32')
        phones_length = np.array(phones_length, 'int32')

        return x, input_length, phones, phones_length, txts

    def generator(self, train=True):
        while 1:
            s = time.time()
            x, input_length, phones, phones_length, txts = self.generate(train)
            e = time.time()
            logging.info('load data cost time: {}'.format(e - s))
            if x.shape[0] == 0:
                logging.info('load data length zero,continue')
                continue
            yield x, input_length, phones, phones_length, txts
[((12, 0, 12, 102), 'logging.basicConfig', 'logging.basicConfig', (), '', False, 'import logging\n'), ((27, 33, 27, 69), 'utils.speech_featurizers.SpeechFeaturizer', 'SpeechFeaturizer', ({(27, 50, 27, 68): 'self.speech_config'}, {}), '(self.speech_config)', False, 'from utils.speech_featurizers import SpeechFeaturizer\n'), ((28, 32, 28, 65), 'utils.text_featurizers.TextFeaturizer', 'TextFeaturizer', ({(28, 47, 28, 64): 'self.phone_config'}, {}), '(self.phone_config)', False, 'from utils.text_featurizers import TextFeaturizer\n'), ((29, 31, 29, 63), 'utils.text_featurizers.TextFeaturizer', 'TextFeaturizer', ({(29, 46, 29, 62): 'self.text_config'}, {}), '(self.text_config)', False, 'from utils.text_featurizers import TextFeaturizer\n'), ((31, 23, 31, 56), 'augmentations.augments.Augmentation', 'Augmentation', ({(31, 36, 31, 55): 'self.augment_config'}, {}), '(self.augment_config)', False, 'from augmentations.augments import Augmentation\n'), ((59, 8, 68, 38), 'pypinyin.load_phrases_dict', 'pypinyin.load_phrases_dict', ({(59, 35, 68, 37): "{'调大': [['tiáo'], ['dà']], '调小': [['tiáo'], ['xiǎo']], '调亮': [['tiáo'], [\n 'liàng']], '调暗': [['tiáo'], ['àn']], '肖': [['xiāo']], '英雄传': [['yīng'],\n ['xióng'], ['zhuàn']], '新传': [['xīn'], ['zhuàn']], '外传': [['wài'], [\n 'zhuàn']], '正传': [['zhèng'], ['zhuàn']], '水浒传': [['shuǐ'], ['hǔ'], [\n 'zhuàn']]}"}, {}), "({'调大': [['tiáo'], ['dà']], '调小': [['tiáo'], [\n 'xiǎo']], '调亮': [['tiáo'], ['liàng']], '调暗': [['tiáo'], ['àn']], '肖': [\n ['xiāo']], '英雄传': [['yīng'], ['xióng'], ['zhuàn']], '新传': [['xīn'], [\n 'zhuàn']], '外传': [['wài'], ['zhuàn']], '正传': [['zhèng'], ['zhuàn']],\n '水浒传': [['shuǐ'], ['hǔ'], ['zhuàn']]})", False, 'import pypinyin\n'), ((221, 12, 221, 48), 'numpy.array', 'np.array', ({(221, 21, 221, 36): 'speech_features', (221, 38, 221, 47): '"""float32"""'}, {}), "(speech_features, 'float32')", True, 'import numpy as np\n'), ((222, 17, 222, 42), 'numpy.array', 'np.array', ({(222, 26, 222, 32): 'phones', (222, 34, 222, 41): '"""int32"""'}, {}), "(phones, 'int32')", True, 'import numpy as np\n'), ((223, 15, 223, 38), 'numpy.array', 'np.array', ({(223, 24, 223, 28): 'txts', (223, 30, 223, 37): '"""int32"""'}, {}), "(txts, 'int32')", True, 'import numpy as np\n'), ((225, 23, 225, 54), 'numpy.array', 'np.array', ({(225, 32, 225, 44): 'input_length', (225, 46, 225, 53): '"""int32"""'}, {}), "(input_length, 'int32')", True, 'import numpy as np\n'), ((226, 24, 226, 56), 'numpy.array', 'np.array', ({(226, 33, 226, 46): 'phones_length', (226, 48, 226, 55): '"""int32"""'}, {}), "(phones_length, 'int32')", True, 'import numpy as np\n'), ((410, 12, 410, 48), 'numpy.array', 'np.array', ({(410, 21, 410, 36): 'speech_features', (410, 38, 410, 47): '"""float32"""'}, {}), "(speech_features, 'float32')", True, 'import numpy as np\n'), ((411, 17, 411, 42), 'numpy.array', 'np.array', ({(411, 26, 411, 32): 'phones', (411, 34, 411, 41): '"""int32"""'}, {}), "(phones, 'int32')", True, 'import numpy as np\n'), ((412, 15, 412, 38), 'numpy.array', 'np.array', ({(412, 24, 412, 28): 'txts', (412, 30, 412, 37): '"""int32"""'}, {}), "(txts, 'int32')", True, 'import numpy as np\n'), ((414, 23, 414, 54), 'numpy.array', 'np.array', ({(414, 32, 414, 44): 'input_length', (414, 46, 414, 53): '"""int32"""'}, {}), "(input_length, 'int32')", True, 'import numpy as np\n'), ((415, 24, 415, 56), 'numpy.array', 'np.array', ({(415, 33, 415, 46): 'phones_length', (415, 48, 415, 55): '"""int32"""'}, {}), "(phones_length, 'int32')", True, 'import numpy as np\n'), ((44, 12, 44, 49), 
'tensorflow.TensorShape', 'tf.TensorShape', ({(44, 27, 44, 48): '[self.batch, None, 1]'}, {}), '([self.batch, None, 1])', True, 'import tensorflow as tf\n'), ((46, 12, 46, 42), 'tensorflow.TensorShape', 'tf.TensorShape', ({(46, 27, 46, 41): '[self.batch]'}, {}), '([self.batch])', True, 'import tensorflow as tf\n'), ((47, 12, 47, 46), 'tensorflow.TensorShape', 'tf.TensorShape', ({(47, 27, 47, 45): '[self.batch, None]'}, {}), '([self.batch, None])', True, 'import tensorflow as tf\n'), ((48, 12, 48, 42), 'tensorflow.TensorShape', 'tf.TensorShape', ({(48, 27, 48, 41): '[self.batch]'}, {}), '([self.batch])', True, 'import tensorflow as tf\n'), ((49, 12, 49, 46), 'tensorflow.TensorShape', 'tf.TensorShape', ({(49, 27, 49, 45): '[self.batch, None]'}, {}), '([self.batch, None])', True, 'import tensorflow as tf\n'), ((71, 19, 71, 39), 'pypinyin.pinyin', 'pypinyin.pinyin', ({(71, 35, 71, 38): 'txt'}, {}), '(txt)', False, 'import pypinyin\n'), ((94, 12, 94, 46), 'numpy.random.shuffle', 'np.random.shuffle', ({(94, 30, 94, 45): 'self.train_list'}, {}), '(self.train_list)', True, 'import numpy as np\n'), ((214, 27, 214, 63), 'numpy.clip', 'np.clip', ({(214, 35, 214, 47): 'input_length', (214, 49, 214, 50): '0', (214, 52, 214, 62): 'max_in_len'}, {}), '(input_length, 0, max_in_len)', True, 'import numpy as np\n'), ((333, 21, 333, 59), 'random.sample', 'random.sample', ({(333, 35, 333, 41): 'sample', (333, 43, 333, 58): 'self.batch // 4'}, {}), '(sample, self.batch // 4)', False, 'import random\n'), ((404, 27, 404, 63), 'numpy.clip', 'np.clip', ({(404, 35, 404, 47): 'input_length', (404, 49, 404, 50): '0', (404, 52, 404, 62): 'max_in_len'}, {}), '(input_length, 0, max_in_len)', True, 'import numpy as np\n'), ((421, 14, 421, 25), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((423, 14, 423, 25), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((153, 33, 153, 67), 'numpy.expand_dims', 'np.expand_dims', ({(153, 48, 153, 62): 'speech_feature', (153, 64, 153, 66): '-1'}, {}), '(speech_feature, -1)', True, 'import numpy as np\n'), ((159, 33, 159, 67), 'numpy.expand_dims', 'np.expand_dims', ({(159, 48, 159, 62): 'speech_feature', (159, 64, 159, 66): '-1'}, {}), '(speech_feature, -1)', True, 'import numpy as np\n'), ((191, 26, 191, 49), 'numpy.array', 'np.array', ({(191, 35, 191, 48): 'phone_feature'}, {}), '(phone_feature)', True, 'import numpy as np\n'), ((192, 24, 192, 46), 'numpy.array', 'np.array', ({(192, 33, 192, 45): 'text_feature'}, {}), '(text_feature)', True, 'import numpy as np\n'), ((287, 33, 287, 67), 'numpy.expand_dims', 'np.expand_dims', ({(287, 48, 287, 62): 'speech_feature', (287, 64, 287, 66): '-1'}, {}), '(speech_feature, -1)', True, 'import numpy as np\n'), ((293, 33, 293, 67), 'numpy.expand_dims', 'np.expand_dims', ({(293, 48, 293, 62): 'speech_feature', (293, 64, 293, 66): '-1'}, {}), '(speech_feature, -1)', True, 'import numpy as np\n'), ((325, 26, 325, 49), 'numpy.array', 'np.array', ({(325, 35, 325, 48): 'phone_feature'}, {}), '(phone_feature)', True, 'import numpy as np\n'), ((326, 24, 326, 46), 'numpy.array', 'np.array', ({(326, 33, 326, 45): 'text_feature'}, {}), '(text_feature)', True, 'import numpy as np\n'), ((426, 16, 426, 62), 'logging.info', 'logging.info', ({(426, 29, 426, 61): '"""load data length zero,continue"""'}, {}), "('load data length zero,continue')", False, 'import logging\n'), ((263, 20, 263, 54), 'numpy.random.shuffle', 'np.random.shuffle', ({(263, 38, 263, 53): 'self.train_list'}, {}), '(self.train_list)', True, 'import numpy 
as np\n'), ((351, 37, 351, 71), 'numpy.expand_dims', 'np.expand_dims', ({(351, 52, 351, 66): 'speech_feature', (351, 68, 351, 70): '-1'}, {}), '(speech_feature, -1)', True, 'import numpy as np\n'), ((357, 37, 357, 71), 'numpy.expand_dims', 'np.expand_dims', ({(357, 52, 357, 66): 'speech_feature', (357, 68, 357, 70): '-1'}, {}), '(speech_feature, -1)', True, 'import numpy as np\n'), ((390, 30, 390, 53), 'numpy.array', 'np.array', ({(390, 39, 390, 52): 'phone_feature'}, {}), '(phone_feature)', True, 'import numpy as np\n'), ((391, 28, 391, 50), 'numpy.array', 'np.array', ({(391, 37, 391, 49): 'text_feature'}, {}), '(text_feature)', True, 'import numpy as np\n'), ((152, 40, 152, 52), 'numpy.abs', 'np.abs', ({(152, 47, 152, 51): 'data'}, {}), '(data)', True, 'import numpy as np\n'), ((286, 40, 286, 52), 'numpy.abs', 'np.abs', ({(286, 47, 286, 51): 'data'}, {}), '(data)', True, 'import numpy as np\n'), ((350, 44, 350, 56), 'numpy.abs', 'np.abs', ({(350, 51, 350, 55): 'data'}, {}), '(data)', True, 'import numpy as np\n')]
AlexKouzy/ethnicity-facts-and-figures-publisher
migrations/versions/2018_04_20_data_src_refactor.py
18ab2495a8633f585e18e607c7f75daa564a053d
"""empty message Revision ID: 2018_04_20_data_src_refactor Revises: 2018_04_11_add_sandbox_topic Create Date: 2018-04-20 13:03:32.478880 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. from sqlalchemy.dialects.postgresql import ARRAY revision = '2018_04_20_data_src_refactor' down_revision = '2018_04_11_add_sandbox_topic' branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### type_of_data_types = sa.Enum('ADMINISTRATIVE', 'SURVEY', name='type_of_data_types') op.add_column('page', sa.Column('secondary_source_1_type_of_data', ARRAY(type_of_data_types), nullable=True)) op.add_column('page', sa.Column('suppression_and_disclosure', sa.TEXT(), nullable=True)) op.add_column('page', sa.Column('note_on_corrections_or_updates', sa.TEXT(), nullable=True)) op.add_column('page', sa.Column('secondary_source_1_note_on_corrections_or_updates', sa.TEXT(), nullable=True)) op.add_column('page', sa.Column('secondary_source_1_data_source_purpose', sa.TEXT(), nullable=True)) op.get_bind() op.execute(''' UPDATE page SET suppression_and_disclosure = suppression_rules WHERE disclosure_control is null; ''') op.execute(''' UPDATE page SET suppression_and_disclosure = disclosure_control WHERE suppression_rules is null; ''') op.execute(''' UPDATE page SET suppression_and_disclosure = trim(suppression_rules || ' ' || disclosure_control) WHERE suppression_rules is not null AND disclosure_control is not null; ''') op.drop_constraint('organisation_secondary_source_2_fkey', 'page', type_='foreignkey') op.drop_constraint('frequency_secondary_source_2_fkey', 'page', type_='foreignkey') op.drop_constraint('secondary_source_2_type_of_statistic_fkey', 'page', type_='foreignkey') op.drop_column('page', 'secondary_source_1_date_next_update') op.drop_column('page', 'secondary_source_1_date_updated') op.drop_column('page', 'secondary_source_1_suppression_rules') op.drop_column('page', 'secondary_source_1_disclosure_control') op.drop_column('page', 'secondary_source_2_frequency') op.drop_column('page', 'secondary_source_2_contact_2_name') op.drop_column('page', 'secondary_source_2_contact_2_phone') op.drop_column('page', 'secondary_source_2_url') op.drop_column('page', 'secondary_source_2_date_next_update') op.drop_column('page', 'secondary_source_2_contact_1_name') op.drop_column('page', 'last_update_date') op.drop_column('page', 'secondary_source_2_contact_1_phone') op.drop_column('page', 'secondary_source_2_publisher_text') op.drop_column('page', 'secondary_source_2_disclosure_control') op.drop_column('page', 'secondary_source_2_type_of_statistic_id') op.drop_column('page', 'secondary_source_2_suppression_rules') op.drop_column('page', 'secondary_source_2_frequency_other') op.drop_column('page', 'secondary_source_2_publisher_id') op.drop_column('page', 'secondary_source_2_title') op.drop_column('page', 'secondary_source_2_date') op.drop_column('page', 'next_update_date') op.drop_column('page', 'secondary_source_2_date_updated') op.drop_column('page', 'secondary_source_2_statistic_type') op.drop_column('page', 'secondary_source_2_frequency_id') op.drop_column('page', 'secondary_source_2_contact_2_email') op.drop_column('page', 'secondary_source_2_contact_1_email') # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! 
### op.add_column('page', sa.Column('secondary_source_2_contact_1_email', sa.TEXT(), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_2_contact_2_email', sa.TEXT(), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_2_frequency_id', sa.INTEGER(), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_2_statistic_type', sa.TEXT(), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_2_date_updated', sa.TEXT(), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_2_date', sa.TEXT(), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_2_title', sa.TEXT(), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_2_publisher_id', sa.VARCHAR(length=255), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_2_frequency_other', sa.VARCHAR(length=255), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_2_suppression_rules', sa.TEXT(), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_2_type_of_statistic_id', sa.INTEGER(), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_2_disclosure_control', sa.TEXT(), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_2_publisher_text', sa.TEXT(), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_2_contact_1_phone', sa.TEXT(), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_2_contact_1_name', sa.TEXT(), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_2_date_next_update', sa.TEXT(), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_2_url', sa.TEXT(), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_2_contact_2_phone', sa.TEXT(), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_2_contact_2_name', sa.TEXT(), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_2_frequency', sa.TEXT(), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('last_update_date', sa.VARCHAR(length=255), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('next_update_date', sa.VARCHAR(length=255), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_1_date_next_update', sa.TEXT(), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_1_date_updated', sa.TEXT(), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_1_disclosure_control', sa.TEXT(), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_1_suppression_rules', sa.TEXT(), autoincrement=False, nullable=True)) op.create_foreign_key('secondary_source_2_type_of_statistic_fkey', 'page', 'type_of_statistic', ['secondary_source_2_type_of_statistic_id'], ['id']) op.create_foreign_key('frequency_secondary_source_2_fkey', 'page', 'frequency_of_release', ['secondary_source_2_frequency_id'], ['id']) op.create_foreign_key('organisation_secondary_source_2_fkey', 'page', 'organisation', ['secondary_source_2_publisher_id'], ['id']) op.drop_column('page', 'secondary_source_1_type_of_data') op.drop_column('page', 
'suppression_and_disclosure') op.drop_column('page', 'note_on_corrections_or_updates') op.drop_column('page', 'secondary_source_1_note_on_corrections_or_updates') op.drop_column('page', 'secondary_source_1_data_source_purpose') # ### end Alembic commands ###
[((24, 25, 24, 87), 'sqlalchemy.Enum', 'sa.Enum', (), '', True, 'import sqlalchemy as sa\n'), ((31, 4, 31, 17), 'alembic.op.get_bind', 'op.get_bind', ({}, {}), '()', False, 'from alembic import op\n'), ((33, 4, 36, 8), 'alembic.op.execute', 'op.execute', ({(33, 15, 36, 7): '"""\n UPDATE page SET suppression_and_disclosure = suppression_rules \n WHERE disclosure_control is null;\n """'}, {}), '(\n """\n UPDATE page SET suppression_and_disclosure = suppression_rules \n WHERE disclosure_control is null;\n """\n )', False, 'from alembic import op\n'), ((38, 4, 41, 11), 'alembic.op.execute', 'op.execute', ({(38, 15, 41, 10): '"""\n UPDATE page SET suppression_and_disclosure = disclosure_control \n WHERE suppression_rules is null;\n """'}, {}), '(\n """\n UPDATE page SET suppression_and_disclosure = disclosure_control \n WHERE suppression_rules is null;\n """\n )', False, 'from alembic import op\n'), ((43, 4, 47, 8), 'alembic.op.execute', 'op.execute', ({(43, 15, 47, 7): '"""\n UPDATE page SET suppression_and_disclosure = trim(suppression_rules || \' \' || disclosure_control)\n WHERE suppression_rules is not null\n AND disclosure_control is not null;\n """'}, {}), '(\n """\n UPDATE page SET suppression_and_disclosure = trim(suppression_rules || \' \' || disclosure_control)\n WHERE suppression_rules is not null\n AND disclosure_control is not null;\n """\n )', False, 'from alembic import op\n'), ((49, 4, 49, 90), 'alembic.op.drop_constraint', 'op.drop_constraint', (), '', False, 'from alembic import op\n'), ((50, 4, 50, 87), 'alembic.op.drop_constraint', 'op.drop_constraint', (), '', False, 'from alembic import op\n'), ((51, 4, 51, 95), 'alembic.op.drop_constraint', 'op.drop_constraint', (), '', False, 'from alembic import op\n'), ((52, 4, 52, 65), 'alembic.op.drop_column', 'op.drop_column', ({(52, 19, 52, 25): '"""page"""', (52, 27, 52, 64): '"""secondary_source_1_date_next_update"""'}, {}), "('page', 'secondary_source_1_date_next_update')", False, 'from alembic import op\n'), ((53, 4, 53, 61), 'alembic.op.drop_column', 'op.drop_column', ({(53, 19, 53, 25): '"""page"""', (53, 27, 53, 60): '"""secondary_source_1_date_updated"""'}, {}), "('page', 'secondary_source_1_date_updated')", False, 'from alembic import op\n'), ((54, 4, 54, 66), 'alembic.op.drop_column', 'op.drop_column', ({(54, 19, 54, 25): '"""page"""', (54, 27, 54, 65): '"""secondary_source_1_suppression_rules"""'}, {}), "('page', 'secondary_source_1_suppression_rules')", False, 'from alembic import op\n'), ((55, 4, 55, 67), 'alembic.op.drop_column', 'op.drop_column', ({(55, 19, 55, 25): '"""page"""', (55, 27, 55, 66): '"""secondary_source_1_disclosure_control"""'}, {}), "('page', 'secondary_source_1_disclosure_control')", False, 'from alembic import op\n'), ((56, 4, 56, 58), 'alembic.op.drop_column', 'op.drop_column', ({(56, 19, 56, 25): '"""page"""', (56, 27, 56, 57): '"""secondary_source_2_frequency"""'}, {}), "('page', 'secondary_source_2_frequency')", False, 'from alembic import op\n'), ((57, 4, 57, 63), 'alembic.op.drop_column', 'op.drop_column', ({(57, 19, 57, 25): '"""page"""', (57, 27, 57, 62): '"""secondary_source_2_contact_2_name"""'}, {}), "('page', 'secondary_source_2_contact_2_name')", False, 'from alembic import op\n'), ((58, 4, 58, 64), 'alembic.op.drop_column', 'op.drop_column', ({(58, 19, 58, 25): '"""page"""', (58, 27, 58, 63): '"""secondary_source_2_contact_2_phone"""'}, {}), "('page', 'secondary_source_2_contact_2_phone')", False, 'from alembic import op\n'), ((59, 4, 59, 52), 'alembic.op.drop_column', 
'op.drop_column', ({(59, 19, 59, 25): '"""page"""', (59, 27, 59, 51): '"""secondary_source_2_url"""'}, {}), "('page', 'secondary_source_2_url')", False, 'from alembic import op\n'), ((60, 4, 60, 65), 'alembic.op.drop_column', 'op.drop_column', ({(60, 19, 60, 25): '"""page"""', (60, 27, 60, 64): '"""secondary_source_2_date_next_update"""'}, {}), "('page', 'secondary_source_2_date_next_update')", False, 'from alembic import op\n'), ((61, 4, 61, 63), 'alembic.op.drop_column', 'op.drop_column', ({(61, 19, 61, 25): '"""page"""', (61, 27, 61, 62): '"""secondary_source_2_contact_1_name"""'}, {}), "('page', 'secondary_source_2_contact_1_name')", False, 'from alembic import op\n'), ((62, 4, 62, 46), 'alembic.op.drop_column', 'op.drop_column', ({(62, 19, 62, 25): '"""page"""', (62, 27, 62, 45): '"""last_update_date"""'}, {}), "('page', 'last_update_date')", False, 'from alembic import op\n'), ((63, 4, 63, 64), 'alembic.op.drop_column', 'op.drop_column', ({(63, 19, 63, 25): '"""page"""', (63, 27, 63, 63): '"""secondary_source_2_contact_1_phone"""'}, {}), "('page', 'secondary_source_2_contact_1_phone')", False, 'from alembic import op\n'), ((64, 4, 64, 63), 'alembic.op.drop_column', 'op.drop_column', ({(64, 19, 64, 25): '"""page"""', (64, 27, 64, 62): '"""secondary_source_2_publisher_text"""'}, {}), "('page', 'secondary_source_2_publisher_text')", False, 'from alembic import op\n'), ((65, 4, 65, 67), 'alembic.op.drop_column', 'op.drop_column', ({(65, 19, 65, 25): '"""page"""', (65, 27, 65, 66): '"""secondary_source_2_disclosure_control"""'}, {}), "('page', 'secondary_source_2_disclosure_control')", False, 'from alembic import op\n'), ((66, 4, 66, 69), 'alembic.op.drop_column', 'op.drop_column', ({(66, 19, 66, 25): '"""page"""', (66, 27, 66, 68): '"""secondary_source_2_type_of_statistic_id"""'}, {}), "('page', 'secondary_source_2_type_of_statistic_id')", False, 'from alembic import op\n'), ((67, 4, 67, 66), 'alembic.op.drop_column', 'op.drop_column', ({(67, 19, 67, 25): '"""page"""', (67, 27, 67, 65): '"""secondary_source_2_suppression_rules"""'}, {}), "('page', 'secondary_source_2_suppression_rules')", False, 'from alembic import op\n'), ((68, 4, 68, 64), 'alembic.op.drop_column', 'op.drop_column', ({(68, 19, 68, 25): '"""page"""', (68, 27, 68, 63): '"""secondary_source_2_frequency_other"""'}, {}), "('page', 'secondary_source_2_frequency_other')", False, 'from alembic import op\n'), ((69, 4, 69, 61), 'alembic.op.drop_column', 'op.drop_column', ({(69, 19, 69, 25): '"""page"""', (69, 27, 69, 60): '"""secondary_source_2_publisher_id"""'}, {}), "('page', 'secondary_source_2_publisher_id')", False, 'from alembic import op\n'), ((70, 4, 70, 54), 'alembic.op.drop_column', 'op.drop_column', ({(70, 19, 70, 25): '"""page"""', (70, 27, 70, 53): '"""secondary_source_2_title"""'}, {}), "('page', 'secondary_source_2_title')", False, 'from alembic import op\n'), ((71, 4, 71, 53), 'alembic.op.drop_column', 'op.drop_column', ({(71, 19, 71, 25): '"""page"""', (71, 27, 71, 52): '"""secondary_source_2_date"""'}, {}), "('page', 'secondary_source_2_date')", False, 'from alembic import op\n'), ((72, 4, 72, 46), 'alembic.op.drop_column', 'op.drop_column', ({(72, 19, 72, 25): '"""page"""', (72, 27, 72, 45): '"""next_update_date"""'}, {}), "('page', 'next_update_date')", False, 'from alembic import op\n'), ((73, 4, 73, 61), 'alembic.op.drop_column', 'op.drop_column', ({(73, 19, 73, 25): '"""page"""', (73, 27, 73, 60): '"""secondary_source_2_date_updated"""'}, {}), "('page', 'secondary_source_2_date_updated')", False, 'from 
alembic import op\n'), ((74, 4, 74, 63), 'alembic.op.drop_column', 'op.drop_column', ({(74, 19, 74, 25): '"""page"""', (74, 27, 74, 62): '"""secondary_source_2_statistic_type"""'}, {}), "('page', 'secondary_source_2_statistic_type')", False, 'from alembic import op\n'), ((75, 4, 75, 61), 'alembic.op.drop_column', 'op.drop_column', ({(75, 19, 75, 25): '"""page"""', (75, 27, 75, 60): '"""secondary_source_2_frequency_id"""'}, {}), "('page', 'secondary_source_2_frequency_id')", False, 'from alembic import op\n'), ((76, 4, 76, 64), 'alembic.op.drop_column', 'op.drop_column', ({(76, 19, 76, 25): '"""page"""', (76, 27, 76, 63): '"""secondary_source_2_contact_2_email"""'}, {}), "('page', 'secondary_source_2_contact_2_email')", False, 'from alembic import op\n'), ((77, 4, 77, 64), 'alembic.op.drop_column', 'op.drop_column', ({(77, 19, 77, 25): '"""page"""', (77, 27, 77, 63): '"""secondary_source_2_contact_1_email"""'}, {}), "('page', 'secondary_source_2_contact_1_email')", False, 'from alembic import op\n'), ((110, 4, 110, 152), 'alembic.op.create_foreign_key', 'op.create_foreign_key', ({(110, 26, 110, 69): '"""secondary_source_2_type_of_statistic_fkey"""', (110, 71, 110, 77): '"""page"""', (110, 79, 110, 98): '"""type_of_statistic"""', (110, 100, 110, 143): "['secondary_source_2_type_of_statistic_id']", (110, 145, 110, 151): "['id']"}, {}), "('secondary_source_2_type_of_statistic_fkey', 'page',\n 'type_of_statistic', ['secondary_source_2_type_of_statistic_id'], ['id'])", False, 'from alembic import op\n'), ((111, 4, 111, 139), 'alembic.op.create_foreign_key', 'op.create_foreign_key', ({(111, 26, 111, 61): '"""frequency_secondary_source_2_fkey"""', (111, 63, 111, 69): '"""page"""', (111, 71, 111, 93): '"""frequency_of_release"""', (111, 95, 111, 130): "['secondary_source_2_frequency_id']", (111, 132, 111, 138): "['id']"}, {}), "('frequency_secondary_source_2_fkey', 'page',\n 'frequency_of_release', ['secondary_source_2_frequency_id'], ['id'])", False, 'from alembic import op\n'), ((112, 4, 112, 134), 'alembic.op.create_foreign_key', 'op.create_foreign_key', ({(112, 26, 112, 64): '"""organisation_secondary_source_2_fkey"""', (112, 66, 112, 72): '"""page"""', (112, 74, 112, 88): '"""organisation"""', (112, 90, 112, 125): "['secondary_source_2_publisher_id']", (112, 127, 112, 133): "['id']"}, {}), "('organisation_secondary_source_2_fkey', 'page',\n 'organisation', ['secondary_source_2_publisher_id'], ['id'])", False, 'from alembic import op\n'), ((114, 4, 114, 61), 'alembic.op.drop_column', 'op.drop_column', ({(114, 19, 114, 25): '"""page"""', (114, 27, 114, 60): '"""secondary_source_1_type_of_data"""'}, {}), "('page', 'secondary_source_1_type_of_data')", False, 'from alembic import op\n'), ((115, 4, 115, 56), 'alembic.op.drop_column', 'op.drop_column', ({(115, 19, 115, 25): '"""page"""', (115, 27, 115, 55): '"""suppression_and_disclosure"""'}, {}), "('page', 'suppression_and_disclosure')", False, 'from alembic import op\n'), ((116, 4, 116, 60), 'alembic.op.drop_column', 'op.drop_column', ({(116, 19, 116, 25): '"""page"""', (116, 27, 116, 59): '"""note_on_corrections_or_updates"""'}, {}), "('page', 'note_on_corrections_or_updates')", False, 'from alembic import op\n'), ((117, 4, 117, 79), 'alembic.op.drop_column', 'op.drop_column', ({(117, 19, 117, 25): '"""page"""', (117, 27, 117, 78): '"""secondary_source_1_note_on_corrections_or_updates"""'}, {}), "('page', 'secondary_source_1_note_on_corrections_or_updates')", False, 'from alembic import op\n'), ((118, 4, 118, 68), 'alembic.op.drop_column', 
'op.drop_column', ({(118, 19, 118, 25): '"""page"""', (118, 27, 118, 67): '"""secondary_source_1_data_source_purpose"""'}, {}), "('page', 'secondary_source_1_data_source_purpose')", False, 'from alembic import op\n'), ((25, 71, 25, 96), 'sqlalchemy.dialects.postgresql.ARRAY', 'ARRAY', ({(25, 77, 25, 95): 'type_of_data_types'}, {}), '(type_of_data_types)', False, 'from sqlalchemy.dialects.postgresql import ARRAY\n'), ((26, 66, 26, 75), 'sqlalchemy.TEXT', 'sa.TEXT', ({}, {}), '()', True, 'import sqlalchemy as sa\n'), ((27, 70, 27, 79), 'sqlalchemy.TEXT', 'sa.TEXT', ({}, {}), '()', True, 'import sqlalchemy as sa\n'), ((28, 89, 28, 98), 'sqlalchemy.TEXT', 'sa.TEXT', ({}, {}), '()', True, 'import sqlalchemy as sa\n'), ((29, 78, 29, 87), 'sqlalchemy.TEXT', 'sa.TEXT', ({}, {}), '()', True, 'import sqlalchemy as sa\n'), ((83, 74, 83, 83), 'sqlalchemy.TEXT', 'sa.TEXT', ({}, {}), '()', True, 'import sqlalchemy as sa\n'), ((84, 74, 84, 83), 'sqlalchemy.TEXT', 'sa.TEXT', ({}, {}), '()', True, 'import sqlalchemy as sa\n'), ((85, 71, 85, 83), 'sqlalchemy.INTEGER', 'sa.INTEGER', ({}, {}), '()', True, 'import sqlalchemy as sa\n'), ((86, 73, 86, 82), 'sqlalchemy.TEXT', 'sa.TEXT', ({}, {}), '()', True, 'import sqlalchemy as sa\n'), ((87, 71, 87, 80), 'sqlalchemy.TEXT', 'sa.TEXT', ({}, {}), '()', True, 'import sqlalchemy as sa\n'), ((88, 63, 88, 72), 'sqlalchemy.TEXT', 'sa.TEXT', ({}, {}), '()', True, 'import sqlalchemy as sa\n'), ((89, 64, 89, 73), 'sqlalchemy.TEXT', 'sa.TEXT', ({}, {}), '()', True, 'import sqlalchemy as sa\n'), ((90, 71, 90, 93), 'sqlalchemy.VARCHAR', 'sa.VARCHAR', (), '', True, 'import sqlalchemy as sa\n'), ((91, 74, 91, 96), 'sqlalchemy.VARCHAR', 'sa.VARCHAR', (), '', True, 'import sqlalchemy as sa\n'), ((92, 76, 92, 85), 'sqlalchemy.TEXT', 'sa.TEXT', ({}, {}), '()', True, 'import sqlalchemy as sa\n'), ((93, 79, 93, 91), 'sqlalchemy.INTEGER', 'sa.INTEGER', ({}, {}), '()', True, 'import sqlalchemy as sa\n'), ((94, 77, 94, 86), 'sqlalchemy.TEXT', 'sa.TEXT', ({}, {}), '()', True, 'import sqlalchemy as sa\n'), ((95, 73, 95, 82), 'sqlalchemy.TEXT', 'sa.TEXT', ({}, {}), '()', True, 'import sqlalchemy as sa\n'), ((96, 74, 96, 83), 'sqlalchemy.TEXT', 'sa.TEXT', ({}, {}), '()', True, 'import sqlalchemy as sa\n'), ((97, 73, 97, 82), 'sqlalchemy.TEXT', 'sa.TEXT', ({}, {}), '()', True, 'import sqlalchemy as sa\n'), ((98, 75, 98, 84), 'sqlalchemy.TEXT', 'sa.TEXT', ({}, {}), '()', True, 'import sqlalchemy as sa\n'), ((99, 62, 99, 71), 'sqlalchemy.TEXT', 'sa.TEXT', ({}, {}), '()', True, 'import sqlalchemy as sa\n'), ((100, 74, 100, 83), 'sqlalchemy.TEXT', 'sa.TEXT', ({}, {}), '()', True, 'import sqlalchemy as sa\n'), ((101, 73, 101, 82), 'sqlalchemy.TEXT', 'sa.TEXT', ({}, {}), '()', True, 'import sqlalchemy as sa\n'), ((102, 68, 102, 77), 'sqlalchemy.TEXT', 'sa.TEXT', ({}, {}), '()', True, 'import sqlalchemy as sa\n'), ((103, 56, 103, 78), 'sqlalchemy.VARCHAR', 'sa.VARCHAR', (), '', True, 'import sqlalchemy as sa\n'), ((104, 56, 104, 78), 'sqlalchemy.VARCHAR', 'sa.VARCHAR', (), '', True, 'import sqlalchemy as sa\n'), ((105, 75, 105, 84), 'sqlalchemy.TEXT', 'sa.TEXT', ({}, {}), '()', True, 'import sqlalchemy as sa\n'), ((106, 71, 106, 80), 'sqlalchemy.TEXT', 'sa.TEXT', ({}, {}), '()', True, 'import sqlalchemy as sa\n'), ((107, 77, 107, 86), 'sqlalchemy.TEXT', 'sa.TEXT', ({}, {}), '()', True, 'import sqlalchemy as sa\n'), ((108, 76, 108, 85), 'sqlalchemy.TEXT', 'sa.TEXT', ({}, {}), '()', True, 'import sqlalchemy as sa\n')]
vikas-kundu/phonedict
lib/core/parse/cmdline.py
6795cab0024e792340c43d95552162a985b891f6
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# coded by Vikas Kundu https://github.com/vikas-kundu
# -------------------------------------------

import sys
import getopt
import time
import config

from lib.core.parse import banner
from lib.core import util
from lib.core import installer


def options():
    argv = sys.argv[1:]
    try:
        opts, args = getopt.getopt(argv, 'm:t:c:o:n:whi', ['mode','task','country','output','number','wizard','help','install'])
        if((len(sys.argv)==9) or (len(sys.argv)==2)):
            pass
        else:
            print("Error! Some parameter is missing please check!")
            time.sleep(2)
            banner.usage()
            sys.exit()
    except getopt.GetoptError as err:
        print(err)
        banner.usage()
        sys.exit(2)

    for (o, a) in opts:
        if(o in('-i','--install')):
            if(util.packages_check()==False):
                installer.start_install()
            else:
                print("Packages already installed!")
                sys.exit()
        elif (o in ('-w', '--wizard')):
            config.wizard=True
        elif o in ('-h','--help'):
            banner.usage()
            sys.exit()
        elif o in ('-m','--mode'):
            config.str_mode=str(a)
        elif o in ('-t','--task'):
            config.str_task=str(a)
        elif o in ('-c','--country'):
            config.str_country=str(a.lower().strip('"\''))
        elif o in ('-o','--output'):
            config.str_output=str(a.strip('"\''))
        elif o in ('-n','--number'):
            config.str_number=str(a.strip('"\''))
        else:
            print("Something went wrong with argument parsing!")
            time.sleep(2)
            banner.usage()
            sys.exit()
[((18, 21, 18, 128), 'getopt.getopt', 'getopt.getopt', ({(18, 35, 18, 39): 'argv', (18, 41, 18, 56): '"""m:t:c:o:n:whi"""', (18, 58, 18, 127): "['mode', 'task', 'country', 'output', 'number', 'wizard', 'help', 'install']"}, {}), "(argv, 'm:t:c:o:n:whi', ['mode', 'task', 'country', 'output',\n 'number', 'wizard', 'help', 'install'])", False, 'import getopt\n'), ((23, 12, 23, 25), 'time.sleep', 'time.sleep', ({(23, 23, 23, 24): '(2)'}, {}), '(2)', False, 'import time\n'), ((24, 12, 24, 26), 'lib.core.parse.banner.usage', 'banner.usage', ({}, {}), '()', False, 'from lib.core.parse import banner\n'), ((25, 12, 25, 22), 'sys.exit', 'sys.exit', ({}, {}), '()', False, 'import sys\n'), ((29, 8, 29, 22), 'lib.core.parse.banner.usage', 'banner.usage', ({}, {}), '()', False, 'from lib.core.parse import banner\n'), ((30, 8, 30, 19), 'sys.exit', 'sys.exit', ({(30, 17, 30, 18): '(2)'}, {}), '(2)', False, 'import sys\n'), ((34, 15, 34, 36), 'lib.core.util.packages_check', 'util.packages_check', ({}, {}), '()', False, 'from lib.core import util\n'), ((35, 16, 35, 41), 'lib.core.installer.start_install', 'installer.start_install', ({}, {}), '()', False, 'from lib.core import installer\n'), ((38, 16, 38, 26), 'sys.exit', 'sys.exit', ({}, {}), '()', False, 'import sys\n'), ((44, 12, 44, 26), 'lib.core.parse.banner.usage', 'banner.usage', ({}, {}), '()', False, 'from lib.core.parse import banner\n'), ((45, 12, 45, 22), 'sys.exit', 'sys.exit', ({}, {}), '()', False, 'import sys\n'), ((64, 12, 64, 25), 'time.sleep', 'time.sleep', ({(64, 23, 64, 24): '(2)'}, {}), '(2)', False, 'import time\n'), ((65, 12, 65, 26), 'lib.core.parse.banner.usage', 'banner.usage', ({}, {}), '()', False, 'from lib.core.parse import banner\n'), ((66, 12, 66, 22), 'sys.exit', 'sys.exit', ({}, {}), '()', False, 'import sys\n')]
shubhamdang/mistral
mistral/tests/unit/utils/test_utils.py
3c83837f6ce1e4ab74fb519a63e82eaae70f9d2d
# Copyright 2013 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
# Copyright 2015 - Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from mistral import exceptions as exc
from mistral.tests.unit import base
from mistral.utils import ssh_utils
from mistral_lib import utils


class UtilsTest(base.BaseTest):

    def test_itersubclasses(self):
        class A(object):
            pass

        class B(A):
            pass

        class C(A):
            pass

        class D(C):
            pass

        self.assertEqual([B, C, D], list(utils.iter_subclasses(A)))

    def test_paramiko_to_private_key(self):
        self.assertRaises(
            exc.DataAccessException,
            ssh_utils._to_paramiko_private_key,
            "../dir"
        )
        self.assertRaises(
            exc.DataAccessException,
            ssh_utils._to_paramiko_private_key,
            "..\\dir"
        )
        self.assertIsNone(
            ssh_utils._to_paramiko_private_key(private_key_filename=None,
                                               password='pass')
        )
[((53, 12, 54, 63), 'mistral.utils.ssh_utils._to_paramiko_private_key', 'ssh_utils._to_paramiko_private_key', (), '', False, 'from mistral.utils import ssh_utils\n'), ((38, 41, 38, 65), 'mistral_lib.utils.iter_subclasses', 'utils.iter_subclasses', ({(38, 63, 38, 64): 'A'}, {}), '(A)', False, 'from mistral_lib import utils\n')]
scls19fr/shoutcast_api
shoutcast_api/shoutcast_request.py
89a9e826b82411ae5f24ea28e1b1cb22eaaa0890
import xmltodict
import json
from .models import Tunein
from .utils import _init_session
from .Exceptions import APIException

base_url = 'http://api.shoutcast.com'
tunein_url = 'http://yp.shoutcast.com/{base}?id={id}'

tuneins = [Tunein('/sbin/tunein-station.pls'), Tunein('/sbin/tunein-station.m3u'), Tunein('/sbin/tunein-station.xspf')]


def call_api_xml(endpoint, params=None, session=None):
    session = _init_session(session)
    request_url = "{}{}".format(base_url, endpoint)
    response = session.get(request_url, params=params)
    if response.status_code == 200:
        response_as_dict = xmltodict.parse(response.content)
        api_response = response_as_dict.get('response')
        if api_response:
            api_status_code = int(api_response.get('statusCode'))
            message = "statusText:{}, statusDetailText:{}".format(
                api_response.get('statusText'), api_response.get('statusDetailText')
            )
            raise APIException(message, code=api_status_code)
        return response_as_dict
    raise APIException(response.content, code=response.status_code)


def call_api_json(endpoint, params=None, session=None):
    session = _init_session(session)
    request_url = "{}{}".format(base_url, endpoint)
    response = session.get(request_url, params=params)
    if response.status_code == 200:
        json_response = json.loads(response.content.decode('utf-8'))
        api_response = json_response.get('response')
        api_status_code = int(api_response.get('statusCode'))
        if api_status_code != 200:
            message = "statusText:{}, statusDetailText:{}".format(
                api_response.get('statusText'), api_response.get('statusDetailText', '')
            )
            raise APIException(message, code=api_status_code)
        return json_response.get('response')['data']
    raise APIException(response.reason, code=response.status_code)


def call_api_tunein(station_id: int, session=None):
    session = _init_session(session)
    url = tunein_url.format(base=tuneins[2], id=station_id)
    response = session.get(url)
    if response.status_code == 200:
        api_response = xmltodict.parse(response.content.decode('utf-8'))
        return api_response
    raise APIException(response.reason, code=response.status_code)


def call_api_tunein_any(base: Tunein, station_id: int, session=None):
    session = _init_session(session)
    url = tunein_url.format(base=base, id=station_id)
    response = session.get(url)
    if response.status_code == 200:
        return response.content.decode('utf-8')
    raise APIException(response.reason, code=response.status_code)
[((18, 27, 18, 60), 'xmltodict.parse', 'xmltodict.parse', ({(18, 43, 18, 59): 'response.content'}, {}), '(response.content)', False, 'import xmltodict\n')]
amp89/django-app-permissions
django_app_permissions/management/commands/resolve_app_groups.py
11f576d2118f5b73fdbefa0675acc3374a5a9749
from django.core.management.base import BaseCommand, no_translations
from django.contrib.auth.models import Group
from django.conf import settings
import sys


class Command(BaseCommand):

    def handle(self, *args, **options):
        sys.stdout.write("\nResolving app groups")
        app_list = [app_name.lower() for app_name in settings.ACCESS_CONTROLLED_INSTALLED_APPS]
        for app_name in app_list:
            created = Group.objects.get_or_create(name=app_name)
            sys.stdout.write(f"\n{app_name}, new={created}")
        sys.stdout.write("\n")
[((12, 8, 12, 50), 'sys.stdout.write', 'sys.stdout.write', ({(12, 25, 12, 49): '"""\nResolving app groups"""'}, {}), '("""\nResolving app groups""")', False, 'import sys\n'), ((18, 8, 18, 30), 'sys.stdout.write', 'sys.stdout.write', ({(18, 25, 18, 29): '"""\n"""'}, {}), "('\\n')", False, 'import sys\n'), ((15, 22, 15, 64), 'django.contrib.auth.models.Group.objects.get_or_create', 'Group.objects.get_or_create', (), '', False, 'from django.contrib.auth.models import Group\n'), ((16, 12, 16, 60), 'sys.stdout.write', 'sys.stdout.write', ({(16, 29, 16, 59): 'f"""\n{app_name}, new={created}"""'}, {}), '(f"""\n{app_name}, new={created}""")', False, 'import sys\n')]
sunzz679/swift-2.4.0--source-read
swift/common/db.py
64355268da5265440f5f7e8d280dd8cd4c2cf2a2
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

""" Database code for Swift """

from contextlib import contextmanager, closing
import hashlib
import logging
import os
from uuid import uuid4
import sys
import time
import errno
import six.moves.cPickle as pickle
from swift import gettext_ as _
from tempfile import mkstemp

from eventlet import sleep, Timeout
import sqlite3

from swift.common.constraints import MAX_META_COUNT, MAX_META_OVERALL_SIZE
from swift.common.utils import json, Timestamp, renamer, \
    mkdirs, lock_parent_directory, fallocate
from swift.common.exceptions import LockTimeout
from swift.common.swob import HTTPBadRequest


#: Whether calls will be made to preallocate disk space for database files.
DB_PREALLOCATION = False
#: Timeout for trying to connect to a DB
BROKER_TIMEOUT = 25
#: Pickle protocol to use
PICKLE_PROTOCOL = 2
#: Max number of pending entries
PENDING_CAP = 131072


def utf8encode(*args):
    return [(s.encode('utf8') if isinstance(s, unicode) else s) for s in args]


def utf8encodekeys(metadata):
    uni_keys = [k for k in metadata if isinstance(k, unicode)]
    for k in uni_keys:
        sv = metadata[k]
        del metadata[k]
        metadata[k.encode('utf-8')] = sv


def _db_timeout(timeout, db_file, call):
    with LockTimeout(timeout, db_file):
        retry_wait = 0.001
        while True:
            try:
                return call()
            except sqlite3.OperationalError as e:
                if 'locked' not in str(e):
                    raise
            sleep(retry_wait)
            retry_wait = min(retry_wait * 2, 0.05)


class DatabaseConnectionError(sqlite3.DatabaseError):
    """More friendly error messages for DB Errors."""

    def __init__(self, path, msg, timeout=0):
        self.path = path
        self.timeout = timeout
        self.msg = msg

    def __str__(self):
        return 'DB connection error (%s, %s):\n%s' % (
            self.path, self.timeout, self.msg)


class DatabaseAlreadyExists(sqlite3.DatabaseError):
    """More friendly error messages for DB Errors."""

    def __init__(self, path):
        self.path = path

    def __str__(self):
        return 'DB %s already exists' % self.path


class GreenDBConnection(sqlite3.Connection):
    """SQLite DB Connection handler that plays well with eventlet."""

    def __init__(self, database, timeout=None, *args, **kwargs):
        if timeout is None:
            timeout = BROKER_TIMEOUT
        self.timeout = timeout
        self.db_file = database
        super(GreenDBConnection, self).__init__(database, 0, *args, **kwargs)

    def cursor(self, cls=None):
        if cls is None:
            cls = GreenDBCursor
        return sqlite3.Connection.cursor(self, cls)

    def commit(self):
        return _db_timeout(
            self.timeout, self.db_file,
            lambda: sqlite3.Connection.commit(self))


class GreenDBCursor(sqlite3.Cursor):
    """SQLite Cursor handler that plays well with eventlet."""

    def __init__(self, *args, **kwargs):
        self.timeout = args[0].timeout
        self.db_file = args[0].db_file
        super(GreenDBCursor, self).__init__(*args, **kwargs)

    def execute(self, *args, **kwargs):
        return _db_timeout(
            self.timeout, self.db_file, lambda: sqlite3.Cursor.execute(
                self, *args, **kwargs))


def dict_factory(crs, row):
    """
    This should only be used when you need a real dict,
    i.e. when you're going to serialize the results.
    """
    return dict(
        ((col[0], row[idx]) for idx, col in enumerate(crs.description)))


def chexor(old, name, timestamp):
    """
    Each entry in the account and container databases is XORed by the 128-bit
    hash on insert or delete. This serves as a rolling, order-independent hash
    of the contents. (check + XOR)

    :param old: hex representation of the current DB hash
    :param name: name of the object or container being inserted
    :param timestamp: internalized timestamp of the new record
    :returns: a hex representation of the new hash value
    """
    if name is None:
        raise Exception('name is None!')
    new = hashlib.md5(('%s-%s' % (name, timestamp)).encode('utf8')).hexdigest()
    return '%032x' % (int(old, 16) ^ int(new, 16))


def get_db_connection(path, timeout=30, okay_to_create=False):
    """
    Returns a properly configured SQLite database connection.

    :param path: path to DB
    :param timeout: timeout for connection
    :param okay_to_create: if True, create the DB if it doesn't exist
    :returns: DB connection object
    """
    try:
        connect_time = time.time()
        conn = sqlite3.connect(path, check_same_thread=False,
                               factory=GreenDBConnection, timeout=timeout)
        if path != ':memory:' and not okay_to_create:
            # attempt to detect and fail when connect creates the db file
            stat = os.stat(path)
            if stat.st_size == 0 and stat.st_ctime >= connect_time:
                os.unlink(path)
                raise DatabaseConnectionError(path,
                                              'DB file created by connect?')
        conn.row_factory = sqlite3.Row
        conn.text_factory = str
        with closing(conn.cursor()) as cur:
            cur.execute('PRAGMA synchronous = NORMAL')
            cur.execute('PRAGMA count_changes = OFF')
            cur.execute('PRAGMA temp_store = MEMORY')
            cur.execute('PRAGMA journal_mode = DELETE')
        conn.create_function('chexor', 3, chexor)
    except sqlite3.DatabaseError:
        import traceback
        raise DatabaseConnectionError(path, traceback.format_exc(),
                                      timeout=timeout)
    return conn


class DatabaseBroker(object):
    """Encapsulates working with a database."""

    def __init__(self, db_file, timeout=BROKER_TIMEOUT, logger=None,
                 account=None, container=None, pending_timeout=None,
                 stale_reads_ok=False):
        """Encapsulates working with a database."""
        self.conn = None
        self.db_file = db_file
        self.pending_file = self.db_file + '.pending'
        self.pending_timeout = pending_timeout or 10
        self.stale_reads_ok = stale_reads_ok
        self.db_dir = os.path.dirname(db_file)
        self.timeout = timeout
        self.logger = logger or logging.getLogger()
        self.account = account
        self.container = container
        self._db_version = -1

    def __str__(self):
        """
        Returns a string identifying the entity under broker to a human.
        The baseline implementation returns a full pathname to a database.
        This is vital for useful diagnostics.
        """
        return self.db_file

    def initialize(self, put_timestamp=None, storage_policy_index=None):
        """
        Create the DB

        The storage_policy_index is passed through to the subclass's
        ``_initialize`` method. It is ignored by ``AccountBroker``.

        :param put_timestamp: internalized timestamp of initial PUT request
        :param storage_policy_index: only required for containers
        """
        if self.db_file == ':memory:':
            tmp_db_file = None
            conn = get_db_connection(self.db_file, self.timeout)
        else:
            mkdirs(self.db_dir)
            fd, tmp_db_file = mkstemp(suffix='.tmp', dir=self.db_dir)
            os.close(fd)
            conn = sqlite3.connect(tmp_db_file, check_same_thread=False,
                                   factory=GreenDBConnection, timeout=0)
        # creating dbs implicitly does a lot of transactions, so we
        # pick fast, unsafe options here and do a big fsync at the end.
        with closing(conn.cursor()) as cur:
            cur.execute('PRAGMA synchronous = OFF')
            cur.execute('PRAGMA temp_store = MEMORY')
            cur.execute('PRAGMA journal_mode = MEMORY')
        conn.create_function('chexor', 3, chexor)
        conn.row_factory = sqlite3.Row
        conn.text_factory = str
        conn.executescript("""
            CREATE TABLE outgoing_sync (
                remote_id TEXT UNIQUE,
                sync_point INTEGER,
                updated_at TEXT DEFAULT 0
            );
            CREATE TABLE incoming_sync (
                remote_id TEXT UNIQUE,
                sync_point INTEGER,
                updated_at TEXT DEFAULT 0
            );
            CREATE TRIGGER outgoing_sync_insert AFTER INSERT ON outgoing_sync
            BEGIN
                UPDATE outgoing_sync
                SET updated_at = STRFTIME('%s', 'NOW')
                WHERE ROWID = new.ROWID;
            END;
            CREATE TRIGGER outgoing_sync_update AFTER UPDATE ON outgoing_sync
            BEGIN
                UPDATE outgoing_sync
                SET updated_at = STRFTIME('%s', 'NOW')
                WHERE ROWID = new.ROWID;
            END;
            CREATE TRIGGER incoming_sync_insert AFTER INSERT ON incoming_sync
            BEGIN
                UPDATE incoming_sync
                SET updated_at = STRFTIME('%s', 'NOW')
                WHERE ROWID = new.ROWID;
            END;
            CREATE TRIGGER incoming_sync_update AFTER UPDATE ON incoming_sync
            BEGIN
                UPDATE incoming_sync
                SET updated_at = STRFTIME('%s', 'NOW')
                WHERE ROWID = new.ROWID;
            END;
        """)
        if not put_timestamp:
            put_timestamp = Timestamp(0).internal
        self._initialize(conn, put_timestamp,
                         storage_policy_index=storage_policy_index)
        conn.commit()
        if tmp_db_file:
            conn.close()
            with open(tmp_db_file, 'r+b') as fp:
                os.fsync(fp.fileno())
            with lock_parent_directory(self.db_file, self.pending_timeout):
                if os.path.exists(self.db_file):
                    # It's as if there was a "condition" where different parts
                    # of the system were "racing" each other.
                    raise DatabaseAlreadyExists(self.db_file)
                renamer(tmp_db_file, self.db_file)
            self.conn = get_db_connection(self.db_file, self.timeout)
        else:
            self.conn = conn

    def delete_db(self, timestamp):
        """
        Mark the DB as deleted

        :param timestamp: internalized delete timestamp
        """
        # first, clear the metadata
        cleared_meta = {}
        for k in self.metadata:
            cleared_meta[k] = ('', timestamp)
        self.update_metadata(cleared_meta)
        # then mark the db as deleted
        with self.get() as conn:
            self._delete_db(conn, timestamp)
            conn.commit()

    def possibly_quarantine(self, exc_type, exc_value, exc_traceback):
        """
        Checks the exception info to see if it indicates a quarantine situation
        (malformed or corrupted database). If not, the original exception will
        be reraised. If so, the database will be quarantined and a new
        sqlite3.DatabaseError will be raised indicating the action taken.
        """
        if 'database disk image is malformed' in str(exc_value):
            exc_hint = 'malformed'
        elif 'file is encrypted or is not a database' in str(exc_value):
            exc_hint = 'corrupted'
        elif 'disk I/O error' in str(exc_value):
            exc_hint = 'disk error while accessing'
        else:
            raise exc_type, exc_value, exc_traceback
        prefix_path = os.path.dirname(self.db_dir)
        partition_path = os.path.dirname(prefix_path)
        dbs_path = os.path.dirname(partition_path)
        device_path = os.path.dirname(dbs_path)
        quar_path = os.path.join(device_path, 'quarantined',
                                 self.db_type + 's',
                                 os.path.basename(self.db_dir))
        try:
            renamer(self.db_dir, quar_path, fsync=False)
        except OSError as e:
            if e.errno not in (errno.EEXIST, errno.ENOTEMPTY):
                raise
            quar_path = "%s-%s" % (quar_path, uuid4().hex)
            renamer(self.db_dir, quar_path, fsync=False)
        detail = _('Quarantined %s to %s due to %s database') % \
            (self.db_dir, quar_path, exc_hint)
        self.logger.error(detail)
        raise sqlite3.DatabaseError(detail)

    @contextmanager
    def get(self):
        """Use with the "with" statement; returns a database connection."""
        if not self.conn:
            if self.db_file != ':memory:' and os.path.exists(self.db_file):
                try:
                    self.conn = get_db_connection(self.db_file, self.timeout)
                except (sqlite3.DatabaseError, DatabaseConnectionError):
                    self.possibly_quarantine(*sys.exc_info())
            else:
                raise DatabaseConnectionError(self.db_file, "DB doesn't exist")
        conn = self.conn
        self.conn = None
        try:
            yield conn
            conn.rollback()
            self.conn = conn
        except sqlite3.DatabaseError:
            try:
                conn.close()
            except Exception:
                pass
            self.possibly_quarantine(*sys.exc_info())
        except (Exception, Timeout):
            conn.close()
            raise

    @contextmanager
    def lock(self):
        """Use with the "with" statement; locks a database."""
        if not self.conn:
            if self.db_file != ':memory:' and os.path.exists(self.db_file):
                self.conn = get_db_connection(self.db_file, self.timeout)
            else:
                raise DatabaseConnectionError(self.db_file, "DB doesn't exist")
        conn = self.conn
        self.conn = None
        orig_isolation_level = conn.isolation_level
        conn.isolation_level = None
        conn.execute('BEGIN IMMEDIATE')
        try:
            yield True
        except (Exception, Timeout):
            pass
        try:
            conn.execute('ROLLBACK')
            conn.isolation_level = orig_isolation_level
            self.conn = conn
        except (Exception, Timeout):
            logging.exception(
                _('Broker error trying to rollback locked connection'))
            conn.close()

    def newid(self, remote_id):
        """
        Re-id the database. This should be called after an rsync.

        :param remote_id: the ID of the remote database being rsynced in
        """
        with self.get() as conn:
            row = conn.execute('''
                UPDATE %s_stat SET id=?
            ''' % self.db_type, (str(uuid4()),))
            row = conn.execute('''
                SELECT ROWID FROM %s ORDER BY ROWID DESC LIMIT 1
            ''' % self.db_contains_type).fetchone()
            sync_point = row['ROWID'] if row else -1
            conn.execute('''
                INSERT OR REPLACE INTO incoming_sync (sync_point, remote_id)
                VALUES (?, ?)
            ''', (sync_point, remote_id))
            self._newid(conn)
            conn.commit()

    def _newid(self, conn):
        # Override for additional work when receiving an rsynced db.
        pass

    def _is_deleted(self, conn):
        """
        Check if the database is considered deleted

        :param conn: database conn

        :returns: True if the DB is considered to be deleted, False otherwise
        """
        raise NotImplementedError()

    def is_deleted(self):
        """
        Check if the DB is considered to be deleted.
:returns: True if the DB is considered to be deleted, False otherwise """ if self.db_file != ':memory:' and not os.path.exists(self.db_file): return True self._commit_puts_stale_ok() with self.get() as conn: return self._is_deleted(conn) def merge_timestamps(self, created_at, put_timestamp, delete_timestamp): """ Used in replication to handle updating timestamps. :param created_at: create timestamp :param put_timestamp: put timestamp :param delete_timestamp: delete timestamp """ with self.get() as conn: old_status = self._is_deleted(conn) conn.execute(''' UPDATE %s_stat SET created_at=MIN(?, created_at), put_timestamp=MAX(?, put_timestamp), delete_timestamp=MAX(?, delete_timestamp) ''' % self.db_type, (created_at, put_timestamp, delete_timestamp)) if old_status != self._is_deleted(conn): timestamp = Timestamp(time.time()) self._update_status_changed_at(conn, timestamp.internal) conn.commit() def get_items_since(self, start, count): """ Get a list of objects in the database between start and end. :param start: start ROWID :param count: number to get :returns: list of objects between start and end """ self._commit_puts_stale_ok() with self.get() as conn: curs = conn.execute(''' SELECT * FROM %s WHERE ROWID > ? ORDER BY ROWID ASC LIMIT ? ''' % self.db_contains_type, (start, count)) curs.row_factory = dict_factory return [r for r in curs] def get_sync(self, id, incoming=True): """ Gets the most recent sync point for a server from the sync table. :param id: remote ID to get the sync_point for :param incoming: if True, get the last incoming sync, otherwise get the last outgoing sync :returns: the sync point, or -1 if the id doesn't exist. """ with self.get() as conn: row = conn.execute( "SELECT sync_point FROM %s_sync WHERE remote_id=?" % ('incoming' if incoming else 'outgoing'), (id,)).fetchone() if not row: return -1 return row['sync_point'] def get_syncs(self, incoming=True): """ Get a serialized copy of the sync table. :param incoming: if True, get the last incoming sync, otherwise get the last outgoing sync :returns: list of {'remote_id', 'sync_point'} """ with self.get() as conn: curs = conn.execute(''' SELECT remote_id, sync_point FROM %s_sync ''' % ('incoming' if incoming else 'outgoing')) result = [] for row in curs: result.append({'remote_id': row[0], 'sync_point': row[1]}) return result def get_max_row(self): query = ''' SELECT SQLITE_SEQUENCE.seq FROM SQLITE_SEQUENCE WHERE SQLITE_SEQUENCE.name == '%s' LIMIT 1 ''' % (self.db_contains_type) with self.get() as conn: row = conn.execute(query).fetchone() return row[0] if row else -1 def get_replication_info(self): """ Get information about the DB required for replication. :returns: dict containing keys from get_info plus max_row and metadata Note:: get_info's <db_contains_type>_count is translated to just "count" and metadata is the raw string. 
""" info = self.get_info() info['count'] = info.pop('%s_count' % self.db_contains_type) info['metadata'] = self.get_raw_metadata() info['max_row'] = self.get_max_row() return info def get_info(self): self._commit_puts_stale_ok() with self.get() as conn: curs = conn.execute('SELECT * from %s_stat' % self.db_type) curs.row_factory = dict_factory return curs.fetchone() #在数据库中添加一条记录 def put_record(self, record): if self.db_file == ':memory:': self.merge_items([record]) return if not os.path.exists(self.db_file): raise DatabaseConnectionError(self.db_file, "DB doesn't exist") #对数据库父目录加锁 with lock_parent_directory(self.pending_file, self.pending_timeout): pending_size = 0 try: pending_size = os.path.getsize(self.pending_file) except OSError as err: if err.errno != errno.ENOENT: raise if pending_size > PENDING_CAP: self._commit_puts([record]) else: #将对象记录写入数据库文件中 with open(self.pending_file, 'a+b') as fp: # Colons aren't used in base64 encoding; so they are our # delimiter fp.write(':') fp.write(pickle.dumps( self.make_tuple_for_pickle(record), protocol=PICKLE_PROTOCOL).encode('base64')) fp.flush() def _commit_puts(self, item_list=None): """ Scan for .pending files and commit the found records by feeding them to merge_items(). Assume that lock_parent_directory has already been called. :param item_list: A list of items to commit in addition to .pending """ if self.db_file == ':memory:' or not os.path.exists(self.pending_file): return if item_list is None: item_list = [] self._preallocate() if not os.path.getsize(self.pending_file): if item_list: self.merge_items(item_list) return with open(self.pending_file, 'r+b') as fp: for entry in fp.read().split(':'): if entry: try: self._commit_puts_load(item_list, entry) except Exception: self.logger.exception( _('Invalid pending entry %(file)s: %(entry)s'), {'file': self.pending_file, 'entry': entry}) if item_list: self.merge_items(item_list) try: os.ftruncate(fp.fileno(), 0) except OSError as err: if err.errno != errno.ENOENT: raise def _commit_puts_stale_ok(self): """ Catch failures of _commit_puts() if broker is intended for reading of stats, and thus does not care for pending updates. """ if self.db_file == ':memory:' or not os.path.exists(self.pending_file): return try: with lock_parent_directory(self.pending_file, self.pending_timeout): self._commit_puts() except LockTimeout: if not self.stale_reads_ok: raise def _commit_puts_load(self, item_list, entry): """ Unmarshall the :param:entry and append it to :param:item_list. This is implemented by a particular broker to be compatible with its :func:`merge_items`. """ raise NotImplementedError def make_tuple_for_pickle(self, record): """ Turn this db record dict into the format this service uses for pending pickles. """ raise NotImplementedError def merge_syncs(self, sync_points, incoming=True): """ Merge a list of sync points with the incoming sync table. :param sync_points: list of sync points where a sync point is a dict of {'sync_point', 'remote_id'} :param incoming: if True, get the last incoming sync, otherwise get the last outgoing sync """ with self.get() as conn: for rec in sync_points: try: conn.execute(''' INSERT INTO %s_sync (sync_point, remote_id) VALUES (?, ?) ''' % ('incoming' if incoming else 'outgoing'), (rec['sync_point'], rec['remote_id'])) except sqlite3.IntegrityError: conn.execute(''' UPDATE %s_sync SET sync_point=max(?, sync_point) WHERE remote_id=? 
''' % ('incoming' if incoming else 'outgoing'), (rec['sync_point'], rec['remote_id'])) conn.commit() def _preallocate(self): """ The idea is to allocate space in front of an expanding db. If it gets within 512k of a boundary, it allocates to the next boundary. Boundaries are 2m, 5m, 10m, 25m, 50m, then every 50m after. """ if not DB_PREALLOCATION or self.db_file == ':memory:': return MB = (1024 * 1024) def prealloc_points(): for pm in (1, 2, 5, 10, 25, 50): yield pm * MB while True: pm += 50 yield pm * MB stat = os.stat(self.db_file) file_size = stat.st_size allocated_size = stat.st_blocks * 512 for point in prealloc_points(): if file_size <= point - MB / 2: prealloc_size = point break if allocated_size < prealloc_size: with open(self.db_file, 'rb+') as fp: fallocate(fp.fileno(), int(prealloc_size)) def get_raw_metadata(self): with self.get() as conn: try: metadata = conn.execute('SELECT metadata FROM %s_stat' % self.db_type).fetchone()[0] except sqlite3.OperationalError as err: if 'no such column: metadata' not in str(err): raise metadata = '' return metadata @property def metadata(self): """ Returns the metadata dict for the database. The metadata dict values are tuples of (value, timestamp) where the timestamp indicates when that key was set to that value. """ metadata = self.get_raw_metadata() if metadata: metadata = json.loads(metadata) utf8encodekeys(metadata) else: metadata = {} return metadata @staticmethod def validate_metadata(metadata): """ Validates that metadata_falls within acceptable limits. :param metadata: to be validated :raises: HTTPBadRequest if MAX_META_COUNT or MAX_META_OVERALL_SIZE is exceeded """ meta_count = 0 meta_size = 0 for key, (value, timestamp) in metadata.items(): key = key.lower() if value != '' and (key.startswith('x-account-meta') or key.startswith('x-container-meta')): prefix = 'x-account-meta-' if key.startswith('x-container-meta-'): prefix = 'x-container-meta-' key = key[len(prefix):] meta_count = meta_count + 1 meta_size = meta_size + len(key) + len(value) if meta_count > MAX_META_COUNT: raise HTTPBadRequest('Too many metadata items; max %d' % MAX_META_COUNT) if meta_size > MAX_META_OVERALL_SIZE: raise HTTPBadRequest('Total metadata too large; max %d' % MAX_META_OVERALL_SIZE) def update_metadata(self, metadata_updates, validate_metadata=False): """ Updates the metadata dict for the database. The metadata dict values are tuples of (value, timestamp) where the timestamp indicates when that key was set to that value. Key/values will only be overwritten if the timestamp is newer. To delete a key, set its value to ('', timestamp). 
These empty keys will eventually be removed by :func:`reclaim` """ #从数据库中查询元数据信息,生成字典格式,保存到old_metadata old_metadata = self.metadata #如果新添加的元数据是原来元数据的子集 if set(metadata_updates).issubset(set(old_metadata)): #查询时间戳,由于网络存在乱序,所以,只更新时间最后请求的元数据 for key, (value, timestamp) in metadata_updates.items(): if timestamp > old_metadata[key][1]: break else: #所有的元数据均过期,则不作任何处理 return #到这里,就是存在需要更新的元数据 with self.get() as conn: try: md = conn.execute('SELECT metadata FROM %s_stat' % self.db_type).fetchone()[0] md = json.loads(md) if md else {} utf8encodekeys(md) except sqlite3.OperationalError as err: if 'no such column: metadata' not in str(err): raise conn.execute(""" ALTER TABLE %s_stat ADD COLUMN metadata TEXT DEFAULT '' """ % self.db_type) md = {} #遍历待更新的所有元数据,只更新不存在的元数据或时间戳最新的元数据 for key, value_timestamp in metadata_updates.items(): value, timestamp = value_timestamp if key not in md or timestamp > md[key][1]: md[key] = value_timestamp if validate_metadata: DatabaseBroker.validate_metadata(md) conn.execute('UPDATE %s_stat SET metadata = ?' % self.db_type, (json.dumps(md),)) conn.commit() def reclaim(self, age_timestamp, sync_timestamp): """ Delete rows from the db_contains_type table that are marked deleted and whose created_at timestamp is < age_timestamp. Also deletes rows from incoming_sync and outgoing_sync where the updated_at timestamp is < sync_timestamp. In addition, this calls the DatabaseBroker's :func:`_reclaim` method. :param age_timestamp: max created_at timestamp of object rows to delete :param sync_timestamp: max update_at timestamp of sync rows to delete """ if self.db_file != ':memory:' and os.path.exists(self.pending_file): with lock_parent_directory(self.pending_file, self.pending_timeout): self._commit_puts() with self.get() as conn: conn.execute(''' DELETE FROM %s WHERE deleted = 1 AND %s < ? ''' % (self.db_contains_type, self.db_reclaim_timestamp), (age_timestamp,)) try: conn.execute(''' DELETE FROM outgoing_sync WHERE updated_at < ? ''', (sync_timestamp,)) conn.execute(''' DELETE FROM incoming_sync WHERE updated_at < ? ''', (sync_timestamp,)) except sqlite3.OperationalError as err: # Old dbs didn't have updated_at in the _sync tables. if 'no such column: updated_at' not in str(err): raise DatabaseBroker._reclaim(self, conn, age_timestamp) conn.commit() def _reclaim(self, conn, timestamp): """ Removes any empty metadata values older than the timestamp using the given database connection. This function will not call commit on the conn, but will instead return True if the database needs committing. This function was created as a worker to limit transactions and commits from other related functions. :param conn: Database connection to reclaim metadata within. :param timestamp: Empty metadata items last updated before this timestamp will be removed. :returns: True if conn.commit() should be called """ try: md = conn.execute('SELECT metadata FROM %s_stat' % self.db_type).fetchone()[0] if md: md = json.loads(md) keys_to_delete = [] for key, (value, value_timestamp) in md.items(): if value == '' and value_timestamp < timestamp: keys_to_delete.append(key) if keys_to_delete: for key in keys_to_delete: del md[key] conn.execute('UPDATE %s_stat SET metadata = ?' % self.db_type, (json.dumps(md),)) return True except sqlite3.OperationalError as err: if 'no such column: metadata' not in str(err): raise return False def update_put_timestamp(self, timestamp): """ Update the put_timestamp. Only modifies it if it is greater than the current timestamp. 
:param timestamp: internalized put timestamp """ with self.get() as conn: conn.execute( 'UPDATE %s_stat SET put_timestamp = ?' ' WHERE put_timestamp < ?' % self.db_type, (timestamp, timestamp)) conn.commit() def update_status_changed_at(self, timestamp): """ Update the status_changed_at field in the stat table. Only modifies status_changed_at if the timestamp is greater than the current status_changed_at timestamp. :param timestamp: internalized timestamp """ with self.get() as conn: self._update_status_changed_at(conn, timestamp) conn.commit() def _update_status_changed_at(self, conn, timestamp): conn.execute( 'UPDATE %s_stat SET status_changed_at = ?' ' WHERE status_changed_at < ?' % self.db_type, (timestamp, timestamp))
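The order-independent hash maintained by chexor() above is easiest to see with a tiny standalone example. The snippet below is an illustrative sketch, not part of swift/common/db.py; the record names and timestamps are made up, and chexor_demo simply restates the computation from the module.

import hashlib

def chexor_demo(old, name, timestamp):
    # Same computation as chexor() above: XOR the running 128-bit digest
    # with the md5 of "<name>-<timestamp>".
    new = hashlib.md5(('%s-%s' % (name, timestamp)).encode('utf8')).hexdigest()
    return '%032x' % (int(old, 16) ^ int(new, 16))

start = '0' * 32
h1 = chexor_demo(chexor_demo(start, 'obj-a', '0000000001.00000'),
                 'obj-b', '0000000002.00000')
h2 = chexor_demo(chexor_demo(start, 'obj-b', '0000000002.00000'),
                 'obj-a', '0000000001.00000')
assert h1 == h2  # same records applied in a different order give the same hash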
[]
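One more illustration for the swift/common/db.py content above: the boundary scheme used by DatabaseBroker._preallocate() can be checked with a small sketch. This is illustrative only; prealloc_points_demo mirrors the generator inside that method, and target_size is a hypothetical helper name for its boundary-selection loop.

MB = 1024 * 1024

def prealloc_points_demo():
    # Mirrors the boundary generator in _preallocate(): 1, 2, 5, 10, 25 and
    # 50 MiB, then every further 50 MiB.
    for pm in (1, 2, 5, 10, 25, 50):
        yield pm * MB
    while True:
        pm += 50
        yield pm * MB

def target_size(file_size):
    # Hypothetical helper: pick the first boundary that leaves at least
    # 512 KiB of headroom, the same test _preallocate() applies.
    for point in prealloc_points_demo():
        if file_size <= point - MB // 2:
            return point

assert target_size(int(0.3 * MB)) == 1 * MB
assert target_size(int(4.4 * MB)) == 5 * MB    # still more than 512 KiB below 5 MiB
assert target_size(int(4.8 * MB)) == 10 * MB   # within 512 KiB of 5 MiB, so jump to the next boundary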
mcrav/xdl
xdl/utils/prop_limits.py
c120a1cf50a9b668a79b118700930eb3d60a9298
"""Prop limits are used to validate the input given to xdl elements. For example, a volume property should be a positive number, optionally followed by volume units. The prop limit is used to check that input supplied is valid for that property. """ import re from typing import List, Optional class PropLimit(object): """Convenience class for storing prop limit. A prop limit is essentially a regex for validating the input to a given prop. For example, checking appropriate units are used or a value is within a certain range. Either ``regex`` or ``enum`` must be given when instantiating. If ``enum`` is given it will override whatever is given for ``regex`` and ``hint``. ``hint`` and ``default`` are both optional, but recommended, at least when using ``regex`` not ``enum``. Arguments: regex (str): Regex pattern that should match with valid values and not match with invalid values. hint (str): Useful hint for what valid value should look like, e.g. "Volume should be a number followed by volume units, e.g. '5 mL'." default (str): Default valid value. Should use standard units of the quantity involved, e.g. for volume, '0 mL'. enum (List[str]): List of values that the prop can take. This is used to automatically generate a regex from the list of allowed values. """ def __init__( self, regex: Optional[str] = None, hint: Optional[str] = '', default: Optional[str] = '', enum: Optional[List[str]] = [], ): if not regex and not enum: raise ValueError( 'Either `regex` or `enum` argument must be given.') self.default = default # If enum given generate regex from this self.enum = enum if enum: if not regex: self.regex = self.generate_enum_regex() else: self.regex = regex if not hint: self.hint = self.generate_enum_hint() else: self.hint = hint # Otherwise just set regex as attribute else: self.regex = regex self.hint = hint def validate(self, value: str) -> bool: """Validate given value against prop limit regex. Args: value (str): Value to validate against prop limit. Returns: bool: True if the value matches the prop limit, otherwise False. """ return re.match(self.regex, value) is not None def generate_enum_regex(self) -> str: """Generate regex from :py:attr:`enum`. Regex will match any of the items in :py:attr:`enum`. Returns: str: Regex that will match any of the strings in the :py:attr:`enum` list. """ regex = r'(' for item in self.enum: regex += item + r'|' regex = regex[:-1] + r')' return regex def generate_enum_hint(self) -> str: """Generate hint from :py:attr:`enum`. Hint will list all items in :py:attr:`enum`. Returns: str: Hint listing all items in :py:attr:`enum`. """ s = 'Expecting one of ' for item in self.enum[:-1]: s += f'"{item}", ' s = s[:-2] + f' or "{self.enum[-1]}".' return s ################## # Regex patterns # ################## #: Pattern to match a positive or negative float, #: e.g. '0', '-1', '1', '-10.3', '10.3', '0.0' would all be matched by this #: pattern. FLOAT_PATTERN: str = r'([-]?[0-9]+(?:[.][0-9]+)?)' #: Pattern to match a positive float, #: e.g. '0', 1', '10.3', '0.0' would all be matched by this pattern, but not #: '-10.3' or '-1'. POSITIVE_FLOAT_PATTERN: str = r'([0-9]+(?:[.][0-9]+)?)' #: Pattern to match boolean strings, specifically matching 'true' and 'false' #: case insensitvely. BOOL_PATTERN: str = r'(false|False|true|True)' #: Pattern to match all accepted volumes units case insensitvely, or empty string. 
VOLUME_UNITS_PATTERN: str = r'(l|L|litre|litres|liter|liters|ml|mL|cm3|cc|milliltre|millilitres|milliliter|milliliters|cl|cL|centiltre|centilitres|centiliter|centiliters|dl|dL|deciltre|decilitres|deciliter|deciliters|ul|uL|μl|μL|microlitre|microlitres|microliter|microliters)?' #: Pattern to match all accepted mass units, or empty string. MASS_UNITS_PATTERN: str = r'(g|gram|grams|kg|kilogram|kilograms|mg|milligram|milligrams|ug|μg|microgram|micrograms)?' #: Pattern to match all accepted temperature units, or empty string. TEMP_UNITS_PATTERN: str = r'(°C|K|F)?' #: Pattern to match all accepted time units, or empty string. TIME_UNITS_PATTERN = r'(days|day|h|hr|hrs|hour|hours|m|min|mins|minute|minutes|s|sec|secs|second|seconds)?' #: Pattern to match all accepted pressure units, or empty string. PRESSURE_UNITS_PATTERN = r'(mbar|bar|torr|Torr|mmhg|mmHg|atm|Pa|pa)?' #: Pattern to match all accepted rotation speed units, or empty string. ROTATION_SPEED_UNITS_PATTERN = r'(rpm|RPM)?' #: Pattern to match all accepted length units, or empty string. DISTANCE_UNITS_PATTERN = r'(nm|µm|mm|cm|m|km)?' #: Pattern to match all accepted mol units, or empty string. MOL_UNITS_PATTERN = r'(mmol|mol)?' ############### # Prop limits # ############### def generate_quantity_units_pattern( quantity_pattern: str, units_pattern: str, hint: Optional[str] = '', default: Optional[str] = '' ) -> PropLimit: """ Convenience function to generate PropLimit object for different quantity types, i.e. for variations on the number followed by unit pattern. Args: quantity_pattern (str): Pattern to match the number expected. This will typically be ``POSITIVE_FLOAT_PATTERN`` or ``FLOAT_PATTERN``. units_pattern (str): Pattern to match the units expected or empty string. Empty string is matched as not including units is allowed as in this case standard units are used. hint (str): Hint for the prop limit to tell the user what correct input should look like in the case of an errror. default (str): Default value for the prop limit, should use standard units for the prop involved. """ return PropLimit( regex=r'^((' + quantity_pattern + r'[ ]?'\ + units_pattern + r'$)|(^' + quantity_pattern + r'))$', hint=hint, default=default ) # NOTE: It is important here that defaults use the standard unit for that # quantity type as XDL app uses this to add in default units. #: Prop limit for volume props. VOLUME_PROP_LIMIT: PropLimit = PropLimit( regex=r'^(all|(' + POSITIVE_FLOAT_PATTERN + r'[ ]?'\ + VOLUME_UNITS_PATTERN + r')|(' + POSITIVE_FLOAT_PATTERN + r'))$', hint='Expecting number followed by standard volume units, e.g. "5.5 mL"', default='0 mL', ) #: Prop limit for mass props. MASS_PROP_LIMIT: PropLimit = generate_quantity_units_pattern( POSITIVE_FLOAT_PATTERN, MASS_UNITS_PATTERN, hint='Expecting number followed by standard mass units, e.g. "2.3 g"', default='0 g' ) #: Prop limit for mol props. MOL_PROP_LIMIT: PropLimit = generate_quantity_units_pattern( POSITIVE_FLOAT_PATTERN, MOL_UNITS_PATTERN, hint='Expecting number followed by mol or mmol, e.g. "2.3 mol".', default='0 mol', ) #: Prop limit for temp props. TEMP_PROP_LIMIT: PropLimit = generate_quantity_units_pattern( FLOAT_PATTERN, TEMP_UNITS_PATTERN, hint='Expecting number in degrees celsius or number followed by standard temperature units, e.g. "25", "25°C", "298 K".', default='25°C', ) #: Prop limit for time props. TIME_PROP_LIMIT: PropLimit = generate_quantity_units_pattern( POSITIVE_FLOAT_PATTERN, TIME_UNITS_PATTERN, hint='Expecting number followed by standard time units, e.g. 
"15 mins", "3 hrs".', default='0 secs' ) #: Prop limit for pressure props. PRESSURE_PROP_LIMIT: PropLimit = generate_quantity_units_pattern( POSITIVE_FLOAT_PATTERN, PRESSURE_UNITS_PATTERN, hint='Expecting number followed by standard pressure units, e.g. "50 mbar", "1 atm".', default='1013.25 mbar' ) #: Prop limit for rotation speed props. ROTATION_SPEED_PROP_LIMIT: PropLimit = generate_quantity_units_pattern( POSITIVE_FLOAT_PATTERN, ROTATION_SPEED_UNITS_PATTERN, hint='Expecting RPM value, e.g. "400 RPM".', default='400 RPM', ) #: Prop limit for wavelength props. WAVELENGTH_PROP_LIMIT: PropLimit = generate_quantity_units_pattern( POSITIVE_FLOAT_PATTERN, DISTANCE_UNITS_PATTERN, hint='Expecting wavelength, e.g. "400 nm".', default='400 nm' ) #: Prop limit for any props requiring a positive integer such as ``repeats``. #: Used if no explicit property is given and prop type is ``int``. POSITIVE_INT_PROP_LIMIT: PropLimit = PropLimit( r'[0-9]+', hint='Expecting positive integer value, e.g. "3"', default='1', ) #: Prop limit for any props requiring a positive float. Used if no explicit #: prop type is given and prop type is ``float``. POSITIVE_FLOAT_PROP_LIMIT: PropLimit = PropLimit( regex=POSITIVE_FLOAT_PATTERN, hint='Expecting positive float value, e.g. "3", "3.5"', default='0', ) #: Prop limit for any props requiring a boolean value. Used if no explicit prop #: type is given and prop type is ``bool``. BOOL_PROP_LIMIT: PropLimit = PropLimit( BOOL_PATTERN, hint='Expecting one of "false" or "true".', default='false', ) #: Prop limit for ``WashSolid`` ``stir`` prop. This is a special case as the #: value can be ``True``, ``False`` or ``'solvent'``. WASH_SOLID_STIR_PROP_LIMIT: PropLimit = PropLimit( r'(' + BOOL_PATTERN + r'|solvent)', enum=['true', 'solvent', 'false'], hint='Expecting one of "true", "false" or "solvent".', default='True' ) #: Prop limit for ``Separate`` ``purpose`` prop. One of 'extract' or 'wash'. SEPARATION_PURPOSE_PROP_LIMIT: PropLimit = PropLimit(enum=['extract', 'wash']) #: Prop limit for ``Separate`` ``product_phase`` prop. One of 'top' or 'bottom'. SEPARATION_PRODUCT_PHASE_PROP_LIMIT: PropLimit = PropLimit(enum=['top', 'bottom']) #: Prop limit for ``Add`` ``purpose`` prop. One of 'neutralize', 'precipitate', #: 'dissolve', 'basify', 'acidify' or 'dilute'. ADD_PURPOSE_PROP_LIMIT = PropLimit( enum=[ 'neutralize', 'precipitate', 'dissolve', 'basify', 'acidify', 'dilute', ] ) #: Prop limit for ``HeatChill`` ``purpose`` prop. One of 'control-exotherm', #: 'reaction' or 'unstable-reagent'. HEATCHILL_PURPOSE_PROP_LIMIT = PropLimit( enum=['control-exotherm', 'reaction', 'unstable-reagent'] ) #: Prop limit for ``Stir`` ``purpose`` prop. 'dissolve' is only option. STIR_PURPOSE_PROP_LIMIT = PropLimit( enum=['dissolve'] ) #: Prop limit for ``Reagent`` ``role`` prop. One of 'solvent', 'reagent', #: 'catalyst', 'substrate', 'acid', 'base' or 'activating-agent'. REAGENT_ROLE_PROP_LIMIT = PropLimit( enum=[ 'solvent', 'reagent', 'catalyst', 'substrate', 'acid', 'base', 'activating-agent' ] ) #: Prop limit for ``Component`` ``component_type`` prop. One of 'reactor', #: 'filter', 'separator', 'rotavap' or 'flask'. COMPONENT_TYPE_PROP_LIMIT: PropLimit = PropLimit( enum=['reactor', 'filter', 'separator', 'rotavap', 'flask'] ) #: Pattern matching a float of value 100, e.g. '100', '100.0', '100.000' would #: all be matched. _hundred_float: str = r'(100(?:[.][0]+)?)' #: Pattern matching any float between 10.000 and 99.999. 
_ten_to_ninety_nine_float: str = r'([0-9][0-9](?:[.][0-9]+)?)' #: Pattern matching any float between 0 and 9.999. _zero_to_ten_float: str = r'([0-9](?:[.][0-9]+)?)' #: Pattern matching float between 0 and 100. Used for percentages. PERCENT_RANGE_PROP_LIMIT: PropLimit = PropLimit( r'^(' + _hundred_float + '|'\ + _ten_to_ninety_nine_float + '|' + _zero_to_ten_float + ')$', hint='Expecting number from 0-100 representing a percentage, e.g. "50", "8.5".', default='0', )
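As a quick illustration of how the regex-based prop limits above are meant to be used, the checks below are a sketch, not part of prop_limits.py; they assume the definitions above are in scope, and the example values are invented.

assert VOLUME_PROP_LIMIT.validate('5.5 mL')
assert VOLUME_PROP_LIMIT.validate('all')
assert not VOLUME_PROP_LIMIT.validate('five millilitres')

assert TEMP_PROP_LIMIT.validate('25°C')
assert TEMP_PROP_LIMIT.validate('-10')       # a bare number is accepted; standard units are implied
assert not PERCENT_RANGE_PROP_LIMIT.validate('101')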
[((70, 15, 70, 42), 're.match', 're.match', ({(70, 24, 70, 34): 'self.regex', (70, 36, 70, 41): 'value'}, {}), '(self.regex, value)', False, 'import re\n')]
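A related sketch for the prop_limits.py content above (again illustrative, assuming the PropLimit class is in scope) showing what an enum-based prop limit generates for its regex and hint via generate_enum_regex() and generate_enum_hint():

limit = PropLimit(enum=['top', 'bottom'])
assert limit.regex == '(top|bottom)'
assert limit.hint == 'Expecting one of "top" or "bottom".'
assert limit.validate('bottom')
assert not limit.validate('sideways')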
leoalfonso/dit
dit/utils/bindargs.py
e7d5f680b3f170091bb1e488303f4255eeb11ef4
""" Provides usable args and kwargs from inspect.getcallargs. For Python 3.3 and above, this module is unnecessary and can be achieved using features from PEP 362: http://www.python.org/dev/peps/pep-0362/ For example, to override a parameter of some function: >>> import inspect >>> def func(a, b=1, c=2, d=3): ... return a, b, c, d ... >>> def override_c(*args, **kwargs): ... sig = inspect.signature(override) ... ba = sig.bind(*args, **kwargs) ... ba['c'] = 10 ... return func(*ba.args, *ba.kwargs) ... >>> override_c(0, c=3) (0, 1, 10, 3) Also useful: http://www.python.org/dev/peps/pep-3102/ """ import sys import inspect from inspect import getcallargs try: from inspect import getfullargspec except ImportError: # Python 2.X from collections import namedtuple from inspect import getargspec FullArgSpec = namedtuple('FullArgSpec', 'args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations') def getfullargspec(f): args, varargs, varkw, defaults = getargspec(f) kwonlyargs = [] kwonlydefaults = None annotations = getattr(f, '__annotations__', {}) return FullArgSpec(args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations) def bindcallargs_leq32(_fUnCtIoN_, *args, **kwargs): """Binds arguments and keyword arguments to a function or method. Returns a tuple (bargs, bkwargs) suitable for manipulation and passing to the specified function. `bargs` consists of the bound args, varargs, and kwonlyargs from getfullargspec. `bkwargs` consists of the bound varkw from getfullargspec. Both can be used in a call to the specified function. Any default parameter values are included in the output. Examples -------- >>> def func(a, b=3, *args, **kwargs): ... pass >>> bindcallargs(func, 5) ((5, 3), {}) >>> bindcallargs(func, 5, 4, 3, 2, 1, hello='there') ((5, 4, 3, 2, 1), {'hello': 'there'}) >>> args, kwargs = bindcallargs(func, 5) >>> kwargs['b'] = 5 # overwrite default value for b >>> func(*args, **kwargs) """ # It is necessary to choose an unlikely variable name for the function. # The reason is that any kwarg by the same name will cause a TypeError # due to multiple values being passed for that argument name. func = _fUnCtIoN_ callargs = getcallargs(func, *args, **kwargs) spec = getfullargspec(func) # Construct all args and varargs and use them in bargs bargs = [callargs[arg] for arg in spec.args] if spec.varargs is not None: bargs.extend(callargs[spec.varargs]) bargs = tuple(bargs) # Start with kwonlyargs. bkwargs = {kwonlyarg: callargs[kwonlyarg] for kwonlyarg in spec.kwonlyargs} # Add in kwonlydefaults for unspecified kwonlyargs only. # Since keyword only arguements aren't allowed in python2, and we # don't support python 3.0, 3.1, 3.2, this should never be executed: if spec.kwonlydefaults is not None: # pragma: no cover bkwargs.update({k: v for k, v in spec.kwonlydefaults.items() if k not in bkwargs}) # Add in varkw. if spec.varkw is not None: bkwargs.update(callargs[spec.varkw]) return bargs, bkwargs def bindcallargs_geq33(_fUnCtIoN_, *args, **kwargs): # Should match functionality of bindcallargs_32 for Python > 3.3. sig = inspect.signature(_fUnCtIoN_) ba = sig.bind(*args, **kwargs) # Add in all default values for param in sig.parameters.values(): if param.name not in ba.arguments: ba.arguments[param.name] = param.default return ba.args, ba.kwargs if sys.version_info[0:2] < (3,3): bindcallargs = bindcallargs_leq32 else: bindcallargs = bindcallargs_geq33
[((87, 15, 87, 49), 'inspect.getcallargs', 'getcallargs', ({(87, 27, 87, 31): 'func', (87, 33, 87, 38): '*args'}, {}), '(func, *args, **kwargs)', False, 'from inspect import getcallargs\n'), ((88, 11, 88, 31), 'inspect.getfullargspec', 'getfullargspec', ({(88, 26, 88, 30): 'func'}, {}), '(func)', False, 'from inspect import getfullargspec\n'), ((113, 10, 113, 39), 'inspect.signature', 'inspect.signature', ({(113, 28, 113, 38): '_fUnCtIoN_'}, {}), '(_fUnCtIoN_)', False, 'import inspect\n'), ((43, 18, 44, 78), 'collections.namedtuple', 'namedtuple', ({(43, 29, 43, 42): '"""FullArgSpec"""', (44, 4, 44, 77): '"""args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations"""'}, {}), "('FullArgSpec',\n 'args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations')", False, 'from collections import namedtuple\n'), ((47, 41, 47, 54), 'inspect.getargspec', 'getargspec', ({(47, 52, 47, 53): 'f'}, {}), '(f)', False, 'from inspect import getargspec\n')]
AmyYLee/gaia
tests/python/gaia-ui-tests/gaiatest/gaia_test.py
a5dbae8235163d7f985bdeb7d649268f02749a8b
# This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. import json import os import sys import time from marionette import MarionetteTestCase from marionette.by import By from marionette.errors import NoSuchElementException from marionette.errors import ElementNotVisibleException from marionette.errors import TimeoutException from marionette.errors import StaleElementException from marionette.errors import InvalidResponseException import mozdevice class LockScreen(object): def __init__(self, marionette): self.marionette = marionette js = os.path.abspath(os.path.join(__file__, os.path.pardir, 'atoms', "gaia_lock_screen.js")) self.marionette.import_script(js) @property def is_locked(self): self.marionette.switch_to_frame() return self.marionette.execute_script('window.wrappedJSObject.LockScreen.locked') def lock(self): self.marionette.switch_to_frame() result = self.marionette.execute_async_script('GaiaLockScreen.lock()') assert result, 'Unable to lock screen' def unlock(self): self.marionette.switch_to_frame() result = self.marionette.execute_async_script('GaiaLockScreen.unlock()') assert result, 'Unable to unlock screen' class GaiaApp(object): def __init__(self, origin=None, name=None, frame=None, src=None): self.frame = frame self.frame_id = frame self.src = src self.name = name self.origin = origin def __eq__(self, other): return self.__dict__ == other.__dict__ class GaiaApps(object): def __init__(self, marionette): self.marionette = marionette js = os.path.abspath(os.path.join(__file__, os.path.pardir, 'atoms', "gaia_apps.js")) self.marionette.import_script(js) def get_permission(self, app_name, permission_name): return self.marionette.execute_async_script("return GaiaApps.getPermission('%s', '%s')" % (app_name, permission_name)) def set_permission(self, app_name, permission_name, value): return self.marionette.execute_async_script("return GaiaApps.setPermission('%s', '%s', '%s')" % (app_name, permission_name, value)) def launch(self, name, switch_to_frame=True, url=None, launch_timeout=None): self.marionette.switch_to_frame() result = self.marionette.execute_async_script("GaiaApps.launchWithName('%s')" % name, script_timeout=launch_timeout) assert result, "Failed to launch app with name '%s'" % name app = GaiaApp(frame=result.get('frame'), src=result.get('src'), name=result.get('name'), origin=result.get('origin')) if app.frame_id is None: raise Exception("App failed to launch; there is no app frame") if switch_to_frame: self.switch_to_frame(app.frame_id, url) return app @property def displayed_app(self): self.marionette.switch_to_frame() result = self.marionette.execute_async_script('return GaiaApps.displayedApp();') return GaiaApp(frame=result.get('frame'), src=result.get('src'), name=result.get('name'), origin=result.get('origin')) def switch_to_displayed_app(self): self.marionette.switch_to_default_content() self.marionette.switch_to_frame(self.displayed_app.frame) def is_app_installed(self, app_name): self.marionette.switch_to_frame() return self.marionette.execute_async_script("GaiaApps.locateWithName('%s')" % app_name) def uninstall(self, name): self.marionette.switch_to_frame() self.marionette.execute_async_script("GaiaApps.uninstallWithName('%s')" % name) def kill(self, app): self.marionette.switch_to_frame() js = os.path.abspath(os.path.join(__file__, os.path.pardir, 'atoms', "gaia_apps.js")) self.marionette.import_script(js) result = 
self.marionette.execute_async_script("GaiaApps.kill('%s');" % app.origin) assert result, "Failed to kill app with name '%s'" % app.name def kill_all(self): self.marionette.switch_to_frame() js = os.path.abspath(os.path.join(__file__, os.path.pardir, 'atoms', "gaia_apps.js")) self.marionette.import_script(js) self.marionette.execute_async_script("GaiaApps.killAll()") def runningApps(self): return self.marionette.execute_script("return GaiaApps.getRunningApps()") def switch_to_frame(self, app_frame, url=None, timeout=30): self.marionette.switch_to_frame(app_frame) start = time.time() if not url: def check(now): return "about:blank" not in now else: def check(now): return url in now while (time.time() - start < timeout): if check(self.marionette.get_url()): return time.sleep(2) raise TimeoutException('Could not switch to app frame %s in time' % app_frame) class GaiaData(object): def __init__(self, marionette, testvars=None): self.marionette = marionette self.testvars = testvars or {} js = os.path.abspath(os.path.join(__file__, os.path.pardir, 'atoms', "gaia_data_layer.js")) self.marionette.import_script(js) self.marionette.set_search_timeout(10000) def set_time(self, date_number): self.marionette.set_context(self.marionette.CONTEXT_CHROME) self.marionette.execute_script("window.navigator.mozTime.set(%s);" % date_number) self.marionette.set_context(self.marionette.CONTEXT_CONTENT) @property def all_contacts(self): self.marionette.switch_to_frame() return self.marionette.execute_async_script('return GaiaDataLayer.getAllContacts();', special_powers=True) @property def sim_contacts(self): self.marionette.switch_to_frame() return self.marionette.execute_async_script('return GaiaDataLayer.getSIMContacts();', special_powers=True) def insert_contact(self, contact): self.marionette.switch_to_frame() result = self.marionette.execute_async_script('return GaiaDataLayer.insertContact(%s);' % json.dumps(contact), special_powers=True) assert result, 'Unable to insert contact %s' % contact def remove_all_contacts(self, default_script_timeout=60000): self.marionette.switch_to_frame() self.marionette.set_script_timeout(max(default_script_timeout, 1000 * len(self.all_contacts))) result = self.marionette.execute_async_script('return GaiaDataLayer.removeAllContacts();', special_powers=True) assert result, 'Unable to remove all contacts' self.marionette.set_script_timeout(default_script_timeout) def get_setting(self, name): return self.marionette.execute_async_script('return GaiaDataLayer.getSetting("%s")' % name, special_powers=True) @property def all_settings(self): return self.get_setting('*') def set_setting(self, name, value): import json value = json.dumps(value) result = self.marionette.execute_async_script('return GaiaDataLayer.setSetting("%s", %s)' % (name, value), special_powers=True) assert result, "Unable to change setting with name '%s' to '%s'" % (name, value) def _get_pref(self, datatype, name): return self.marionette.execute_script("return SpecialPowers.get%sPref('%s');" % (datatype, name), special_powers=True) def _set_pref(self, datatype, name, value): value = json.dumps(value) self.marionette.execute_script("SpecialPowers.set%sPref('%s', %s);" % (datatype, name, value), special_powers=True) def get_bool_pref(self, name): """Returns the value of a Gecko boolean pref, which is different from a Gaia setting.""" return self._get_pref('Bool', name) def set_bool_pref(self, name, value): """Sets the value of a Gecko boolean pref, which is different from a Gaia setting.""" return self._set_pref('Bool', 
name, value) def get_int_pref(self, name): """Returns the value of a Gecko integer pref, which is different from a Gaia setting.""" return self._get_pref('Int', name) def set_int_pref(self, name, value): """Sets the value of a Gecko integer pref, which is different from a Gaia setting.""" return self._set_pref('Int', name, value) def get_char_pref(self, name): """Returns the value of a Gecko string pref, which is different from a Gaia setting.""" return self._get_pref('Char', name) def set_char_pref(self, name, value): """Sets the value of a Gecko string pref, which is different from a Gaia setting.""" return self._set_pref('Char', name, value) def set_volume(self, value): channels = ['alarm', 'content', 'notification'] for channel in channels: self.set_setting('audio.volume.%s' % channel, value) def bluetooth_enable(self): self.marionette.switch_to_frame() return self.marionette.execute_async_script("return GaiaDataLayer.enableBluetooth()") def bluetooth_disable(self): self.marionette.switch_to_frame() return self.marionette.execute_async_script("return GaiaDataLayer.disableBluetooth()") def bluetooth_pair_device(self, device_name): return self.marionette.execute_async_script('return GaiaDataLayer.pairBluetoothDevice("%s")' % device_name) def bluetooth_unpair_all_devices(self): self.marionette.switch_to_frame() self.marionette.execute_async_script('return GaiaDataLayer.unpairAllBluetoothDevices()') def bluetooth_set_device_name(self, device_name): result = self.marionette.execute_async_script('return GaiaDataLayer.bluetoothSetDeviceName(%s);' % device_name) assert result, "Unable to set device's bluetooth name to %s" % device_name def bluetooth_set_device_discoverable_mode(self, discoverable): if (discoverable): result = self.marionette.execute_async_script('return GaiaDataLayer.bluetoothSetDeviceDiscoverableMode(true);') else: result = self.marionette.execute_async_script('return GaiaDataLayer.bluetoothSetDeviceDiscoverableMode(false);') assert result, 'Able to set the device bluetooth discoverable mode' @property def bluetooth_is_enabled(self): return self.marionette.execute_script("return window.navigator.mozBluetooth.enabled") @property def is_cell_data_enabled(self): return self.get_setting('ril.data.enabled') def connect_to_cell_data(self): self.marionette.switch_to_frame() result = self.marionette.execute_async_script("return GaiaDataLayer.connectToCellData()", special_powers=True) assert result, 'Unable to connect to cell data' def disable_cell_data(self): self.marionette.switch_to_frame() result = self.marionette.execute_async_script("return GaiaDataLayer.disableCellData()", special_powers=True) assert result, 'Unable to disable cell data' @property def is_cell_data_connected(self): # XXX: check bug-926169 # this is used to keep all tests passing while introducing multi-sim APIs return self.marionette.execute_script('var mobileConnection = window.navigator.mozMobileConnection || ' + 'window.navigator.mozMobileConnections && ' + 'window.navigator.mozMobileConnections[0]; ' + 'return mobileConnection.data.connected;') def enable_cell_roaming(self): self.set_setting('ril.data.roaming_enabled', True) def disable_cell_roaming(self): self.set_setting('ril.data.roaming_enabled', False) @property def is_wifi_enabled(self): return self.marionette.execute_script("return window.navigator.mozWifiManager.enabled;") def enable_wifi(self): self.marionette.switch_to_frame() result = self.marionette.execute_async_script("return GaiaDataLayer.enableWiFi()", special_powers=True) assert result, 
'Unable to enable WiFi' def disable_wifi(self): self.marionette.switch_to_frame() result = self.marionette.execute_async_script("return GaiaDataLayer.disableWiFi()", special_powers=True) assert result, 'Unable to disable WiFi' def connect_to_wifi(self, network=None): network = network or self.testvars.get('wifi') assert network, 'No WiFi network provided' self.enable_wifi() self.marionette.switch_to_frame() result = self.marionette.execute_async_script("return GaiaDataLayer.connectToWiFi(%s)" % json.dumps(network)) assert result, 'Unable to connect to WiFi network' def forget_all_networks(self): self.marionette.switch_to_frame() self.marionette.execute_async_script('return GaiaDataLayer.forgetAllNetworks()') def is_wifi_connected(self, network=None): network = network or self.testvars.get('wifi') assert network, 'No WiFi network provided' self.marionette.switch_to_frame() return self.marionette.execute_script("return GaiaDataLayer.isWiFiConnected(%s)" % json.dumps(network)) @property def known_networks(self): return self.marionette.execute_async_script('return GaiaDataLayer.getKnownNetworks()') @property def active_telephony_state(self): # Returns the state of only the currently active call or None if no active call return self.marionette.execute_script("return GaiaDataLayer.getMozTelephonyState()") @property def is_antenna_available(self): return self.marionette.execute_script('return window.navigator.mozFMRadio.antennaAvailable') @property def is_fm_radio_enabled(self): return self.marionette.execute_script('return window.navigator.mozFMRadio.enabled') @property def fm_radio_frequency(self): return self.marionette.execute_script('return window.navigator.mozFMRadio.frequency') @property def media_files(self): result = [] result.extend(self.music_files) result.extend(self.picture_files) result.extend(self.video_files) return result def delete_all_sms(self): self.marionette.switch_to_frame() return self.marionette.execute_async_script("return GaiaDataLayer.deleteAllSms();", special_powers=True) def delete_all_call_log_entries(self): """The call log needs to be open and focused in order for this to work.""" self.marionette.execute_script('window.wrappedJSObject.RecentsDBManager.deleteAll();') def kill_active_call(self): self.marionette.execute_script("var telephony = window.navigator.mozTelephony; " + "if(telephony.active) telephony.active.hangUp();") @property def music_files(self): return self.marionette.execute_async_script( 'return GaiaDataLayer.getAllMusic();') @property def picture_files(self): return self.marionette.execute_async_script( 'return GaiaDataLayer.getAllPictures();') @property def video_files(self): return self.marionette.execute_async_script( 'return GaiaDataLayer.getAllVideos();') def sdcard_files(self, extension=''): files = self.marionette.execute_async_script( 'return GaiaDataLayer.getAllSDCardFiles();') if len(extension): return [filename for filename in files if filename.endswith(extension)] return files def send_sms(self, number, message): import json number = json.dumps(number) message = json.dumps(message) result = self.marionette.execute_async_script('return GaiaDataLayer.sendSMS(%s, %s)' % (number, message), special_powers=True) assert result, 'Unable to send SMS to recipient %s with text %s' % (number, message) class GaiaDevice(object): def __init__(self, marionette, testvars=None): self.marionette = marionette self.testvars = testvars or {} @property def manager(self): if hasattr(self, '_manager') and self._manager: return self._manager if not 
self.is_android_build: raise Exception('Device manager is only available for devices.') dm_type = os.environ.get('DM_TRANS', 'adb') if dm_type == 'adb': self._manager = mozdevice.DeviceManagerADB() elif dm_type == 'sut': host = os.environ.get('TEST_DEVICE') if not host: raise Exception('Must specify host with SUT!') self._manager = mozdevice.DeviceManagerSUT(host=host) else: raise Exception('Unknown device manager type: %s' % dm_type) return self._manager @property def is_android_build(self): if self.testvars.get('is_android_build') is None: self.testvars['is_android_build'] = 'Android' in self.marionette.session_capabilities['platform'] return self.testvars['is_android_build'] @property def is_online(self): # Returns true if the device has a network connection established (cell data, wifi, etc) return self.marionette.execute_script('return window.navigator.onLine;') @property def has_mobile_connection(self): # XXX: check bug-926169 # this is used to keep all tests passing while introducing multi-sim APIs return self.marionette.execute_script('var mobileConnection = window.navigator.mozMobileConnection || ' + 'window.navigator.mozMobileConnections && ' + 'window.navigator.mozMobileConnections[0]; ' + 'return mobileConnection !== undefined') @property def has_wifi(self): if not hasattr(self, '_has_wifi'): self._has_wifi = self.marionette.execute_script('return window.navigator.mozWifiManager !== undefined') return self._has_wifi def push_file(self, source, count=1, destination='', progress=None): if not destination.count('.') > 0: destination = '/'.join([destination, source.rpartition(os.path.sep)[-1]]) self.manager.mkDirs(destination) self.manager.pushFile(source, destination) if count > 1: for i in range(1, count + 1): remote_copy = '_%s.'.join(iter(destination.split('.'))) % i self.manager._checkCmd(['shell', 'dd', 'if=%s' % destination, 'of=%s' % remote_copy]) if progress: progress.update(i) self.manager.removeFile(destination) def restart_b2g(self): self.stop_b2g() time.sleep(2) self.start_b2g() def start_b2g(self): if self.marionette.instance: # launch the gecko instance attached to marionette self.marionette.instance.start() elif self.is_android_build: self.manager.shellCheckOutput(['start', 'b2g']) else: raise Exception('Unable to start B2G') self.marionette.wait_for_port() self.marionette.start_session() if self.is_android_build: self.marionette.execute_async_script(""" window.addEventListener('mozbrowserloadend', function loaded(aEvent) { if (aEvent.target.src.indexOf('ftu') != -1 || aEvent.target.src.indexOf('homescreen') != -1) { window.removeEventListener('mozbrowserloadend', loaded); marionetteScriptFinished(); } });""", script_timeout=60000) # TODO: Remove this sleep when Bug 924912 is addressed time.sleep(5) def stop_b2g(self): if self.marionette.instance: # close the gecko instance attached to marionette self.marionette.instance.close() elif self.is_android_build: self.manager.shellCheckOutput(['stop', 'b2g']) else: raise Exception('Unable to stop B2G') self.marionette.client.close() self.marionette.session = None self.marionette.window = None class GaiaTestCase(MarionetteTestCase): _script_timeout = 60000 _search_timeout = 10000 # deafult timeout in seconds for the wait_for methods _default_timeout = 30 def __init__(self, *args, **kwargs): self.restart = kwargs.pop('restart', False) kwargs.pop('iterations', None) kwargs.pop('checkpoint_interval', None) MarionetteTestCase.__init__(self, *args, **kwargs) def setUp(self): try: MarionetteTestCase.setUp(self) except 
InvalidResponseException: if self.restart: pass self.device = GaiaDevice(self.marionette, self.testvars) if self.restart and (self.device.is_android_build or self.marionette.instance): self.device.stop_b2g() if self.device.is_android_build: # revert device to a clean state self.device.manager.removeDir('/data/local/storage/persistent') self.device.manager.removeDir('/data/b2g/mozilla') self.device.start_b2g() # the emulator can be really slow! self.marionette.set_script_timeout(self._script_timeout) self.marionette.set_search_timeout(self._search_timeout) self.lockscreen = LockScreen(self.marionette) self.apps = GaiaApps(self.marionette) self.data_layer = GaiaData(self.marionette, self.testvars) from gaiatest.apps.keyboard.app import Keyboard self.keyboard = Keyboard(self.marionette) self.cleanUp() def cleanUp(self): # remove media if self.device.is_android_build: for filename in self.data_layer.media_files: # filename is a fully qualified path self.device.manager.removeFile(filename) # Switch off keyboard FTU screen self.data_layer.set_setting("keyboard.ftu.enabled", False) # restore settings from testvars [self.data_layer.set_setting(name, value) for name, value in self.testvars.get('settings', {}).items()] # unlock self.lockscreen.unlock() # If we are restarting all of these values are reset to default earlier in the setUp if not self.restart: # disable passcode before restore settings from testvars self.data_layer.set_setting('lockscreen.passcode-lock.code', '1111') self.data_layer.set_setting('lockscreen.passcode-lock.enabled', False) # Change language back to English self.data_layer.set_setting("language.current", "en-US") # Switch off spanish keyboard before test self.data_layer.set_setting("keyboard.layouts.spanish", False) # Set do not track pref back to the default self.data_layer.set_setting('privacy.donottrackheader.value', '-1') if self.data_layer.get_setting('ril.radio.disabled'): # enable the device radio, disable Airplane mode self.data_layer.set_setting('ril.radio.disabled', False) # Re-set edge gestures pref to False self.data_layer.set_setting('edgesgesture.enabled', False) # disable carrier data connection if self.device.has_mobile_connection: self.data_layer.disable_cell_data() self.data_layer.disable_cell_roaming() if self.device.has_wifi: self.data_layer.enable_wifi() self.data_layer.forget_all_networks() self.data_layer.disable_wifi() # remove data self.data_layer.remove_all_contacts(self._script_timeout) # reset to home screen self.marionette.execute_script("window.wrappedJSObject.dispatchEvent(new Event('home'));") # kill any open apps self.apps.kill_all() # disable sound completely self.data_layer.set_volume(0) def install_marketplace(self): _yes_button_locator = (By.ID, 'app-install-install-button') mk = {"name": "Marketplace Dev", "manifest": "https://marketplace-dev.allizom.org/manifest.webapp ", } if not self.apps.is_app_installed(mk['name']): # install the marketplace dev app self.marionette.execute_script('navigator.mozApps.install("%s")' % mk['manifest']) # TODO add this to the system app object when we have one self.wait_for_element_displayed(*_yes_button_locator) self.marionette.find_element(*_yes_button_locator).tap() self.wait_for_element_not_displayed(*_yes_button_locator) def connect_to_network(self): if not self.device.is_online: try: self.connect_to_local_area_network() except: if self.device.has_mobile_connection: self.data_layer.connect_to_cell_data() else: raise Exception('Unable to connect to network') assert self.device.is_online def 
connect_to_local_area_network(self): if not self.device.is_online: if self.testvars.get('wifi') and self.device.has_wifi: self.data_layer.connect_to_wifi() assert self.device.is_online else: raise Exception('Unable to connect to local area network') def push_resource(self, filename, count=1, destination=''): self.device.push_file(self.resource(filename), count, '/'.join(['sdcard', destination])) def resource(self, filename): return os.path.abspath(os.path.join(os.path.dirname(__file__), 'resources', filename)) def change_orientation(self, orientation): """ There are 4 orientation states which the phone can be passed in: portrait-primary(which is the default orientation), landscape-primary, portrait-secondary and landscape-secondary """ self.marionette.execute_async_script(""" if (arguments[0] === arguments[1]) { marionetteScriptFinished(); } else { var expected = arguments[1]; window.screen.onmozorientationchange = function(e) { console.log("Received 'onmozorientationchange' event."); waitFor( function() { window.screen.onmozorientationchange = null; marionetteScriptFinished(); }, function() { return window.screen.mozOrientation === expected; } ); }; console.log("Changing orientation to '" + arguments[1] + "'."); window.screen.mozLockOrientation(arguments[1]); };""", script_args=[self.screen_orientation, orientation]) @property def screen_width(self): return self.marionette.execute_script('return window.screen.width') @property def screen_orientation(self): return self.marionette.execute_script('return window.screen.mozOrientation') def wait_for_element_present(self, by, locator, timeout=_default_timeout): timeout = float(timeout) + time.time() while time.time() < timeout: time.sleep(0.5) try: return self.marionette.find_element(by, locator) except NoSuchElementException: pass else: raise TimeoutException( 'Element %s not present before timeout' % locator) def wait_for_element_not_present(self, by, locator, timeout=_default_timeout): timeout = float(timeout) + time.time() while time.time() < timeout: time.sleep(0.5) try: self.marionette.find_element(by, locator) except NoSuchElementException: break else: raise TimeoutException( 'Element %s still present after timeout' % locator) def wait_for_element_displayed(self, by, locator, timeout=_default_timeout): timeout = float(timeout) + time.time() e = None while time.time() < timeout: time.sleep(0.5) try: if self.marionette.find_element(by, locator).is_displayed(): break except (NoSuchElementException, StaleElementException) as e: pass else: # This is an effortless way to give extra debugging information if isinstance(e, NoSuchElementException): raise TimeoutException('Element %s not present before timeout' % locator) else: raise TimeoutException('Element %s present but not displayed before timeout' % locator) def wait_for_element_not_displayed(self, by, locator, timeout=_default_timeout): timeout = float(timeout) + time.time() while time.time() < timeout: time.sleep(0.5) try: if not self.marionette.find_element(by, locator).is_displayed(): break except StaleElementException: pass except NoSuchElementException: break else: raise TimeoutException( 'Element %s still visible after timeout' % locator) def wait_for_condition(self, method, timeout=_default_timeout, message="Condition timed out"): """Calls the method provided with the driver as an argument until the \ return value is not False.""" end_time = time.time() + timeout while time.time() < end_time: try: value = method(self.marionette) if value: return value except (NoSuchElementException, 
StaleElementException): pass time.sleep(0.5) else: raise TimeoutException(message) def is_element_present(self, by, locator): try: self.marionette.find_element(by, locator) return True except: return False def is_element_displayed(self, by, locator): try: return self.marionette.find_element(by, locator).is_displayed() except (NoSuchElementException, ElementNotVisibleException): return False def tearDown(self): self.lockscreen = None self.apps = None self.data_layer = None MarionetteTestCase.tearDown(self) class GaiaEnduranceTestCase(GaiaTestCase): def __init__(self, *args, **kwargs): self.iterations = kwargs.pop('iterations') or 1 self.checkpoint_interval = kwargs.pop('checkpoint_interval') or self.iterations GaiaTestCase.__init__(self, *args, **kwargs) def drive(self, test, app): self.test_method = test self.app_under_test = app # Now drive the actual test case iterations for count in range(1, self.iterations + 1): self.iteration = count self.marionette.log("%s iteration %d of %d" % (self.test_method.__name__, count, self.iterations)) # Print to console so can see what iteration we're on while test is running if self.iteration == 1: print "\n" print "Iteration %d of %d..." % (count, self.iterations) sys.stdout.flush() self.test_method() # Checkpoint time? if ((count % self.checkpoint_interval) == 0) or count == self.iterations: self.checkpoint() # Finished, now process checkpoint data into .json output self.process_checkpoint_data() def checkpoint(self): # Console output so know what's happening if watching console print "Checkpoint..." sys.stdout.flush() # Sleep to give device idle time (for gc) idle_time = 30 self.marionette.log("sleeping %d seconds to give the device some idle time" % idle_time) time.sleep(idle_time) # Dump out some memory status info self.marionette.log("checkpoint") self.cur_time = time.strftime("%Y%m%d%H%M%S", time.localtime()) # If first checkpoint, create the file if it doesn't exist already if self.iteration in (0, self.checkpoint_interval): self.checkpoint_path = "checkpoints" if not os.path.exists(self.checkpoint_path): os.makedirs(self.checkpoint_path, 0755) self.log_name = "%s/checkpoint_%s_%s.log" % (self.checkpoint_path, self.test_method.__name__, self.cur_time) with open(self.log_name, 'a') as log_file: log_file.write('%s Gaia Endurance Test: %s\n' % (self.cur_time, self.test_method.__name__)) output_str = self.device.manager.shellCheckOutput(["b2g-ps"]) with open(self.log_name, 'a') as log_file: log_file.write('%s Checkpoint after iteration %d of %d:\n' % (self.cur_time, self.iteration, self.iterations)) log_file.write('%s\n' % output_str) def close_app(self): # Close the current app (self.app) by using the home button self.marionette.switch_to_frame() self.marionette.execute_script("window.wrappedJSObject.dispatchEvent(new Event('home'));") # Bring up the cards view _cards_view_locator = ('id', 'cards-view') self.marionette.execute_script("window.wrappedJSObject.dispatchEvent(new Event('holdhome'));") self.wait_for_element_displayed(*_cards_view_locator) # Sleep a bit time.sleep(5) # Tap the close icon for the current app locator_part_two = '#cards-view li.card[data-origin*="%s"] .close-card' % self.app_under_test.lower() _close_button_locator = ('css selector', locator_part_two) close_card_app_button = self.marionette.find_element(*_close_button_locator) close_card_app_button.tap() def process_checkpoint_data(self): # Process checkpoint data into .json self.marionette.log("processing checkpoint data from %s" % self.log_name) # Open the checkpoint file 
checkpoint_file = open(self.log_name, 'r') # Grab every b2g rss reading for each checkpoint b2g_rss_list = [] for next_line in checkpoint_file: if next_line.startswith("b2g"): b2g_rss_list.append(next_line.split()[5]) # Close the checkpoint file checkpoint_file.close() # Calculate the average b2g_rss total = 0 for b2g_mem_value in b2g_rss_list: total += int(b2g_mem_value) avg_rss = total / len(b2g_rss_list) # Create a summary text file summary_name = self.log_name.replace('.log', '_summary.log') summary_file = open(summary_name, 'w') # Write the summarized checkpoint data summary_file.write('test_name: %s\n' % self.test_method.__name__) summary_file.write('completed: %s\n' % self.cur_time) summary_file.write('app_under_test: %s\n' % self.app_under_test.lower()) summary_file.write('total_iterations: %d\n' % self.iterations) summary_file.write('checkpoint_interval: %d\n' % self.checkpoint_interval) summary_file.write('b2g_rss: ') summary_file.write(', '.join(b2g_rss_list)) summary_file.write('\navg_rss: %d\n\n' % avg_rss) # Close the summary file summary_file.close() # Write to suite summary file suite_summary_file_name = '%s/avg_b2g_rss_suite_summary.log' % self.checkpoint_path suite_summary_file = open(suite_summary_file_name, 'a') suite_summary_file.write('%s: %s\n' % (self.test_method.__name__, avg_rss)) suite_summary_file.close()
[]
certik/pyjamas
library/__mozilla__/pyjamas/DOM.py
5bb72e63e50f09743ac986f4c9690ba50c499ba9
def buttonClick(button): JS(""" var doc = button.ownerDocument; if (doc != null) { var evt = doc.createEvent('MouseEvents'); evt.initMouseEvent('click', true, true, null, 0, 0, 0, 0, 0, false, false, false, false, 0, null); button.dispatchEvent(evt); } """) def compare(elem1, elem2): JS(""" if (!elem1 && !elem2) { return true; } else if (!elem1 || !elem2) { return false; } if (!elem1.isSameNode) { return (elem1 == elem2); } return (elem1.isSameNode(elem2)); """) def eventGetButton(evt): JS(""" var button = evt.which; if(button == 2) { return 4; } else if (button == 3) { return 2; } else { return button || 0; } """) # This is what is in GWT 1.5 for getAbsoluteLeft. err... #""" # // We cannot use DOMImpl here because offsetLeft/Top return erroneous # // values when overflow is not visible. We have to difference screenX # // here due to a change in getBoxObjectFor which causes inconsistencies # // on whether the calculations are inside or outside of the element's # // border. # try { # return $doc.getBoxObjectFor(elem).screenX # - $doc.getBoxObjectFor($doc.documentElement).screenX; # } catch (e) { # // This works around a bug in the FF3 betas. The bug # // should be fixed before they release, so this can # // be removed at a later date. # // https://bugzilla.mozilla.org/show_bug.cgi?id=409111 # // DOMException.WRONG_DOCUMENT_ERR == 4 # if (e.code == 4) { # return 0; # } # throw e; # } #""" def getAbsoluteLeft(elem): JS(""" // Firefox 3 expects getBoundingClientRect // getBoundingClientRect can be float: 73.1 instead of 74, see // gwt's workaround at user/src/com/google/gwt/dom/client/DOMImplMozilla.java:47 // Please note, their implementation has 1px offset. if ( typeof elem.getBoundingClientRect == 'function' ) { var left = Math.ceil(elem.getBoundingClientRect().left); return left + $doc.body.scrollLeft + $doc.documentElement.scrollLeft; } // Older Firefox can use getBoxObjectFor else { var left = $doc.getBoxObjectFor(elem).x; var parent = elem.parentNode; while (parent) { if (parent.scrollLeft > 0) { left = left - parent.scrollLeft; } parent = parent.parentNode; } return left + $doc.body.scrollLeft + $doc.documentElement.scrollLeft; } """) # This is what is in GWT 1.5 for getAbsoluteTop. err... #""" # // We cannot use DOMImpl here because offsetLeft/Top return erroneous # // values when overflow is not visible. We have to difference screenY # // here due to a change in getBoxObjectFor which causes inconsistencies # // on whether the calculations are inside or outside of the element's # // border. # try { # return $doc.getBoxObjectFor(elem).screenY # - $doc.getBoxObjectFor($doc.documentElement).screenY; # } catch (e) { # // This works around a bug in the FF3 betas. The bug # // should be fixed before they release, so this can # // be removed at a later date. 
# // https://bugzilla.mozilla.org/show_bug.cgi?id=409111 # // DOMException.WRONG_DOCUMENT_ERR == 4 # if (e.code == 4) { # return 0; # } # throw e; # } #""" def getAbsoluteTop(elem): JS(""" // Firefox 3 expects getBoundingClientRect if ( typeof elem.getBoundingClientRect == 'function' ) { var top = Math.ceil(elem.getBoundingClientRect().top); return top + $doc.body.scrollTop + $doc.documentElement.scrollTop; } // Older Firefox can use getBoxObjectFor else { var top = $doc.getBoxObjectFor(elem).y; var parent = elem.parentNode; while (parent) { if (parent.scrollTop > 0) { top -= parent.scrollTop; } parent = parent.parentNode; } return top + $doc.body.scrollTop + $doc.documentElement.scrollTop; } """) def getChildIndex(parent, child): JS(""" var count = 0, current = parent.firstChild; while (current) { if (! current.isSameNode) { if (current == child) { return count; } } else if (current.isSameNode(child)) { return count; } if (current.nodeType == 1) { ++count; } current = current.nextSibling; } return -1; """) def isOrHasChild(parent, child): JS(""" while (child) { if ((!parent.isSameNode)) { if (parent == child) { return true; } } else if (parent.isSameNode(child)) { return true; } try { child = child.parentNode; } catch(e) { // Give up on 'Permission denied to get property // HTMLDivElement.parentNode' // See https://bugzilla.mozilla.org/show_bug.cgi?id=208427 return false; } if (child && (child.nodeType != 1)) { child = null; } } return false; """) def releaseCapture(elem): JS(""" if ((DOM.sCaptureElem != null) && DOM.compare(elem, DOM.sCaptureElem)) DOM.sCaptureElem = null; if (!elem.isSameNode) { if (elem == $wnd.__captureElem) { $wnd.__captureElem = null; } } else if (elem.isSameNode($wnd.__captureElem)) { $wnd.__captureElem = null; } """)
[]
ExpoAshique/ProveBanking__s
apps/vendors/migrations/0090_auto_20160610_2125.py
f0b45fffea74d00d14014be27aa50fe5f42f6903
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-06-10 21:25
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('vendors', '0089_auto_20160602_2123'),
    ]

    operations = [
        migrations.AlterField(
            model_name='vendor',
            name='email',
            field=models.EmailField(blank=True, max_length=254, verbose_name='Email'),
        ),
    ]
[((18, 18, 18, 85), 'django.db.models.EmailField', 'models.EmailField', (), '', False, 'from django.db import migrations, models\n')]
fujihiraryo/library
graph/articulation_points.py
cdb01e710219d7111f890d09f89531916dd03533
from depth_first_search import DFS


def articulation_points(graph):
    n = len(graph)
    dfs = DFS(graph)
    order = [None] * n
    for i, x in enumerate(dfs.preorder):
        order[x] = i
    lower = order[:]
    for x in dfs.preorder[::-1]:
        for y in graph[x]:
            if y == dfs.parent[x]:
                continue
            lower[x] = min(lower[x], lower[y])
    if len(dfs.children[0]) > 1:
        yield 0
    for x in range(1, n):
        if any(order[x] <= lower[y] for y in dfs.children[x]):
            yield x
[((6, 10, 6, 20), 'depth_first_search.DFS', 'DFS', ({(6, 14, 6, 19): 'graph'}, {}), '(graph)', False, 'from depth_first_search import DFS\n')]
AndreAngelucci/popcorn_time_bot
database.py
710b77b59d6c62569c1bf6984c7cf9adac8ea840
import pymongo
from conf import Configuracoes

class Mongo_Database:
    """ Singleton com a conexao com o MongoDB """
    _instancia = None
    def __new__(cls, *args, **kwargs):
        if not(cls._instancia):
            cls._instancia = super(Mongo_Database, cls).__new__(cls, *args, **kwargs)
        return cls._instancia

    def __init__(self,):
        #pega a string de conexao no arquivo de configuracao
        string_conexao = Configuracoes().get_config("database", "string_connection")
        assert (string_conexao != ""), "String de conexao indefinida"
        try:
            self.mongo_client = pymongo.MongoClient(string_conexao)
            self.collection_filmes = self.mongo_client["popcorn_time"]["filmes"]
            self.collection_tweets = self.mongo_client["twitter_log"]["tweets"]
        except:
            raise Exception("Nao foi possivel se conectar ao B.D.")
        print("Conectado a", string_conexao)

    def grava_filmes(self, lista_filmes):
        #verifica se o filme ja existe
        #se nao existir, grava e adiciona a lista de novos filmes
        novos = []
        try:
            for filme in lista_filmes:
                if (self.collection_filmes.count_documents({"_id": filme["_id"]}) == 0):
                    self.collection_filmes.insert_one(filme)
                    novos.append(filme)
        finally:
            return novos

    def grava_tweet(self, tweet_info):
        #grava o retorno dos tweets
        self.collection_tweets.insert_one(tweet_info)
[((17, 32, 17, 67), 'pymongo.MongoClient', 'pymongo.MongoClient', ({(17, 52, 17, 66): 'string_conexao'}, {}), '(string_conexao)', False, 'import pymongo\n'), ((14, 25, 14, 40), 'conf.Configuracoes', 'Configuracoes', ({}, {}), '()', False, 'from conf import Configuracoes\n')]
JorisHerbots/niip_iot_zombie_apocalypse
sensor_core/sleep.py
3ff848f3dab1dde9d2417d0a2c56a76a85e18920
import machine import pycom import utime from exceptions import Exceptions class Sleep: @property def wakeReason(self): return machine.wake_reason()[0] @property def wakePins(self): return machine.wake_reason()[1] @property def powerOnWake(self): return self.wakeReason == machine.PWRON_WAKE @property def pinWake(self): return self.wakeReason == machine.PIN_WAKE @property def RTCWake(self): return self.wakeReason == machine.RTC_WAKE @property def ULPWake(self): return self.wakeReason == machine.ULP_WAKE @property def isSleepWake(self): return self.pinWake or self.RTCWake or self.ULPWake @property def activeTime(self): return self.__activeTime + utime.ticks_diff(utime.ticks_ms(), self.__activityStart) @property def inactiveTime(self): return self.__inactiveTime ACTIVE_TIME_KEY = 'activeTime' INACTIVE_TIME_KEY = 'inactiveTime' SLEEP_TIME_KEY = 'sleepTime' def __init__(self): self.__activityStart = utime.ticks_ms() self.__initPersistentVariable(Sleep.ACTIVE_TIME_KEY) self.__initPersistentVariable(Sleep.INACTIVE_TIME_KEY) if not self.powerOnWake: sleptTime = pycom.nvs_get(Sleep.SLEEP_TIME_KEY) - machine.remaining_sleep_time() pycom.nvs_set(Sleep.INACTIVE_TIME_KEY, pycom.nvs_get(Sleep.INACTIVE_TIME_KEY) + sleptTime) self.__activeTime = pycom.nvs_get(Sleep.ACTIVE_TIME_KEY) self.__inactiveTime = pycom.nvs_get(Sleep.INACTIVE_TIME_KEY) self.__wakeUpPins = [] def __initPersistentVariable(self, key, value=0): if (pycom.nvs_get(key) == None): pycom.nvs_set(key, value) def addWakeUpPin(self, pin): # P2, P3, P4, P6, P8 to P10 and P13 to P23 if isinstance(pin, list): self.__wakeUpPins.extend(pin) else: self.__wakeUpPins.append(pin) try: machine.pin_sleep_wakeup(self.__wakeUpPins, mode=machine.WAKEUP_ANY_HIGH, enable_pull=True) except Exception as e: Exceptions.error(Exception('Sleep not available: ' + str(e))) def resetTimers(self): pycom.nvs_set(Sleep.ACTIVE_TIME_KEY, 0) pycom.nvs_set(Sleep.INACTIVE_TIME_KEY, 0) def sleep(self, milliseconds=0): if milliseconds == 0: milliseconds = 604800000 # 1 week pycom.nvs_set(Sleep.SLEEP_TIME_KEY, milliseconds) pycom.nvs_set(Sleep.ACTIVE_TIME_KEY, self.activeTime + utime.ticks_diff(utime.ticks_ms(), self.__activityStart)) try: machine.deepsleep(milliseconds) except Exception as e: Exceptions.error(Exception('Deepsleep not available: ' + str(e))) def delay(self, milliseconds): utime.sleep_ms(milliseconds)
[((43, 31, 43, 47), 'utime.ticks_ms', 'utime.ticks_ms', ({}, {}), '()', False, 'import utime\n'), ((52, 28, 52, 64), 'pycom.nvs_get', 'pycom.nvs_get', ({(52, 42, 52, 63): 'Sleep.ACTIVE_TIME_KEY'}, {}), '(Sleep.ACTIVE_TIME_KEY)', False, 'import pycom\n'), ((53, 30, 53, 68), 'pycom.nvs_get', 'pycom.nvs_get', ({(53, 44, 53, 67): 'Sleep.INACTIVE_TIME_KEY'}, {}), '(Sleep.INACTIVE_TIME_KEY)', False, 'import pycom\n'), ((75, 8, 75, 47), 'pycom.nvs_set', 'pycom.nvs_set', ({(75, 22, 75, 43): 'Sleep.ACTIVE_TIME_KEY', (75, 45, 75, 46): '(0)'}, {}), '(Sleep.ACTIVE_TIME_KEY, 0)', False, 'import pycom\n'), ((76, 8, 76, 49), 'pycom.nvs_set', 'pycom.nvs_set', ({(76, 22, 76, 45): 'Sleep.INACTIVE_TIME_KEY', (76, 47, 76, 48): '(0)'}, {}), '(Sleep.INACTIVE_TIME_KEY, 0)', False, 'import pycom\n'), ((82, 8, 82, 57), 'pycom.nvs_set', 'pycom.nvs_set', ({(82, 22, 82, 42): 'Sleep.SLEEP_TIME_KEY', (82, 44, 82, 56): 'milliseconds'}, {}), '(Sleep.SLEEP_TIME_KEY, milliseconds)', False, 'import pycom\n'), ((91, 8, 91, 36), 'utime.sleep_ms', 'utime.sleep_ms', ({(91, 23, 91, 35): 'milliseconds'}, {}), '(milliseconds)', False, 'import utime\n'), ((11, 15, 11, 36), 'machine.wake_reason', 'machine.wake_reason', ({}, {}), '()', False, 'import machine\n'), ((14, 15, 14, 36), 'machine.wake_reason', 'machine.wake_reason', ({}, {}), '()', False, 'import machine\n'), ((58, 12, 58, 30), 'pycom.nvs_get', 'pycom.nvs_get', ({(58, 26, 58, 29): 'key'}, {}), '(key)', False, 'import pycom\n'), ((59, 12, 59, 37), 'pycom.nvs_set', 'pycom.nvs_set', ({(59, 26, 59, 29): 'key', (59, 31, 59, 36): 'value'}, {}), '(key, value)', False, 'import pycom\n'), ((70, 12, 70, 103), 'machine.pin_sleep_wakeup', 'machine.pin_sleep_wakeup', (), '', False, 'import machine\n'), ((86, 12, 86, 43), 'machine.deepsleep', 'machine.deepsleep', ({(86, 30, 86, 42): 'milliseconds'}, {}), '(milliseconds)', False, 'import machine\n'), ((33, 52, 33, 68), 'utime.ticks_ms', 'utime.ticks_ms', ({}, {}), '()', False, 'import utime\n'), ((49, 24, 49, 59), 'pycom.nvs_get', 'pycom.nvs_get', ({(49, 38, 49, 58): 'Sleep.SLEEP_TIME_KEY'}, {}), '(Sleep.SLEEP_TIME_KEY)', False, 'import pycom\n'), ((49, 62, 49, 92), 'machine.remaining_sleep_time', 'machine.remaining_sleep_time', ({}, {}), '()', False, 'import machine\n'), ((50, 51, 50, 89), 'pycom.nvs_get', 'pycom.nvs_get', ({(50, 65, 50, 88): 'Sleep.INACTIVE_TIME_KEY'}, {}), '(Sleep.INACTIVE_TIME_KEY)', False, 'import pycom\n'), ((83, 80, 83, 96), 'utime.ticks_ms', 'utime.ticks_ms', ({}, {}), '()', False, 'import utime\n')]
Supermaxman/pytorch-gleam
pytorch_gleam/search/rerank_format.py
8b0d8dddc812e8ae120c9760fd44fe93da3f902d
import torch
import argparse
from collections import defaultdict
import os
import json


def load_predictions(input_path):
	pred_list = []

	for file_name in os.listdir(input_path):
		if file_name.endswith('.pt'):
			preds = torch.load(os.path.join(input_path, file_name))
			pred_list.extend(preds)

	question_scores = defaultdict(lambda: defaultdict(dict))
	p_count = 0
	u_count = 0
	for prediction in pred_list:
		doc_pass_id = prediction['id']
		q_p_id = prediction['question_id']
		# score = prediction['pos_score']
		score = prediction['pos_score'] - prediction['neg_score']
		if doc_pass_id not in question_scores or q_p_id not in question_scores[doc_pass_id]:
			p_count += 1
		u_count += 1
		question_scores[doc_pass_id][q_p_id] = score
	print(f'{p_count} unique predictions')
	print(f'{u_count} total predictions')
	return question_scores


def main():
	parser = argparse.ArgumentParser()
	parser.add_argument('-i', '--input_path', required=True)
	parser.add_argument('-o', '--output_path', required=True)
	args = parser.parse_args()

	input_path = args.input_path
	output_path = args.output_path

	question_scores = load_predictions(input_path)
	with open(output_path, 'w') as f:
		json.dump(question_scores, f)


if __name__ == '__main__':
	main()
[((11, 18, 11, 40), 'os.listdir', 'os.listdir', ({(11, 29, 11, 39): 'input_path'}, {}), '(input_path)', False, 'import os\n'), ((34, 10, 34, 35), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ({}, {}), '()', False, 'import argparse\n'), ((44, 2, 44, 31), 'json.dump', 'json.dump', ({(44, 12, 44, 27): 'question_scores', (44, 29, 44, 30): 'f'}, {}), '(question_scores, f)', False, 'import json\n'), ((16, 39, 16, 56), 'collections.defaultdict', 'defaultdict', ({(16, 51, 16, 55): 'dict'}, {}), '(dict)', False, 'from collections import defaultdict\n'), ((13, 22, 13, 57), 'os.path.join', 'os.path.join', ({(13, 35, 13, 45): 'input_path', (13, 47, 13, 56): 'file_name'}, {}), '(input_path, file_name)', False, 'import os\n')]
LeonardoPereirajr/Curso_em_video_Python
des036.py
9d8a97ba3389c8e86b37dfd089fab5d04adc146d
casa = int(input('Qual o valor da casa? '))
sal = int(input('Qual seu salario? '))
prazo = int(input('Quantos meses deseja pagar ? '))
parcela = casa/prazo
margem = sal* (30/100)
if parcela > margem:
    print('Este negocio não foi aprovado, aumente o prazo .')
else:
    print("Negocio aprovado pois a parcela é de R$ {} e voce pode pagar R$ {} mensais".format(parcela,margem))
[]
SukhadaM/HackBit-Interview-Preparation-Portal
HackBitApp/migrations/0003_roadmap.py
f4c6b0d7168a4ea4ffcf1569183b1614752d9946
# Generated by Django 3.1.7 on 2021-03-27 18:22

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('HackBitApp', '0002_company_photo'),
    ]

    operations = [
        migrations.CreateModel(
            name='Roadmap',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('company_name', models.CharField(db_index=True, max_length=200, unique=True)),
                ('photo1', models.ImageField(upload_to='photos/company/roadmap')),
                ('photo2', models.ImageField(blank=True, upload_to='photos/company/roadmap')),
                ('photo3', models.ImageField(blank=True, upload_to='photos/company/roadmap')),
            ],
            options={
                'verbose_name': 'roadmap',
                'verbose_name_plural': 'roadmaps',
                'ordering': ('company_name',),
            },
        ),
    ]
[((16, 23, 16, 112), 'django.db.models.AutoField', 'models.AutoField', (), '', False, 'from django.db import migrations, models\n'), ((17, 33, 17, 93), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((18, 27, 18, 80), 'django.db.models.ImageField', 'models.ImageField', (), '', False, 'from django.db import migrations, models\n'), ((19, 27, 19, 92), 'django.db.models.ImageField', 'models.ImageField', (), '', False, 'from django.db import migrations, models\n'), ((20, 27, 20, 92), 'django.db.models.ImageField', 'models.ImageField', (), '', False, 'from django.db import migrations, models\n')]
Romit-Maulik/Tutorials-Demos-Practice
Other_Python/Kernel_Methods/matrix_operations.py
a58ddc819f24a16f7059e63d7f201fc2cd23e03a
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 22 14:36:48 2020

@author: matth
"""

import autograd.numpy as np

#%% Kernel operations
# Returns the norm of the pairwise difference
def norm_matrix(matrix_1, matrix_2):
    norm_square_1 = np.sum(np.square(matrix_1), axis = 1)
    norm_square_1 = np.reshape(norm_square_1, (-1,1))

    norm_square_2 = np.sum(np.square(matrix_2), axis = 1)
    norm_square_2 = np.reshape(norm_square_2, (-1,1))

    d1=matrix_1.shape
    d2=matrix_2.shape

    if d1[1]!=d2[1]:
        matrix_1=np.transpose(matrix_1)

    inner_matrix = np.matmul(matrix_1, np.transpose(matrix_2))

    norm_diff = -2 * inner_matrix + norm_square_1 + np.transpose(norm_square_2)

    return norm_diff


# Returns the pairwise inner product
def inner_matrix(matrix_1, matrix_2):
    d1=matrix_1.shape
    d2=matrix_2.shape
    if d1[1]!=d2[1]:
        matrix_1=np.transpose(matrix_1)
    return np.matmul(matrix_1, np.transpose(matrix_2))


if __name__ == '__main__':
    print('This is the matrix operations file')
[((14, 20, 14, 53), 'autograd.numpy.reshape', 'np.reshape', ({(14, 31, 14, 44): 'norm_square_1', (14, 46, 14, 52): '(-1, 1)'}, {}), '(norm_square_1, (-1, 1))', True, 'import autograd.numpy as np\n'), ((17, 20, 17, 53), 'autograd.numpy.reshape', 'np.reshape', ({(17, 31, 17, 44): 'norm_square_2', (17, 46, 17, 52): '(-1, 1)'}, {}), '(norm_square_2, (-1, 1))', True, 'import autograd.numpy as np\n'), ((13, 27, 13, 46), 'autograd.numpy.square', 'np.square', ({(13, 37, 13, 45): 'matrix_1'}, {}), '(matrix_1)', True, 'import autograd.numpy as np\n'), ((16, 27, 16, 46), 'autograd.numpy.square', 'np.square', ({(16, 37, 16, 45): 'matrix_2'}, {}), '(matrix_2)', True, 'import autograd.numpy as np\n'), ((23, 17, 23, 39), 'autograd.numpy.transpose', 'np.transpose', ({(23, 30, 23, 38): 'matrix_1'}, {}), '(matrix_1)', True, 'import autograd.numpy as np\n'), ((25, 39, 25, 61), 'autograd.numpy.transpose', 'np.transpose', ({(25, 52, 25, 60): 'matrix_2'}, {}), '(matrix_2)', True, 'import autograd.numpy as np\n'), ((27, 52, 27, 79), 'autograd.numpy.transpose', 'np.transpose', ({(27, 65, 27, 78): 'norm_square_2'}, {}), '(norm_square_2)', True, 'import autograd.numpy as np\n'), ((37, 17, 37, 39), 'autograd.numpy.transpose', 'np.transpose', ({(37, 30, 37, 38): 'matrix_1'}, {}), '(matrix_1)', True, 'import autograd.numpy as np\n'), ((38, 31, 38, 53), 'autograd.numpy.transpose', 'np.transpose', ({(38, 44, 38, 52): 'matrix_2'}, {}), '(matrix_2)', True, 'import autograd.numpy as np\n')]
meyerweb/wpt
cors/resources/cors-makeheader.py
f04261533819893c71289614c03434c06856c13e
import json from wptserve.utils import isomorphic_decode def main(request, response): origin = request.GET.first(b"origin", request.headers.get(b'origin') or b'none') if b"check" in request.GET: token = request.GET.first(b"token") value = request.server.stash.take(token) if value is not None: if request.GET.first(b"check", None) == b"keep": request.server.stash.put(token, value) body = u"1" else: body = u"0" return [(b"Content-Type", b"text/plain")], body if origin != b'none': response.headers.set(b"Access-Control-Allow-Origin", origin) if b'origin2' in request.GET: response.headers.append(b"Access-Control-Allow-Origin", request.GET.first(b'origin2')) #Preflight if b'headers' in request.GET: response.headers.set(b"Access-Control-Allow-Headers", request.GET.first(b'headers')) if b'credentials' in request.GET: response.headers.set(b"Access-Control-Allow-Credentials", request.GET.first(b'credentials')) if b'methods' in request.GET: response.headers.set(b"Access-Control-Allow-Methods", request.GET.first(b'methods')) code_raw = request.GET.first(b'code', None) if code_raw: code = int(code_raw) else: code = None if request.method == u'OPTIONS': #Override the response code if we're in a preflight and it's asked if b'preflight' in request.GET: code = int(request.GET.first(b'preflight')) #Log that the preflight actually happened if we have an ident if b'token' in request.GET: request.server.stash.put(request.GET[b'token'], True) if b'location' in request.GET: if code is None: code = 302 if code >= 300 and code < 400: response.headers.set(b"Location", request.GET.first(b'location')) headers = {} for name, values in request.headers.items(): if len(values) == 1: headers[isomorphic_decode(name)] = isomorphic_decode(values[0]) else: #I have no idea, really headers[name] = values headers[u'get_value'] = isomorphic_decode(request.GET.first(b'get_value', b'')) body = json.dumps(headers) if code: return (code, b"StatusText"), [], body else: return body
[((64, 11, 64, 30), 'json.dumps', 'json.dumps', ({(64, 22, 64, 29): 'headers'}, {}), '(headers)', False, 'import json\n'), ((57, 47, 57, 75), 'wptserve.utils.isomorphic_decode', 'isomorphic_decode', ({(57, 65, 57, 74): 'values[0]'}, {}), '(values[0])', False, 'from wptserve.utils import isomorphic_decode\n'), ((57, 20, 57, 43), 'wptserve.utils.isomorphic_decode', 'isomorphic_decode', ({(57, 38, 57, 42): 'name'}, {}), '(name)', False, 'from wptserve.utils import isomorphic_decode\n')]
wlfyit/PiLightsLib
device_osc_grid.py
98e39af45f05d0ee44e2f166de5b654d58df33ae
#!/usr/bin/env python3

from pythonosc import osc_bundle_builder
from pythonosc import osc_message_builder
from pythonosc import udp_client

from .device import DeviceObj


# OSC Grid Object
class OSCGrid(DeviceObj):
    def __init__(self, name, width, height, ip, port, bri=1):
        DeviceObj.__init__(self, name, "osc_grid", width, height)

        self.buffer = []
        self.brightness = bri
        self.osc = udp_client.SimpleUDPClient(ip, port)

    def set(self, r, g, b, x=0, y=0):
        DeviceObj.set(self, r, g, b, x, y)

        # Set Pixel
        builder = osc_message_builder.OscMessageBuilder(address="/light/{0}/{1}/color".format(x, y))
        builder.add_arg(r)
        builder.add_arg(g)
        builder.add_arg(b)

        self.buffer.append(builder.build())

    def show(self):
        DeviceObj.show(self)

        # Update Display
        bundle = osc_bundle_builder.OscBundleBuilder(0)
        for m in self.buffer:
            bundle.add_content(m)

        self.osc.send(bundle.build())

        self.buffer.clear()
[((17, 19, 17, 55), 'pythonosc.udp_client.SimpleUDPClient', 'udp_client.SimpleUDPClient', ({(17, 46, 17, 48): 'ip', (17, 50, 17, 54): 'port'}, {}), '(ip, port)', False, 'from pythonosc import udp_client\n'), ((35, 17, 35, 55), 'pythonosc.osc_bundle_builder.OscBundleBuilder', 'osc_bundle_builder.OscBundleBuilder', ({(35, 53, 35, 54): '0'}, {}), '(0)', False, 'from pythonosc import osc_bundle_builder\n')]
StevenSume/EasyCMDB
main/models.py
c2c44c9efe2de2729659d81ef886abff242ac1c5
from .app import db


class Project(db.Model):
    __tablename__ = 'projects'
    id = db.Column(db.Integer,primary_key=True,autoincrement=True)
    project_name = db.Column(db.String(64),unique=True,index=True)

    def to_dict(self):
        mydict = {
            'id': self.id,
            'project_name': self.project_name
        }
        return mydict

    def __repr__(self):
        return '<Project %r>' % self.__name__


class Item(db.Model):
    __tablename__ = 'Items'
    id = db.Column(db.Integer, primary_key=True,autoincrement=True)
    project_id = db.Column(db.Integer)
    key = db.Column(db.String(64),nullable=False)
    value = db.Column(db.String(64),nullable=False)

    def to_dict(self):
        mydict = {
            'id': self.id,
            'project_id': self.project_id,
            'key': self.key,
            'value': self.value
        }
        return mydict

    def __repr__(self):
        return '<Item %r>' % self.__name__
[]
iron-io/iron_cache_python
test.py
f68f5a5e216e3189397ffd7d243de0d53bf7c764
from iron_cache import *
import unittest
import requests


class TestIronCache(unittest.TestCase):
    def setUp(self):
        self.cache = IronCache("test_cache")

    def test_get(self):
        self.cache.put("test_item", "testing")
        item = self.cache.get("test_item")
        self.assertEqual(item.value, "testing")

    def test_delete(self):
        self.cache.put("test_item", "will be deleted")
        self.cache.delete("test_item")
        self.assertRaises(requests.exceptions.HTTPError, self.cache.get, "test_item")

    def test_increment(self):
        self.cache.put("test_item", 2)
        self.cache.increment("test_item")
        item = self.cache.get("test_item")
        self.assertEqual(item.value, 3)
        self.cache.increment("test_item", amount=42)
        item = self.cache.get("test_item")
        self.assertEqual(item.value, 45)

    def test_decrement(self):
        self.cache.put("test_item", 100)
        self.cache.decrement("test_item")
        item = self.cache.get("test_item")
        self.assertEqual(item.value, 99)
        self.cache.decrement("test_item", amount=98)
        item = self.cache.get("test_item")
        self.assertEqual(item.value, 1)


if __name__ == '__main__':
    unittest.main()
[((40, 4, 40, 19), 'unittest.main', 'unittest.main', ({}, {}), '()', False, 'import unittest\n')]
sebasmurphy/iarpa
lib_exec/StereoPipeline/libexec/asp_image_utils.py
aca39cc5390a153a9779a636ab2523e65cb6d3b0
#!/usr/bin/env python # -*- coding: utf-8 -*- # __BEGIN_LICENSE__ # Copyright (c) 2009-2013, United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. All # rights reserved. # # The NGT platform is licensed under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance with the # License. You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # __END_LICENSE__ """ Basic functions for working with images on disk. """ import sys, os, re, subprocess, string, time, errno import asp_string_utils def stripRgbImageAlphaChannel(inputPath, outputPath): """Makes an RGB copy of an RBGA image""" cmd = 'gdal_translate ' + inputPath + ' ' + outputPath + ' -b 1 -b 2 -b 3 -co "COMPRESS=LZW" -co "TILED=YES" -co "BLOCKXSIZE=256" -co "BLOCKYSIZE=256"' print cmd os.system(cmd) def getImageSize(imagePath): """Returns the size [samples, lines] in an image""" # Make sure the input file exists if not os.path.exists(imagePath): raise Exception('Image file ' + imagePath + ' not found!') # Use subprocess to suppress the command output cmd = ['gdalinfo', imagePath] p = subprocess.Popen(cmd, stdout=subprocess.PIPE) textOutput, err = p.communicate() # Extract the size from the text sizePos = textOutput.find('Size is') endPos = textOutput.find('\n', sizePos+7) sizeStr = textOutput[sizePos+7:endPos] sizeStrs = sizeStr.strip().split(',') numSamples = int(sizeStrs[0]) numLines = int(sizeStrs[1]) size = [numSamples, numLines] return size def isIsisFile(filePath): """Returns True if the file is an ISIS file, False otherwise.""" # Currently we treat all files with .cub extension as ISIS files extension = os.path.splitext(filePath)[1] return (extension == '.cub') def getImageStats(imagePath): """Obtains some image statistics from gdalinfo""" if not os.path.exists(imagePath): raise Exception('Image file ' + imagePath + ' not found!') # Call command line tool silently cmd = ['gdalinfo', imagePath, '-stats'] p = subprocess.Popen(cmd, stdout=subprocess.PIPE) textOutput, err = p.communicate() # Statistics are computed seperately for each band bandStats = [] band = 0 while (True): # Loop until we run out of bands # Look for the stats line for this band bandString = 'Band ' + str(band+1) + ' Block=' bandLoc = textOutput.find(bandString) if bandLoc < 0: return bandStats # Quit if we did not find it # Now parse out the statistics for this band bandMaxStart = textOutput.find('STATISTICS_MAXIMUM=', bandLoc) bandMeanStart = textOutput.find('STATISTICS_MEAN=', bandLoc) bandMinStart = textOutput.find('STATISTICS_MINIMUM=', bandLoc) bandStdStart = textOutput.find('STATISTICS_STDDEV=', bandLoc) bandMax = asp_string_utils.getNumberAfterEqualSign(textOutput, bandMaxStart) bandMean = asp_string_utils.getNumberAfterEqualSign(textOutput, bandMeanStart) bandMin = asp_string_utils.getNumberAfterEqualSign(textOutput, bandMinStart) bandStd = asp_string_utils.getNumberAfterEqualSign(textOutput, bandStdStart) # Add results to the output list bandStats.append( (bandMin, bandMax, bandMean, bandStd) ) band = band + 1 # Move to the next band
[]
leipzig/gatk-sv
src/sv-pipeline/04_variant_resolution/scripts/merge_RdTest_genotypes.py
96566cbbaf0f8f9c8452517b38eea1e5dd6ed33a
#!/usr/bin/env python

import argparse


DELIMITER = "\t"


def merge(genotypes_filename, gq_filename, merged_filename):
    with open(genotypes_filename, "r") as genotypes, open(gq_filename, "r") as gq, open(merged_filename, "w") as merged:
        # Integrity check: do the files have same columns?
        genotypes_header = genotypes.readline().rstrip().split(DELIMITER)
        gq_header = gq.readline().rstrip().split(DELIMITER)

        if not genotypes_header == gq_header:
            raise ValueError("The files do not have same number/order of columns")
        n_cols = len(gq_header)

        for genotypes_line, gq_line in zip(genotypes, gq):
            x = genotypes_line.rstrip().split(DELIMITER)
            y = gq_line.rstrip().split(DELIMITER)

            # Check if lines in the files are in the correct order.
            if not x[0:4] == y[0:4]:
                raise ValueError(f"The lines in the files are not in the same order; "
                                 f"expected the following lines to match.\n{x[0:4]}\n{y[0:4]}")

            h = DELIMITER.join(x[0:4])
            for i in range(4, n_cols):
                merged.write(DELIMITER.join([h, gq_header[i], x[i], y[i]]) + "\n")


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('genotypes')
    parser.add_argument('GQ')
    parser.add_argument('fout')
    args = parser.parse_args()

    merge(args.genotypes, args.GQ, args.fout)
[((34, 13, 36, 61), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n')]
fidelisrafael/esperanto-analyzer
esperanto_analyzer/web/__init__.py
af1e8609ec0696e3d1975aa0ba0c88e5f04f8468
from .api.server import run_app
[]
CSID-DGU/2021-2-OSSP2-TwoRolless-2
crawling/sns/main.py
e9381418e3899d8e1e78415e9ab23b73b4f30a95
import tweepy import traceback import time import pymongo from tweepy import OAuthHandler from pymongo import MongoClient from pymongo.cursor import CursorType twitter_consumer_key = "" twitter_consumer_secret = "" twitter_access_token = "" twitter_access_secret = "" auth = OAuthHandler(twitter_consumer_key, twitter_consumer_secret) auth.set_access_token(twitter_access_token, twitter_access_secret) api = tweepy.API(auth) def crawllTwit(snsname, findtag): account = snsname tweets = api.user_timeline(screen_name=account, count=100, include_rts=False, exclude_replies=True, tweet_mode='extended') snsList = [] snsTime = [] url = [] pic = [] i = 0 for tweet in tweets: flag = tweet.full_text.find(findtag) if flag >= 0: ttp = tweet.full_text.split("https://") gong = "" count = 0 for slist in ttp: if count == (len(ttp) - 1): break gong = gong + slist count += 1 snsList.append(gong) snsTime.append(tweet.created_at) tmp = f"https://twitter.com/{tweet.user.screen_name}/status/{tweet.id}" url.append(tmp) i += 1 media = tweet.entities.get('media', []) if (len(media) > 0): pic.append(media[0]['media_url']) else: pic.append("") j = 0 while j < len(snsList): if j == 10: break snsList[j] = snsList[j].replace('&lt;', '<') snsList[j] = snsList[j].replace('&gt;', '>') snsList[j] = snsList[j].replace('▶️', ' ⇒ ') j += 1 mydb = my_client['TwoRolless'] mycol = mydb['sns'] for k in range(0, len(snsList)): if k == 15: break x = mycol.insert_one( { "tag": findtag, "time": snsTime[k], "text": snsList[k], "img": pic[k], "url": url[k] } ) conn_str = "" my_client = pymongo.MongoClient(conn_str) if __name__ == '__main__': while True: print("cycles start") mydb = my_client['TwoRolless'] mycol = mydb['sns'] mycol.remove({}) crawllTwit("@m_thelastman", "더라스트맨") crawllTwit("@Musical_NarGold", "나르치스와_골드문트") crawllTwit("@rndworks", "더데빌") crawllTwit("@ninestory9", "엘리펀트송") crawllTwit("@companyrang", "쿠로이저택엔누가살고있을까") crawllTwit("@companyrang", "난쟁이들") crawllTwit("@page1company", "곤투모로우") crawllTwit("@HONGcompany", "더모먼트") crawllTwit("@orchardmusical", "칠칠") crawllTwit("@livecorp2011", "팬레터") crawllTwit("@shownote", "젠틀맨스가이드") crawllTwit("@od_musical", "지킬앤하이드") crawllTwit("@kontentz", "엔딩노트") crawllTwit("@i_seensee", "빌리") crawllTwit("@doublek_ent", "은하철도의") crawllTwit("@Insight_Since96", "뱀파이어아더") print("cycle end") print("sleep 30 seconds") time.sleep(30) print("sleep end")
[((14, 7, 14, 66), 'tweepy.OAuthHandler', 'OAuthHandler', ({(14, 20, 14, 40): 'twitter_consumer_key', (14, 42, 14, 65): 'twitter_consumer_secret'}, {}), '(twitter_consumer_key, twitter_consumer_secret)', False, 'from tweepy import OAuthHandler\n'), ((16, 6, 16, 22), 'tweepy.API', 'tweepy.API', ({(16, 17, 16, 21): 'auth'}, {}), '(auth)', False, 'import tweepy\n'), ((77, 12, 77, 41), 'pymongo.MongoClient', 'pymongo.MongoClient', ({(77, 32, 77, 40): 'conn_str'}, {}), '(conn_str)', False, 'import pymongo\n'), ((103, 8, 103, 22), 'time.sleep', 'time.sleep', ({(103, 19, 103, 21): '(30)'}, {}), '(30)', False, 'import time\n')]
jepabe/Demo_earth2
demos/interactive-classifier/config.py
ab20c3a9114904219688b16f8a1273e68927e6f9
#!/usr/bin/env python
"""Handles Earth Engine service account configuration."""

import ee

# The service account email address authorized by your Google contact.
# Set up a service account as described in the README.
EE_ACCOUNT = '[email protected]'

# The private key associated with your service account in Privacy Enhanced
# Email format (.pem suffix). To convert a private key from the RSA format
# (.p12 suffix) to .pem, run the openssl command like this:
# openssl pkcs12 -in downloaded-privatekey.p12 -nodes -nocerts > privatekey.pem
EE_PRIVATE_KEY_FILE = 'privatekey.pem'

EE_CREDENTIALS = ee.ServiceAccountCredentials(EE_ACCOUNT, EE_PRIVATE_KEY_FILE)
[((16, 17, 16, 78), 'ee.ServiceAccountCredentials', 'ee.ServiceAccountCredentials', ({(16, 46, 16, 56): 'EE_ACCOUNT', (16, 58, 16, 77): 'EE_PRIVATE_KEY_FILE'}, {}), '(EE_ACCOUNT, EE_PRIVATE_KEY_FILE)', False, 'import ee\n')]
Neo-sunny/pythonProgs
PythonScripting/NumbersInPython.py
a9d2359d8a09d005d0ba6f94d7d256bf91499793
""" Demonstration of numbers in Python """ # Python has an integer type called int print("int") print("---") print(0) print(1) print(-3) print(70383028364830) print("") # Python has a real number type called float print("float") print("-----") print(0.0) print(7.35) print(-43.2) print("") # Limited precision print("Precision") print("---------") print(4.56372883832331773) print(1.23456789012345678) print("") # Scientific/exponential notation print("Scientific notation") print("-------------------") print(5e32) print(999999999999999999999999999999999999999.9) print("") # Infinity print("Infinity") print("--------") print(1e500) print(-1e500) print("") # Conversions print("Conversions between numeric types") print("---------------------------------") print(float(3)) print(float(99999999999999999999999999999999999999)) print(int(3.0)) print(int(3.7)) print(int(-3.7)) """ Demonstration of simple arithmetic expressions in Python """ # Unary + and - print("Unary operators") print(+3) print(-5) print(+7.86) print(-3348.63) print("") # Simple arithmetic print("Addition and Subtraction") print(1 + 2) print(48 - 89) print(3.45 + 2.7) print(87.3384 - 12.35) print(3 + 6.7) print(9.8 - 4) print("") print("Multiplication") print(3 * 2) print(7.8 * 27.54) print(7 * 8.2) print("") print("Division") print(8 / 2) print(3 / 2) print(7.538 / 14.3) print(8 // 2) print(3 // 2) print(7.538 // 14.3) print("") print("Exponentiation") print(3 ** 2) print(5 ** 4) print(32.6 ** 7) print(9 ** 0.5) """ Demonstration of compound arithmetic expressions in Python """ # Expressions can include multiple operations print("Compound expressions") print(3 + 5 + 7 + 27) #Operator with same precedence are evaluated from left to right print(18 - 6 + 4) print("") # Operator precedence defines how expressions are evaluated print("Operator precedence") print(7 + 3 * 5) print(5.5 * 6 // 2 + 8) print(-3 ** 2) print("") # Use parentheses to change evaluation order print("Grouping with parentheses") print((7 + 3) * 5) print(5.5 * ((6 // 2) + 8)) print((-3) ** 2) """ Demonstration of the use of variables and how to assign values to them. """ # The = operator can be used to assign values to variables bakers_dozen = 12 + 1 temperature = 93 # Variables can be used as values and in expressions print(temperature, bakers_dozen) print("celsius:", (temperature - 32) * 5 / 9) print("fahrenheit:", float(temperature)) # You can assign a different value to an existing variable temperature = 26 print("new value:", temperature) # Multiple variables can be used in arbitrary expressions offset = 32 multiplier = 5.0 / 9.0 celsius = (temperature - offset) * multiplier print("celsius value:", celsius)
[]
JoZimmer/Beam-Models
3DBeam/source/solving_strategies/strategies/linear_solver.py
e701c0bae6e3035e7a07cc590da4a132b133dcff
from source.solving_strategies.strategies.solver import Solver


class LinearSolver(Solver):
    def __init__(self, array_time, time_integration_scheme, dt,
                 comp_model, initial_conditions, force, structure_model):
        super().__init__(array_time, time_integration_scheme, dt,
                         comp_model, initial_conditions, force, structure_model)

    def _print_solver_info(self):
        print("Linear Solver")

    def solve(self):
        # time loop
        for i in range(0, len(self.array_time)):
            self.step = i
            current_time = self.array_time[i]
            #print("time: {0:.2f}".format(current_time))
            self.scheme.solve_single_step(self.force[:, i])

            # appending results to the list
            self.displacement[:, i] = self.scheme.get_displacement()
            self.velocity[:, i] = self.scheme.get_velocity()
            self.acceleration[:, i] = self.scheme.get_acceleration()

            # TODO: only calculate reaction when user wants it
            # if self.structure_model is not None:
            #     self.dynamic_reaction[:, i] = self._compute_reaction()
            # reaction computed in dynamic analysis

        # TODO: only calculate reaction when user wants it
        # moved reaction computation to dynamic analysis level
        # AK . this doesnt considers the support reaction check
        #if self.structure_model is not None:
        #    self.dynamic_reaction[:, i] = self._compute_reaction()

        # update results
        self.scheme.update()
[]
Littledelma/mofadog
payment/migrations/0002_auto_20171125_0022.py
5a7c6672da248e400a8a5746506a6e7b273c9510
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-24 16:22
from __future__ import unicode_literals

import datetime
from django.db import migrations, models
from django.utils.timezone import utc


class Migration(migrations.Migration):

    dependencies = [
        ('payment', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='history_order',
            name='dead_date',
            field=models.DateTimeField(default=datetime.datetime(2017, 11, 24, 16, 22, 1, 719840, tzinfo=utc), verbose_name='daed_date'),
        ),
        migrations.AlterField(
            model_name='history_order',
            name='order_date',
            field=models.DateTimeField(default=datetime.datetime(2017, 11, 24, 16, 22, 1, 719662, tzinfo=utc), verbose_name='order date'),
        ),
        migrations.AlterField(
            model_name='history_order',
            name='valid_date',
            field=models.DateTimeField(default=datetime.datetime(2017, 11, 24, 16, 22, 1, 719758, tzinfo=utc), verbose_name='valid_date'),
        ),
    ]
[((20, 47, 20, 109), 'datetime.datetime', 'datetime.datetime', (), '', False, 'import datetime\n'), ((25, 47, 25, 109), 'datetime.datetime', 'datetime.datetime', (), '', False, 'import datetime\n'), ((30, 47, 30, 109), 'datetime.datetime', 'datetime.datetime', (), '', False, 'import datetime\n')]
NathanHowell/sqlfluff
src/sqlfluff/rules/L024.py
9eb30226d77727cd613947e144a0abe483151f18
"""Implementation of Rule L024.""" from sqlfluff.core.rules.doc_decorators import document_fix_compatible from sqlfluff.rules.L023 import Rule_L023 @document_fix_compatible class Rule_L024(Rule_L023): """Single whitespace expected after USING in JOIN clause. | **Anti-pattern** .. code-block:: sql SELECT b FROM foo LEFT JOIN zoo USING(a) | **Best practice** | The • character represents a space. | Add a space after USING, to avoid confusing it | for a function. .. code-block:: sql :force: SELECT b FROM foo LEFT JOIN zoo USING•(a) """ expected_mother_segment_type = "join_clause" pre_segment_identifier = ("name", "using") post_segment_identifier = ("type", "bracketed") expand_children = None allow_newline = True
[]
mikeireland/chronostar
projects/scocen/cmd_components_simple.py
fcf37614e1d145f3a5e265e54512bf8cd98051a0
""" Plot CMDs for each component. """ import numpy as np from astropy.table import Table import matplotlib.pyplot as plt import matplotlib.cm as cm plt.ion() # Pretty plots from fig_settings import * ############################################ # Some things are the same for all the plotting scripts and we put # this into a single library to avoid confusion. import scocenlib as lib data_filename = lib.data_filename comps_filename = lib.comps_filename compnames = lib.compnames colors = lib.colors ############################################ # Minimal probability required for membership pmin_membership = 0.5 ############################################ # how to split subplots grid = [5, 5] # CMD limits xlim = [-1, 5] ylim = [17, -3] ############################################ # Read data try: tab = tab0 comps = comps0 except: tab0 = Table.read(data_filename) Gmag = tab0['phot_g_mean_mag'] - 5 * np.log10(1.0 / (tab0['parallax'] * 1e-3) / 10) # tab['parallax'] in micro arcsec tab0['Gmag'] = Gmag comps0 = Table.read(comps_filename) tab = tab0 comps = comps0 # Main sequence parametrization # fitpar for pmag, rpmag fitpar = [0.17954163, -2.48748376, 12.9279348, -31.35434182, 38.31330583, -12.25864507] poly = np.poly1d(fitpar) x = np.linspace(1, 4, 100) y = poly(x) m = y > 4 yms = y[m] xms = x[m] def plot_MS_parametrisation_and_spectral_types(ax, xlim, ylim): ax.plot(xms, yms, c='brown', label='Median main sequence', linewidth=1) ax.plot(xms, yms - 1, c='brown', label='1 mag above the median', linewidth=1, linestyle='--') ax.plot(xms, yms - 1.5, c='brown', label='1.5 mag above the median', linewidth=1, linestyle='--') ax.axvline(x=0.369, linewidth=0.5, color='k') # F ax.axvline(x=0.767, linewidth=0.5, color='k') # G ax.axvline(x=0.979, linewidth=0.5, color='k') # K ax.axvline(x=1.848, linewidth=0.5, color='k') # M ax.set_xlim(xlim[0], xlim[1]) ax.set_ylim(ylim[0], ylim[1]) return ax print('Plotting %d components.'%len(comps)) fig=plt.figure() for i, c in enumerate(comps): ax = fig.add_subplot(grid[0], grid[1], i+1) # TODO: adjust this if needed comp_ID = c['comp_ID'] col=tab['membership%s'%comp_ID] mask = col > pmin_membership t=tab[mask] if len(t)>100: alpha=0.5 else: alpha=1 t.sort('membership%s'%comp_ID) #~ t.reverse() #~ ax.scatter(t['bp_rp'], t['Gmag'], s=1, c='k', alpha=alpha) ax.scatter(t['bp_rp'], t['Gmag'], s=1, c=t['membership%s'%comp_ID], alpha=1, vmin=0.5, vmax=1, cmap=cm.jet) ax=plot_MS_parametrisation_and_spectral_types(ax, xlim, ylim) age=c['Age'] ax.set_title('%s (%.2f$\pm$%.2f Myr %s) %d'%(comp_ID, age, c['Crossing_time'], c['Age_reliable'], len(t))) #~ plt.tight_layout() plt.show()
[((9, 0, 9, 9), 'matplotlib.pyplot.ion', 'plt.ion', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((54, 7, 54, 24), 'numpy.poly1d', 'np.poly1d', ({(54, 17, 54, 23): 'fitpar'}, {}), '(fitpar)', True, 'import numpy as np\n'), ((55, 4, 55, 26), 'numpy.linspace', 'np.linspace', ({(55, 16, 55, 17): '1', (55, 19, 55, 20): '4', (55, 22, 55, 25): '100'}, {}), '(1, 4, 100)', True, 'import numpy as np\n'), ((78, 4, 78, 16), 'matplotlib.pyplot.figure', 'plt.figure', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((105, 0, 105, 10), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((40, 11, 40, 36), 'astropy.table.Table.read', 'Table.read', ({(40, 22, 40, 35): 'data_filename'}, {}), '(data_filename)', False, 'from astropy.table import Table\n'), ((45, 13, 45, 39), 'astropy.table.Table.read', 'Table.read', ({(45, 24, 45, 38): 'comps_filename'}, {}), '(comps_filename)', False, 'from astropy.table import Table\n'), ((42, 41, 42, 87), 'numpy.log10', 'np.log10', ({(42, 50, 42, 86): "(1.0 / (tab0['parallax'] * 0.001) / 10)"}, {}), "(1.0 / (tab0['parallax'] * 0.001) / 10)", True, 'import numpy as np\n')]
rhlahuja/snowflake-connector-python
test/test_cursor_binding.py
6abc56c970cdb698a833b7f6ac9cbe7dfa667abd
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright (c) 2012-2018 Snowflake Computing Inc. All right reserved. # import pytest from snowflake.connector.errors import (ProgrammingError) def test_binding_security(conn_cnx, db_parameters): """ SQL Injection Tests """ try: with conn_cnx() as cnx: cnx.cursor().execute( "CREATE OR REPLACE TABLE {name} " "(aa INT, bb STRING)".format( name=db_parameters['name'])) cnx.cursor().execute( "INSERT INTO {name} VALUES(%s, %s)".format( name=db_parameters['name']), (1, 'test1')) cnx.cursor().execute( "INSERT INTO {name} VALUES(%(aa)s, %(bb)s)".format( name=db_parameters['name']), {'aa': 2, 'bb': 'test2'}) for rec in cnx.cursor().execute( "SELECT * FROM {name} ORDER BY 1 DESC".format( name=db_parameters['name'])): break assert rec[0] == 2, 'First column' assert rec[1] == 'test2', 'Second column' for rec in cnx.cursor().execute( "SELECT * FROM {name} WHERE aa=%s".format( name=db_parameters['name']), (1,)): break assert rec[0] == 1, 'First column' assert rec[1] == 'test1', 'Second column' # SQL injection safe test # Good Example with pytest.raises(ProgrammingError): cnx.cursor().execute( "SELECT * FROM {name} WHERE aa=%s".format( name=db_parameters['name']), ("1 or aa>0",)) with pytest.raises(ProgrammingError): cnx.cursor().execute( "SELECT * FROM {name} WHERE aa=%(aa)s".format( name=db_parameters['name']), {"aa": "1 or aa>0"}) # Bad Example in application. DON'T DO THIS c = cnx.cursor() c.execute("SELECT * FROM {name} WHERE aa=%s".format( name=db_parameters['name']) % ("1 or aa>0",)) rec = c.fetchall() assert len(rec) == 2, "not raising error unlike the previous one." finally: with conn_cnx() as cnx: cnx.cursor().execute( "drop table if exists {name}".format( name=db_parameters['name'])) def test_binding_list(conn_cnx, db_parameters): """ SQL binding list type for IN """ try: with conn_cnx() as cnx: cnx.cursor().execute( "CREATE OR REPLACE TABLE {name} " "(aa INT, bb STRING)".format( name=db_parameters['name'])) cnx.cursor().execute( "INSERT INTO {name} VALUES(%s, %s)".format( name=db_parameters['name']), (1, 'test1')) cnx.cursor().execute( "INSERT INTO {name} VALUES(%(aa)s, %(bb)s)".format( name=db_parameters['name']), {'aa': 2, 'bb': 'test2'}) cnx.cursor().execute( "INSERT INTO {name} VALUES(3, 'test3')".format( name=db_parameters['name'])) for rec in cnx.cursor().execute(""" SELECT * FROM {name} WHERE aa IN (%s) ORDER BY 1 DESC """.format(name=db_parameters['name']), ([1, 3],)): break assert rec[0] == 3, 'First column' assert rec[1] == 'test3', 'Second column' for rec in cnx.cursor().execute( "SELECT * FROM {name} WHERE aa=%s".format( name=db_parameters['name']), (1,)): break assert rec[0] == 1, 'First column' assert rec[1] == 'test1', 'Second column' rec = cnx.cursor().execute(""" SELECT * FROM {name} WHERE aa IN (%s) ORDER BY 1 DESC """.format(name=db_parameters['name']), ((1,),)) finally: with conn_cnx() as cnx: cnx.cursor().execute( "drop table if exists {name}".format( name=db_parameters['name'])) def test_unsupported_binding(conn_cnx, db_parameters): """ Unsupported data binding """ try: with conn_cnx() as cnx: cnx.cursor().execute( "CREATE OR REPLACE TABLE {name} " "(aa INT, bb STRING)".format( name=db_parameters['name'])) cnx.cursor().execute( "INSERT INTO {name} VALUES(%s, %s)".format( name=db_parameters['name']), (1, 'test1')) sql = 'select count(*) from {name} where aa=%s'.format( name=db_parameters['name']) with cnx.cursor() as cur: rec = cur.execute(sql, (1,)).fetchone() assert rec[0] is not None, 'no value is returned' # dict with 
pytest.raises(ProgrammingError): cnx.cursor().execute(sql, ({'value': 1},)) finally: with conn_cnx() as cnx: cnx.cursor().execute( "drop table if exists {name}".format( name=db_parameters['name']))
[((44, 17, 44, 48), 'pytest.raises', 'pytest.raises', ({(44, 31, 44, 47): 'ProgrammingError'}, {}), '(ProgrammingError)', False, 'import pytest\n'), ((50, 17, 50, 48), 'pytest.raises', 'pytest.raises', ({(50, 31, 50, 47): 'ProgrammingError'}, {}), '(ProgrammingError)', False, 'import pytest\n'), ((138, 17, 138, 48), 'pytest.raises', 'pytest.raises', ({(138, 31, 138, 47): 'ProgrammingError'}, {}), '(ProgrammingError)', False, 'import pytest\n')]
hectormartinez/rougexstem
taln2016/icsisumm-primary-sys34_v1/nltk/nltk-0.9.2/nltk/model/__init__.py
32da9eab253cb88fc1882e59026e8b5b40900a25
# Natural Language Toolkit: Language Models
#
# Copyright (C) 2001-2008 University of Pennsylvania
# Author: Steven Bird <[email protected]>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT

class ModelI(object):
    """
    A processing interface for assigning a probability to the next word.
    """

    def __init__(self):
        '''Create a new language model.'''
        raise NotImplementedError()

    def train(self, text):
        '''Train the model on the text.'''
        raise NotImplementedError()

    def probability(self, word, context):
        '''Evaluate the probability of this word in this context.'''
        raise NotImplementedError()

    def choose_random_word(self, context):
        '''Randomly select a word that is likely to appear in this context.'''
        raise NotImplementedError()

    def entropy(self, text):
        '''Evaluate the total entropy of a message with respect to the model.
        This is the sum of the log probability of each word in the message.'''
        raise NotImplementedError()
[]
JovaniPink/flask-apps
flask-graphene-sqlalchemy/models.py
de887f15261c286986cf38d234d49f7e4eb79c1a
import os
from graphene_sqlalchemy import SQLAlchemyObjectType
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base

POSTGRES_CONNECTION_STRING = (
    os.environ.get("POSTGRES_CONNECTION_STRING")
    or "postgres://postgres:password@localhost:6432/postgres"
)

engine = create_engine(POSTGRES_CONNECTION_STRING, convert_unicode=True)
db_session = scoped_session(
    sessionmaker(autocommit=False, autoflush=False, bind=engine)
)
Base = declarative_base()
Base.query = db_session.query_property()


class UserModel(Base):
    __tablename__ = "users"
    id = Column(Integer, primary_key=True)
    name = Column(String)
    balance = Column(Integer)


class MinAmountModel(Base):
    __tablename__ = "min_amount"
    amount = Column(Integer, primary_key=True)


class User(SQLAlchemyObjectType):
    class Meta:
        model = UserModel


class MinAmount(SQLAlchemyObjectType):
    class Meta:
        model = MinAmountModel
[((12, 9, 12, 72), 'sqlalchemy.create_engine', 'create_engine', (), '', False, 'from sqlalchemy import Column, Integer, String, create_engine\n'), ((16, 7, 16, 25), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ({}, {}), '()', False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((8, 4, 8, 48), 'os.environ.get', 'os.environ.get', ({(8, 19, 8, 47): '"""POSTGRES_CONNECTION_STRING"""'}, {}), "('POSTGRES_CONNECTION_STRING')", False, 'import os\n'), ((14, 4, 14, 64), 'sqlalchemy.orm.sessionmaker', 'sessionmaker', (), '', False, 'from sqlalchemy.orm import scoped_session, sessionmaker\n'), ((22, 9, 22, 42), 'sqlalchemy.Column', 'Column', (), '', False, 'from sqlalchemy import Column, Integer, String, create_engine\n'), ((23, 11, 23, 25), 'sqlalchemy.Column', 'Column', ({(23, 18, 23, 24): 'String'}, {}), '(String)', False, 'from sqlalchemy import Column, Integer, String, create_engine\n'), ((24, 14, 24, 29), 'sqlalchemy.Column', 'Column', ({(24, 21, 24, 28): 'Integer'}, {}), '(Integer)', False, 'from sqlalchemy import Column, Integer, String, create_engine\n'), ((29, 13, 29, 46), 'sqlalchemy.Column', 'Column', (), '', False, 'from sqlalchemy import Column, Integer, String, create_engine\n')]
sean-mackenzie/curlypiv
curlypiv/synthetics/microsig.py
21c96c1bb1ba2548c4d5bebb389eb66ff58f851d
# microsig """ Author: Maximilliano Rossi More detail about the MicroSIG can be found at: Website: https://gitlab.com/defocustracking/microsig-python Publication: Rossi M, Synthetic image generator for defocusing and astigmatic PIV/PTV, Meas. Sci. Technol., 31, 017003 (2020) DOI:10.1088/1361-6501/ab42bb. """ import numpy as np import imageio import tkinter as tk import os from os import listdir from os.path import isfile, basename, join, isdir import sys import glob # import time as tm from tkinter import filedialog # ----- code adapted by Sean MacKenzie ------ # 2.0 define class class CurlypivMicrosigCollection(object): def __init__(self, testSetup, synCol, use_gui=False, use_internal_setting=False, setting_file=None, use_internal_data=False, data_files=None, to_internal_sequence=False, destination_folder=None, output_dtype='np.uint16'): if not isinstance(testSetup, object): raise ValueError("{} must be a CurlypivTestSetup class object".format(testSetup)) if not isinstance(synCol, object): raise ValueError("{} must be a CurlypivSyntheticCollection class object".format(synCol)) valid_output_dtype = ['np.uint16', 'np.uint8'] if output_dtype not in valid_output_dtype: raise ValueError("{} must be one of {}".format(output_dtype, valid_output_dtype)) self.testSetup = testSetup self.synCol = synCol self.use_gui = use_gui self.output_dtype = output_dtype if self.use_gui: run() else: if use_internal_setting: self.setting_file = self.synCol.microsigSetup else: if not isinstance(setting_file, str): raise ValueError("{} must be a filepath to microsig settings text file".format(setting_file)) self.setting_file = os.path.abspath(setting_file) if use_internal_data: raise ValueError("script to use internal data still in development") else: if not isinstance(data_files, str): raise ValueError("{} must be a filepath to particle location text files".format(data_files)) all_files = glob.glob(data_files + '/*.txt') save_files = [] for ff in [f for f in all_files if f.endswith('.txt')]: save_files.append(ff) save_files.sort() self.data_files = save_files if to_internal_sequence: raise ValueError("script to use internal data still in development") else: if not isinstance(destination_folder, str): raise ValueError("{} must be a filepath to write output images".format(destination_folder)) self.destination_folder = os.path.abspath(destination_folder) self.generate() def generate(self): # %% mic = {} f = open(self.setting_file) for x in f: words = x.split() mic[words[0]] = float(words[2]) mic['pixel_dim_x'] = int(mic['pixel_dim_x']) mic['pixel_dim_y'] = int(mic['pixel_dim_y']) mic['n_rays'] = int(mic['n_rays']) # %% ii = 0; ii_tot = len(self.data_files) for data in self.data_files: ii = ii + 1 print('creating image {0} of {1} ...'.format(ii, ii_tot)) P = np.genfromtxt(data) if len(P.shape) == 1: P = np.array([P]) head, tail = os.path.split(data) I = take_image(mic, P) if self.output_dtype == 'np.uint16': imageio.imwrite(os.path.join(self.destination_folder, (tail[:-3] + 'tif')), np.uint16(I)) elif self.output_dtype == 'np.uint8': imageio.imwrite(os.path.join(self.destination_folder, (tail[:-3] + 'tif')), np.uint8(I)) print('done!') # %% def sorter(f): sorting = int(f[:-4]) return sorting def run(): # %% root = tk.Tk() root.attributes('-topmost', True) root.withdraw() setting_file = filedialog.askopenfilenames( title="Select settings file", parent=root, filetypes=(("txt files", "*.txt"), ("all files", "*.*"))) if not setting_file: sys.exit('input file not valid') data_files = filedialog.askopenfilenames( 
title="Select data file(s)", parent=root, filetypes=(("txt files", "*.txt"), ("all files", "*.*"))) if not setting_file: sys.exit('input file not valid') destination_folder = filedialog.askdirectory( title="Select destination file", parent=root) if not setting_file: sys.exit('input file not valid') # %% mic = {} f = open(setting_file[0]) for x in f: words = x.split() mic[words[0]] = float(words[2]) mic['pixel_dim_x'] = int(mic['pixel_dim_x']) mic['pixel_dim_y'] = int(mic['pixel_dim_y']) mic['n_rays'] = int(mic['n_rays']) # %% ii = 0; ii_tot = len(data_files) for data in data_files: ii = ii + 1 print('creating image {0} of {1} ...'.format(ii, ii_tot)) P = np.genfromtxt(data) if len(P.shape) == 1: P = np.array([P]) head, tail = os.path.split(data) I = take_image(mic, P) print('done!') # %% def take_image(mic, P): # NOTE: x and xp represent here light fields and should not be confused$ # with particle image coordinates which are represented by P I = np.zeros((mic['pixel_dim_y'], mic['pixel_dim_x'])); dp_s = np.unique(P[:, 3]) if P.shape[1] == 5 or P.shape[1] == 8: k_id = P[:, -1] else: k_id = np.ones(P.shape[0]) if P.shape[1] <= 5 and dp_s.size == 1: n_points = int(np.round(mic['points_per_pixel'] * 2 * np.pi * (dp_s * mic['magnification'] / mic['pixel_size']) ** 2)) xp = create_particle(dp_s, n_points, mic['n_rays']) for ii in range(0, P.shape[0]): Id = image_spherical(mic, xp, P[ii, 0:3]) I = I + Id * k_id[ii] elif P.shape[1] <= 5 and dp_s.size != 1: for ii in range(0, P.shape[0]): n_points = int(np.round(mic['points_per_pixel'] * 2 * np.pi * (P[ii, 3] * mic['magnification'] / mic['pixel_size']) ** 2)) xp = create_particle(P[ii, 3], n_points, mic['n_rays']) Id = image_spherical(mic, xp, P[ii, 0:3]) I = I + Id * k_id[ii] elif P.shape[1] >= 7: for ii in range(0, P.shape[0]): n_points = int(np.round(mic['points_per_pixel'] * 2 * np.pi * (P[ii, 3] * mic['magnification'] / mic['pixel_size']) ** 2)) ecc = P[ii, 4] if ecc > 1: # area elipsoid/area sphere fact = 1 / 2 * (1 + ecc / np.sqrt(1 - 1 / ecc ** 2) * np.arcsin(np.sqrt(1 - 1 / ecc ** 2))) n_points = int(np.round(fact * n_points)) elif ecc < 1: # area elipsoid/area sphere fact = 1 / 2 * (1 + ecc ** 2 / np.sqrt(1 - ecc ** 2) * np.arctan(np.sqrt(1 - ecc ** 2))) n_points = int(np.round(fact * n_points)) xp = create_ellipsoid(P[ii, 3:7], n_points, mic['n_rays']) Id = image_spherical(mic, xp, P[ii, 0:3]); I = I + Id * k_id[ii] I = I * mic['gain'] if mic['background_mean'] != 0: I = I + mic['background_mean'] if mic['background_noise'] != 0: Irand = np.random.normal(0, mic['background_noise'], (mic['pixel_dim_y'], mic['pixel_dim_x'])) I = I + np.round(Irand) # I = np.round(I+random('norm',0,mic.background_noise,... 
# mic.pixel_dim_y,mic.pixel_dim_x)); return I # %% def image_spherical(mic, xp, P1): # take image of a particle with a spherical lens # NOTE: x and xp represent here light fields and should not be confused$ # with particle image coordinates which are represented by P1 lens_radius = (np.tan(np.arcsin(mic['numerical_aperture'])) * (1 + 1 / mic['magnification']) * mic['focal_length']) # distance lens-ccd dCCD = -mic['focal_length'] * (mic['magnification'] + 1); # distance particle-lens dPART = P1[2] + mic['focal_length'] * (1 / mic['magnification'] + 1); # linear transformation from the object plane to the lens plane T2 = np.array([[1, 0, dPART, 0], [0, 1, 0, dPART], [0, 0, 1, 0], [0, 0, 0, 1]]) # light field right before the lens x = np.linalg.inv(T2) @ xp # remove rays outside of the lens aperture ind = x[0, :] ** 2 + x[1, :] ** 2 <= lens_radius ** 2 x = x[:, ind] # transformation of the light field with spherical lens a = x[0, :]; b = x[1, :] c = x[2, :]; d = x[3, :] # radius of curvature of the lens rk = mic['focal_length'] * (mic['ri_lens'] / mic['ri_medium'] - 1) * 2 dum = a * 0 # refraction medium-lens # ray-vector befor lens Vr = np.vstack((1 + dum, c, d)) Vr = (Vr / np.tile(np.sqrt(sum(Vr ** 2)), (3, 1))) # normal-vector to the lens surface Vl = np.vstack((rk + dum, a, b)) Vl = (Vl / np.tile(np.sqrt(sum(Vl ** 2)), (3, 1))) # tangent-vector to the lens surface Vrot = np.cross(Vr, Vl, axisa=0, axisb=0) Vrot = np.cross(Vrot, Vl, axisa=1, axisb=0).transpose() Vrot = Vrot / np.tile(np.sqrt(sum(Vrot ** 2)), (3, 1)) # angle after snell-law correction vx = np.sum(Vr * Vl, axis=0) # dot product! vy = np.sum(Vr * Vrot, axis=0) # dot product! th11 = np.arcsin(mic['ri_medium'] / mic['ri_lens'] * np.sin(np.arctan(vy / vx))) # new ray-vector inside the lens Vr11 = (Vl * np.tile(np.cos(th11), (3, 1)) + Vrot * np.tile(np.sin(th11), (3, 1))) Vr = Vr11 / np.tile(Vr11[0, :], (3, 1)) # refraction lens-medium # normal-vector to the lens surface Vl2 = np.vstack((Vl[0, :], -Vl[1:, :])) # tangent-vector to the lens surface Vrot = np.cross(Vr, Vl2, axisa=0, axisb=0) Vrot = np.cross(Vrot, Vl2, axisa=1, axisb=0).transpose() Vrot = Vrot / np.tile(np.sqrt(sum(Vrot ** 2)), (3, 1)) # angle after snell-law correction vx = np.sum(Vr * Vl2, axis=0) # dot product! vy = np.sum(Vr * Vrot, axis=0) # dot product! 
th11 = np.arcsin(mic['ri_lens'] / mic['ri_medium'] * np.sin(np.arctan(vy / vx))) # new ray-vector outside the lens Vr11 = (Vl2 * np.tile(np.cos(th11), (3, 1)) + Vrot * np.tile(np.sin(th11), (3, 1))) Vr = Vr11 / np.tile(Vr11[0, :], (3, 1)) # light field after the spherical lens x[2, :] = Vr[1, :] x[3, :] = Vr[2, :] if mic['cyl_focal_length'] == 0: # linear transformation from the lens plane to the ccd plane T1 = np.array([[1, 0, -dCCD, 0], [0, 1, 0, -dCCD], [0, 0, 1, 0], [0, 0, 0, 1]]) # light field at the ccd plane xs = np.linalg.inv(T1) @ x else: # # linear transformation from the lens plane to the cyl_lens plane T1c = np.array([[1, 0, -dCCD * 1 / 3, 0], [0, 1, 0, -dCCD * 1 / 3], [0, 0, 1, 0], [0, 0, 0, 1]]) # # light field at the cylindrical lens plane xc = np.linalg.inv(T1c) @ x # # light field after the cylindrical lens plane Tc = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [-1 / mic['cyl_focal_length'], 0, 1, 0], [0, 0, 0, 1]]) xc_a = np.linalg.inv(Tc) @ xc # # light field at the ccd plane T1 = np.array([[1, 0, -dCCD * 2 / 3, 0], [0, 1, 0, -dCCD * 2 / 3], [0, 0, 1, 0], [0, 0, 0, 1]]); # # light field at the ccd plane xs = np.linalg.inv(T1) @ xc_a # transform the position in pixel units X = np.round(xs[0, :] / mic['pixel_size'] + P1[0]) Y = np.round(xs[1, :] / mic['pixel_size'] + P1[1]) # remove rays outside the CCD ind = np.all([X > 0, X <= mic['pixel_dim_x'], Y > 0, Y <= mic['pixel_dim_y'], X.imag == 0, Y.imag == 0], axis=0) # count number of rays in each pixel countXY = np.sort(Y[ind] + (X[ind] - 1) * mic['pixel_dim_y']) indi, ia = np.unique(countXY, return_index=True) nCounts = np.hstack((ia[1:], countXY.size + 1)) - ia # prepare image I = np.zeros((mic['pixel_dim_y'], mic['pixel_dim_x'])) Ifr = I.flatten('F') Ifr[indi.astype(int) - 1] = nCounts I = Ifr.reshape(mic['pixel_dim_y'], mic['pixel_dim_x'], order='F') return I # %% def create_particle(D, Ns, Nr): R = D / 2 V = spiral_sphere(Ns) V[0:2, V[0, :] > 0] = -V[0:2, V[0, :] > 0] x = R * V[0, :] y = R * V[1, :] z = R * V[2, :] V0 = spiral_sphere(Nr + 2) V0 = V0[:, 1:-1] u = np.tile(x, (Nr, 1)) v = np.tile(y, (Nr, 1)) s = u * 0 t = u * 0 phs = np.random.uniform(-np.pi, np.pi, z.size) cs = np.cos(phs) sn = np.sin(phs) for k in range(0, Ns): Rot = np.array([[cs[k], -sn[k], 0], [sn[k], cs[k], 0], [0, 0, 1]]) Vr = Rot @ V0 Vr[0, :] = -abs(Vr[0, :]) s[:, k] = Vr[1, :] / Vr[0, :] t[:, k] = Vr[2, :] / Vr[0, :] u[:, k] = y[k] - s[:, k] * x[k] v[:, k] = z[k] - t[:, k] * x[k] xp = np.vstack((u.flatten('F'), v.flatten('F'), s.flatten('F'), t.flatten('F'))) return xp # %% def create_ellipsoid(Deab, Ns, Nr): D = Deab[0]; ecc = Deab[1] alpha = Deab[2]; beta = Deab[3] R = D / 2 V = spiral_sphere(Ns) V = R * V V[2, :] = V[2, :] * ecc R_beta = np.array([[np.cos(beta), 0, np.sin(beta)], [0, 1, 0], [-np.sin(beta), 0, np.cos(beta)]]) R_alpha = np.array([[np.cos(alpha), -np.sin(alpha), 0], [np.sin(alpha), np.cos(alpha), 0], [0, 0, 1]]) Vf = R_alpha @ (R_beta @ V) ii1 = (Vf[1, :] == np.min(Vf[1, :])).nonzero()[0][0] ii2 = (Vf[1, :] == np.max(Vf[1, :])).nonzero()[0][0] ii3 = (Vf[2, :] == np.min(Vf[2, :])).nonzero()[0][0] ii4 = (Vf[2, :] == np.max(Vf[2, :])).nonzero()[0][0] Vdum = Vf[:, [ii1, ii2, ii3, ii4]] A = np.c_[Vdum[1, :], Vdum[2, :], np.ones(Vdum.shape[1])] C, _, _, _ = np.linalg.lstsq(A, Vdum[0, :], rcond=None) V1dum = C[0] * Vf[1, :] + C[1] * Vf[2, :] + C[2] ind = (Vf[0, :] - V1dum) < 0 x = Vf[0, ind] y = Vf[1, ind] z = Vf[2, ind] Ns = z.size V0 = spiral_sphere(Nr + 2) V0 = V0[:, 1:-1] u = np.tile(x, (Nr, 1)) v = np.tile(y, (Nr, 1)) s = u * 0 t = u * 0 
phs = np.random.uniform(-np.pi, np.pi, z.size) cs = np.cos(phs) sn = np.sin(phs) for k in range(0, Ns): Rot = np.array([[cs[k], -sn[k], 0], [sn[k], cs[k], 0], [0, 0, 1]]) Vr = Rot @ V0 Vr[0, :] = -abs(Vr[0, :]) s[:, k] = Vr[1, :] / Vr[0, :] t[:, k] = Vr[2, :] / Vr[0, :] u[:, k] = y[k] - s[:, k] * x[k] v[:, k] = z[k] - t[:, k] * x[k] xp = np.vstack((u.flatten('F'), v.flatten('F'), s.flatten('F'), t.flatten('F'))) return xp # %% def spiral_sphere(N): gr = (1 + np.sqrt(5)) / 2 # golden ratio ga = 2 * np.pi * (1 - 1 / gr) # golden angle ind_p = np.arange(0, N) # particle (i.e., point sample) index lat = np.arccos(1 - 2 * ind_p / ( N - 1)) # latitude is defined so that particle index is proportional to surface area between 0 and lat lon = ind_p * ga # position particles at even intervals along longitude # Convert from spherical to Cartesian co-ordinates x = np.sin(lat) * np.cos(lon) y = np.sin(lat) * np.sin(lon) z = np.cos(lat) V = np.vstack((x, y, z)) return V # %% if __name__ == '__main__': run()
[((125, 11, 125, 18), 'tkinter.Tk', 'tk.Tk', ({}, {}), '()', True, 'import tkinter as tk\n'), ((129, 19, 131, 65), 'tkinter.filedialog.askopenfilenames', 'filedialog.askopenfilenames', (), '', False, 'from tkinter import filedialog\n'), ((136, 17, 138, 65), 'tkinter.filedialog.askopenfilenames', 'filedialog.askopenfilenames', (), '', False, 'from tkinter import filedialog\n'), ((143, 25, 144, 53), 'tkinter.filedialog.askdirectory', 'filedialog.askdirectory', (), '', False, 'from tkinter import filedialog\n'), ((181, 8, 181, 58), 'numpy.zeros', 'np.zeros', ({(181, 17, 181, 57): "(mic['pixel_dim_y'], mic['pixel_dim_x'])"}, {}), "((mic['pixel_dim_y'], mic['pixel_dim_x']))", True, 'import numpy as np\n'), ((183, 11, 183, 29), 'numpy.unique', 'np.unique', ({(183, 21, 183, 28): 'P[:, (3)]'}, {}), '(P[:, (3)])', True, 'import numpy as np\n'), ((261, 9, 264, 33), 'numpy.array', 'np.array', ({(261, 18, 264, 32): '[[1, 0, dPART, 0], [0, 1, 0, dPART], [0, 0, 1, 0], [0, 0, 0, 1]]'}, {}), '([[1, 0, dPART, 0], [0, 1, 0, dPART], [0, 0, 1, 0], [0, 0, 0, 1]])', True, 'import numpy as np\n'), ((283, 9, 283, 35), 'numpy.vstack', 'np.vstack', ({(283, 19, 283, 34): '(1 + dum, c, d)'}, {}), '((1 + dum, c, d))', True, 'import numpy as np\n'), ((286, 9, 286, 36), 'numpy.vstack', 'np.vstack', ({(286, 19, 286, 35): '(rk + dum, a, b)'}, {}), '((rk + dum, a, b))', True, 'import numpy as np\n'), ((289, 11, 289, 45), 'numpy.cross', 'np.cross', (), '', True, 'import numpy as np\n'), ((293, 9, 293, 32), 'numpy.sum', 'np.sum', (), '', True, 'import numpy as np\n'), ((294, 9, 294, 34), 'numpy.sum', 'np.sum', (), '', True, 'import numpy as np\n'), ((303, 10, 303, 43), 'numpy.vstack', 'np.vstack', ({(303, 20, 303, 42): '(Vl[(0), :], -Vl[1:, :])'}, {}), '((Vl[(0), :], -Vl[1:, :]))', True, 'import numpy as np\n'), ((305, 11, 305, 46), 'numpy.cross', 'np.cross', (), '', True, 'import numpy as np\n'), ((309, 9, 309, 33), 'numpy.sum', 'np.sum', (), '', True, 'import numpy as np\n'), ((310, 9, 310, 34), 'numpy.sum', 'np.sum', (), '', True, 'import numpy as np\n'), ((353, 8, 353, 54), 'numpy.round', 'np.round', ({(353, 17, 353, 53): "xs[(0), :] / mic['pixel_size'] + P1[0]"}, {}), "(xs[(0), :] / mic['pixel_size'] + P1[0])", True, 'import numpy as np\n'), ((354, 8, 354, 54), 'numpy.round', 'np.round', ({(354, 17, 354, 53): "xs[(1), :] / mic['pixel_size'] + P1[1]"}, {}), "(xs[(1), :] / mic['pixel_size'] + P1[1])", True, 'import numpy as np\n'), ((357, 10, 358, 52), 'numpy.all', 'np.all', (), '', True, 'import numpy as np\n'), ((361, 14, 361, 65), 'numpy.sort', 'np.sort', ({(361, 22, 361, 64): "Y[ind] + (X[ind] - 1) * mic['pixel_dim_y']"}, {}), "(Y[ind] + (X[ind] - 1) * mic['pixel_dim_y'])", True, 'import numpy as np\n'), ((362, 15, 362, 52), 'numpy.unique', 'np.unique', (), '', True, 'import numpy as np\n'), ((366, 8, 366, 58), 'numpy.zeros', 'np.zeros', ({(366, 17, 366, 57): "(mic['pixel_dim_y'], mic['pixel_dim_x'])"}, {}), "((mic['pixel_dim_y'], mic['pixel_dim_x']))", True, 'import numpy as np\n'), ((386, 8, 386, 27), 'numpy.tile', 'np.tile', ({(386, 16, 386, 17): 'x', (386, 19, 386, 26): '(Nr, 1)'}, {}), '(x, (Nr, 1))', True, 'import numpy as np\n'), ((387, 8, 387, 27), 'numpy.tile', 'np.tile', ({(387, 16, 387, 17): 'y', (387, 19, 387, 26): '(Nr, 1)'}, {}), '(y, (Nr, 1))', True, 'import numpy as np\n'), ((391, 10, 391, 50), 'numpy.random.uniform', 'np.random.uniform', ({(391, 28, 391, 34): '-np.pi', (391, 36, 391, 41): 'np.pi', (391, 43, 391, 49): 'z.size'}, {}), '(-np.pi, np.pi, z.size)', True, 'import numpy as np\n'), ((392, 9, 
392, 20), 'numpy.cos', 'np.cos', ({(392, 16, 392, 19): 'phs'}, {}), '(phs)', True, 'import numpy as np\n'), ((393, 9, 393, 20), 'numpy.sin', 'np.sin', ({(393, 16, 393, 19): 'phs'}, {}), '(phs)', True, 'import numpy as np\n'), ((439, 17, 439, 59), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (), '', True, 'import numpy as np\n'), ((449, 8, 449, 27), 'numpy.tile', 'np.tile', ({(449, 16, 449, 17): 'x', (449, 19, 449, 26): '(Nr, 1)'}, {}), '(x, (Nr, 1))', True, 'import numpy as np\n'), ((450, 8, 450, 27), 'numpy.tile', 'np.tile', ({(450, 16, 450, 17): 'y', (450, 19, 450, 26): '(Nr, 1)'}, {}), '(y, (Nr, 1))', True, 'import numpy as np\n'), ((454, 10, 454, 50), 'numpy.random.uniform', 'np.random.uniform', ({(454, 28, 454, 34): '-np.pi', (454, 36, 454, 41): 'np.pi', (454, 43, 454, 49): 'z.size'}, {}), '(-np.pi, np.pi, z.size)', True, 'import numpy as np\n'), ((455, 9, 455, 20), 'numpy.cos', 'np.cos', ({(455, 16, 455, 19): 'phs'}, {}), '(phs)', True, 'import numpy as np\n'), ((456, 9, 456, 20), 'numpy.sin', 'np.sin', ({(456, 16, 456, 19): 'phs'}, {}), '(phs)', True, 'import numpy as np\n'), ((478, 12, 478, 27), 'numpy.arange', 'np.arange', ({(478, 22, 478, 23): '0', (478, 25, 478, 26): 'N'}, {}), '(0, N)', True, 'import numpy as np\n'), ((479, 10, 480, 23), 'numpy.arccos', 'np.arccos', ({(479, 20, 480, 22): '1 - 2 * ind_p / (N - 1)'}, {}), '(1 - 2 * ind_p / (N - 1))', True, 'import numpy as np\n'), ((486, 8, 486, 19), 'numpy.cos', 'np.cos', ({(486, 15, 486, 18): 'lat'}, {}), '(lat)', True, 'import numpy as np\n'), ((487, 8, 487, 28), 'numpy.vstack', 'np.vstack', ({(487, 18, 487, 27): '(x, y, z)'}, {}), '((x, y, z))', True, 'import numpy as np\n'), ((134, 8, 134, 40), 'sys.exit', 'sys.exit', ({(134, 17, 134, 39): '"""input file not valid"""'}, {}), "('input file not valid')", False, 'import sys\n'), ((141, 8, 141, 40), 'sys.exit', 'sys.exit', ({(141, 17, 141, 39): '"""input file not valid"""'}, {}), "('input file not valid')", False, 'import sys\n'), ((147, 8, 147, 40), 'sys.exit', 'sys.exit', ({(147, 17, 147, 39): '"""input file not valid"""'}, {}), "('input file not valid')", False, 'import sys\n'), ((165, 12, 165, 31), 'numpy.genfromtxt', 'np.genfromtxt', ({(165, 26, 165, 30): 'data'}, {}), '(data)', True, 'import numpy as np\n'), ((169, 21, 169, 40), 'os.path.split', 'os.path.split', ({(169, 35, 169, 39): 'data'}, {}), '(data)', False, 'import os\n'), ((187, 15, 187, 34), 'numpy.ones', 'np.ones', ({(187, 23, 187, 33): 'P.shape[0]'}, {}), '(P.shape[0])', True, 'import numpy as np\n'), ((238, 16, 239, 74), 'numpy.random.normal', 'np.random.normal', ({(238, 33, 238, 34): '0', (238, 36, 238, 59): "mic['background_noise']", (239, 33, 239, 73): "(mic['pixel_dim_y'], mic['pixel_dim_x'])"}, {}), "(0, mic['background_noise'], (mic['pixel_dim_y'], mic[\n 'pixel_dim_x']))", True, 'import numpy as np\n'), ((267, 8, 267, 25), 'numpy.linalg.inv', 'np.linalg.inv', ({(267, 22, 267, 24): 'T2'}, {}), '(T2)', True, 'import numpy as np\n'), ((300, 16, 300, 43), 'numpy.tile', 'np.tile', ({(300, 24, 300, 34): 'Vr11[(0), :]', (300, 36, 300, 42): '(3, 1)'}, {}), '(Vr11[(0), :], (3, 1))', True, 'import numpy as np\n'), ((317, 16, 317, 43), 'numpy.tile', 'np.tile', ({(317, 24, 317, 34): 'Vr11[(0), :]', (317, 36, 317, 42): '(3, 1)'}, {}), '(Vr11[(0), :], (3, 1))', True, 'import numpy as np\n'), ((324, 13, 327, 37), 'numpy.array', 'np.array', ({(324, 22, 327, 36): '[[1, 0, -dCCD, 0], [0, 1, 0, -dCCD], [0, 0, 1, 0], [0, 0, 0, 1]]'}, {}), '([[1, 0, -dCCD, 0], [0, 1, 0, -dCCD], [0, 0, 1, 0], [0, 0, 0, 1]])', True, 'import numpy 
as np\n'), ((332, 14, 335, 38), 'numpy.array', 'np.array', ({(332, 23, 335, 37): '[[1, 0, -dCCD * 1 / 3, 0], [0, 1, 0, -dCCD * 1 / 3], [0, 0, 1, 0], [0, 0, 0, 1]\n ]'}, {}), '([[1, 0, -dCCD * 1 / 3, 0], [0, 1, 0, -dCCD * 1 / 3], [0, 0, 1, 0],\n [0, 0, 0, 1]])', True, 'import numpy as np\n'), ((339, 13, 342, 37), 'numpy.array', 'np.array', ({(339, 22, 342, 36): "[[1, 0, 0, 0], [0, 1, 0, 0], [-1 / mic['cyl_focal_length'], 0, 1, 0], [0, 0,\n 0, 1]]"}, {}), "([[1, 0, 0, 0], [0, 1, 0, 0], [-1 / mic['cyl_focal_length'], 0, 1, \n 0], [0, 0, 0, 1]])", True, 'import numpy as np\n'), ((345, 13, 348, 37), 'numpy.array', 'np.array', ({(345, 22, 348, 36): '[[1, 0, -dCCD * 2 / 3, 0], [0, 1, 0, -dCCD * 2 / 3], [0, 0, 1, 0], [0, 0, 0, 1]\n ]'}, {}), '([[1, 0, -dCCD * 2 / 3, 0], [0, 1, 0, -dCCD * 2 / 3], [0, 0, 1, 0],\n [0, 0, 0, 1]])', True, 'import numpy as np\n'), ((363, 14, 363, 51), 'numpy.hstack', 'np.hstack', ({(363, 24, 363, 50): '(ia[1:], countXY.size + 1)'}, {}), '((ia[1:], countXY.size + 1))', True, 'import numpy as np\n'), ((395, 14, 396, 54), 'numpy.array', 'np.array', ({(395, 23, 396, 53): '[[cs[k], -sn[k], 0], [sn[k], cs[k], 0], [0, 0, 1]]'}, {}), '([[cs[k], -sn[k], 0], [sn[k], cs[k], 0], [0, 0, 1]])', True, 'import numpy as np\n'), ((458, 14, 459, 54), 'numpy.array', 'np.array', ({(458, 23, 459, 53): '[[cs[k], -sn[k], 0], [sn[k], cs[k], 0], [0, 0, 1]]'}, {}), '([[cs[k], -sn[k], 0], [sn[k], cs[k], 0], [0, 0, 1]])', True, 'import numpy as np\n'), ((484, 8, 484, 19), 'numpy.sin', 'np.sin', ({(484, 15, 484, 18): 'lat'}, {}), '(lat)', True, 'import numpy as np\n'), ((484, 22, 484, 33), 'numpy.cos', 'np.cos', ({(484, 29, 484, 32): 'lon'}, {}), '(lon)', True, 'import numpy as np\n'), ((485, 8, 485, 19), 'numpy.sin', 'np.sin', ({(485, 15, 485, 18): 'lat'}, {}), '(lat)', True, 'import numpy as np\n'), ((485, 22, 485, 33), 'numpy.sin', 'np.sin', ({(485, 29, 485, 32): 'lon'}, {}), '(lon)', True, 'import numpy as np\n'), ((100, 16, 100, 35), 'numpy.genfromtxt', 'np.genfromtxt', ({(100, 30, 100, 34): 'data'}, {}), '(data)', True, 'import numpy as np\n'), ((104, 25, 104, 44), 'os.path.split', 'os.path.split', ({(104, 39, 104, 43): 'data'}, {}), '(data)', False, 'import os\n'), ((167, 16, 167, 29), 'numpy.array', 'np.array', ({(167, 25, 167, 28): '[P]'}, {}), '([P])', True, 'import numpy as np\n'), ((191, 23, 192, 87), 'numpy.round', 'np.round', ({(191, 32, 192, 86): "mic['points_per_pixel'] * 2 * np.pi * (dp_s * mic['magnification'] / mic[\n 'pixel_size']) ** 2"}, {}), "(mic['points_per_pixel'] * 2 * np.pi * (dp_s * mic['magnification'] /\n mic['pixel_size']) ** 2)", True, 'import numpy as np\n'), ((240, 16, 240, 31), 'numpy.round', 'np.round', ({(240, 25, 240, 30): 'Irand'}, {}), '(Irand)', True, 'import numpy as np\n'), ((290, 11, 290, 47), 'numpy.cross', 'np.cross', (), '', True, 'import numpy as np\n'), ((306, 11, 306, 48), 'numpy.cross', 'np.cross', (), '', True, 'import numpy as np\n'), ((329, 13, 329, 30), 'numpy.linalg.inv', 'np.linalg.inv', ({(329, 27, 329, 29): 'T1'}, {}), '(T1)', True, 'import numpy as np\n'), ((337, 13, 337, 31), 'numpy.linalg.inv', 'np.linalg.inv', ({(337, 27, 337, 30): 'T1c'}, {}), '(T1c)', True, 'import numpy as np\n'), ((343, 15, 343, 32), 'numpy.linalg.inv', 'np.linalg.inv', ({(343, 29, 343, 31): 'Tc'}, {}), '(Tc)', True, 'import numpy as np\n'), ((350, 13, 350, 30), 'numpy.linalg.inv', 'np.linalg.inv', ({(350, 27, 350, 29): 'T1'}, {}), '(T1)', True, 'import numpy as np\n'), ((475, 14, 475, 24), 'numpy.sqrt', 'np.sqrt', ({(475, 22, 475, 23): '(5)'}, {}), '(5)', True, 
'import numpy as np\n'), ((59, 36, 59, 65), 'os.path.abspath', 'os.path.abspath', ({(59, 52, 59, 64): 'setting_file'}, {}), '(setting_file)', False, 'import os\n'), ((67, 28, 67, 60), 'glob.glob', 'glob.glob', ({(67, 38, 67, 59): "data_files + '/*.txt'"}, {}), "(data_files + '/*.txt')", False, 'import glob\n'), ((79, 42, 79, 77), 'os.path.abspath', 'os.path.abspath', ({(79, 58, 79, 76): 'destination_folder'}, {}), '(destination_folder)', False, 'import os\n'), ((102, 20, 102, 33), 'numpy.array', 'np.array', ({(102, 29, 102, 32): '[P]'}, {}), '([P])', True, 'import numpy as np\n'), ((253, 26, 253, 62), 'numpy.arcsin', 'np.arcsin', ({(253, 36, 253, 61): "mic['numerical_aperture']"}, {}), "(mic['numerical_aperture'])", True, 'import numpy as np\n'), ((296, 28, 296, 46), 'numpy.arctan', 'np.arctan', ({(296, 38, 296, 45): 'vy / vx'}, {}), '(vy / vx)', True, 'import numpy as np\n'), ((298, 25, 298, 37), 'numpy.cos', 'np.cos', ({(298, 32, 298, 36): 'th11'}, {}), '(th11)', True, 'import numpy as np\n'), ((299, 27, 299, 39), 'numpy.sin', 'np.sin', ({(299, 34, 299, 38): 'th11'}, {}), '(th11)', True, 'import numpy as np\n'), ((313, 28, 313, 46), 'numpy.arctan', 'np.arctan', ({(313, 38, 313, 45): 'vy / vx'}, {}), '(vy / vx)', True, 'import numpy as np\n'), ((315, 26, 315, 38), 'numpy.cos', 'np.cos', ({(315, 33, 315, 37): 'th11'}, {}), '(th11)', True, 'import numpy as np\n'), ((316, 27, 316, 39), 'numpy.sin', 'np.sin', ({(316, 34, 316, 38): 'th11'}, {}), '(th11)', True, 'import numpy as np\n'), ((423, 24, 423, 36), 'numpy.cos', 'np.cos', ({(423, 31, 423, 35): 'beta'}, {}), '(beta)', True, 'import numpy as np\n'), ((423, 41, 423, 53), 'numpy.sin', 'np.sin', ({(423, 48, 423, 52): 'beta'}, {}), '(beta)', True, 'import numpy as np\n'), ((425, 42, 425, 54), 'numpy.cos', 'np.cos', ({(425, 49, 425, 53): 'beta'}, {}), '(beta)', True, 'import numpy as np\n'), ((426, 25, 426, 38), 'numpy.cos', 'np.cos', ({(426, 32, 426, 37): 'alpha'}, {}), '(alpha)', True, 'import numpy as np\n'), ((427, 25, 427, 38), 'numpy.sin', 'np.sin', ({(427, 32, 427, 37): 'alpha'}, {}), '(alpha)', True, 'import numpy as np\n'), ((427, 40, 427, 53), 'numpy.cos', 'np.cos', ({(427, 47, 427, 52): 'alpha'}, {}), '(alpha)', True, 'import numpy as np\n'), ((438, 38, 438, 60), 'numpy.ones', 'np.ones', ({(438, 46, 438, 59): 'Vdum.shape[1]'}, {}), '(Vdum.shape[1])', True, 'import numpy as np\n'), ((108, 32, 108, 90), 'os.path.join', 'os.path.join', ({(108, 45, 108, 68): 'self.destination_folder', (108, 71, 108, 88): "(tail[:-3] + 'tif')"}, {}), "(self.destination_folder, tail[:-3] + 'tif')", False, 'import os\n'), ((109, 28, 109, 40), 'numpy.uint16', 'np.uint16', ({(109, 38, 109, 39): 'I'}, {}), '(I)', True, 'import numpy as np\n'), ((202, 27, 203, 95), 'numpy.round', 'np.round', ({(202, 36, 203, 94): "mic['points_per_pixel'] * 2 * np.pi * (P[ii, 3] * mic['magnification'] /\n mic['pixel_size']) ** 2"}, {}), "(mic['points_per_pixel'] * 2 * np.pi * (P[ii, 3] * mic[\n 'magnification'] / mic['pixel_size']) ** 2)", True, 'import numpy as np\n'), ((425, 25, 425, 37), 'numpy.sin', 'np.sin', ({(425, 32, 425, 36): 'beta'}, {}), '(beta)', True, 'import numpy as np\n'), ((426, 41, 426, 54), 'numpy.sin', 'np.sin', ({(426, 48, 426, 53): 'alpha'}, {}), '(alpha)', True, 'import numpy as np\n'), ((111, 32, 111, 90), 'os.path.join', 'os.path.join', ({(111, 45, 111, 68): 'self.destination_folder', (111, 71, 111, 88): "(tail[:-3] + 'tif')"}, {}), "(self.destination_folder, tail[:-3] + 'tif')", False, 'import os\n'), ((112, 28, 112, 39), 'numpy.uint8', 'np.uint8', 
({(112, 37, 112, 38): 'I'}, {}), '(I)', True, 'import numpy as np\n'), ((213, 27, 214, 95), 'numpy.round', 'np.round', ({(213, 36, 214, 94): "mic['points_per_pixel'] * 2 * np.pi * (P[ii, 3] * mic['magnification'] /\n mic['pixel_size']) ** 2"}, {}), "(mic['points_per_pixel'] * 2 * np.pi * (P[ii, 3] * mic[\n 'magnification'] / mic['pixel_size']) ** 2)", True, 'import numpy as np\n'), ((432, 23, 432, 39), 'numpy.min', 'np.min', ({(432, 30, 432, 38): 'Vf[(1), :]'}, {}), '(Vf[(1), :])', True, 'import numpy as np\n'), ((433, 23, 433, 39), 'numpy.max', 'np.max', ({(433, 30, 433, 38): 'Vf[(1), :]'}, {}), '(Vf[(1), :])', True, 'import numpy as np\n'), ((434, 23, 434, 39), 'numpy.min', 'np.min', ({(434, 30, 434, 38): 'Vf[(2), :]'}, {}), '(Vf[(2), :])', True, 'import numpy as np\n'), ((435, 23, 435, 39), 'numpy.max', 'np.max', ({(435, 30, 435, 38): 'Vf[(2), :]'}, {}), '(Vf[(2), :])', True, 'import numpy as np\n'), ((221, 31, 221, 56), 'numpy.round', 'np.round', ({(221, 40, 221, 55): 'fact * n_points'}, {}), '(fact * n_points)', True, 'import numpy as np\n'), ((226, 31, 226, 56), 'numpy.round', 'np.round', ({(226, 40, 226, 55): 'fact * n_points'}, {}), '(fact * n_points)', True, 'import numpy as np\n'), ((219, 42, 219, 67), 'numpy.sqrt', 'np.sqrt', ({(219, 50, 219, 66): '(1 - 1 / ecc ** 2)'}, {}), '(1 - 1 / ecc ** 2)', True, 'import numpy as np\n'), ((220, 44, 220, 69), 'numpy.sqrt', 'np.sqrt', ({(220, 52, 220, 68): '(1 - 1 / ecc ** 2)'}, {}), '(1 - 1 / ecc ** 2)', True, 'import numpy as np\n'), ((224, 47, 224, 68), 'numpy.sqrt', 'np.sqrt', ({(224, 55, 224, 67): '(1 - ecc ** 2)'}, {}), '(1 - ecc ** 2)', True, 'import numpy as np\n'), ((225, 44, 225, 65), 'numpy.sqrt', 'np.sqrt', ({(225, 52, 225, 64): '(1 - ecc ** 2)'}, {}), '(1 - ecc ** 2)', True, 'import numpy as np\n')]
kmiya/AutowareArchitectureProposal.iv
planning/scenario_planning/lane_driving/motion_planning/obstacle_avoidance_planner/scripts/trajectory_visualizer.py
386b52c9cc90f4535ad833014f2f9500f0e64ccf
# Copyright 2020 Tier IV, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # !/usr/bin/env python # -*- coding: utf-8 -*- # TODO(kosuke murakami): write ros2 visualizer # import rospy # from autoware_planning_msgs.msg import Trajectory # from autoware_planning_msgs.msg import TrajectoryPoint # import matplotlib.pyplot as plt # import numpy as np # import tf # from geometry_msgs.msg import Vector3 # def quaternion_to_euler(quaternion): # """Convert Quaternion to Euler Angles # quaternion: geometry_msgs/Quaternion # euler: geometry_msgs/Vector3 # """ # e = tf.transformations.euler_from_quaternion( # (quaternion.x, quaternion.y, quaternion.z, quaternion.w)) # return Vector3(x=e[0], y=e[1], z=e[2]) # class TrajectoryVisualizer(): # def __init__(self): # self.in_trajectory = Trajectory() # self.debug_trajectory = Trajectory() # self.debug_fixed_trajectory = Trajectory() # self.plot_done1 = True # self.plot_done2 = True # self.plot_done3 = True # self.length = 50 # self.substatus1 = rospy.Subscriber( # "/planning/scenario_planning/lane_driving/motion_planning/obstacle_avoidance_planner/trajectory", # Trajectory, self.CallBackTraj, queue_size=1, tcp_nodelay=True) # rospy.Timer(rospy.Duration(0.3), self.timerCallback) # def CallBackTraj(self, cmd): # if (self.plot_done1): # self.in_trajectory = cmd # self.plot_done1 = False # def CallBackDebugTraj(self, cmd): # if (self.plot_done2): # self.debug_trajectory = cmd # self.plot_done2 = False # def CallBackDebugFixedTraj(self, cmd): # if (self.plot_done3): # self.debug_fixed_trajectory = cmd # self.plot_done3 = False # def timerCallback(self, event): # self.plotTrajectory() # self.plot_done1 = True # self.plot_done2 = True # self.plot_done3 = True # def CalcArcLength(self, traj): # s_arr = [] # ds = 0.0 # s_sum = 0.0 # if len(traj.points) > 0: # s_arr.append(s_sum) # for i in range(1, len(traj.points)): # p0 = traj.points[i-1] # p1 = traj.points[i] # dx = p1.pose.position.x - p0.pose.position.x # dy = p1.pose.position.y - p0.pose.position.y # ds = np.sqrt(dx**2 + dy**2) # s_sum += ds # if(s_sum > self.length): # break # s_arr.append(s_sum) # return s_arr # def CalcX(self, traj): # v_list = [] # for p in traj.points: # v_list.append(p.pose.position.x) # return v_list # def CalcY(self, traj): # v_list = [] # for p in traj.points: # v_list.append(p.pose.position.y) # return v_list # def CalcYaw(self, traj, s_arr): # v_list = [] # for p in traj.points: # v_list.append(quaternion_to_euler(p.pose.orientation).z) # return v_list[0: len(s_arr)] # def plotTrajectory(self): # plt.clf() # ax3 = plt.subplot(1, 1, 1) # x = self.CalcArcLength(self.in_trajectory) # y = self.CalcYaw(self.in_trajectory, x) # if len(x) == len(y): # ax3.plot(x, y, label="final", marker="*") # ax3.set_xlabel("arclength [m]") # ax3.set_ylabel("yaw") # plt.pause(0.01) # def main(): # rospy.init_node("trajectory_visualizer") # TrajectoryVisualizer() # rospy.spin() # if __name__ == "__main__": # main()
[]
agokhale11/test2
main/forms.py
deddf17e7bb67777251cf73cbdb5f6970c16050a
from django.contrib.auth.forms import AuthenticationForm, UserCreationForm from django.contrib.auth.models import User from django import forms class UploadFileForm(forms.Form): title = forms.CharField(max_length=50) file = forms.FileField() # If you don't do this you cannot use Bootstrap CSS class LoginForm(AuthenticationForm): username = forms.CharField(label="Username", max_length=16, widget=forms.TextInput(attrs={'class': 'form-control', 'name': 'username'})) password = forms.CharField(label="Password", max_length=16, widget=forms.PasswordInput(attrs={'class': 'form-control', 'name': 'password'})) class SignUpForm(UserCreationForm): full_name = forms.CharField(label="Full Name", max_length=50, widget=forms.TextInput(attrs={'class': 'form-control', 'name': 'full_name'})) email = forms.EmailField(label = "Email", max_length =50, widget=forms.EmailInput(attrs={'class': 'form-control', 'name': 'email'})) class Meta: model = User fields = ("email", "full_name", "username", "password1", "password2") def save(self, commit=True): user = super(SignUpForm, self).save(commit=False) user.full_name = self.cleaned_data["full_name"] user.email = self.cleaned_data["email"] if commit: user.save() return user class EmailSignupForm(UserCreationForm): full_name = forms.CharField(label="Full Name", max_length=50, widget=forms.TextInput(attrs={'class': 'form-control', 'name': 'full_name'})) class Meta: model = User fields = ("full_name", "username", "password1", "password2") def save(self, commit=True): user = super(EmailSignupForm, self).save(commit=False) user.full_name = self.cleaned_data["full_name"] if commit: user.save() return user class ChangePasswordForm(forms.Form): security_code = forms.CharField(label="Security Code", max_length=50, widget=forms.TextInput(attrs={'class': 'form-control', 'name': 'security_code'})) password1 = forms.CharField(label="New Password", max_length=16, widget=forms.PasswordInput(attrs={'class': 'form-control', 'name': 'password1'})) password2 = forms.CharField(label="Re-enter New Password", max_length=16, widget=forms.PasswordInput(attrs={'class': 'form-control', 'name': 'password2'})) class Meta: fields = ("security_code", "password1", "password2")
[((7, 12, 7, 42), 'django.forms.CharField', 'forms.CharField', (), '', False, 'from django import forms\n'), ((8, 11, 8, 28), 'django.forms.FileField', 'forms.FileField', ({}, {}), '()', False, 'from django import forms\n'), ((14, 38, 14, 106), 'django.forms.TextInput', 'forms.TextInput', (), '', False, 'from django import forms\n'), ((16, 38, 16, 110), 'django.forms.PasswordInput', 'forms.PasswordInput', (), '', False, 'from django import forms\n'), ((21, 40, 21, 109), 'django.forms.TextInput', 'forms.TextInput', (), '', False, 'from django import forms\n'), ((23, 69, 23, 135), 'django.forms.EmailInput', 'forms.EmailInput', (), '', False, 'from django import forms\n'), ((40, 40, 40, 109), 'django.forms.TextInput', 'forms.TextInput', (), '', False, 'from django import forms\n'), ((55, 40, 55, 113), 'django.forms.TextInput', 'forms.TextInput', (), '', False, 'from django import forms\n'), ((58, 38, 58, 111), 'django.forms.PasswordInput', 'forms.PasswordInput', (), '', False, 'from django import forms\n'), ((60, 38, 60, 111), 'django.forms.PasswordInput', 'forms.PasswordInput', (), '', False, 'from django import forms\n')]
PythonProgramming/Pandas-Basics-with-2.7
pandas 9 - Statistics Information on data sets.py
a6ecd5ac7c25dba83e934549903f229de89290d3
import pandas as pd from pandas import DataFrame df = pd.read_csv('sp500_ohlc.csv', index_col = 'Date', parse_dates=True) df['H-L'] = df.High - df.Low # Giving us count (rows), mean (avg), std (standard deviation for the entire # set), minimum for the set, maximum for the set, and some %s in that range. print( df.describe()) x = input('enter to cont') # gives us correlation data. Remember the 3d chart we plotted? # now you can see if correlation of H-L and Volume also is correlated # with price swings. Correlations for your correlations print( df.corr()) x = input('enter to cont') # covariance... now plenty of people know what correlation is, but what in the # heck is covariance. # Let's defined the two. # covariance is the measure of how two variables change together. # correlation is the measure of how two variables move in relation to eachother. # so covariance is a more direct assessment of the relationship between two variables. # Maybe a better way to put it is that covariance is the measure of the strength of correlation. print( df.cov()) x = input('enter to cont') print( df[['Volume','H-L']].corr()) x = input('enter to cont') # see how it makes a table? # so now, we can actually perform a service that some people actually pay for # I once had a short freelance gig doing this # so a popular form of analysis within especially forex is to compare correlations between # the currencies. The idea here is that you pace one currency with another. # import datetime import pandas.io.data C = pd.io.data.get_data_yahoo('C', start=datetime.datetime(2011, 10, 1), end=datetime.datetime(2014, 1, 1)) AAPL = pd.io.data.get_data_yahoo('AAPL', start=datetime.datetime(2011, 10, 1), end=datetime.datetime(2014, 1, 1)) MSFT = pd.io.data.get_data_yahoo('MSFT', start=datetime.datetime(2011, 10, 1), end=datetime.datetime(2014, 1, 1)) TSLA = pd.io.data.get_data_yahoo('TSLA', start=datetime.datetime(2011, 10, 1), end=datetime.datetime(2014, 1, 1)) print( C.head()) x = input('enter to cont') del C['Open'] # , 'high', 'low', 'close', 'volume' del C['High'] del C['Low'] del C['Close'] del C['Volume'] corComp = C corComp.rename(columns={'Adj Close': 'C'}, inplace=True) corComp['AAPL'] = AAPL['Adj Close'] corComp['MSFT'] = MSFT['Adj Close'] corComp['TSLA'] = TSLA['Adj Close'] print( corComp.head()) x = input('enter to cont') print( corComp.corr()) x = input('enter to cont') C = pd.io.data.get_data_yahoo('C', start=datetime.datetime(2011, 10, 1), end=datetime.datetime(2014, 1, 1)) AAPL = pd.io.data.get_data_yahoo('AAPL', start=datetime.datetime(2011, 10, 1), end=datetime.datetime(2014, 1, 1)) MSFT = pd.io.data.get_data_yahoo('MSFT', start=datetime.datetime(2011, 10, 1), end=datetime.datetime(2014, 1, 1)) TSLA = pd.io.data.get_data_yahoo('TSLA', start=datetime.datetime(2011, 10, 1), end=datetime.datetime(2014, 1, 1)) BAC = pd.io.data.get_data_yahoo('BAC', start=datetime.datetime(2011, 10, 1), end=datetime.datetime(2014, 1, 1)) BBRY = pd.io.data.get_data_yahoo('BBRY', start=datetime.datetime(2011, 10, 1), end=datetime.datetime(2014, 1, 1)) CMG = pd.io.data.get_data_yahoo('CMG', start=datetime.datetime(2011, 10, 1), end=datetime.datetime(2014, 1, 1)) EBAY = pd.io.data.get_data_yahoo('EBAY', start=datetime.datetime(2011, 10, 1), end=datetime.datetime(2014, 1, 1)) JPM = pd.io.data.get_data_yahoo('JPM', start=datetime.datetime(2011, 10, 1), end=datetime.datetime(2014, 1, 1)) SBUX = pd.io.data.get_data_yahoo('SBUX', start=datetime.datetime(2011, 10, 1), end=datetime.datetime(2014, 1, 1)) TGT = 
pd.io.data.get_data_yahoo('TGT', start=datetime.datetime(2011, 10, 1), end=datetime.datetime(2014, 1, 1)) WFC = pd.io.data.get_data_yahoo('WFC', start=datetime.datetime(2011, 10, 1), end=datetime.datetime(2014, 1, 1)) x = input('enter to cont') print( C.head()) del C['Open'] # , 'high', 'low', 'close', 'volume' del C['High'] del C['Low'] del C['Close'] del C['Volume'] corComp = C corComp.rename(columns={'Adj Close': 'C'}, inplace=True) corComp['BAC'] = BAC['Adj Close'] corComp['MSFT'] = MSFT['Adj Close'] corComp['TSLA'] = TSLA['Adj Close'] corComp['AAPL'] = AAPL['Adj Close'] corComp['BBRY'] = BBRY['Adj Close'] corComp['CMG'] = CMG['Adj Close'] corComp['EBAY'] = EBAY['Adj Close'] corComp['JPM'] = JPM['Adj Close'] corComp['SBUX'] = SBUX['Adj Close'] corComp['TGT'] = TGT['Adj Close'] corComp['WFC'] = WFC['Adj Close'] print( corComp.head()) x = input('enter to cont') print( corComp.corr()) x = input('enter to cont') fancy = corComp.corr() fancy.to_csv('bigmoney.csv')
[((4, 5, 4, 72), 'pandas.read_csv', 'pd.read_csv', (), '', True, 'import pandas as pd\n'), ((48, 39, 48, 69), 'datetime.datetime', 'datetime.datetime', ({(48, 57, 48, 61): '2011', (48, 63, 48, 65): '10', (48, 67, 48, 68): '1'}, {}), '(2011, 10, 1)', False, 'import datetime\n'), ((49, 37, 49, 66), 'datetime.datetime', 'datetime.datetime', ({(49, 55, 49, 59): '2014', (49, 61, 49, 62): '1', (49, 64, 49, 65): '1'}, {}), '(2014, 1, 1)', False, 'import datetime\n'), ((51, 39, 51, 69), 'datetime.datetime', 'datetime.datetime', ({(51, 57, 51, 61): '2011', (51, 63, 51, 65): '10', (51, 67, 51, 68): '1'}, {}), '(2011, 10, 1)', False, 'import datetime\n'), ((52, 37, 52, 66), 'datetime.datetime', 'datetime.datetime', ({(52, 55, 52, 59): '2014', (52, 61, 52, 62): '1', (52, 64, 52, 65): '1'}, {}), '(2014, 1, 1)', False, 'import datetime\n'), ((54, 39, 54, 69), 'datetime.datetime', 'datetime.datetime', ({(54, 57, 54, 61): '2011', (54, 63, 54, 65): '10', (54, 67, 54, 68): '1'}, {}), '(2011, 10, 1)', False, 'import datetime\n'), ((55, 37, 55, 66), 'datetime.datetime', 'datetime.datetime', ({(55, 55, 55, 59): '2014', (55, 61, 55, 62): '1', (55, 64, 55, 65): '1'}, {}), '(2014, 1, 1)', False, 'import datetime\n'), ((57, 39, 57, 69), 'datetime.datetime', 'datetime.datetime', ({(57, 57, 57, 61): '2011', (57, 63, 57, 65): '10', (57, 67, 57, 68): '1'}, {}), '(2011, 10, 1)', False, 'import datetime\n'), ((58, 37, 58, 66), 'datetime.datetime', 'datetime.datetime', ({(58, 55, 58, 59): '2014', (58, 61, 58, 62): '1', (58, 64, 58, 65): '1'}, {}), '(2014, 1, 1)', False, 'import datetime\n'), ((91, 39, 91, 69), 'datetime.datetime', 'datetime.datetime', ({(91, 57, 91, 61): '2011', (91, 63, 91, 65): '10', (91, 67, 91, 68): '1'}, {}), '(2011, 10, 1)', False, 'import datetime\n'), ((92, 37, 92, 66), 'datetime.datetime', 'datetime.datetime', ({(92, 55, 92, 59): '2014', (92, 61, 92, 62): '1', (92, 64, 92, 65): '1'}, {}), '(2014, 1, 1)', False, 'import datetime\n'), ((94, 39, 94, 69), 'datetime.datetime', 'datetime.datetime', ({(94, 57, 94, 61): '2011', (94, 63, 94, 65): '10', (94, 67, 94, 68): '1'}, {}), '(2011, 10, 1)', False, 'import datetime\n'), ((95, 37, 95, 66), 'datetime.datetime', 'datetime.datetime', ({(95, 55, 95, 59): '2014', (95, 61, 95, 62): '1', (95, 64, 95, 65): '1'}, {}), '(2014, 1, 1)', False, 'import datetime\n'), ((97, 39, 97, 69), 'datetime.datetime', 'datetime.datetime', ({(97, 57, 97, 61): '2011', (97, 63, 97, 65): '10', (97, 67, 97, 68): '1'}, {}), '(2011, 10, 1)', False, 'import datetime\n'), ((98, 37, 98, 66), 'datetime.datetime', 'datetime.datetime', ({(98, 55, 98, 59): '2014', (98, 61, 98, 62): '1', (98, 64, 98, 65): '1'}, {}), '(2014, 1, 1)', False, 'import datetime\n'), ((100, 39, 100, 69), 'datetime.datetime', 'datetime.datetime', ({(100, 57, 100, 61): '2011', (100, 63, 100, 65): '10', (100, 67, 100, 68): '1'}, {}), '(2011, 10, 1)', False, 'import datetime\n'), ((101, 37, 101, 66), 'datetime.datetime', 'datetime.datetime', ({(101, 55, 101, 59): '2014', (101, 61, 101, 62): '1', (101, 64, 101, 65): '1'}, {}), '(2014, 1, 1)', False, 'import datetime\n'), ((103, 39, 103, 69), 'datetime.datetime', 'datetime.datetime', ({(103, 57, 103, 61): '2011', (103, 63, 103, 65): '10', (103, 67, 103, 68): '1'}, {}), '(2011, 10, 1)', False, 'import datetime\n'), ((104, 37, 104, 66), 'datetime.datetime', 'datetime.datetime', ({(104, 55, 104, 59): '2014', (104, 61, 104, 62): '1', (104, 64, 104, 65): '1'}, {}), '(2014, 1, 1)', False, 'import datetime\n'), ((106, 39, 106, 69), 'datetime.datetime', 'datetime.datetime', 
({(106, 57, 106, 61): '2011', (106, 63, 106, 65): '10', (106, 67, 106, 68): '1'}, {}), '(2011, 10, 1)', False, 'import datetime\n'), ((107, 37, 107, 66), 'datetime.datetime', 'datetime.datetime', ({(107, 55, 107, 59): '2014', (107, 61, 107, 62): '1', (107, 64, 107, 65): '1'}, {}), '(2014, 1, 1)', False, 'import datetime\n'), ((109, 39, 109, 69), 'datetime.datetime', 'datetime.datetime', ({(109, 57, 109, 61): '2011', (109, 63, 109, 65): '10', (109, 67, 109, 68): '1'}, {}), '(2011, 10, 1)', False, 'import datetime\n'), ((110, 37, 110, 66), 'datetime.datetime', 'datetime.datetime', ({(110, 55, 110, 59): '2014', (110, 61, 110, 62): '1', (110, 64, 110, 65): '1'}, {}), '(2014, 1, 1)', False, 'import datetime\n'), ((112, 39, 112, 69), 'datetime.datetime', 'datetime.datetime', ({(112, 57, 112, 61): '2011', (112, 63, 112, 65): '10', (112, 67, 112, 68): '1'}, {}), '(2011, 10, 1)', False, 'import datetime\n'), ((113, 37, 113, 66), 'datetime.datetime', 'datetime.datetime', ({(113, 55, 113, 59): '2014', (113, 61, 113, 62): '1', (113, 64, 113, 65): '1'}, {}), '(2014, 1, 1)', False, 'import datetime\n'), ((115, 39, 115, 69), 'datetime.datetime', 'datetime.datetime', ({(115, 57, 115, 61): '2011', (115, 63, 115, 65): '10', (115, 67, 115, 68): '1'}, {}), '(2011, 10, 1)', False, 'import datetime\n'), ((116, 37, 116, 66), 'datetime.datetime', 'datetime.datetime', ({(116, 55, 116, 59): '2014', (116, 61, 116, 62): '1', (116, 64, 116, 65): '1'}, {}), '(2014, 1, 1)', False, 'import datetime\n'), ((118, 39, 118, 69), 'datetime.datetime', 'datetime.datetime', ({(118, 57, 118, 61): '2011', (118, 63, 118, 65): '10', (118, 67, 118, 68): '1'}, {}), '(2011, 10, 1)', False, 'import datetime\n'), ((119, 37, 119, 66), 'datetime.datetime', 'datetime.datetime', ({(119, 55, 119, 59): '2014', (119, 61, 119, 62): '1', (119, 64, 119, 65): '1'}, {}), '(2014, 1, 1)', False, 'import datetime\n'), ((121, 39, 121, 69), 'datetime.datetime', 'datetime.datetime', ({(121, 57, 121, 61): '2011', (121, 63, 121, 65): '10', (121, 67, 121, 68): '1'}, {}), '(2011, 10, 1)', False, 'import datetime\n'), ((122, 37, 122, 66), 'datetime.datetime', 'datetime.datetime', ({(122, 55, 122, 59): '2014', (122, 61, 122, 62): '1', (122, 64, 122, 65): '1'}, {}), '(2014, 1, 1)', False, 'import datetime\n'), ((124, 39, 124, 69), 'datetime.datetime', 'datetime.datetime', ({(124, 57, 124, 61): '2011', (124, 63, 124, 65): '10', (124, 67, 124, 68): '1'}, {}), '(2011, 10, 1)', False, 'import datetime\n'), ((125, 37, 125, 66), 'datetime.datetime', 'datetime.datetime', ({(125, 55, 125, 59): '2014', (125, 61, 125, 62): '1', (125, 64, 125, 65): '1'}, {}), '(2014, 1, 1)', False, 'import datetime\n')]
songdaegeun/school-zone-enforcement-system
working/tkinter_widget/test.py
b5680909fd5a348575563534428d2117f8dc2e3f
import cv2
import numpy as np
import threading


def test():
    # Continuously reload the captured frame from disk and display it.
    while True:
        img1 = cv2.imread('captured car1.jpg')
        print("{}".format(img1.shape))
        print("{}".format(img1))
        cv2.imshow('asd', img1)
        cv2.waitKey(1)


# Run the display loop in a background thread.
t1 = threading.Thread(target=test)
t1.start()
[((13, 5, 13, 34), 'threading.Thread', 'threading.Thread', (), '', False, 'import threading\n'), ((7, 13, 7, 44), 'cv2.imread', 'cv2.imread', ({(7, 24, 7, 43): '"""captured car1.jpg"""'}, {}), "('captured car1.jpg')", False, 'import cv2\n'), ((10, 8, 10, 30), 'cv2.imshow', 'cv2.imshow', ({(10, 19, 10, 24): '"""asd"""', (10, 25, 10, 29): 'img1'}, {}), "('asd', img1)", False, 'import cv2\n'), ((11, 8, 11, 22), 'cv2.waitKey', 'cv2.waitKey', ({(11, 20, 11, 21): '(1)'}, {}), '(1)', False, 'import cv2\n')]
aristanetworks/ceilometer
ceilometer/compute/virt/hyperv/utilsv2.py
8776b137f82f71eef1241bcb1600de10c1f77394
# Copyright 2013 Cloudbase Solutions Srl # # Author: Claudiu Belu <[email protected]> # Alessandro Pilotti <[email protected]> # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Utility class for VM related operations. Based on the "root/virtualization/v2" namespace available starting with Hyper-V Server / Windows Server 2012. """ import sys if sys.platform == 'win32': import wmi from oslo.config import cfg from ceilometer.compute.virt import inspector from ceilometer.openstack.common.gettextutils import _ from ceilometer.openstack.common import log as logging CONF = cfg.CONF LOG = logging.getLogger(__name__) class HyperVException(inspector.InspectorException): pass class UtilsV2(object): _VIRTUAL_SYSTEM_TYPE_REALIZED = 'Microsoft:Hyper-V:System:Realized' _PROC_SETTING = 'Msvm_ProcessorSettingData' _SYNTH_ETH_PORT = 'Msvm_SyntheticEthernetPortSettingData' _ETH_PORT_ALLOC = 'Msvm_EthernetPortAllocationSettingData' _PORT_ACL_SET_DATA = 'Msvm_EthernetSwitchPortAclSettingData' _STORAGE_ALLOC = 'Msvm_StorageAllocationSettingData' _VS_SETTING_DATA = 'Msvm_VirtualSystemSettingData' _METRICS_ME = 'Msvm_MetricForME' _BASE_METRICS_VALUE = 'Msvm_BaseMetricValue' _CPU_METRIC_NAME = 'Aggregated Average CPU Utilization' _NET_IN_METRIC_NAME = 'Filtered Incoming Network Traffic' _NET_OUT_METRIC_NAME = 'Filtered Outgoing Network Traffic' # Disk metrics are supported from Hyper-V 2012 R2 _DISK_RD_METRIC_NAME = 'Disk Data Read' _DISK_WR_METRIC_NAME = 'Disk Data Written' def __init__(self, host='.'): if sys.platform == 'win32': self._init_hyperv_wmi_conn(host) self._init_cimv2_wmi_conn(host) self._host_cpu_info = None def _init_hyperv_wmi_conn(self, host): self._conn = wmi.WMI(moniker='//%s/root/virtualization/v2' % host) def _init_cimv2_wmi_conn(self, host): self._conn_cimv2 = wmi.WMI(moniker='//%s/root/cimv2' % host) def get_host_cpu_info(self): if not self._host_cpu_info: host_cpus = self._conn_cimv2.Win32_Processor() self._host_cpu_info = (host_cpus[0].MaxClockSpeed, len(host_cpus)) return self._host_cpu_info def get_all_vms(self): vms = [(v.ElementName, v.Name) for v in self._conn.Msvm_ComputerSystem(['ElementName', 'Name'], Caption="Virtual Machine")] return vms def get_cpu_metrics(self, vm_name): vm = self._lookup_vm(vm_name) cpu_sd = self._get_vm_resources(vm, self._PROC_SETTING)[0] cpu_metrics_def = self._get_metric_def(self._CPU_METRIC_NAME) cpu_metric_aggr = self._get_metrics(vm, cpu_metrics_def) cpu_used = 0 if cpu_metric_aggr: cpu_used = long(cpu_metric_aggr[0].MetricValue) return (cpu_used, int(cpu_sd.VirtualQuantity), long(vm.OnTimeInMilliseconds)) def get_vnic_metrics(self, vm_name): vm = self._lookup_vm(vm_name) ports = self._get_vm_resources(vm, self._ETH_PORT_ALLOC) vnics = self._get_vm_resources(vm, self._SYNTH_ETH_PORT) metric_def_in = self._get_metric_def(self._NET_IN_METRIC_NAME) metric_def_out = self._get_metric_def(self._NET_OUT_METRIC_NAME) for port in ports: vnic = [v for v in vnics if port.Parent == v.path_()][0] metric_value_instances = self._get_metric_value_instances( 
port.associators(wmi_result_class=self._PORT_ACL_SET_DATA), self._BASE_METRICS_VALUE) metric_values = self._sum_metric_values_by_defs( metric_value_instances, [metric_def_in, metric_def_out]) yield { 'rx_mb': metric_values[0], 'tx_mb': metric_values[1], 'element_name': vnic.ElementName, 'address': vnic.Address } def get_disk_metrics(self, vm_name): vm = self._lookup_vm(vm_name) metric_def_r = self._get_metric_def(self._DISK_RD_METRIC_NAME) metric_def_w = self._get_metric_def(self._DISK_WR_METRIC_NAME) disks = self._get_vm_resources(vm, self._STORAGE_ALLOC) for disk in disks: metric_values = self._get_metric_values( disk, [metric_def_r, metric_def_w]) # Thi sis e.g. the VHD file location if disk.HostResource: host_resource = disk.HostResource[0] yield { # Values are in megabytes 'read_mb': metric_values[0], 'write_mb': metric_values[1], 'instance_id': disk.InstanceID, 'host_resource': host_resource } def _sum_metric_values(self, metrics): tot_metric_val = 0 for metric in metrics: tot_metric_val += long(metric.MetricValue) return tot_metric_val def _sum_metric_values_by_defs(self, element_metrics, metric_defs): metric_values = [] for metric_def in metric_defs: if metric_def: metrics = self._filter_metrics(element_metrics, metric_def) metric_values.append(self._sum_metric_values(metrics)) else: # In case the metric is not defined on this host metric_values.append(0) return metric_values def _get_metric_value_instances(self, elements, result_class): instances = [] for el in elements: associators = el.associators(wmi_result_class=result_class) if associators: instances.append(associators[0]) return instances def _get_metric_values(self, element, metric_defs): element_metrics = element.associators( wmi_association_class=self._METRICS_ME) return self._sum_metric_values_by_defs(element_metrics, metric_defs) def _lookup_vm(self, vm_name): vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name) n = len(vms) if n == 0: raise inspector.InstanceNotFoundException( _('VM %s not found on Hyper-V') % vm_name) elif n > 1: raise HyperVException(_('Duplicate VM name found: %s') % vm_name) else: return vms[0] def _get_metrics(self, element, metric_def): return self._filter_metrics( element.associators( wmi_association_class=self._METRICS_ME), metric_def) def _filter_metrics(self, all_metrics, metric_def): return [v for v in all_metrics if v.MetricDefinitionId == metric_def.Id] def _get_metric_def(self, metric_def): metric = self._conn.CIM_BaseMetricDefinition(ElementName=metric_def) if metric: return metric[0] def _get_vm_setting_data(self, vm): vm_settings = vm.associators( wmi_result_class=self._VS_SETTING_DATA) # Avoid snapshots return [s for s in vm_settings if s.VirtualSystemType == self._VIRTUAL_SYSTEM_TYPE_REALIZED][0] def _get_vm_resources(self, vm, resource_class): setting_data = self._get_vm_setting_data(vm) return setting_data.associators(wmi_result_class=resource_class)
[((35, 6, 35, 33), 'ceilometer.openstack.common.log.getLogger', 'logging.getLogger', ({(35, 24, 35, 32): '__name__'}, {}), '(__name__)', True, 'from ceilometer.openstack.common import log as logging\n'), ((69, 21, 69, 74), 'wmi.WMI', 'wmi.WMI', (), '', False, 'import wmi\n'), ((72, 27, 72, 68), 'wmi.WMI', 'wmi.WMI', (), '', False, 'import wmi\n'), ((182, 16, 182, 47), 'ceilometer.openstack.common.gettextutils._', '_', ({(182, 18, 182, 46): '"""VM %s not found on Hyper-V"""'}, {}), "('VM %s not found on Hyper-V')", False, 'from ceilometer.openstack.common.gettextutils import _\n'), ((184, 34, 184, 66), 'ceilometer.openstack.common.gettextutils._', '_', ({(184, 36, 184, 65): '"""Duplicate VM name found: %s"""'}, {}), "('Duplicate VM name found: %s')", False, 'from ceilometer.openstack.common.gettextutils import _\n')]
cajones314/avocd2019
src/cli.py
268e03c5d1bb5b3e14459b831916bb7846f40def
# system from io import IOBase, StringIO import os # 3rd party import click # internal from days import DayFactory # import logging # logger = logging.getLogger(__name__) # logger.setLevel(logging.DEBUG) # ch = logging.StreamHandler() # logger.addHandler(ch) @click.group(invoke_without_command=True) @click.option('-d', '--day', required=True, type=click.IntRange(1, 31), metavar="<1..31>", help="Day you want to select.") @click.option('-p', '--puzzle', required=True, type=click.IntRange(1, 2), metavar="<1|2>", help="Puzzle you want to run.") @click.option('-i', '--input', required=True, type=click.Path(exists=True), help="Path to puzzle data.") def cli(day: int, puzzle: int, input: str): filename = os.path.join(input, f"{day:02}_puzzle_{puzzle}.txt") if os.path.exists(filename): input_stream = open(filename, "r") else: input_stream = StringIO('') avocd = DayFactory(day, input_stream) try: print(avocd.run(puzzle)) except NotImplementedError: print(f"Puzzle {puzzle} for day {day} not implemented.") if __name__ == "__main__": # pylint: disable=no-value-for-parameter cli()
[((19, 1, 19, 41), 'click.group', 'click.group', (), '', False, 'import click\n'), ((24, 13, 24, 65), 'os.path.join', 'os.path.join', ({(24, 26, 24, 31): 'input', (24, 33, 24, 64): 'f"""{day:02}_puzzle_{puzzle}.txt"""'}, {}), "(input, f'{day:02}_puzzle_{puzzle}.txt')", False, 'import os\n'), ((25, 5, 25, 29), 'os.path.exists', 'os.path.exists', ({(25, 20, 25, 28): 'filename'}, {}), '(filename)', False, 'import os\n'), ((29, 10, 29, 39), 'days.DayFactory', 'DayFactory', ({(29, 21, 29, 24): 'day', (29, 26, 29, 38): 'input_stream'}, {}), '(day, input_stream)', False, 'from days import DayFactory\n'), ((28, 19, 28, 31), 'io.StringIO', 'StringIO', ({(28, 28, 28, 30): '""""""'}, {}), "('')", False, 'from io import IOBase, StringIO\n'), ((20, 49, 20, 70), 'click.IntRange', 'click.IntRange', ({(20, 64, 20, 65): '(1)', (20, 67, 20, 69): '(31)'}, {}), '(1, 31)', False, 'import click\n'), ((21, 52, 21, 72), 'click.IntRange', 'click.IntRange', ({(21, 67, 21, 68): '(1)', (21, 70, 21, 71): '(2)'}, {}), '(1, 2)', False, 'import click\n'), ((22, 51, 22, 74), 'click.Path', 'click.Path', (), '', False, 'import click\n')]
wrosecrans/colormap
option_c.py
0b6a3b7e4caa5df72e7bad8ba196acfbbe5e5946
from matplotlib.colors import LinearSegmentedColormap from numpy import nan, inf # Used to reconstruct the colormap in viscm parameters = {'xp': [-5.4895292543686764, 14.790571669586654, 82.5546687431056, 29.15531114139253, -4.1316769886951761, -13.002076438907238], 'yp': [-35.948168839230306, -42.273376159885785, -28.845467523197698, 52.03426124197, 36.832712600868973, 40.792291220556734], 'min_JK': 16.8314150305, 'max_JK': 95} cm_data = [[ 5.03832136e-02, 2.98028976e-02, 5.27974883e-01], [ 6.35363639e-02, 2.84259729e-02, 5.33123681e-01], [ 7.53531234e-02, 2.72063728e-02, 5.38007001e-01], [ 8.62217979e-02, 2.61253206e-02, 5.42657691e-01], [ 9.63786097e-02, 2.51650976e-02, 5.47103487e-01], [ 1.05979704e-01, 2.43092436e-02, 5.51367851e-01], [ 1.15123641e-01, 2.35562500e-02, 5.55467728e-01], [ 1.23902903e-01, 2.28781011e-02, 5.59423480e-01], [ 1.32380720e-01, 2.22583774e-02, 5.63250116e-01], [ 1.40603076e-01, 2.16866674e-02, 5.66959485e-01], [ 1.48606527e-01, 2.11535876e-02, 5.70561711e-01], [ 1.56420649e-01, 2.06507174e-02, 5.74065446e-01], [ 1.64069722e-01, 2.01705326e-02, 5.77478074e-01], [ 1.71573925e-01, 1.97063415e-02, 5.80805890e-01], [ 1.78950212e-01, 1.92522243e-02, 5.84054243e-01], [ 1.86212958e-01, 1.88029767e-02, 5.87227661e-01], [ 1.93374449e-01, 1.83540593e-02, 5.90329954e-01], [ 2.00445260e-01, 1.79015512e-02, 5.93364304e-01], [ 2.07434551e-01, 1.74421086e-02, 5.96333341e-01], [ 2.14350298e-01, 1.69729276e-02, 5.99239207e-01], [ 2.21196750e-01, 1.64970484e-02, 6.02083323e-01], [ 2.27982971e-01, 1.60071509e-02, 6.04867403e-01], [ 2.34714537e-01, 1.55015065e-02, 6.07592438e-01], [ 2.41396253e-01, 1.49791041e-02, 6.10259089e-01], [ 2.48032377e-01, 1.44393586e-02, 6.12867743e-01], [ 2.54626690e-01, 1.38820918e-02, 6.15418537e-01], [ 2.61182562e-01, 1.33075156e-02, 6.17911385e-01], [ 2.67702993e-01, 1.27162163e-02, 6.20345997e-01], [ 2.74190665e-01, 1.21091423e-02, 6.22721903e-01], [ 2.80647969e-01, 1.14875915e-02, 6.25038468e-01], [ 2.87076059e-01, 1.08554862e-02, 6.27294975e-01], [ 2.93477695e-01, 1.02128849e-02, 6.29490490e-01], [ 2.99855122e-01, 9.56079551e-03, 6.31623923e-01], [ 3.06209825e-01, 8.90185346e-03, 6.33694102e-01], [ 3.12543124e-01, 8.23900704e-03, 6.35699759e-01], [ 3.18856183e-01, 7.57551051e-03, 6.37639537e-01], [ 3.25150025e-01, 6.91491734e-03, 6.39512001e-01], [ 3.31425547e-01, 6.26107379e-03, 6.41315649e-01], [ 3.37683446e-01, 5.61830889e-03, 6.43048936e-01], [ 3.43924591e-01, 4.99053080e-03, 6.44710195e-01], [ 3.50149699e-01, 4.38202557e-03, 6.46297711e-01], [ 3.56359209e-01, 3.79781761e-03, 6.47809772e-01], [ 3.62553473e-01, 3.24319591e-03, 6.49244641e-01], [ 3.68732762e-01, 2.72370721e-03, 6.50600561e-01], [ 3.74897270e-01, 2.24514897e-03, 6.51875762e-01], [ 3.81047116e-01, 1.81356205e-03, 6.53068467e-01], [ 3.87182639e-01, 1.43446923e-03, 6.54176761e-01], [ 3.93304010e-01, 1.11388259e-03, 6.55198755e-01], [ 3.99410821e-01, 8.59420809e-04, 6.56132835e-01], [ 4.05502914e-01, 6.78091517e-04, 6.56977276e-01], [ 4.11580082e-01, 5.77101735e-04, 6.57730380e-01], [ 4.17642063e-01, 5.63847476e-04, 6.58390492e-01], [ 4.23688549e-01, 6.45902780e-04, 6.58956004e-01], [ 4.29719186e-01, 8.31008207e-04, 6.59425363e-01], [ 4.35733575e-01, 1.12705875e-03, 6.59797077e-01], [ 4.41732123e-01, 1.53984779e-03, 6.60069009e-01], [ 4.47713600e-01, 2.07954744e-03, 6.60240367e-01], [ 4.53677394e-01, 2.75470302e-03, 6.60309966e-01], [ 4.59622938e-01, 3.57374415e-03, 6.60276655e-01], [ 4.65549631e-01, 4.54518084e-03, 6.60139383e-01], [ 4.71456847e-01, 5.67758762e-03, 6.59897210e-01], 
[ 4.77343929e-01, 6.97958743e-03, 6.59549311e-01], [ 4.83210198e-01, 8.45983494e-03, 6.59094989e-01], [ 4.89054951e-01, 1.01269996e-02, 6.58533677e-01], [ 4.94877466e-01, 1.19897486e-02, 6.57864946e-01], [ 5.00677687e-01, 1.40550640e-02, 6.57087561e-01], [ 5.06454143e-01, 1.63333443e-02, 6.56202294e-01], [ 5.12206035e-01, 1.88332232e-02, 6.55209222e-01], [ 5.17932580e-01, 2.15631918e-02, 6.54108545e-01], [ 5.23632990e-01, 2.45316468e-02, 6.52900629e-01], [ 5.29306474e-01, 2.77468735e-02, 6.51586010e-01], [ 5.34952244e-01, 3.12170300e-02, 6.50165396e-01], [ 5.40569510e-01, 3.49501310e-02, 6.48639668e-01], [ 5.46157494e-01, 3.89540334e-02, 6.47009884e-01], [ 5.51715423e-01, 4.31364795e-02, 6.45277275e-01], [ 5.57242538e-01, 4.73307585e-02, 6.43443250e-01], [ 5.62738096e-01, 5.15448092e-02, 6.41509389e-01], [ 5.68201372e-01, 5.57776706e-02, 6.39477440e-01], [ 5.73631859e-01, 6.00281369e-02, 6.37348841e-01], [ 5.79028682e-01, 6.42955547e-02, 6.35126108e-01], [ 5.84391137e-01, 6.85790261e-02, 6.32811608e-01], [ 5.89718606e-01, 7.28775875e-02, 6.30407727e-01], [ 5.95010505e-01, 7.71902878e-02, 6.27916992e-01], [ 6.00266283e-01, 8.15161895e-02, 6.25342058e-01], [ 6.05485428e-01, 8.58543713e-02, 6.22685703e-01], [ 6.10667469e-01, 9.02039303e-02, 6.19950811e-01], [ 6.15811974e-01, 9.45639838e-02, 6.17140367e-01], [ 6.20918555e-01, 9.89336721e-02, 6.14257440e-01], [ 6.25986869e-01, 1.03312160e-01, 6.11305174e-01], [ 6.31016615e-01, 1.07698641e-01, 6.08286774e-01], [ 6.36007543e-01, 1.12092335e-01, 6.05205491e-01], [ 6.40959444e-01, 1.16492495e-01, 6.02064611e-01], [ 6.45872158e-01, 1.20898405e-01, 5.98867442e-01], [ 6.50745571e-01, 1.25309384e-01, 5.95617300e-01], [ 6.55579615e-01, 1.29724785e-01, 5.92317494e-01], [ 6.60374266e-01, 1.34143997e-01, 5.88971318e-01], [ 6.65129493e-01, 1.38566428e-01, 5.85582301e-01], [ 6.69845385e-01, 1.42991540e-01, 5.82153572e-01], [ 6.74522060e-01, 1.47418835e-01, 5.78688247e-01], [ 6.79159664e-01, 1.51847851e-01, 5.75189431e-01], [ 6.83758384e-01, 1.56278163e-01, 5.71660158e-01], [ 6.88318440e-01, 1.60709387e-01, 5.68103380e-01], [ 6.92840088e-01, 1.65141174e-01, 5.64521958e-01], [ 6.97323615e-01, 1.69573215e-01, 5.60918659e-01], [ 7.01769334e-01, 1.74005236e-01, 5.57296144e-01], [ 7.06177590e-01, 1.78437000e-01, 5.53656970e-01], [ 7.10548747e-01, 1.82868306e-01, 5.50003579e-01], [ 7.14883195e-01, 1.87298986e-01, 5.46338299e-01], [ 7.19181339e-01, 1.91728906e-01, 5.42663338e-01], [ 7.23443604e-01, 1.96157962e-01, 5.38980786e-01], [ 7.27670428e-01, 2.00586086e-01, 5.35292612e-01], [ 7.31862231e-01, 2.05013174e-01, 5.31600995e-01], [ 7.36019424e-01, 2.09439071e-01, 5.27908434e-01], [ 7.40142557e-01, 2.13863965e-01, 5.24215533e-01], [ 7.44232102e-01, 2.18287899e-01, 5.20523766e-01], [ 7.48288533e-01, 2.22710942e-01, 5.16834495e-01], [ 7.52312321e-01, 2.27133187e-01, 5.13148963e-01], [ 7.56303937e-01, 2.31554749e-01, 5.09468305e-01], [ 7.60263849e-01, 2.35975765e-01, 5.05793543e-01], [ 7.64192516e-01, 2.40396394e-01, 5.02125599e-01], [ 7.68090391e-01, 2.44816813e-01, 4.98465290e-01], [ 7.71957916e-01, 2.49237220e-01, 4.94813338e-01], [ 7.75795522e-01, 2.53657797e-01, 4.91170517e-01], [ 7.79603614e-01, 2.58078397e-01, 4.87539124e-01], [ 7.83382636e-01, 2.62499662e-01, 4.83917732e-01], [ 7.87132978e-01, 2.66921859e-01, 4.80306702e-01], [ 7.90855015e-01, 2.71345267e-01, 4.76706319e-01], [ 7.94549101e-01, 2.75770179e-01, 4.73116798e-01], [ 7.98215577e-01, 2.80196901e-01, 4.69538286e-01], [ 8.01854758e-01, 2.84625750e-01, 4.65970871e-01], [ 8.05466945e-01, 2.89057057e-01, 
4.62414580e-01], [ 8.09052419e-01, 2.93491117e-01, 4.58869577e-01], [ 8.12611506e-01, 2.97927865e-01, 4.55337565e-01], [ 8.16144382e-01, 3.02368130e-01, 4.51816385e-01], [ 8.19651255e-01, 3.06812282e-01, 4.48305861e-01], [ 8.23132309e-01, 3.11260703e-01, 4.44805781e-01], [ 8.26587706e-01, 3.15713782e-01, 4.41315901e-01], [ 8.30017584e-01, 3.20171913e-01, 4.37835947e-01], [ 8.33422053e-01, 3.24635499e-01, 4.34365616e-01], [ 8.36801237e-01, 3.29104836e-01, 4.30905052e-01], [ 8.40155276e-01, 3.33580106e-01, 4.27454836e-01], [ 8.43484103e-01, 3.38062109e-01, 4.24013059e-01], [ 8.46787726e-01, 3.42551272e-01, 4.20579333e-01], [ 8.50066132e-01, 3.47048028e-01, 4.17153264e-01], [ 8.53319279e-01, 3.51552815e-01, 4.13734445e-01], [ 8.56547103e-01, 3.56066072e-01, 4.10322469e-01], [ 8.59749520e-01, 3.60588229e-01, 4.06916975e-01], [ 8.62926559e-01, 3.65119408e-01, 4.03518809e-01], [ 8.66077920e-01, 3.69660446e-01, 4.00126027e-01], [ 8.69203436e-01, 3.74211795e-01, 3.96738211e-01], [ 8.72302917e-01, 3.78773910e-01, 3.93354947e-01], [ 8.75376149e-01, 3.83347243e-01, 3.89975832e-01], [ 8.78422895e-01, 3.87932249e-01, 3.86600468e-01], [ 8.81442916e-01, 3.92529339e-01, 3.83228622e-01], [ 8.84435982e-01, 3.97138877e-01, 3.79860246e-01], [ 8.87401682e-01, 4.01761511e-01, 3.76494232e-01], [ 8.90339687e-01, 4.06397694e-01, 3.73130228e-01], [ 8.93249647e-01, 4.11047871e-01, 3.69767893e-01], [ 8.96131191e-01, 4.15712489e-01, 3.66406907e-01], [ 8.98983931e-01, 4.20391986e-01, 3.63046965e-01], [ 9.01807455e-01, 4.25086807e-01, 3.59687758e-01], [ 9.04601295e-01, 4.29797442e-01, 3.56328796e-01], [ 9.07364995e-01, 4.34524335e-01, 3.52969777e-01], [ 9.10098088e-01, 4.39267908e-01, 3.49610469e-01], [ 9.12800095e-01, 4.44028574e-01, 3.46250656e-01], [ 9.15470518e-01, 4.48806744e-01, 3.42890148e-01], [ 9.18108848e-01, 4.53602818e-01, 3.39528771e-01], [ 9.20714383e-01, 4.58417420e-01, 3.36165582e-01], [ 9.23286660e-01, 4.63250828e-01, 3.32800827e-01], [ 9.25825146e-01, 4.68103387e-01, 3.29434512e-01], [ 9.28329275e-01, 4.72975465e-01, 3.26066550e-01], [ 9.30798469e-01, 4.77867420e-01, 3.22696876e-01], [ 9.33232140e-01, 4.82779603e-01, 3.19325444e-01], [ 9.35629684e-01, 4.87712357e-01, 3.15952211e-01], [ 9.37990034e-01, 4.92666544e-01, 3.12575440e-01], [ 9.40312939e-01, 4.97642038e-01, 3.09196628e-01], [ 9.42597771e-01, 5.02639147e-01, 3.05815824e-01], [ 9.44843893e-01, 5.07658169e-01, 3.02433101e-01], [ 9.47050662e-01, 5.12699390e-01, 2.99048555e-01], [ 9.49217427e-01, 5.17763087e-01, 2.95662308e-01], [ 9.51343530e-01, 5.22849522e-01, 2.92274506e-01], [ 9.53427725e-01, 5.27959550e-01, 2.88883445e-01], [ 9.55469640e-01, 5.33093083e-01, 2.85490391e-01], [ 9.57468770e-01, 5.38250172e-01, 2.82096149e-01], [ 9.59424430e-01, 5.43431038e-01, 2.78700990e-01], [ 9.61335930e-01, 5.48635890e-01, 2.75305214e-01], [ 9.63202573e-01, 5.53864931e-01, 2.71909159e-01], [ 9.65023656e-01, 5.59118349e-01, 2.68513200e-01], [ 9.66798470e-01, 5.64396327e-01, 2.65117752e-01], [ 9.68525639e-01, 5.69699633e-01, 2.61721488e-01], [ 9.70204593e-01, 5.75028270e-01, 2.58325424e-01], [ 9.71835007e-01, 5.80382015e-01, 2.54931256e-01], [ 9.73416145e-01, 5.85761012e-01, 2.51539615e-01], [ 9.74947262e-01, 5.91165394e-01, 2.48151200e-01], [ 9.76427606e-01, 5.96595287e-01, 2.44766775e-01], [ 9.77856416e-01, 6.02050811e-01, 2.41387186e-01], [ 9.79232922e-01, 6.07532077e-01, 2.38013359e-01], [ 9.80556344e-01, 6.13039190e-01, 2.34646316e-01], [ 9.81825890e-01, 6.18572250e-01, 2.31287178e-01], [ 9.83040742e-01, 6.24131362e-01, 2.27937141e-01], [ 9.84198924e-01, 
6.29717516e-01, 2.24595006e-01], [ 9.85300760e-01, 6.35329876e-01, 2.21264889e-01], [ 9.86345421e-01, 6.40968508e-01, 2.17948456e-01], [ 9.87332067e-01, 6.46633475e-01, 2.14647532e-01], [ 9.88259846e-01, 6.52324832e-01, 2.11364122e-01], [ 9.89127893e-01, 6.58042630e-01, 2.08100426e-01], [ 9.89935328e-01, 6.63786914e-01, 2.04858855e-01], [ 9.90681261e-01, 6.69557720e-01, 2.01642049e-01], [ 9.91364787e-01, 6.75355082e-01, 1.98452900e-01], [ 9.91984990e-01, 6.81179025e-01, 1.95294567e-01], [ 9.92540939e-01, 6.87029567e-01, 1.92170500e-01], [ 9.93031693e-01, 6.92906719e-01, 1.89084459e-01], [ 9.93456302e-01, 6.98810484e-01, 1.86040537e-01], [ 9.93813802e-01, 7.04740854e-01, 1.83043180e-01], [ 9.94103226e-01, 7.10697814e-01, 1.80097207e-01], [ 9.94323596e-01, 7.16681336e-01, 1.77207826e-01], [ 9.94473934e-01, 7.22691379e-01, 1.74380656e-01], [ 9.94553260e-01, 7.28727890e-01, 1.71621733e-01], [ 9.94560594e-01, 7.34790799e-01, 1.68937522e-01], [ 9.94494964e-01, 7.40880020e-01, 1.66334918e-01], [ 9.94355411e-01, 7.46995448e-01, 1.63821243e-01], [ 9.94140989e-01, 7.53136955e-01, 1.61404226e-01], [ 9.93850778e-01, 7.59304390e-01, 1.59091984e-01], [ 9.93482190e-01, 7.65498551e-01, 1.56890625e-01], [ 9.93033251e-01, 7.71719833e-01, 1.54807583e-01], [ 9.92505214e-01, 7.77966775e-01, 1.52854862e-01], [ 9.91897270e-01, 7.84239120e-01, 1.51041581e-01], [ 9.91208680e-01, 7.90536569e-01, 1.49376885e-01], [ 9.90438793e-01, 7.96858775e-01, 1.47869810e-01], [ 9.89587065e-01, 8.03205337e-01, 1.46529128e-01], [ 9.88647741e-01, 8.09578605e-01, 1.45357284e-01], [ 9.87620557e-01, 8.15977942e-01, 1.44362644e-01], [ 9.86509366e-01, 8.22400620e-01, 1.43556679e-01], [ 9.85314198e-01, 8.28845980e-01, 1.42945116e-01], [ 9.84031139e-01, 8.35315360e-01, 1.42528388e-01], [ 9.82652820e-01, 8.41811730e-01, 1.42302653e-01], [ 9.81190389e-01, 8.48328902e-01, 1.42278607e-01], [ 9.79643637e-01, 8.54866468e-01, 1.42453425e-01], [ 9.77994918e-01, 8.61432314e-01, 1.42808191e-01], [ 9.76264977e-01, 8.68015998e-01, 1.43350944e-01], [ 9.74443038e-01, 8.74622194e-01, 1.44061156e-01], [ 9.72530009e-01, 8.81250063e-01, 1.44922913e-01], [ 9.70532932e-01, 8.87896125e-01, 1.45918663e-01], [ 9.68443477e-01, 8.94563989e-01, 1.47014438e-01], [ 9.66271225e-01, 9.01249365e-01, 1.48179639e-01], [ 9.64021057e-01, 9.07950379e-01, 1.49370428e-01], [ 9.61681481e-01, 9.14672479e-01, 1.50520343e-01], [ 9.59275646e-01, 9.21406537e-01, 1.51566019e-01], [ 9.56808068e-01, 9.28152065e-01, 1.52409489e-01], [ 9.54286813e-01, 9.34907730e-01, 1.52921158e-01], [ 9.51726083e-01, 9.41670605e-01, 1.52925363e-01], [ 9.49150533e-01, 9.48434900e-01, 1.52177604e-01], [ 9.46602270e-01, 9.55189860e-01, 1.50327944e-01], [ 9.44151742e-01, 9.61916487e-01, 1.46860789e-01], [ 9.41896120e-01, 9.68589814e-01, 1.40955606e-01], [ 9.40015097e-01, 9.75158357e-01, 1.31325517e-01]] test_cm = LinearSegmentedColormap.from_list(__file__, cm_data) if __name__ == "__main__": import matplotlib.pyplot as plt import numpy as np try: from viscm import viscm viscm(test_cm) except ImportError: print("viscm not found, falling back on simple display") plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto', cmap=test_cm) plt.show()
[((268, 10, 268, 62), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'LinearSegmentedColormap.from_list', ({(268, 44, 268, 52): '__file__', (268, 54, 268, 61): 'cm_data'}, {}), '(__file__, cm_data)', False, 'from matplotlib.colors import LinearSegmentedColormap\n'), ((282, 4, 282, 14), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((277, 8, 277, 22), 'viscm.viscm', 'viscm', ({(277, 14, 277, 21): 'test_cm'}, {}), '(test_cm)', False, 'from viscm import viscm\n'), ((280, 19, 280, 43), 'numpy.linspace', 'np.linspace', ({(280, 31, 280, 32): '(0)', (280, 34, 280, 37): '(100)', (280, 39, 280, 42): '(256)'}, {}), '(0, 100, 256)', True, 'import numpy as np\n')]
Aditya239233/MDP
RPI/yolov5/algorithm/planner/algorithms/hybrid_astar/draw/draw.py
87491e1d67e547c11f4bdd5d784d120473429eae
import matplotlib.pyplot as plt
import numpy as np
import math

from algorithm.planner.utils.car_utils import Car_C

PI = np.pi


class Arrow:
    def __init__(self, x, y, theta, L, c):
        angle = np.deg2rad(30)
        d = 0.3 * L
        w = 2

        x_start = x
        y_start = y
        x_end = x + L * np.cos(theta)
        y_end = y + L * np.sin(theta)

        theta_hat_L = theta + PI - angle
        theta_hat_R = theta + PI + angle

        x_hat_start = x_end
        x_hat_end_L = x_hat_start + d * np.cos(theta_hat_L)
        x_hat_end_R = x_hat_start + d * np.cos(theta_hat_R)

        y_hat_start = y_end
        y_hat_end_L = y_hat_start + d * np.sin(theta_hat_L)
        y_hat_end_R = y_hat_start + d * np.sin(theta_hat_R)

        plt.plot([x_start, x_end], [y_start, y_end], color=c, linewidth=w)
        plt.plot([x_hat_start, x_hat_end_L],
                 [y_hat_start, y_hat_end_L], color=c, linewidth=w)
        plt.plot([x_hat_start, x_hat_end_R],
                 [y_hat_start, y_hat_end_R], color=c, linewidth=w)


class Car:
    def __init__(self, x, y, yaw, w, L):
        theta_B = PI + yaw

        xB = x + L / 4 * np.cos(theta_B)
        yB = y + L / 4 * np.sin(theta_B)

        theta_BL = theta_B + PI / 2
        theta_BR = theta_B - PI / 2

        x_BL = xB + w / 2 * np.cos(theta_BL)  # Bottom-Left vertex
        y_BL = yB + w / 2 * np.sin(theta_BL)
        x_BR = xB + w / 2 * np.cos(theta_BR)  # Bottom-Right vertex
        y_BR = yB + w / 2 * np.sin(theta_BR)

        x_FL = x_BL + L * np.cos(yaw)  # Front-Left vertex
        y_FL = y_BL + L * np.sin(yaw)
        x_FR = x_BR + L * np.cos(yaw)  # Front-Right vertex
        y_FR = y_BR + L * np.sin(yaw)

        plt.plot([x_BL, x_BR, x_FR, x_FL, x_BL],
                 [y_BL, y_BR, y_FR, y_FL, y_BL],
                 linewidth=1, color='black')
        Arrow(x, y, yaw, L / 2, 'black')


def draw_car(x, y, yaw, steer, color='black', extended_car=True):
    if extended_car:
        car = np.array([[-Car_C.RB, -Car_C.RB, Car_C.RF, Car_C.RF, -Car_C.RB, Car_C.ACTUAL_RF, Car_C.ACTUAL_RF, -Car_C.ACTUAL_RB, -Car_C.ACTUAL_RB],
                        [Car_C.W / 2, -Car_C.W / 2, -Car_C.W / 2, Car_C.W / 2, Car_C.W / 2, Car_C.W/2, -Car_C.W/2, -Car_C.W/2, Car_C.W/2]])
    else:
        car = np.array([[-Car_C.RB, -Car_C.RB, Car_C.RF, Car_C.RF, -Car_C.RB],
                        [Car_C.W / 2, -Car_C.W / 2, -Car_C.W / 2, Car_C.W / 2, Car_C.W / 2]])

    wheel = np.array([[-Car_C.TR, -Car_C.TR, Car_C.TR, Car_C.TR, -Car_C.TR],
                      [Car_C.TW / 4, -Car_C.TW / 4, -Car_C.TW / 4, Car_C.TW / 4, Car_C.TW / 4]])

    rlWheel = wheel.copy()
    rrWheel = wheel.copy()
    frWheel = wheel.copy()
    flWheel = wheel.copy()

    Rot1 = np.array([[math.cos(yaw), -math.sin(yaw)],
                     [math.sin(yaw), math.cos(yaw)]])

    Rot2 = np.array([[math.cos(steer), math.sin(steer)],
                     [-math.sin(steer), math.cos(steer)]])

    frWheel = np.dot(Rot2, frWheel)
    flWheel = np.dot(Rot2, flWheel)

    frWheel += np.array([[Car_C.WB], [-Car_C.WD / 2]])
    flWheel += np.array([[Car_C.WB], [Car_C.WD / 2]])

    rrWheel[1, :] -= Car_C.WD / 2
    rlWheel[1, :] += Car_C.WD / 2

    frWheel = np.dot(Rot1, frWheel)
    flWheel = np.dot(Rot1, flWheel)

    rrWheel = np.dot(Rot1, rrWheel)
    rlWheel = np.dot(Rot1, rlWheel)
    car = np.dot(Rot1, car)

    frWheel += np.array([[x], [y]])
    flWheel += np.array([[x], [y]])
    rrWheel += np.array([[x], [y]])
    rlWheel += np.array([[x], [y]])
    car += np.array([[x], [y]])

    plt.plot(car[0, :], car[1, :], color)
    plt.plot(frWheel[0, :], frWheel[1, :], color)
    plt.plot(rrWheel[0, :], rrWheel[1, :], color)
    plt.plot(flWheel[0, :], flWheel[1, :], color)
    plt.plot(rlWheel[0, :], rlWheel[1, :], color)
    Arrow(x, y, yaw, Car_C.WB * 0.8, color)
[((74, 12, 75, 96), 'numpy.array', 'np.array', ({(74, 21, 75, 95): '[[-Car_C.TR, -Car_C.TR, Car_C.TR, Car_C.TR, -Car_C.TR], [Car_C.TW / 4, -\n Car_C.TW / 4, -Car_C.TW / 4, Car_C.TW / 4, Car_C.TW / 4]]'}, {}), '([[-Car_C.TR, -Car_C.TR, Car_C.TR, Car_C.TR, -Car_C.TR], [Car_C.TW /\n 4, -Car_C.TW / 4, -Car_C.TW / 4, Car_C.TW / 4, Car_C.TW / 4]])', True, 'import numpy as np\n'), ((88, 14, 88, 35), 'numpy.dot', 'np.dot', ({(88, 21, 88, 25): 'Rot2', (88, 27, 88, 34): 'frWheel'}, {}), '(Rot2, frWheel)', True, 'import numpy as np\n'), ((89, 14, 89, 35), 'numpy.dot', 'np.dot', ({(89, 21, 89, 25): 'Rot2', (89, 27, 89, 34): 'flWheel'}, {}), '(Rot2, flWheel)', True, 'import numpy as np\n'), ((91, 15, 91, 54), 'numpy.array', 'np.array', ({(91, 24, 91, 53): '[[Car_C.WB], [-Car_C.WD / 2]]'}, {}), '([[Car_C.WB], [-Car_C.WD / 2]])', True, 'import numpy as np\n'), ((92, 15, 92, 53), 'numpy.array', 'np.array', ({(92, 24, 92, 52): '[[Car_C.WB], [Car_C.WD / 2]]'}, {}), '([[Car_C.WB], [Car_C.WD / 2]])', True, 'import numpy as np\n'), ((96, 14, 96, 35), 'numpy.dot', 'np.dot', ({(96, 21, 96, 25): 'Rot1', (96, 27, 96, 34): 'frWheel'}, {}), '(Rot1, frWheel)', True, 'import numpy as np\n'), ((97, 14, 97, 35), 'numpy.dot', 'np.dot', ({(97, 21, 97, 25): 'Rot1', (97, 27, 97, 34): 'flWheel'}, {}), '(Rot1, flWheel)', True, 'import numpy as np\n'), ((99, 14, 99, 35), 'numpy.dot', 'np.dot', ({(99, 21, 99, 25): 'Rot1', (99, 27, 99, 34): 'rrWheel'}, {}), '(Rot1, rrWheel)', True, 'import numpy as np\n'), ((100, 14, 100, 35), 'numpy.dot', 'np.dot', ({(100, 21, 100, 25): 'Rot1', (100, 27, 100, 34): 'rlWheel'}, {}), '(Rot1, rlWheel)', True, 'import numpy as np\n'), ((101, 10, 101, 27), 'numpy.dot', 'np.dot', ({(101, 17, 101, 21): 'Rot1', (101, 23, 101, 26): 'car'}, {}), '(Rot1, car)', True, 'import numpy as np\n'), ((103, 15, 103, 35), 'numpy.array', 'np.array', ({(103, 24, 103, 34): '[[x], [y]]'}, {}), '([[x], [y]])', True, 'import numpy as np\n'), ((104, 15, 104, 35), 'numpy.array', 'np.array', ({(104, 24, 104, 34): '[[x], [y]]'}, {}), '([[x], [y]])', True, 'import numpy as np\n'), ((105, 15, 105, 35), 'numpy.array', 'np.array', ({(105, 24, 105, 34): '[[x], [y]]'}, {}), '([[x], [y]])', True, 'import numpy as np\n'), ((106, 15, 106, 35), 'numpy.array', 'np.array', ({(106, 24, 106, 34): '[[x], [y]]'}, {}), '([[x], [y]])', True, 'import numpy as np\n'), ((107, 11, 107, 31), 'numpy.array', 'np.array', ({(107, 20, 107, 30): '[[x], [y]]'}, {}), '([[x], [y]])', True, 'import numpy as np\n'), ((109, 4, 109, 41), 'matplotlib.pyplot.plot', 'plt.plot', ({(109, 13, 109, 22): 'car[(0), :]', (109, 24, 109, 33): 'car[(1), :]', (109, 35, 109, 40): 'color'}, {}), '(car[(0), :], car[(1), :], color)', True, 'import matplotlib.pyplot as plt\n'), ((110, 4, 110, 49), 'matplotlib.pyplot.plot', 'plt.plot', ({(110, 13, 110, 26): 'frWheel[(0), :]', (110, 28, 110, 41): 'frWheel[(1), :]', (110, 43, 110, 48): 'color'}, {}), '(frWheel[(0), :], frWheel[(1), :], color)', True, 'import matplotlib.pyplot as plt\n'), ((111, 4, 111, 49), 'matplotlib.pyplot.plot', 'plt.plot', ({(111, 13, 111, 26): 'rrWheel[(0), :]', (111, 28, 111, 41): 'rrWheel[(1), :]', (111, 43, 111, 48): 'color'}, {}), '(rrWheel[(0), :], rrWheel[(1), :], color)', True, 'import matplotlib.pyplot as plt\n'), ((112, 4, 112, 49), 'matplotlib.pyplot.plot', 'plt.plot', ({(112, 13, 112, 26): 'flWheel[(0), :]', (112, 28, 112, 41): 'flWheel[(1), :]', (112, 43, 112, 48): 'color'}, {}), '(flWheel[(0), :], flWheel[(1), :], color)', True, 'import matplotlib.pyplot as plt\n'), ((113, 4, 113, 49), 
'matplotlib.pyplot.plot', 'plt.plot', ({(113, 13, 113, 26): 'rlWheel[(0), :]', (113, 28, 113, 41): 'rlWheel[(1), :]', (113, 43, 113, 48): 'color'}, {}), '(rlWheel[(0), :], rlWheel[(1), :], color)', True, 'import matplotlib.pyplot as plt\n'), ((11, 16, 11, 30), 'numpy.deg2rad', 'np.deg2rad', ({(11, 27, 11, 29): '30'}, {}), '(30)', True, 'import numpy as np\n'), ((31, 8, 31, 74), 'matplotlib.pyplot.plot', 'plt.plot', (), '', True, 'import matplotlib.pyplot as plt\n'), ((32, 8, 33, 66), 'matplotlib.pyplot.plot', 'plt.plot', (), '', True, 'import matplotlib.pyplot as plt\n'), ((34, 8, 35, 66), 'matplotlib.pyplot.plot', 'plt.plot', (), '', True, 'import matplotlib.pyplot as plt\n'), ((58, 8, 60, 44), 'matplotlib.pyplot.plot', 'plt.plot', (), '', True, 'import matplotlib.pyplot as plt\n'), ((68, 14, 69, 135), 'numpy.array', 'np.array', ({(68, 23, 69, 134): '[[-Car_C.RB, -Car_C.RB, Car_C.RF, Car_C.RF, -Car_C.RB, Car_C.ACTUAL_RF,\n Car_C.ACTUAL_RF, -Car_C.ACTUAL_RB, -Car_C.ACTUAL_RB], [Car_C.W / 2, -\n Car_C.W / 2, -Car_C.W / 2, Car_C.W / 2, Car_C.W / 2, Car_C.W / 2, -\n Car_C.W / 2, -Car_C.W / 2, Car_C.W / 2]]'}, {}), '([[-Car_C.RB, -Car_C.RB, Car_C.RF, Car_C.RF, -Car_C.RB, Car_C.\n ACTUAL_RF, Car_C.ACTUAL_RF, -Car_C.ACTUAL_RB, -Car_C.ACTUAL_RB], [Car_C\n .W / 2, -Car_C.W / 2, -Car_C.W / 2, Car_C.W / 2, Car_C.W / 2, Car_C.W /\n 2, -Car_C.W / 2, -Car_C.W / 2, Car_C.W / 2]])', True, 'import numpy as np\n'), ((71, 14, 72, 89), 'numpy.array', 'np.array', ({(71, 23, 72, 88): '[[-Car_C.RB, -Car_C.RB, Car_C.RF, Car_C.RF, -Car_C.RB], [Car_C.W / 2, -\n Car_C.W / 2, -Car_C.W / 2, Car_C.W / 2, Car_C.W / 2]]'}, {}), '([[-Car_C.RB, -Car_C.RB, Car_C.RF, Car_C.RF, -Car_C.RB], [Car_C.W /\n 2, -Car_C.W / 2, -Car_C.W / 2, Car_C.W / 2, Car_C.W / 2]])', True, 'import numpy as np\n'), ((17, 24, 17, 37), 'numpy.cos', 'np.cos', ({(17, 31, 17, 36): 'theta'}, {}), '(theta)', True, 'import numpy as np\n'), ((18, 24, 18, 37), 'numpy.sin', 'np.sin', ({(18, 31, 18, 36): 'theta'}, {}), '(theta)', True, 'import numpy as np\n'), ((24, 40, 24, 59), 'numpy.cos', 'np.cos', ({(24, 47, 24, 58): 'theta_hat_L'}, {}), '(theta_hat_L)', True, 'import numpy as np\n'), ((25, 40, 25, 59), 'numpy.cos', 'np.cos', ({(25, 47, 25, 58): 'theta_hat_R'}, {}), '(theta_hat_R)', True, 'import numpy as np\n'), ((28, 40, 28, 59), 'numpy.sin', 'np.sin', ({(28, 47, 28, 58): 'theta_hat_L'}, {}), '(theta_hat_L)', True, 'import numpy as np\n'), ((29, 40, 29, 59), 'numpy.sin', 'np.sin', ({(29, 47, 29, 58): 'theta_hat_R'}, {}), '(theta_hat_R)', True, 'import numpy as np\n'), ((42, 25, 42, 40), 'numpy.cos', 'np.cos', ({(42, 32, 42, 39): 'theta_B'}, {}), '(theta_B)', True, 'import numpy as np\n'), ((43, 25, 43, 40), 'numpy.sin', 'np.sin', ({(43, 32, 43, 39): 'theta_B'}, {}), '(theta_B)', True, 'import numpy as np\n'), ((48, 28, 48, 44), 'numpy.cos', 'np.cos', ({(48, 35, 48, 43): 'theta_BL'}, {}), '(theta_BL)', True, 'import numpy as np\n'), ((49, 28, 49, 44), 'numpy.sin', 'np.sin', ({(49, 35, 49, 43): 'theta_BL'}, {}), '(theta_BL)', True, 'import numpy as np\n'), ((50, 28, 50, 44), 'numpy.cos', 'np.cos', ({(50, 35, 50, 43): 'theta_BR'}, {}), '(theta_BR)', True, 'import numpy as np\n'), ((51, 28, 51, 44), 'numpy.sin', 'np.sin', ({(51, 35, 51, 43): 'theta_BR'}, {}), '(theta_BR)', True, 'import numpy as np\n'), ((53, 26, 53, 37), 'numpy.cos', 'np.cos', ({(53, 33, 53, 36): 'yaw'}, {}), '(yaw)', True, 'import numpy as np\n'), ((54, 26, 54, 37), 'numpy.sin', 'np.sin', ({(54, 33, 54, 36): 'yaw'}, {}), '(yaw)', True, 'import numpy as np\n'), ((55, 26, 55, 37), 
'numpy.cos', 'np.cos', ({(55, 33, 55, 36): 'yaw'}, {}), '(yaw)', True, 'import numpy as np\n'), ((56, 26, 56, 37), 'numpy.sin', 'np.sin', ({(56, 33, 56, 36): 'yaw'}, {}), '(yaw)', True, 'import numpy as np\n'), ((82, 22, 82, 35), 'math.cos', 'math.cos', ({(82, 31, 82, 34): 'yaw'}, {}), '(yaw)', False, 'import math\n'), ((83, 22, 83, 35), 'math.sin', 'math.sin', ({(83, 31, 83, 34): 'yaw'}, {}), '(yaw)', False, 'import math\n'), ((83, 37, 83, 50), 'math.cos', 'math.cos', ({(83, 46, 83, 49): 'yaw'}, {}), '(yaw)', False, 'import math\n'), ((85, 22, 85, 37), 'math.cos', 'math.cos', ({(85, 31, 85, 36): 'steer'}, {}), '(steer)', False, 'import math\n'), ((85, 39, 85, 54), 'math.sin', 'math.sin', ({(85, 48, 85, 53): 'steer'}, {}), '(steer)', False, 'import math\n'), ((86, 40, 86, 55), 'math.cos', 'math.cos', ({(86, 49, 86, 54): 'steer'}, {}), '(steer)', False, 'import math\n'), ((82, 38, 82, 51), 'math.sin', 'math.sin', ({(82, 47, 82, 50): 'yaw'}, {}), '(yaw)', False, 'import math\n'), ((86, 23, 86, 38), 'math.sin', 'math.sin', ({(86, 32, 86, 37): 'steer'}, {}), '(steer)', False, 'import math\n')]
RuiCoreSci/Flask-Restful
models/database_models/comment_model.py
03f98a17487d407b69b853a9bf0ed20d2c5b003b
from sqlalchemy import Integer, Text, DateTime, func, Boolean, text

from models.database_models import Base, Column


class Comment(Base):
    __tablename__ = "comment"

    id = Column(Integer, primary_key=True, )
    user_id = Column(Integer, nullable=False, comment="ID of the user who made the comment")
    post_id = Column(Integer, nullable=False, comment="ID of the post being commented on")
    content = Column(Text, nullable=False, comment="The user's comment text")

    create_time = Column(DateTime, server_default=func.now(), comment="Creation time")
    update_time = Column(DateTime, server_default=func.now(), onupdate=func.now(), comment="Last update time")
    deleted = Column(Boolean, default=False, server_default=text('0'), nullable=False, comment="Whether this record has been deleted")
[((9, 9, 9, 44), 'models.database_models.Column', 'Column', (), '', False, 'from models.database_models import Base, Column\n'), ((10, 14, 10, 75), 'models.database_models.Column', 'Column', (), '', False, 'from models.database_models import Base, Column\n'), ((11, 14, 11, 74), 'models.database_models.Column', 'Column', (), '', False, 'from models.database_models import Base, Column\n'), ((12, 14, 12, 69), 'models.database_models.Column', 'Column', (), '', False, 'from models.database_models import Base, Column\n'), ((14, 50, 14, 60), 'sqlalchemy.func.now', 'func.now', ({}, {}), '()', False, 'from sqlalchemy import Integer, Text, DateTime, func, Boolean, text\n'), ((15, 50, 15, 60), 'sqlalchemy.func.now', 'func.now', ({}, {}), '()', False, 'from sqlalchemy import Integer, Text, DateTime, func, Boolean, text\n'), ((15, 71, 15, 81), 'sqlalchemy.func.now', 'func.now', ({}, {}), '()', False, 'from sqlalchemy import Integer, Text, DateTime, func, Boolean, text\n'), ((16, 60, 16, 69), 'sqlalchemy.text', 'text', ({(16, 65, 16, 68): '"""0"""'}, {}), "('0')", False, 'from sqlalchemy import Integer, Text, DateTime, func, Boolean, text\n')]
jmsantorum/aws-deploy
aws_deploy/ecs/helper.py
f117cff3a5440ee42470feaa2a83263c3212cf10
import json import re from datetime import datetime from json.decoder import JSONDecodeError import click from boto3.session import Session from boto3_type_annotations.ecs import Client from botocore.exceptions import ClientError, NoCredentialsError from dateutil.tz.tz import tzlocal from dictdiffer import diff JSON_LIST_REGEX = re.compile(r'^\[.*\]$') LAUNCH_TYPE_EC2 = 'EC2' LAUNCH_TYPE_FARGATE = 'FARGATE' def read_env_file(container_name, file): env_vars = [] try: with open(file) as f: for line in f: if line.startswith('#') or not line.strip() or '=' not in line: continue key, value = line.strip().split('=', 1) env_vars.append((container_name, key, value)) except Exception as e: raise EcsTaskDefinitionCommandError(str(e)) return tuple(env_vars) class EcsClient(object): def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, aws_session_token=None, region_name=None, profile_name=None): session = Session( aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key, aws_session_token=aws_session_token, region_name=region_name, profile_name=profile_name ) self.boto: Client = session.client('ecs') self.events = session.client('events') def describe_services(self, cluster_name, service_name): return self.boto.describe_services( cluster=cluster_name, services=[service_name] ) def describe_task_definition(self, task_definition_arn): try: return self.boto.describe_task_definition( taskDefinition=task_definition_arn, include=[ 'TAGS', ] ) except ClientError: raise UnknownTaskDefinitionError( u'Unknown task definition arn: %s' % task_definition_arn ) def list_tasks(self, cluster_name, service_name): return self.boto.list_tasks( cluster=cluster_name, serviceName=service_name ) def describe_tasks(self, cluster_name, task_arns): return self.boto.describe_tasks(cluster=cluster_name, tasks=task_arns) def register_task_definition(self, family, containers, volumes, role_arn, execution_role_arn, tags, additional_properties): if tags: additional_properties['tags'] = tags return self.boto.register_task_definition( family=family, containerDefinitions=containers, volumes=volumes, taskRoleArn=role_arn, executionRoleArn=execution_role_arn, **additional_properties ) def deregister_task_definition(self, task_definition_arn): return self.boto.deregister_task_definition( taskDefinition=task_definition_arn ) def update_service(self, cluster, service, desired_count, task_definition): if desired_count is None: return self.boto.update_service( cluster=cluster, service=service, taskDefinition=task_definition ) return self.boto.update_service( cluster=cluster, service=service, desiredCount=desired_count, taskDefinition=task_definition ) def run_task(self, cluster, task_definition, count, started_by, overrides, launchtype='EC2', subnets=(), security_groups=(), public_ip=False, platform_version=None): if launchtype == LAUNCH_TYPE_FARGATE: if not subnets or not security_groups: msg = 'At least one subnet (--subnet) and one security ' \ 'group (--securitygroup) definition are required ' \ 'for launch type FARGATE' raise TaskPlacementError(msg) network_configuration = { "awsvpcConfiguration": { "subnets": subnets, "securityGroups": security_groups, "assignPublicIp": "ENABLED" if public_ip else "DISABLED" } } if platform_version is None: platform_version = 'LATEST' return self.boto.run_task( cluster=cluster, taskDefinition=task_definition, count=count, startedBy=started_by, overrides=overrides, launchType=launchtype, networkConfiguration=network_configuration, 
platformVersion=platform_version, ) return self.boto.run_task( cluster=cluster, taskDefinition=task_definition, count=count, startedBy=started_by, overrides=overrides ) def update_rule(self, cluster, rule, task_definition): target = self.events.list_targets_by_rule(Rule=rule)['Targets'][0] target['Arn'] = task_definition.arn.partition('task-definition')[0] + 'cluster/' + cluster target['EcsParameters']['TaskDefinitionArn'] = task_definition.arn self.events.put_targets(Rule=rule, Targets=[target]) return target['Id'] class EcsService(dict): def __init__(self, cluster, service_definition=None, **kwargs): self._cluster = cluster super(EcsService, self).__init__(service_definition, **kwargs) def set_task_definition(self, task_definition): self[u'taskDefinition'] = task_definition.arn @property def cluster(self): return self._cluster @property def name(self): return self.get(u'serviceName') @property def task_definition(self): return self.get(u'taskDefinition') @property def desired_count(self): return self.get(u'desiredCount') @property def deployment_created_at(self): for deployment in self.get(u'deployments'): if deployment.get(u'status') == u'PRIMARY': return deployment.get(u'createdAt') return datetime.now() @property def deployment_updated_at(self): for deployment in self.get(u'deployments'): if deployment.get(u'status') == u'PRIMARY': return deployment.get(u'updatedAt') return datetime.now() @property def errors(self): return self.get_warnings( since=self.deployment_updated_at ) @property def older_errors(self): return self.get_warnings( since=self.deployment_created_at, until=self.deployment_updated_at ) def get_warnings(self, since=None, until=None): since = since or self.deployment_created_at until = until or datetime.now(tz=tzlocal()) errors = {} for event in self.get(u'events'): if u'unable' not in event[u'message']: continue if since < event[u'createdAt'] < until: errors[event[u'createdAt']] = event[u'message'] return errors class EcsTaskDefinition(object): def __init__(self, containerDefinitions, volumes, family, revision, status, taskDefinitionArn, requiresAttributes=None, taskRoleArn=None, executionRoleArn=None, compatibilities=None, tags=None, **kwargs): self.containers = containerDefinitions self.volumes = volumes self.family = family self.revision = revision self.status = status self.arn = taskDefinitionArn self.requires_attributes = requiresAttributes or {} self.role_arn = taskRoleArn or '' self.execution_role_arn = executionRoleArn or '' self.tags = tags self.additional_properties = kwargs self._diff = [] # the compatibilities parameter is returned from the ECS API, when # describing a task, but may not be included, when registering a new # task definition. Just storing it for now. 
self.compatibilities = compatibilities @property def container_names(self): for container in self.containers: yield container['name'] @property def images(self): for container in self.containers: yield container['name'], container['image'] @property def family_revision(self): return f'{self.family}:{self.revision}' @property def updated(self) -> bool: return self._diff != [] @property def diff(self): return self._diff def show_diff(self, show_diff: bool = False): if show_diff: click.secho('Task definition modified:') for d in self._diff: click.secho(f' {str(d)}', fg='blue') click.secho('') def diff_raw(self, task_b): containers_a = {c['name']: c for c in self.containers} containers_b = {c['name']: c for c in task_b.containers} requirements_a = sorted([r['name'] for r in self.requires_attributes]) requirements_b = sorted([r['name'] for r in task_b.requires_attributes]) for container in containers_a: containers_a[container]['environment'] = {e['name']: e['value'] for e in containers_a[container].get('environment', {})} for container in containers_b: containers_b[container]['environment'] = {e['name']: e['value'] for e in containers_b[container].get('environment', {})} for container in containers_a: containers_a[container]['secrets'] = {e['name']: e['valueFrom'] for e in containers_a[container].get('secrets', {})} for container in containers_b: containers_b[container]['secrets'] = {e['name']: e['valueFrom'] for e in containers_b[container].get('secrets', {})} composite_a = { 'containers': containers_a, 'volumes': self.volumes, 'requires_attributes': requirements_a, 'role_arn': self.role_arn, 'execution_role_arn': self.execution_role_arn, 'compatibilities': self.compatibilities, 'additional_properties': self.additional_properties, } composite_b = { 'containers': containers_b, 'volumes': task_b.volumes, 'requires_attributes': requirements_b, 'role_arn': task_b.role_arn, 'execution_role_arn': task_b.execution_role_arn, 'compatibilities': task_b.compatibilities, 'additional_properties': task_b.additional_properties, } return list(diff(composite_a, composite_b)) def get_overrides(self): override = dict() overrides = [] for diff in self.diff: if override.get('name') != diff.container: override = dict(name=diff.container) overrides.append(override) if diff.field == 'command': override['command'] = self.get_overrides_command(diff.value) elif diff.field == 'environment': override['environment'] = self.get_overrides_env(diff.value) elif diff.field == 'secrets': override['secrets'] = self.get_overrides_secrets(diff.value) return overrides @staticmethod def parse_command(command): if re.match(JSON_LIST_REGEX, command): try: return json.loads(command) except JSONDecodeError as e: raise EcsTaskDefinitionCommandError( f"command should be valid JSON list. 
Got following command: {command} resulting in error: {str(e)}" ) return command.split() @staticmethod def get_overrides_command(command): return EcsTaskDefinition.parse_command(command) @staticmethod def get_overrides_env(env): return [{"name": e, "value": env[e]} for e in env] @staticmethod def get_overrides_secrets(secrets): return [{"name": s, "valueFrom": secrets[s]} for s in secrets] def get_tag(self, key): for tag in self.tags: if tag['key'] == key: return tag['value'] return None def set_tag(self, key: str, value: str): if key and value: done = False for tag in self.tags: if tag['key'] == key: if tag['value'] != value: diff = EcsTaskDefinitionDiff( container=None, field=f"tags['{key}']", value=value, old_value=tag['value'] ) self._diff.append(diff) tag['value'] = value done = True break if not done: diff = EcsTaskDefinitionDiff(container=None, field=f"tags['{key}']", value=value, old_value=None) self._diff.append(diff) self.tags.append({'key': key, 'value': value}) def set_images(self, tag=None, **images): self.validate_container_options(**images) for container in self.containers: if container['name'] in images: new_image = images[container['name']] diff = EcsTaskDefinitionDiff( container=container['name'], field='image', value=new_image, old_value=container['image'] ) self._diff.append(diff) container['image'] = new_image elif tag: image_definition = container['image'].rsplit(':', 1) new_image = f'{image_definition[0]}:{tag.strip()}' # check if tag changes if new_image != container['image']: diff = EcsTaskDefinitionDiff( container=container['name'], field='image', value=new_image, old_value=container['image'] ) self._diff.append(diff) container['image'] = new_image def set_commands(self, **commands): self.validate_container_options(**commands) for container in self.containers: if container['name'] in commands: new_command = commands[container['name']] diff = EcsTaskDefinitionDiff( container=container['name'], field='command', value=new_command, old_value=container.get('command') ) self._diff.append(diff) container['command'] = self.parse_command(new_command) def set_environment(self, environment_list, exclusive=False, env_file=((None, None),)): environment = {} if None not in env_file[0]: for env in env_file: line = read_env_file(env[0], env[1]) environment_list = line + environment_list for env in environment_list: environment.setdefault(env[0], {}) environment[env[0]][env[1]] = env[2] self.validate_container_options(**environment) for container in self.containers: if container['name'] in environment: self.apply_container_environment( container=container, new_environment=environment[container['name']], exclusive=exclusive, ) elif exclusive is True: self.apply_container_environment( container=container, new_environment={}, exclusive=exclusive, ) def apply_container_environment(self, container, new_environment, exclusive=False): environment = container.get('environment', {}) old_environment = {env['name']: env['value'] for env in environment} if exclusive is True: merged = new_environment else: merged = old_environment.copy() merged.update(new_environment) if old_environment == merged: return diff = EcsTaskDefinitionDiff( container=container['name'], field='environment', value=merged, old_value=old_environment ) self._diff.append(diff) container['environment'] = [ {"name": e, "value": merged[e]} for e in merged ] def set_secrets(self, secrets_list, exclusive=False): secrets = {} for secret in secrets_list: secrets.setdefault(secret[0], {}) secrets[secret[0]][secret[1]] = secret[2] 
self.validate_container_options(**secrets) for container in self.containers: if container['name'] in secrets: self.apply_container_secrets( container=container, new_secrets=secrets[container['name']], exclusive=exclusive, ) elif exclusive is True: self.apply_container_secrets( container=container, new_secrets={}, exclusive=exclusive, ) def apply_container_secrets(self, container, new_secrets, exclusive=False): secrets = container.get('secrets', {}) old_secrets = {secret['name']: secret['valueFrom'] for secret in secrets} if exclusive is True: merged = new_secrets else: merged = old_secrets.copy() merged.update(new_secrets) if old_secrets == merged: return diff = EcsTaskDefinitionDiff( container=container['name'], field='secrets', value=merged, old_value=old_secrets ) self._diff.append(diff) container['secrets'] = [ {"name": s, "valueFrom": merged[s]} for s in merged ] def validate_container_options(self, **container_options): for container_name in container_options: if container_name not in self.container_names: raise UnknownContainerError(f'Unknown container: {container_name}') def set_role_arn(self, role_arn): if role_arn: diff = EcsTaskDefinitionDiff( container=None, field='role_arn', value=role_arn, old_value=self.role_arn ) self.role_arn = role_arn self._diff.append(diff) def set_execution_role_arn(self, execution_role_arn): if execution_role_arn: diff = EcsTaskDefinitionDiff( container=None, field='execution_role_arn', value=execution_role_arn, old_value=self.execution_role_arn ) self.execution_role_arn = execution_role_arn self._diff.append(diff) class EcsTaskDefinitionDiff(object): def __init__(self, container, field, value, old_value): self.container = container self.field = field self.value = value self.old_value = old_value def __repr__(self): if self.field == 'environment': return '\n'.join(self._get_environment_diffs( self.container, self.value, self.old_value, )) elif self.field == 'secrets': return '\n'.join(self._get_secrets_diffs( self.container, self.value, self.old_value, )) elif self.container: return f'Changed {self.field} of container "{self.container}" to: "{self.value}" (was: "{self.old_value}")' else: return f'Changed {self.field} to: "{self.value}" (was: "{self.old_value}")' @staticmethod def _get_environment_diffs(container, env, old_env): diffs = [] for name, value in env.items(): old_value = old_env.get(name) if value != old_value or value and not old_value: message = f'Changed environment "{name}" of container "{container}" to: "{value}"' diffs.append(message) for old_name in old_env.keys(): if old_name not in env.keys(): message = f'Removed environment "{old_name}" of container "{container}"' diffs.append(message) return diffs @staticmethod def _get_secrets_diffs(container, secrets, old_secrets): diffs = [] for name, value in secrets.items(): old_value = old_secrets.get(name) if value != old_value or not old_value: message = f'Changed secret "{name}" of container "{container}" to: "{value}"' diffs.append(message) for old_name in old_secrets.keys(): if old_name not in secrets.keys(): message = f'Removed secret "{old_name}" of container "{container}"' diffs.append(message) return diffs class EcsAction(object): def __init__(self, client: EcsClient, cluster_name: str, service_name: str): self._client = client self._cluster_name = cluster_name self._service_name = service_name try: if service_name: self._service = self.get_service() except IndexError: raise EcsConnectionError( u'An error occurred when calling the DescribeServices ' u'operation: Service not found.' 
) except ClientError as e: raise EcsConnectionError(str(e)) except NoCredentialsError: raise EcsConnectionError( u'Unable to locate credentials. Configure credentials ' u'by running "aws configure".' ) def get_service(self): services_definition = self._client.describe_services( cluster_name=self._cluster_name, service_name=self._service_name ) return EcsService( cluster=self._cluster_name, service_definition=services_definition[u'services'][0] ) def get_current_task_definition(self, service): return self.get_task_definition(service.task_definition) def get_task_definition(self, task_definition): task_definition_payload = self._client.describe_task_definition( task_definition_arn=task_definition ) task_definition = EcsTaskDefinition( tags=task_definition_payload.get('tags', None), **task_definition_payload[u'taskDefinition'] ) return task_definition def update_task_definition(self, task_definition): response = self._client.register_task_definition( family=task_definition.family, containers=task_definition.containers, volumes=task_definition.volumes, role_arn=task_definition.role_arn, execution_role_arn=task_definition.execution_role_arn, tags=task_definition.tags, additional_properties=task_definition.additional_properties ) new_task_definition = EcsTaskDefinition(**response[u'taskDefinition']) return new_task_definition def deregister_task_definition(self, task_definition): self._client.deregister_task_definition(task_definition.arn) def update_service(self, service, desired_count=None): response = self._client.update_service( cluster=service.cluster, service=service.name, desired_count=desired_count, task_definition=service.task_definition ) return EcsService(self._cluster_name, response[u'service']) def is_deployed(self, service): if len(service[u'deployments']) != 1: return False running_tasks = self._client.list_tasks( cluster_name=service.cluster, service_name=service.name ) if not running_tasks[u'taskArns']: return service.desired_count == 0 running_count = self.get_running_tasks_count( service=service, task_arns=running_tasks[u'taskArns'] ) return service.desired_count == running_count def get_running_tasks_count(self, service, task_arns): running_count = 0 tasks_details = self._client.describe_tasks( cluster_name=self._cluster_name, task_arns=task_arns ) for task in tasks_details[u'tasks']: arn = task[u'taskDefinitionArn'] status = task[u'lastStatus'] if arn == service.task_definition and status == u'RUNNING': running_count += 1 return running_count @property def client(self): return self._client @property def service(self): return self._service @property def cluster_name(self): return self._cluster_name @property def service_name(self): return self._service_name class DeployAction(EcsAction): def deploy(self, task_definition): try: self._service.set_task_definition(task_definition) return self.update_service(self._service) except ClientError as e: raise EcsError(str(e)) class ScaleAction(EcsAction): def scale(self, desired_count): try: return self.update_service(self._service, desired_count) except ClientError as e: raise EcsError(str(e)) class RunAction(EcsAction): def __init__(self, client, cluster_name): super(RunAction, self).__init__(client, cluster_name, None) self._client = client self._cluster_name = cluster_name self.started_tasks = [] def run(self, task_definition, count, started_by, launchtype, subnets, security_groups, public_ip, platform_version): try: result = self._client.run_task( cluster=self._cluster_name, task_definition=task_definition.family_revision, 
count=count, started_by=started_by, overrides=dict(containerOverrides=task_definition.get_overrides()), launchtype=launchtype, subnets=subnets, security_groups=security_groups, public_ip=public_ip, platform_version=platform_version, ) self.started_tasks = result['tasks'] return True except ClientError as e: raise EcsError(str(e)) class UpdateAction(EcsAction): def __init__(self, client): super(UpdateAction, self).__init__(client, None, None) class DiffAction(EcsAction): def __init__(self, client): super(DiffAction, self).__init__(client, None, None) class EcsError(Exception): pass class EcsConnectionError(EcsError): pass class UnknownContainerError(EcsError): pass class TaskPlacementError(EcsError): pass class UnknownTaskDefinitionError(EcsError): pass class EcsTaskDefinitionCommandError(EcsError): pass
[((13, 18, 13, 41), 're.compile', 're.compile', ({(13, 29, 13, 40): '"""^\\\\[.*\\\\]$"""'}, {}), "('^\\\\[.*\\\\]$')", False, 'import re\n'), ((36, 18, 42, 9), 'boto3.session.Session', 'Session', (), '', False, 'from boto3.session import Session\n'), ((186, 15, 186, 29), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime\n'), ((193, 15, 193, 29), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime\n'), ((333, 11, 333, 45), 're.match', 're.match', ({(333, 20, 333, 35): 'JSON_LIST_REGEX', (333, 37, 333, 44): 'command'}, {}), '(JSON_LIST_REGEX, command)', False, 'import re\n'), ((266, 12, 266, 52), 'click.secho', 'click.secho', ({(266, 24, 266, 51): '"""Task definition modified:"""'}, {}), "('Task definition modified:')", False, 'import click\n'), ((269, 12, 269, 27), 'click.secho', 'click.secho', ({(269, 24, 269, 26): '""""""'}, {}), "('')", False, 'import click\n'), ((335, 23, 335, 42), 'json.loads', 'json.loads', ({(335, 34, 335, 41): 'command'}, {}), '(command)', False, 'import json\n'), ((210, 41, 210, 50), 'dateutil.tz.tz.tzlocal', 'tzlocal', ({}, {}), '()', False, 'from dateutil.tz.tz import tzlocal\n')]
emmaling27/networks-research
sbm.py
be209e2b653a1fe9eec480a94538d59104e4aa23
import networkx as nx
from scipy.special import comb
import attr


@attr.s
class Count(object):
    """Count class with monochromatic and bichromatic counts"""
    n = attr.ib()
    monochromatic = attr.ib(default=0)
    bichromatic = attr.ib(default=0)

    def count_edge(self, u, v):
        if (u < self.n / 2) != (v < self.n / 2):
            self.bichromatic += 1
        else:
            self.monochromatic += 1


class SBM():
    """SBM class with predicted numbers of wedges and local bridges and actual counts"""

    def __init__(self, n, p, q, seed=0):
        self.n = n
        self.p = p
        self.q = q
        self.g = nx.generators.community.stochastic_block_model(
            [int(self.n / 2), int(self.n / 2)],
            [[p, q], [q, p]],
            seed=seed)

    def is_bichromatic(self, u, v):
        return (u < self.n / 2) != (v < self.n / 2)

    def get_bichromatic_fraction(self):
        bichromatic = 0
        for (x, y) in self.g.edges():
            if self.is_bichromatic(x, y):
                bichromatic += 1
        return bichromatic / len(self.g.edges())

    def is_local_bridge(self, u, v):
        return not set(self.g.neighbors(u)).intersection(set(self.g.neighbors(v)))

    def count_local_bridges(self):
        monochromatic, bichromatic = 0, 0
        for (u, v) in self.g.edges():
            if self.is_local_bridge(u, v):
                if self.is_bichromatic(u, v):
                    bichromatic += 1
                else:
                    monochromatic += 1
        return monochromatic, bichromatic

    def _count_possible_edges(self, local_bridge):
        count = Count(self.n)
        for u in range(self.n):
            for v in range(u+1, self.n):
                if not self.g.has_edge(u, v) and \
                        (self.is_local_bridge(u, v) == local_bridge):
                    count.count_edge(u, v)
        return count

    def count_possible_local_bridges(self):
        return self._count_possible_edges(local_bridge=True)

    def count_possible_closures(self):
        return self._count_possible_edges(local_bridge=False)

    def count_wedges(self):
        count = Count(self.n)
        for v in self.g.nodes():
            sorted_neighbors = sorted(self.g.neighbors(v))
            for i in range(len(sorted_neighbors)):
                for j in range(i + 1, len(sorted_neighbors)):
                    if not self.g.has_edge(sorted_neighbors[i], sorted_neighbors[j]):
                        count.count_edge(sorted_neighbors[i], sorted_neighbors[j])
        return count

    def predicted_wedges(self):
        return Count(
            self.n,
            monochromatic=3 * 2 * comb(self.n/2, 3) * self.p**2 * (1-self.p)
            + self.n * comb(self.n/2, 2) * self.q**2 * (1-self.p),
            bichromatic=2 * self.n * comb(self.n/2, 2) * self.p * self.q * (1-self.q)
        )

    def predicted_local_bridges(self):
        return Count(
            self.n,
            monochromatic=2 * (1-self.p) * comb(self.n/2, 2) * (1-self.p**2)**(self.n/2-2) * (1-self.q**2)**(self.n/2),
            bichromatic=(1-self.q) * (self.n/2) ** 2 * (1-self.p*self.q)**(self.n-2)
        )

    def predicted_possible_closures(self):
        return Count(
            self.n,
            monochromatic=2 * (1-self.p) * comb(self.n/2, 2) * (1 - (1-self.p**2)**(self.n/2-2) * (1-self.q**2)**(self.n/2)),
            bichromatic=(1-self.q) * (self.n/2) ** 2 * (1 - (1-self.p*self.q)**(self.n-2))
        )

    def predicted_possible_edges(self):
        return Count(
            self.n,
            monochromatic=2 * (1-self.p) * comb(self.n/2, 2),
            bichromatic=(1-self.q) * (self.n/2) ** 2
        )
[((9, 8, 9, 17), 'attr.ib', 'attr.ib', ({}, {}), '()', False, 'import attr\n'), ((10, 20, 10, 38), 'attr.ib', 'attr.ib', (), '', False, 'import attr\n'), ((11, 18, 11, 36), 'attr.ib', 'attr.ib', (), '', False, 'import attr\n'), ((104, 43, 104, 60), 'scipy.special.comb', 'comb', ({(104, 48, 104, 56): '(self.n / 2)', (104, 58, 104, 59): '(2)'}, {}), '(self.n / 2, 2)', False, 'from scipy.special import comb\n'), ((97, 43, 97, 60), 'scipy.special.comb', 'comb', ({(97, 48, 97, 56): '(self.n / 2)', (97, 58, 97, 59): '(2)'}, {}), '(self.n / 2, 2)', False, 'from scipy.special import comb\n'), ((90, 43, 90, 60), 'scipy.special.comb', 'comb', ({(90, 48, 90, 56): '(self.n / 2)', (90, 58, 90, 59): '(2)'}, {}), '(self.n / 2, 2)', False, 'from scipy.special import comb\n'), ((82, 34, 82, 51), 'scipy.special.comb', 'comb', ({(82, 39, 82, 47): '(self.n / 2)', (82, 49, 82, 50): '(3)'}, {}), '(self.n / 2, 3)', False, 'from scipy.special import comb\n'), ((83, 23, 83, 40), 'scipy.special.comb', 'comb', ({(83, 28, 83, 36): '(self.n / 2)', (83, 38, 83, 39): '(2)'}, {}), '(self.n / 2, 2)', False, 'from scipy.special import comb\n'), ((84, 37, 84, 54), 'scipy.special.comb', 'comb', ({(84, 42, 84, 50): '(self.n / 2)', (84, 52, 84, 53): '(2)'}, {}), '(self.n / 2, 2)', False, 'from scipy.special import comb\n')]
PhilHarnish/forge
src/data/graph/ops/anagram_transform_op.py
663f19d759b94d84935c14915922070635a4af65
from typing import Callable, Collection, Iterable, List, Union

from data.anagram import anagram_iter
from data.graph import _op_mixin, bloom_mask, bloom_node, bloom_node_reducer

Transformer = Callable[['bloom_node.BloomNode'], 'bloom_node.BloomNode']
_SPACE_MASK = bloom_mask.for_alpha(' ')


def merge_fn(
    host: 'bloom_node.BloomNode',
    sources: List['bloom_node.BloomNode'],
    extra: list,
    whitelist: Collection = None,
    blacklist: Collection = None,
    **kwargs) -> None:
  del kwargs
  assert len(sources) == 1
  exit_node = sources[0]
  assert len(extra) == 1
  state = _normalize_state(exit_node, extra[0])
  children = list(state)
  # TODO: Need a cleaner way to inject and rerun these nodes.
  if len(children) == 1:
    host.op = _op_mixin.Op(_op_mixin.OP_IDENTITY, children)
  else:
    host.op = _op_mixin.Op(_op_mixin.OP_ADD, children)
  # HACK: This duplicates BloomNode._expand, essentially.
  for key, reduced in bloom_node_reducer.reduce(
      host, whitelist=whitelist, blacklist=blacklist):
    host.link(key, reduced)


class _AnagramTransformIndex(object):
  """Singleton object used during anagram traversal."""

  def __init__(
      self,
      exit_node: 'bloom_node.BloomNode',
      root: anagram_iter.AnagramIter) -> None:
    self._exit_node = exit_node
    reference = bloom_node.BloomNode()
    reference.distance(0)
    reference.weight(1, True)
    reference_choice_paths = {}
    for choice, _ in root.available():
      reference_choice_paths[choice] = choice(reference)
    self._reference_choice_paths = reference_choice_paths
    self._child_cache = {}

  def iter(
      self,
      anagrams: anagram_iter.AnagramIter,
  ) -> Iterable['bloom_node.BloomNode']:
    for child_choice, child_anagrams in anagrams.items():
      key = (child_choice, child_anagrams)
      if key not in self._child_cache:
        self._child_cache[key] = self._make_child(child_choice, child_anagrams)
      yield self._child_cache[key]

  def _make_child(
      self,
      choice: Transformer,
      anagrams: anagram_iter.AnagramIter) -> 'bloom_node.BloomNode':
    children = list(anagrams.available())
    if not children:
      return choice(self._exit_node)
    elif len(children) == 1:
      child_choice, child_duplicates = children[0]
      node = self._exit_node
      while child_duplicates:
        node = child_choice(node)
        child_duplicates -= 1
      return choice(node)
    # Compute requirements from exits.
    node = self._exit_node // _AnagramState(self, anagrams)
    node.provide_mask = self._exit_node.provide_mask
    node.require_mask = self._exit_node.require_mask
    node.lengths_mask = self._exit_node.lengths_mask
    node.annotate({'anagrams': anagrams})
    node.max_weight = self._exit_node.max_weight
    nodes_with_spaces = []
    for child_choice, child_duplicates in children:
      path = self._reference_choice_paths[child_choice]
      if path.require_mask and path.require_mask & _SPACE_MASK:
        nodes_with_spaces.append(path)
      node.provide_mask |= path.provide_mask
      node.require_mask |= path.require_mask
      node.lengths_mask = bloom_mask.lengths_product(
          node.lengths_mask, path.lengths_mask, duplicates=child_duplicates)
    if nodes_with_spaces:
      # Distance and provide masks should be correct. Reset required values.
      # Any route to any of the spaces is now okay but 1+ must be taken.
      node.require_mask = bloom_mask.REQUIRE_NOTHING
      for node_with_spaces in nodes_with_spaces:
        # Only require what all node_with_spaces require.
        node.require_mask &= node_with_spaces.require_mask
    return choice(node)


class _AnagramState(object):
  def __init__(
      self,
      index: _AnagramTransformIndex,
      anagrams: anagram_iter.AnagramIter):
    self._index = index
    self._anagrams = anagrams

  def __iter__(self) -> Iterable['bloom_node.BloomNode']:
    yield from self._index.iter(self._anagrams)

  def __repr__(self) -> str:
    return '_AnagramState(%s)' % self._anagrams

  __str__ = __repr__


def _normalize_state(
    exit_node: 'bloom_node.BloomNode',
    index: Union[Iterable, anagram_iter.AnagramIter]) -> _AnagramState:
  if isinstance(index, _AnagramState):
    return index
  # `index` is an iterable list of ???, one-by-one these will be taken as a
  # route to the `exit_node`.
  initial_anagrams = anagram_iter.from_choices(index)
  index = _AnagramTransformIndex(exit_node, initial_anagrams)
  return _AnagramState(index, initial_anagrams)
[((7, 14, 7, 39), 'data.graph.bloom_mask.for_alpha', 'bloom_mask.for_alpha', ({(7, 35, 7, 38): '""" """'}, {}), "(' ')", False, 'from data.graph import _op_mixin, bloom_mask, bloom_node, bloom_node_reducer\n'), ((28, 22, 29, 53), 'data.graph.bloom_node_reducer.reduce', 'bloom_node_reducer.reduce', (), '', False, 'from data.graph import _op_mixin, bloom_mask, bloom_node, bloom_node_reducer\n'), ((123, 21, 123, 53), 'data.anagram.anagram_iter.from_choices', 'anagram_iter.from_choices', ({(123, 47, 123, 52): 'index'}, {}), '(index)', False, 'from data.anagram import anagram_iter\n'), ((24, 14, 24, 59), 'data.graph._op_mixin.Op', '_op_mixin.Op', ({(24, 27, 24, 48): '_op_mixin.OP_IDENTITY', (24, 50, 24, 58): 'children'}, {}), '(_op_mixin.OP_IDENTITY, children)', False, 'from data.graph import _op_mixin, bloom_mask, bloom_node, bloom_node_reducer\n'), ((26, 14, 26, 54), 'data.graph._op_mixin.Op', '_op_mixin.Op', ({(26, 27, 26, 43): '_op_mixin.OP_ADD', (26, 45, 26, 53): 'children'}, {}), '(_op_mixin.OP_ADD, children)', False, 'from data.graph import _op_mixin, bloom_mask, bloom_node, bloom_node_reducer\n'), ((40, 16, 40, 38), 'data.graph.bloom_node.BloomNode', 'bloom_node.BloomNode', ({}, {}), '()', False, 'from data.graph import _op_mixin, bloom_mask, bloom_node, bloom_node_reducer\n'), ((87, 26, 88, 76), 'data.graph.bloom_mask.lengths_product', 'bloom_mask.lengths_product', (), '', False, 'from data.graph import _op_mixin, bloom_mask, bloom_node, bloom_node_reducer\n')]
tikki/pygogapi
gogapi/api.py
f1b3a811444dc521ea4ad7884104086b52348995
import json import re import logging import html.parser import zlib import requests from gogapi import urls from gogapi.base import NotAuthorizedError, logger from gogapi.product import Product, Series from gogapi.search import SearchResult DEBUG_JSON = False GOGDATA_RE = re.compile(r"gogData\.?(.*?) = (.+);") CLIENT_VERSION = "1.2.17.9" # Just for their statistics USER_AGENT = "GOGGalaxyClient/{} pygogapi/0.1".format(CLIENT_VERSION) REQUEST_RETRIES = 3 PRODUCT_EXPANDABLE = [ "downloads", "expanded_dlcs", "description", "screenshots", "videos", "related_products", "changelog" ] USER_EXPANDABLE = ["friendStatus", "wishlistStatus", "blockedStatus"] LOCALE_CODES = ["de-DE", "en-US", "fr-FR", "pt-BR", "pl-PL", "ru-RU", "zh-Hans"] CURRENCY_CODES = [ "USD", "EUR", "GBP", "AUD", "RUB", "PLN", "CAD", "CHF", "NOK", "SEK", "DKK" ] def find_scripts(site): parser = ScriptParser() parser.feed(site) return parser.scripts class ScriptParser(html.parser.HTMLParser): def __init__(self): super().__init__() self.last_tag = None self.scripts = [] def handle_starttag(self, tag, attrs): self.last_tag = tag def handle_data(self, data): if self.last_tag == "script": self.scripts.append(data) class GogApi: def __init__(self, token=None): self.token = token self.locale = (None, None, None) # TODO: replace tuple self.session = requests.Session() self.session.headers["User-Agent"] = USER_AGENT self.force_authorize = False # Helpers def request(self, method, url, authorized=True, allow_redirects=False, **kwargs): """ Wrapper around requests.request that also handles authorization, retries and logging """ if authorized or self.force_authorize: if self.token is None: raise NotAuthorizedError() if self.token.expired(): self.token.refresh() self.session.headers["Authorization"] = \ "Bearer " + self.token.access_token else: self.session.headers.pop("Authorization", None) # Retries retries = REQUEST_RETRIES while retries > 0: resp = self.session.request( method, url, allow_redirects=allow_redirects, **kwargs) if resp.status_code < 400: return resp elif 400 <= resp.status_code < 500: break else: retries -= 1 resp.raise_for_status() def get(self, *args, **kwargs): """ Wrapper around requests.get """ return self.request("GET", *args, **kwargs) def post(self, *args, **kwargs): """ Wrapper around requests.post """ return self.request("POST", *args, **kwargs) def request_json(self, *args, compressed=False, **kwargs): """ Wrapper around GogApi.request that automatically parses the JSON response. Also does zlib decompression because GOG decided to reinvent the wheel instead of using HTTP gzip encoding for their content system V2. """ resp = self.request(*args, **kwargs) if not compressed: if DEBUG_JSON: print(resp.text) return resp.json() else: json_comp = resp.content json_text = zlib.decompress(json_comp, 15).decode("utf-8") if DEBUG_JSON: print(json_text) return json.loads(json_text) def get_json(self, *args, **kwargs): """ Wrapper around GogApi.get with JSON parsing """ return self.request_json("GET", *args, **kwargs) def get_gogdata(self, url, *args, **kwargs): """ Downloads a page and returns the embedded JavaScript gogData variable. 
""" resp = self.get(url, *args, **kwargs) gogdata = {} for script in find_scripts(resp.text): matches = GOGDATA_RE.finditer(resp.text) for match in matches: subkey = match.group(1) value = match.group(2) value_parsed = json.loads(value) if subkey: data = {subkey: value_parsed} else: data = value_parsed gogdata.update(data) return gogdata def set_locale(self, country, currency, locale): """ country: ISO 3166 Alpha-2 currency: ISO 4217 locale: ISO 639 + ISO 3166 like language[_territory] """ if len(country) != 2: return AttributeError("Invalid country code {}".format(country)) elif currency not in CURRENCY_CODES: return AttributeError("Invalid currency code {}".format(locale)) elif locale not in LOCALE_CODES: return AttributeError("Invalid locale code {}".format(locale)) self.locale = (country, currency, locale) self.session.cookies["gog_lc"] = "_".join(self.locale) # Web APIs def web_game_gogdata(self, slug): return self.get_gogdata(urls.web("game", slug), authorized=False) def web_games_gogdata(self): return self.get_gogdata(urls.web("account.games")) def web_movies_gogdata(self): return self.get_gogdata(urls.web("account.movies")) def web_wishlist_gogdata(self): return self.get_gogdata(urls.web("account.wishlist")) def web_friends_gogdata(self): return self.get_gogdata(urls.web("account.friends")) def web_chat_gogdata(self): return self.get_gogdata(urls.web("account.chat")) def web_wallet_gogdata(self): return self.get_gogdata(urls.web("wallet")) def web_orders_gogdata(self): return self.get_gogdata(urls.web("settings.orders")) def web_account_gamedetails(self, game_id): return self.get_json(urls.web("account.gamedetails", game_id)) def web_account_search(self, **query): """ Allowed query keys: category: Genre feature: Feature hiddenFlag: Show hidden games language: Language mediaType: Game or movie page: Page number search: Search string sortBy: Sort order system: OS tags: Tags totalPages: Total Pages """ return self.get_json(urls.web("account.get_filtered"), params=query) def web_search(self, **query): """ Allowed query keys: category: Genre devpub: Developer or Published feature: Features language: Language mediaType: Game or movie page: Page number price: Price range release: Release timeframe search: Search string sort: Sort order system: OS limit: Max results """ return self.get_json( urls.web("search.filtering"), params=query, authorized=False) def web_user_data(self): return self.get_json(urls.web("user.data")) def web_user_games(self): return self.get_json(urls.web("user.games")) def web_user_wishlist(self): return self.get_json(urls.web("user.wishlist")) def web_user_wishlist_add(self, game_id): """Returns new wishlist""" return self.get_json(urls.web("user.wishlist.add", game_id)) def web_user_wishlist_remove(self, game_id): """Returns new wishlist""" return self.get_json(urls.web("user.wishlist.remove", game_id)) def web_user_ratings(self): return self.get_json(urls.web("user.ratings")) def web_user_review_votes(self): return self.get_json(urls.web("user.review_votes")) def web_user_change_currency(self, currency): return self.get_json(urls.web("user.change_currency", currency)) def web_user_change_language(self, lang): return self.get_json(urls.web("user.change_language", lang)) def web_user_set_redirect_url(self, url): """Set redirect url after login. 
Only know valid url: checkout""" return self.get(urls.web("user.set_redirect_url", params={"url": url})) def web_user_review_guidelines(self): return self.get_json(urls.web("user.review_guidelines")) def web_user_public_info(self, user_id, expand=None): if not expand: params = None elif expand == True: params = {"expand": ",".join(USER_EXPANDABLE)} else: params = {"expand": ",".join(expand)} return self.get_json( urls.web("user.public.info", user_id, params=params)) def web_user_public_block(self, user_id): return self.get_json(urls.web("user.public.block", user_id)) def web_user_public_unblock(self, user_id): return self.get_json(urls.web("user.public.unblock", user_id)) def web_friends_remove(self, user_id): return self.get_json(urls.web("friends.remove", user_id)) def web_friends_invite(self, user_id): return self.get_json(urls.web("friends.invite", user_id)) def web_friends_accept(self, user_id): return self.get_json(urls.web("friends.accept", user_id)) def web_friends_decline(self, user_id): return self.get_json(urls.web("friends.decline", user_id)) def web_cart_get(self): return self.get_json(urls.web("cart.get")) def web_cart_add(self, game_id): return self.get_json(urls.web("cart.add", game_id)) def web_cart_add_series(self, series_id): return self.get_json(urls.web("cart.add_series", series_id)) def web_cart_remove(self, game_id): return self.get_json(urls.web("cart.remove", game_id)) def web_reviews_search(self, game_id): return self.get_json(urls.web("reviews.search", game_id)) def web_reviews_vote(self, game_id): return self.get_json(urls.web("reviews.vote", game_id)) def web_reviews_report(self, game_id): return self.get_json(urls.web("reviews.report", game_id)) def web_reviews_rate(self, game_id): return self.get_json(urls.web("reviews.rate", game_id)) def web_reviews_add(self, game_id): return self.get_json(urls.web("reviews.add", game_id)) def web_order_change_currency(self, order_id, currency): return self.get_json( urls.web("order.change_currency", order_id, currency)) def web_order_add(self, order_id, game_id): return self.get_json(urls.web("order.add", order_id, game_id)) def web_order_remove(self, order_id, game_id): return self.get_json(urls.web("order.remove", order_id, game_id)) def web_order_enable_store_credit(self, order_id): return self.get_json(urls.web("order.enable_store_credit", order_id)) def web_order_disable_store_credit(self, order_id): return self.get_json(urls.web("order.disable_store_credit", order_id)) def web_order_set_as_gift(self, order_id): return self.get_json(urls.web("order.set_as_gift", order_id)) def web_order_set_as_not_gift(self, order_id): return self.get_json(urls.web("order.set_as_non_gift", order_id)) def web_order_process_order(self, order_id): return self.get_json(urls.web("order.process_order", order_id)) def web_order_payment_status(self, order_id): return self.get_json(urls.web("order.payment_status", order_id)) def web_order_check_status(self, order_id): return self.get_json(urls.web("order.check_status", order_id)) def web_checkout(self, order_id=None): if order_id is None: return self.get_json(urls.web("checkout")) else: return self.get_json(urls.web("checkout_id", order_id)) def web_checkout_manual(self, order_id): return self.get_json(urls.web("checkout_manual", order_id)) # Galaxy APIs def galaxy_file(self, game_id, dl_url): dl_url = dl_url.lstrip("/") return self.get_json(urls.galaxy("file", game_id, dl_url)) def galaxy_user(self, user_id=None): if user_id is None: user_id = self.token.user_id return 
self.get_json(urls.galaxy("user", user_id)) def galaxy_friends(self, user_id=None): if user_id is None: user_id = self.token.user_id return self.get_json(urls.galaxy("friends", user_id)) def galaxy_invitations(self, user_id=None): if user_id is None: user_id = self.token.user_id return self.get_json(urls.galaxy("invitations", user_id)) def galaxy_status(self, user_id=None): if user_id is None: user_id = self.token.user_id reqdata = {"version": CLIENT_VERSION} self.post(urls.galaxy("status", user_id), data=reqdata) def galaxy_statuses(self, user_ids): user_ids_str = ",".join(user_ids) params = {"user_id": user_ids_str} #self.request("OPTIONS", urls.galaxy("statuses"), params=params) return self.get_json(urls.galaxy("statuses"), params=params) def galaxy_achievements(self, game_id, user_id=None): if user_id is None: user_id = self.token.user_id return self.get_json(urls.galaxy("achievements", game_id, user_id)) def galaxy_sessions(self, game_id, user_id=None): if user_id is None: user_id = self.token.user_id return self.get_json(urls.galaxy("sessions", game_id, user_id)) def galaxy_friends_achievements(self, game_id, user_id=None): if user_id is None: user_id = self.token.user_id return self.get_json( urls.galaxy("friends.achievements", game_id, user_id)) def galaxy_friends_sessions(self, game_id, user_id=None): if user_id is None: user_id = self.token.user_id return self.get_json(urls.galaxy("friends.sessions", game_id, user_id)) def galaxy_product(self, game_id, expand=None): if not expand: params = {} elif expand is True: params = {"expand": ",".join(PRODUCT_EXPANDABLE)} else: params = {"expand": ",".join(expand)} if self.locale[2]: params["locale"] = self.locale[2] return self.get_json( urls.galaxy("product", game_id), params=params, authorized=False) def galaxy_products(self, game_ids, expand=None): if not expand: params = {} elif expand is True: params = {"expand": ",".join(PRODUCT_EXPANDABLE)} else: params = {"expand": ",".join(expand)} if self.locale[2]: params["locale"] = self.locale[2] ids_string = ",".join(str(game_id) for game_id in game_ids) params["ids"] = ids_string return self.get_json( urls.galaxy("products"), params=params, authorized=False) def galaxy_secure_link(self, game_id, path, generation): return self.get_json( urls.galaxy("cs.securelink", game_id), params={"path": path, "generation": generation}) def galaxy_builds(self, game_id, system): return self.get_json( urls.galaxy("cs.builds", game_id, system), authorized=False) def galaxy_cs_meta(self, meta_id): return self.get_json( urls.galaxy("cs.meta", meta_id[0:2], meta_id[2:4], meta_id), compressed=True, authorized=False) def galaxy_client_config(): return self.get_json(urls.galaxy("client-config"), authorized=False) def product(self, product_id, slug=None): return Product(self, product_id, slug) def search(self, **query): search_data = self.web_search(**query) return SearchResult(self, query, search_data)
[((15, 13, 15, 51), 're.compile', 're.compile', ({(15, 24, 15, 50): '"""gogData\\\\.?(.*?) = (.+);"""'}, {}), "('gogData\\\\.?(.*?) = (.+);')", False, 'import re\n'), ((59, 23, 59, 41), 'requests.Session', 'requests.Session', ({}, {}), '()', False, 'import requests\n'), ((477, 15, 477, 46), 'gogapi.product.Product', 'Product', ({(477, 23, 477, 27): 'self', (477, 29, 477, 39): 'product_id', (477, 41, 477, 45): 'slug'}, {}), '(self, product_id, slug)', False, 'from gogapi.product import Product, Series\n'), ((481, 15, 481, 53), 'gogapi.search.SearchResult', 'SearchResult', ({(481, 28, 481, 32): 'self', (481, 34, 481, 39): 'query', (481, 41, 481, 52): 'search_data'}, {}), '(self, query, search_data)', False, 'from gogapi.search import SearchResult\n'), ((125, 19, 125, 40), 'json.loads', 'json.loads', ({(125, 30, 125, 39): 'json_text'}, {}), '(json_text)', False, 'import json\n'), ((174, 32, 174, 54), 'gogapi.urls.web', 'urls.web', ({(174, 41, 174, 47): '"""game"""', (174, 49, 174, 53): 'slug'}, {}), "('game', slug)", False, 'from gogapi import urls\n'), ((177, 32, 177, 57), 'gogapi.urls.web', 'urls.web', ({(177, 41, 177, 56): '"""account.games"""'}, {}), "('account.games')", False, 'from gogapi import urls\n'), ((180, 32, 180, 58), 'gogapi.urls.web', 'urls.web', ({(180, 41, 180, 57): '"""account.movies"""'}, {}), "('account.movies')", False, 'from gogapi import urls\n'), ((183, 32, 183, 60), 'gogapi.urls.web', 'urls.web', ({(183, 41, 183, 59): '"""account.wishlist"""'}, {}), "('account.wishlist')", False, 'from gogapi import urls\n'), ((186, 32, 186, 59), 'gogapi.urls.web', 'urls.web', ({(186, 41, 186, 58): '"""account.friends"""'}, {}), "('account.friends')", False, 'from gogapi import urls\n'), ((189, 32, 189, 56), 'gogapi.urls.web', 'urls.web', ({(189, 41, 189, 55): '"""account.chat"""'}, {}), "('account.chat')", False, 'from gogapi import urls\n'), ((192, 32, 192, 50), 'gogapi.urls.web', 'urls.web', ({(192, 41, 192, 49): '"""wallet"""'}, {}), "('wallet')", False, 'from gogapi import urls\n'), ((195, 32, 195, 59), 'gogapi.urls.web', 'urls.web', ({(195, 41, 195, 58): '"""settings.orders"""'}, {}), "('settings.orders')", False, 'from gogapi import urls\n'), ((198, 29, 198, 69), 'gogapi.urls.web', 'urls.web', ({(198, 38, 198, 59): '"""account.gamedetails"""', (198, 61, 198, 68): 'game_id'}, {}), "('account.gamedetails', game_id)", False, 'from gogapi import urls\n'), ((215, 29, 215, 61), 'gogapi.urls.web', 'urls.web', ({(215, 38, 215, 60): '"""account.get_filtered"""'}, {}), "('account.get_filtered')", False, 'from gogapi import urls\n'), ((234, 12, 234, 40), 'gogapi.urls.web', 'urls.web', ({(234, 21, 234, 39): '"""search.filtering"""'}, {}), "('search.filtering')", False, 'from gogapi import urls\n'), ((238, 29, 238, 50), 'gogapi.urls.web', 'urls.web', ({(238, 38, 238, 49): '"""user.data"""'}, {}), "('user.data')", False, 'from gogapi import urls\n'), ((241, 29, 241, 51), 'gogapi.urls.web', 'urls.web', ({(241, 38, 241, 50): '"""user.games"""'}, {}), "('user.games')", False, 'from gogapi import urls\n'), ((244, 29, 244, 54), 'gogapi.urls.web', 'urls.web', ({(244, 38, 244, 53): '"""user.wishlist"""'}, {}), "('user.wishlist')", False, 'from gogapi import urls\n'), ((248, 29, 248, 67), 'gogapi.urls.web', 'urls.web', ({(248, 38, 248, 57): '"""user.wishlist.add"""', (248, 59, 248, 66): 'game_id'}, {}), "('user.wishlist.add', game_id)", False, 'from gogapi import urls\n'), ((252, 29, 252, 70), 'gogapi.urls.web', 'urls.web', ({(252, 38, 252, 60): '"""user.wishlist.remove"""', (252, 62, 252, 69): 
'game_id'}, {}), "('user.wishlist.remove', game_id)", False, 'from gogapi import urls\n'), ((255, 29, 255, 53), 'gogapi.urls.web', 'urls.web', ({(255, 38, 255, 52): '"""user.ratings"""'}, {}), "('user.ratings')", False, 'from gogapi import urls\n'), ((258, 29, 258, 58), 'gogapi.urls.web', 'urls.web', ({(258, 38, 258, 57): '"""user.review_votes"""'}, {}), "('user.review_votes')", False, 'from gogapi import urls\n'), ((261, 29, 261, 71), 'gogapi.urls.web', 'urls.web', ({(261, 38, 261, 60): '"""user.change_currency"""', (261, 62, 261, 70): 'currency'}, {}), "('user.change_currency', currency)", False, 'from gogapi import urls\n'), ((264, 29, 264, 67), 'gogapi.urls.web', 'urls.web', ({(264, 38, 264, 60): '"""user.change_language"""', (264, 62, 264, 66): 'lang'}, {}), "('user.change_language', lang)", False, 'from gogapi import urls\n'), ((268, 24, 268, 78), 'gogapi.urls.web', 'urls.web', (), '', False, 'from gogapi import urls\n'), ((271, 29, 271, 63), 'gogapi.urls.web', 'urls.web', ({(271, 38, 271, 62): '"""user.review_guidelines"""'}, {}), "('user.review_guidelines')", False, 'from gogapi import urls\n'), ((281, 12, 281, 64), 'gogapi.urls.web', 'urls.web', (), '', False, 'from gogapi import urls\n'), ((284, 29, 284, 67), 'gogapi.urls.web', 'urls.web', ({(284, 38, 284, 57): '"""user.public.block"""', (284, 59, 284, 66): 'user_id'}, {}), "('user.public.block', user_id)", False, 'from gogapi import urls\n'), ((287, 29, 287, 69), 'gogapi.urls.web', 'urls.web', ({(287, 38, 287, 59): '"""user.public.unblock"""', (287, 61, 287, 68): 'user_id'}, {}), "('user.public.unblock', user_id)", False, 'from gogapi import urls\n'), ((291, 29, 291, 64), 'gogapi.urls.web', 'urls.web', ({(291, 38, 291, 54): '"""friends.remove"""', (291, 56, 291, 63): 'user_id'}, {}), "('friends.remove', user_id)", False, 'from gogapi import urls\n'), ((294, 29, 294, 64), 'gogapi.urls.web', 'urls.web', ({(294, 38, 294, 54): '"""friends.invite"""', (294, 56, 294, 63): 'user_id'}, {}), "('friends.invite', user_id)", False, 'from gogapi import urls\n'), ((297, 29, 297, 64), 'gogapi.urls.web', 'urls.web', ({(297, 38, 297, 54): '"""friends.accept"""', (297, 56, 297, 63): 'user_id'}, {}), "('friends.accept', user_id)", False, 'from gogapi import urls\n'), ((300, 29, 300, 65), 'gogapi.urls.web', 'urls.web', ({(300, 38, 300, 55): '"""friends.decline"""', (300, 57, 300, 64): 'user_id'}, {}), "('friends.decline', user_id)", False, 'from gogapi import urls\n'), ((304, 29, 304, 49), 'gogapi.urls.web', 'urls.web', ({(304, 38, 304, 48): '"""cart.get"""'}, {}), "('cart.get')", False, 'from gogapi import urls\n'), ((307, 29, 307, 58), 'gogapi.urls.web', 'urls.web', ({(307, 38, 307, 48): '"""cart.add"""', (307, 50, 307, 57): 'game_id'}, {}), "('cart.add', game_id)", False, 'from gogapi import urls\n'), ((310, 29, 310, 67), 'gogapi.urls.web', 'urls.web', ({(310, 38, 310, 55): '"""cart.add_series"""', (310, 57, 310, 66): 'series_id'}, {}), "('cart.add_series', series_id)", False, 'from gogapi import urls\n'), ((313, 29, 313, 61), 'gogapi.urls.web', 'urls.web', ({(313, 38, 313, 51): '"""cart.remove"""', (313, 53, 313, 60): 'game_id'}, {}), "('cart.remove', game_id)", False, 'from gogapi import urls\n'), ((317, 29, 317, 64), 'gogapi.urls.web', 'urls.web', ({(317, 38, 317, 54): '"""reviews.search"""', (317, 56, 317, 63): 'game_id'}, {}), "('reviews.search', game_id)", False, 'from gogapi import urls\n'), ((320, 29, 320, 62), 'gogapi.urls.web', 'urls.web', ({(320, 38, 320, 52): '"""reviews.vote"""', (320, 54, 320, 61): 'game_id'}, {}), 
"('reviews.vote', game_id)", False, 'from gogapi import urls\n'), ((323, 29, 323, 64), 'gogapi.urls.web', 'urls.web', ({(323, 38, 323, 54): '"""reviews.report"""', (323, 56, 323, 63): 'game_id'}, {}), "('reviews.report', game_id)", False, 'from gogapi import urls\n'), ((326, 29, 326, 62), 'gogapi.urls.web', 'urls.web', ({(326, 38, 326, 52): '"""reviews.rate"""', (326, 54, 326, 61): 'game_id'}, {}), "('reviews.rate', game_id)", False, 'from gogapi import urls\n'), ((329, 29, 329, 61), 'gogapi.urls.web', 'urls.web', ({(329, 38, 329, 51): '"""reviews.add"""', (329, 53, 329, 60): 'game_id'}, {}), "('reviews.add', game_id)", False, 'from gogapi import urls\n'), ((334, 12, 334, 65), 'gogapi.urls.web', 'urls.web', ({(334, 21, 334, 44): '"""order.change_currency"""', (334, 46, 334, 54): 'order_id', (334, 56, 334, 64): 'currency'}, {}), "('order.change_currency', order_id, currency)", False, 'from gogapi import urls\n'), ((337, 29, 337, 69), 'gogapi.urls.web', 'urls.web', ({(337, 38, 337, 49): '"""order.add"""', (337, 51, 337, 59): 'order_id', (337, 61, 337, 68): 'game_id'}, {}), "('order.add', order_id, game_id)", False, 'from gogapi import urls\n'), ((340, 29, 340, 72), 'gogapi.urls.web', 'urls.web', ({(340, 38, 340, 52): '"""order.remove"""', (340, 54, 340, 62): 'order_id', (340, 64, 340, 71): 'game_id'}, {}), "('order.remove', order_id, game_id)", False, 'from gogapi import urls\n'), ((343, 29, 343, 76), 'gogapi.urls.web', 'urls.web', ({(343, 38, 343, 65): '"""order.enable_store_credit"""', (343, 67, 343, 75): 'order_id'}, {}), "('order.enable_store_credit', order_id)", False, 'from gogapi import urls\n'), ((346, 29, 346, 77), 'gogapi.urls.web', 'urls.web', ({(346, 38, 346, 66): '"""order.disable_store_credit"""', (346, 68, 346, 76): 'order_id'}, {}), "('order.disable_store_credit', order_id)", False, 'from gogapi import urls\n'), ((349, 29, 349, 68), 'gogapi.urls.web', 'urls.web', ({(349, 38, 349, 57): '"""order.set_as_gift"""', (349, 59, 349, 67): 'order_id'}, {}), "('order.set_as_gift', order_id)", False, 'from gogapi import urls\n'), ((352, 29, 352, 72), 'gogapi.urls.web', 'urls.web', ({(352, 38, 352, 61): '"""order.set_as_non_gift"""', (352, 63, 352, 71): 'order_id'}, {}), "('order.set_as_non_gift', order_id)", False, 'from gogapi import urls\n'), ((355, 29, 355, 70), 'gogapi.urls.web', 'urls.web', ({(355, 38, 355, 59): '"""order.process_order"""', (355, 61, 355, 69): 'order_id'}, {}), "('order.process_order', order_id)", False, 'from gogapi import urls\n'), ((358, 29, 358, 71), 'gogapi.urls.web', 'urls.web', ({(358, 38, 358, 60): '"""order.payment_status"""', (358, 62, 358, 70): 'order_id'}, {}), "('order.payment_status', order_id)", False, 'from gogapi import urls\n'), ((361, 29, 361, 69), 'gogapi.urls.web', 'urls.web', ({(361, 38, 361, 58): '"""order.check_status"""', (361, 60, 361, 68): 'order_id'}, {}), "('order.check_status', order_id)", False, 'from gogapi import urls\n'), ((371, 29, 371, 66), 'gogapi.urls.web', 'urls.web', ({(371, 38, 371, 55): '"""checkout_manual"""', (371, 57, 371, 65): 'order_id'}, {}), "('checkout_manual', order_id)", False, 'from gogapi import urls\n'), ((377, 29, 377, 65), 'gogapi.urls.galaxy', 'urls.galaxy', ({(377, 41, 377, 47): '"""file"""', (377, 49, 377, 56): 'game_id', (377, 58, 377, 64): 'dl_url'}, {}), "('file', game_id, dl_url)", False, 'from gogapi import urls\n'), ((382, 29, 382, 57), 'gogapi.urls.galaxy', 'urls.galaxy', ({(382, 41, 382, 47): '"""user"""', (382, 49, 382, 56): 'user_id'}, {}), "('user', user_id)", False, 'from gogapi import 
urls\n'), ((387, 29, 387, 60), 'gogapi.urls.galaxy', 'urls.galaxy', ({(387, 41, 387, 50): '"""friends"""', (387, 52, 387, 59): 'user_id'}, {}), "('friends', user_id)", False, 'from gogapi import urls\n'), ((392, 29, 392, 64), 'gogapi.urls.galaxy', 'urls.galaxy', ({(392, 41, 392, 54): '"""invitations"""', (392, 56, 392, 63): 'user_id'}, {}), "('invitations', user_id)", False, 'from gogapi import urls\n'), ((398, 18, 398, 48), 'gogapi.urls.galaxy', 'urls.galaxy', ({(398, 30, 398, 38): '"""status"""', (398, 40, 398, 47): 'user_id'}, {}), "('status', user_id)", False, 'from gogapi import urls\n'), ((404, 29, 404, 52), 'gogapi.urls.galaxy', 'urls.galaxy', ({(404, 41, 404, 51): '"""statuses"""'}, {}), "('statuses')", False, 'from gogapi import urls\n'), ((409, 29, 409, 74), 'gogapi.urls.galaxy', 'urls.galaxy', ({(409, 41, 409, 55): '"""achievements"""', (409, 57, 409, 64): 'game_id', (409, 66, 409, 73): 'user_id'}, {}), "('achievements', game_id, user_id)", False, 'from gogapi import urls\n'), ((414, 29, 414, 70), 'gogapi.urls.galaxy', 'urls.galaxy', ({(414, 41, 414, 51): '"""sessions"""', (414, 53, 414, 60): 'game_id', (414, 62, 414, 69): 'user_id'}, {}), "('sessions', game_id, user_id)", False, 'from gogapi import urls\n'), ((420, 12, 420, 65), 'gogapi.urls.galaxy', 'urls.galaxy', ({(420, 24, 420, 46): '"""friends.achievements"""', (420, 48, 420, 55): 'game_id', (420, 57, 420, 64): 'user_id'}, {}), "('friends.achievements', game_id, user_id)", False, 'from gogapi import urls\n'), ((425, 29, 425, 78), 'gogapi.urls.galaxy', 'urls.galaxy', ({(425, 41, 425, 59): '"""friends.sessions"""', (425, 61, 425, 68): 'game_id', (425, 70, 425, 77): 'user_id'}, {}), "('friends.sessions', game_id, user_id)", False, 'from gogapi import urls\n'), ((439, 16, 439, 47), 'gogapi.urls.galaxy', 'urls.galaxy', ({(439, 28, 439, 37): '"""product"""', (439, 39, 439, 46): 'game_id'}, {}), "('product', game_id)", False, 'from gogapi import urls\n'), ((456, 12, 456, 35), 'gogapi.urls.galaxy', 'urls.galaxy', ({(456, 24, 456, 34): '"""products"""'}, {}), "('products')", False, 'from gogapi import urls\n'), ((460, 12, 460, 49), 'gogapi.urls.galaxy', 'urls.galaxy', ({(460, 24, 460, 39): '"""cs.securelink"""', (460, 41, 460, 48): 'game_id'}, {}), "('cs.securelink', game_id)", False, 'from gogapi import urls\n'), ((465, 12, 465, 53), 'gogapi.urls.galaxy', 'urls.galaxy', ({(465, 24, 465, 35): '"""cs.builds"""', (465, 37, 465, 44): 'game_id', (465, 46, 465, 52): 'system'}, {}), "('cs.builds', game_id, system)", False, 'from gogapi import urls\n'), ((469, 12, 469, 71), 'gogapi.urls.galaxy', 'urls.galaxy', ({(469, 24, 469, 33): '"""cs.meta"""', (469, 35, 469, 47): 'meta_id[0:2]', (469, 49, 469, 61): 'meta_id[2:4]', (469, 63, 469, 70): 'meta_id'}, {}), "('cs.meta', meta_id[0:2], meta_id[2:4], meta_id)", False, 'from gogapi import urls\n'), ((474, 29, 474, 57), 'gogapi.urls.galaxy', 'urls.galaxy', ({(474, 41, 474, 56): '"""client-config"""'}, {}), "('client-config')", False, 'from gogapi import urls\n'), ((74, 22, 74, 42), 'gogapi.base.NotAuthorizedError', 'NotAuthorizedError', ({}, {}), '()', False, 'from gogapi.base import NotAuthorizedError, logger\n'), ((146, 31, 146, 48), 'json.loads', 'json.loads', ({(146, 42, 146, 47): 'value'}, {}), '(value)', False, 'import json\n'), ((366, 33, 366, 53), 'gogapi.urls.web', 'urls.web', ({(366, 42, 366, 52): '"""checkout"""'}, {}), "('checkout')", False, 'from gogapi import urls\n'), ((368, 33, 368, 66), 'gogapi.urls.web', 'urls.web', ({(368, 42, 368, 55): '"""checkout_id"""', (368, 57, 368, 65): 
'order_id'}, {}), "('checkout_id', order_id)", False, 'from gogapi import urls\n'), ((122, 24, 122, 54), 'zlib.decompress', 'zlib.decompress', ({(122, 40, 122, 49): 'json_comp', (122, 51, 122, 53): '15'}, {}), '(json_comp, 15)', False, 'import zlib\n')]
gibsonMatt/stacks-pairwise
setup.py
8f3cde603c2bfed255f6c399557e9332072886fb
import pathlib
import os
from setuptools import setup

# The directory containing this file
HERE = pathlib.Path(__file__).parent

# The text of the README file
README = (HERE / "README.md").read_text()

# specify requirements of your package here
REQUIREMENTS = ['biopython', 'numpy', 'pandas']

setup(name='stacksPairwise',
    version='0.0.0',
    description='Calculate pairwise divergence (pairwise pi) from Stacks `samples.fa` output file',
    long_description=README,
    long_description_content_type="text/markdown",
    url='https://github.com/gibsonmatt/stacks-pairwise',
    author='Matt Gibson',
    author_email='[email protected]',
    license='MIT',
    packages=['stacksPairwise'],
    install_requires=REQUIREMENTS,
    entry_points={
        "console_scripts": [
            "stacksPairwise=stacksPairwise.__main__:main"
        ]
    },
    keywords='genetics genotyping sequencing Stacks'
)
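The console_scripts entry point above expects a stacksPairwise/__main__.py exposing main(). The sketch below is a hypothetical stand-in showing what that module's skeleton would look like; the actual CLI arguments of the real package are not shown in this record and are assumed here.

# Hypothetical stacksPairwise/__main__.py matching the entry point
# "stacksPairwise=stacksPairwise.__main__:main" declared above.
import argparse


def main():
    parser = argparse.ArgumentParser(
        description="Calculate pairwise divergence from Stacks samples.fa output"
    )
    # Argument name is illustrative, not the package's documented interface.
    parser.add_argument("samples_fa", help="Path to the Stacks samples.fa file")
    args = parser.parse_args()
    print(f"Would process {args.samples_fa}")


if __name__ == "__main__":
    main()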
[((14, 0, 31, 1), 'setuptools.setup', 'setup', (), '', False, 'from setuptools import setup\n'), ((6, 7, 6, 29), 'pathlib.Path', 'pathlib.Path', ({(6, 20, 6, 28): '__file__'}, {}), '(__file__)', False, 'import pathlib\n')]
komax/spanningtree-crossingnumber
csv_experiment.py
444c8809a543905000a63c9d2ff1dcfb31835766
#! /usr/bin/env python import os import sys args = sys.argv[1:] os.system('python -O -m spanningtree.csv_experiment_statistics ' + ' '.join(args))
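The wrapper above shells out with os.system and string concatenation, which drops argument quoting. A behavior-equivalent sketch using subprocess (same module path as the original, interpreter and -O flag preserved) would be:

# Equivalent invocation without shell string concatenation (sketch).
import subprocess
import sys

subprocess.run(
    [sys.executable, "-O", "-m", "spanningtree.csv_experiment_statistics", *sys.argv[1:]],
    check=True,
)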
[]
klemenkotar/dcrl
projects/tutorials/object_nav_ithor_dagger_then_ppo_one_object.py
457be7af1389db37ec12e165dfad646e17359162
import torch import torch.optim as optim from torch.optim.lr_scheduler import LambdaLR from allenact.algorithms.onpolicy_sync.losses import PPO from allenact.algorithms.onpolicy_sync.losses.imitation import Imitation from allenact.algorithms.onpolicy_sync.losses.ppo import PPOConfig from allenact.utils.experiment_utils import ( Builder, PipelineStage, TrainingPipeline, LinearDecay, ) from projects.tutorials.object_nav_ithor_ppo_one_object import ( ObjectNavThorPPOExperimentConfig, ) class ObjectNavThorDaggerThenPPOExperimentConfig(ObjectNavThorPPOExperimentConfig): """A simple object navigation experiment in THOR. Training with DAgger and then PPO. """ @classmethod def tag(cls): return "ObjectNavThorDaggerThenPPO" @classmethod def training_pipeline(cls, **kwargs): dagger_steos = int(1e4) ppo_steps = int(1e6) lr = 2.5e-4 num_mini_batch = 2 if not torch.cuda.is_available() else 6 update_repeats = 4 num_steps = 128 metric_accumulate_interval = cls.MAX_STEPS * 10 # Log every 10 max length tasks save_interval = 10000 gamma = 0.99 use_gae = True gae_lambda = 1.0 max_grad_norm = 0.5 return TrainingPipeline( save_interval=save_interval, metric_accumulate_interval=metric_accumulate_interval, optimizer_builder=Builder(optim.Adam, dict(lr=lr)), num_mini_batch=num_mini_batch, update_repeats=update_repeats, max_grad_norm=max_grad_norm, num_steps=num_steps, named_losses={ "ppo_loss": PPO(clip_decay=LinearDecay(ppo_steps), **PPOConfig), "imitation_loss": Imitation(), # We add an imitation loss. }, gamma=gamma, use_gae=use_gae, gae_lambda=gae_lambda, advance_scene_rollout_period=cls.ADVANCE_SCENE_ROLLOUT_PERIOD, pipeline_stages=[ PipelineStage( loss_names=["imitation_loss"], teacher_forcing=LinearDecay( startp=1.0, endp=0.0, steps=dagger_steos, ), max_stage_steps=dagger_steos, ), PipelineStage(loss_names=["ppo_loss"], max_stage_steps=ppo_steps,), ], lr_scheduler_builder=Builder( LambdaLR, {"lr_lambda": LinearDecay(steps=ppo_steps)} ), )
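The DAgger stage above anneals teacher forcing with LinearDecay(startp=1.0, endp=0.0, steps=dagger_steos). The toy reimplementation below is an assumption about the shape of that schedule (plain Python, no allenact dependency), just to make the annealing concrete.

# Sketch of the teacher-forcing schedule used in the imitation stage:
# linear interpolation from startp to endp over the stage's steps.
def linear_decay(step, steps, startp=1.0, endp=0.0):
    frac = min(max(step / steps, 0.0), 1.0)
    return startp + (endp - startp) * frac

dagger_steps = int(1e4)
for step in (0, 2500, 5000, 10000):
    print(step, linear_decay(step, dagger_steps))  # 1.0, 0.75, 0.5, 0.0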
[((34, 34, 34, 59), 'torch.cuda.is_available', 'torch.cuda.is_available', ({}, {}), '()', False, 'import torch\n'), ((54, 34, 54, 45), 'allenact.algorithms.onpolicy_sync.losses.imitation.Imitation', 'Imitation', ({}, {}), '()', False, 'from allenact.algorithms.onpolicy_sync.losses.imitation import Imitation\n'), ((68, 16, 68, 82), 'allenact.utils.experiment_utils.PipelineStage', 'PipelineStage', (), '', False, 'from allenact.utils.experiment_utils import Builder, PipelineStage, TrainingPipeline, LinearDecay\n'), ((71, 40, 71, 68), 'allenact.utils.experiment_utils.LinearDecay', 'LinearDecay', (), '', False, 'from allenact.utils.experiment_utils import Builder, PipelineStage, TrainingPipeline, LinearDecay\n'), ((53, 43, 53, 65), 'allenact.utils.experiment_utils.LinearDecay', 'LinearDecay', ({(53, 55, 53, 64): 'ppo_steps'}, {}), '(ppo_steps)', False, 'from allenact.utils.experiment_utils import Builder, PipelineStage, TrainingPipeline, LinearDecay\n'), ((63, 36, 65, 21), 'allenact.utils.experiment_utils.LinearDecay', 'LinearDecay', (), '', False, 'from allenact.utils.experiment_utils import Builder, PipelineStage, TrainingPipeline, LinearDecay\n')]
DanilKrivonos/BioCAT-nrp-BIOsynthesis-Caluster-Analyzing-Tool
BioCAT/src/Calculating_scores.py
d58d330e3e11380c0c917a0ad9c12a51447f1624
from numpy import array from pickle import load from pandas import read_csv import os from BioCAT.src.Combinatorics import multi_thread_shuffling, multi_thread_calculating_scores, make_combine, get_score, get_max_aminochain, skipper # Importing random forest model modelpath = os.path.dirname(os.path.abspath(__file__)) + '/RFC.dump' Rf = load(open(modelpath, 'rb')) # The function generate list of shuflled matrix def make_shuffle_matrix(matrix, cpu, iterat): """ The functuion generate massive of shuffled matrix. Parameters ---------- matrix : pandas DataFrame PSSM profile. cpu : int Number of tred used. iterat : int Number of iterations of shuffling. Returns ------- module_shuffling_matrix : list List of matrix, shuffled by module. substrate_shuffling_matrix : list List of matrix, shuffled by substrate. """ module_shuffling_matrix = multi_thread_shuffling(matrix, ShufflingType='module', iterations=iterat, threads=cpu) substrate_shuffling_matrix = multi_thread_shuffling(matrix, ShufflingType='substrate', iterations=iterat, threads=cpu) return module_shuffling_matrix, substrate_shuffling_matrix # The fujnction finds suquence with maximum possible value, results from alignment def get_MaxSeq(matrix, variant_seq): """ The functuion parallel calculation of scores for shuffled matrix. Parameters ---------- matrix : pandas DataFrame PSSM profile. variant_seq : list Variant of core peptide chain. Returns ------- shuffled_scores : list List of scores for shuffled matrix. """ MaxSeq = [] subs = matrix.keys()[1: ] # Find sequence, wich have maximum alignment score for idx in matrix.index: MAX_value = max(list(matrix.iloc[idx][1:])) for key in subs: if matrix[key][idx] == MAX_value: MaxSeq.append(key) # If two smonomer have same value break # Making two variants of MaxSeq MaxSeq_full = MaxSeq.copy() MaxSeq_nan = MaxSeq.copy() for max_sub_idx in range(len(MaxSeq)): if variant_seq[max_sub_idx] == 'nan': MaxSeq_nan[max_sub_idx] = 'nan' # Adding nan to MaxSeq return MaxSeq_full, MaxSeq_nan # The function gives an information about clusters def get_cluster_info(table, BGC_ID, target_file): """ The functuion return information about cluster. Parameters ---------- table : pandas DataFrame Table with meta inforamtion about NRPS clusters. BGC_ID : str PSSM cluster ID. target_file : pandas DataFrame PSSM profile. Returns ------- Name : str Cluster ID. Coord_cluster : str Coordinate of cluster. strand : str Strand of cluster. """ for ind in table[table['ID'].str.contains(BGC_ID)].index: Name = table[table['ID'].str.contains(target_file.split('.')[0].split('_A_')[1])]['Name'][ind] Coord_cluster = table['Coordinates of cluster'][ind] strand = table['Gen strand'][ind] break return Name, Coord_cluster, strand # Calculate scores def calculate_scores(variant_seq, matrix, substrate_shuffling_matrix, module_shuffling_matrix, cpu, iterat): """ Calculating scores. Parameters ---------- variant_seq : list Variant of core peptide chain. matrix : pandas DataFrame PSSM profile. substrate_shuffling_matrix : list List of matrix, shuffled by substrate. module_shuffling_matrix : list List of matrix, shuffled by module. cpu : int Number of threads used. iterat : int Number of iterations of shuffling. Returns ------- Sln_score : float Mln_score : float Slt_score : float Mlt_score : float Sdn_score : float Mdn_score : float Sdt_score : float Mdt_score : float Scores, which calculated with shuffling matrix by different variants. 
M - module shuffling S - substrate shuffling l - logarithmic transformation of score d - raw score n - MaxSeq with nan replacement t - MaxSeq without nan replacement Relative_score : float Relative score (Probability of target class) Binary : float Binary score of cluster matching. """ # Finding suquence with maximum possible value, results from alignment MaxSeq_full, MaxSeq_nan = get_MaxSeq(matrix, variant_seq) # Calculating shuffled scores Sln_shuffled_score = array(multi_thread_calculating_scores(MaxSeq_nan, substrate_shuffling_matrix, type_value='log', iterations=iterat, threads=cpu)) Mln_shuffled_score = array(multi_thread_calculating_scores(MaxSeq_nan, module_shuffling_matrix, type_value='log', iterations=iterat, threads=cpu)) Slt_shuffled_score = array(multi_thread_calculating_scores(MaxSeq_full, substrate_shuffling_matrix, type_value='log', iterations=iterat, threads=cpu)) Mlt_shuffled_score = array(multi_thread_calculating_scores(MaxSeq_full, module_shuffling_matrix, type_value='log', iterations=iterat, threads=cpu)) Sdn_shuffled_score = array(multi_thread_calculating_scores(MaxSeq_nan, substrate_shuffling_matrix, type_value=None, iterations=iterat, threads=cpu)) Mdn_shuffled_score = array(multi_thread_calculating_scores(MaxSeq_nan, module_shuffling_matrix, type_value=None, iterations=iterat, threads=cpu)) Sdt_shuffled_score = array(multi_thread_calculating_scores(MaxSeq_full, substrate_shuffling_matrix, type_value=None, iterations=iterat, threads=cpu)) Mdt_shuffled_score = array(multi_thread_calculating_scores(MaxSeq_full, module_shuffling_matrix, type_value=None, iterations=iterat, threads=cpu)) # Calculating scores for target sequence log_target_score = get_score(variant_seq, matrix, type_value='log') non_log_target_score = get_score(variant_seq, matrix, type_value=None) # Calculating features scores Sln_score = len(Sln_shuffled_score[Sln_shuffled_score < log_target_score])/len(Sln_shuffled_score) Mln_score = len(Mln_shuffled_score[Mln_shuffled_score < log_target_score])/len(Mln_shuffled_score) Slt_score = len(Slt_shuffled_score[Slt_shuffled_score < log_target_score])/len(Slt_shuffled_score) Mlt_score = len(Mlt_shuffled_score[Mlt_shuffled_score < log_target_score])/len(Mlt_shuffled_score) Sdn_score = len(Sdn_shuffled_score[Sdn_shuffled_score < non_log_target_score])/len(Sdn_shuffled_score) Mdn_score = len(Mdn_shuffled_score[Mdn_shuffled_score < non_log_target_score])/len(Mdn_shuffled_score) Sdt_score = len(Sdt_shuffled_score[Sdt_shuffled_score < non_log_target_score])/len(Sdt_shuffled_score) Mdt_score = len(Mdt_shuffled_score[Mdt_shuffled_score < non_log_target_score])/len(Mdt_shuffled_score) # Calculating Relative score Relative_score = round(Rf.predict_proba([[Sln_score, Mln_score, Sdn_score, Mdn_score, Sdt_score, Mdt_score, Slt_score, Mlt_score ]])[0][1], 3) Binary = Rf.predict([[Sln_score, Mln_score, Sdn_score, Mdn_score, Sdt_score, Mdt_score, Slt_score, Mlt_score ]])[0] return Sln_score, Mln_score, Slt_score, Mlt_score, Sdn_score, Mdn_score, Sdt_score, Mdt_score, Relative_score, Binary def give_results(tsv_out, folder, files, table, ID, PeptideSeq, skip, cpu, iterat): """ The functuion return information about cluster. Parameters ---------- tsv_out : dict Empty dictionary for adding results. folder : str Path to PSSMs. files : list List of PSSMs. table : pandas DataFrame Table with meta inforamtion about NRPS clusters. ID : str Name of substance. PeptideSeq : dict Core peptide chains for different biosynthesis types (e.g. A, B, or C). kip : int Number of presumptive skip. 
    cpu : int
        Number of threads used.
    iterat : int
        Number of iterations of shuffling.

    Returns
    -------
    tsv_out : dict
        Full dictionary for adding results.
    """
    for target_file in files:
        try:
            BGC_ID = target_file.split('.')[0].split('_A_')[1]
        except:
            continue

        if '_A_' not in target_file:
            continue

        Name, Coord_cluster, strand = get_cluster_info(table, BGC_ID, target_file) # Getting information about the cluster
        BGC = read_csv(folder + target_file, sep='\t')
        # Skipping mode
        if skip == 0:
            BGC = [BGC]
        else:
            BGC = skipper(BGC, skip)

        for matrix in BGC:
            # Check quality of matrix
            if len(matrix) == 1:
                continue

            check = 0
            values = matrix.drop(matrix.columns[0], axis=1).values

            for i in values:
                if all(i) == 0:
                    check += 1

            if check == len(values): # If this condition is True, the matrix contains only unrecognized monomers
                continue
            # Generating shuffling matrix
            module_shuffling_matrix, substrate_shuffling_matrix = make_shuffle_matrix(matrix, cpu, iterat)

            for BS_type in PeptideSeq:  # For every biosynthesis profile pathway
                if PeptideSeq[BS_type] is None:  # If the sequence contains only nan monomers
                    continue
                if len(PeptideSeq[BS_type]) == 0:  # If there is no variant
                    continue
                # Check correctness of PeptideSeq
                length_max = get_max_aminochain(PeptideSeq[BS_type])
                EPs = make_combine(PeptideSeq[BS_type], length_max, matrix, delta=3)

                if EPs is None:  # If the sequence length can't be scaled to the cluster size
                    continue
                for variant_seq in EPs:
                    Sln_score, Mln_score, Slt_score, Mlt_score, Sdn_score, Mdn_score, Sdt_score, Mdt_score, Relative_score, Binary = calculate_scores(variant_seq, matrix, substrate_shuffling_matrix, module_shuffling_matrix, cpu, iterat)
                    # Recording results
                    tsv_out['Chromosome ID'].append(Name)
                    tsv_out['Coordinates of cluster'].append(Coord_cluster)
                    tsv_out['Strand'].append(strand)
                    tsv_out['Substance'].append(ID)
                    tsv_out['BGC ID'].append(BGC_ID)
                    tsv_out['Putative linearized NRP sequence'].append('--'.join(variant_seq))
                    tsv_out['Biosynthesis profile'].append('Type {}'.format(BS_type))
                    tsv_out['Sln score'].append(Sln_score) # substrates shuffled, log score, nan in maximally possible sequence
                    tsv_out['Mln score'].append(Mln_score) # modules shuffled, log score, nan in maximally possible sequence
                    tsv_out['Sdn score'].append(Sdn_score) # substrates shuffled, raw score, nan in maximally possible sequence
                    tsv_out['Mdn score'].append(Mdn_score) # modules shuffled, raw score, nan in maximally possible sequence
                    tsv_out['Sdt score'].append(Sdt_score) # substrates shuffled, raw score, full maximally possible sequence
                    tsv_out['Mdt score'].append(Mdt_score) # modules shuffled, raw score, full maximally possible sequence
                    tsv_out['Slt score'].append(Slt_score) # substrates shuffled, log score, full maximally possible sequence
                    tsv_out['Mlt score'].append(Mlt_score) # modules shuffled, log score, full maximally possible sequence
                    tsv_out['Relative score'].append(Relative_score) # Final score
                    tsv_out['Binary'].append(Binary) # Binary value

    return tsv_out
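The feature scores described in calculate_scores above are empirical tail fractions: the share of shuffled (null) alignment scores that fall below the target score. The toy example below reproduces that rule with synthetic numbers only, independent of BioCAT's matrices.

# Toy illustration of the scoring rule used above: score = fraction of
# shuffled (null) scores that are smaller than the observed target score.
import numpy as np

rng = np.random.default_rng(0)
shuffled_scores = rng.normal(loc=10.0, scale=2.0, size=1000)  # null distribution
target_score = 14.0                                           # observed score

feature_score = np.mean(shuffled_scores < target_score)
print(round(float(feature_score), 3))  # high value -> target beats most shuffles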
[((32, 30, 32, 116), 'BioCAT.src.Combinatorics.multi_thread_shuffling', 'multi_thread_shuffling', (), '', False, 'from BioCAT.src.Combinatorics import multi_thread_shuffling, multi_thread_calculating_scores, make_combine, get_score, get_max_aminochain, skipper\n'), ((33, 33, 33, 122), 'BioCAT.src.Combinatorics.multi_thread_shuffling', 'multi_thread_shuffling', (), '', False, 'from BioCAT.src.Combinatorics import multi_thread_shuffling, multi_thread_calculating_scores, make_combine, get_score, get_max_aminochain, skipper\n'), ((156, 23, 156, 71), 'BioCAT.src.Combinatorics.get_score', 'get_score', (), '', False, 'from BioCAT.src.Combinatorics import multi_thread_shuffling, multi_thread_calculating_scores, make_combine, get_score, get_max_aminochain, skipper\n'), ((157, 27, 157, 74), 'BioCAT.src.Combinatorics.get_score', 'get_score', (), '', False, 'from BioCAT.src.Combinatorics import multi_thread_shuffling, multi_thread_calculating_scores, make_combine, get_score, get_max_aminochain, skipper\n'), ((9, 28, 9, 53), 'os.path.abspath', 'os.path.abspath', ({(9, 44, 9, 52): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((147, 31, 147, 152), 'BioCAT.src.Combinatorics.multi_thread_calculating_scores', 'multi_thread_calculating_scores', (), '', False, 'from BioCAT.src.Combinatorics import multi_thread_shuffling, multi_thread_calculating_scores, make_combine, get_score, get_max_aminochain, skipper\n'), ((148, 31, 148, 149), 'BioCAT.src.Combinatorics.multi_thread_calculating_scores', 'multi_thread_calculating_scores', (), '', False, 'from BioCAT.src.Combinatorics import multi_thread_shuffling, multi_thread_calculating_scores, make_combine, get_score, get_max_aminochain, skipper\n'), ((149, 31, 149, 153), 'BioCAT.src.Combinatorics.multi_thread_calculating_scores', 'multi_thread_calculating_scores', (), '', False, 'from BioCAT.src.Combinatorics import multi_thread_shuffling, multi_thread_calculating_scores, make_combine, get_score, get_max_aminochain, skipper\n'), ((150, 31, 150, 150), 'BioCAT.src.Combinatorics.multi_thread_calculating_scores', 'multi_thread_calculating_scores', (), '', False, 'from BioCAT.src.Combinatorics import multi_thread_shuffling, multi_thread_calculating_scores, make_combine, get_score, get_max_aminochain, skipper\n'), ((151, 31, 151, 151), 'BioCAT.src.Combinatorics.multi_thread_calculating_scores', 'multi_thread_calculating_scores', (), '', False, 'from BioCAT.src.Combinatorics import multi_thread_shuffling, multi_thread_calculating_scores, make_combine, get_score, get_max_aminochain, skipper\n'), ((152, 31, 152, 148), 'BioCAT.src.Combinatorics.multi_thread_calculating_scores', 'multi_thread_calculating_scores', (), '', False, 'from BioCAT.src.Combinatorics import multi_thread_shuffling, multi_thread_calculating_scores, make_combine, get_score, get_max_aminochain, skipper\n'), ((153, 31, 153, 152), 'BioCAT.src.Combinatorics.multi_thread_calculating_scores', 'multi_thread_calculating_scores', (), '', False, 'from BioCAT.src.Combinatorics import multi_thread_shuffling, multi_thread_calculating_scores, make_combine, get_score, get_max_aminochain, skipper\n'), ((154, 31, 154, 149), 'BioCAT.src.Combinatorics.multi_thread_calculating_scores', 'multi_thread_calculating_scores', (), '', False, 'from BioCAT.src.Combinatorics import multi_thread_shuffling, multi_thread_calculating_scores, make_combine, get_score, get_max_aminochain, skipper\n'), ((223, 14, 223, 54), 'pandas.read_csv', 'read_csv', (), '', False, 'from pandas import read_csv\n'), ((231, 19, 231, 37), 
'BioCAT.src.Combinatorics.skipper', 'skipper', ({(231, 27, 231, 30): 'BGC', (231, 32, 231, 36): 'skip'}, {}), '(BGC, skip)', False, 'from BioCAT.src.Combinatorics import multi_thread_shuffling, multi_thread_calculating_scores, make_combine, get_score, get_max_aminochain, skipper\n'), ((261, 28, 261, 67), 'BioCAT.src.Combinatorics.get_max_aminochain', 'get_max_aminochain', ({(261, 47, 261, 66): 'PeptideSeq[BS_type]'}, {}), '(PeptideSeq[BS_type])', False, 'from BioCAT.src.Combinatorics import multi_thread_shuffling, multi_thread_calculating_scores, make_combine, get_score, get_max_aminochain, skipper\n'), ((262, 22, 262, 84), 'BioCAT.src.Combinatorics.make_combine', 'make_combine', (), '', False, 'from BioCAT.src.Combinatorics import multi_thread_shuffling, multi_thread_calculating_scores, make_combine, get_score, get_max_aminochain, skipper\n')]
m4ta1l/deal
deal/linter/_extractors/returns.py
2a8e9bf412b8635b00a2b798dd8802375814a1c8
# built-in from typing import Optional # app from .common import TOKENS, Extractor, Token, traverse from .value import UNKNOWN, get_value get_returns = Extractor() inner_extractor = Extractor() def has_returns(body: list) -> bool: for expr in traverse(body=body): if isinstance(expr, TOKENS.RETURN + TOKENS.YIELD): return True return False @get_returns.register(*TOKENS.RETURN) def handle_return(expr) -> Optional[Token]: value = get_value(expr=expr.value) if value is UNKNOWN: return None return Token(value=value, line=expr.lineno, col=expr.value.col_offset) @get_returns.register(*TOKENS.YIELD) def handle_yield(expr) -> Optional[Token]: value = get_value(expr=expr.value) if value is UNKNOWN: return None return Token(value=value, line=expr.lineno, col=expr.value.col_offset)
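The handlers above walk a function body and emit a Token for every return/yield whose value can be resolved. The standalone sketch below illustrates the same idea with only the stdlib ast module; it deliberately does not use deal's internal TOKENS/Extractor/Token machinery.

# Standalone sketch of return/yield value extraction using plain ast
# (deal's Extractor/Token types are replaced by simple prints here).
import ast

source = """
def f(x):
    if x:
        return 1
    yield 'done'
"""

tree = ast.parse(source)
for node in ast.walk(tree):
    if isinstance(node, (ast.Return, ast.Yield)) and node.value is not None:
        print(type(node).__name__, ast.literal_eval(node.value), "line", node.lineno)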
[]
yourball/qubiter
qubiter/device_specific/chip_couplings_ibm.py
5ef0ea064fa8c9f125f7951a01fbb88504a054a5
def aaa(): # trick sphinx to build link in doc pass # retired ibmqx2_c_to_tars =\ { 0: [1, 2], 1: [2], 2: [], 3: [2, 4], 4: [2] } # 6 edges # retired ibmqx4_c_to_tars =\ { 0: [], 1: [0], 2: [0, 1, 4], 3: [2, 4], 4: [] } # 6 edges # retired ibmq16Rus_c_to_tars = \ { 0: [], 1: [0, 2], 2: [3], 3: [4, 14], 4: [], 5: [4], 6: [5, 7, 11], 7: [10], 8: [7], 9: [8, 10], 10: [], 11: [10], 12: [5, 11, 13], 13: [4, 14], 14: [], 15: [0, 2, 14] } # 22 edges ibm20AustinTokyo_c_to_tars = \ { 0: [1, 5], 1: [0, 2, 6, 7], 2: [1, 3, 6, 7], 3: [2, 4, 8, 9], 4: [3, 8, 9], 5: [0, 6, 10, 11], 6: [1, 2, 5, 7, 10, 11], 7: [1, 2, 6, 8, 12, 13], 8: [3, 4, 7, 9, 12, 13], 9: [3, 4, 8, 14], 10: [5, 6, 11, 15], 11: [5, 6, 10, 12, 16, 17], 12: [7, 8, 11, 13, 16, 17], 13: [7, 8, 12, 14, 18, 19], 14: [9, 13, 18, 19], 15: [10, 16], 16: [11, 12, 15, 17], 17: [11, 12, 16, 18], 18: [13, 14, 17, 19], 19: [13, 14, 18] } # 86 edges ibmq5YorktownTenerife_c_to_tars = \ { 0: [1, 2], 1: [0, 2], 2: [0, 1, 3, 4], 3: [2, 4], 4: [2, 3] } # 12 edges ibmq14Melb_c_to_tars = \ { 0: [1], 1: [0, 2, 13], 2: [1, 3, 12], 3: [2, 4, 11], 4: [3, 5, 10], 5: [4, 6, 9], 6: [5, 8], 7: [8], 8: [6, 7, 9], 9: [5, 8, 10], 10: [4, 9, 11], 11: [3, 10, 12], 12: [2, 11, 13], 13: [1, 12] } # 36 edges
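Each dict above is an adjacency list from control qubit to allowed target qubits, and the "# N edges" comments count the listed pairs. A quick check of two of those counts (assuming the package is importable as qubiter, as the repo path suggests):

# Verify the "# N edges" comments: edge count = sum of adjacency-list lengths.
from qubiter.device_specific.chip_couplings_ibm import (
    ibmq5YorktownTenerife_c_to_tars,
    ibmq14Melb_c_to_tars,
)

for name, coupling in [("ibmq5YorktownTenerife", ibmq5YorktownTenerife_c_to_tars),
                       ("ibmq14Melb", ibmq14Melb_c_to_tars)]:
    n_edges = sum(len(targets) for targets in coupling.values())
    print(name, n_edges)  # 12 and 36, matching the comments above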
[]
rainshen49/citadel-trading-comp
Template.py
3c3b6464f548d4920f46b5f5cd113ebc4a1d08a5
import signal import requests import time from math import floor shutdown = False MAIN_TAKER = 0.0065 MAIN_MAKER = 0.002 ALT_TAKER = 0.005 ALT_MAKER = 0.0035 TAKER = (MAIN_TAKER + ALT_TAKER)*2 MAKER = MAIN_MAKER + ALT_MAKER TAKEMAIN = MAIN_TAKER - ALT_MAKER TAKEALT = ALT_TAKER - MAIN_MAKER BUFFER = 0.01 NaN = float('nan') class ApiException(Exception): pass class Book(object): def __init__(self, sym, json): global NaN self.sym = sym self.json = json # could be cached self.bids = self.json['bids'] self.asks = self.json['asks'] self.ask_price = 1 self.asks_quantity_left = 0 self.bid_price = 1 self.bids_quantity_left = 0 if self.bids: self.bid_price = self.bids[0]['price'] if self.asks: self.ask_price = self.asks[0]['price'] def bids_room(self): if self.bids: quantity = sum([b['quantity'] for b in self.bids if b['price'] == self.bid_price]) filled = sum([b['quantity_filled'] for b in self.bids if b['price'] == self.bid_price]) return quantity - filled else: return 0 def asks_room(self): if self.asks: quantity = sum([b['quantity'] for b in self.asks if b['price'] == self.ask_price]) filled = sum([b['quantity_filled'] for b in self.asks if b['price'] == self.ask_price]) return quantity - filled else: return 0 class Limits(dict): def __init__(self, json): self.update(json) self.gross_limit = int(json['gross_limit']) self.net_limit = int(json['net_limit']) self.gross = int(json['gross']) self.net = int(json['net']) class OHLC(dict): def __init__(self, sym, json): self.sym = sym self.update(json) self.tick = json['tick'] self.open = json['open'] self.high = json['high'] self.low = json['low'] self.close = json['close'] class Shock(dict): def __init__(self, news, currtick): self.ticker = news['ticker'] self.elapsed = currtick - news['tick'] headline = news['headline'] try: self.amount = float(headline[-6:].replace('$', '')) except: self.amount = 0 class Session(object): def __init__(self, url, key): self.url = url self.key = key self.tick = -1 def __enter__(self): self.session = requests.Session() self.session.headers.update({'X-API-Key': self.key}) return self def __exit__(self, type, value, traceback): self.session.close() def get_tick(self): while True: resp = self.session.get(self.url + '/v1/case', params=None) if not resp.ok: raise ApiException('could not get tick: ' + str(resp)) json = resp.json() if json['status'] == 'STOPPED' or shutdown: return False if json['tick'] != self.tick: self.tick = json['tick'] print('.', self.tick) return True # this timer is unnecessary, network latency should be enough time.sleep(0.1) def get_book(self, sym): resp = self.session.get( self.url + '/v1/securities/book', params={'ticker': sym}) if not resp.ok: raise ApiException('could not get book: ' + str(resp)) return Book(sym, resp.json()) def send_order(self, sym, side, price, size): resp = self.session.post(self.url + '/v1/orders', params={ 'ticker': sym, 'type': 'LIMIT', 'action': side, 'quantity': size, 'price': price}) if resp.ok: print('sent order', side, sym, size, '@', price) else: print('failed to send order', side, sym, size, '@', price, ':', resp.text) def getLimit(self): resp = self.session.get(self.url+'/v1/limits') if not resp.ok: raise ApiException('could not get limit: '+str(resp)) return Limits(resp.json()[0]) def getSecurities(self, sym=None): if sym is None: resp = self.session.get(self.url+'/v1/securities') else: resp = self.session.get( self.url+'/v1/securities', params={'ticker': sym}) if not resp.ok: raise ApiException('could not get position: '+str(resp)) json = resp.json() return 
{sec['ticker']: {k: sec[k] for k in [ "position", "vwap", "nlv", "last", "bid", "bid_size", "ask", "ask_size", "unrealized", "realized" ]} for sec in json} def get_OHLC(self, sym, ticks=50): resp = self.session.get( self.url + '/v1/securities/history', params={'ticker': sym,'limit':ticks}) if not resp.ok: raise ApiException('could not get OHLC: ' + str(resp)) return [OHLC(sym, ohlc) for ohlc in resp.json()] def buy(self, sym, price, size): self.send_order(sym, 'BUY', price, size) def sell(self, sym, price, size): self.send_order(sym, 'SELL', price, size) def send_market(self, sym, side, size): resp = self.session.post(self.url + '/v1/orders', params={ 'ticker': sym, 'type': 'MARKET', 'action': side, 'quantity': size}) if resp.ok: json = resp.json() print('market order', side, sym, size, '@', json['vwap']) return json['vwap'] else: print('failed to send order', side, sym, size, '@Market:', resp.text) return 0 def buyM(self, sym, size): return self.send_market(sym, 'BUY', size) def sellM(self, sym, size): return self.send_market(sym, 'SELL', size) def getNews(self): resp = self.session.get(self.url + '/v1/news', params={'limit': 10}) if not resp.ok: raise ApiException('failed to get news', resp.text) else: json = resp.json() # only care about recent news return [Shock(news, self.tick) for news in json if news['tick'] > self.tick-4] def getTrader(self): resp = self.session.get(self.url + '/v1/trader') if not resp.ok: raise ApiException('failed to get trader info', resp.text) else: json = resp.json() return json def main(): # price does change in every tick # check position # plain arbitradge # index arbitrage # shock handling # wave riding # pairTickers = [('WMT-M', 'WMT-A'), ('CAT-M', 'CAT-A'), ('MMM-M', 'MMM-A')] with Session('http://localhost:9998', 'VHK3DEDE') as session: while session.get_tick(): try: shock_runner(session) exchange_arbitrage(session, "WMT-M", "WMT-A") exchange_arbitrage(session, "CAT-M", "CAT-A") exchange_arbitrage(session, "MMM-M", "MMM-A") index_arbitrage(session, ['WMT', 'MMM', 'CAT']) except Exception as ex: print("error", str(ex)) # trader = session.getTrader() # print(trader['nlv']) # TODO: position cleaner: try to reduce gross position loss-free # TODO: implement range runner for the last x ticks def avg(arr): return sum(arr)/float(len(arr)) def window_trend(left,right): leftavg = avg(left) rightavg = avg(right) if rightavg > leftavg: return 1 elif rightavg < leftavg: return -1 else: return 0 def splitarr(arr): n = len(arr) left = arr[:n//2] right = arr[n//2:] return left,right def wwindow_trend(prices): left, right = splitarr(prices) trend = window_trend(left,right) lleft, lright = splitarr(left) rleft, rright = splitarr(right) trendl = window_trend(lleft,lright) trendr = window_trend(rleft,rright) return trend + trendl + trendr def trend_runner(session, ticker): if session.tick<20: return # short term trend prices = session.get_OHLC(ticker, 20) highs = [price.high for price in prices] lows = [price.low for price in prices] highTrend = wwindow_trend(highs) lowTrend = wwindow_trend(lows) if highTrend+lowTrend < -4: # volatile, but no trend session.buyM(ticker,1000) if highTrend+lowTrend > 4: session.sellM(ticker,1000) print(ticker,"short hightrend",highTrend,"lowtrend",lowTrend) if session.tick<100: return prices = session.get_OHLC(ticker, 100) highs = [price.high for price in prices] lows = [price.low for price in prices] highTrend = wwindow_trend(highs) lowTrend = wwindow_trend(lows) # grown too much if highTrend+lowTrend < -4: # volatile, but no trend 
session.sellM(ticker,1000) # dropped too much if highTrend+lowTrend > 4: session.buyM(ticker,1000) print(ticker,"long hightrend",highTrend,"lowtrend",lowTrend) def shock_runner(session): shocks = session.getNews() quantity = 50000 for shock in sorted(shocks, key=lambda s: s.elapsed): Mticker = shock.ticker+"-M" Aticker = shock.ticker+"-A" if shock.elapsed < 2: if shock.amount > MAIN_TAKER + BUFFER*2: session.buyM(Mticker, quantity) session.buyM(Aticker, quantity) elif - shock.amount > MAIN_TAKER + BUFFER*2: session.sellM(Mticker, quantity) session.sellM(Aticker, quantity) print('shock', shock.ticker, shock.amount) if shock.elapsed == 2: if shock.amount > MAIN_TAKER + BUFFER*2: session.sellM(Mticker, quantity) session.sellM(Aticker, quantity) elif - shock.amount > MAIN_TAKER + BUFFER*2: session.buyM(Mticker, quantity) session.buyM(Aticker, quantity) print('post shock', shock.ticker, shock.amount) TAKER4 = MAIN_TAKER * 5 def index_arbitrage(session, tickers): secs = session.getSecurities() ETF = secs['ETF'] etfBid = ETF['bid'] etfAsk = ETF['ask'] bestBids = {} bestBidsQ = {} bestAsks = {} bestAsksQ = {} for ticker in tickers: tickerM = ticker+"-M" tickerA = ticker+"-A" Mticker = secs[tickerM] Aticker = secs[tickerA] Mbid = Mticker['bid'] Abid = Aticker['bid'] Mask = Mticker['ask'] Aask = Aticker['ask'] if Mbid >= Abid: bestBids[tickerM] = Mbid bestBidsQ[tickerM] = Mticker['bid_size'] else: bestBids[tickerA] = Abid bestBidsQ[tickerA] = Aticker['bid_size'] if Mask <= Aask: bestAsks[tickerM] = Mask bestAsksQ[tickerM] = Mticker['ask_size'] else: bestAsks[tickerA] = Aask bestAsksQ[tickerA] = Aticker['ask_size'] compositBid = sum(bestBids.values()) compositBidQ = min(bestBidsQ.values()) compositAsk = sum(bestAsks.values()) compositAskQ = min(bestAsksQ.values()) boughtprice = 0 soldprice = 0 if etfBid - compositAsk > TAKER4+BUFFER: quantity = ETF['bid_size'] if ETF['bid_size'] < compositAskQ else compositAskQ if quantity == 0: return quantity = min([quantity, 50000]) soldprice = session.sellM('ETF', quantity) for ticker in bestAsks: boughtprice += session.buyM(ticker, quantity) print('Plan ETF', etfBid, 'Stocks', compositAsk) print('Actual ETF', soldprice, 'Stocks', boughtprice) elif compositBid - etfAsk > TAKER4+BUFFER: quantity = ETF['ask_size'] if ETF['ask_size'] < compositBidQ else compositBidQ if quantity == 0: return quantity = min([quantity, 50000]) for ticker in bestBids: soldprice += session.sellM(ticker, quantity) boughtprice = session.buyM('ETF', quantity) print('Plan Stocks', compositBid, 'ETF', etfAsk) print('Actual Stocks', soldprice, 'ETF', boughtprice) # TODO: send limit orders and use market to cover unfilled ones after def exchange_arbitrage(session, mticker, aticker): global NaN mbook = session.get_book(mticker) masks_room = mbook.asks_room() mbids_room = mbook.bids_room() abook = session.get_book(aticker) aasks_room = abook.asks_room() abids_room = abook.bids_room() # a lot of room, make market orders if mbook.bid_price - abook.ask_price > TAKER+BUFFER*2: quantity = aasks_room if aasks_room < mbids_room else mbids_room quantity = min([quantity, 50000]) session.sellM(mbook.sym, quantity) session.buyM(abook.sym, quantity) elif abook.bid_price - mbook.ask_price > TAKER+BUFFER*2: quantity = aasks_room if aasks_room < mbids_room else mbids_room quantity = min([quantity, 50000]) session.sellM(abook.sym, quantity) session.buyM(mbook.sym, quantity) # only a little room, make limit orders if mbook.bid_price - abook.ask_price > BUFFER: quantity = aasks_room if aasks_room < mbids_room else 
mbids_room quantity = min([quantity, 50000]) session.sell(mbook.sym, mbook.bid_price, quantity) session.buy(abook.sym, abook.ask_price, quantity) elif abook.bid_price - mbook.ask_price > BUFFER: quantity = aasks_room if aasks_room < mbids_room else mbids_room quantity = min([quantity, 50000]) session.sell(abook.sym, abook.bid_price, quantity) session.buy(mbook.sym, mbook.ask_price, quantity) def sigint(signum, frame): global shutdown signal.signal(signal.SIGINT, signal.SIG_DFL) shutdown = True if __name__ == '__main__': signal.signal(signal.SIGINT, sigint) main()
[((430, 4, 430, 48), 'signal.signal', 'signal.signal', ({(430, 18, 430, 31): 'signal.SIGINT', (430, 33, 430, 47): 'signal.SIG_DFL'}, {}), '(signal.SIGINT, signal.SIG_DFL)', False, 'import signal\n'), ((435, 4, 435, 40), 'signal.signal', 'signal.signal', ({(435, 18, 435, 31): 'signal.SIGINT', (435, 33, 435, 39): 'sigint'}, {}), '(signal.SIGINT, sigint)', False, 'import signal\n'), ((100, 23, 100, 41), 'requests.Session', 'requests.Session', ({}, {}), '()', False, 'import requests\n'), ((120, 12, 120, 27), 'time.sleep', 'time.sleep', ({(120, 23, 120, 26): '(0.1)'}, {}), '(0.1)', False, 'import time\n')]
souviksaha97/spydrnet-physical
examples/basic/wire_feedthrough.py
b07bcc152737158ea7cbebf0ef844abe49d29c5e
""" ========================================== Genrating feedthrough from single instance ========================================== This example demostrates how to generate a feedthrough wire connection for a given scalar or vector wires. **Initial Design** .. hdl-diagram:: ../../../examples/basic/_initial_design.v :type: netlistsvg :align: center :module: top **Output1** ``wire0`` feedthough from ``inst_2_1`` .. hdl-diagram:: ../../../examples/basic/_output_wire.v :type: netlistsvg :align: center :module: top **Output2** ``bus_in`` feedthrough from ``inst_1_0`` .. hdl-diagram:: ../../../examples/basic/_output_bus.v :type: netlistsvg :align: center :module: top """ from os import path import spydrnet as sdn import spydrnet_physical as sdnphy netlist = sdnphy.load_netlist_by_name('basic_hierarchy') top = netlist.top_instance.reference cable0 = next(top.get_cables("wire0")) inst2 = next(top.get_instances("inst_2_0")) sdn.compose(netlist, '_initial_design.v', skip_constraints=True) top.create_feedthrough(inst2, cable0) top.create_unconn_wires() sdn.compose(netlist, '_output_wire.v', skip_constraints=True) netlist = sdnphy.load_netlist_by_name('basic_hierarchy') top = netlist.top_instance.reference bus_in = next(top.get_cables("bus_in")) inst1 = next(top.get_instances("inst_1_0")) cables = top.create_feedthrough(inst1, bus_in) top.create_unconn_wires() sdn.compose(netlist, '_output_bus.v', skip_constraints=True)
[((37, 10, 37, 56), 'spydrnet_physical.load_netlist_by_name', 'sdnphy.load_netlist_by_name', ({(37, 38, 37, 55): '"""basic_hierarchy"""'}, {}), "('basic_hierarchy')", True, 'import spydrnet_physical as sdnphy\n'), ((43, 0, 43, 64), 'spydrnet.compose', 'sdn.compose', (), '', True, 'import spydrnet as sdn\n'), ((48, 0, 48, 61), 'spydrnet.compose', 'sdn.compose', (), '', True, 'import spydrnet as sdn\n'), ((51, 10, 51, 56), 'spydrnet_physical.load_netlist_by_name', 'sdnphy.load_netlist_by_name', ({(51, 38, 51, 55): '"""basic_hierarchy"""'}, {}), "('basic_hierarchy')", True, 'import spydrnet_physical as sdnphy\n'), ((59, 0, 59, 60), 'spydrnet.compose', 'sdn.compose', (), '', True, 'import spydrnet as sdn\n')]
sunnyfloyd/panderyx
workflows/workflow.py
82f03625159833930ff044a43a6619ab710ff159
from __future__ import annotations from typing import Optional, Union from tools import tools from exceptions import workflow_exceptions class Workflow: """A class to represent a workflow. Workflow class provides set of methods to manage state of the workflow. It allows for tool insertions, removals and modifications. When workflow is run data flow is built and each tool linked to the workflow instance is executed in determined order. Tool outputs are then consolidated in a JSON format. """ TOOL_CHOICES = { "generic": tools.GenericTool, "large_generic": tools.LargeGenericTool, "input": tools.InputTool, } def __init__(self) -> None: """Initializes Workflow class with root tool. Workflow class is initialized with root tool with tool ID `0`. `_root` points to root tool directly. """ self._root = tools.RootTool(id=0) self._tools = {0: self._root} self._used_ids = {0} def insert_tool( self, tool_choice: str, input_ids: Optional[Union[list[int], int]] = None, output_ids: Optional[Union[list[int], int]] = None, coordinates: Optional[tuple[int, int]] = None, ) -> tools.Tool: """Inserts a new tool to the current workflow. Args: tool_choice (str): determines what tool is created (based on the available choices defined within the Workflow class). input_ids (list[int], int]): starting input or inputs for the tool identified by their IDs. Defaults to None. output_ids (list[int], int): starting output or outputs for the tool identified by their IDs. Defaults to None. coordinates (tuple[int, int]): coordinates for the tool on canvas. Defaults to None. Raises: workflow_exceptions.ToolNotAvailable: indicates that provided string does not refer to an available tool from the Workflow class. Returns: tools.Tool: instance of a Tool's class. """ try: tool_class = self.TOOL_CHOICES[tool_choice] except KeyError: raise workflow_exceptions.ToolNotAvailable next_id = self._get_next_tool_id() tool = tool_class(id=next_id) self._tools[next_id] = tool self._add_tool_id(next_id) if input_ids is not None: self.add_tool_input(tool_id=tool.id, input_ids=input_ids) if output_ids is not None: output_ids = self._clean_tool_ids(output_ids) for output_id in output_ids: self.add_tool_input(tool_id=output_id, input_ids=tool.id) if coordinates is not None: self.set_tool_coordinates(tool_id=tool.id, coordinates=coordinates) return tool def remove_tool(self, tool_ids: Union[list[int], int]) -> None: """Removes existing tool from the current workflow. Removes the tool from the workflow and updates inputs and outputs of the linked tool instances. Args: tool_ids (list[int], int): tool ID or IDs that ought to be removed. Raises: workflow_exceptions.RootCannotBeDeleted: indicates that selected tool for removal is a root which cannot be deleted. """ tool_ids = self._clean_tool_ids(tool_ids) for tool_id in tool_ids: tool = self._get_tool_by_id(tool_id) if tool.is_root: raise workflow_exceptions.RootCannotBeDeleted # remove tool from linked tools' inputs tool_outputs = tool.outputs for output_id in tool_outputs: self.remove_tool_input(tool_id=output_id, input_ids=tool.id) # remove tool from linked tools' outputs tool_inputs = tool.inputs for input_id in tool_inputs: self.remove_tool_input(tool_id=tool.id, input_ids=input_id) del self._tools[tool_id] def add_tool_input( self, tool_id: int, input_ids: Union[list[int], int] ) -> tools.Tool: """Adds new input(s) for the tool existing in the current workflow. Args: tool_id (int): tool ID to which input(s) should be added. 
input_ids (list[int], int]): input(s) to be added to the tool identified by their IDs. Returns: tools.Tool: instance of a Tool's class. """ tool = self._get_tool_by_id(tool_id) input_ids = self._clean_tool_ids(input_ids) for input_id in input_ids: tool.add_input(input_id) self._tools[input_id].add_output(tool_id) return tool def remove_tool_input( self, tool_id: int, input_ids: Union[list[int], int] ) -> tools.Tool: """Removes input(s) from the tool existing in the current workflow. Args: tool_id (int): tool ID from which input(s) should be removed. input_ids (list[int], int]): input(s) to be removed from the tool identified by their IDs. Returns: tools.Tool: instance of a Tool's class. """ tool = self._get_tool_by_id(tool_id) input_ids = self._clean_tool_ids(input_ids) for input_id in input_ids: tool.remove_input(input_id) self._tools[input_id].remove_output(tool_id) return tool def set_tool_config(self, tool_id: int, data: dict) -> tools.Tool: """Sets tool's config to passed data dict. Args: tool_id (int): tool ID for which config should be set. data (dict): dict of parameters for given tool. Returns: tools.Tool: instance of a Tool's class. """ tool = self._get_tool_by_id(tool_id) tool.config = data return tool def set_tool_coordinates( self, tool_id: int, coordinates: Optional[tuple[int, int]] = None ) -> tools.Tool: """Sets (x, y) coordinates for the tool existing in the current workflow. If no coordinates are passed to this method, default coordinates will be calculated using `_get_default_coordinates()` internal method. Args: tool_id (int): tool ID for which coordinates are to be set. coordinates (tuple[int, int]): tuple of (x, y) coordinates. Defaults to None. Returns: tools.Tool: instance of a Tool's class. """ # I need to decide where to put a check if coordinates will fit a canvas tool = self._get_tool_by_id(tool_id) coordinates = ( coordinates if coordinates is not None else self._get_default_coordinates() ) tool.coordinates = coordinates return tool def _get_default_coordinates(self) -> tuple[int, int]: # might require more sophisticated logic in the future return (0, 0) def _get_tool_by_id(self, tool_id: int) -> tools.Tool: """Returns an instance of a Tool class selected by its ID. Args: tool_id (int): tool ID. Raises: workflow_exceptions.ToolDoesNotExist: indicates that for provided ID there is no tool in this workflow. Returns: tools.Tool: instance of a Tool's class. """ try: tool = self._tools[tool_id] except KeyError: raise workflow_exceptions.ToolDoesNotExist return tool def _clean_tool_ids(self, tool_ids: Union[list[int], int]) -> list[int]: """Returns a validated list of tool ID(s). Checks whether passed tool ID(s) exist in the current workflow and returns the list of tool IDs. If at least one of the provided tool IDs is not found, it raises an exception. Args: tool_ids (list[int], int): tool ID(s) to be cleaned. Raises: workflow_exceptions.ToolDoesNotExist: indicates that at least one of the provided tool IDs is not present in the current workflow. Returns: list[int]: list of checked tool IDs. """ cleaned_tool_ids = ( list(set(tool_ids)) if isinstance(tool_ids, list) else [tool_ids] ) if any(tool_id not in self._tools for tool_id in cleaned_tool_ids): raise workflow_exceptions.ToolDoesNotExist return cleaned_tool_ids def _add_tool_id(self, tool_id: int) -> None: """Adds an ID to the used ID pool. Args: tool_id (int): ID to be added to the used ID pool. 
""" self._used_ids.add(tool_id) def _get_next_tool_id(self) -> int: """Returns a next available ID to be used for a tool instance. Returns: int: next available tool ID. """ return max(self._used_ids) + 1 def _build_flow(self) -> None: NotImplementedError def __len__(self) -> int: return len(self._tools) - 1
[((31, 21, 31, 41), 'tools.tools.RootTool', 'tools.RootTool', (), '', False, 'from tools import tools\n')]
namtel-hp/fundraising-website
team_fundraising/text.py
30cb0cd2bd4505454295d11715e70712525234a3
class Donation_text: # Shown as a message across the top of the page on return from a donation # used in views.py:new_donation() thank_you = ( "Thank you for your donation. " "You may need to refresh this page to see the donation." ) confirmation_email_subject = ( 'Thank you for donating to the Triple Crown for Heart! ' ) # Start of the email sent confirming the paypal payment has gone through # used in paypal.py:process_paypal() confirmation_email_opening = ( 'Thank you for your donation of ' ) # Closing of the email sent confirming the paypal payment has gone through # used in paypal.py:process_paypal() confirmation_email_closing = ( '.\n\nFor all donations over $20, you will receive a tax receipt for ' 'the 2019 tax year.' '\nYour PayPal receipt should arrive in a separate email.\n' ) notification_email_subject = ( "You got a donation!" ) notification_email_opening = ( "Great news! You've just received a donation of " ) notification_email_closing = ( "\n\nAwesome work! They would probably appreciate " "a quick thank you email.\n\n" "-- Triple Crown for Heart\n" ) class Fundraiser_text: # Subject of the email sent on signup signup_email_subject = ( "Welcome to fundraising for the Triple Crown for Heart!" ) # Start of the email sent when someone signs up # used in views.py:signup() signup_email_opening = ( "Thanks for signing up to fundraise with us!\n" "Your fundraising page can be found at:\n" ) # Closing of the email sent when someone signs up # used in views.py:signup() signup_email_closing = ( '\n\nYou can change your information by using the "Login" link at the ' 'top of that page.' '\n\nThe easiest way to start fundraising is to post the above link ' 'on social media or write a short email to your friends telling them ' 'about your ride.' '\nDon\'t forget to include the link to your page!\n' ) # Message show at the top of the fundraiser page after signing up # used in views.py:signup() signup_return_message = ( "Thank you for signing up. Sharing your fundraiser page on social " "media or over email is the best way to get donations." ) signup_wrong_password_existing_user = ( "The username already exists, but the password entered is incorrect. " "If you were already a fundraiser for a previous campaign, please " "enter your previous password or use " "<a href='/team_fundraising/accounts/password_reset/'>" "Forgot your password</a>. If this is your first campaign, " "please choose a different username." )
[]
wagtail/wagtail-live
tests/wagtail_live/test_apps.py
dd769be089d457cf36db2506520028bc5f506ac3
from django.apps import apps
from django.test import override_settings

from wagtail_live.signals import live_page_update


def test_live_page_update_signal_receivers():
    assert len(live_page_update.receivers) == 0


@override_settings(
    WAGTAIL_LIVE_PUBLISHER="tests.testapp.publishers.DummyWebsocketPublisher"
)
def test_live_page_update_signal_receivers_websocket():
    app_config = apps.get_app_config("wagtail_live")
    app_config.ready()

    try:
        # Receiver should be connected, no IndexError
        receiver = live_page_update.receivers[0]
    finally:
        live_page_update.disconnect(receiver)
[((11, 1, 13, 1), 'django.test.override_settings', 'override_settings', (), '', False, 'from django.test import override_settings\n'), ((15, 17, 15, 52), 'django.apps.apps.get_app_config', 'apps.get_app_config', ({(15, 37, 15, 51): '"""wagtail_live"""'}, {}), "('wagtail_live')", False, 'from django.apps import apps\n'), ((22, 8, 22, 45), 'wagtail_live.signals.live_page_update.disconnect', 'live_page_update.disconnect', ({(22, 36, 22, 44): 'receiver'}, {}), '(receiver)', False, 'from wagtail_live.signals import live_page_update\n')]
vtta2008/pipelineTool
PLM/options.py
2431d2fc987e3b31f2a6a63427fee456fa0765a0
# -*- coding: utf-8 -*- """ Script Name: Author: Do Trinh/Jimmy - 3D artist. Description: """ # ------------------------------------------------------------------------------------------------------------- """ Import """ import os from PySide2.QtWidgets import (QFrame, QStyle, QAbstractItemView, QSizePolicy, QLineEdit, QPlainTextEdit, QGraphicsItem, QGraphicsView, QGraphicsScene, QRubberBand, QCalendarWidget, ) from PySide2.QtCore import QEvent, QSettings, QSize, Qt, QDateTime from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor SingleSelection = QCalendarWidget.SingleSelection NoSelection = QCalendarWidget.NoSelection SingleLetterDay = QCalendarWidget.SingleLetterDayNames ShortDay = QCalendarWidget.ShortDayNames LongDay = QCalendarWidget.LongDayNames NoHoriHeader = QCalendarWidget.NoHorizontalHeader NoVertHeader = QCalendarWidget.NoVerticalHeader IsoWeekNum = QCalendarWidget.ISOWeekNumbers SelectMode = QCalendarWidget.SelectionMode HoriHeaderFm = QCalendarWidget.HorizontalHeaderFormat VertHeaderFm = QCalendarWidget.VerticalHeaderFormat DayOfWeek = Qt.DayOfWeek Sunday = Qt.Sunday Monday = Qt.Monday Tuesday = Qt.Tuesday Wednesday = Qt.Wednesday Thursday = Qt.Thursday Friday = Qt.Friday Saturday = Qt.Saturday ICONSIZE = 32 ICONBUFFER = -1 BTNTAGSIZE = QSize(87, 20) TAGBTNSIZE = QSize(87-1, 20-1) BTNICONSIZE = QSize(ICONSIZE, ICONSIZE) ICONBTNSIZE = QSize(ICONSIZE+ICONBUFFER, ICONSIZE+ICONBUFFER) DAMG_LOGO_COLOR = QColor(0, 114, 188, 255) # Basic color GlobalColor = Qt.GlobalColor WHITE = QColor(Qt.white) LIGHTGRAY = QColor(Qt.lightGray) GRAY = QColor(Qt.gray) DARKGRAY = QColor(Qt.darkGray) BLACK = QColor(Qt.black) RED = QColor(Qt.red) GREEN = QColor(Qt.green) BLUE = QColor(Qt.blue) DARKRED = QColor(Qt.darkRed) DARKGREEN = QColor(Qt.darkGreen) DARKBLUE = QColor(Qt.darkBlue) CYAN = QColor(Qt.cyan) MAGENTA = QColor(Qt.magenta) YELLOW = QColor(Qt.yellow) DARKCYAN = QColor(Qt.darkCyan) DARKMAGENTA = QColor(Qt.darkMagenta) DARKYELLOW = QColor(Qt.darkYellow) # Dark Palette color Color_BACKGROUND_LIGHT = QColor('#505F69') COLOR_BACKGROUND_NORMAL = QColor('#32414B') COLOR_BACKGROUND_DARK = QColor('#19232D') COLOR_FOREGROUND_LIGHT = QColor('#F0F0F0') COLOR_FOREGROUND_NORMAL = QColor('#AAAAAA') COLOR_FOREGROUND_DARK = QColor('#787878') COLOR_SELECTION_LIGHT = QColor('#148CD2') COLOR_SELECTION_NORMAL = QColor('#1464A0') COLOR_SELECTION_DARK = QColor('#14506E') # Nice color blush = QColor(246, 202, 203, 255) petal = QColor(247, 170, 189, 255) petunia = QColor(231, 62, 151, 255) deep_pink = QColor(229, 2, 120, 255) melon = QColor(241, 118, 110, 255) pomegranate = QColor(178, 27, 32, 255) poppy_red = QColor(236, 51, 39, 255) orange_red = QColor(240, 101, 53, 255) olive = QColor(174, 188, 43, 255) spring = QColor(227, 229, 121, 255) yellow = QColor(255, 240, 29, 255) mango = QColor(254, 209, 26, 255) cantaloupe = QColor(250, 176, 98, 255) tangelo = QColor(247, 151, 47, 255) burnt_orange = QColor(236, 137, 36, 255) bright_orange = QColor(242, 124, 53, 255) moss = QColor(176, 186, 39, 255) sage = QColor(212, 219, 145, 255) apple = QColor(178, 215, 140, 255) grass = QColor(111, 178, 68, 255) forest = QColor(69, 149, 62, 255) peacock = QColor(21, 140, 167, 255) teal = QColor(24, 157, 193, 255) aqua = QColor(153, 214, 218, 255) violet = QColor(55, 52, 144, 255) deep_blue = QColor(15, 86, 163, 255) hydrangea = QColor(150, 191, 229, 255) sky = QColor(139, 210, 244, 255) dusk = QColor(16, 102, 162, 255) midnight = QColor(14, 90, 131, 255) seaside = QColor(87, 154, 188, 255) poolside = QColor(137, 203, 
225, 255) eggplant = QColor(86, 5, 79, 255) lilac = QColor(222, 192, 219, 255) chocolate = QColor(87, 43, 3, 255) blackout = QColor(19, 17, 15, 255) stone = QColor(125, 127, 130, 255) gravel = QColor(181, 182, 185, 255) pebble = QColor(217, 212, 206, 255) sand = QColor(185, 172, 151, 255) ignoreARM = Qt.IgnoreAspectRatio scrollAsNeed = Qt.ScrollBarAsNeeded scrollOff = Qt.ScrollBarAlwaysOff scrollOn = Qt.ScrollBarAlwaysOn SiPoMin = QSizePolicy.Minimum # Size policy SiPoMax = QSizePolicy.Maximum SiPoExp = QSizePolicy.Expanding SiPoPre = QSizePolicy.Preferred SiPoIgn = QSizePolicy.Ignored frameStyle = QFrame.Sunken | QFrame.Panel center = Qt.AlignCenter # Alignment right = Qt.AlignRight left = Qt.AlignLeft top = Qt.AlignTop bottom = Qt.AlignBottom hori = Qt.Horizontal vert = Qt.Vertical dockL = Qt.LeftDockWidgetArea # Docking area dockR = Qt.RightDockWidgetArea dockT = Qt.TopDockWidgetArea dockB = Qt.BottomDockWidgetArea dockAll = Qt.AllDockWidgetAreas datetTimeStamp = QDateTime.currentDateTime().toString("hh:mm - dd MMMM yy") # datestamp PRS = dict(password = QLineEdit.Password, center = center , left = left , right = right, spmax = SiPoMax , sppre = SiPoPre, spexp = SiPoExp, spign = SiPoIgn, expanding = QSizePolicy.Expanding, spmin = SiPoMin,) # ------------------------------------------------------------------------------------------------------------- """ Event """ NO_WRAP = QPlainTextEdit.NoWrap NO_FRAME = QPlainTextEdit.NoFrame ELIDE_RIGHT = Qt.ElideRight ELIDE_NONE = Qt.ElideNone # ------------------------------------------------------------------------------------------------------------- """ Window state """ StateNormal = Qt.WindowNoState StateMax = Qt.WindowMaximized StateMin = Qt.WindowMinimized State_Selected = QStyle.State_Selected # ------------------------------------------------------------------------------------------------------------- """ Nodegraph setting variables """ ASPEC_RATIO = Qt.KeepAspectRatio SMOOTH_TRANS = Qt.SmoothTransformation SCROLLBAROFF = Qt.ScrollBarAlwaysOff # Scrollbar SCROLLBARON = Qt.ScrollBarAlwaysOn SCROLLBARNEED = Qt.ScrollBarAsNeeded WORD_WRAP = Qt.TextWordWrap INTERSECT_ITEM_SHAPE = Qt.IntersectsItemShape CONTAIN_ITEM_SHAPE = Qt.ContainsItemShape MATCH_EXACTLY = Qt.MatchExactly DRAG_ONLY = QAbstractItemView.DragOnly # ------------------------------------------------------------------------------------------------------------- """ UI flags """ ITEMENABLE = Qt.ItemIsEnabled ITEMMOVEABLE = QGraphicsItem.ItemIsMovable ITEMSENDGEOCHANGE = QGraphicsItem.ItemSendsGeometryChanges ITEMSCALECHANGE = QGraphicsItem.ItemScaleChange ITEMPOSCHANGE = QGraphicsItem.ItemPositionChange DEVICECACHE = QGraphicsItem.DeviceCoordinateCache SELECTABLE = QGraphicsItem.ItemIsSelectable MOVEABLE = QGraphicsItem.ItemIsMovable FOCUSABLE = QGraphicsItem.ItemIsFocusable PANEL = QGraphicsItem.ItemIsPanel NOINDEX = QGraphicsScene.NoIndex # Scene RUBBER_DRAG = QGraphicsView.RubberBandDrag # Viewer RUBBER_REC = QRubberBand.Rectangle POS_CHANGE = QGraphicsItem.ItemPositionChange NODRAG = QGraphicsView.NoDrag NOFRAME = QGraphicsView.NoFrame ANCHOR_NO = QGraphicsView.NoAnchor ANCHOR_UNDERMICE = QGraphicsView.AnchorUnderMouse ANCHOR_CENTER = QGraphicsView.AnchorViewCenter CACHE_BG = QGraphicsView.CacheBackground UPDATE_VIEWRECT = QGraphicsView.BoundingRectViewportUpdate UPDATE_FULLVIEW = QGraphicsView.FullViewportUpdate UPDATE_SMARTVIEW = QGraphicsView.SmartViewportUpdate UPDATE_BOUNDINGVIEW = QGraphicsView.BoundingRectViewportUpdate UPDATE_MINIMALVIEW = 
QGraphicsView.MinimalViewportUpdate STAY_ON_TOP = Qt.WindowStaysOnTopHint STRONG_FOCUS = Qt.StrongFocus SPLASHSCREEN = Qt.SplashScreen FRAMELESS = Qt.FramelessWindowHint CUSTOMIZE = Qt.CustomizeWindowHint CLOSEBTN = Qt.WindowCloseButtonHint MINIMIZEBTN = Qt.WindowMinimizeButtonHint AUTO_COLOR = Qt.AutoColor # ------------------------------------------------------------------------------------------------------------- """ Drawing """ ANTIALIAS = QPainter.Antialiasing # Painter ANTIALIAS_TEXT = QPainter.TextAntialiasing ANTIALIAS_HIGH_QUALITY = QPainter.HighQualityAntialiasing SMOOTH_PIXMAP_TRANSFORM = QPainter.SmoothPixmapTransform NON_COSMETIC_PEN = QPainter.NonCosmeticDefaultPen NO_BRUSH = Qt.NoBrush # Brush NO_PEN = Qt.NoPen # Pen ROUND_CAP = Qt.RoundCap ROUND_JOIN = Qt.RoundJoin PATTERN_SOLID = Qt.SolidPattern # Pattern LINE_SOLID = Qt.SolidLine # Line LINE_DASH = Qt.DashLine LINE_DOT = Qt.DotLine LINE_DASH_DOT = Qt.DashDotDotLine TRANSPARENT = Qt.transparent TRANSPARENT_MODE = Qt.TransparentMode # ------------------------------------------------------------------------------------------------------------- """ Meta Object """ QUEUEDCONNECTION = Qt.QueuedConnection # ------------------------------------------------------------------------------------------------------------- """ Keyboard and cursor """ TEXT_BOLD = QFont.Bold TEXT_NORMAL = QFont.Normal MONO_SPACE = QFont.Monospace TEXT_MENEOMIC = Qt.TextShowMnemonic KEY_PRESS = QEvent.KeyPress KEY_RELEASE = QEvent.KeyRelease KEY_ALT = Qt.Key_Alt KEY_DEL = Qt.Key_Delete KEY_TAB = Qt.Key_Tab KEY_SHIFT = Qt.Key_Shift KEY_CTRL = Qt.Key_Control KEY_BACKSPACE = Qt.Key_Backspace KEY_ENTER = Qt.Key_Enter KEY_RETURN = Qt.Key_Return KEY_F = Qt.Key_F KEY_S = Qt.Key_S ALT_MODIFIER = Qt.AltModifier CTRL_MODIFIER = Qt.ControlModifier SHIFT_MODIFIER = Qt.ShiftModifier NO_MODIFIER = Qt.NoModifier CLOSE_HAND_CUSOR = Qt.ClosedHandCursor SIZEF_CURSOR = Qt.SizeFDiagCursor windows = os.name = 'nt' DMK = Qt.AltModifier if windows else CTRL_MODIFIER MOUSE_LEFT = Qt.LeftButton MOUSE_RIGHT = Qt.RightButton MOUSE_MIDDLE = Qt.MiddleButton NO_BUTTON = Qt.NoButton ARROW_NONE = Qt.NoArrow # Cursor CURSOR_ARROW = Qt.ArrowCursor CURSOR_SIZEALL = Qt.SizeAllCursor MOVE_OPERATION = QTextCursor.MoveOperation MOVE_ANCHOR = QTextCursor.MoveMode.MoveAnchor KEEP_ANCHOR = QTextCursor.MoveMode.KeepAnchor ACTION_MOVE = Qt.MoveAction # Action ignoreARM = Qt.IgnoreAspectRatio # ------------------------------------------------------------------------------------------------------------- """ Set number """ RELATIVE_SIZE = Qt.RelativeSize # Size INI = QSettings.IniFormat NATIVE = QSettings.NativeFormat INVALID = QSettings.InvalidFormat SYS_SCOPE = QSettings.SystemScope USER_SCOPE = QSettings.UserScope # ------------------------------------------------------------------------------------------------------------- # Created by Trinh Do on 5/6/2020 - 3:13 AM # © 2017 - 2020 DAMGteam. All rights reserved
[((46, 30, 46, 43), 'PySide2.QtCore.QSize', 'QSize', ({(46, 36, 46, 38): '87', (46, 40, 46, 42): '20'}, {}), '(87, 20)', False, 'from PySide2.QtCore import QEvent, QSettings, QSize, Qt, QDateTime\n'), ((47, 30, 47, 47), 'PySide2.QtCore.QSize', 'QSize', ({(47, 36, 47, 40): '87 - 1', (47, 42, 47, 46): '20 - 1'}, {}), '(87 - 1, 20 - 1)', False, 'from PySide2.QtCore import QEvent, QSettings, QSize, Qt, QDateTime\n'), ((48, 30, 48, 55), 'PySide2.QtCore.QSize', 'QSize', ({(48, 36, 48, 44): 'ICONSIZE', (48, 46, 48, 54): 'ICONSIZE'}, {}), '(ICONSIZE, ICONSIZE)', False, 'from PySide2.QtCore import QEvent, QSettings, QSize, Qt, QDateTime\n'), ((49, 30, 49, 77), 'PySide2.QtCore.QSize', 'QSize', ({(49, 36, 49, 55): 'ICONSIZE + ICONBUFFER', (49, 57, 49, 76): 'ICONSIZE + ICONBUFFER'}, {}), '(ICONSIZE + ICONBUFFER, ICONSIZE + ICONBUFFER)', False, 'from PySide2.QtCore import QEvent, QSettings, QSize, Qt, QDateTime\n'), ((52, 30, 52, 54), 'PySide2.QtGui.QColor', 'QColor', ({(52, 37, 52, 38): '0', (52, 40, 52, 43): '114', (52, 45, 52, 48): '188', (52, 50, 52, 53): '255'}, {}), '(0, 114, 188, 255)', False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((58, 30, 58, 46), 'PySide2.QtGui.QColor', 'QColor', ({(58, 37, 58, 45): 'Qt.white'}, {}), '(Qt.white)', False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((59, 30, 59, 50), 'PySide2.QtGui.QColor', 'QColor', ({(59, 37, 59, 49): 'Qt.lightGray'}, {}), '(Qt.lightGray)', False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((60, 30, 60, 45), 'PySide2.QtGui.QColor', 'QColor', ({(60, 37, 60, 44): 'Qt.gray'}, {}), '(Qt.gray)', False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((61, 30, 61, 49), 'PySide2.QtGui.QColor', 'QColor', ({(61, 37, 61, 48): 'Qt.darkGray'}, {}), '(Qt.darkGray)', False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((62, 30, 62, 46), 'PySide2.QtGui.QColor', 'QColor', ({(62, 37, 62, 45): 'Qt.black'}, {}), '(Qt.black)', False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((63, 30, 63, 44), 'PySide2.QtGui.QColor', 'QColor', ({(63, 37, 63, 43): 'Qt.red'}, {}), '(Qt.red)', False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((64, 30, 64, 46), 'PySide2.QtGui.QColor', 'QColor', ({(64, 37, 64, 45): 'Qt.green'}, {}), '(Qt.green)', False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((65, 30, 65, 45), 'PySide2.QtGui.QColor', 'QColor', ({(65, 37, 65, 44): 'Qt.blue'}, {}), '(Qt.blue)', False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((66, 30, 66, 48), 'PySide2.QtGui.QColor', 'QColor', ({(66, 37, 66, 47): 'Qt.darkRed'}, {}), '(Qt.darkRed)', False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((67, 30, 67, 50), 'PySide2.QtGui.QColor', 'QColor', ({(67, 37, 67, 49): 'Qt.darkGreen'}, {}), '(Qt.darkGreen)', False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((68, 30, 68, 49), 'PySide2.QtGui.QColor', 'QColor', ({(68, 37, 68, 48): 'Qt.darkBlue'}, {}), '(Qt.darkBlue)', False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((69, 30, 69, 45), 'PySide2.QtGui.QColor', 'QColor', ({(69, 37, 69, 44): 'Qt.cyan'}, {}), '(Qt.cyan)', False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((70, 30, 70, 48), 'PySide2.QtGui.QColor', 'QColor', ({(70, 37, 70, 47): 'Qt.magenta'}, {}), '(Qt.magenta)', False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), 
((71, 30, 71, 47), 'PySide2.QtGui.QColor', 'QColor', ({(71, 37, 71, 46): 'Qt.yellow'}, {}), '(Qt.yellow)', False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((72, 30, 72, 49), 'PySide2.QtGui.QColor', 'QColor', ({(72, 37, 72, 48): 'Qt.darkCyan'}, {}), '(Qt.darkCyan)', False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((73, 30, 73, 52), 'PySide2.QtGui.QColor', 'QColor', ({(73, 37, 73, 51): 'Qt.darkMagenta'}, {}), '(Qt.darkMagenta)', False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((74, 30, 74, 51), 'PySide2.QtGui.QColor', 'QColor', ({(74, 37, 74, 50): 'Qt.darkYellow'}, {}), '(Qt.darkYellow)', False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((77, 30, 77, 47), 'PySide2.QtGui.QColor', 'QColor', ({(77, 37, 77, 46): '"""#505F69"""'}, {}), "('#505F69')", False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((78, 30, 78, 47), 'PySide2.QtGui.QColor', 'QColor', ({(78, 37, 78, 46): '"""#32414B"""'}, {}), "('#32414B')", False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((79, 30, 79, 47), 'PySide2.QtGui.QColor', 'QColor', ({(79, 37, 79, 46): '"""#19232D"""'}, {}), "('#19232D')", False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((81, 30, 81, 47), 'PySide2.QtGui.QColor', 'QColor', ({(81, 37, 81, 46): '"""#F0F0F0"""'}, {}), "('#F0F0F0')", False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((82, 30, 82, 47), 'PySide2.QtGui.QColor', 'QColor', ({(82, 37, 82, 46): '"""#AAAAAA"""'}, {}), "('#AAAAAA')", False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((83, 30, 83, 47), 'PySide2.QtGui.QColor', 'QColor', ({(83, 37, 83, 46): '"""#787878"""'}, {}), "('#787878')", False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((85, 30, 85, 47), 'PySide2.QtGui.QColor', 'QColor', ({(85, 37, 85, 46): '"""#148CD2"""'}, {}), "('#148CD2')", False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((86, 30, 86, 47), 'PySide2.QtGui.QColor', 'QColor', ({(86, 37, 86, 46): '"""#1464A0"""'}, {}), "('#1464A0')", False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((87, 30, 87, 47), 'PySide2.QtGui.QColor', 'QColor', ({(87, 37, 87, 46): '"""#14506E"""'}, {}), "('#14506E')", False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((90, 30, 90, 56), 'PySide2.QtGui.QColor', 'QColor', ({(90, 37, 90, 40): '246', (90, 42, 90, 45): '202', (90, 47, 90, 50): '203', (90, 52, 90, 55): '255'}, {}), '(246, 202, 203, 255)', False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((91, 30, 91, 56), 'PySide2.QtGui.QColor', 'QColor', ({(91, 37, 91, 40): '247', (91, 42, 91, 45): '170', (91, 47, 91, 50): '189', (91, 52, 91, 55): '255'}, {}), '(247, 170, 189, 255)', False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((92, 30, 92, 55), 'PySide2.QtGui.QColor', 'QColor', ({(92, 37, 92, 40): '231', (92, 42, 92, 44): '62', (92, 46, 92, 49): '151', (92, 51, 92, 54): '255'}, {}), '(231, 62, 151, 255)', False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((93, 30, 93, 54), 'PySide2.QtGui.QColor', 'QColor', ({(93, 37, 93, 40): '229', (93, 42, 93, 43): '2', (93, 45, 93, 48): '120', (93, 50, 93, 53): '255'}, {}), '(229, 2, 120, 255)', False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((94, 30, 94, 56), 'PySide2.QtGui.QColor', 'QColor', ({(94, 37, 94, 40): '241', 
(94, 42, 94, 45): '118', (94, 47, 94, 50): '110', (94, 52, 94, 55): '255'}, {}), '(241, 118, 110, 255)', False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((95, 30, 95, 54), 'PySide2.QtGui.QColor', 'QColor', ({(95, 37, 95, 40): '178', (95, 42, 95, 44): '27', (95, 46, 95, 48): '32', (95, 50, 95, 53): '255'}, {}), '(178, 27, 32, 255)', False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((96, 30, 96, 54), 'PySide2.QtGui.QColor', 'QColor', ({(96, 37, 96, 40): '236', (96, 42, 96, 44): '51', (96, 46, 96, 48): '39', (96, 50, 96, 53): '255'}, {}), '(236, 51, 39, 255)', False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((97, 30, 97, 55), 'PySide2.QtGui.QColor', 'QColor', ({(97, 37, 97, 40): '240', (97, 42, 97, 45): '101', (97, 47, 97, 49): '53', (97, 51, 97, 54): '255'}, {}), '(240, 101, 53, 255)', False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((98, 30, 98, 55), 'PySide2.QtGui.QColor', 'QColor', ({(98, 37, 98, 40): '174', (98, 42, 98, 45): '188', (98, 47, 98, 49): '43', (98, 51, 98, 54): '255'}, {}), '(174, 188, 43, 255)', False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((99, 30, 99, 56), 'PySide2.QtGui.QColor', 'QColor', ({(99, 37, 99, 40): '227', (99, 42, 99, 45): '229', (99, 47, 99, 50): '121', (99, 52, 99, 55): '255'}, {}), '(227, 229, 121, 255)', False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((100, 30, 100, 55), 'PySide2.QtGui.QColor', 'QColor', ({(100, 37, 100, 40): '255', (100, 42, 100, 45): '240', (100, 47, 100, 49): '29', (100, 51, 100, 54): '255'}, {}), '(255, 240, 29, 255)', False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((101, 30, 101, 55), 'PySide2.QtGui.QColor', 'QColor', ({(101, 37, 101, 40): '254', (101, 42, 101, 45): '209', (101, 47, 101, 49): '26', (101, 51, 101, 54): '255'}, {}), '(254, 209, 26, 255)', False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((102, 30, 102, 55), 'PySide2.QtGui.QColor', 'QColor', ({(102, 37, 102, 40): '250', (102, 42, 102, 45): '176', (102, 47, 102, 49): '98', (102, 51, 102, 54): '255'}, {}), '(250, 176, 98, 255)', False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((103, 30, 103, 55), 'PySide2.QtGui.QColor', 'QColor', ({(103, 37, 103, 40): '247', (103, 42, 103, 45): '151', (103, 47, 103, 49): '47', (103, 51, 103, 54): '255'}, {}), '(247, 151, 47, 255)', False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((104, 30, 104, 55), 'PySide2.QtGui.QColor', 'QColor', ({(104, 37, 104, 40): '236', (104, 42, 104, 45): '137', (104, 47, 104, 49): '36', (104, 51, 104, 54): '255'}, {}), '(236, 137, 36, 255)', False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((105, 30, 105, 55), 'PySide2.QtGui.QColor', 'QColor', ({(105, 37, 105, 40): '242', (105, 42, 105, 45): '124', (105, 47, 105, 49): '53', (105, 51, 105, 54): '255'}, {}), '(242, 124, 53, 255)', False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((106, 30, 106, 55), 'PySide2.QtGui.QColor', 'QColor', ({(106, 37, 106, 40): '176', (106, 42, 106, 45): '186', (106, 47, 106, 49): '39', (106, 51, 106, 54): '255'}, {}), '(176, 186, 39, 255)', False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((107, 30, 107, 56), 'PySide2.QtGui.QColor', 'QColor', ({(107, 37, 107, 40): '212', (107, 42, 107, 45): '219', (107, 47, 107, 50): '145', (107, 52, 107, 55): '255'}, {}), '(212, 219, 145, 255)', 
False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((108, 30, 108, 56), 'PySide2.QtGui.QColor', 'QColor', ({(108, 37, 108, 40): '178', (108, 42, 108, 45): '215', (108, 47, 108, 50): '140', (108, 52, 108, 55): '255'}, {}), '(178, 215, 140, 255)', False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((109, 30, 109, 55), 'PySide2.QtGui.QColor', 'QColor', ({(109, 37, 109, 40): '111', (109, 42, 109, 45): '178', (109, 47, 109, 49): '68', (109, 51, 109, 54): '255'}, {}), '(111, 178, 68, 255)', False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((110, 30, 110, 54), 'PySide2.QtGui.QColor', 'QColor', ({(110, 37, 110, 39): '69', (110, 41, 110, 44): '149', (110, 46, 110, 48): '62', (110, 50, 110, 53): '255'}, {}), '(69, 149, 62, 255)', False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((111, 30, 111, 55), 'PySide2.QtGui.QColor', 'QColor', ({(111, 37, 111, 39): '21', (111, 41, 111, 44): '140', (111, 46, 111, 49): '167', (111, 51, 111, 54): '255'}, {}), '(21, 140, 167, 255)', False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((112, 30, 112, 55), 'PySide2.QtGui.QColor', 'QColor', ({(112, 37, 112, 39): '24', (112, 41, 112, 44): '157', (112, 46, 112, 49): '193', (112, 51, 112, 54): '255'}, {}), '(24, 157, 193, 255)', False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((113, 30, 113, 56), 'PySide2.QtGui.QColor', 'QColor', ({(113, 37, 113, 40): '153', (113, 42, 113, 45): '214', (113, 47, 113, 50): '218', (113, 52, 113, 55): '255'}, {}), '(153, 214, 218, 255)', False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((114, 30, 114, 54), 'PySide2.QtGui.QColor', 'QColor', ({(114, 37, 114, 39): '55', (114, 41, 114, 43): '52', (114, 45, 114, 48): '144', (114, 50, 114, 53): '255'}, {}), '(55, 52, 144, 255)', False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((115, 30, 115, 54), 'PySide2.QtGui.QColor', 'QColor', ({(115, 37, 115, 39): '15', (115, 41, 115, 43): '86', (115, 45, 115, 48): '163', (115, 50, 115, 53): '255'}, {}), '(15, 86, 163, 255)', False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((116, 30, 116, 56), 'PySide2.QtGui.QColor', 'QColor', ({(116, 37, 116, 40): '150', (116, 42, 116, 45): '191', (116, 47, 116, 50): '229', (116, 52, 116, 55): '255'}, {}), '(150, 191, 229, 255)', False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((117, 30, 117, 56), 'PySide2.QtGui.QColor', 'QColor', ({(117, 37, 117, 40): '139', (117, 42, 117, 45): '210', (117, 47, 117, 50): '244', (117, 52, 117, 55): '255'}, {}), '(139, 210, 244, 255)', False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((118, 30, 118, 55), 'PySide2.QtGui.QColor', 'QColor', ({(118, 37, 118, 39): '16', (118, 41, 118, 44): '102', (118, 46, 118, 49): '162', (118, 51, 118, 54): '255'}, {}), '(16, 102, 162, 255)', False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((119, 30, 119, 54), 'PySide2.QtGui.QColor', 'QColor', ({(119, 37, 119, 39): '14', (119, 41, 119, 43): '90', (119, 45, 119, 48): '131', (119, 50, 119, 53): '255'}, {}), '(14, 90, 131, 255)', False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((120, 30, 120, 55), 'PySide2.QtGui.QColor', 'QColor', ({(120, 37, 120, 39): '87', (120, 41, 120, 44): '154', (120, 46, 120, 49): '188', (120, 51, 120, 54): '255'}, {}), '(87, 154, 188, 255)', False, 'from PySide2.QtGui import QColor, QPainter, 
QFont, QTextCursor\n'), ((121, 30, 121, 56), 'PySide2.QtGui.QColor', 'QColor', ({(121, 37, 121, 40): '137', (121, 42, 121, 45): '203', (121, 47, 121, 50): '225', (121, 52, 121, 55): '255'}, {}), '(137, 203, 225, 255)', False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((122, 30, 122, 52), 'PySide2.QtGui.QColor', 'QColor', ({(122, 37, 122, 39): '86', (122, 41, 122, 42): '5', (122, 44, 122, 46): '79', (122, 48, 122, 51): '255'}, {}), '(86, 5, 79, 255)', False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((123, 30, 123, 56), 'PySide2.QtGui.QColor', 'QColor', ({(123, 37, 123, 40): '222', (123, 42, 123, 45): '192', (123, 47, 123, 50): '219', (123, 52, 123, 55): '255'}, {}), '(222, 192, 219, 255)', False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((124, 30, 124, 52), 'PySide2.QtGui.QColor', 'QColor', ({(124, 37, 124, 39): '87', (124, 41, 124, 43): '43', (124, 45, 124, 46): '3', (124, 48, 124, 51): '255'}, {}), '(87, 43, 3, 255)', False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((125, 30, 125, 53), 'PySide2.QtGui.QColor', 'QColor', ({(125, 37, 125, 39): '19', (125, 41, 125, 43): '17', (125, 45, 125, 47): '15', (125, 49, 125, 52): '255'}, {}), '(19, 17, 15, 255)', False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((126, 30, 126, 56), 'PySide2.QtGui.QColor', 'QColor', ({(126, 37, 126, 40): '125', (126, 42, 126, 45): '127', (126, 47, 126, 50): '130', (126, 52, 126, 55): '255'}, {}), '(125, 127, 130, 255)', False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((127, 30, 127, 56), 'PySide2.QtGui.QColor', 'QColor', ({(127, 37, 127, 40): '181', (127, 42, 127, 45): '182', (127, 47, 127, 50): '185', (127, 52, 127, 55): '255'}, {}), '(181, 182, 185, 255)', False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((128, 30, 128, 56), 'PySide2.QtGui.QColor', 'QColor', ({(128, 37, 128, 40): '217', (128, 42, 128, 45): '212', (128, 47, 128, 50): '206', (128, 52, 128, 55): '255'}, {}), '(217, 212, 206, 255)', False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((129, 30, 129, 56), 'PySide2.QtGui.QColor', 'QColor', ({(129, 37, 129, 40): '185', (129, 42, 129, 45): '172', (129, 47, 129, 50): '151', (129, 52, 129, 55): '255'}, {}), '(185, 172, 151, 255)', False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((161, 31, 161, 58), 'PySide2.QtCore.QDateTime.currentDateTime', 'QDateTime.currentDateTime', ({}, {}), '()', False, 'from PySide2.QtCore import QEvent, QSettings, QSize, Qt, QDateTime\n')]
Nyapy/FMTG
Crawling/ssafyCrawling.py
dcf0a35dbbcd50d5bc861b04ac0db41d27e57b6e
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import sys
import time
import urllib.request
import os

sys.stdin = open('idpwd.txt')
site = input()
id = input()
pwd = input()

# absolute path to the web driver used by selenium
chromedriver = 'C:\Webdriver\chromedriver.exe'

# attach the previously installed chromedriver to selenium's webdriver
driver = webdriver.Chrome(chromedriver)

# crawl the target page with the driver
driver.get(site)

driver.find_element_by_name('userId').send_keys(id)
driver.find_element_by_name('userPwd').send_keys(pwd)
driver.find_element_by_class_name('form-btn').click()
driver.set_window_size(1600, 800)
driver.find_element_by_xpath("//a[@href='/edu/lectureroom/openlearning/openLearningList.do']/span").click()

# driver.find_element_by_id('searchContNm').send_keys('aps')
#
# driver.find_element_by_xpath("//button[@onclick='fnSearch();']").click()

driver.find_elements_by_xpath("//*[contains(text(), '5기_B반_Java(1)')]")[0].click()
driver.find_element_by_xpath("//span[@class='file-name']").click()
driver.switch_to.window(driver.window_handles[1])

print(driver.find_elements_by_xpath("//button[@title='다음 페이지']")[0].get_attribute('disabled'))

# driver.find_elements_by_xpath("//button[@title='마지막 페이지']")[0].click()
# print(driver.find_elements_by_xpath("//button[@title='다음 페이지']")[0].get_attribute('disabled'))

# getting the url + practicing the find function
# pre = driver.current_url
# find = pre.find('/index.html')
# url = pre[:find]

# src = driver.find_element_by_class_name("background").get_attribute('src')
# print(src)

## paging through to the next page
# for i in driver.find_elements_by_xpath("//button[@title='다음 페이지']"):
#     print(i)

cnt = 1

# url = driver.find_elements_by_class_name("background")[-1].get_attribute('src')
# print(url)
# urllib.request.urlretrieve(url, '123.jpg')
# os.system("curl " + url + " > test.jpg")

time.sleep(2)
driver.get_screenshot_as_file("hi.png")

# for i in driver.find_elements_by_class_name("background"):
#     time.sleep(2)
#     print(i.get_attribute('style'))
#     i.screenshot(str(cnt)+'.png')
#     cnt += 1

while 1:
    time.sleep(0.4)
    driver.save_screenshot('APS/C/'+str(cnt)+'.png')
    # print(driver.find_element_by_class_name("background").get_attribute('src'))
    # driver.find_element_by_class_name("background").screenshot(str(cnt)+'.png')
    driver.find_elements_by_xpath("//button[@title='다음 페이지']")[0].click()
    cnt += 1
    if driver.find_elements_by_xpath("//button[@title='다음 페이지']")[0].get_attribute('disabled') == 'disabled':
        break
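The fixed time.sleep() polling above can be flaky on slow pages. A possible alternative — sketched here as an assumption, not part of the original script — is Selenium's explicit-wait API, reusing the same "다음 페이지" button locator:

from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

# Wait up to 10 seconds for the "next page" button instead of sleeping a fixed 0.4 s.
next_btn = WebDriverWait(driver, 10).until(
    EC.element_to_be_clickable((By.XPATH, "//button[@title='다음 페이지']"))
)
next_btn.click()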
[((15, 9, 15, 39), 'selenium.webdriver.Chrome', 'webdriver.Chrome', ({(15, 26, 15, 38): 'chromedriver'}, {}), '(chromedriver)', False, 'from selenium import webdriver\n'), ((61, 0, 61, 13), 'time.sleep', 'time.sleep', ({(61, 11, 61, 12): '(2)'}, {}), '(2)', False, 'import time\n'), ((70, 4, 70, 19), 'time.sleep', 'time.sleep', ({(70, 15, 70, 18): '(0.4)'}, {}), '(0.4)', False, 'import time\n')]
chainren/python-learn
100days/day95/StringIO_demo.py
5e48e96c4bb212806b9ae0954fdb368abdcf9ba3
from io import StringIO

# create a StringIO object, write to it, and read back its in-memory contents
f = StringIO()
f.write('Python-100')

str = f.getvalue()  # read back what was written
print('写入内存中的字符串为:%s' %str)

f.write('\n')  # append more content
f.write('坚持100天')
f.close()  # close

f1 = StringIO('Python-100' + '\n' + '坚持100天')
# read the contents
print(f1.read())
f1.close()

# hypothetical crawler-data output function outputData()
def outputData():
    dataOne = '我是 1 号爬虫数据\n'
    dataTwo = '我是 2 号爬虫数据\n'
    dataThree = '我是 3 号爬虫数据'
    data = dataOne + dataTwo + dataThree
    return data

# dataStr is the crawler data string
dataStr = outputData()

# 1. write the content returned by outputData() into memory
dataIO = StringIO(dataStr)

# hypothetical crawler-data output function outputData()
def outputData():
    dataOne = '我是 1 号爬虫数据\n'
    dataTwo = '我是 2 号爬虫数据\n'
    dataThree = '我是 3 号爬虫数据'
    data = dataOne + dataTwo + dataThree
    return data

# dataStr is the crawler data string
dataStr = outputData()

# 1. write the content returned by outputData() into memory
dataIO = StringIO(dataStr)

# 1.1 print the data written to the StringIO buffer
print('1.1内存中写入的数据为:\n%s' %dataIO.getvalue())

# 1.2 print the written data line by line, method one
print('1.2按行输出写入的数据方式一:')
for data in dataIO.readlines():
    print(data.strip('\n'))  # strip the trailing newline from each line

# 1.2 print the written data line by line, method one
print('1.2按行输出写入的数据方式一:')
for data in dataIO.readlines():
    print(data.strip('\n'))  # strip the trailing newline from each line

# 1.3 print the written data line by line, method two
# after the previous step the file pointer sits at the end of the data (32); it must be moved back to the start
print('由于上一步操作的输出,此时文件指针位置为:%d' %dataIO.tell())
# move the file pointer back to the start for the demonstration below
dataIO.seek(0)
print('1.3按行输出写入的数据方式二:')
for data in dataIO:
    print(data.strip('\n'))
[((5, 4, 5, 14), 'io.StringIO', 'StringIO', ({}, {}), '()', False, 'from io import StringIO\n'), ((18, 5, 18, 51), 'io.StringIO', 'StringIO', ({(18, 14, 18, 50): "'Python-100' + '\\n' + '坚持100天'"}, {}), "('Python-100' + '\\n' + '坚持100天')", False, 'from io import StringIO\n'), ((38, 9, 38, 26), 'io.StringIO', 'StringIO', ({(38, 18, 38, 25): 'dataStr'}, {}), '(dataStr)', False, 'from io import StringIO\n'), ((52, 9, 52, 26), 'io.StringIO', 'StringIO', ({(52, 18, 52, 25): 'dataStr'}, {}), '(dataStr)', False, 'from io import StringIO\n')]
veleritas/mychem.info
src/hub/dataload/sources/drugcentral/drugcentral_upload.py
bb22357d4cbbc3c4865da224bf998f2cbc59f8f2
import biothings.hub.dataload.uploader as uploader class DrugCentralUploader(uploader.DummySourceUploader): name = "drugcentral" __metadata__ = { "src_meta" : { "url" : "http://drugcentral.org/", "license_url" : "http://drugcentral.org/privacy", "license_url_short" : "https://goo.gl/QDNyNe", "license" : "CC BY-SA 4.0", } } @classmethod def get_mapping(klass): mapping = { "drugcentral": { "properties": { "approval": { "properties": { "applicant": { "type": "string" }, "date": { "analyzer": "string_lowercase", "type": "string" }, "type": { "type": "string" } } }, "bioactivity": { "properties": { "act_comment": { "type": "string" }, "act_source": { "type": "string" }, "act_type": { "analyzer": "string_lowercase", "type": "string" }, "act_value": { "analyzer": "string_lowercase", "type": "string" }, "action_type": { "type": "string" }, "gene_name": { "type": "string" }, "moa": { "analyzer": "string_lowercase", "type": "string" }, "moa_source": { "type": "string" }, "swissprot": { "analyzer": "string_lowercase", "type": "string" }, "target": { "type": "string" }, "target_class": { "type": "string" }, "uniprot_id": { "analyzer": "string_lowercase", "type": "string" } } }, "drug_dosage": { "properties": { "atc_code": { "analyzer": "string_lowercase", "type": "string" }, "dose": { "analyzer": "string_lowercase", "type": "string" }, "route": { "analyzer": "string_lowercase", "type": "string" }, "unit": { "analyzer": "string_lowercase", "type": "string" } } }, "drug_use": { "properties": { "relation": { "type": "string" }, "snomed_id": { "analyzer": "string_lowercase", "type": "string" }, "snomed_name": { "type": "string" } } }, "pharmacology_action": { "properties": { "class_code": { "analyzer": "string_lowercase", "type": "string" }, "name": { "type": "string" }, "source": { "analyzer": "string_lowercase", "type": "string" }, "type": { "type": "string" } } }, "struct_id": { "analyzer": "string_lowercase", "type": "string" }, "structures": { "properties": { "cas_rn": { "analyzer": "string_lowercase", "type": "string" }, "inchi": { "analyzer": "string_lowercase", "type": "string" }, "inchikey": { "analyzer": "string_lowercase", "type": "string" }, "inn": { "analyzer": "string_lowercase", "type": "string" }, "smiles": { "analyzer": "string_lowercase", "type": "string" } } }, "synonyms": { "type": "string" }, "xref": { "properties": { "chebi": { "analyzer": "string_lowercase", "type": "string" }, "chembl_id": { "analyzer": "string_lowercase", "type": "string" }, "drugbank_id": { "analyzer": "string_lowercase", "type": "string" }, "inn_id": { "analyzer": "string_lowercase", "type": "string" }, "iuphar_ligand_id": { "analyzer": "string_lowercase", "type": "string" }, "kegg_drug": { "analyzer": "string_lowercase", "type": "string" }, "mesh_descriptor_ui": { "analyzer": "string_lowercase", "type": "string" }, "mesh_supplemental_record_ui": { "analyzer": "string_lowercase", "type": "string" }, "mmsl": { "analyzer": "string_lowercase", "type": "string" }, "nddf": { "analyzer": "string_lowercase", "type": "string" }, "ndfrt": { "analyzer": "string_lowercase", "type": "string" }, "nui": { "analyzer": "string_lowercase", "type": "string" }, "pdb_chem_id": { "analyzer": "string_lowercase", "type": "string" }, "pubchem_cid": { "analyzer": "string_lowercase", "type": "string" }, "rxnorm": { "analyzer": "string_lowercase", "type": "string" }, "secondary_cas_rn": { "analyzer": "string_lowercase", "type": "string" }, "snomedct_us": { "analyzer": "string_lowercase", "type": "string" }, "umlscui": { "analyzer": 
"string_lowercase", "type": "string" }, "unii": { "analyzer": "string_lowercase", "type": "string" }, "vandf": { "analyzer": "string_lowercase", "type": "string" }, "vuid": { "analyzer": "string_lowercase", "type": "string" } } } } } } return mapping
[]
Nate1729/FinPack
tests/test_cli.py
d76fd5e6538298d5596d5b0f7d3be2bc6520c431
"""Contains tests for finpack/core/cli.py """ __copyright__ = "Copyright (C) 2021 Matt Ferreira" import os import unittest from importlib import metadata from docopt import docopt from finpack.core import cli class TestCli(unittest.TestCase): @classmethod def setUpClass(cls): cls.DATA_DIR = "temp" os.mkdir(cls.DATA_DIR) @classmethod def tearDownClass(cls): os.rmdir(cls.DATA_DIR) def test_version_option(self): argv = ["--version"] args = docopt(cli.__doc__, argv=argv) self.assertTrue(args["--version"]) def test_init_no_options(self): argv = ["init"] args = docopt(cli.__doc__, argv=argv) self.assertTrue(args["init"]) def test_init_with_filepath_option(self): argv = ["init", "--filepath=temp/data.csv"] args = docopt(cli.__doc__, argv=argv) self.assertTrue(args["init"]) self.assertEqual(args["--filepath"], "temp/data.csv") def test_init_with_sample_dataset_option(self): argv = ["init", "--sample-dataset"] args = docopt(cli.__doc__, argv=argv) self.assertTrue(args["init"]) self.assertTrue(args["--sample-dataset"]) def test_init_with_overwrite_option(self): argv = ["init", "--overwrite"] args = docopt(cli.__doc__, argv=argv) self.assertTrue(args["init"]) self.assertTrue(args["--overwrite"]) def test_balsheet_no_option(self): argv = ["balsheet"] args = docopt(cli.__doc__, argv=argv) self.assertTrue(args["balsheet"]) def test_balsheet_with_filepath_option(self): argv = ["balsheet", "--filepath=temp/data2.csv"] args = docopt(cli.__doc__, argv=argv) self.assertTrue(args["balsheet"]) self.assertEqual(args["--filepath"], "temp/data2.csv") def test_balsheet_with_levels_default(self): argv = ["balsheet"] args = docopt(cli.__doc__, argv=argv) self.assertTrue(args["balsheet"]) self.assertEqual(args["--levels"], "3") def test_balsheet_with_levels_option(self): argv = ["balsheet", "--levels=2"] args = docopt(cli.__doc__, argv=argv) self.assertTrue(args["balsheet"]) self.assertEqual(args["--levels"], "2") def test_balsheet_with_date_default(self): argv = ["balsheet"] args = docopt(cli.__doc__, argv=argv) self.assertTrue(args["balsheet"]) self.assertEqual(args["--date"], "today") def test_balsheet_with_date_option(self): argv = ["balsheet", "--date=2021-12-01"] args = docopt(cli.__doc__, argv=argv) self.assertTrue(args["balsheet"]) self.assertEqual(args["--date"], "2021-12-01")
[((19, 8, 19, 30), 'os.mkdir', 'os.mkdir', ({(19, 17, 19, 29): 'cls.DATA_DIR'}, {}), '(cls.DATA_DIR)', False, 'import os\n'), ((23, 8, 23, 30), 'os.rmdir', 'os.rmdir', ({(23, 17, 23, 29): 'cls.DATA_DIR'}, {}), '(cls.DATA_DIR)', False, 'import os\n'), ((28, 15, 28, 45), 'docopt.docopt', 'docopt', (), '', False, 'from docopt import docopt\n'), ((35, 15, 35, 45), 'docopt.docopt', 'docopt', (), '', False, 'from docopt import docopt\n'), ((42, 15, 42, 45), 'docopt.docopt', 'docopt', (), '', False, 'from docopt import docopt\n'), ((50, 15, 50, 45), 'docopt.docopt', 'docopt', (), '', False, 'from docopt import docopt\n'), ((58, 15, 58, 45), 'docopt.docopt', 'docopt', (), '', False, 'from docopt import docopt\n'), ((66, 15, 66, 45), 'docopt.docopt', 'docopt', (), '', False, 'from docopt import docopt\n'), ((73, 15, 73, 45), 'docopt.docopt', 'docopt', (), '', False, 'from docopt import docopt\n'), ((81, 15, 81, 45), 'docopt.docopt', 'docopt', (), '', False, 'from docopt import docopt\n'), ((89, 15, 89, 45), 'docopt.docopt', 'docopt', (), '', False, 'from docopt import docopt\n'), ((97, 15, 97, 45), 'docopt.docopt', 'docopt', (), '', False, 'from docopt import docopt\n'), ((105, 15, 105, 45), 'docopt.docopt', 'docopt', (), '', False, 'from docopt import docopt\n')]
zinderud/ysa
python/Patterns/inheritance/main.py
e34d3f4c7afab3976d86f5d27edfcd273414e496
class Yaratik(object):
    def move_left(self):
        print('Moving left...')

    def move_right(self):
        print('Moving left...')


class Ejderha(Yaratik):
    def Ates_puskurtme(self):
        print('ates puskurtum!')


class Zombie(Yaratik):
    def Isirmak(self):
        print('Isirdim simdi!')


enemy = Yaratik()
enemy.move_left()

# ejderha also includes all functions from parent class (yaratik)
ejderha = Ejderha()
ejderha.move_left()
ejderha.Ates_puskurtme()

# Zombie is called the (child class), inherits from Yaratik (parent class)
zombie = Zombie()
zombie.move_right()
zombie.Isirmak()
[]
perathambkk/ml-techniques
clustering/graph_utils.py
5d6fd122322342c0b47dc65d09c4425fd73f2ea9
""" Author: Peratham Wiriyathammabhum """ import numpy as np import pandas as pd from sklearn.neighbors import NearestNeighbors def affinity_graph(X): ''' This function returns a numpy array. ''' ni, nd = X.shape A = np.zeros((ni, ni)) for i in range(ni): for j in range(i+1, ni): dist = ((X[i] - X[j])**2).sum() # compute L2 distance A[i][j] = dist A[j][i] = dist # by symmetry return A def knn_graph(X, knn=4): ''' This function returns a numpy array. ''' ni, nd = X.shape nbrs = NearestNeighbors(n_neighbors=(knn+1), algorithm='ball_tree').fit(X) distances, indices = nbrs.kneighbors(X) A = np.zeros((ni, ni)) for dist, ind in zip(distances, indices): i0 = ind[0] for i in range(1,knn+1): d = dist[i] A[i0, i] = d A[i, i0] = d # by symmetry return A def sparse_affinity_graph(X): ''' TODO: This function returns a numpy sparse matrix. ''' ni, nd = X.shape A = np.zeros((ni, ni)) for i in range(ni): for j in range(i+1, ni): dist = ((X[i] - X[j])**2).sum() # compute L2 distance A[i][j] = dist A[j][i] = dist # by symmetry return A def laplacian_graph(X, mode='affinity', knn=3, eta=0.01, sigma=2.5): ''' The unnormalized graph Laplacian, L = D − W. ''' if mode == 'affinity': W = affinity_graph(X) W[abs(W) > eta] = 0 elif mode == 'nearestneighbor': W = knn_graph(X, knn=knn) elif mode == 'gaussian': W = affinity_graph(X) bandwidth = 2.0*(sigma**2) W = np.exp(W) / bandwidth else: pass D = np.diag(W.sum(axis=1)) L = D - W return L
[((15, 5, 15, 23), 'numpy.zeros', 'np.zeros', ({(15, 14, 15, 22): '(ni, ni)'}, {}), '((ni, ni))', True, 'import numpy as np\n'), ((30, 5, 30, 23), 'numpy.zeros', 'np.zeros', ({(30, 14, 30, 22): '(ni, ni)'}, {}), '((ni, ni))', True, 'import numpy as np\n'), ((44, 5, 44, 23), 'numpy.zeros', 'np.zeros', ({(44, 14, 44, 22): '(ni, ni)'}, {}), '((ni, ni))', True, 'import numpy as np\n'), ((28, 8, 28, 68), 'sklearn.neighbors.NearestNeighbors', 'NearestNeighbors', (), '', False, 'from sklearn.neighbors import NearestNeighbors\n'), ((64, 6, 64, 15), 'numpy.exp', 'np.exp', ({(64, 13, 64, 14): 'W'}, {}), '(W)', True, 'import numpy as np\n')]
Acidburn0zzz/luci
recipe_engine/internal/commands/__init__.py
d8993f4684839b58f5f966dd6273d1d8fd001eae
# Copyright 2019 The LUCI Authors. All rights reserved. # Use of this source code is governed under the Apache License, Version 2.0 # that can be found in the LICENSE file. """This package houses all subcommands for the recipe engine. See implementation_details.md for the expectations of the modules in this directory. """ import argparse import errno import logging import os import pkgutil import sys if sys.version_info >= (3, 5): # we're running python > 3.5 OS_WALK = os.walk else: # From vpython from scandir import walk as OS_WALK # pylint: disable=wrong-import-position from .. import simple_cfg from ..recipe_deps import RecipeDeps from ..recipe_module_importer import RecipeModuleImporter LOG = logging.getLogger(__name__) # This incantation finds all loadable submodules of ourself. The # `prefix=__name__` bit is so that these modules get loaded with the correct # import names, i.e. # # recipe_engine.internal.commands.<submodule> # # If omitted, then these submodules can get double loaded as both: # # <submodule> AND # recipe_engine.internal.commands.<submodule> # # Which can both interfere with the global python module namespace, and lead to # strange errors when doing type assertions (since all data in these modules # will be loaded under two different names; classes will fail isinstance checks # even though they are "the same"). _COMMANDS = [ loader.find_module(module_name).load_module(module_name) for (loader, module_name, _) in pkgutil.walk_packages( __path__, prefix=__name__+'.') if '.' not in module_name[len(__name__)+1:] ] # Order all commands by an optional __cmd_priority__ field, and then by module # name. _COMMANDS.sort( key=lambda mod: ( not hasattr(mod, '__cmd_priority__'), # modules defining priority first getattr(mod, '__cmd_priority__', None), # actual priority mod.__name__ # name )) # Now actually set these commands on ourself so that 'mock' works correctly. # # This is needed to allow some tests (though it may be worth adjusting these # tests later to not need this. Just delete this function and see which tests # fail to find the dependencies on this behavior). def _patch_our_attrs(): self = sys.modules[__name__] self.__all__ = [mod.__name__[len(__name__)+1:] for mod in _COMMANDS] for modname, mod in zip(self.__all__, _COMMANDS): setattr(self, modname, mod) _patch_our_attrs() def _check_recipes_cfg_consistency(recipe_deps): """Checks all recipe.cfg files for the loaded recipe_deps and logs inconsistent dependencies. Args: recipe_deps (RecipeDeps) - The loaded+fetched recipe deps for the current run. """ actual = recipe_deps.main_repo.simple_cfg.deps # For every repo we loaded for repo_name in actual: required_deps = recipe_deps.repos[repo_name].simple_cfg.deps for req_repo_name, req_spec in required_deps.iteritems(): # If this depends on something we didn't load, log an error. if req_repo_name not in actual: LOG.error( '%r depends on %r, but your recipes.cfg is missing an ' 'entry for this.', repo_name, req_repo_name) continue actual_spec = actual[req_repo_name] if req_spec.revision == actual_spec.revision: # They match, it's all good. continue LOG.warn( 'recipes.cfg depends on %r @ %s, but %r depends on version %s.', req_repo_name, actual_spec.revision, repo_name, req_spec.revision) def _cleanup_pyc(recipe_deps): """Removes any .pyc files from the recipes/recipe_module directories. Args: * recipe_deps (RecipeDeps) - The loaded recipe dependencies. 
""" for repo in recipe_deps.repos.itervalues(): for to_walk in (repo.recipes_dir, repo.modules_dir): for root, _dirs, files in OS_WALK(to_walk): for fname in files: if not fname.endswith('.pyc'): continue try: to_clean = os.path.join(root, fname) LOG.info('cleaning %r', to_clean) os.unlink(to_clean) except OSError as ex: # If multiple things are cleaning pyc's at the same time this can # race. Fortunately we only care that SOMETHING deleted the pyc :) if ex.errno != errno.ENOENT: raise def _common_post_process(args): # TODO(iannucci): We should always do logging.basicConfig() (probably with # logging.WARNING), even if no verbose is passed. However we need to be # careful as this could cause issues with spurious/unexpected output. # Once the recipe engine is on native build.proto, this should be safe to # do. if args.verbose > 0: logging.basicConfig() logging.getLogger().setLevel(logging.INFO) if args.verbose > 1: logging.getLogger().setLevel(logging.DEBUG) else: # Prevent spurious "No handlers could be found for ..." stderr messages. # Once we always set a basicConfig (per TODO above), this can go away as # well. logging.root.manager.emittedNoHandlerWarning = True if args.pid_file: try: with open(args.pid_file, 'w') as pid_file: pid_file.write('%d\n' % os.getpid()) except Exception: logging.exception("unable to write pidfile") args.recipe_deps = RecipeDeps.create( args.main_repo_path, args.repo_override, args.proto_override, ) _check_recipes_cfg_consistency(args.recipe_deps) # Allows: # import RECIPE_MODULES.repo_name.module_name.submodule sys.meta_path = [RecipeModuleImporter(args.recipe_deps)] + sys.meta_path _cleanup_pyc(args.recipe_deps) # Remove flags that subcommands shouldn't use; everything from this point on # should ONLY use args.recipe_deps. del args.main_repo_path del args.verbose del args.repo_override def _add_common_args(parser): class _RepoOverrideAction(argparse.Action): def __call__(self, parser, namespace, values, option_string=None): tokens = values.split('=', 2) if len(tokens) != 2: raise ValueError('Override must have the form: repo=path') repo_name, path = tokens override_dict = getattr(namespace, self.dest) if repo_name in override_dict: raise ValueError('An override is already defined for [%s] (%s)' % ( repo_name, override_dict[repo_name])) path = os.path.abspath(os.path.expanduser(path)) if not os.path.isdir(path): raise ValueError('Override path [%s] is not a directory' % (path,)) override_dict[repo_name] = path def _package_to_main_repo(value): try: value = os.path.abspath(value) except Exception as ex: # pylint: disable=broad-except parser.error( '--package %r could not be converted to absolute path: %r' % ( value, ex,)) recipes_cfg_rel = simple_cfg.RECIPES_CFG_LOCATION_REL if not value.endswith(recipes_cfg_rel): parser.error('--package must end with %r.' % (recipes_cfg_rel,)) # We know the arg ends with 'infra/config/recipes.cfg', so chop those # elements off the path to get the path to the recipe repo root. for _ in simple_cfg.RECIPES_CFG_LOCATION_TOKS: value = os.path.dirname(value) return value # TODO(iannucci): change --package to --repo-path and avoid having recipes.py # pass the path to the recipes.cfg. This is preferable because the location of # recipes.cfg MUST be discovered for recipe dependencies; the RepoSpec # protobuf doesn't specify where the recipes.cfg is in the dependency repos # (nor can it, even if it was dynamic; this would be a nightmare to maintain, # and the autoroller would need to discover it automatically ANYWAY. 
If we # allow it to be relocatable, the engine needs to be able to discover it, in # which case the minimal information is still 'repo root'). parser.add_argument( '--package', dest='main_repo_path', type=_package_to_main_repo, required=True, help='Path to recipes.cfg of the recipe repo to operate on.') parser.add_argument( '--verbose', '-v', action='count', help='Increase logging verboisty') parser.add_argument('-O', '--repo-override', metavar='ID=PATH', action=_RepoOverrideAction, default={}, help='Override a repo repository path with a local one.') parser.add_argument('--pid-file', metavar='PATH', help=( 'Absolute path to a file where the engine should write its pid. ' 'Path must be absolute and not exist.')) def _proto_override_abspath(value): try: value = os.path.abspath(value) except Exception as ex: # pylint: disable=broad-except parser.error( '--proto-override %r could not be converted to absolute path: %r' % ( value, ex,)) return value # Override the location of the folder containing the `PB` module. This should # only be used for recipe bundles, so we don't bother giving it a shortform # option, and suppress the option's help to avoid confusing users. parser.add_argument( '--proto-override', type=_proto_override_abspath, help=argparse.SUPPRESS) parser.set_defaults( postprocess_func=lambda error, args: None, ) def parse_and_run(): """Parses the command line and runs the chosen subcommand. Returns the command's return value (either int or None, suitable as input to `os._exit`). """ parser = argparse.ArgumentParser( description='Interact with the recipe system.') _add_common_args(parser) subp = parser.add_subparsers(dest='command') for module in _COMMANDS: description = module.__doc__ helplines = [] for line in description.splitlines(): line = line.strip() if not line: break helplines.append(line) module.add_arguments(subp.add_parser( module.__name__.split('.')[-1], # use module's short name formatter_class=argparse.RawDescriptionHelpFormatter, help=' '.join(helplines), description=description, )) args = parser.parse_args() _common_post_process(args) args.postprocess_func(parser.error, args) return args.func(args)
[((30, 6, 30, 33), 'logging.getLogger', 'logging.getLogger', ({(30, 24, 30, 32): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((262, 11, 263, 53), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((49, 34, 50, 36), 'pkgutil.walk_packages', 'pkgutil.walk_packages', (), '', False, 'import pkgutil\n'), ((137, 4, 137, 25), 'logging.basicConfig', 'logging.basicConfig', ({}, {}), '()', False, 'import logging\n'), ((114, 32, 114, 48), 'scandir.walk', 'OS_WALK', ({(114, 40, 114, 47): 'to_walk'}, {}), '(to_walk)', True, 'from scandir import walk as OS_WALK\n'), ((195, 14, 195, 36), 'os.path.abspath', 'os.path.abspath', ({(195, 30, 195, 35): 'value'}, {}), '(value)', False, 'import os\n'), ((208, 14, 208, 36), 'os.path.dirname', 'os.path.dirname', ({(208, 30, 208, 35): 'value'}, {}), '(value)', False, 'import os\n'), ((237, 14, 237, 36), 'os.path.abspath', 'os.path.abspath', ({(237, 30, 237, 35): 'value'}, {}), '(value)', False, 'import os\n'), ((138, 4, 138, 23), 'logging.getLogger', 'logging.getLogger', ({}, {}), '()', False, 'import logging\n'), ((152, 6, 152, 50), 'logging.exception', 'logging.exception', ({(152, 24, 152, 49): '"""unable to write pidfile"""'}, {}), "('unable to write pidfile')", False, 'import logging\n'), ((188, 29, 188, 53), 'os.path.expanduser', 'os.path.expanduser', ({(188, 48, 188, 52): 'path'}, {}), '(path)', False, 'import os\n'), ((189, 13, 189, 32), 'os.path.isdir', 'os.path.isdir', ({(189, 27, 189, 31): 'path'}, {}), '(path)', False, 'import os\n'), ((140, 6, 140, 25), 'logging.getLogger', 'logging.getLogger', ({}, {}), '()', False, 'import logging\n'), ((120, 23, 120, 48), 'os.path.join', 'os.path.join', ({(120, 36, 120, 40): 'root', (120, 42, 120, 47): 'fname'}, {}), '(root, fname)', False, 'import os\n'), ((122, 12, 122, 31), 'os.unlink', 'os.unlink', ({(122, 22, 122, 30): 'to_clean'}, {}), '(to_clean)', False, 'import os\n'), ((150, 32, 150, 43), 'os.getpid', 'os.getpid', ({}, {}), '()', False, 'import os\n')]
sarthakpati/openfl
openfl/pipelines/stc_pipeline.py
8edebfd565d94f709a7d7f06d9ee38a7975c066e
# Copyright (C) 2020-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

"""STCPipelinemodule."""

import numpy as np
import gzip as gz

from .pipeline import TransformationPipeline, Transformer


class SparsityTransformer(Transformer):
    """A transformer class to sparsify input data."""

    def __init__(self, p=0.01):
        """Initialize.

        Args:
            p (float): sparsity ratio (Default=0.01)
        """
        self.lossy = True
        self.p = p

    def forward(self, data, **kwargs):
        """Sparsify data and pass over only non-sparsified elements
        by reducing the array size.

        Args:
            data: an numpy array from the model tensor_dict

        Returns:
            condensed_data: an numpy array being sparsified.
            metadata: dictionary to store a list of meta information.
        """
        metadata = {'int_list': list(data.shape)}
        # sparsification
        data = data.astype(np.float32)
        flatten_data = data.flatten()
        n_elements = flatten_data.shape[0]
        k_op = int(np.ceil(n_elements * self.p))
        topk, topk_indices = self._topk_func(flatten_data, k_op)
        # condensed_data = topk
        sparse_data = np.zeros(flatten_data.shape)
        sparse_data[topk_indices] = topk
        nonzero_element_bool_indices = sparse_data != 0.0
        metadata['bool_list'] = list(nonzero_element_bool_indices)
        return condensed_data, metadata
        # return sparse_data, metadata

    def backward(self, data, metadata, **kwargs):
        """Recover data array with the right shape and numerical type.

        Args:
            data: an numpy array with non-zero values.
            metadata: dictionary to contain information for recovering back
                      to original data array.

        Returns:
            recovered_data: an numpy array with original shape.
        """
        data = data.astype(np.float32)
        data_shape = metadata['int_list']
        nonzero_element_bool_indices = list(metadata['bool_list'])
        recovered_data = np.zeros(data_shape).reshape(-1).astype(np.float32)
        recovered_data[nonzero_element_bool_indices] = data
        recovered_data = recovered_data.reshape(data_shape)
        return recovered_data

    @staticmethod
    def _topk_func(x, k):
        """Select top k values.

        Args:
            x: an numpy array to be sorted out for top-k components.
            k: k most maximum values.

        Returns:
            topk_mag: components with top-k values.
            indices: indices of the top-k components.
        """
        # quick sort as default on magnitude
        idx = np.argsort(np.abs(x))
        # sorted order, the right most is the largest magnitude
        length = x.shape[0]
        start_idx = length - k
        # get the top k magnitude
        topk_mag = np.asarray(x[idx[start_idx:]])
        indices = np.asarray(idx[start_idx:])
        if min(topk_mag) - 0 < 10e-8:  # avoid zeros
            topk_mag = topk_mag + 10e-8
        return topk_mag, indices


class TernaryTransformer(Transformer):
    """A transformer class to ternerize input data."""

    def __init__(self):
        """Initialize."""
        self.lossy = True

    def forward(self, data, **kwargs):
        """Ternerize data into positive mean value, negative mean value and zero value.

        Args:
            data: an flattened numpy array

        Returns:
            int_data: an numpy array being terneraized.
            metadata: dictionary to store a list of meta information.
        """
        # ternarization, data is sparse and flattened
        mean_topk = np.mean(np.abs(data))
        out_ = np.where(data > 0.0, mean_topk, 0.0)
        out = np.where(data < 0.0, -mean_topk, out_)
        int_array, int2float_map = self._float_to_int(out)
        metadata = {'int_to_float': int2float_map}
        return int_array, metadata

    def backward(self, data, metadata, **kwargs):
        """Recover data array back to the original numerical type.

        Args:
            data: an numpy array with non-zero values.

        Returns:
            metadata: dictionary to contain information for recovering back
                      to original data array.
            data (return): an numpy array with original numerical type.
        """
        # TODO
        import copy
        data = copy.deepcopy(data)
        int2float_map = metadata['int_to_float']
        for key in int2float_map:
            indices = data == key
            data[indices] = int2float_map[key]
        return data

    @staticmethod
    def _float_to_int(np_array):
        """Create look-up table for conversion between floating and integer types.

        Args:
            np_array:

        Returns:
            int_array:
            int_to_float_map:
        """
        flatten_array = np_array.reshape(-1)
        unique_value_array = np.unique(flatten_array)
        int_array = np.zeros(flatten_array.shape, dtype=np.int)
        int_to_float_map = {}
        float_to_int_map = {}
        # create table
        for idx, u_value in enumerate(unique_value_array):
            int_to_float_map.update({idx: u_value})
            float_to_int_map.update({u_value: idx})
            # assign to the integer array
            indices = np.where(flatten_array == u_value)
            int_array[indices] = idx
        int_array = int_array.reshape(np_array.shape)
        return int_array, int_to_float_map


class GZIPTransformer(Transformer):
    """A transformer class to losslessly compress data."""

    def __init__(self):
        """Initialize."""
        self.lossy = False

    def forward(self, data, **kwargs):
        """Compress data into numpy of float32.

        Args:
            data: an numpy array with non-zero values

        Returns:
            compressed_bytes :
            metadata: dictionary to contain information for recovering back
                      to original data array
        """
        bytes_ = data.astype(np.float32).tobytes()
        compressed_bytes = gz.compress(bytes_)
        metadata = {}
        return compressed_bytes, metadata

    def backward(self, data, metadata, **kwargs):
        """Decompress data into numpy of float32.

        Args:
            data: an numpy array with non-zero values
            metadata: dictionary to contain information for recovering back
                      to original data array

        Returns:
            data:
        """
        decompressed_bytes_ = gz.decompress(data)
        data = np.frombuffer(decompressed_bytes_, dtype=np.float32)
        return data


class STCPipeline(TransformationPipeline):
    """A pipeline class to compress data lossly using sparsity and ternerization methods."""

    def __init__(self, p_sparsity=0.01, n_clusters=6, **kwargs):
        """Initialize a pipeline of transformers.

        Args:
            p_sparsity (float): Sparsity factor (Default=0.01)
            n_cluster (int): Number of K-Means clusters (Default=6)

        Returns:
            Data compression transformer pipeline object
        """
        # instantiate each transformer
        self.p = p_sparsity
        transformers = [SparsityTransformer(self.p),
                        TernaryTransformer(),
                        GZIPTransformer()]
        super(STCPipeline, self).__init__(transformers=transformers, **kwargs)
[((43, 22, 43, 50), 'numpy.zeros', 'np.zeros', ({(43, 31, 43, 49): 'flatten_data.shape'}, {}), '(flatten_data.shape)', True, 'import numpy as np\n'), ((86, 19, 86, 49), 'numpy.asarray', 'np.asarray', ({(86, 30, 86, 48): 'x[idx[start_idx:]]'}, {}), '(x[idx[start_idx:]])', True, 'import numpy as np\n'), ((87, 18, 87, 45), 'numpy.asarray', 'np.asarray', ({(87, 29, 87, 44): 'idx[start_idx:]'}, {}), '(idx[start_idx:])', True, 'import numpy as np\n'), ((112, 15, 112, 51), 'numpy.where', 'np.where', ({(112, 24, 112, 34): 'data > 0.0', (112, 36, 112, 45): 'mean_topk', (112, 47, 112, 50): '0.0'}, {}), '(data > 0.0, mean_topk, 0.0)', True, 'import numpy as np\n'), ((113, 14, 113, 52), 'numpy.where', 'np.where', ({(113, 23, 113, 33): 'data < 0.0', (113, 35, 113, 45): '-mean_topk', (113, 47, 113, 51): 'out_'}, {}), '(data < 0.0, -mean_topk, out_)', True, 'import numpy as np\n'), ((130, 15, 130, 34), 'copy.deepcopy', 'copy.deepcopy', ({(130, 29, 130, 33): 'data'}, {}), '(data)', False, 'import copy\n'), ((150, 29, 150, 53), 'numpy.unique', 'np.unique', ({(150, 39, 150, 52): 'flatten_array'}, {}), '(flatten_array)', True, 'import numpy as np\n'), ((151, 20, 151, 63), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((184, 27, 184, 46), 'gzip.compress', 'gz.compress', ({(184, 39, 184, 45): 'bytes_'}, {}), '(bytes_)', True, 'import gzip as gz\n'), ((198, 30, 198, 49), 'gzip.decompress', 'gz.decompress', ({(198, 44, 198, 48): 'data'}, {}), '(data)', True, 'import gzip as gz\n'), ((199, 15, 199, 67), 'numpy.frombuffer', 'np.frombuffer', (), '', True, 'import numpy as np\n'), ((39, 19, 39, 47), 'numpy.ceil', 'np.ceil', ({(39, 27, 39, 46): 'n_elements * self.p'}, {}), '(n_elements * self.p)', True, 'import numpy as np\n'), ((81, 25, 81, 34), 'numpy.abs', 'np.abs', ({(81, 32, 81, 33): 'x'}, {}), '(x)', True, 'import numpy as np\n'), ((111, 28, 111, 40), 'numpy.abs', 'np.abs', ({(111, 35, 111, 39): 'data'}, {}), '(data)', True, 'import numpy as np\n'), ((159, 22, 159, 56), 'numpy.where', 'np.where', ({(159, 31, 159, 55): 'flatten_array == u_value'}, {}), '(flatten_array == u_value)', True, 'import numpy as np\n'), ((63, 25, 63, 45), 'numpy.zeros', 'np.zeros', ({(63, 34, 63, 44): 'data_shape'}, {}), '(data_shape)', True, 'import numpy as np\n')]
Rex0519/NessusToReport
modle/__init__.py
047dd4a2f749addab3991b0ebc8ab609140c32a7
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# ------------------------------------------------------------
# File: __init__.py.py
# Created Date: 2020/6/24
# Created Time: 0:12
# Author: Hypdncy
# Author Mail: [email protected]
# Copyright (c) 2020 Hypdncy
# ------------------------------------------------------------
#                     .::::.
#                   .::::::::.
#                   :::::::::::
#                ..:::::::::::'
#              '::::::::::::'
#                .::::::::::
#           '::::::::::::::..
#                ..::::::::::::.
#              ``::::::::::::::::
#               ::::``:::::::::'        .:::.
#              ::::'   ':::::'       .::::::::.
#            .::::'      ::::     .:::::::'::::.
#           .:::'       :::::  .:::::::::' ':::::.
#          .::'        :::::.:::::::::'      ':::::.
#         .::'         ::::::::::::::'         ``::::.
#     ...:::           ::::::::::::'              ``::.
#    ````':.          ':::::::::'                  ::::..
#                       '.:::::'                    ':'````..
# ------------------------------------------------------------
[]
csdms/pymt
tests/component/test_grid_mixin.py
188222d7858cd3e8eb15564e56d9b7f0cb43cae5
import numpy as np import pytest from pytest import approx from pymt.component.grid import GridMixIn class Port: def __init__(self, name, uses=None, provides=None): self._name = name self._uses = uses or [] self._provides = provides or [] def get_component_name(self): return self._name def get_input_item_count(self): return len(self._uses) def get_input_item_list(self): return self._uses def get_output_item_count(self): return len(self._provides) def get_output_item_list(self): return self._provides def test_exchange_items(): class Component(GridMixIn): def __init__(self): self._port = Port("test", uses=["invar"], provides=["outvar"]) super().__init__() c = Component() assert c.input_items == ["invar"] assert c.output_items == ["outvar"] def test_no_exchange_items(): class Component(GridMixIn): def __init__(self): self._port = Port("test") super().__init__() c = Component() assert c.input_items == [] assert c.output_items == [] def test_raster_1d(): class RasterPort(Port): def get_grid_shape(self, grid_id): return (3,) def get_grid_spacing(self, grid_id): return (2.0,) def get_grid_origin(self, grid_id): return (3.0,) class Component(GridMixIn): def __init__(self): self._port = RasterPort("test", uses=["invar"]) super().__init__() c = Component() assert c.get_x("invar") == approx(np.array([3.0, 5.0, 7.0])) def test_raster_2d(): class RasterPort(Port): def get_grid_shape(self, grid_id): return (2, 3) def get_grid_spacing(self, grid_id): return (2.0, 1.0) def get_grid_origin(self, grid_id): return (0.0, 0.0) class Component(GridMixIn): def __init__(self): self._port = RasterPort("test-2d", uses=["invar"], provides=["outvar"]) super().__init__() c = Component() assert c.name == "test-2d" assert c.get_grid_type(0) == "RASTER" assert c.get_x(0) == approx(np.array([[0.0, 1.0, 2.0], [0.0, 1.0, 2.0]])) assert c.get_y(0) == approx(np.array([[0.0, 0.0, 0.0], [2.0, 2.0, 2.0]])) assert np.all(c.get_connectivity(0) == np.array([0, 1, 4, 3, 1, 2, 5, 4])) assert np.all(c.get_offset(0) == np.array([4, 8])) def test_raster_3d(): class RasterPort(Port): def get_grid_shape(self, grid_id): return (2, 2, 3) def get_grid_spacing(self, grid_id): return (1.0, 2.0, 1.0) def get_grid_origin(self, grid_id): return (0.0, 0.0, 0.0) class Component(GridMixIn): def __init__(self): self._port = RasterPort("test-3d", uses=["invar"]) super().__init__() c = Component() assert c.get_x(0) == approx( np.array( [[[0.0, 1.0, 2.0], [0.0, 1.0, 2.0]], [[0.0, 1.0, 2.0], [0.0, 1.0, 2.0]]] ) ) assert c.get_y(0) == approx( np.array( [[[0.0, 0.0, 0.0], [2.0, 2.0, 2.0]], [[0.0, 0.0, 0.0], [2.0, 2.0, 2.0]]] ) ) assert c.get_z(0) == approx( np.array( [[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]] ) ) def test_rectilinear(): class RectilinearPort(Port): def get_grid_shape(self, grid_id): return (2, 3) def get_grid_x(self, grid_id): return (0.0, 3.0, 4) def get_grid_y(self, grid_id): return (2.0, 7.0) class Component(GridMixIn): def __init__(self): self._port = RectilinearPort("test", uses=["invar"]) super().__init__() c = Component() assert c.get_grid_type(0) == "RECTILINEAR" assert c.get_x(0) == approx(np.array([[0.0, 3.0, 4.0], [0.0, 3.0, 4.0]])) assert c.get_y(0) == approx(np.array([[2.0, 2.0, 2.0], [7.0, 7.0, 7.0]])) def test_structured(): class StructuredPort(Port): def get_grid_shape(self, grid_id): return (2, 3) def get_grid_x(self, grid_id): return np.array([0.0, 1.0, 2.0, 0.0, 1.0, 2.0]) def get_grid_y(self, grid_id): return np.array([0.0, 1.0, 2.0, 1.0, 2.0, 3.0]) class Component(GridMixIn): def 
__init__(self): self._port = StructuredPort("test", uses=["invar"]) super().__init__() c = Component() assert c.get_grid_type(0) == "STRUCTURED" assert c.get_x(0) == approx(np.array([0.0, 1.0, 2.0, 0.0, 1.0, 2.0])) assert c.get_y(0) == approx(np.array([0.0, 1.0, 2.0, 1.0, 2.0, 3.0])) def test_unstructured(): class UnstructuredPort(Port): def get_grid_x(self, grid_id): return np.array([0.0, 1.0, 0.0, 1.0, 2.0]) def get_grid_y(self, grid_id): return np.array([0.0, 0.0, 1.0, 1.0, 0.0]) def get_grid_connectivity(self, grid_id): return np.array([0, 1, 3, 2, 4, 3, 1]) def get_grid_offset(self, grid_id): return np.array([4, 7]) class Component(GridMixIn): def __init__(self): self._port = UnstructuredPort("test", uses=["invar"]) super().__init__() c = Component() assert c.get_grid_type(0) == "UNSTRUCTURED" assert c.get_x(0) == approx(np.array([0.0, 1.0, 0.0, 1.0, 2.0])) assert c.get_y(0) == approx(np.array([0.0, 0.0, 1.0, 1.0, 0.0])) def test_get_grid_shape_is_none(): class UnstructuredPort(Port): def get_grid_shape(self, grid_id): return None def get_grid_x(self, grid_id): return np.array([0.0, 1.0, 2.0]) class Component(GridMixIn): def __init__(self): self._port = UnstructuredPort("test", uses=["invar"]) super().__init__() c = Component() assert c.get_grid_type(0) == "UNSTRUCTURED" def test_get_grid_shape_raises(): class UnstructuredPort(Port): def get_grid_shape(self, grid_id): raise NotImplementedError("get_grid_shape") def get_grid_x(self, grid_id): return np.array([0.0, 1.0, 2.0]) class Component(GridMixIn): def __init__(self): self._port = UnstructuredPort("test", uses=["invar"]) super().__init__() c = Component() assert c.get_grid_type(0) == "UNSTRUCTURED" def test_structured_1d(): class RectilinearPort(Port): def get_grid_shape(self, grid_id): return (2, 3) def get_grid_x(self, grid_id): return np.array([0.0, 1.0, 2.0]) def get_grid_y(self, grid_id): raise NotImplementedError("get_grid_y") def get_grid_z(self, grid_id): raise NotImplementedError("get_grid_z") class Component(GridMixIn): def __init__(self): self._port = RectilinearPort("test", uses=["invar"]) super().__init__() c = Component() assert c.get_grid_type(0) == "RECTILINEAR" with pytest.raises(IndexError): c.get_z(0)
[((255, 9, 255, 34), 'pytest.raises', 'pytest.raises', ({(255, 23, 255, 33): 'IndexError'}, {}), '(IndexError)', False, 'import pytest\n'), ((69, 38, 69, 63), 'numpy.array', 'np.array', ({(69, 47, 69, 62): '[3.0, 5.0, 7.0]'}, {}), '([3.0, 5.0, 7.0])', True, 'import numpy as np\n'), ((91, 32, 91, 76), 'numpy.array', 'np.array', ({(91, 41, 91, 75): '[[0.0, 1.0, 2.0], [0.0, 1.0, 2.0]]'}, {}), '([[0.0, 1.0, 2.0], [0.0, 1.0, 2.0]])', True, 'import numpy as np\n'), ((92, 32, 92, 76), 'numpy.array', 'np.array', ({(92, 41, 92, 75): '[[0.0, 0.0, 0.0], [2.0, 2.0, 2.0]]'}, {}), '([[0.0, 0.0, 0.0], [2.0, 2.0, 2.0]])', True, 'import numpy as np\n'), ((93, 43, 93, 77), 'numpy.array', 'np.array', ({(93, 52, 93, 76): '[0, 1, 4, 3, 1, 2, 5, 4]'}, {}), '([0, 1, 4, 3, 1, 2, 5, 4])', True, 'import numpy as np\n'), ((94, 37, 94, 53), 'numpy.array', 'np.array', ({(94, 46, 94, 52): '[4, 8]'}, {}), '([4, 8])', True, 'import numpy as np\n'), ((115, 8, 117, 9), 'numpy.array', 'np.array', ({(116, 12, 116, 84): '[[[0.0, 1.0, 2.0], [0.0, 1.0, 2.0]], [[0.0, 1.0, 2.0], [0.0, 1.0, 2.0]]]'}, {}), '([[[0.0, 1.0, 2.0], [0.0, 1.0, 2.0]], [[0.0, 1.0, 2.0], [0.0, 1.0, \n 2.0]]])', True, 'import numpy as np\n'), ((120, 8, 122, 9), 'numpy.array', 'np.array', ({(121, 12, 121, 84): '[[[0.0, 0.0, 0.0], [2.0, 2.0, 2.0]], [[0.0, 0.0, 0.0], [2.0, 2.0, 2.0]]]'}, {}), '([[[0.0, 0.0, 0.0], [2.0, 2.0, 2.0]], [[0.0, 0.0, 0.0], [2.0, 2.0, \n 2.0]]])', True, 'import numpy as np\n'), ((125, 8, 127, 9), 'numpy.array', 'np.array', ({(126, 12, 126, 84): '[[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]]'}, {}), '([[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], [[1.0, 1.0, 1.0], [1.0, 1.0, \n 1.0]]])', True, 'import numpy as np\n'), ((149, 32, 149, 76), 'numpy.array', 'np.array', ({(149, 41, 149, 75): '[[0.0, 3.0, 4.0], [0.0, 3.0, 4.0]]'}, {}), '([[0.0, 3.0, 4.0], [0.0, 3.0, 4.0]])', True, 'import numpy as np\n'), ((150, 32, 150, 76), 'numpy.array', 'np.array', ({(150, 41, 150, 75): '[[2.0, 2.0, 2.0], [7.0, 7.0, 7.0]]'}, {}), '([[2.0, 2.0, 2.0], [7.0, 7.0, 7.0]])', True, 'import numpy as np\n'), ((159, 19, 159, 59), 'numpy.array', 'np.array', ({(159, 28, 159, 58): '[0.0, 1.0, 2.0, 0.0, 1.0, 2.0]'}, {}), '([0.0, 1.0, 2.0, 0.0, 1.0, 2.0])', True, 'import numpy as np\n'), ((162, 19, 162, 59), 'numpy.array', 'np.array', ({(162, 28, 162, 58): '[0.0, 1.0, 2.0, 1.0, 2.0, 3.0]'}, {}), '([0.0, 1.0, 2.0, 1.0, 2.0, 3.0])', True, 'import numpy as np\n'), ((171, 32, 171, 72), 'numpy.array', 'np.array', ({(171, 41, 171, 71): '[0.0, 1.0, 2.0, 0.0, 1.0, 2.0]'}, {}), '([0.0, 1.0, 2.0, 0.0, 1.0, 2.0])', True, 'import numpy as np\n'), ((172, 32, 172, 72), 'numpy.array', 'np.array', ({(172, 41, 172, 71): '[0.0, 1.0, 2.0, 1.0, 2.0, 3.0]'}, {}), '([0.0, 1.0, 2.0, 1.0, 2.0, 3.0])', True, 'import numpy as np\n'), ((178, 19, 178, 54), 'numpy.array', 'np.array', ({(178, 28, 178, 53): '[0.0, 1.0, 0.0, 1.0, 2.0]'}, {}), '([0.0, 1.0, 0.0, 1.0, 2.0])', True, 'import numpy as np\n'), ((181, 19, 181, 54), 'numpy.array', 'np.array', ({(181, 28, 181, 53): '[0.0, 0.0, 1.0, 1.0, 0.0]'}, {}), '([0.0, 0.0, 1.0, 1.0, 0.0])', True, 'import numpy as np\n'), ((184, 19, 184, 50), 'numpy.array', 'np.array', ({(184, 28, 184, 49): '[0, 1, 3, 2, 4, 3, 1]'}, {}), '([0, 1, 3, 2, 4, 3, 1])', True, 'import numpy as np\n'), ((187, 19, 187, 35), 'numpy.array', 'np.array', ({(187, 28, 187, 34): '[4, 7]'}, {}), '([4, 7])', True, 'import numpy as np\n'), ((196, 32, 196, 67), 'numpy.array', 'np.array', ({(196, 41, 196, 66): '[0.0, 1.0, 0.0, 1.0, 2.0]'}, {}), '([0.0, 1.0, 0.0, 1.0, 2.0])', 
True, 'import numpy as np\n'), ((197, 32, 197, 67), 'numpy.array', 'np.array', ({(197, 41, 197, 66): '[0.0, 0.0, 1.0, 1.0, 0.0]'}, {}), '([0.0, 0.0, 1.0, 1.0, 0.0])', True, 'import numpy as np\n'), ((206, 19, 206, 44), 'numpy.array', 'np.array', ({(206, 28, 206, 43): '[0.0, 1.0, 2.0]'}, {}), '([0.0, 1.0, 2.0])', True, 'import numpy as np\n'), ((223, 19, 223, 44), 'numpy.array', 'np.array', ({(223, 28, 223, 43): '[0.0, 1.0, 2.0]'}, {}), '([0.0, 1.0, 2.0])', True, 'import numpy as np\n'), ((240, 19, 240, 44), 'numpy.array', 'np.array', ({(240, 28, 240, 43): '[0.0, 1.0, 2.0]'}, {}), '([0.0, 1.0, 2.0])', True, 'import numpy as np\n')]
SnoozeTime/nes
scripts/compare.py
4d60562c59e175485eb3dff043c0c78473034cdb
import sys

def load_log_sp(filename):
    data = []
    with open(filename) as f:
        for line in f.readlines():
            tokens = line.split(" ")
            spidx = line.find("SP:")
            endidx = line.find(' ', spidx)
            data.append((line[0:4], line[spidx+3:endidx]))
    return data

if __name__ == "__main__":
    mylog = sys.argv[1]
    correctlog = sys.argv[2]

    mylog_sp = load_log_sp(mylog)
    correctlog_sp = load_log_sp(correctlog)

    for (i, ((nb1, sp1), (nb2, sp2))) in enumerate(zip(mylog_sp, correctlog_sp)):
        print('{} {} - {} vs {}'.format(
            nb1, nb2, sp1, sp2))
        if sp1.lower() != sp2.lower() or int(nb1.lower(),16) != int(nb2.lower(), 16):
            break
[]
nahuelalmeira/deepLearning
tercer_modelo.py
f1fcd06f5735c8be9272b0c8392b1ae467c08582
"""Exercise 1 Usage: $ CUDA_VISIBLE_DEVICES=2 python practico_1_train_petfinder.py --dataset_dir ../ --epochs 30 --dropout 0.1 0.1 --hidden_layer_sizes 200 100 To know which GPU to use, you can check it with the command $ nvidia-smi """ import argparse import os import mlflow import pickle import numpy as np import pandas as pd import tensorflow as tf from sklearn.model_selection import train_test_split from tensorflow.keras import layers, models import warnings warnings.filterwarnings("ignore") from auxiliary import process_features, load_dataset, build_columns, log_dir_name TARGET_COL = 'AdoptionSpeed' def read_args(): parser = argparse.ArgumentParser( description='Training a MLP on the petfinder dataset') # Here you have some examples of classifier parameters. You can add # more arguments or change these if you need to. parser.add_argument('--experiment_name', type=str, default='Base model', help='Name of the experiment, used in mlflow.') parser.add_argument('--dataset_dir', default='../petfinder_dataset', type=str, help='Directory with the training and test files.') parser.add_argument('--hidden_layer_sizes', nargs='+', default=[100], type=int, help='Number of hidden units of each hidden layer.') parser.add_argument('--epochs', default=50, type=int, help='Number of epochs to train.') parser.add_argument('--dropout', nargs='+', default=[0.5], type=float, help='Dropout ratio for every layer.') parser.add_argument('--batch_size', type=int, default=32, help='Number of instances in each batch.') parser.add_argument('--learning_rate', default=1e-3, type=float, help='Learning rate.') args = parser.parse_args() assert len(args.hidden_layer_sizes) == len(args.dropout) return args def print_args(args): print('-------------------------------------------') print('PARAMS ------------------------------------') print('-------------------------------------------') print('--experiment_name ', args.experiment_name) print('--dataset_dir ', args.dataset_dir) print('--epochs ', args.epochs) print('--hidden_layer_sizes', args.hidden_layer_sizes) print('--dropout ', args.dropout) print('--batch_size ', args.batch_size) print('--learning_rate ', args.learning_rate) print('-------------------------------------------') def main(): args = read_args() print_args(args) experiment_name = args.experiment_name batch_size = args.batch_size learning_rate = args.learning_rate hidden_layer_sizes = args.hidden_layer_sizes dropout = args.dropout epochs = args.epochs ### Output directory dir_name = log_dir_name(args) print() print(dir_name) print() output_dir = os.path.join('experiments', experiment_name, dir_name) if not os.path.exists(output_dir): os.makedirs(output_dir) dataset, dev_dataset, test_dataset = load_dataset(args.dataset_dir) nlabels = dataset[TARGET_COL].unique().shape[0] columns = [ 'Gender', 'Color1', 'Vaccinated', 'Dewormed', 'Breed1', 'Age', 'Fee', 'Quantity'] one_hot_columns, embedded_columns, numeric_columns = build_columns(dataset, columns) # TODO (optional) put these three types of columns in the same dictionary with "column types" X_train, y_train = process_features(dataset, one_hot_columns, numeric_columns, embedded_columns) direct_features_input_shape = (X_train['direct_features'].shape[1],) X_dev, y_dev = process_features(dev_dataset, one_hot_columns, numeric_columns, embedded_columns) ########################################################################################################### ### TODO: Shuffle train dataset - Done 
########################################################################################################### shuffle_len = X_train['direct_features'].shape[0] train_ds = tf.data.Dataset.from_tensor_slices((X_train, y_train)).shuffle(shuffle_len).batch(batch_size) ########################################################################################################### dev_ds = tf.data.Dataset.from_tensor_slices((X_dev, y_dev)).batch(batch_size) test_ds = tf.data.Dataset.from_tensor_slices(process_features( test_dataset, one_hot_columns, numeric_columns, embedded_columns, test=True)[0]).batch(batch_size) ########################################################################################################### ### TODO: Build the Keras model - Done ########################################################################################################### tf.keras.backend.clear_session() # Add one input and one embedding for each embedded column embedding_layers = [] inputs = [] for embedded_col, max_value in embedded_columns.items(): input_layer = layers.Input(shape=(1,), name=embedded_col) inputs.append(input_layer) # Define the embedding layer embedding_size = int(max_value / 4) embedding_layers.append( tf.squeeze(layers.Embedding(input_dim=max_value, output_dim=embedding_size)(input_layer), axis=-2)) print('Adding embedding of size {} for layer {}'.format(embedding_size, embedded_col)) # Add the direct features already calculated direct_features_input = layers.Input(shape=direct_features_input_shape, name='direct_features') inputs.append(direct_features_input) # Concatenate everything together features = layers.concatenate(embedding_layers + [direct_features_input]) denses = [] dense1 = layers.Dense(hidden_layer_sizes[0], activation='relu')(features) denses.append(dense1) if len(hidden_layer_sizes) > 1: for hidden_layer_size in hidden_layer_sizes[1:]: dense = layers.Dense(hidden_layer_size, activation='relu')(denses[-1]) denses.append(dense) output_layer = layers.Dense(nlabels, activation='softmax')(dense1) model = models.Model(inputs=inputs, outputs=output_layer) ########################################################################################################### ########################################################################################################### ### TODO: Fit the model - Done ########################################################################################################### mlflow.set_experiment(experiment_name) optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate) model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy']) logdir = "logs/scalars/" + dir_name tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=logdir) with mlflow.start_run(nested=True): # Log model hiperparameters first mlflow.log_param('hidden_layer_size', hidden_layer_sizes) mlflow.log_param('dropout', dropout) mlflow.log_param('embedded_columns', embedded_columns) mlflow.log_param('one_hot_columns', one_hot_columns) mlflow.log_param('numeric_columns', numeric_columns) # Not using these yet mlflow.log_param('epochs', epochs) mlflow.log_param('batch_size', batch_size) mlflow.log_param('learning_rate', learning_rate) # Train history = model.fit(train_ds, epochs=epochs, validation_data=dev_ds, callbacks=[tensorboard_callback]) ####################################################################################################### ### TODO: analyze history to see if model converges/overfits 
####################################################################################################### output_csv = os.path.join(output_dir, 'history.pickle') with open(output_csv, 'bw') as f: pickle.dump(history.history, f) ####################################################################################################### ####################################################################################################### ### TODO: Evaluate the model, calculating the metrics. - Done ####################################################################################################### loss, accuracy = model.evaluate(dev_ds) print("*** Dev loss: {} - accuracy: {}".format(loss, accuracy)) mlflow.log_metric('loss', loss) mlflow.log_metric('accuracy', accuracy) predictions = model.predict(test_ds) ####################################################################################################### ####################################################################################################### ### TODO: Convert predictions to classes - Done ####################################################################################################### prediction_classes = np.argmax(predictions, axis=1) ####################################################################################################### ####################################################################################################### ### TODO: Save the results for submission - Done ####################################################################################################### output_csv = os.path.join(output_dir, 'submit.csv') submissions = pd.DataFrame(prediction_classes, columns=[TARGET_COL], index=test_dataset.PID) submissions.to_csv(output_csv) ####################################################################################################### ########################################################################################################### print('All operations completed') if __name__ == '__main__': main()
[((25, 0, 25, 33), 'warnings.filterwarnings', 'warnings.filterwarnings', ({(25, 24, 25, 32): '"""ignore"""'}, {}), "('ignore')", False, 'import warnings\n'), ((33, 13, 34, 62), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((82, 15, 82, 33), 'auxiliary.log_dir_name', 'log_dir_name', ({(82, 28, 82, 32): 'args'}, {}), '(args)', False, 'from auxiliary import process_features, load_dataset, build_columns, log_dir_name\n'), ((86, 17, 86, 71), 'os.path.join', 'os.path.join', ({(86, 30, 86, 43): '"""experiments"""', (86, 45, 86, 60): 'experiment_name', (86, 62, 86, 70): 'dir_name'}, {}), "('experiments', experiment_name, dir_name)", False, 'import os\n'), ((90, 41, 90, 71), 'auxiliary.load_dataset', 'load_dataset', ({(90, 54, 90, 70): 'args.dataset_dir'}, {}), '(args.dataset_dir)', False, 'from auxiliary import process_features, load_dataset, build_columns, log_dir_name\n'), ((97, 57, 97, 88), 'auxiliary.build_columns', 'build_columns', ({(97, 71, 97, 78): 'dataset', (97, 80, 97, 87): 'columns'}, {}), '(dataset, columns)', False, 'from auxiliary import process_features, load_dataset, build_columns, log_dir_name\n'), ((100, 23, 100, 100), 'auxiliary.process_features', 'process_features', ({(100, 40, 100, 47): 'dataset', (100, 49, 100, 64): 'one_hot_columns', (100, 66, 100, 81): 'numeric_columns', (100, 83, 100, 99): 'embedded_columns'}, {}), '(dataset, one_hot_columns, numeric_columns, embedded_columns)', False, 'from auxiliary import process_features, load_dataset, build_columns, log_dir_name\n'), ((102, 19, 102, 100), 'auxiliary.process_features', 'process_features', ({(102, 36, 102, 47): 'dev_dataset', (102, 49, 102, 64): 'one_hot_columns', (102, 66, 102, 81): 'numeric_columns', (102, 83, 102, 99): 'embedded_columns'}, {}), '(dev_dataset, one_hot_columns, numeric_columns,\n embedded_columns)', False, 'from auxiliary import process_features, load_dataset, build_columns, log_dir_name\n'), ((120, 4, 120, 36), 'tensorflow.keras.backend.clear_session', 'tf.keras.backend.clear_session', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((135, 28, 135, 99), 'tensorflow.keras.layers.Input', 'layers.Input', (), '', False, 'from tensorflow.keras import layers, models\n'), ((139, 15, 139, 77), 'tensorflow.keras.layers.concatenate', 'layers.concatenate', ({(139, 34, 139, 76): 'embedding_layers + [direct_features_input]'}, {}), '(embedding_layers + [direct_features_input])', False, 'from tensorflow.keras import layers, models\n'), ((150, 12, 150, 61), 'tensorflow.keras.models.Model', 'models.Model', (), '', False, 'from tensorflow.keras import layers, models\n'), ((157, 4, 157, 42), 'mlflow.set_experiment', 'mlflow.set_experiment', ({(157, 26, 157, 41): 'experiment_name'}, {}), '(experiment_name)', False, 'import mlflow\n'), ((159, 16, 159, 69), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', (), '', True, 'import tensorflow as tf\n'), ((164, 27, 164, 73), 'tensorflow.keras.callbacks.TensorBoard', 'tf.keras.callbacks.TensorBoard', (), '', True, 'import tensorflow as tf\n'), ((87, 11, 87, 37), 'os.path.exists', 'os.path.exists', ({(87, 26, 87, 36): 'output_dir'}, {}), '(output_dir)', False, 'import os\n'), ((88, 8, 88, 31), 'os.makedirs', 'os.makedirs', ({(88, 20, 88, 30): 'output_dir'}, {}), '(output_dir)', False, 'import os\n'), ((126, 22, 126, 65), 'tensorflow.keras.layers.Input', 'layers.Input', (), '', False, 'from tensorflow.keras import layers, models\n'), ((142, 13, 142, 67), 'tensorflow.keras.layers.Dense', 'layers.Dense', (), '', False, 
'from tensorflow.keras import layers, models\n'), ((148, 19, 148, 62), 'tensorflow.keras.layers.Dense', 'layers.Dense', (), '', False, 'from tensorflow.keras import layers, models\n'), ((166, 9, 166, 38), 'mlflow.start_run', 'mlflow.start_run', (), '', False, 'import mlflow\n'), ((168, 8, 168, 65), 'mlflow.log_param', 'mlflow.log_param', ({(168, 25, 168, 44): '"""hidden_layer_size"""', (168, 46, 168, 64): 'hidden_layer_sizes'}, {}), "('hidden_layer_size', hidden_layer_sizes)", False, 'import mlflow\n'), ((169, 8, 169, 44), 'mlflow.log_param', 'mlflow.log_param', ({(169, 25, 169, 34): '"""dropout"""', (169, 36, 169, 43): 'dropout'}, {}), "('dropout', dropout)", False, 'import mlflow\n'), ((170, 8, 170, 62), 'mlflow.log_param', 'mlflow.log_param', ({(170, 25, 170, 43): '"""embedded_columns"""', (170, 45, 170, 61): 'embedded_columns'}, {}), "('embedded_columns', embedded_columns)", False, 'import mlflow\n'), ((171, 8, 171, 60), 'mlflow.log_param', 'mlflow.log_param', ({(171, 25, 171, 42): '"""one_hot_columns"""', (171, 44, 171, 59): 'one_hot_columns'}, {}), "('one_hot_columns', one_hot_columns)", False, 'import mlflow\n'), ((172, 8, 172, 60), 'mlflow.log_param', 'mlflow.log_param', ({(172, 25, 172, 42): '"""numeric_columns"""', (172, 44, 172, 59): 'numeric_columns'}, {}), "('numeric_columns', numeric_columns)", False, 'import mlflow\n'), ((173, 8, 173, 42), 'mlflow.log_param', 'mlflow.log_param', ({(173, 25, 173, 33): '"""epochs"""', (173, 35, 173, 41): 'epochs'}, {}), "('epochs', epochs)", False, 'import mlflow\n'), ((174, 8, 174, 50), 'mlflow.log_param', 'mlflow.log_param', ({(174, 25, 174, 37): '"""batch_size"""', (174, 39, 174, 49): 'batch_size'}, {}), "('batch_size', batch_size)", False, 'import mlflow\n'), ((175, 8, 175, 56), 'mlflow.log_param', 'mlflow.log_param', ({(175, 25, 175, 40): '"""learning_rate"""', (175, 42, 175, 55): 'learning_rate'}, {}), "('learning_rate', learning_rate)", False, 'import mlflow\n'), ((186, 21, 186, 63), 'os.path.join', 'os.path.join', ({(186, 34, 186, 44): 'output_dir', (186, 46, 186, 62): '"""history.pickle"""'}, {}), "(output_dir, 'history.pickle')", False, 'import os\n'), ((197, 8, 197, 39), 'mlflow.log_metric', 'mlflow.log_metric', ({(197, 26, 197, 32): '"""loss"""', (197, 34, 197, 38): 'loss'}, {}), "('loss', loss)", False, 'import mlflow\n'), ((198, 8, 198, 47), 'mlflow.log_metric', 'mlflow.log_metric', ({(198, 26, 198, 36): '"""accuracy"""', (198, 38, 198, 46): 'accuracy'}, {}), "('accuracy', accuracy)", False, 'import mlflow\n'), ((206, 29, 206, 59), 'numpy.argmax', 'np.argmax', (), '', True, 'import numpy as np\n'), ((213, 21, 213, 59), 'os.path.join', 'os.path.join', ({(213, 34, 213, 44): 'output_dir', (213, 46, 213, 58): '"""submit.csv"""'}, {}), "(output_dir, 'submit.csv')", False, 'import os\n'), ((214, 22, 214, 100), 'pandas.DataFrame', 'pd.DataFrame', (), '', True, 'import pandas as pd\n'), ((112, 13, 112, 63), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', ({(112, 48, 112, 62): '(X_dev, y_dev)'}, {}), '((X_dev, y_dev))', True, 'import tensorflow as tf\n'), ((188, 12, 188, 43), 'pickle.dump', 'pickle.dump', ({(188, 24, 188, 39): 'history.history', (188, 41, 188, 42): 'f'}, {}), '(history.history, f)', False, 'import pickle\n'), ((146, 20, 146, 70), 'tensorflow.keras.layers.Dense', 'layers.Dense', (), '', False, 'from tensorflow.keras import layers, models\n'), ((109, 15, 109, 69), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', ({(109, 50, 109, 68): '(X_train, 
y_train)'}, {}), '((X_train, y_train))', True, 'import tensorflow as tf\n'), ((113, 49, 114, 84), 'auxiliary.process_features', 'process_features', (), '', False, 'from auxiliary import process_features, load_dataset, build_columns, log_dir_name\n'), ((131, 23, 131, 87), 'tensorflow.keras.layers.Embedding', 'layers.Embedding', (), '', False, 'from tensorflow.keras import layers, models\n')]
catmaid/catpy
catpy/applications/export.py
481d87591a6dfaedef2767dcddcbed7185ecc8b8
# -*- coding: utf-8 -*- from __future__ import absolute_import from pkg_resources import parse_version from warnings import warn from copy import deepcopy import networkx as nx from networkx.readwrite import json_graph from catpy.applications.base import CatmaidClientApplication NX_VERSION_INFO = parse_version(nx.__version__)._key[1] err_msg = ( "Tried to treat the edge's source/target fields as indices into the list of nodes, but failed. " "See issue #26 [1]. " "Has CATMAID upgraded to networkx 2.x? [2]\n\n" "[1]: https://github.com/catmaid/catpy/issues/26\n" "[2]: https://github.com/catmaid/CATMAID/blob/master/django/requirements.txt" ) def convert_nodelink_data(jso): """NetworkX serialises graphs differently in v1.x and v2.x. This converts v1-style data (as emitted by CATMAID) to v2-style data. See issue #26 https://github.com/catmaid/catpy/issues/26 Parameters ---------- jso : dict Returns ------- dict """ if NX_VERSION_INFO < (2, 0): warn( "You are converting networkx v1-style JSON (emitted by CATMAID) to v2-style JSON," " but you are using networkx v1" ) out = deepcopy(jso) for edge in out["links"]: for label in ["source", "target"]: try: edge[label] = out["nodes"][edge[label]]["id"] except (KeyError, IndexError): raise RuntimeError(err_msg) return out class ExportWidget(CatmaidClientApplication): def get_swc(self, skeleton_id, linearize_ids=False): """ Get a single skeleton in SWC format. Parameters ---------- skeleton_id : int or str linearize_ids : bool Returns ------- str """ return self.get( (self.project_id, "skeleton", skeleton_id, "swc"), {"linearize_ids": "true" if linearize_ids else "false"}, ) def get_connector_archive(self, *args, **kwargs): """Not implemented: requires an async job""" raise NotImplementedError("Requires an async job") def get_treenode_archive(self, *args, **kwargs): """Not implemented: requires an async job""" raise NotImplementedError("Requires an async job") def get_networkx_dict(self, *skeleton_ids): """ Get the data for a networkx graph of the given skeletons in node-link format. In networkx 1.x, as used by CATMAID and therefore returned by this method, "source" and "target" in the dicts in "links" refer to nodes by their indices in the "nodes" array. See ``convert_nodelink_data`` function to convert into networkx 2.x-compatible format. https://networkx.readthedocs.io/en/networkx-1.11/reference/generated/networkx.readwrite.json_graph.node_link_data.html Parameters ---------- skeleton_ids : array-like of (int or str) Returns ------- dict """ return self.post( (self.project_id, "graphexport", "json"), data={"skeleton_list": list(skeleton_ids)}, ) def get_networkx(self, *skeleton_ids): """ Get a networkx MultiDiGraph of the given skeletons. Parameters ---------- skeleton_ids : array-like of (int or str) Returns ------- networkx.MultiDiGraph """ data = self.get_networkx_dict(*skeleton_ids) if NX_VERSION_INFO >= (2, 0): data = convert_nodelink_data(data) return json_graph.node_link_graph(data, directed=True) def get_neuroml(self, skeleton_ids, skeleton_inputs=tuple()): """ Get NeuroML v1.8.1 (level 3, NetworkML) for the given skeletons, possibly with their input synapses constrained to another set of skeletons. N.B. If len(skeleton_ids) > 1, skeleton_inputs will be ignored and only synapses within the first skeleton set will be used in the model. 
Parameters ---------- skeleton_ids : array-like Skeletons whose NeuroML to return skeleton_inputs : array-like, optional If specified, only input synapses from these skeletons will be added to the NeuroML Returns ------- str NeuroML output string """ data = {"skids": list(skeleton_ids)} if skeleton_inputs: if len(skeleton_ids) > 1: warn( "More than one skeleton ID was selected: ignoring skeleton input constraints" ) else: data["inputs"] = list(skeleton_inputs) return self.post((self.project_id, "neuroml", "neuroml_level3_v181"), data=data) def get_treenode_and_connector_geometry(self, *skeleton_ids): """ Get the treenode and connector information for the given skeletons. The returned dictionary will be of the form { "skeletons": { skeleton_id1: { "treenodes": { treenode_id1: { "location": [x, y, z], "parent_id": id_of_parent_treenode }, treenode_id2: ... }, "connectors": { connector_id1: { "location": [x, y, z], "presynaptic_to": [list, of, treenode, ids], "postsynaptic_to": [list, of, treenode, ids] }, connector_id2: ... } }, skeleton_id2: ... } } Parameters ---------- skeleton_ids : array-like of (int or str) Returns ------- dict """ # todo: factor API call into MorphologyFetcher skeletons = dict() warnings = set() relation_names = {0: "presnaptic_to", 1: "postsynaptic_to"} for skeleton_id in skeleton_ids: data = self.get( "{}/{}/1/0/compact-skeleton".format(self.project_id, skeleton_id) ) skeleton = {"treenodes": dict(), "connectors": dict()} for treenode in data[0]: skeleton["treenodes"][int(treenode[0])] = { "location": treenode[3:6], "parent_id": None if treenode[1] is None else int(treenode[1]), } for connector in data[1]: # NOT the database relation ID # {pre: 0, post: 1, gj: 2} relation_number = connector[2] if relation_number not in relation_names: continue conn_id = int(connector[1]) if conn_id not in skeleton["connectors"]: skeleton["connectors"][conn_id] = { rn: [] for rn in relation_names.values() } skeleton["connectors"][conn_id]["location"] = connector[3:6] skeleton["connectors"][conn_id][relation_names[relation_number]].append( connector[0] ) skeletons[int(skeleton_id)] = skeleton warn( "Skeleton representations contained some unknown treenode->connector relation IDs:\n\t" "\n\t".join(sorted(warnings)) ) return {"skeletons": skeletons}
[((47, 10, 47, 23), 'copy.deepcopy', 'deepcopy', ({(47, 19, 47, 22): 'jso'}, {}), '(jso)', False, 'from copy import deepcopy\n'), ((14, 18, 14, 47), 'pkg_resources.parse_version', 'parse_version', ({(14, 32, 14, 46): 'nx.__version__'}, {}), '(nx.__version__)', False, 'from pkg_resources import parse_version\n'), ((42, 8, 45, 9), 'warnings.warn', 'warn', ({(43, 12, 44, 44): '"""You are converting networkx v1-style JSON (emitted by CATMAID) to v2-style JSON, but you are using networkx v1"""'}, {}), "(\n 'You are converting networkx v1-style JSON (emitted by CATMAID) to v2-style JSON, but you are using networkx v1'\n )", False, 'from warnings import warn\n'), ((123, 15, 123, 62), 'networkx.readwrite.json_graph.node_link_graph', 'json_graph.node_link_graph', (), '', False, 'from networkx.readwrite import json_graph\n'), ((150, 16, 152, 17), 'warnings.warn', 'warn', ({(151, 20, 151, 97): '"""More than one skeleton ID was selected: ignoring skeleton input constraints"""'}, {}), "(\n 'More than one skeleton ID was selected: ignoring skeleton input constraints'\n )", False, 'from warnings import warn\n')]
Indexical-Metrics-Measure-Advisory/watchmen
packages/watchmen-data-kernel/src/watchmen_data_kernel/meta/external_writer_service.py
c54ec54d9f91034a38e51fd339ba66453d2c7a6d
from typing import Optional

from watchmen_auth import PrincipalService
from watchmen_data_kernel.cache import CacheService
from watchmen_data_kernel.common import DataKernelException
from watchmen_data_kernel.external_writer import find_external_writer_create, register_external_writer_creator
from watchmen_meta.common import ask_meta_storage, ask_snowflake_generator
from watchmen_meta.system import ExternalWriterService as ExternalWriterStorageService
from watchmen_model.common import ExternalWriterId
from watchmen_model.system import ExternalWriter


def register_external_writer(external_writer: ExternalWriter) -> None:
	create = find_external_writer_create(external_writer.type)
	if create is None:
		raise DataKernelException(f'Creator not found for external writer[{external_writer.dict()}].')
	register_external_writer_creator(external_writer.writerCode, create())


class ExternalWriterService:
	def __init__(self, principal_service: PrincipalService):
		self.principalService = principal_service

	def find_by_id(self, writer_id: ExternalWriterId) -> Optional[ExternalWriter]:
		external_writer = CacheService.external_writer().get(writer_id)
		if external_writer is not None:
			if external_writer.tenantId != self.principalService.get_tenant_id():
				raise DataKernelException(
					f'External writer[id={writer_id}] not belongs to '
					f'current tenant[id={self.principalService.get_tenant_id()}].')
			register_external_writer(external_writer)
			return external_writer

		storage_service = ExternalWriterStorageService(
			ask_meta_storage(), ask_snowflake_generator(), self.principalService)
		storage_service.begin_transaction()
		try:
			# noinspection PyTypeChecker
			external_writer: ExternalWriter = storage_service.find_by_id(writer_id)
			if external_writer is None:
				return None

			CacheService.external_writer().put(external_writer)
			register_external_writer(external_writer)
			return external_writer
		finally:
			storage_service.close_transaction()
[((14, 10, 14, 59), 'watchmen_data_kernel.external_writer.find_external_writer_create', 'find_external_writer_create', ({(14, 38, 14, 58): 'external_writer.type'}, {}), '(external_writer.type)', False, 'from watchmen_data_kernel.external_writer import find_external_writer_create, register_external_writer_creator\n'), ((35, 3, 35, 21), 'watchmen_meta.common.ask_meta_storage', 'ask_meta_storage', ({}, {}), '()', False, 'from watchmen_meta.common import ask_meta_storage, ask_snowflake_generator\n'), ((35, 23, 35, 48), 'watchmen_meta.common.ask_snowflake_generator', 'ask_snowflake_generator', ({}, {}), '()', False, 'from watchmen_meta.common import ask_meta_storage, ask_snowflake_generator\n'), ((25, 20, 25, 50), 'watchmen_data_kernel.cache.CacheService.external_writer', 'CacheService.external_writer', ({}, {}), '()', False, 'from watchmen_data_kernel.cache import CacheService\n'), ((43, 3, 43, 33), 'watchmen_data_kernel.cache.CacheService.external_writer', 'CacheService.external_writer', ({}, {}), '()', False, 'from watchmen_data_kernel.cache import CacheService\n')]
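The find_by_id method above is a cache-aside lookup: return the cached writer when present, otherwise load it from storage and warm the cache. A minimal sketch of that pattern follows, with illustrative names rather than the watchmen API:

_cache = {}

def find_by_id(writer_id, load_from_storage):
    cached = _cache.get(writer_id)
    if cached is not None:
        return cached
    loaded = load_from_storage(writer_id)
    if loaded is not None:
        _cache[writer_id] = loaded  # warm the cache for the next lookup
    return loaded

print(find_by_id('w1', lambda wid: {'writerId': wid, 'type': 'standard-writer'}))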
AlbertoAlfredo/exercicios-cursos
udemy-python/mediaponderada.py
792096ad1f853188adec8fc3e5c629742c8dd7ab
# Weighted average (média ponderada) of two grades entered by the user.
nota1 = float(input('Digite a nota da primeira nota '))  # first grade
peso1 = float(input('Digite o peso da primeira nota '))  # weight of the first grade
nota2 = float(input('Digite a nota da segunda nota '))   # second grade
peso2 = float(input('Digite o peso da segunda nota '))   # weight of the second grade

# Weighted mean: sum of grade * weight divided by the total weight.
media = (nota1 * peso1 + nota2 * peso2) / (peso1 + peso2)
print('A média das duas notas é:', media)
[]
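A worked example of the weighted-mean formula used above, with made-up grades and weights:

nota1, peso1 = 8.0, 2.0  # grade 8 with weight 2
nota2, peso2 = 6.0, 3.0  # grade 6 with weight 3
media = (nota1 * peso1 + nota2 * peso2) / (peso1 + peso2)
print(media)  # (16.0 + 18.0) / 5.0 = 6.8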
chasebrewsky/scrywarden
scrywarden/module.py
c6a5a81d14016ca58625df68594ef52dd328a0dd
from importlib import import_module
from typing import Any


def import_string(path: str) -> Any:
    """Imports a dotted path name and returns the class/attribute.

    Parameters
    ----------
    path: str
        Dotted module path to retrieve.

    Returns
    -------
    Class/attribute at the given import path.

    Raises
    ------
    ImportError
        If the path does not exist.
    """
    try:
        module_path, class_name = path.rsplit('.', 1)
    except ValueError as error:
        raise ImportError(
            f"{path} does not look like a module path",
        ) from error
    module = import_module(module_path)
    try:
        return getattr(module, class_name)
    except AttributeError as error:
        raise ImportError(
            f"Module '{module_path}' does not define a '{class_name}' "
            "attribute/class",
        ) from error
[((28, 13, 28, 39), 'importlib.import_module', 'import_module', ({(28, 27, 28, 38): 'module_path'}, {}), '(module_path)', False, 'from importlib import import_module\n')]
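A small usage sketch of the dotted-path resolution that import_string above performs, written against the standard library only so it runs on its own ('collections.OrderedDict' is just an example path):

from importlib import import_module

path = 'collections.OrderedDict'
module_path, attr_name = path.rsplit('.', 1)
obj = getattr(import_module(module_path), attr_name)
print(obj)  # <class 'collections.OrderedDict'>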
learningequality/klorimin
examples/oldexamples/sample_program.py
c569cd4048ac670bc55a83f4fdda0b818c7f626e
#!/usr/bin/env python import json import os import re from enum import Enum from os.path import join from le_utils.constants import content_kinds from le_utils.constants import exercises from le_utils.constants import file_formats from le_utils.constants import licenses from ricecooker.chefs import SushiChef from ricecooker.classes import files from ricecooker.classes import nodes from ricecooker.classes import questions from ricecooker.classes.licenses import get_license from ricecooker.exceptions import InvalidFormatException from ricecooker.exceptions import raise_for_invalid_channel from ricecooker.exceptions import UnknownContentKindError from ricecooker.exceptions import UnknownFileTypeError from ricecooker.exceptions import UnknownQuestionTypeError # CHANNEL SETTINGS SOURCE_DOMAIN = "<yourdomain.org>" # content provider's domain SOURCE_ID = "<yourid>" # an alphanumeric channel ID CHANNEL_TITLE = "Testing Ricecooker Channel" # a humand-readbale title CHANNEL_LANGUAGE = "en" # language code of channel # LOCAL DIRS EXAMPLES_DIR = os.path.dirname(os.path.realpath(__file__)) DATA_DIR = os.path.join(EXAMPLES_DIR, "data") CONTENT_DIR = os.path.join(EXAMPLES_DIR, "content") # # A utility function to manage absolute paths that allows us to refer to files # in the CONTENT_DIR (subdirectory `content/' in current directory) using content:// def get_abspath(path, content_dir=CONTENT_DIR): """ Replaces `content://` with absolute path of `content_dir`. By default looks for content in subdirectory `content` in current directory. """ if path: file = re.search("content://(.+)", path) if file: return os.path.join(content_dir, file.group(1)) return path class FileTypes(Enum): """Enum containing all file types Ricecooker can have Steps: AUDIO_FILE: mp3 files THUMBNAIL: png, jpg, or jpeg files DOCUMENT_FILE: pdf files """ AUDIO_FILE = 0 THUMBNAIL = 1 DOCUMENT_FILE = 2 VIDEO_FILE = 3 YOUTUBE_VIDEO_FILE = 4 VECTORIZED_VIDEO_FILE = 5 VIDEO_THUMBNAIL = 6 YOUTUBE_VIDEO_THUMBNAIL_FILE = 7 HTML_ZIP_FILE = 8 SUBTITLE_FILE = 9 TILED_THUMBNAIL_FILE = 10 UNIVERSAL_SUBS_SUBTITLE_FILE = 11 BASE64_FILE = 12 WEB_VIDEO_FILE = 13 H5P_FILE = 14 FILE_TYPE_MAPPING = { content_kinds.AUDIO: { file_formats.MP3: FileTypes.AUDIO_FILE, file_formats.PNG: FileTypes.THUMBNAIL, file_formats.JPG: FileTypes.THUMBNAIL, file_formats.JPEG: FileTypes.THUMBNAIL, }, content_kinds.DOCUMENT: { file_formats.PDF: FileTypes.DOCUMENT_FILE, file_formats.PNG: FileTypes.THUMBNAIL, file_formats.JPG: FileTypes.THUMBNAIL, file_formats.JPEG: FileTypes.THUMBNAIL, }, content_kinds.HTML5: { file_formats.HTML5: FileTypes.HTML_ZIP_FILE, file_formats.PNG: FileTypes.THUMBNAIL, file_formats.JPG: FileTypes.THUMBNAIL, file_formats.JPEG: FileTypes.THUMBNAIL, }, content_kinds.H5P: { file_formats.H5P: FileTypes.H5P_FILE, file_formats.PNG: FileTypes.THUMBNAIL, file_formats.JPG: FileTypes.THUMBNAIL, file_formats.JPEG: FileTypes.THUMBNAIL, }, content_kinds.VIDEO: { file_formats.MP4: FileTypes.VIDEO_FILE, file_formats.VTT: FileTypes.SUBTITLE_FILE, file_formats.PNG: FileTypes.THUMBNAIL, file_formats.JPG: FileTypes.THUMBNAIL, file_formats.JPEG: FileTypes.THUMBNAIL, }, content_kinds.EXERCISE: { file_formats.PNG: FileTypes.THUMBNAIL, file_formats.JPG: FileTypes.THUMBNAIL, file_formats.JPEG: FileTypes.THUMBNAIL, }, } def guess_file_type(kind, filepath=None, youtube_id=None, web_url=None, encoding=None): """guess_file_class: determines what file the content is Args: filepath (str): filepath of file to check Returns: string indicating file's class """ if youtube_id: return 
FileTypes.YOUTUBE_VIDEO_FILE elif web_url: return FileTypes.WEB_VIDEO_FILE elif encoding: return FileTypes.BASE64_FILE else: ext = os.path.splitext(filepath)[1][1:].lower() if kind in FILE_TYPE_MAPPING and ext in FILE_TYPE_MAPPING[kind]: return FILE_TYPE_MAPPING[kind][ext] return None def guess_content_kind(path=None, web_video_data=None, questions=None): """guess_content_kind: determines what kind the content is Args: files (str or list): files associated with content Returns: string indicating node's kind """ # If there are any questions, return exercise if questions and len(questions) > 0: return content_kinds.EXERCISE # See if any files match a content kind if path: ext = os.path.splitext(path)[1][1:].lower() if ext in content_kinds.MAPPING: return content_kinds.MAPPING[ext] raise InvalidFormatException( "Invalid file type: Allowed formats are {0}".format( [key for key, value in content_kinds.MAPPING.items()] ) ) elif web_video_data: return content_kinds.VIDEO else: return content_kinds.TOPIC # LOAD sample_tree.json (as dict) with open(join(DATA_DIR, "sample_tree.json"), "r") as json_file: SAMPLE_TREE = json.load(json_file) # LOAD JSON DATA (as string) FOR PERSEUS QUESTIONS SAMPLE_PERSEUS_1_JSON = open(join(DATA_DIR, "sample_perseus01.json"), "r").read() # SAMPLE_PERSEUS_2_JSON = open(join(DATA_DIR,'sample_perseus02.json'),'r').read() # ADD EXERCISES EXERCISES_NODES = [ { "title": "Rice Cookers", "id": "d98752", "description": "Start cooking rice today!", "children": [ { "title": "Rice Chef", "id": "6cafe2", "author": "Revision 3", "description": "Become a master rice cooker", "file": "https://ia600209.us.archive.org/27/items/RiceChef/Rice Chef.mp4", "license": licenses.CC_BY_NC_SA, "copyright_holder": "Learning Equality", "files": [ { "path": "https://ia600209.us.archive.org/27/items/RiceChef/Rice Chef.mp4" }, { "encoding": 
"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAMAAAAoLQ9TAAAABGdBTUEAALGPC/xhBQAAAAFzUkdCAK7OHOkAAAAgY0hSTQAAeiYAAICEAAD6AAAAgOgAAHUwAADqYAAAOpgAABdwnLpRPAAAAmFQTFRF////wN/2I0FiNFFuAAAAxdvsN1RxV3KMnrPFFi9PAB1CVG+KXHaQI0NjttLrEjVchIF4AyNGZXB5V087UUw/EzBMpqWeb2thbmpgpqOceXVsERgfTWeADg8QCAEApKGZBAYIop+XCQkIhZ+2T2mEg5mtnK/AobPDkKO2YXqTAAAAJkBetMraZH2VprjIz9zm4enw7/T47fP3wc7ae5GnAAAAN1BsSmSApLfI1ODq2OHp5Orv8PL09vb38fb5wM/bbISbrL/PfZSpxNPgzdnj2+Pr5evw6+/z6e3w3ePp2OPsma2/ABM5Q197ABk4jKG1yNfjytfh1uDo3eXs4unv1t/nztrjqbzMTmmEXneRES1Ji6CzxtXixdPfztrk1N/n1+Dp1d/oz9vlxdPeq73NVG+KYnyUAAAddIuhwtPhvMzaxtTgytfiy9jjwtHewtHenbDCHT1fS2eCRV52qr7PvM3cucrYv87cv8/cvMzavc3bucvacoyl////ByE8WnKKscXWv9Hguszbu8zbvc7dtcnaiJqrcHZ4f4SHEh0nEitFTWZ+hJqumrDDm7HDj6W5dI2lYGJfmZeQl5SNAAAADRciAAATHjdSOVNsPlhyLklmKCYjW1lUlpOLlZKLFSAqWXSOBQAADA0NAAAAHh0bWlhSk5CIk5CIBAYJDRQbERcdDBAUBgkMAAAEDg4NAAAAHBsZWFZQkY6GAAAAAAAABQUEHBsZAAAAGxoYVlROko+GBAQDZ2RdAAAAGhkYcW9oAgICAAAAExMSDQwLjouDjYuDioiAiIV9hoN7VlRO////Z2DcYwAAAMR0Uk5TAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACRKrJyrZlBQECaNXCsKaqypMGAUDcu7Gpn5mf03gDo8+4saiipKq3xRMBH83Eu7OsqbG61DkDMdbFvrizsbK3wNs9Ax/VysS/vLq/zNwfArDhxMfExMXE3pMCMe7byMjIzd33ZgYGQtnz6+zooeJXBQMFD1yHejZ1+l8FBgEELlOR+GgFCQ0SGxoBGFKg+m0BBwEMR6v+hAEDM6nRASWURVuYQQ4AAAABYktHRACIBR1IAAAACXBIWXMAAAjLAAAIywGEuOmJAAABCklEQVQY02NgUGZUUVVT19DUYtBmYmZhYdBh1dXTNzA0MjYxZTFjAwqwm1tYWlnb2NrZO3A4cgIFGJycXVzd3D08vbx9uHyBAn7+AYFBwSEhoWHhEdyRQIGo6JjYuPiExKTklFSeNKBAekZmVnZObk5efkEhbxFQgK+4pLSsvKKyqrqGoZZfgIVBsK6+obGpuaW1rV2oQ1hEgKFTtKu7p7evf8LEI5PEJotLMEyZyjJt+oyZsxhmzzk6V3KeFIO01vwFMrJyCxctXrL02DL55QwsClorVq5avWbtuvUbNh7fpMjAwsKyWWvLFJatStu279h5YhdIAAJ2s+zZu+/kfoQAy4HNLAcPHQYA5YtSi+k2/WkAAAAldEVYdGRhdGU6Y3JlYXRlADIwMTMtMTAtMDRUMTk6Mzk6MjEtMDQ6MDAwU1uYAAAAJXRFWHRkYXRlOm1vZGlmeQAyMDEzLTEwLTA0VDE5OjM5OjIxLTA0OjAwQQ7jJAAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoAAAAASUVORK5CYII=" }, ], }, { "title": "Rice Exercise", "id": "6cafe3", "description": "Test how well you know your rice", "license": licenses.CC_BY_NC_SA, "copyright_holder": "Learning Equality", "mastery_model": exercises.DO_ALL, "files": [ { "path": "http://www.publicdomainpictures.net/pictures/110000/nahled/bowl-of-rice.jpg" } ], "questions": [ { "id": "eeeee", "question": "Which rice is your favorite? 
\\_\\_\\_ ![](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAMAAAAoLQ9TAAAABGdBTUEAALGPC/xhBQAAAAFzUkdCAK7OHOkAAAAgY0hSTQAAeiYAAICEAAD6AAAAgOgAAHUwAADqYAAAOpgAABdwnLpRPAAAAmFQTFRF////wN/2I0FiNFFuAAAAxdvsN1RxV3KMnrPFFi9PAB1CVG+KXHaQI0NjttLrEjVchIF4AyNGZXB5V087UUw/EzBMpqWeb2thbmpgpqOceXVsERgfTWeADg8QCAEApKGZBAYIop+XCQkIhZ+2T2mEg5mtnK/AobPDkKO2YXqTAAAAJkBetMraZH2VprjIz9zm4enw7/T47fP3wc7ae5GnAAAAN1BsSmSApLfI1ODq2OHp5Orv8PL09vb38fb5wM/bbISbrL/PfZSpxNPgzdnj2+Pr5evw6+/z6e3w3ePp2OPsma2/ABM5Q197ABk4jKG1yNfjytfh1uDo3eXs4unv1t/nztrjqbzMTmmEXneRES1Ji6CzxtXixdPfztrk1N/n1+Dp1d/oz9vlxdPeq73NVG+KYnyUAAAddIuhwtPhvMzaxtTgytfiy9jjwtHewtHenbDCHT1fS2eCRV52qr7PvM3cucrYv87cv8/cvMzavc3bucvacoyl////ByE8WnKKscXWv9Hguszbu8zbvc7dtcnaiJqrcHZ4f4SHEh0nEitFTWZ+hJqumrDDm7HDj6W5dI2lYGJfmZeQl5SNAAAADRciAAATHjdSOVNsPlhyLklmKCYjW1lUlpOLlZKLFSAqWXSOBQAADA0NAAAAHh0bWlhSk5CIk5CIBAYJDRQbERcdDBAUBgkMAAAEDg4NAAAAHBsZWFZQkY6GAAAAAAAABQUEHBsZAAAAGxoYVlROko+GBAQDZ2RdAAAAGhkYcW9oAgICAAAAExMSDQwLjouDjYuDioiAiIV9hoN7VlRO////Z2DcYwAAAMR0Uk5TAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACRKrJyrZlBQECaNXCsKaqypMGAUDcu7Gpn5mf03gDo8+4saiipKq3xRMBH83Eu7OsqbG61DkDMdbFvrizsbK3wNs9Ax/VysS/vLq/zNwfArDhxMfExMXE3pMCMe7byMjIzd33ZgYGQtnz6+zooeJXBQMFD1yHejZ1+l8FBgEELlOR+GgFCQ0SGxoBGFKg+m0BBwEMR6v+hAEDM6nRASWURVuYQQ4AAAABYktHRACIBR1IAAAACXBIWXMAAAjLAAAIywGEuOmJAAABCklEQVQY02NgUGZUUVVT19DUYtBmYmZhYdBh1dXTNzA0MjYxZTFjAwqwm1tYWlnb2NrZO3A4cgIFGJycXVzd3D08vbx9uHyBAn7+AYFBwSEhoWHhEdyRQIGo6JjYuPiExKTklFSeNKBAekZmVnZObk5efkEhbxFQgK+4pLSsvKKyqrqGoZZfgIVBsK6+obGpuaW1rV2oQ1hEgKFTtKu7p7evf8LEI5PEJotLMEyZyjJt+oyZsxhmzzk6V3KeFIO01vwFMrJyCxctXrL02DL55QwsClorVq5avWbtuvUbNh7fpMjAwsKyWWvLFJatStu279h5YhdIAAJ2s+zZu+/kfoQAy4HNLAcPHQYA5YtSi+k2/WkAAAAldEVYdGRhdGU6Y3JlYXRlADIwMTMtMTAtMDRUMTk6Mzk6MjEtMDQ6MDAwU1uYAAAAJXRFWHRkYXRlOm1vZGlmeQAyMDEzLTEwLTA0VDE5OjM5OjIxLTA0OjAwQQ7jJAAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoAAAAASUVORK5CYII=)", "type": exercises.MULTIPLE_SELECTION, "correct_answers": [ "White rice", "Brown rice", "Sushi rice <p>abc</p>", ], "all_answers": ["White rice", "Quinoa", "Brown rice", "<"], }, { "id": "bbbbb", "question": "Which rice is the crunchiest?", "type": exercises.SINGLE_SELECTION, "correct_answer": "Rice Krispies \n![](https://upload.wikimedia.org/wikipedia/commons/c/cd/RKTsquares.jpg)", "all_answers": [ "White rice", "Brown rice \n![](https://c2.staticflickr.com/4/3159/2889140143_b99fd8dd4c_z.jpg?zz=1)", "Rice Krispies \n![](https://upload.wikimedia.org/wikipedia/commons/c/cd/RKTsquares.jpg)", ], "hints": "It's delicious", }, { "id": "aaaaa", "question": "How many minutes does it take to cook rice? 
<img src='https://upload.wikimedia.org/wikipedia/commons/5/5e/Jeera-rice.JPG'>", "type": exercises.INPUT_QUESTION, "answers": ["20", "25", "15"], "hints": [ "Takes roughly same amount of time to install kolibri on Windows machine", "Does this help?\n![](http://www.aroma-housewares.com/images/rice101/delay_timer_1.jpg)", ], }, { "id": "ddddd", "type": exercises.PERSEUS_QUESTION, "item_data": SAMPLE_PERSEUS_1_JSON, }, ], }, { "title": "Rice Exercise 2", "id": "6cafe4", "description": "Test how well you know your rice", "license": licenses.CC_BY_NC_SA, "copyright_holder": "Learning Equality", "mastery_model": exercises.M_OF_N, "files": [ { "path": "https://c1.staticflickr.com/5/4021/4302326650_b11f0f0aaf_b.jpg" } ], "questions": [ { "id": "11111", "question": "<h3 id=\"rainbow\" style=\"font-weight:bold\">RICE COOKING!!!</h3><script type='text/javascript'><!-- setInterval(function() {$('#rainbow').css('color', '#'+((1<<24)*Math.random()|0).toString(16));}, 300); --></script>", "type": exercises.SINGLE_SELECTION, "all_answers": ["Answer"], "correct_answer": "Answer", }, { "id": "121212", "question": "<math> <mrow> <msup><mi> a </mi><mn>2</mn></msup> <mo> + </mo> <msup><mi> b </mi><mn>2</mn></msup> <mo> = </mo> <msup><mi> c </mi><mn>2</mn></msup> </mrow> </math>", "type": exercises.SINGLE_SELECTION, "all_answers": ["Answer"], "correct_answer": "Answer", }, ], }, { "title": "HTML Sample", "id": "abcdef", "description": "An example of how html can be imported from the ricecooker", "license": licenses.PUBLIC_DOMAIN, "files": [{"path": "content://htmltest.zip"}], }, { "title": "Rice Exercise 3", "id": "6cafe5", "description": "Test how well you know your rice", "license": licenses.CC_BY_NC_SA, "copyright_holder": "Learning Equality", "mastery_model": exercises.M_OF_N, "files": [ { "path": "https://upload.wikimedia.org/wikipedia/commons/b/b7/Rice_p1160004.jpg" } ], "questions": [ { "id": "123456", "question": "Solve: $$(111^{x+1}\\times111^\\frac14)\\div111^\\frac12=111^3$$", "type": exercises.SINGLE_SELECTION, "all_answers": ["Yes", "No", "Rice!"], "correct_answer": "Rice!", } ], }, ], } ] SAMPLE_TREE.extend(EXERCISES_NODES) class SampleChef(SushiChef): """ The chef class that takes care of uploading channel to the content curation server. We'll call its `main()` method from the command line script. """ channel_info = { # "CHANNEL_SOURCE_DOMAIN": SOURCE_DOMAIN, # who is providing the content (e.g. learningequality.org) "CHANNEL_SOURCE_ID": SOURCE_ID, # channel's unique id "CHANNEL_TITLE": CHANNEL_TITLE, "CHANNEL_LANGUAGE": CHANNEL_LANGUAGE, # (optional) local path or url to image file "CHANNEL_THUMBNAIL": "https://upload.wikimedia.org/wikipedia/commons/thumb/5/50/Banaue_Philippines_Banaue-Rice-Terraces-01.jpg/640px-Banaue_Philippines_Banaue-Rice-Terraces-01.jpg", "CHANNEL_DESCRIPTION": "A sample sushi chef to demo content types.", # (optional) description of the channel (optional) } def construct_channel(self, *args, **kwargs): """ Create ChannelNode and build topic tree. """ channel = self.get_channel( *args, **kwargs ) # creates ChannelNode from data in self.channel_info _build_tree(channel, SAMPLE_TREE) raise_for_invalid_channel(channel) return channel def _build_tree(node, sourcetree): """ Parse nodes given in `sourcetree` and add as children of `node`. 
""" for child_source_node in sourcetree: try: main_file = ( child_source_node["files"][0] if "files" in child_source_node else {} ) kind = guess_content_kind( path=main_file.get("path"), web_video_data=main_file.get("youtube_id") or main_file.get("web_url"), questions=child_source_node.get("questions"), ) except UnknownContentKindError: continue if kind == content_kinds.TOPIC: child_node = nodes.TopicNode( source_id=child_source_node["id"], title=child_source_node["title"], author=child_source_node.get("author"), description=child_source_node.get("description"), thumbnail=child_source_node.get("thumbnail"), ) node.add_child(child_node) source_tree_children = child_source_node.get("children", []) _build_tree(child_node, source_tree_children) elif kind == content_kinds.VIDEO: child_node = nodes.VideoNode( source_id=child_source_node["id"], title=child_source_node["title"], license=get_license( child_source_node.get("license"), description="Description of license", copyright_holder=child_source_node.get("copyright_holder"), ), author=child_source_node.get("author"), description=child_source_node.get("description"), derive_thumbnail=True, # video-specific data thumbnail=child_source_node.get("thumbnail"), ) add_files(child_node, child_source_node.get("files") or []) node.add_child(child_node) elif kind == content_kinds.AUDIO: child_node = nodes.AudioNode( source_id=child_source_node["id"], title=child_source_node["title"], license=child_source_node.get("license"), author=child_source_node.get("author"), description=child_source_node.get("description"), thumbnail=child_source_node.get("thumbnail"), copyright_holder=child_source_node.get("copyright_holder"), ) add_files(child_node, child_source_node.get("files") or []) node.add_child(child_node) elif kind == content_kinds.DOCUMENT: child_node = nodes.DocumentNode( source_id=child_source_node["id"], title=child_source_node["title"], license=child_source_node.get("license"), author=child_source_node.get("author"), description=child_source_node.get("description"), thumbnail=child_source_node.get("thumbnail"), copyright_holder=child_source_node.get("copyright_holder"), ) add_files(child_node, child_source_node.get("files") or []) node.add_child(child_node) elif kind == content_kinds.EXERCISE: mastery_model = ( child_source_node.get("mastery_model") and {"mastery_model": child_source_node["mastery_model"]} ) or {} child_node = nodes.ExerciseNode( source_id=child_source_node["id"], title=child_source_node["title"], license=child_source_node.get("license"), author=child_source_node.get("author"), description=child_source_node.get("description"), exercise_data=mastery_model, thumbnail=child_source_node.get("thumbnail"), copyright_holder=child_source_node.get("copyright_holder"), ) add_files(child_node, child_source_node.get("files") or []) for q in child_source_node.get("questions"): question = create_question(q) child_node.add_question(question) node.add_child(child_node) elif kind == content_kinds.HTML5: child_node = nodes.HTML5AppNode( source_id=child_source_node["id"], title=child_source_node["title"], license=child_source_node.get("license"), author=child_source_node.get("author"), description=child_source_node.get("description"), thumbnail=child_source_node.get("thumbnail"), copyright_holder=child_source_node.get("copyright_holder"), ) add_files(child_node, child_source_node.get("files") or []) node.add_child(child_node) elif kind == content_kinds.H5P: child_node = nodes.H5PAppNode( source_id=child_source_node["id"], 
title=child_source_node["title"], license=child_source_node.get("license"), author=child_source_node.get("author"), description=child_source_node.get("description"), thumbnail=child_source_node.get("thumbnail"), copyright_holder=child_source_node.get("copyright_holder"), ) add_files(child_node, child_source_node.get("files") or []) node.add_child(child_node) else: # unknown content file format continue return node def add_files(node, file_list): for f in file_list: path = f.get("path") if path is not None: abspath = get_abspath( path ) # NEW: expand content:// --> ./content/ in file paths else: abspath = None file_type = guess_file_type( node.kind, filepath=abspath, youtube_id=f.get("youtube_id"), web_url=f.get("web_url"), encoding=f.get("encoding"), ) if file_type == FileTypes.AUDIO_FILE: node.add_file(files.AudioFile(path=abspath, language=f.get("language"))) elif file_type == FileTypes.THUMBNAIL: node.add_file(files.ThumbnailFile(path=abspath)) elif file_type == FileTypes.DOCUMENT_FILE: node.add_file(files.DocumentFile(path=abspath, language=f.get("language"))) elif file_type == FileTypes.HTML_ZIP_FILE: node.add_file(files.HTMLZipFile(path=abspath, language=f.get("language"))) elif file_type == FileTypes.H5P_FILE: node.add_file(files.H5PFile(path=abspath, language=f.get("language"))) elif file_type == FileTypes.VIDEO_FILE: node.add_file( files.VideoFile( path=abspath, language=f.get("language"), ffmpeg_settings=f.get("ffmpeg_settings"), ) ) elif file_type == FileTypes.SUBTITLE_FILE: node.add_file(files.SubtitleFile(path=abspath, language=f["language"])) elif file_type == FileTypes.BASE64_FILE: node.add_file(files.Base64ImageFile(encoding=f["encoding"])) elif file_type == FileTypes.WEB_VIDEO_FILE: node.add_file( files.WebVideoFile( web_url=f["web_url"], high_resolution=f.get("high_resolution") ) ) elif file_type == FileTypes.YOUTUBE_VIDEO_FILE: node.add_file( files.YouTubeVideoFile( youtube_id=f["youtube_id"], high_resolution=f.get("high_resolution") ) ) node.add_file( files.YouTubeSubtitleFile(youtube_id=f["youtube_id"], language="en") ) else: raise UnknownFileTypeError("Unrecognized file type '{0}'".format(f["path"])) def create_question(raw_question): question = parse_images(raw_question.get("question")) hints = raw_question.get("hints") hints = ( parse_images(hints) if isinstance(hints, str) else [parse_images(hint) for hint in hints or []] ) if raw_question["type"] == exercises.MULTIPLE_SELECTION: return questions.MultipleSelectQuestion( id=raw_question["id"], question=question, correct_answers=[ parse_images(answer) for answer in raw_question["correct_answers"] ], all_answers=[ parse_images(answer) for answer in raw_question["all_answers"] ], hints=hints, ) if raw_question["type"] == exercises.SINGLE_SELECTION: return questions.SingleSelectQuestion( id=raw_question["id"], question=question, correct_answer=parse_images(raw_question["correct_answer"]), all_answers=[ parse_images(answer) for answer in raw_question["all_answers"] ], hints=hints, ) if raw_question["type"] == exercises.INPUT_QUESTION: return questions.InputQuestion( id=raw_question["id"], question=question, answers=[parse_images(answer) for answer in raw_question["answers"]], hints=hints, ) if raw_question["type"] == exercises.PERSEUS_QUESTION: return questions.PerseusQuestion( id=raw_question["id"], raw_data=parse_images(raw_question.get("item_data")), source_url="https://www.google.com/", ) else: raise UnknownQuestionTypeError( "Unrecognized question type '{0}': accepted types are {1}".format( raw_question["type"], 
[key for key, value in exercises.question_choices] ) ) def parse_images(content): if content: reg = re.compile(questions.MARKDOWN_IMAGE_REGEX, flags=re.IGNORECASE) matches = reg.findall(content) for match in matches: path = match[1] graphie = re.search(questions.WEB_GRAPHIE_URL_REGEX, path) if graphie: path = graphie.group(1) content = content.replace(path, get_abspath(path).replace("\\", "\\\\")) return content if __name__ == "__main__": """ This code will run when the sushi chef is called from the command line. """ chef = SampleChef() chef.main()
[((34, 11, 34, 45), 'os.path.join', 'os.path.join', ({(34, 24, 34, 36): 'EXAMPLES_DIR', (34, 38, 34, 44): '"""data"""'}, {}), "(EXAMPLES_DIR, 'data')", False, 'import os\n'), ((35, 14, 35, 51), 'os.path.join', 'os.path.join', ({(35, 27, 35, 39): 'EXAMPLES_DIR', (35, 41, 35, 50): '"""content"""'}, {}), "(EXAMPLES_DIR, 'content')", False, 'import os\n'), ((33, 31, 33, 57), 'os.path.realpath', 'os.path.realpath', ({(33, 48, 33, 56): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((166, 18, 166, 38), 'json.load', 'json.load', ({(166, 28, 166, 37): 'json_file'}, {}), '(json_file)', False, 'import json\n'), ((47, 15, 47, 48), 're.search', 're.search', ({(47, 25, 47, 41): '"""content://(.+)"""', (47, 43, 47, 47): 'path'}, {}), "('content://(.+)', path)", False, 'import re\n'), ((165, 10, 165, 44), 'os.path.join', 'join', ({(165, 15, 165, 23): 'DATA_DIR', (165, 25, 165, 43): '"""sample_tree.json"""'}, {}), "(DATA_DIR, 'sample_tree.json')", False, 'from os.path import join\n'), ((338, 8, 338, 42), 'ricecooker.exceptions.raise_for_invalid_channel', 'raise_for_invalid_channel', ({(338, 34, 338, 41): 'channel'}, {}), '(channel)', False, 'from ricecooker.exceptions import raise_for_invalid_channel\n'), ((584, 14, 584, 77), 're.compile', 're.compile', (), '', False, 'import re\n'), ((169, 29, 169, 68), 'os.path.join', 'join', ({(169, 34, 169, 42): 'DATA_DIR', (169, 44, 169, 67): '"""sample_perseus01.json"""'}, {}), "(DATA_DIR, 'sample_perseus01.json')", False, 'from os.path import join\n'), ((588, 22, 588, 70), 're.search', 're.search', ({(588, 32, 588, 63): 'questions.WEB_GRAPHIE_URL_REGEX', (588, 65, 588, 69): 'path'}, {}), '(questions.WEB_GRAPHIE_URL_REGEX, path)', False, 'import re\n'), ((492, 26, 492, 59), 'ricecooker.classes.files.ThumbnailFile', 'files.ThumbnailFile', (), '', False, 'from ricecooker.classes import files\n'), ((150, 14, 150, 36), 'os.path.splitext', 'os.path.splitext', ({(150, 31, 150, 35): 'path'}, {}), '(path)', False, 'import os\n'), ((155, 39, 155, 68), 'le_utils.constants.content_kinds.MAPPING.items', 'content_kinds.MAPPING.items', ({}, {}), '()', False, 'from le_utils.constants import content_kinds\n'), ((132, 14, 132, 40), 'os.path.splitext', 'os.path.splitext', ({(132, 31, 132, 39): 'filepath'}, {}), '(filepath)', False, 'import os\n'), ((508, 26, 508, 82), 'ricecooker.classes.files.SubtitleFile', 'files.SubtitleFile', (), '', False, 'from ricecooker.classes import files\n'), ((510, 26, 510, 71), 'ricecooker.classes.files.Base64ImageFile', 'files.Base64ImageFile', (), '', False, 'from ricecooker.classes import files\n'), ((524, 16, 524, 84), 'ricecooker.classes.files.YouTubeSubtitleFile', 'files.YouTubeSubtitleFile', (), '', False, 'from ricecooker.classes import files\n')]
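A minimal sketch of the content:// expansion implemented by get_abspath above; the content_dir default here is a made-up stand-in for the chef's CONTENT_DIR:

import os
import re

def expand_content_uri(path, content_dir='./content'):
    # Rewrite content://<name> into a path under content_dir, as get_abspath does.
    match = re.search('content://(.+)', path)
    return os.path.join(content_dir, match.group(1)) if match else path

print(expand_content_uri('content://htmltest.zip'))  # ./content/htmltest.zip on POSIX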
dvgd/blender
release/scripts/modules/bl_i18n_utils/utils_spell_check.py
4eb2807db1c1bd2514847d182fbb7a3f7773da96
# ##### BEGIN GPL LICENSE BLOCK ##### # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # ##### END GPL LICENSE BLOCK ##### # <pep8 compliant> import enchant import os import pickle import re class SpellChecker: """ A basic spell checker. """ # These must be all lower case for comparisons uimsgs = { # OK words "adaptively", "adaptivity", "aren", # aren't "betweens", # yuck! in-betweens! "boolean", "booleans", "chamfer", "couldn", # couldn't "decrement", "derivate", "deterministically", "doesn", # doesn't "duplications", "effector", "equi", # equi-angular, etc. "fader", "globbing", "hasn", # hasn't "hetero", "hoc", # ad-hoc "incompressible", "indices", "instantiation", "iridas", "isn", # isn't "iterable", "kyrgyz", "latin", "merchantability", "mplayer", "ons", # add-ons "pong", # ping pong "scalable", "shadeless", "shouldn", # shouldn't "smoothen", "spacings", "teleport", "teleporting", "vertices", "wasn", # wasn't # Merged words "antialiasing", "antialias", "arcsine", "arccosine", "arctangent", "autoclip", "autocomplete", "autoexec", "autoexecution", "autogenerated", "autolock", "automasking", "autoname", "autopack", "autosave", "autoscale", "autosmooth", "autosplit", "backface", "backfacing", "backimage", "backscattered", "bandnoise", "bindcode", "bitdepth", "bitflag", "bitflags", "bitrate", "blackbody", "blendfile", "blendin", "bonesize", "boundbox", "boxpack", "buffersize", "builtin", "builtins", "bytecode", "chunksize", "customdata", "dataset", "datasets", "de", "deadzone", "deconstruct", "defocus", "denoise", "denoised", "denoising", "denoiser", "deselect", "deselecting", "deselection", "despill", "despilling", "dirtree", "editcurve", "editmesh", "filebrowser", "filelist", "filename", "filenames", "filepath", "filepaths", "forcefield", "forcefields", "fulldome", "fulldomes", "fullscreen", "gridline", "hardlight", "hemi", "hostname", "inbetween", "inscatter", "inscattering", "libdata", "lightprobe", "lightprobes", "lightless", "lineset", "linestyle", "linestyles", "localview", "lookup", "lookups", "mathutils", "micropolygon", "midlevel", "midground", "mixdown", "multi", "multifractal", "multiframe", "multilayer", "multipaint", "multires", "multiresolution", "multisampling", "multiscatter", "multitexture", "multithreaded", "multiuser", "multiview", "namespace", "nodetree", "nodetrees", "keyconfig", "offscreen", "online", "playhead", "popup", "popups", "pre", "precache", "precaching", "precalculate", "precomputing", "prefetch", "premultiply", "premultiplied", "prepass", "prepend", "preprocess", "preprocessing", "preseek", "promillage", "pushdown", "raytree", "readonly", "realtime", "reinject", "reinjected", "rekey", "remesh", "reprojection", "reproject", "reprojecting", "resize", "restpose", "retarget", "retargets", "retargeting", "retargeted", "retiming", "rigidbody", "ringnoise", "rolloff", "runtime", "scanline", "screenshot", 
"screenshots", "seekability", "selfcollision", "shadowbuffer", "shadowbuffers", "singletexture", "spellcheck", "spellchecking", "startup", "stateful", "starfield", "studiolight", "subflare", "subflares", "subframe", "subframes", "subclass", "subclasses", "subclassing", "subdirectory", "subdirectories", "subdir", "subdirs", "subitem", "submode", "submodule", "submodules", "subpath", "subsize", "substep", "substeps", "targetless", "textbox", "textboxes", "tilemode", "timestamp", "timestamps", "timestep", "timesteps", "todo", "tradeoff", "un", "unassociate", "unassociated", "unbake", "unclosed", "uncomment", "unculled", "undeformed", "undistort", "undistorted", "undistortion", "ungroup", "ungrouped", "unhide", "unindent", "unkeyed", "unlink", "unlinked", "unmute", "unphysical", "unpremultiply", "unprojected", "unprotect", "unreacted", "unreferenced", "unregister", "unselect", "unselected", "unselectable", "unsets", "unshadowed", "unspill", "unstitchable", "unstitch", "unsubdivided", "unsubdivide", "untrusted", "vectorscope", "whitespace", "whitespaces", "worldspace", "workflow", "workspace", "workspaces", # Neologisms, slangs "affectable", "animatable", "automagic", "automagically", "blobby", "blockiness", "blocky", "collider", "colliders", "deformer", "deformers", "determinator", "editability", "effectors", "expander", "instancer", "keyer", "lacunarity", "linkable", "numerics", "occluder", "occluders", "overridable", "passepartout", "perspectively", "pixelate", "pointiness", "polycount", "polygonization", "polygonalization", # yuck! "scalings", "selectable", "selectability", "shaper", "smoothen", "smoothening", "spherize", "spherized", "stitchable", "symmetrize", "trackability", "transmissivity", "rasterized", "rasterization", "rasterizer", "renderer", "renderers", "renderable", "renderability", # Really bad!!! "convertor", "fullscr", # Abbreviations "aero", "amb", "anim", "aov", "app", "bbox", "bboxes", "bksp", # Backspace "bool", "calc", "cfl", "config", "configs", "const", "coord", "coords", "degr", "diff", "dof", "dupli", "duplis", "eg", "esc", "expr", "fac", "fra", "fract", "frs", "grless", "http", "init", "irr", # Irradiance "kbit", "kb", "lang", "langs", "lclick", "rclick", "lensdist", "loc", "rot", "pos", "lorem", "luma", "mbs", # mouse button 'select'. "mem", "multicam", "num", "ok", "orco", "ortho", "pano", "persp", "pref", "prefs", "prev", "param", "premul", "quad", "quads", "quat", "quats", "recalc", "recalcs", "refl", "sce", "sel", "spec", "struct", "structs", "subdiv", "sys", "tex", "texcoord", "tmr", # timer "tri", "tris", "udim", "udims", "upres", # Upresolution "usd", "uv", "uvs", "uvw", "uw", "uvmap", "ve", "vec", "vel", # velocity! 
"vert", "verts", "vis", "vram", "xor", "xyz", "xzy", "yxz", "yzx", "zxy", "zyx", "xy", "xz", "yx", "yz", "zx", "zy", # General computer/science terms "affine", "albedo", "anamorphic", "anisotropic", "anisotropy", "bitangent", "boid", "boids", "ceil", "compressibility", "curvilinear", "equiangular", "equisolid", "euler", "eulers", "fribidi", "gettext", "hashable", "hotspot", "interocular", "intrinsics", "irradiance", "isosurface", "jitter", "jittering", "jittered", "keymap", "keymaps", "lambertian", "laplacian", "metadata", "msgfmt", "nand", "xnor", "normals", "numpad", "octahedral", "octree", "omnidirectional", "opengl", "openmp", "parametrization", "photoreceptor", "poly", "polyline", "polylines", "probabilistically", "pulldown", "pulldowns", "quantized", "quartic", "quaternion", "quaternions", "quintic", "samplerate", "sawtooth", "scrollback", "scrollbar", "scroller", "searchable", "spacebar", "subtractive", "superellipse", "tooltip", "tooltips", "trackpad", "tuple", "unicode", "viewport", "viewports", "viscoelastic", "vorticity", "waveform", "waveforms", "wildcard", "wildcards", "wintab", # Some Windows tablet API # General computer graphics terms "anaglyph", "bezier", "beziers", "bicubic", "bilinear", "bindpose", "binormal", "blackpoint", "whitepoint", "blinn", "bokeh", "catadioptric", "centroid", "chroma", "chrominance", "clearcoat", "codec", "codecs", "collada", "compositing", "crossfade", "cubemap", "cubemaps", "cuda", "deinterlace", "dropoff", "duotone", "dv", "eigenvectors", "emissive", "equirectangular", "fisheye", "framerate", "gimbal", "grayscale", "icosphere", "inpaint", "kerning", "lightmap", "linearlight", "lossless", "lossy", "luminance", "mantaflow", "matcap", "midtones", "mipmap", "mipmaps", "mip", "ngon", "ngons", "ntsc", "nurb", "nurbs", "perlin", "phong", "pinlight", "qi", "radiosity", "raycasting", "raytrace", "raytracing", "raytraced", "refractions", "remesher", "remeshing", "remesh", "renderfarm", "scanfill", "shader", "shaders", "shadowmap", "shadowmaps", "softlight", "specular", "specularity", "spillmap", "sobel", "stereoscopy", "texel", "timecode", "tonemap", "toon", "transmissive", "vividlight", "volumetrics", "voronoi", "voxel", "voxels", "vsync", "wireframe", "zmask", "ztransp", # Blender terms "audaspace", "azone", # action zone "backwire", "bbone", "bendy", # bones "bmesh", "breakdowner", "bspline", "bweight", "colorband", "datablock", "datablocks", "despeckle", "depsgraph", "dopesheet", "dupliface", "duplifaces", "dupliframe", "dupliframes", "dupliobject", "dupliob", "dupligroup", "duplivert", "dyntopo", "editbone", "editmode", "eevee", "fcurve", "fcurves", "fedge", "fedges", "filmic", "fluidsim", "freestyle", "enum", "enums", "gizmogroup", "gons", # N-Gons "gpencil", "idcol", "keyframe", "keyframes", "keyframing", "keyframed", "lookdev", "luminocity", "mathvis", "metaball", "metaballs", "mball", "metaelement", "metaelements", "metastrip", "metastrips", "movieclip", "mpoly", "mtex", "nabla", "navmesh", "outliner", "overscan", "paintmap", "paintmaps", "polygroup", "polygroups", "poselib", "pushpull", "pyconstraint", "pyconstraints", "qe", # keys... "shaderfx", "shaderfxs", "shapekey", "shapekeys", "shrinkfatten", "shrinkwrap", "softbody", "stucci", "subdiv", "subtype", "sunsky", "tessface", "tessfaces", "texface", "timeline", "timelines", "tosphere", "uilist", "userpref", "vcol", "vcols", "vgroup", "vgroups", "vinterlace", "vse", "wasd", "wasdqe", # keys... 
"wetmap", "wetmaps", "wpaint", "uvwarp", # UOC (Ugly Operator Categories) "cachefile", "paintcurve", "ptcache", "dpaint", # Algorithm/library names "ashikhmin", # Ashikhmin-Shirley "arsloe", # Texel-Marsen-Arsloe "beckmann", "blackman", # Blackman-Harris "blosc", "burley", # Christensen-Burley "catmull", "catrom", "chebychev", "courant", "cryptomatte", "crypto", "embree", "hosek", "kutta", "lennard", "marsen", # Texel-Marsen-Arsloe "mikktspace", "minkowski", "minnaert", "moskowitz", # Pierson-Moskowitz "musgrave", "nayar", "netravali", "nishita", "ogawa", "oren", "peucker", # Ramer-Douglas-Peucker "pierson", # Pierson-Moskowitz "preetham", "prewitt", "ramer", # Ramer-Douglas-Peucker "runge", "sobol", "verlet", "wilkie", "worley", # Acronyms "aa", "msaa", "ao", "api", "asc", "cdl", "ascii", "atrac", "avx", "bsdf", "bssrdf", "bw", "ccd", "cmd", "cmos", "cpus", "ctrl", "cw", "ccw", "dev", "djv", "dpi", "dvar", "dx", "eo", "fh", "fk", "fov", "fft", "futura", "fx", "gfx", "ggx", "gl", "glsl", "gpl", "gpu", "gpus", "hc", "hdc", "hdr", "hdri", "hdris", "hh", "mm", "ss", "ff", # hh:mm:ss:ff timecode "hsv", "hsva", "hsl", "id", "ies", "ior", "itu", "jonswap", "lhs", "lmb", "mmb", "rmb", "kb", "mocap", "msgid", "msgids", "mux", "ndof", "ppc", "precisa", "px", "qmc", "rdp", "rgb", "rgba", "rhs", "rv", "sdl", "sl", "smpte", "ssao", "ssr", "svn", "tma", "ui", "unix", "vbo", "vbos", "vr", "wxyz", "xr", "ycc", "ycca", "yrgb", "yuv", "yuva", # Blender acronyms "bli", "bpy", "bvh", "dbvt", "dop", # BLI K-Dop BVH "ik", "nla", "py", "qbvh", "rna", "rvo", "simd", "sph", "svbvh", # Files types/formats "avi", "attrac", "autocad", "autodesk", "bmp", "btx", "cineon", "dpx", "dwaa", "dwab", "dxf", "eps", "exr", "fbx", "fbxnode", "ffmpeg", "flac", "gltf", "gzip", "ico", "jpg", "jpeg", "jpegs", "json", "matroska", "mdd", "mkv", "mpeg", "mjpeg", "mtl", "ogg", "openjpeg", "osl", "oso", "piz", "png", "pngs", "po", "quicktime", "rle", "sgi", "stl", "svg", "targa", "tga", "tiff", "theora", "vorbis", "vp9", "wav", "webm", "xiph", "xml", "xna", "xvid", } _valid_before = "(?<=[\\s*'\"`])|(?<=[a-zA-Z][/-])|(?<=^)" _valid_after = "(?=[\\s'\"`.!?,;:])|(?=[/-]\\s*[a-zA-Z])|(?=$)" _valid_words = "(?:{})(?:(?:[A-Z]+[a-z]*)|[A-Z]*|[a-z]*)(?:{})".format(_valid_before, _valid_after) _split_words = re.compile(_valid_words).findall @classmethod def split_words(cls, text): return [w for w in cls._split_words(text) if w] def __init__(self, settings, lang="en_US"): self.settings = settings self.dict_spelling = enchant.Dict(lang) self.cache = set(self.uimsgs) cache = self.settings.SPELL_CACHE if cache and os.path.exists(cache): with open(cache, 'rb') as f: self.cache |= set(pickle.load(f)) def __del__(self): cache = self.settings.SPELL_CACHE if cache and os.path.exists(cache): with open(cache, 'wb') as f: pickle.dump(self.cache, f) def check(self, txt): ret = [] if txt in self.cache: return ret for w in self.split_words(txt): w_lower = w.lower() if w_lower in self.cache: continue if not self.dict_spelling.check(w): ret.append((w, self.dict_spelling.suggest(w))) else: self.cache.add(w_lower) if not ret: self.cache.add(txt) return ret
[((785, 19, 785, 43), 're.compile', 're.compile', ({(785, 30, 785, 42): '_valid_words'}, {}), '(_valid_words)', False, 'import re\n'), ((793, 29, 793, 47), 'enchant.Dict', 'enchant.Dict', ({(793, 42, 793, 46): 'lang'}, {}), '(lang)', False, 'import enchant\n'), ((797, 21, 797, 42), 'os.path.exists', 'os.path.exists', ({(797, 36, 797, 41): 'cache'}, {}), '(cache)', False, 'import os\n'), ((803, 21, 803, 42), 'os.path.exists', 'os.path.exists', ({(803, 36, 803, 41): 'cache'}, {}), '(cache)', False, 'import os\n'), ((805, 16, 805, 42), 'pickle.dump', 'pickle.dump', ({(805, 28, 805, 38): 'self.cache', (805, 40, 805, 41): 'f'}, {}), '(self.cache, f)', False, 'import pickle\n'), ((799, 34, 799, 48), 'pickle.load', 'pickle.load', ({(799, 46, 799, 47): 'f'}, {}), '(f)', False, 'import pickle\n')]
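A short sketch of the pyenchant calls that SpellChecker above relies on (enchant.Dict, check, suggest); it assumes the pyenchant package and an en_US dictionary are installed:

import enchant

dictionary = enchant.Dict('en_US')
for word in ('render', 'shadowmap'):
    if not dictionary.check(word):
        # Unknown words get suggestions, which the checker above reports alongside the word.
        print(word, dictionary.suggest(word)[:3])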
gmeyerlee/NASLib
naslib/predictors/mlp.py
21dbceda04cc1faf3d8b6dd391412a459218ef2b
import numpy as np import os import json import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from torch.utils.data import DataLoader, TensorDataset from naslib.utils.utils import AverageMeterGroup from naslib.predictors.utils.encodings import encode from naslib.predictors import Predictor # NOTE: faster on CPU device = torch.device("cpu") print("device:", device) def accuracy_mse(prediction, target, scale=100.0): prediction = prediction.detach() * scale target = (target) * scale return F.mse_loss(prediction, target) class FeedforwardNet(nn.Module): def __init__( self, input_dims: int = 5, num_layers: int = 3, layer_width: list = [10, 10, 10], output_dims: int = 1, activation="relu", ): super(FeedforwardNet, self).__init__() assert ( len(layer_width) == num_layers ), "number of widths should be \ equal to the number of layers" self.activation = eval("F." + activation) all_units = [input_dims] + layer_width self.layers = nn.ModuleList( [nn.Linear(all_units[i], all_units[i + 1]) for i in range(num_layers)] ) self.out = nn.Linear(all_units[-1], 1) # make the init similar to the tf.keras version for l in self.layers: torch.nn.init.xavier_uniform_(l.weight) torch.nn.init.zeros_(l.bias) torch.nn.init.xavier_uniform_(self.out.weight) torch.nn.init.zeros_(self.out.bias) def forward(self, x): for layer in self.layers: x = self.activation(layer(x)) return self.out(x) def basis_funcs(self, x): for layer in self.layers: x = self.activation(layer(x)) return x class MLPPredictor(Predictor): def __init__( self, encoding_type="adjacency_one_hot", ss_type="nasbench201", hpo_wrapper=False, hparams_from_file=False ): self.encoding_type = encoding_type self.ss_type = ss_type self.hpo_wrapper = hpo_wrapper self.default_hyperparams = { "num_layers": 20, "layer_width": 20, "batch_size": 32, "lr": 0.001, "regularization": 0.2, } self.hyperparams = None self.hparams_from_file = hparams_from_file def get_model(self, **kwargs): predictor = FeedforwardNet(**kwargs) return predictor def fit(self, xtrain, ytrain, train_info=None, epochs=500, loss="mae", verbose=0): if self.hparams_from_file and self.hparams_from_file not in ['False', 'None'] \ and os.path.exists(self.hparams_from_file): self.hyperparams = json.load(open(self.hparams_from_file, 'rb'))['mlp'] print('loaded hyperparams from', self.hparams_from_file) elif self.hyperparams is None: self.hyperparams = self.default_hyperparams.copy() num_layers = self.hyperparams["num_layers"] layer_width = self.hyperparams["layer_width"] batch_size = self.hyperparams["batch_size"] lr = self.hyperparams["lr"] regularization = self.hyperparams["regularization"] self.mean = np.mean(ytrain) self.std = np.std(ytrain) if self.encoding_type is not None: _xtrain = np.array( [ encode(arch, encoding_type=self.encoding_type, ss_type=self.ss_type) for arch in xtrain ] ) else: _xtrain = xtrain _ytrain = np.array(ytrain) X_tensor = torch.FloatTensor(_xtrain).to(device) y_tensor = torch.FloatTensor(_ytrain).to(device) train_data = TensorDataset(X_tensor, y_tensor) data_loader = DataLoader( train_data, batch_size=batch_size, shuffle=True, drop_last=False, pin_memory=False, ) self.model = self.get_model( input_dims=_xtrain.shape[1], num_layers=num_layers, layer_width=num_layers * [layer_width], ) self.model.to(device) optimizer = optim.Adam(self.model.parameters(), lr=lr, betas=(0.9, 0.99)) if loss == "mse": criterion = nn.MSELoss().to(device) elif loss == "mae": criterion = nn.L1Loss().to(device) self.model.train() for e in range(epochs): meters = 
AverageMeterGroup() for b, batch in enumerate(data_loader): optimizer.zero_grad() input = batch[0].to(device) target = batch[1].to(device) prediction = self.model(input).view(-1) loss_fn = criterion(prediction, target) # add L1 regularization params = torch.cat( [ x[1].view(-1) for x in self.model.named_parameters() if x[0] == "out.weight" ] ) loss_fn += regularization * torch.norm(params, 1) loss_fn.backward() optimizer.step() mse = accuracy_mse(prediction, target) meters.update( {"loss": loss_fn.item(), "mse": mse.item()}, n=target.size(0) ) if verbose and e % 100 == 0: print("Epoch {}, {}, {}".format(e, meters["loss"], meters["mse"])) train_pred = np.squeeze(self.query(xtrain)) train_error = np.mean(abs(train_pred - ytrain)) return train_error def query(self, xtest, info=None, eval_batch_size=None): if self.encoding_type is not None: xtest = np.array( [ encode(arch, encoding_type=self.encoding_type, ss_type=self.ss_type) for arch in xtest ] ) X_tensor = torch.FloatTensor(xtest).to(device) test_data = TensorDataset(X_tensor) eval_batch_size = len(xtest) if eval_batch_size is None else eval_batch_size test_data_loader = DataLoader( test_data, batch_size=eval_batch_size, pin_memory=False ) self.model.eval() pred = [] with torch.no_grad(): for _, batch in enumerate(test_data_loader): prediction = self.model(batch[0].to(device)).view(-1) pred.append(prediction.cpu().numpy()) pred = np.concatenate(pred) return np.squeeze(pred) def set_random_hyperparams(self): if self.hyperparams is None: params = self.default_hyperparams.copy() else: params = { "num_layers": int(np.random.choice(range(5, 25))), "layer_width": int(np.random.choice(range(5, 25))), "batch_size": 32, "lr": np.random.choice([0.1, 0.01, 0.005, 0.001, 0.0001]), "regularization": 0.2, } self.hyperparams = params return params
[((15, 9, 15, 28), 'torch.device', 'torch.device', ({(15, 22, 15, 27): '"""cpu"""'}, {}), "('cpu')", False, 'import torch\n'), ((22, 11, 22, 41), 'torch.nn.functional.mse_loss', 'F.mse_loss', ({(22, 22, 22, 32): 'prediction', (22, 34, 22, 40): 'target'}, {}), '(prediction, target)', True, 'import torch.nn.functional as F\n'), ((47, 19, 47, 46), 'torch.nn.Linear', 'nn.Linear', ({(47, 29, 47, 42): 'all_units[-1]', (47, 44, 47, 45): '1'}, {}), '(all_units[-1], 1)', True, 'import torch.nn as nn\n'), ((53, 8, 53, 54), 'torch.nn.init.xavier_uniform_', 'torch.nn.init.xavier_uniform_', ({(53, 38, 53, 53): 'self.out.weight'}, {}), '(self.out.weight)', False, 'import torch\n'), ((54, 8, 54, 43), 'torch.nn.init.zeros_', 'torch.nn.init.zeros_', ({(54, 29, 54, 42): 'self.out.bias'}, {}), '(self.out.bias)', False, 'import torch\n'), ((107, 20, 107, 35), 'numpy.mean', 'np.mean', ({(107, 28, 107, 34): 'ytrain'}, {}), '(ytrain)', True, 'import numpy as np\n'), ((108, 19, 108, 33), 'numpy.std', 'np.std', ({(108, 26, 108, 32): 'ytrain'}, {}), '(ytrain)', True, 'import numpy as np\n'), ((118, 18, 118, 34), 'numpy.array', 'np.array', ({(118, 27, 118, 33): 'ytrain'}, {}), '(ytrain)', True, 'import numpy as np\n'), ((122, 21, 122, 54), 'torch.utils.data.TensorDataset', 'TensorDataset', ({(122, 35, 122, 43): 'X_tensor', (122, 45, 122, 53): 'y_tensor'}, {}), '(X_tensor, y_tensor)', False, 'from torch.utils.data import DataLoader, TensorDataset\n'), ((123, 22, 129, 9), 'torch.utils.data.DataLoader', 'DataLoader', (), '', False, 'from torch.utils.data import DataLoader, TensorDataset\n'), ((188, 20, 188, 43), 'torch.utils.data.TensorDataset', 'TensorDataset', ({(188, 34, 188, 42): 'X_tensor'}, {}), '(X_tensor)', False, 'from torch.utils.data import DataLoader, TensorDataset\n'), ((191, 27, 193, 9), 'torch.utils.data.DataLoader', 'DataLoader', (), '', False, 'from torch.utils.data import DataLoader, TensorDataset\n'), ((202, 15, 202, 35), 'numpy.concatenate', 'np.concatenate', ({(202, 30, 202, 34): 'pred'}, {}), '(pred)', True, 'import numpy as np\n'), ((203, 15, 203, 31), 'numpy.squeeze', 'np.squeeze', ({(203, 26, 203, 30): 'pred'}, {}), '(pred)', True, 'import numpy as np\n'), ((51, 12, 51, 51), 'torch.nn.init.xavier_uniform_', 'torch.nn.init.xavier_uniform_', ({(51, 42, 51, 50): 'l.weight'}, {}), '(l.weight)', False, 'import torch\n'), ((52, 12, 52, 40), 'torch.nn.init.zeros_', 'torch.nn.init.zeros_', ({(52, 33, 52, 39): 'l.bias'}, {}), '(l.bias)', False, 'import torch\n'), ((95, 12, 95, 50), 'os.path.exists', 'os.path.exists', ({(95, 27, 95, 49): 'self.hparams_from_file'}, {}), '(self.hparams_from_file)', False, 'import os\n'), ((147, 21, 147, 40), 'naslib.utils.utils.AverageMeterGroup', 'AverageMeterGroup', ({}, {}), '()', False, 'from naslib.utils.utils import AverageMeterGroup\n'), ((197, 13, 197, 28), 'torch.no_grad', 'torch.no_grad', ({}, {}), '()', False, 'import torch\n'), ((44, 13, 44, 54), 'torch.nn.Linear', 'nn.Linear', ({(44, 23, 44, 35): 'all_units[i]', (44, 37, 44, 53): 'all_units[i + 1]'}, {}), '(all_units[i], all_units[i + 1])', True, 'import torch.nn as nn\n'), ((120, 19, 120, 45), 'torch.FloatTensor', 'torch.FloatTensor', ({(120, 37, 120, 44): '_xtrain'}, {}), '(_xtrain)', False, 'import torch\n'), ((121, 19, 121, 45), 'torch.FloatTensor', 'torch.FloatTensor', ({(121, 37, 121, 44): '_ytrain'}, {}), '(_ytrain)', False, 'import torch\n'), ((187, 19, 187, 43), 'torch.FloatTensor', 'torch.FloatTensor', ({(187, 37, 187, 42): 'xtest'}, {}), '(xtest)', False, 'import torch\n'), ((215, 22, 215, 73), 
'numpy.random.choice', 'np.random.choice', ({(215, 39, 215, 72): '[0.1, 0.01, 0.005, 0.001, 0.0001]'}, {}), '([0.1, 0.01, 0.005, 0.001, 0.0001])', True, 'import numpy as np\n'), ((112, 20, 112, 88), 'naslib.predictors.utils.encodings.encode', 'encode', (), '', False, 'from naslib.predictors.utils.encodings import encode\n'), ((140, 24, 140, 36), 'torch.nn.MSELoss', 'nn.MSELoss', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((163, 44, 163, 65), 'torch.norm', 'torch.norm', ({(163, 55, 163, 61): 'params', (163, 63, 163, 64): '(1)'}, {}), '(params, 1)', False, 'import torch\n'), ((183, 20, 183, 88), 'naslib.predictors.utils.encodings.encode', 'encode', (), '', False, 'from naslib.predictors.utils.encodings import encode\n'), ((142, 24, 142, 35), 'torch.nn.L1Loss', 'nn.L1Loss', ({}, {}), '()', True, 'import torch.nn as nn\n')]
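A small sketch of the layer wiring that FeedforwardNet above builds for input_dims=5 and three hidden layers of width 10, written directly with torch.nn as an illustration rather than the NASLib class itself:

import torch
import torch.nn as nn
import torch.nn.functional as F

widths = [5, 10, 10, 10]  # input_dims followed by the hidden layer widths
layers = nn.ModuleList([nn.Linear(i, o) for i, o in zip(widths, widths[1:])])
out = nn.Linear(widths[-1], 1)

x = torch.randn(4, 5)  # a batch of 4 encoded architectures
for layer in layers:
    x = F.relu(layer(x))
print(out(x).shape)  # torch.Size([4, 1])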
Joreshic/python-for-android
pythonforandroid/recipes/libx264/__init__.py
c60e02d2e32e31a3a754838c51e9242cbadcd9e8
from pythonforandroid.toolchain import Recipe, shprint, current_directory, ArchARM
from os.path import exists, join, realpath
from os import uname
import glob
import sh

class LibX264Recipe(Recipe):
    version = 'x264-snapshot-20170608-2245-stable'
    # using mirror url since can't use ftp
    url = 'http://mirror.yandex.ru/mirrors/ftp.videolan.org/x264/snapshots/{version}.tar.bz2'
    md5sum = 'adf3b87f759b5cc9f100f8cf99276f77'

    def should_build(self, arch):
        build_dir = self.get_build_dir(arch.arch)
        return not exists(join(build_dir, 'lib', 'libx264.a'))

    def build_arch(self, arch):
        with current_directory(self.get_build_dir(arch.arch)):
            env = self.get_recipe_env(arch)
            configure = sh.Command('./configure')
            shprint(configure,
                    '--cross-prefix=arm-linux-androideabi-',
                    '--host=arm-linux',
                    '--disable-asm',
                    '--disable-cli',
                    '--enable-pic',
                    '--disable-shared',
                    '--enable-static',
                    '--prefix={}'.format(realpath('.')),
                    _env=env)
            shprint(sh.make, '-j4', _env=env)
            shprint(sh.make, 'install', _env=env)

recipe = LibX264Recipe()
[((20, 24, 20, 49), 'sh.Command', 'sh.Command', ({(20, 35, 20, 48): '"""./configure"""'}, {}), "('./configure')", False, 'import sh\n'), ((31, 12, 31, 45), 'pythonforandroid.toolchain.shprint', 'shprint', (), '', False, 'from pythonforandroid.toolchain import Recipe, shprint, current_directory, ArchARM\n'), ((32, 12, 32, 49), 'pythonforandroid.toolchain.shprint', 'shprint', (), '', False, 'from pythonforandroid.toolchain import Recipe, shprint, current_directory, ArchARM\n'), ((15, 26, 15, 61), 'os.path.join', 'join', ({(15, 31, 15, 40): 'build_dir', (15, 42, 15, 47): '"""lib"""', (15, 49, 15, 60): '"""libx264.a"""'}, {}), "(build_dir, 'lib', 'libx264.a')", False, 'from os.path import exists, join, realpath\n'), ((29, 41, 29, 54), 'os.path.realpath', 'realpath', ({(29, 50, 29, 53): '"""."""'}, {}), "('.')", False, 'from os.path import exists, join, realpath\n')]
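A tiny sketch of the `sh` pattern used in build_arch above: wrap an executable with sh.Command and pass flags as positional arguments. A harmless echo stands in for ./configure here, and this only runs on POSIX systems where the sh package works:

import sh

configure = sh.Command('echo')  # stand-in for sh.Command('./configure')
print(configure('--disable-asm', '--enable-static'))  # prints the flags it was given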
QGB/QPSU
Win/reg.py
7bc214676d797f42d2d7189dc67c9377bccdf25d
#coding=utf-8
try:
	if __name__.startswith('qgb.Win'):
		from .. import py
	else:
		import py
except Exception as ei:
	raise ei
	raise EnvironmentError(__name__)

if py.is2():
	import _winreg as winreg
	from _winreg import *
else:
	import winreg
	from winreg import *

def get(skey, name, root=HKEY_CURRENT_USER, returnType=True):
	'''
	from qgb.Win import reg
	reg.get(r'Software\Microsoft\Windows\CurrentVersion\Internet Settings', 'ProxyEnable')
	reg.get(r'HKLM\SYSTEM\CurrentControlSet\Services\LanmanServer\Parameters\Size')

	There are seven predefined root keys, traditionally named according to their
	constant handles defined in the Win32 API.

	skey must not contain name, otherwise: FileNotFoundError: [WinError 2]
	The system cannot find the file specified.
	'''
	r = OpenKey(root, skey)
	r = QueryValueEx(r, name)
	if returnType:
		return r[0], '{} : {}'.format(REG_TYPE[r[1]], r[1])
	else:
		return r[0]

def set(skey, name, value, root=HKEY_CURRENT_USER, type='auto,or REG_TYPE int', returnType=True):
	r = OpenKey(root, skey, 0, KEY_SET_VALUE)
	if not py.isint(type):
		if py.isint(value): type = 4
		if py.istr(value): type = 1
		if py.isbyte(value): type = 3  # TODO test, and add more rules
	# Write the value under the caller-supplied name so set() matches get().
	SetValueEx(r, name, 0, type, value)
	if get(skey, name, root=root, returnType=False) == value:
		return 'reg.set [{}] {}={} success!'.format(skey[-55:], name, value)
	else:
		return 'reg.set [{}] {}={} Failed !'.format(skey, name, value)

REG_TYPE = {
	0: 'REG_NONE',
	1: 'REG_SZ',
	2: 'REG_EXPAND_SZ',
	3: 'REG_BINARY',
	4: 'REG_DWORD',
	5: 'REG_DWORD_BIG_ENDIAN',
	6: 'REG_LINK',
	7: 'REG_MULTI_SZ',
	8: 'REG_RESOURCE_LIST',
	9: 'REG_FULL_RESOURCE_DESCRIPTOR',
	10: 'REG_RESOURCE_REQUIREMENTS_LIST',
	11: 'REG_QWORD'}
[((11, 3, 11, 11), 'py.is2', 'py.is2', ({}, {}), '()', False, 'import py\n'), ((34, 8, 34, 22), 'py.isint', 'py.isint', ({(34, 17, 34, 21): 'type'}, {}), '(type)', False, 'import py\n'), ((35, 5, 35, 20), 'py.isint', 'py.isint', ({(35, 14, 35, 19): 'value'}, {}), '(value)', False, 'import py\n'), ((36, 5, 36, 19), 'py.istr', 'py.istr', ({(36, 13, 36, 18): 'value'}, {}), '(value)', False, 'import py\n'), ((37, 5, 37, 21), 'py.isbyte', 'py.isbyte', ({(37, 15, 37, 20): 'value'}, {}), '(value)', False, 'import py\n')]
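A Windows-only sketch of the stdlib winreg calls that reg.get above wraps; the key and value name mirror the docstring example:

import winreg

key = winreg.OpenKey(
    winreg.HKEY_CURRENT_USER,
    r'Software\Microsoft\Windows\CurrentVersion\Internet Settings')
value, value_type = winreg.QueryValueEx(key, 'ProxyEnable')
print(value, value_type)  # e.g. 0 4, where 4 is REG_DWORD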
CJSoldier/webssh
tests/test_handler.py
b3c33ff6bd76f4f5df40cc1fe9a138cf0cecd08c
import unittest

import paramiko
from tornado.httputil import HTTPServerRequest

from tests.utils import read_file, make_tests_data_path
from webssh.handler import MixinHandler, IndexHandler, InvalidValueError


class TestMixinHandler(unittest.TestCase):

    def test_get_real_client_addr(self):
        handler = MixinHandler()
        handler.request = HTTPServerRequest(uri='/')
        self.assertIsNone(handler.get_real_client_addr())

        ip = '127.0.0.1'
        handler.request.headers.add('X-Real-Ip', ip)
        self.assertEqual(handler.get_real_client_addr(), False)

        handler.request.headers.add('X-Real-Port', '12345x')
        self.assertEqual(handler.get_real_client_addr(), False)

        handler.request.headers.update({'X-Real-Port': '12345'})
        self.assertEqual(handler.get_real_client_addr(), (ip, 12345))

        handler.request.headers.update({'X-Real-ip': None})
        self.assertEqual(handler.get_real_client_addr(), False)

        handler.request.headers.update({'X-Real-Port': '12345x'})
        self.assertEqual(handler.get_real_client_addr(), False)


class TestIndexHandler(unittest.TestCase):

    def test_get_specific_pkey_with_plain_key(self):
        fname = 'test_rsa.key'
        cls = paramiko.RSAKey
        key = read_file(make_tests_data_path(fname))

        pkey = IndexHandler.get_specific_pkey(cls, key, None)
        self.assertIsInstance(pkey, cls)
        pkey = IndexHandler.get_specific_pkey(cls, key, 'iginored')
        self.assertIsInstance(pkey, cls)
        pkey = IndexHandler.get_specific_pkey(cls, 'x'+key, None)
        self.assertIsNone(pkey)

    def test_get_specific_pkey_with_encrypted_key(self):
        fname = 'test_rsa_password.key'
        cls = paramiko.RSAKey
        password = 'television'

        key = read_file(make_tests_data_path(fname))
        pkey = IndexHandler.get_specific_pkey(cls, key, password)
        self.assertIsInstance(pkey, cls)
        pkey = IndexHandler.get_specific_pkey(cls, 'x'+key, None)
        self.assertIsNone(pkey)

        with self.assertRaises(paramiko.PasswordRequiredException):
            pkey = IndexHandler.get_specific_pkey(cls, key, None)

    def test_get_pkey_obj_with_plain_key(self):
        fname = 'test_ed25519.key'
        cls = paramiko.Ed25519Key
        key = read_file(make_tests_data_path(fname))

        pkey = IndexHandler.get_pkey_obj(key, None, fname)
        self.assertIsInstance(pkey, cls)
        pkey = IndexHandler.get_pkey_obj(key, 'iginored', fname)
        self.assertIsInstance(pkey, cls)

        with self.assertRaises(InvalidValueError) as exc:
            pkey = IndexHandler.get_pkey_obj('x'+key, None, fname)
        self.assertIn('Invalid private key', str(exc))

    def test_get_pkey_obj_with_encrypted_key(self):
        fname = 'test_ed25519_password.key'
        password = 'abc123'
        cls = paramiko.Ed25519Key
        key = read_file(make_tests_data_path(fname))

        pkey = IndexHandler.get_pkey_obj(key, password, fname)
        self.assertIsInstance(pkey, cls)

        with self.assertRaises(InvalidValueError) as exc:
            pkey = IndexHandler.get_pkey_obj(key, 'wrongpass', fname)
        self.assertIn('Wrong password', str(exc))

        with self.assertRaises(InvalidValueError) as exc:
            pkey = IndexHandler.get_pkey_obj('x'+key, password, fname)
        self.assertIn('Invalid private key', str(exc))

        with self.assertRaises(paramiko.PasswordRequiredException):
            pkey = IndexHandler.get_pkey_obj(key, '', fname)
[((12, 18, 12, 32), 'webssh.handler.MixinHandler', 'MixinHandler', ({}, {}), '()', False, 'from webssh.handler import MixinHandler, IndexHandler, InvalidValueError\n'), ((13, 26, 13, 52), 'tornado.httputil.HTTPServerRequest', 'HTTPServerRequest', (), '', False, 'from tornado.httputil import HTTPServerRequest\n'), ((40, 15, 40, 61), 'webssh.handler.IndexHandler.get_specific_pkey', 'IndexHandler.get_specific_pkey', ({(40, 46, 40, 49): 'cls', (40, 51, 40, 54): 'key', (40, 56, 40, 60): 'None'}, {}), '(cls, key, None)', False, 'from webssh.handler import MixinHandler, IndexHandler, InvalidValueError\n'), ((42, 15, 42, 67), 'webssh.handler.IndexHandler.get_specific_pkey', 'IndexHandler.get_specific_pkey', ({(42, 46, 42, 49): 'cls', (42, 51, 42, 54): 'key', (42, 56, 42, 66): '"""iginored"""'}, {}), "(cls, key, 'iginored')", False, 'from webssh.handler import MixinHandler, IndexHandler, InvalidValueError\n'), ((44, 15, 44, 65), 'webssh.handler.IndexHandler.get_specific_pkey', 'IndexHandler.get_specific_pkey', ({(44, 46, 44, 49): 'cls', (44, 51, 44, 58): "'x' + key", (44, 60, 44, 64): 'None'}, {}), "(cls, 'x' + key, None)", False, 'from webssh.handler import MixinHandler, IndexHandler, InvalidValueError\n'), ((53, 15, 53, 65), 'webssh.handler.IndexHandler.get_specific_pkey', 'IndexHandler.get_specific_pkey', ({(53, 46, 53, 49): 'cls', (53, 51, 53, 54): 'key', (53, 56, 53, 64): 'password'}, {}), '(cls, key, password)', False, 'from webssh.handler import MixinHandler, IndexHandler, InvalidValueError\n'), ((55, 15, 55, 65), 'webssh.handler.IndexHandler.get_specific_pkey', 'IndexHandler.get_specific_pkey', ({(55, 46, 55, 49): 'cls', (55, 51, 55, 58): "'x' + key", (55, 60, 55, 64): 'None'}, {}), "(cls, 'x' + key, None)", False, 'from webssh.handler import MixinHandler, IndexHandler, InvalidValueError\n'), ((65, 15, 65, 58), 'webssh.handler.IndexHandler.get_pkey_obj', 'IndexHandler.get_pkey_obj', ({(65, 41, 65, 44): 'key', (65, 46, 65, 50): 'None', (65, 52, 65, 57): 'fname'}, {}), '(key, None, fname)', False, 'from webssh.handler import MixinHandler, IndexHandler, InvalidValueError\n'), ((67, 15, 67, 64), 'webssh.handler.IndexHandler.get_pkey_obj', 'IndexHandler.get_pkey_obj', ({(67, 41, 67, 44): 'key', (67, 46, 67, 56): '"""iginored"""', (67, 58, 67, 63): 'fname'}, {}), "(key, 'iginored', fname)", False, 'from webssh.handler import MixinHandler, IndexHandler, InvalidValueError\n'), ((78, 15, 78, 62), 'webssh.handler.IndexHandler.get_pkey_obj', 'IndexHandler.get_pkey_obj', ({(78, 41, 78, 44): 'key', (78, 46, 78, 54): 'password', (78, 56, 78, 61): 'fname'}, {}), '(key, password, fname)', False, 'from webssh.handler import MixinHandler, IndexHandler, InvalidValueError\n'), ((39, 24, 39, 51), 'tests.utils.make_tests_data_path', 'make_tests_data_path', ({(39, 45, 39, 50): 'fname'}, {}), '(fname)', False, 'from tests.utils import read_file, make_tests_data_path\n'), ((52, 24, 52, 51), 'tests.utils.make_tests_data_path', 'make_tests_data_path', ({(52, 45, 52, 50): 'fname'}, {}), '(fname)', False, 'from tests.utils import read_file, make_tests_data_path\n'), ((59, 19, 59, 65), 'webssh.handler.IndexHandler.get_specific_pkey', 'IndexHandler.get_specific_pkey', ({(59, 50, 59, 53): 'cls', (59, 55, 59, 58): 'key', (59, 60, 59, 64): 'None'}, {}), '(cls, key, None)', False, 'from webssh.handler import MixinHandler, IndexHandler, InvalidValueError\n'), ((64, 24, 64, 51), 'tests.utils.make_tests_data_path', 'make_tests_data_path', ({(64, 45, 64, 50): 'fname'}, {}), '(fname)', False, 'from tests.utils import read_file, 
make_tests_data_path\n'), ((70, 19, 70, 66), 'webssh.handler.IndexHandler.get_pkey_obj', 'IndexHandler.get_pkey_obj', ({(70, 45, 70, 52): "'x' + key", (70, 54, 70, 58): 'None', (70, 60, 70, 65): 'fname'}, {}), "('x' + key, None, fname)", False, 'from webssh.handler import MixinHandler, IndexHandler, InvalidValueError\n'), ((77, 24, 77, 51), 'tests.utils.make_tests_data_path', 'make_tests_data_path', ({(77, 45, 77, 50): 'fname'}, {}), '(fname)', False, 'from tests.utils import read_file, make_tests_data_path\n'), ((81, 19, 81, 69), 'webssh.handler.IndexHandler.get_pkey_obj', 'IndexHandler.get_pkey_obj', ({(81, 45, 81, 48): 'key', (81, 50, 81, 61): '"""wrongpass"""', (81, 63, 81, 68): 'fname'}, {}), "(key, 'wrongpass', fname)", False, 'from webssh.handler import MixinHandler, IndexHandler, InvalidValueError\n'), ((84, 19, 84, 70), 'webssh.handler.IndexHandler.get_pkey_obj', 'IndexHandler.get_pkey_obj', ({(84, 45, 84, 52): "'x' + key", (84, 54, 84, 62): 'password', (84, 64, 84, 69): 'fname'}, {}), "('x' + key, password, fname)", False, 'from webssh.handler import MixinHandler, IndexHandler, InvalidValueError\n'), ((87, 19, 87, 60), 'webssh.handler.IndexHandler.get_pkey_obj', 'IndexHandler.get_pkey_obj', ({(87, 45, 87, 48): 'key', (87, 50, 87, 52): '""""""', (87, 54, 87, 59): 'fname'}, {}), "(key, '', fname)", False, 'from webssh.handler import MixinHandler, IndexHandler, InvalidValueError\n')]
SCiO-systems/qcat
apps/notifications/tests/test_views.py
8c2b8e07650bc2049420fa6de758fba7e50c2f28
import logging from unittest import mock from unittest.mock import call from django.conf import settings from django.contrib.auth import get_user_model from django.core.signing import Signer from django.urls import reverse from django.http import Http404 from django.test import RequestFactory from braces.views import LoginRequiredMixin from django.test import override_settings from model_mommy import mommy from apps.notifications.models import Log, StatusUpdate, MemberUpdate, ReadLog, \ ActionContextQuerySet from apps.notifications.views import LogListView, LogCountView, ReadLogUpdateView, \ LogQuestionnairesListView, LogInformationUpdateCreateView, \ LogSubscriptionPreferencesView, SignedLogSubscriptionPreferencesView from apps.qcat.tests import TestCase class LogListViewTest(TestCase): def setUp(self): self.view = LogListView() self.url_path = reverse('notification_partial_list') self.request = RequestFactory().get(self.url_path) self.user = {} self.request.user = self.user self.view_instance = self.setup_view( view=self.view, request=self.request ) member_add_log = mommy.make( _model=Log, id=8, action=settings.NOTIFICATIONS_ADD_MEMBER ) self.change_log = mommy.make( _model=Log, id=42, action=settings.NOTIFICATIONS_CHANGE_STATUS ) mommy.make(_model=StatusUpdate, log=self.change_log) mommy.make(_model=MemberUpdate, log=member_add_log) def get_view_with_get_querystring(self, param): request = RequestFactory().get( '{url}?{param}'.format(url=self.url_path, param=param) ) request.user = self.user return self.setup_view(view=self.view, request=request) def test_force_login(self): self.assertIsInstance(self.view_instance, LoginRequiredMixin) def test_queryset_method(self): self.assertEqual( self.view_instance.queryset_method, 'user_log_list' ) def test_queryset_method_pending(self): self.assertEqual( self.get_view_with_get_querystring('is_pending').queryset_method, 'user_pending_list' ) def test_get_paginate_by(self): self.assertEqual( self.view_instance.get_paginate_by(None), settings.NOTIFICATIONS_LIST_PAGINATE_BY ) def test_get_paginate_by_teaser(self): self.assertEqual( self.get_view_with_get_querystring('is_teaser').get_paginate_by(None), settings.NOTIFICATIONS_TEASER_PAGINATE_BY ) @mock.patch('apps.notifications.views.Log.actions.user_log_list') def test_get_queryset(self, mock_actions): self.view_instance.get_queryset() mock_actions.assert_called_once_with(user={}) @mock.patch('apps.notifications.views.Log.actions.user_pending_list') def test_get_queryset_pending(self, mock_actions): self.get_view_with_get_querystring('is_pending').get_queryset() mock_actions.assert_called_once_with(user={}) @mock.patch.object(LogListView, 'add_user_aware_data') def test_get_context_data_logs(self, mock_add_user_aware_data): self.view_instance.object_list = 'foo' self.view_instance.get_context_data() mock_add_user_aware_data.assert_called_once_with('foo') def _test_add_user_aware_data(self): # for faster tests, mock all the elements. elements are created here # as this makes the tests more readable. 
pth = 'apps.notifications.views.Log.actions' with mock.patch('{}.read_id_list'.format(pth)) as read_id_list: read_id_list.return_value = [42] with mock.patch('{}.user_pending_list'.format(pth)) as pending: pending.values_list.return_value = [8, 42] logs = Log.objects.all() return list(self.view_instance.add_user_aware_data(logs)) def test_add_user_aware_data_keys(self): data_keys = self._test_add_user_aware_data()[0].keys() for key in ['id', 'created', 'text', 'is_read', 'is_todo', 'edit_url']: self.assertTrue(key in data_keys) def test_add_user_aware_data_is_read(self): data = self._test_add_user_aware_data() # logs are ordered by creation date - 42 is the newer one self.assertTrue(data[0]['is_read']) def test_add_user_aware_data_is_not_read(self): data = self._test_add_user_aware_data() self.assertFalse(data[1]['is_read']) #def test_add_user_aware_data_is_todo(self): # data = self._test_add_user_aware_data() # self.assertTrue(data[1]['is_todo']) def test_add_user_aware_data_is_not_todo(self): data = self._test_add_user_aware_data() self.assertFalse(data[0]['is_todo']) @override_settings(NOTIFICATIONS_ACTIONS={'foo': 'bar', 'result': '42'}) def test_statuses_in_context(self): self.view_instance.object_list = [] context = self.view_instance.get_context_data() self.assertDictEqual( context['statuses'], {'foo': 'bar', 'result': '42'} ) @mock.patch('apps.notifications.views.Log.actions.user_log_list') def test_status_filter_queryset(self, mock_user_log_list): mock_user_log_list.return_value = [] self.assertEqual( [], self.view_instance.get_queryset() ) @mock.patch('apps.notifications.views.Log.actions.user_log_list') def test_status_filter_queryset_for_status(self, mock_user_log_list): mock_user_log_list.return_value = Log.objects.filter() view = self.view view.get_statuses = mock.MagicMock(return_value=[3]) view_instance = self.setup_view( view=view, request=self.request ) self.assertQuerysetEqual( view_instance.get_queryset(), [self.change_log.id], transform=lambda item: item.id ) def test_get_status_invalid(self): request = RequestFactory().get('{}?statuses=foo'.format(self.url_path)) view = self.setup_view(self.view, request) self.assertEqual(view.get_statuses(), []) @override_settings(NOTIFICATIONS_ACTIONS={'2': 'bar'}) def test_get_status_invalid_config(self): request = RequestFactory().get('{}?statuses=1'.format(self.url_path)) view = self.setup_view(self.view, request) self.assertEqual(view.get_statuses(), []) def test_get_status_valid(self): request = RequestFactory().get('{}?statuses=1,2,3'.format(self.url_path)) view = self.setup_view(self.view, request) self.assertEqual(view.get_statuses(), [1, 2, 3]) class ReadLogUpdateViewTest(TestCase): def setUp(self): self.view = ReadLogUpdateView() self.request = RequestFactory().post( reverse('notification_read'), data={'user': 123, 'log': 'log', 'checked': 'true'} ) self.user = mock.MagicMock(id=123) self.request.user = self.user self.view_instance = self.setup_view(view=self.view, request=self.request) def test_validate_data_all_keys(self): self.assertFalse( self.view_instance.validate_data() ) def test_validate_data_id_type(self): self.assertFalse( self.view_instance.validate_data(checked='1', log='1', user='foo') ) def test_validate_data_invalid_user(self): self.assertFalse( self.view_instance.validate_data(checked='456', log='1', user='456') ) def test_validate_data_valid(self): self.assertTrue( self.view_instance.validate_data(checked='1', log='1', user='123') ) @mock.patch('apps.notifications.views.ReadLog.objects.update_or_create') 
def test_post_valid_checked(self, mock_get_or_create): self.view_instance.post(request=self.request) mock_get_or_create.assert_called_once_with( user_id='123', log_id='log', defaults={'is_read': True} ) @mock.patch('apps.notifications.views.ReadLog.objects.update_or_create') def test_post_valid_unchecked(self, mock_get_or_create): request = RequestFactory().post( reverse('notification_read'), data={'user': 123, 'log': 'log', 'checked': 'false'} ) self.view_instance.post(request=request) mock_get_or_create.assert_called_once_with( user_id='123', log_id='log', defaults={'is_read': False} ) @mock.patch.object(ReadLogUpdateView, 'validate_data') def test_post_invalid(self, mock_validate_data): logging.disable(logging.CRITICAL) mock_validate_data.return_value = False with self.assertRaises(Http404): self.view_instance.post(request=self.request) class LogCountViewTest(TestCase): def setUp(self): super().setUp() self.request = RequestFactory().get(reverse('notification_new_count')) self.request.user = mommy.make(_model=get_user_model()) self.view = self.setup_view(view=LogCountView(), request=self.request) mommy.make( _model=Log, catalyst=self.request.user, action=settings.NOTIFICATIONS_CHANGE_STATUS, _quantity=4 ) mommy.make( _model=Log, catalyst=self.request.user, action=settings.NOTIFICATIONS_EDIT_CONTENT, _quantity=2 ) @mock.patch('apps.notifications.views.Log.actions.only_unread_logs') def test_get_unread_only(self, mock_only_unread_logs): self.view.get(request=self.request) mock_only_unread_logs.assert_called_once_with( user=self.request.user ) def test_log_count(self): response = self.view.get(request=self.request) self.assertEqual(response.content, b'4') def test_log_count_one_read(self): mommy.make( _model=ReadLog, log=Log.objects.filter(action=settings.NOTIFICATIONS_CHANGE_STATUS).first(), user=self.request.user, is_read=True ) response = self.view.get(request=self.request) self.assertEqual(response.content, b'3') class LogQuestionnairesListViewTest(TestCase): def setUp(self): super().setUp() self.request = RequestFactory().get(reverse('notification_questionnaire_logs')) self.request.user = 'foo' self.view = self.setup_view(view=LogQuestionnairesListView(), request=self.request) @mock.patch.object(ActionContextQuerySet, 'user_log_list') def test_get_questionnaire_logs(self, mock_user_log_list): self.view.get_questionnaire_logs('foo') mock_user_log_list.assert_called_once_with(user='foo') @mock.patch.object(LogQuestionnairesListView, 'get_questionnaire_logs') def test_get(self, mock_get_questionnaire_logs): mock_get_questionnaire_logs.return_value = ['foo_1', 'foo_2', 'bar_3'] response = self.view.get(self.request) self.assertEqual( response.content, b'{"questionnaires": ["bar_3", "foo_1", "foo_2"]}' ) class LogInformationUpdateCreateViewTest(TestCase): def setUp(self): super().setUp() self.url = reverse('notification_inform_compiler') self.view = LogInformationUpdateCreateView() self.request = RequestFactory().get(self.url) self.request.user = 'foo' self.view = self.setup_view(view=self.view, request=self.request) def test_get_compiler_query(self): questionnaire = mock.MagicMock() self.view.get_compiler(questionnaire) self.assertEqual( questionnaire.method_calls[0], call.questionnairemembership_set.get(role='compiler') ) def test_get_compiler(self): sentinel = mock.sentinel questionnaire = mock.MagicMock() questionnaire.questionnairemembership_set.get.return_value = sentinel self.assertEqual( self.view.get_compiler(questionnaire), sentinel.user ) 
@mock.patch('apps.notifications.views.query_questionnaire') def test_get_questionnaire(self, mock_query_questionnaire): one_questionnaire = mock.MagicMock() one_questionnaire.first = lambda : 'foo' mock_query_questionnaire.return_value = one_questionnaire self.assertEqual( self.view.get_questionnaire('foo'), 'foo' ) @mock.patch('apps.notifications.views.query_questionnaire') def test_get_questionnaire_raises(self, mock_query_questionnaire): not_exists = mock.MagicMock() not_exists.exists = lambda : False mock_query_questionnaire.return_value = not_exists with self.assertRaises(Http404): self.view.get_questionnaire('foo') @mock.patch('apps.notifications.views.query_questionnaire') def test_get_questionnaire_calls_filter(self, mock_query_questionnaire): self.view.get_questionnaire('foo') mock_query_questionnaire.assert_called_once_with( identifier='foo', request=self.request ) @override_settings(NOTIFICATIONS_FINISH_EDITING='setting') @mock.patch.object(LogInformationUpdateCreateView, 'get_questionnaire') @mock.patch.object(LogInformationUpdateCreateView, 'get_compiler') def test_post(self, mock_get_compiler, mock_get_questionnaire): compiler = mock.MagicMock() mock_get_questionnaire.return_value = mock.sentinel.questionnaire mock_get_compiler.return_value = compiler request = RequestFactory().post(self.url, data={ 'identifier': 'foo', 'message': 'bar' }) with mock.patch('apps.notifications.views.InformationLog') as mock_create: self.setup_view(view=self.view, request=self.request).post(request) mock_create.assert_called_once_with( action='setting', questionnaire=mock.sentinel.questionnaire, receiver=compiler, sender='foo' ) class LogSubscriptionPreferencesMixinTest(TestCase): def setUp(self): self.url = reverse('notification_preferences') self.view = LogSubscriptionPreferencesView() self.request = RequestFactory().get(self.url) self.user = mommy.make(_model=get_user_model()) self.obj = self.user.mailpreferences self.request.user = self.user self.request._messages = mock.MagicMock() self.view = self.setup_view(view=self.view, request=self.request) self.view.object = self.obj def test_get_initial(self): self.obj.wanted_actions = 'some,thing,yay' self.assertEqual( ['some', 'thing', 'yay'], self.view.get_initial()['wanted_actions'] ) def test_get_form_valid_changed_language(self): self.view.object = mock.MagicMock() self.view.object.has_changed_language = False form = mock.MagicMock() form.changed_data = ['language'] self.view.form_valid(form) self.assertTrue(self.view.object.has_changed_language) def test_get_form_valid_message(self): self.view.form_valid(mock.MagicMock()) self.assertTrue(self.request._messages.method_calls) class SignedLogSubscriptionPreferencesViewTest(TestCase): def setUp(self): self.user = mommy.make(_model=get_user_model()) self.obj = self.user.mailpreferences self.view = SignedLogSubscriptionPreferencesView() self.request = RequestFactory().get(str(self.obj.get_signed_url())) self.request._messages = mock.MagicMock() self.view = self.setup_view(view=self.view, request=self.request) self.view.object = self.obj def test_get_success_url_signed(self): mock_user = mock.MagicMock(return_value=self.user) mock_user.is_authenticated = False mock_user.id = self.user.id self.request.user = mock_user self.assertEqual( self.view.get_success_url(), self.obj.get_signed_url() ) def test_get_success_url_user(self): self.request.user = self.user self.assertEqual( self.view.get_success_url(), reverse('notification_preferences') ) def test_get_object_user(self): self.request.user = 
self.user self.assertEqual( self.view.get_object(), self.obj ) def test_get_signed_object(self): mock_user = mock.MagicMock(return_value=self.user) mock_user.is_authenticated = False mock_user.id=self.user.id self.request.user = mock_user self.view.kwargs['token'] = mock.MagicMock() with mock.patch.object(Signer, 'unsign') as mock_unsign: mock_unsign.return_value = self.obj.id self.assertEqual( self.view.get_object(), self.obj ) mock_unsign.assert_called_with(self.view.kwargs['token']) def test_get_signed_object_404(self): mock_user = mock.MagicMock(return_value=self.user) mock_user.is_authenticated = False mock_user.id = self.user.id self.request.user = mock_user self.view.kwargs['token'] = mock.MagicMock() with self.assertRaises(Http404): self.view.get_object()
[((81, 5, 81, 69), 'unittest.mock.patch', 'mock.patch', ({(81, 16, 81, 68): '"""apps.notifications.views.Log.actions.user_log_list"""'}, {}), "('apps.notifications.views.Log.actions.user_log_list')", False, 'from unittest import mock\n'), ((86, 5, 86, 73), 'unittest.mock.patch', 'mock.patch', ({(86, 16, 86, 72): '"""apps.notifications.views.Log.actions.user_pending_list"""'}, {}), "('apps.notifications.views.Log.actions.user_pending_list')", False, 'from unittest import mock\n'), ((91, 5, 91, 58), 'unittest.mock.patch.object', 'mock.patch.object', ({(91, 23, 91, 34): 'LogListView', (91, 36, 91, 57): '"""add_user_aware_data"""'}, {}), "(LogListView, 'add_user_aware_data')", False, 'from unittest import mock\n'), ((130, 5, 130, 76), 'django.test.override_settings', 'override_settings', (), '', False, 'from django.test import override_settings\n'), ((139, 5, 139, 69), 'unittest.mock.patch', 'mock.patch', ({(139, 16, 139, 68): '"""apps.notifications.views.Log.actions.user_log_list"""'}, {}), "('apps.notifications.views.Log.actions.user_log_list')", False, 'from unittest import mock\n'), ((146, 5, 146, 69), 'unittest.mock.patch', 'mock.patch', ({(146, 16, 146, 68): '"""apps.notifications.views.Log.actions.user_log_list"""'}, {}), "('apps.notifications.views.Log.actions.user_log_list')", False, 'from unittest import mock\n'), ((165, 5, 165, 58), 'django.test.override_settings', 'override_settings', (), '', False, 'from django.test import override_settings\n'), ((209, 5, 209, 76), 'unittest.mock.patch', 'mock.patch', ({(209, 16, 209, 75): '"""apps.notifications.views.ReadLog.objects.update_or_create"""'}, {}), "('apps.notifications.views.ReadLog.objects.update_or_create')", False, 'from unittest import mock\n'), ((216, 5, 216, 76), 'unittest.mock.patch', 'mock.patch', ({(216, 16, 216, 75): '"""apps.notifications.views.ReadLog.objects.update_or_create"""'}, {}), "('apps.notifications.views.ReadLog.objects.update_or_create')", False, 'from unittest import mock\n'), ((227, 5, 227, 58), 'unittest.mock.patch.object', 'mock.patch.object', ({(227, 23, 227, 40): 'ReadLogUpdateView', (227, 42, 227, 57): '"""validate_data"""'}, {}), "(ReadLogUpdateView, 'validate_data')", False, 'from unittest import mock\n'), ((255, 5, 255, 72), 'unittest.mock.patch', 'mock.patch', ({(255, 16, 255, 71): '"""apps.notifications.views.Log.actions.only_unread_logs"""'}, {}), "('apps.notifications.views.Log.actions.only_unread_logs')", False, 'from unittest import mock\n'), ((285, 5, 285, 62), 'unittest.mock.patch.object', 'mock.patch.object', ({(285, 23, 285, 44): 'ActionContextQuerySet', (285, 46, 285, 61): '"""user_log_list"""'}, {}), "(ActionContextQuerySet, 'user_log_list')", False, 'from unittest import mock\n'), ((291, 5, 291, 75), 'unittest.mock.patch.object', 'mock.patch.object', ({(291, 23, 291, 48): 'LogQuestionnairesListView', (291, 50, 291, 74): '"""get_questionnaire_logs"""'}, {}), "(LogQuestionnairesListView, 'get_questionnaire_logs')", False, 'from unittest import mock\n'), ((327, 5, 327, 63), 'unittest.mock.patch', 'mock.patch', ({(327, 16, 327, 62): '"""apps.notifications.views.query_questionnaire"""'}, {}), "('apps.notifications.views.query_questionnaire')", False, 'from unittest import mock\n'), ((336, 5, 336, 63), 'unittest.mock.patch', 'mock.patch', ({(336, 16, 336, 62): '"""apps.notifications.views.query_questionnaire"""'}, {}), "('apps.notifications.views.query_questionnaire')", False, 'from unittest import mock\n'), ((344, 5, 344, 63), 'unittest.mock.patch', 'mock.patch', ({(344, 16, 344, 62): 
'"""apps.notifications.views.query_questionnaire"""'}, {}), "('apps.notifications.views.query_questionnaire')", False, 'from unittest import mock\n'), ((351, 5, 351, 62), 'django.test.override_settings', 'override_settings', (), '', False, 'from django.test import override_settings\n'), ((352, 5, 352, 75), 'unittest.mock.patch.object', 'mock.patch.object', ({(352, 23, 352, 53): 'LogInformationUpdateCreateView', (352, 55, 352, 74): '"""get_questionnaire"""'}, {}), "(LogInformationUpdateCreateView, 'get_questionnaire')", False, 'from unittest import mock\n'), ((353, 5, 353, 70), 'unittest.mock.patch.object', 'mock.patch.object', ({(353, 23, 353, 53): 'LogInformationUpdateCreateView', (353, 55, 353, 69): '"""get_compiler"""'}, {}), "(LogInformationUpdateCreateView, 'get_compiler')", False, 'from unittest import mock\n'), ((26, 20, 26, 33), 'apps.notifications.views.LogListView', 'LogListView', ({}, {}), '()', False, 'from apps.notifications.views import LogListView, LogCountView, ReadLogUpdateView, LogQuestionnairesListView, LogInformationUpdateCreateView, LogSubscriptionPreferencesView, SignedLogSubscriptionPreferencesView\n'), ((27, 24, 27, 60), 'django.urls.reverse', 'reverse', ({(27, 32, 27, 59): '"""notification_partial_list"""'}, {}), "('notification_partial_list')", False, 'from django.urls import reverse\n'), ((34, 25, 38, 9), 'model_mommy.mommy.make', 'mommy.make', (), '', False, 'from model_mommy import mommy\n'), ((39, 26, 43, 9), 'model_mommy.mommy.make', 'mommy.make', (), '', False, 'from model_mommy import mommy\n'), ((44, 8, 44, 60), 'model_mommy.mommy.make', 'mommy.make', (), '', False, 'from model_mommy import mommy\n'), ((45, 8, 45, 59), 'model_mommy.mommy.make', 'mommy.make', (), '', False, 'from model_mommy import mommy\n'), ((148, 42, 148, 62), 'apps.notifications.models.Log.objects.filter', 'Log.objects.filter', ({}, {}), '()', False, 'from apps.notifications.models import Log, StatusUpdate, MemberUpdate, ReadLog, ActionContextQuerySet\n'), ((150, 28, 150, 60), 'unittest.mock.MagicMock', 'mock.MagicMock', (), '', False, 'from unittest import mock\n'), ((180, 20, 180, 39), 'apps.notifications.views.ReadLogUpdateView', 'ReadLogUpdateView', ({}, {}), '()', False, 'from apps.notifications.views import LogListView, LogCountView, ReadLogUpdateView, LogQuestionnairesListView, LogInformationUpdateCreateView, LogSubscriptionPreferencesView, SignedLogSubscriptionPreferencesView\n'), ((185, 20, 185, 42), 'unittest.mock.MagicMock', 'mock.MagicMock', (), '', False, 'from unittest import mock\n'), ((229, 8, 229, 41), 'logging.disable', 'logging.disable', ({(229, 24, 229, 40): 'logging.CRITICAL'}, {}), '(logging.CRITICAL)', False, 'import logging\n'), ((242, 8, 247, 9), 'model_mommy.mommy.make', 'mommy.make', (), '', False, 'from model_mommy import mommy\n'), ((248, 8, 253, 9), 'model_mommy.mommy.make', 'mommy.make', (), '', False, 'from model_mommy import mommy\n'), ((304, 19, 304, 58), 'django.urls.reverse', 'reverse', ({(304, 27, 304, 57): '"""notification_inform_compiler"""'}, {}), "('notification_inform_compiler')", False, 'from django.urls import reverse\n'), ((305, 20, 305, 52), 'apps.notifications.views.LogInformationUpdateCreateView', 'LogInformationUpdateCreateView', ({}, {}), '()', False, 'from apps.notifications.views import LogListView, LogCountView, ReadLogUpdateView, LogQuestionnairesListView, LogInformationUpdateCreateView, LogSubscriptionPreferencesView, SignedLogSubscriptionPreferencesView\n'), ((311, 24, 311, 40), 'unittest.mock.MagicMock', 'mock.MagicMock', ({}, 
{}), '()', False, 'from unittest import mock\n'), ((320, 24, 320, 40), 'unittest.mock.MagicMock', 'mock.MagicMock', ({}, {}), '()', False, 'from unittest import mock\n'), ((329, 28, 329, 44), 'unittest.mock.MagicMock', 'mock.MagicMock', ({}, {}), '()', False, 'from unittest import mock\n'), ((338, 21, 338, 37), 'unittest.mock.MagicMock', 'mock.MagicMock', ({}, {}), '()', False, 'from unittest import mock\n'), ((355, 19, 355, 35), 'unittest.mock.MagicMock', 'mock.MagicMock', ({}, {}), '()', False, 'from unittest import mock\n'), ((375, 19, 375, 54), 'django.urls.reverse', 'reverse', ({(375, 27, 375, 53): '"""notification_preferences"""'}, {}), "('notification_preferences')", False, 'from django.urls import reverse\n'), ((376, 20, 376, 52), 'apps.notifications.views.LogSubscriptionPreferencesView', 'LogSubscriptionPreferencesView', ({}, {}), '()', False, 'from apps.notifications.views import LogListView, LogCountView, ReadLogUpdateView, LogQuestionnairesListView, LogInformationUpdateCreateView, LogSubscriptionPreferencesView, SignedLogSubscriptionPreferencesView\n'), ((381, 33, 381, 49), 'unittest.mock.MagicMock', 'mock.MagicMock', ({}, {}), '()', False, 'from unittest import mock\n'), ((393, 27, 393, 43), 'unittest.mock.MagicMock', 'mock.MagicMock', ({}, {}), '()', False, 'from unittest import mock\n'), ((395, 15, 395, 31), 'unittest.mock.MagicMock', 'mock.MagicMock', ({}, {}), '()', False, 'from unittest import mock\n'), ((410, 20, 410, 58), 'apps.notifications.views.SignedLogSubscriptionPreferencesView', 'SignedLogSubscriptionPreferencesView', ({}, {}), '()', False, 'from apps.notifications.views import LogListView, LogCountView, ReadLogUpdateView, LogQuestionnairesListView, LogInformationUpdateCreateView, LogSubscriptionPreferencesView, SignedLogSubscriptionPreferencesView\n'), ((412, 33, 412, 49), 'unittest.mock.MagicMock', 'mock.MagicMock', ({}, {}), '()', False, 'from unittest import mock\n'), ((417, 20, 417, 58), 'unittest.mock.MagicMock', 'mock.MagicMock', (), '', False, 'from unittest import mock\n'), ((441, 20, 441, 58), 'unittest.mock.MagicMock', 'mock.MagicMock', (), '', False, 'from unittest import mock\n'), ((445, 36, 445, 52), 'unittest.mock.MagicMock', 'mock.MagicMock', ({}, {}), '()', False, 'from unittest import mock\n'), ((454, 20, 454, 58), 'unittest.mock.MagicMock', 'mock.MagicMock', (), '', False, 'from unittest import mock\n'), ((458, 36, 458, 52), 'unittest.mock.MagicMock', 'mock.MagicMock', ({}, {}), '()', False, 'from unittest import mock\n'), ((182, 12, 182, 40), 'django.urls.reverse', 'reverse', ({(182, 20, 182, 39): '"""notification_read"""'}, {}), "('notification_read')", False, 'from django.urls import reverse\n'), ((219, 12, 219, 40), 'django.urls.reverse', 'reverse', ({(219, 20, 219, 39): '"""notification_read"""'}, {}), "('notification_read')", False, 'from django.urls import reverse\n'), ((239, 44, 239, 77), 'django.urls.reverse', 'reverse', ({(239, 52, 239, 76): '"""notification_new_count"""'}, {}), "('notification_new_count')", False, 'from django.urls import reverse\n'), ((281, 44, 281, 86), 'django.urls.reverse', 'reverse', ({(281, 52, 281, 85): '"""notification_questionnaire_logs"""'}, {}), "('notification_questionnaire_logs')", False, 'from django.urls import reverse\n'), ((315, 12, 315, 65), 'unittest.mock.call.questionnairemembership_set.get', 'call.questionnairemembership_set.get', (), '', False, 'from unittest.mock import call\n'), ((362, 13, 362, 66), 'unittest.mock.patch', 'mock.patch', ({(362, 24, 362, 65): 
'"""apps.notifications.views.InformationLog"""'}, {}), "('apps.notifications.views.InformationLog')", False, 'from unittest import mock\n'), ((401, 29, 401, 45), 'unittest.mock.MagicMock', 'mock.MagicMock', ({}, {}), '()', False, 'from unittest import mock\n'), ((430, 12, 430, 47), 'django.urls.reverse', 'reverse', ({(430, 20, 430, 46): '"""notification_preferences"""'}, {}), "('notification_preferences')", False, 'from django.urls import reverse\n'), ((446, 13, 446, 48), 'unittest.mock.patch.object', 'mock.patch.object', ({(446, 31, 446, 37): 'Signer', (446, 39, 446, 47): '"""unsign"""'}, {}), "(Signer, 'unsign')", False, 'from unittest import mock\n'), ((28, 23, 28, 39), 'django.test.RequestFactory', 'RequestFactory', ({}, {}), '()', False, 'from django.test import RequestFactory\n'), ((48, 18, 48, 34), 'django.test.RequestFactory', 'RequestFactory', ({}, {}), '()', False, 'from django.test import RequestFactory\n'), ((105, 23, 105, 40), 'apps.notifications.models.Log.objects.all', 'Log.objects.all', ({}, {}), '()', False, 'from apps.notifications.models import Log, StatusUpdate, MemberUpdate, ReadLog, ActionContextQuerySet\n'), ((161, 18, 161, 34), 'django.test.RequestFactory', 'RequestFactory', ({}, {}), '()', False, 'from django.test import RequestFactory\n'), ((167, 18, 167, 34), 'django.test.RequestFactory', 'RequestFactory', ({}, {}), '()', False, 'from django.test import RequestFactory\n'), ((172, 18, 172, 34), 'django.test.RequestFactory', 'RequestFactory', ({}, {}), '()', False, 'from django.test import RequestFactory\n'), ((181, 23, 181, 39), 'django.test.RequestFactory', 'RequestFactory', ({}, {}), '()', False, 'from django.test import RequestFactory\n'), ((218, 18, 218, 34), 'django.test.RequestFactory', 'RequestFactory', ({}, {}), '()', False, 'from django.test import RequestFactory\n'), ((239, 23, 239, 39), 'django.test.RequestFactory', 'RequestFactory', ({}, {}), '()', False, 'from django.test import RequestFactory\n'), ((240, 46, 240, 62), 'django.contrib.auth.get_user_model', 'get_user_model', ({}, {}), '()', False, 'from django.contrib.auth import get_user_model\n'), ((241, 41, 241, 55), 'apps.notifications.views.LogCountView', 'LogCountView', ({}, {}), '()', False, 'from apps.notifications.views import LogListView, LogCountView, ReadLogUpdateView, LogQuestionnairesListView, LogInformationUpdateCreateView, LogSubscriptionPreferencesView, SignedLogSubscriptionPreferencesView\n'), ((281, 23, 281, 39), 'django.test.RequestFactory', 'RequestFactory', ({}, {}), '()', False, 'from django.test import RequestFactory\n'), ((283, 41, 283, 68), 'apps.notifications.views.LogQuestionnairesListView', 'LogQuestionnairesListView', ({}, {}), '()', False, 'from apps.notifications.views import LogListView, LogCountView, ReadLogUpdateView, LogQuestionnairesListView, LogInformationUpdateCreateView, LogSubscriptionPreferencesView, SignedLogSubscriptionPreferencesView\n'), ((306, 23, 306, 39), 'django.test.RequestFactory', 'RequestFactory', ({}, {}), '()', False, 'from django.test import RequestFactory\n'), ((358, 18, 358, 34), 'django.test.RequestFactory', 'RequestFactory', ({}, {}), '()', False, 'from django.test import RequestFactory\n'), ((377, 23, 377, 39), 'django.test.RequestFactory', 'RequestFactory', ({}, {}), '()', False, 'from django.test import RequestFactory\n'), ((378, 38, 378, 54), 'django.contrib.auth.get_user_model', 'get_user_model', ({}, {}), '()', False, 'from django.contrib.auth import get_user_model\n'), ((408, 38, 408, 54), 'django.contrib.auth.get_user_model', 
'get_user_model', ({}, {}), '()', False, 'from django.contrib.auth import get_user_model\n'), ((411, 23, 411, 39), 'django.test.RequestFactory', 'RequestFactory', ({}, {}), '()', False, 'from django.test import RequestFactory\n'), ((269, 16, 269, 79), 'apps.notifications.models.Log.objects.filter', 'Log.objects.filter', (), '', False, 'from apps.notifications.models import Log, StatusUpdate, MemberUpdate, ReadLog, ActionContextQuerySet\n')]
willvousden/clint
examples/resources.py
6dc7ab1a6a162750e968463b43994447bca32544
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import print_function import sys import os sys.path.insert(0, os.path.abspath('..')) from clint import resources resources.init('kennethreitz', 'clint') lorem = 'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.' print('%s created.' % resources.user.path) resources.user.write('lorem.txt', lorem) print('lorem.txt created') assert resources.user.read('lorem.txt') == lorem print('lorem.txt has correct contents') resources.user.delete('lorem.txt') print('lorem.txt deleted') assert resources.user.read('lorem.txt') == None print('lorem.txt deletion confirmed')
[((13, 0, 13, 39), 'clint.resources.init', 'resources.init', ({(13, 15, 13, 29): '"""kennethreitz"""', (13, 31, 13, 38): '"""clint"""'}, {}), "('kennethreitz', 'clint')", False, 'from clint import resources\n'), ((20, 0, 20, 40), 'clint.resources.user.write', 'resources.user.write', ({(20, 21, 20, 32): '"""lorem.txt"""', (20, 34, 20, 39): 'lorem'}, {}), "('lorem.txt', lorem)", False, 'from clint import resources\n'), ((26, 0, 26, 34), 'clint.resources.user.delete', 'resources.user.delete', ({(26, 22, 26, 33): '"""lorem.txt"""'}, {}), "('lorem.txt')", False, 'from clint import resources\n'), ((9, 19, 9, 40), 'os.path.abspath', 'os.path.abspath', ({(9, 35, 9, 39): '""".."""'}, {}), "('..')", False, 'import os\n'), ((23, 7, 23, 39), 'clint.resources.user.read', 'resources.user.read', ({(23, 27, 23, 38): '"""lorem.txt"""'}, {}), "('lorem.txt')", False, 'from clint import resources\n'), ((29, 7, 29, 39), 'clint.resources.user.read', 'resources.user.read', ({(29, 27, 29, 38): '"""lorem.txt"""'}, {}), "('lorem.txt')", False, 'from clint import resources\n')]
charlesmugambi/Instagram
photos/urls.py
3a9dfc32c45bf9f221b22b7075ce31b1a16dcba7
from django.conf.urls import url from django.conf import settings from django.conf.urls.static import static from . import views urlpatterns = [ url(r'^$', views.index, name='index'), url(r'^image/$', views.add_image, name='upload_image'), url(r'^profile/$', views.profile_info, name='profile'), url(r'^update/$', views.profile_update, name='update'), url(r'^comment/(?P<image_id>\d+)', views.comment, name='comment'), url(r'^search/', views.search_results, name = 'search_results'), url(r'^follow/(?P<user_id>\d+)', views.follow, name = 'follow'), url(r'^unfollow/(?P<user_id>\d+)', views.unfollow, name='unfollow'), url(r'^likes/(\d+)/$', views.like_images,name='likes') ] if settings.DEBUG: urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
[((7, 4, 7, 41), 'django.conf.urls.url', 'url', (), '', False, 'from django.conf.urls import url\n'), ((8, 4, 8, 58), 'django.conf.urls.url', 'url', (), '', False, 'from django.conf.urls import url\n'), ((9, 4, 9, 58), 'django.conf.urls.url', 'url', (), '', False, 'from django.conf.urls import url\n'), ((10, 4, 10, 58), 'django.conf.urls.url', 'url', (), '', False, 'from django.conf.urls import url\n'), ((11, 4, 11, 69), 'django.conf.urls.url', 'url', (), '', False, 'from django.conf.urls import url\n'), ((12, 4, 12, 67), 'django.conf.urls.url', 'url', (), '', False, 'from django.conf.urls import url\n'), ((13, 4, 13, 67), 'django.conf.urls.url', 'url', (), '', False, 'from django.conf.urls import url\n'), ((14, 4, 14, 71), 'django.conf.urls.url', 'url', (), '', False, 'from django.conf.urls import url\n'), ((15, 4, 15, 58), 'django.conf.urls.url', 'url', (), '', False, 'from django.conf.urls import url\n'), ((18, 19, 18, 80), 'django.conf.urls.static.static', 'static', (), '', False, 'from django.conf.urls.static import static\n')]
vgfang/breadbot
bread.py
e58807431945e6d4de8dfc6c4dc4c90caebf88ca
import random import math from fractions import Fraction from datetime import datetime from jinja2 import Template # empty class for passing to template engine class Recipe: def __init__(self): return # returns flour percent using flour type def get_special_flour_percent(flourType: str, breadFlourPercent:int) -> int: if flourType == 'Hard Red Whole Wheat' or flourType == 'Hard White Wheat': percentages = [0,25,30,35,40,45,50] percentages = list(filter(lambda x: 100-breadFlourPercent >= x, percentages)) return random.choice(percentages) elif flourType == 'Rye' and breadFlourPercent >= 75: percentages = [0,10,15,20] percentages = list(filter(lambda x: 100-breadFlourPercent >= x, percentages)) return random.choice(percentages) else: percentages = [0,10,15,20,25,30] percentages = list(filter(lambda x: 100-breadFlourPercent >= x, percentages)) return random.choice(percentages) # returns multiplied spoon units from teaspoon fraction input, 3 tsp = 1 tbsp def spoon_mult(tsp: Fraction(), multiplier: float) -> str: tsp *= Fraction(multiplier) spoonString = "" if tsp >= 3: # use tablespoons tablespoons = int(tsp // 3) remainder = (tsp % 3) / 3 if tablespoons != 0: spoonString += f"{tablespoons} " if remainder.numerator != 0: spoonString += f"{remainder.numerator}/{remainder.denominator} " return f"{spoonString}tbsp" else: teaspoons = int(tsp // 1) remainder = tsp % 1 if teaspoons != 0: spoonString += f"{teaspoons} " if remainder.numerator != 0: spoonString += f"{remainder.numerator}/{remainder.denominator} " return f"{spoonString}tsp" # returns amount given the type of flavoring(spices) def get_flavor_amount(flavor: str, flourAmount: int) -> str: colorsDict = {} scale = 4 # floors to the 500g/scale for clean fractional multiplication multiplier = math.floor(flourAmount/500*scale) / scale # flavors in category red = ('Cardamom', 'Nutmeg','Hazelnut','Almond','Lemon Extract','Peppermint') blue = ('Cinnamon', 'Allspice') green = ('Vanilla', 'Instant Coffee') purple = ('Orange Zest', 'Lime Zest', 'Lemon Zest', 'Ginger') orange = ('Lavender', 'Hojicha', 'Matcha', 'Earl Grey', 'Oolong') # default possible teaspoon values list for flour = 500, 3 tsp = 1 tbsp redAmt = list(map(Fraction, [1/4, 1/2])) blueAmt = list(map(Fraction, [1/4, 1/2, 1])) greenAmt = list(map(Fraction, [1/2, 1, 3/2])) purpleAmt = list(map(Fraction, [2, 3, 9/2])) orangeAmt = list(map(Fraction, [9])) # random tablespoons colorsDict[red] = list(map(lambda x: spoon_mult(x, multiplier), redAmt)) colorsDict[blue] = list(map(lambda x: spoon_mult(x, multiplier), blueAmt)) colorsDict[green] = list(map(lambda x: spoon_mult(x, multiplier), greenAmt)) colorsDict[purple] = list(map(lambda x: spoon_mult(x, multiplier), purpleAmt)) colorsDict[orange] = list(map(lambda x: spoon_mult(x, multiplier), orangeAmt)) for color in colorsDict.keys(): if flavor in color: return random.choice(colorsDict[color]) # print("Error in Flavor Input: " + flavor) return "get_flavor_amount wrong input" # returns list of spices using number of spices def get_spices(spicesNum: int) -> [str]: spicesList = ['Cinnamon', 'Allspice', 'Cardamom', 'Nutmeg'] if spicesNum > len(spicesList): print("WARNING: spicesNum exceeds number of spices") return spicesList if spicesNum == 1: return random.sample(['Cinnamon', 'Cardamom'], 1) return random.sample(spicesList, spicesNum) # check if extract is nut def is_nut(extract: str) -> bool: nuts = ['Hazelnut','Almond'] return extract in nuts # checks if extract1 and extract2 are both allowed based on zest/extract same flavor def
zest_extract_same_flavor(extract1: str, extract2: str) -> bool: if extract1 == extract2: return False e1 = extract1.split(" ") # may need to change if new types are added e2 = extract2.split(" ") if len(e1) != 2 or len(e2) != 2: return False if e1[0]==e2[0] and 'Zest' in [e1[1],e2[1]] and 'Extract' in [e1[1],e2[1]]: return True return False # return list of extracts using number of extracts def get_extracts(extractsNum: int) -> [str]: if extractsNum == 0: return [] allowedExtracts = ['Vanilla', 'Hazelnut', 'Almond', 'Lemon Extract', 'Peppermint', 'Orange Zest', 'Lime Zest', 'Lemon Zest', 'Ginger'] # if more than one, vanilla must be included currentExtracts = ['Vanilla'] allowedExtracts.remove('Vanilla') extractsLeft = extractsNum-1 while extractsLeft > 0: if len(allowedExtracts) <= 0: print("Incorrect number of extracts") return "Incorrect number of extracts" newExtract = random.choice(allowedExtracts) # one nut at a time if True in map(is_nut, currentExtracts) and is_nut(newExtract): allowedExtracts.remove(newExtract) continue # skips decrement, try again # no zest + extract combination of the same flavor for currentExtract in currentExtracts: exit = False if zest_extract_same_flavor(currentExtract, newExtract): allowedExtracts.remove(newExtract) exit = True # skips decrement, try again if exit: continue # passed restraints, remove it from allowed currentExtracts.append(newExtract) if newExtract in allowedExtracts: allowedExtracts.remove(newExtract) extractsLeft -= 1 return currentExtracts # return percentage of enrichment def get_enrichment_percent(enrichment: str) -> int: if enrichment == 'Cream Cheese': return 10 return 5 # return liquid percent from liquid type def get_liquid_percent(liquidType: str) -> int: if liquidType in ['Heavy Cream', 'Coconut Milk']: return 13 elif liquidType in ['Cow Milk']: return 63 # print("Error in liquidType input.") return -1 # return fruit puree fruit choice(s), omitted fruit chance weighting for now def get_fruit_purees() -> [str]: fruitPureesNum = random.randint(1,2) fruitPureesChoices = ['Banana','Apple','Cherry','Strawberry','Fig','Mango'] return random.sample(fruitPureesChoices, fruitPureesNum) # return fruit puree percent from 0-2 fruitPurees using random generation def get_fruit_purees_percent(fruitPurees) -> [float]: totalFruitPureePercent = random.choice([25,30,35,40,45,50]) fruitPureeNum = len(fruitPurees) if fruitPureeNum == 1: return [totalFruitPureePercent] elif fruitPureeNum == 2: firstPercent = random.randint(0,totalFruitPureePercent) return [firstPercent, totalFruitPureePercent - firstPercent] return [0] # returns rounded ml conversion from percent, used in template def to_g(flourMl, percent) -> int: return round(flourMl * percent/100) # takes filename and writes an html recipe file def generate_recipe(breadname: str, filename: str, flourGramInput: int) -> str: # ALL NUMERICAL VALUES REPRESENT PERCENTAGES r = Recipe() r.breadname = breadname r.totalFlourGrams = flourGramInput r.totalLiquidPercent = 63 r.preferment = random.choice(['Poolish', 'None']) r.breadFlourPercent = random.choice([75, 50]) # FLOUR STYLE r.breadShape = random.choice(['Pullman', 'Regular']) # FLOUR TYPES r.specialFlour = random.choice([ 'Einkorn', 'Khorasan', 'Spelt', 'Emmer', 'Semolina (Durum)', 'Hard Red Whole Wheat', 'Regular Whole Wheat', 'Hard White Wheat', 'Rye' ]) r.specialFlourPercent = get_special_flour_percent(r.specialFlour, r.breadFlourPercent) r.whiteFlourPercent = 100 - r.breadFlourPercent - r.specialFlourPercent # SPICES/FLAVORING spicesNum =
random.randint(0,4) r.spices = get_spices(spicesNum) extractsNum = random.randint(0,3) r.extracts = get_extracts(extractsNum) teaList = ['Lavender', 'Hojicha', 'Matcha', 'Earl Grey', 'Oolong', 'Instant Coffee'] r.tea = random.choice(teaList) # illegal with fruit purees and all extracts but ginger, almond, and hazelnut # BASIC INGREDIENTS r.sugar = random.choice(['Brown Sugar','White Sugar','Honey','Molasses']) r.sugarPercent = random.choice([5,10,15]) r.salt = 'Table Salt' r.saltPercent = random.choice([1,1.5,2]) r.yeast = random.choice(['Instant Yeast','Active Yeast']) r.yeastPercent = 0.62 # ENRICHMENTS – All 5% , only one chosen enrichmentList = ['Olive Oil','Butter','Cream Cheese','Coconut oil'] if r.tea == 'Instant Coffee': enrichmentList.remove('Olive Oil') r.enrichment = random.choice(enrichmentList) r.enrichmentPercent = get_enrichment_percent(r.enrichment) if r.enrichment == 'Cream Cheese': r.totalLiquidPercent -= 5 # LIQUIDS # cap total liquid at 60% when these sugars are used if r.sugar in ['Honey', 'Molasses']: r.totalLiquidPercent = 60 # cow milk only if there is no preferment viableLiquids = ['Heavy Cream', 'Coconut Milk', 'Cow Milk'] if r.preferment != 'None': viableLiquids.remove('Cow Milk') r.liquid = random.choice(viableLiquids) r.liquidPercent = get_liquid_percent(r.liquid) ## LIQUIDS - FRUIT PUREE r.fruitPurees = [] r.fruitPureesPercent = [] if r.preferment != 'Poolish': # 50 percent chance to include # sugar reduction by 5 percent r.sugarPercent -= 5 r.fruitPurees = get_fruit_purees() r.fruitPureesPercent = get_fruit_purees_percent(r.fruitPurees) # account for cow milk r.liquidPercent = min(r.liquidPercent, r.totalLiquidPercent - sum(r.fruitPureesPercent)) r.waterPercent = max(0, r.totalLiquidPercent - sum(r.fruitPureesPercent) - r.liquidPercent) # BICOLOR ROLL r.isBicolorRoll = False if len(r.fruitPureesPercent) > 0 or r.tea in ['Lavender', 'Hojicha', 'Matcha', 'Earl Grey', 'Oolong']: r.isBicolorRoll = random.choice([True,False]) # COCOA POWDER r.cocoaPowderPercent = 0 cocoaPowderAllowedExtracts = ['Ginger', 'Almond', 'Hazelnut'] if r.fruitPurees == [] and any(not x in cocoaPowderAllowedExtracts for x in r.extracts): # allowed if random.randint(0,2) == 0: r.tea = '' # removes tea r.cocoaPowderPercent = round(random.choice([5,10])/100 * r.whiteFlourPercent,1) r.whiteFlourPercent = round(r.whiteFlourPercent - r.cocoaPowderPercent,1) # WRITE FORMAT time = datetime.now() r.datetime = time.strftime('%A, %b %d %Y') templateFile = open("./template.html") templateString = templateFile.read() ## Conversion to ml for percentages r.totalLiquidGrams = to_g(r.totalFlourGrams, r.totalLiquidPercent) r.breadFlourGrams = to_g(r.totalFlourGrams, r.breadFlourPercent) r.specialFlourGrams = to_g(r.totalFlourGrams, r.specialFlourPercent) r.whiteFlourGrams = to_g(r.totalFlourGrams, r.whiteFlourPercent) r.sugarGrams = to_g(r.totalFlourGrams, r.sugarPercent) r.saltGrams = to_g(r.totalFlourGrams, r.saltPercent) r.yeastGrams = to_g(r.totalFlourGrams, r.yeastPercent) r.spicesAmt = list(map(lambda x: get_flavor_amount(x, r.totalFlourGrams), r.spices)) r.extractsAmt = list(map(lambda x: get_flavor_amount(x, r.totalFlourGrams), r.extracts)) r.teaAmt = get_flavor_amount(r.tea, r.totalFlourGrams) r.enrichmentGrams = to_g(r.totalFlourGrams, r.enrichmentPercent) r.waterGrams = to_g(r.totalFlourGrams, r.waterPercent) r.liquidGrams = to_g(r.totalFlourGrams, r.liquidPercent) r.fruitPureesGrams = list(map(lambda x: to_g(r.totalFlourGrams,x), r.fruitPureesPercent)) r.cocoaPowderGrams =
round(r.cocoaPowderPercent/100 * r.totalFlourGrams) template = Template(templateString) htmlString = template.render(r = r) outfile = open(f'{filename}', 'w') outfile.write(htmlString) outfile.close() templateFile.close() return htmlString
[((29, 8, 29, 28), 'fractions.Fraction', 'Fraction', ({(29, 17, 29, 27): 'multiplier'}, {}), '(multiplier)', False, 'from fractions import Fraction\n'), ((87, 8, 87, 44), 'random.sample', 'random.sample', ({(87, 22, 87, 32): 'spicesList', (87, 34, 87, 43): 'spicesNum'}, {}), '(spicesList, spicesNum)', False, 'import random\n'), ((163, 18, 163, 37), 'random.randint', 'random.randint', ({(163, 33, 163, 34): '1', (163, 35, 163, 36): '2'}, {}), '(1, 2)', False, 'import random\n'), ((165, 8, 165, 57), 'random.sample', 'random.sample', ({(165, 22, 165, 40): 'fruitPureesChoices', (165, 42, 165, 56): 'fruitPureesNum'}, {}), '(fruitPureesChoices, fruitPureesNum)', False, 'import random\n'), ((169, 26, 169, 60), 'random.choice', 'random.choice', ({(169, 40, 169, 59): '[25, 30, 35, 40, 45, 50]'}, {}), '([25, 30, 35, 40, 45, 50])', False, 'import random\n'), ((190, 16, 190, 50), 'random.choice', 'random.choice', ({(190, 30, 190, 49): "['Poolish', 'None']"}, {}), "(['Poolish', 'None'])", False, 'import random\n'), ((191, 23, 191, 46), 'random.choice', 'random.choice', ({(191, 37, 191, 45): '[75, 50]'}, {}), '([75, 50])', False, 'import random\n'), ((193, 16, 193, 53), 'random.choice', 'random.choice', ({(193, 30, 193, 52): "['Pullman', 'Regular']"}, {}), "(['Pullman', 'Regular'])", False, 'import random\n'), ((195, 18, 205, 3), 'random.choice', 'random.choice', ({(195, 32, 205, 2): "['Einkorn', 'Khorasan', 'Spelt', 'Emmer', 'Semolina (Durum)',\n 'Hard Red Whole Wheat', 'Regular Whole Wheat', 'Hard White Wheat', 'Rye']"}, {}), "(['Einkorn', 'Khorasan', 'Spelt', 'Emmer', 'Semolina (Durum)',\n 'Hard Red Whole Wheat', 'Regular Whole Wheat', 'Hard White Wheat', 'Rye'])", False, 'import random\n'), ((210, 13, 210, 32), 'random.randint', 'random.randint', ({(210, 28, 210, 29): '0', (210, 30, 210, 31): '4'}, {}), '(0, 4)', False, 'import random\n'), ((212, 15, 212, 34), 'random.randint', 'random.randint', ({(212, 30, 212, 31): '0', (212, 32, 212, 33): '3'}, {}), '(0, 3)', False, 'import random\n'), ((215, 9, 215, 31), 'random.choice', 'random.choice', ({(215, 23, 215, 30): 'teaList'}, {}), '(teaList)', False, 'import random\n'), ((219, 11, 219, 74), 'random.choice', 'random.choice', ({(219, 25, 219, 73): "['Brown Sugar', 'White Sugar', 'Honey', 'Molasses']"}, {}), "(['Brown Sugar', 'White Sugar', 'Honey', 'Molasses'])", False, 'import random\n'), ((220, 18, 220, 42), 'random.choice', 'random.choice', ({(220, 32, 220, 41): '[5, 10, 15]'}, {}), '([5, 10, 15])', False, 'import random\n'), ((222, 17, 222, 41), 'random.choice', 'random.choice', ({(222, 31, 222, 40): '[1, 1.5, 2]'}, {}), '([1, 1.5, 2])', False, 'import random\n'), ((223, 11, 223, 58), 'random.choice', 'random.choice', ({(223, 25, 223, 57): "['Instant Yeast', 'Active Yeast']"}, {}), "(['Instant Yeast', 'Active Yeast'])", False, 'import random\n'), ((230, 16, 230, 45), 'random.choice', 'random.choice', ({(230, 30, 230, 44): 'enrichmentList'}, {}), '(enrichmentList)', False, 'import random\n'), ((243, 12, 243, 40), 'random.choice', 'random.choice', ({(243, 26, 243, 39): 'viableLiquids'}, {}), '(viableLiquids)', False, 'import random\n'), ((275, 8, 275, 22), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime\n'), ((297, 12, 297, 36), 'jinja2.Template', 'Template', ({(297, 21, 297, 35): 'templateString'}, {}), '(templateString)', False, 'from jinja2 import Template\n'), ((17, 9, 17, 35), 'random.choice', 'random.choice', ({(17, 23, 17, 34): 'percentages'}, {}), '(percentages)', False, 'import random\n'), ((28, 
20, 28, 30), 'fractions.Fraction', 'Fraction', ({}, {}), '()', False, 'from fractions import Fraction\n'), ((52, 14, 52, 47), 'math.floor', 'math.floor', ({(52, 25, 52, 46): '(flourAmount / 500 * scale)'}, {}), '(flourAmount / 500 * scale)', False, 'import math\n'), ((86, 9, 86, 51), 'random.sample', 'random.sample', ({(86, 23, 86, 47): "['Cinnamon', 'Cardamom']", (86, 49, 86, 50): '(1)'}, {}), "(['Cinnamon', 'Cardamom'], 1)", False, 'import random\n'), ((125, 15, 125, 45), 'random.choice', 'random.choice', ({(125, 29, 125, 44): 'allowedExtracts'}, {}), '(allowedExtracts)', False, 'import random\n'), ((263, 20, 263, 47), 'random.choice', 'random.choice', ({(263, 34, 263, 46): '[True, False]'}, {}), '([True, False])', False, 'import random\n'), ((21, 9, 21, 35), 'random.choice', 'random.choice', ({(21, 23, 21, 34): 'percentages'}, {}), '(percentages)', False, 'import random\n'), ((25, 9, 25, 35), 'random.choice', 'random.choice', ({(25, 23, 25, 34): 'percentages'}, {}), '(percentages)', False, 'import random\n'), ((74, 10, 74, 42), 'random.choice', 'random.choice', ({(74, 24, 74, 41): 'colorsDict[color]'}, {}), '(colorsDict[color])', False, 'import random\n'), ((174, 17, 174, 57), 'random.randint', 'random.randint', ({(174, 32, 174, 33): '0', (174, 34, 174, 56): 'totalFruitPureePercent'}, {}), '(0, totalFruitPureePercent)', False, 'import random\n'), ((269, 5, 269, 24), 'random.randint', 'random.randint', ({(269, 20, 269, 21): '(0)', (269, 22, 269, 23): '(2)'}, {}), '(0, 2)', False, 'import random\n'), ((271, 32, 271, 53), 'random.choice', 'random.choice', ({(271, 46, 271, 52): '[5, 10]'}, {}), '([5, 10])', False, 'import random\n')]
msnitish/posthog
posthog/api/test/test_organization_domain.py
cb86113f568e72eedcb64b5fd00c313d21e72f90
import datetime
from unittest.mock import patch

import dns.resolver
import dns.rrset
import pytest
import pytz
from django.utils import timezone
from freezegun import freeze_time
from rest_framework import status

from posthog.models import Organization, OrganizationDomain, OrganizationMembership, Team
from posthog.test.base import APIBaseTest, BaseTest


class FakeAnswer(object):
    def __init__(self, answer):
        self.answer = answer


class FakeDNSResponse(object):
    def __init__(self, answer):
        self.response = FakeAnswer(answer)


class TestOrganizationDomains(BaseTest):
    def test_continuous_verification_task(self):
        """
        Tests the task that re-verifies domains to ensure ownership is maintained.
        """
        pass


class TestOrganizationDomainsAPI(APIBaseTest):
    domain: OrganizationDomain = None  # type: ignore
    another_domain: OrganizationDomain = None  # type: ignore
    another_org: Organization = None  # type: ignore

    @classmethod
    def setUpTestData(cls):
        super().setUpTestData()

        cls.domain = OrganizationDomain.objects.create(organization=cls.organization, domain="myposthog.com")

        cls.another_org = Organization.objects.create(name="Another Org")
        Team.objects.create(organization=cls.another_org)
        cls.another_domain = OrganizationDomain.objects.create(organization=cls.another_org, domain="org.posthog.net")

    # List & retrieve domains

    def test_can_list_and_retrieve_domains(self):
        response = self.client.get("/api/organizations/@current/domains")
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        response_data = response.json()
        self.assertEqual(response_data["count"], 1)
        item = response_data["results"][0]
        self.assertEqual(item["domain"], "myposthog.com")
        self.assertEqual(item["verified_at"], None)
        self.assertEqual(item["is_verified"], False)
        self.assertEqual(item["jit_provisioning_enabled"], False)
        self.assertEqual(item["sso_enforcement"], "")
        self.assertRegex(item["verification_challenge"], r"[0-9A-Za-z_-]{32}")

        retrieve_response = self.client.get(f"/api/organizations/{self.organization.id}/domains/{self.domain.id}")
        self.assertEqual(retrieve_response.status_code, status.HTTP_200_OK)
        self.assertEqual(retrieve_response.json(), response_data["results"][0])

    def test_cannot_list_or_retrieve_domains_for_other_org(self):
        self.organization_membership.level = OrganizationMembership.Level.ADMIN
        self.organization_membership.save()

        response = self.client.get(f"/api/organizations/@current/domains/{self.another_domain.id}")
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
        self.assertEqual(response.json(), self.not_found_response())

        response = self.client.get(f"/api/organizations/{self.another_org.id}/domains/{self.another_domain.id}")
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
        self.assertEqual(response.json(), self.permission_denied_response())

    # Create domains

    def test_create_domain(self):
        self.organization_membership.level = OrganizationMembership.Level.ADMIN
        self.organization_membership.save()

        with self.settings(MULTI_TENANCY=True):
            response = self.client.post(
                "/api/organizations/@current/domains/",
                {
                    "domain": "the.posthog.com",
                    "verified_at": "2022-01-01T14:25:25.000Z",  # ignore me
                    "verification_challenge": "123",  # ignore me
                    "jit_provisioning_enabled": True,  # ignore me
                    "sso_enforcement": "saml",  # ignore me
                },
            )
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        response_data = response.json()
        self.assertEqual(response_data["domain"], "the.posthog.com")
        self.assertEqual(response_data["verified_at"], None)
        self.assertEqual(response_data["jit_provisioning_enabled"], False)
        self.assertRegex(response_data["verification_challenge"], r"[0-9A-Za-z_-]{32}")

        instance = OrganizationDomain.objects.get(id=response_data["id"])
        self.assertEqual(instance.domain, "the.posthog.com")
        self.assertEqual(instance.verified_at, None)
        self.assertEqual(instance.last_verification_retry, None)
        self.assertEqual(instance.sso_enforcement, "")

    @pytest.mark.skip_on_multitenancy
    def test_creating_domain_on_self_hosted_is_automatically_verified(self):
        self.organization_membership.level = OrganizationMembership.Level.ADMIN
        self.organization_membership.save()

        with freeze_time("2021-08-08T20:20:08Z"):
            response = self.client.post(
                "/api/organizations/@current/domains/",
                {
                    "domain": "the.posthog.com",
                    "verified_at": "2022-01-01T14:25:25.000Z",  # ignore me
                    "verification_challenge": "123",  # ignore me
                    "jit_provisioning_enabled": True,  # ignore me
                    "sso_enforcement": "saml",  # ignore me
                },
            )
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        response_data = response.json()
        self.assertEqual(response_data["domain"], "the.posthog.com")
        self.assertEqual(
            response_data["verified_at"], "2021-08-08T20:20:08Z",
        )
        self.assertEqual(response_data["jit_provisioning_enabled"], False)
        self.assertRegex(response_data["verification_challenge"], r"[0-9A-Za-z_-]{32}")

        instance = OrganizationDomain.objects.get(id=response_data["id"])
        self.assertEqual(instance.domain, "the.posthog.com")
        self.assertEqual(
            instance.verified_at, datetime.datetime(2021, 8, 8, 20, 20, 8, tzinfo=pytz.UTC),
        )
        self.assertEqual(instance.last_verification_retry, None)
        self.assertEqual(instance.sso_enforcement, "")

    def test_cannot_create_duplicate_domain(self):
        OrganizationDomain.objects.create(domain="i-registered-first.com", organization=self.another_org)
        count = OrganizationDomain.objects.count()
        self.organization_membership.level = OrganizationMembership.Level.ADMIN
        self.organization_membership.save()

        response = self.client.post("/api/organizations/@current/domains/", {"domain": "i-registered-first.com"},)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(
            response.json(),
            {
                "type": "validation_error",
                "code": "unique",
                "detail": "domain with this domain already exists.",
                "attr": "domain",
            },
        )
        self.assertEqual(OrganizationDomain.objects.count(), count)

    def test_cannot_create_invalid_domain(self):
        count = OrganizationDomain.objects.count()
        self.organization_membership.level = OrganizationMembership.Level.ADMIN
        self.organization_membership.save()

        invalid_domains = ["[email protected]", "🦔🦔🦔.com", "one.two.c", "--alpha.com", "javascript: alert(1)"]

        for _domain in invalid_domains:
            response = self.client.post("/api/organizations/@current/domains/", {"domain": _domain,},)
            self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
            self.assertEqual(
                response.json(),
                {
                    "type": "validation_error",
                    "code": "invalid_input",
                    "detail": "Please enter a valid domain or subdomain name.",
                    "attr": "domain",
                },
            )

        self.assertEqual(OrganizationDomain.objects.count(), count)

    @patch("posthog.models.organization_domain.dns.resolver.resolve")
    def test_can_request_verification_for_unverified_domains(self, mock_dns_query):
        self.organization_membership.level = OrganizationMembership.Level.ADMIN
        self.organization_membership.save()

        mock_dns_query.return_value = FakeDNSResponse(
            [
                dns.rrset.from_text(
                    "_posthog-challenge.myposthog.com.", 3600, "IN", "TXT", self.domain.verification_challenge,
                )
            ],
        )

        with freeze_time("2021-08-08T20:20:08Z"):
            response = self.client.post(f"/api/organizations/@current/domains/{self.domain.id}/verify")
        self.assertEqual(response.status_code, status.HTTP_200_OK)

        response_data = response.json()
        self.domain.refresh_from_db()
        self.assertEqual(response_data["domain"], "myposthog.com")
        self.assertEqual(
            response_data["verified_at"], self.domain.verified_at.strftime("%Y-%m-%dT%H:%M:%SZ"),
        )
        self.assertEqual(response_data["is_verified"], True)
        self.assertEqual(
            self.domain.verified_at, datetime.datetime(2021, 8, 8, 20, 20, 8, tzinfo=pytz.UTC),
        )
        self.assertEqual(self.domain.is_verified, True)

    @patch("posthog.models.organization_domain.dns.resolver.resolve")
    def test_domain_is_not_verified_with_missing_challenge(self, mock_dns_query):
        self.organization_membership.level = OrganizationMembership.Level.ADMIN
        self.organization_membership.save()

        mock_dns_query.side_effect = dns.resolver.NoAnswer()

        with freeze_time("2021-10-10T10:10:10Z"):
            with self.settings(MULTI_TENANCY=True):
                response = self.client.post(f"/api/organizations/@current/domains/{self.domain.id}/verify")
        self.assertEqual(response.status_code, status.HTTP_200_OK)

        response_data = response.json()
        self.domain.refresh_from_db()
        self.assertEqual(response_data["domain"], "myposthog.com")
        self.assertEqual(response_data["verified_at"], None)
        self.assertEqual(self.domain.verified_at, None)
        self.assertEqual(
            self.domain.last_verification_retry, datetime.datetime(2021, 10, 10, 10, 10, 10, tzinfo=pytz.UTC),
        )

    @patch("posthog.models.organization_domain.dns.resolver.resolve")
    def test_domain_is_not_verified_with_incorrect_challenge(self, mock_dns_query):
        self.organization_membership.level = OrganizationMembership.Level.ADMIN
        self.organization_membership.save()

        mock_dns_query.return_value = FakeDNSResponse(
            [dns.rrset.from_text("_posthog-challenge.myposthog.com.", 3600, "IN", "TXT", "incorrect_challenge",)],
        )

        with freeze_time("2021-10-10T10:10:10Z"):
            with self.settings(MULTI_TENANCY=True):
                response = self.client.post(f"/api/organizations/@current/domains/{self.domain.id}/verify")
        self.assertEqual(response.status_code, status.HTTP_200_OK)

        response_data = response.json()
        self.domain.refresh_from_db()
        self.assertEqual(response_data["domain"], "myposthog.com")
        self.assertEqual(response_data["verified_at"], None)
        self.assertEqual(self.domain.verified_at, None)
        self.assertEqual(
            self.domain.last_verification_retry, datetime.datetime(2021, 10, 10, 10, 10, 10, tzinfo=pytz.UTC),
        )

    def test_cannot_request_verification_for_verified_domains(self):
        self.organization_membership.level = OrganizationMembership.Level.ADMIN
        self.organization_membership.save()
        self.domain.verified_at = timezone.now()
        self.domain.save()

        response = self.client.post(f"/api/organizations/@current/domains/{self.domain.id}/verify")
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(
            response.json(),
            {
                "type": "validation_error",
                "code": "already_verified",
                "detail": "This domain has already been verified.",
                "attr": None,
            },
        )

    def test_only_admin_can_create_verified_domains(self):
        count = OrganizationDomain.objects.count()
        response = self.client.post("/api/organizations/@current/domains/", {"domain": "evil.posthog.com"})
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
        self.assertEqual(
            response.json(), self.permission_denied_response("Your organization access level is insufficient."),
        )
        self.assertEqual(OrganizationDomain.objects.count(), count)

    def test_only_admin_can_request_verification(self):
        response = self.client.post(f"/api/organizations/@current/domains/{self.domain.id}/verify")
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
        self.assertEqual(
            response.json(), self.permission_denied_response("Your organization access level is insufficient."),
        )

        self.domain.refresh_from_db()
        self.assertEqual(self.domain.verified_at, None)

    # Update domains

    def test_can_update_jit_provisioning_and_sso_enforcement(self):
        self.organization_membership.level = OrganizationMembership.Level.ADMIN
        self.organization_membership.save()
        self.domain.verified_at = timezone.now()
        self.domain.save()

        response = self.client.patch(
            f"/api/organizations/@current/domains/{self.domain.id}/",
            {"sso_enforcement": "google-oauth2", "jit_provisioning_enabled": True},
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.json()["sso_enforcement"], "google-oauth2")
        self.assertEqual(response.json()["jit_provisioning_enabled"], True)

        self.domain.refresh_from_db()
        self.assertEqual(self.domain.sso_enforcement, "google-oauth2")
        self.assertEqual(self.domain.jit_provisioning_enabled, True)

    def test_cannot_enforce_sso_or_enable_jit_provisioning_on_unverified_domain(self):
        self.organization_membership.level = OrganizationMembership.Level.ADMIN
        self.organization_membership.save()

        # SSO Enforcement
        response = self.client.patch(
            f"/api/organizations/@current/domains/{self.domain.id}/", {"sso_enforcement": "google-oauth2"},
        )
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(
            response.json(),
            {
                "type": "validation_error",
                "code": "verification_required",
                "detail": "This attribute cannot be updated until the domain is verified.",
                "attr": "sso_enforcement",
            },
        )
        self.domain.refresh_from_db()
        self.assertEqual(self.domain.sso_enforcement, "")

        # JIT Provisioning
        response = self.client.patch(
            f"/api/organizations/@current/domains/{self.domain.id}/", {"jit_provisioning_enabled": True},
        )
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(
            response.json(),
            {
                "type": "validation_error",
                "code": "verification_required",
                "detail": "This attribute cannot be updated until the domain is verified.",
                "attr": "jit_provisioning_enabled",
            },
        )
        self.domain.refresh_from_db()
        self.assertEqual(self.domain.jit_provisioning_enabled, False)

    def test_only_allowed_parameters_can_be_updated(self):
        self.organization_membership.level = OrganizationMembership.Level.ADMIN
        self.organization_membership.save()

        response = self.client.patch(
            f"/api/organizations/@current/domains/{self.domain.id}/",
            {"verified_at": "2020-01-01T12:12:12Z", "verification_challenge": "123"},
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.json()["verified_at"], None)
        self.assertRegex(response.json()["verification_challenge"], r"[0-9A-Za-z_-]{32}")

    def test_only_admin_can_update_domain(self):
        self.domain.verified_at = timezone.now()
        self.domain.save()

        response = self.client.patch(
            f"/api/organizations/{self.organization.id}/domains/{self.domain.id}/",
            {"sso_enforcement": "google-oauth2", "jit_provisioning_enabled": True},
        )
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
        self.assertEqual(
            response.json(), self.permission_denied_response("Your organization access level is insufficient."),
        )

        self.domain.refresh_from_db()
        self.assertEqual(self.domain.jit_provisioning_enabled, False)
        self.assertEqual(self.domain.sso_enforcement, "")

    def test_cannot_update_domain_for_another_org(self):
        self.organization_membership.level = OrganizationMembership.Level.ADMIN
        self.organization_membership.save()
        self.another_domain.verified_at = timezone.now()
        self.another_domain.save()

        response = self.client.patch(
            f"/api/organizations/{self.another_org.id}/domains/{self.another_domain.id}/",
            {"sso_enforcement": "google-oauth2", "jit_provisioning_enabled": True},
        )
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
        self.assertEqual(response.json(), self.permission_denied_response())

        self.another_domain.refresh_from_db()
        self.assertEqual(self.another_domain.jit_provisioning_enabled, False)
        self.assertEqual(self.another_domain.sso_enforcement, "")

    # Delete domains

    def test_admin_can_delete_domain(self):
        self.organization_membership.level = OrganizationMembership.Level.ADMIN
        self.organization_membership.save()

        response = self.client.delete(f"/api/organizations/@current/domains/{self.domain.id}")
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
        self.assertEqual(response.content, b"")
        self.assertFalse(OrganizationDomain.objects.filter(id=self.domain.id).exists())

    def test_only_admin_can_delete_domain(self):
        response = self.client.delete(f"/api/organizations/@current/domains/{self.domain.id}")
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
        self.assertEqual(
            response.json(), self.permission_denied_response("Your organization access level is insufficient."),
        )
        self.domain.refresh_from_db()

    def test_cannot_delete_domain_for_another_org(self):
        self.organization_membership.level = OrganizationMembership.Level.ADMIN
        self.organization_membership.save()

        response = self.client.delete(f"/api/organizations/{self.another_org.id}/domains/{self.another_domain.id}")
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
        self.assertEqual(response.json(), self.permission_denied_response())
        self.another_domain.refresh_from_db()
[((185, 5, 185, 69), 'unittest.mock.patch', 'patch', ({(185, 11, 185, 68): '"""posthog.models.organization_domain.dns.resolver.resolve"""'}, {}), "('posthog.models.organization_domain.dns.resolver.resolve')", False, 'from unittest.mock import patch\n'), ((214, 5, 214, 69), 'unittest.mock.patch', 'patch', ({(214, 11, 214, 68): '"""posthog.models.organization_domain.dns.resolver.resolve"""'}, {}), "('posthog.models.organization_domain.dns.resolver.resolve')", False, 'from unittest.mock import patch\n'), ((234, 5, 234, 69), 'unittest.mock.patch', 'patch', ({(234, 11, 234, 68): '"""posthog.models.organization_domain.dns.resolver.resolve"""'}, {}), "('posthog.models.organization_domain.dns.resolver.resolve')", False, 'from unittest.mock import patch\n'), ((43, 21, 43, 109), 'posthog.models.OrganizationDomain.objects.create', 'OrganizationDomain.objects.create', (), '', False, 'from posthog.models import Organization, OrganizationDomain, OrganizationMembership, Team\n'), ((45, 26, 45, 73), 'posthog.models.Organization.objects.create', 'Organization.objects.create', (), '', False, 'from posthog.models import Organization, OrganizationDomain, OrganizationMembership, Team\n'), ((46, 8, 46, 57), 'posthog.models.Team.objects.create', 'Team.objects.create', (), '', False, 'from posthog.models import Organization, OrganizationDomain, OrganizationMembership, Team\n'), ((47, 29, 47, 118), 'posthog.models.OrganizationDomain.objects.create', 'OrganizationDomain.objects.create', (), '', False, 'from posthog.models import Organization, OrganizationDomain, OrganizationMembership, Team\n'), ((105, 19, 105, 73), 'posthog.models.OrganizationDomain.objects.get', 'OrganizationDomain.objects.get', (), '', False, 'from posthog.models import Organization, OrganizationDomain, OrganizationMembership, Team\n'), ((136, 19, 136, 73), 'posthog.models.OrganizationDomain.objects.get', 'OrganizationDomain.objects.get', (), '', False, 'from posthog.models import Organization, OrganizationDomain, OrganizationMembership, Team\n'), ((145, 8, 145, 105), 'posthog.models.OrganizationDomain.objects.create', 'OrganizationDomain.objects.create', (), '', False, 'from posthog.models import Organization, OrganizationDomain, OrganizationMembership, Team\n'), ((146, 16, 146, 50), 'posthog.models.OrganizationDomain.objects.count', 'OrganizationDomain.objects.count', ({}, {}), '()', False, 'from posthog.models import Organization, OrganizationDomain, OrganizationMembership, Team\n'), ((165, 16, 165, 50), 'posthog.models.OrganizationDomain.objects.count', 'OrganizationDomain.objects.count', ({}, {}), '()', False, 'from posthog.models import Organization, OrganizationDomain, OrganizationMembership, Team\n'), ((259, 34, 259, 48), 'django.utils.timezone.now', 'timezone.now', ({}, {}), '()', False, 'from django.utils import timezone\n'), ((275, 16, 275, 50), 'posthog.models.OrganizationDomain.objects.count', 'OrganizationDomain.objects.count', ({}, {}), '()', False, 'from posthog.models import Organization, OrganizationDomain, OrganizationMembership, Team\n'), ((299, 34, 299, 48), 'django.utils.timezone.now', 'timezone.now', ({}, {}), '()', False, 'from django.utils import timezone\n'), ((365, 34, 365, 48), 'django.utils.timezone.now', 'timezone.now', ({}, {}), '()', False, 'from django.utils import timezone\n'), ((383, 42, 383, 56), 'django.utils.timezone.now', 'timezone.now', ({}, {}), '()', False, 'from django.utils import timezone\n'), ((116, 13, 116, 48), 'freezegun.freeze_time', 'freeze_time', ({(116, 25, 116, 47): 
'"""2021-08-08T20:20:08Z"""'}, {}), "('2021-08-08T20:20:08Z')", False, 'from freezegun import freeze_time\n'), ((139, 34, 139, 91), 'datetime.datetime', 'datetime.datetime', (), '', False, 'import datetime\n'), ((162, 25, 162, 59), 'posthog.models.OrganizationDomain.objects.count', 'OrganizationDomain.objects.count', ({}, {}), '()', False, 'from posthog.models import Organization, OrganizationDomain, OrganizationMembership, Team\n'), ((183, 25, 183, 59), 'posthog.models.OrganizationDomain.objects.count', 'OrganizationDomain.objects.count', ({}, {}), '()', False, 'from posthog.models import Organization, OrganizationDomain, OrganizationMembership, Team\n'), ((198, 13, 198, 48), 'freezegun.freeze_time', 'freeze_time', ({(198, 25, 198, 47): '"""2021-08-08T20:20:08Z"""'}, {}), "('2021-08-08T20:20:08Z')", False, 'from freezegun import freeze_time\n'), ((210, 37, 210, 94), 'datetime.datetime', 'datetime.datetime', (), '', False, 'import datetime\n'), ((221, 13, 221, 48), 'freezegun.freeze_time', 'freeze_time', ({(221, 25, 221, 47): '"""2021-10-10T10:10:10Z"""'}, {}), "('2021-10-10T10:10:10Z')", False, 'from freezegun import freeze_time\n'), ((231, 49, 231, 109), 'datetime.datetime', 'datetime.datetime', (), '', False, 'import datetime\n'), ((243, 13, 243, 48), 'freezegun.freeze_time', 'freeze_time', ({(243, 25, 243, 47): '"""2021-10-10T10:10:10Z"""'}, {}), "('2021-10-10T10:10:10Z')", False, 'from freezegun import freeze_time\n'), ((253, 49, 253, 109), 'datetime.datetime', 'datetime.datetime', (), '', False, 'import datetime\n'), ((282, 25, 282, 59), 'posthog.models.OrganizationDomain.objects.count', 'OrganizationDomain.objects.count', ({}, {}), '()', False, 'from posthog.models import Organization, OrganizationDomain, OrganizationMembership, Team\n'), ((406, 25, 406, 77), 'posthog.models.OrganizationDomain.objects.filter', 'OrganizationDomain.objects.filter', (), '', False, 'from posthog.models import Organization, OrganizationDomain, OrganizationMembership, Team\n')]
nataliapryakhina/FA_group3
tutorial/test input.py
3200464bc20d38a85af9ad3583a360db4ffb7f8d
import numpy as np
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
from os import listdir
from tensorflow.keras.callbacks import ModelCheckpoint

dataDir = "./data/trainSmallFA/"
files = listdir(dataDir)
files.sort()
totalLength = len(files)

inputs = np.empty((len(files), 3, 64, 64))
targets = np.empty((len(files), 3, 64, 64))
for i, file in enumerate(files):
    npfile = np.load(dataDir + file)
    d = npfile['a']
    inputs[i] = d[0:3]  # inx, iny, mask
    targets[i] = d[3:6]  # p, velx, vely

# print("inputs shape = ", inputs.shape)
print(np.shape(targets[:, 1, :, :].flatten()))
maxvel = np.amax(np.sqrt(targets[:, 1, :, :]* targets[:, 1, :, :]
                         + targets[:, 2, :, :]* targets[:, 2, :, :]))
print(maxvel)
targets[:, 1:3, :, :] /= maxvel
targets[:, 0, :, :] /= np.amax(targets[:, 0, :, :])
for input in inputs:
    plt.figure(num=None, figsize=(20, 10), dpi=80, facecolor='w', edgecolor='k')

    # predicted data
    plt.subplot(331)
    plt.title('x vel')
    plt.imshow(input[0, :, :], cmap='jet')  # vmin=-100,vmax=100, cmap='jet')
    plt.colorbar()
    plt.subplot(332)
    plt.title('y vel')
    plt.imshow(input[1, :, :], cmap='jet')
    plt.colorbar()

    plt.show()
[((9, 8, 9, 24), 'os.listdir', 'listdir', ({(9, 16, 9, 23): 'dataDir'}, {}), '(dataDir)', False, 'from os import listdir\n'), ((27, 23, 27, 51), 'numpy.amax', 'np.amax', ({(27, 31, 27, 50): 'targets[:, (0), :, :]'}, {}), '(targets[:, (0), :, :])', True, 'import numpy as np\n'), ((16, 13, 16, 36), 'numpy.load', 'np.load', ({(16, 21, 16, 35): 'dataDir + file'}, {}), '(dataDir + file)', True, 'import numpy as np\n'), ((23, 17, 24, 68), 'numpy.sqrt', 'np.sqrt', ({(23, 25, 24, 67): 'targets[:, (1), :, :] * targets[:, (1), :, :] + targets[:, (2), :, :\n ] * targets[:, (2), :, :]'}, {}), '(targets[:, (1), :, :] * targets[:, (1), :, :] + targets[:, (2), :,\n :] * targets[:, (2), :, :])', True, 'import numpy as np\n'), ((29, 4, 29, 80), 'matplotlib.pyplot.figure', 'plt.figure', (), '', True, 'import matplotlib.pyplot as plt\n'), ((32, 4, 32, 20), 'matplotlib.pyplot.subplot', 'plt.subplot', ({(32, 16, 32, 19): '(331)'}, {}), '(331)', True, 'import matplotlib.pyplot as plt\n'), ((33, 4, 33, 22), 'matplotlib.pyplot.title', 'plt.title', ({(33, 14, 33, 21): '"""x vel"""'}, {}), "('x vel')", True, 'import matplotlib.pyplot as plt\n'), ((34, 4, 34, 42), 'matplotlib.pyplot.imshow', 'plt.imshow', (), '', True, 'import matplotlib.pyplot as plt\n'), ((35, 4, 35, 18), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((36, 4, 36, 20), 'matplotlib.pyplot.subplot', 'plt.subplot', ({(36, 16, 36, 19): '(332)'}, {}), '(332)', True, 'import matplotlib.pyplot as plt\n'), ((37, 4, 37, 22), 'matplotlib.pyplot.title', 'plt.title', ({(37, 14, 37, 21): '"""y vel"""'}, {}), "('y vel')", True, 'import matplotlib.pyplot as plt\n'), ((38, 4, 38, 42), 'matplotlib.pyplot.imshow', 'plt.imshow', (), '', True, 'import matplotlib.pyplot as plt\n'), ((39, 4, 39, 18), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((41, 4, 41, 14), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n')]
cltl/pepper
pepper/responder/brain.py
5d34fc5074473163aa9273016d89e5e2b8edffa9
from pepper.framework import *
from pepper import logger
from pepper.language import Utterance
from pepper.language.generation.thoughts_phrasing import phrase_thoughts
from pepper.language.generation.reply import reply_to_question

from .responder import Responder, ResponderType

from pepper.language import UtteranceType
from pepper.knowledge import sentences, animations

from random import choice

import re

from typing import Optional, Union, Tuple, Callable


class BrainResponder(Responder):
    def __init__(self):
        self._log = logger.getChild(self.__class__.__name__)

    @property
    def type(self):
        return ResponderType.Brain

    @property
    def requirements(self):
        return [TextToSpeechComponent, BrainComponent]

    def respond(self, utterance, app):
        # type: (Utterance, Union[TextToSpeechComponent, BrainComponent]) -> Optional[Tuple[float, Callable]]
        try:
            utterance.analyze()

            self._log.debug("TRIPLE: {}".format(utterance.triple))

            if utterance.triple is not None:

                brain_response_statement = []
                brain_response_question = []

                if utterance.type == UtteranceType.QUESTION:
                    brain_response_question = app.brain.query_brain(utterance)
                    reply = reply_to_question(brain_response_question)
                    self._log.info("REPLY to question: {}".format(reply))
                else:
                    brain_response_statement = app.brain.update(utterance, reason_types=True)  # Searches for types in dbpedia
                    reply = phrase_thoughts(brain_response_statement, True, True, True)
                    self._log.info("REPLY to statement: {}".format(reply))

                if (isinstance(reply, str) or isinstance(reply, unicode)) and reply != "":
                    # Return Score and Response
                    # Make sure to not execute the response here, but just to return the response function
                    return 1.0, lambda: app.say(re.sub(r"[\s+_]", " ", reply))
                elif brain_response_statement:
                    # Thank Human for the Data!
                    return 1.0, lambda: app.say("{} {}".format(choice([choice(sentences.THANK), choice(sentences.HAPPY)]),
                                                               choice(sentences.PARSED_KNOWLEDGE)), animations.HAPPY)
                elif brain_response_question:
                    # Apologize to human for not knowing
                    return 1.0, lambda: app.say("{} {}".format(choice(sentences.SORRY),
                                                               choice(sentences.NO_ANSWER)), animations.ASHAMED)

        except Exception as e:
            self._log.error(e)
[((21, 20, 21, 60), 'pepper.logger.getChild', 'logger.getChild', ({(21, 36, 21, 59): 'self.__class__.__name__'}, {}), '(self.__class__.__name__)', False, 'from pepper import logger\n'), ((45, 28, 45, 70), 'pepper.language.generation.reply.reply_to_question', 'reply_to_question', ({(45, 46, 45, 69): 'brain_response_question'}, {}), '(brain_response_question)', False, 'from pepper.language.generation.reply import reply_to_question\n'), ((49, 28, 49, 87), 'pepper.language.generation.thoughts_phrasing.phrase_thoughts', 'phrase_thoughts', ({(49, 44, 49, 68): 'brain_response_statement', (49, 70, 49, 74): 'True', (49, 76, 49, 80): 'True', (49, 82, 49, 86): 'True'}, {}), '(brain_response_statement, True, True, True)', False, 'from pepper.language.generation.thoughts_phrasing import phrase_thoughts\n'), ((55, 48, 55, 77), 're.sub', 're.sub', ({(55, 55, 55, 64): '"""[\\\\s+_]"""', (55, 66, 55, 69): '""" """', (55, 71, 55, 76): 'reply'}, {}), "('[\\\\s+_]', ' ', reply)", False, 'import re\n'), ((59, 63, 59, 97), 'random.choice', 'choice', ({(59, 70, 59, 96): 'sentences.PARSED_KNOWLEDGE'}, {}), '(sentences.PARSED_KNOWLEDGE)', False, 'from random import choice\n'), ((62, 63, 62, 86), 'random.choice', 'choice', ({(62, 70, 62, 85): 'sentences.SORRY'}, {}), '(sentences.SORRY)', False, 'from random import choice\n'), ((63, 63, 63, 90), 'random.choice', 'choice', ({(63, 70, 63, 89): 'sentences.NO_ANSWER'}, {}), '(sentences.NO_ANSWER)', False, 'from random import choice\n'), ((58, 71, 58, 94), 'random.choice', 'choice', ({(58, 78, 58, 93): 'sentences.THANK'}, {}), '(sentences.THANK)', False, 'from random import choice\n'), ((58, 96, 58, 119), 'random.choice', 'choice', ({(58, 103, 58, 118): 'sentences.HAPPY'}, {}), '(sentences.HAPPY)', False, 'from random import choice\n')]
fedora-infra/fedora-college
fedora_college/modules/content/views.py
cf310dab2e4fea02b9ac5e7f57dc53aafb4834d8
# -*- coding: utf-8 -*- import re from unicodedata import normalize from flask import Blueprint, render_template, current_app from flask import redirect, url_for, g, abort from sqlalchemy import desc from fedora_college.core.database import db from fedora_college.modules.content.forms import * # noqa from fedora_college.core.models import * # noqa from fedora_college.fedmsgshim import publish from flask_fas_openid import fas_login_required bundle = Blueprint('content', __name__, template_folder='templates') from fedora_college.modules.content.media import * # noqa _punct_re = re.compile(r'[\t !"#$%&\'()*\-/<=>?@\[\\\]^_`{|},.]+') # Verify if user is authenticated def authenticated(): return hasattr(g, 'fas_user') and g.fas_user # generate url slug def slugify(text, delim=u'-'): """Generates an slightly worse ASCII-only slug.""" result = [] for word in _punct_re.split(text.lower()): word = normalize('NFKD', word).encode('ascii', 'ignore') if word: result.append(word) return unicode(delim.join(result)) # attach tags to a content entry def attach_tags(tags, content): rem = TagsMap.query.filter_by(content_id=content.content_id).all() for r in rem: db.session.delete(r) db.session.commit() for tag in tags: tag_db = Tags.query.filter_by(tag_text=tag).first() if tag_db is None: tag_db = Tags(tag) db.session.add(tag_db) db.session.commit() Map = TagsMap(tag_db.tag_id, content.content_id) db.session.add(Map) db.session.commit() # delete content @bundle.route('/content/delete/<posturl>', methods=['GET', 'POST']) @bundle.route('/content/delete/<posturl>/', methods=['GET', 'POST']) @fas_login_required def delete_content(posturl=None): if posturl is not None: db.session.rollback() content = Content.query.filter_by(slug=posturl).first_or_404() rem = TagsMap.query.filter_by( content_id=content.content_id).all() '''delete mapped tags''' for r in rem: db.session.delete(r) comments = Comments.query.filter_by( content_id=content.content_id).all() '''delete comments with foriegn keys''' for r in comments: db.session.delete(r) db.session.delete(content) db.session.commit() return redirect(url_for('profile.user', nickname=g.fas_user['username'])) abort(404) # add / edit more content @bundle.route('/content/add/', methods=['GET', 'POST']) @bundle.route('/content/add', methods=['GET', 'POST']) @bundle.route('/content/edit/<posturl>/', methods=['GET', 'POST']) @bundle.route('/content/edit/<posturl>', methods=['GET', 'POST']) @fas_login_required def addcontent(posturl=None): if authenticated(): form = CreateContent() form_action = url_for('content.addcontent') media = Media.query.order_by(desc(Media.timestamp)).limit(10).all() if posturl is not None: content = Content.query.filter_by(slug=posturl).first_or_404() form = CreateContent(obj=content) if form.validate_on_submit(): form.populate_obj(content) tags = str(form.tags.data).split(',') attach_tags(tags, content) content.rehtml() db.session.commit() '''Publish the message''' msg = content.getdata() msg['title'] = content.title msg['link'] = current_app.config[ 'EXTERNAL_URL'] + content.slug publish( topic=current_app.config['CONTENT_EDIT_TOPIC'], msg=msg ) if content.type_content == "blog": print url_for('content.blog', slug=posturl) return redirect(url_for('content.blog', slug=posturl)) return redirect(url_for('home.content', slug=posturl)) else: if form.validate_on_submit(): url_name = slugify(form.title.data) content = Content(form.title.data, url_name, form.description.data, form.active.data, form.tags.data, g.fas_user['username'], form.type_content.data ) 
tags = str(form.tags.data).split(',') try: db.session.add(content) db.session.commit() attach_tags(tags, content) '''Publish the message''' msg = content.getdata() msg['title'] = content.title msg['link'] = current_app.config[ 'EXTERNAL_URL'] + url_name publish( topic=current_app.config['CONTENT_CREATE_TOPIC'], msg=msg ) if content.type_content == "blog": return redirect(url_for('content.blog', slug=posturl)) return redirect(url_for('home.content', slug=url_name)) # Duplicate entry except Exception as e: return str(e) db.session.rollback() pass tags = Tags.query.all() return render_template('content/edit_content.html', form=form, form_action=form_action, title="Create Content", media=media[0:5], tags=tags) abort(404) # View Blog post @bundle.route('/blog', methods=['GET', 'POST']) @bundle.route('/blog/', methods=['GET', 'POST']) @bundle.route('/blog/<slug>/', methods=['GET', 'POST']) @bundle.route('/blog/<slug>', methods=['GET', 'POST']) @bundle.route('/blog/page/<id>', methods=['GET', 'POST']) @bundle.route('/blog/page/<id>', methods=['GET', 'POST']) def blog(slug=None, id=0): id = int(id) screen = Content.query. \ filter_by( type_content="lecture", active=True ).limit(10).all() if slug is not None: try: posts = Content.query. \ filter_by(slug=slug).all() except: posts = "No such posts in database." else: try: posts = Content.query. \ filter_by(type_content="blog").all() if id > 0: posts = posts[id - 1:id + 5] else: posts = posts[0:5] except: posts = [] return render_template('blog/index.html', title='Blog', content=posts, screen=screen, id=id, slug=slug )
[]
MrDelik/core
tests/components/airthings/test_config_flow.py
93a66cc357b226389967668441000498a10453bb
"""Test the Airthings config flow.""" from unittest.mock import patch import airthings from homeassistant import config_entries from homeassistant.components.airthings.const import CONF_ID, CONF_SECRET, DOMAIN from homeassistant.core import HomeAssistant from homeassistant.data_entry_flow import RESULT_TYPE_CREATE_ENTRY, RESULT_TYPE_FORM from tests.common import MockConfigEntry TEST_DATA = { CONF_ID: "client_id", CONF_SECRET: "secret", } async def test_form(hass: HomeAssistant) -> None: """Test we get the form.""" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_USER} ) assert result["type"] == RESULT_TYPE_FORM assert result["errors"] is None with patch("airthings.get_token", return_value="test_token",), patch( "homeassistant.components.airthings.async_setup_entry", return_value=True, ) as mock_setup_entry: result2 = await hass.config_entries.flow.async_configure( result["flow_id"], TEST_DATA, ) await hass.async_block_till_done() assert result2["type"] == RESULT_TYPE_CREATE_ENTRY assert result2["title"] == "Airthings" assert result2["data"] == TEST_DATA assert len(mock_setup_entry.mock_calls) == 1 async def test_form_invalid_auth(hass: HomeAssistant) -> None: """Test we handle invalid auth.""" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_USER} ) with patch( "airthings.get_token", side_effect=airthings.AirthingsAuthError, ): result2 = await hass.config_entries.flow.async_configure( result["flow_id"], TEST_DATA, ) assert result2["type"] == RESULT_TYPE_FORM assert result2["errors"] == {"base": "invalid_auth"} async def test_form_cannot_connect(hass: HomeAssistant) -> None: """Test we handle cannot connect error.""" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_USER} ) with patch( "airthings.get_token", side_effect=airthings.AirthingsConnectionError, ): result2 = await hass.config_entries.flow.async_configure( result["flow_id"], TEST_DATA, ) assert result2["type"] == RESULT_TYPE_FORM assert result2["errors"] == {"base": "cannot_connect"} async def test_form_unknown_error(hass: HomeAssistant) -> None: """Test we handle unknown error.""" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_USER} ) with patch( "airthings.get_token", side_effect=Exception, ): result2 = await hass.config_entries.flow.async_configure( result["flow_id"], TEST_DATA, ) assert result2["type"] == RESULT_TYPE_FORM assert result2["errors"] == {"base": "unknown"} async def test_flow_entry_already_exists(hass: HomeAssistant) -> None: """Test user input for config_entry that already exists.""" first_entry = MockConfigEntry( domain="airthings", data=TEST_DATA, unique_id=TEST_DATA[CONF_ID], ) first_entry.add_to_hass(hass) with patch("airthings.get_token", return_value="token"): result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_USER}, data=TEST_DATA ) assert result["type"] == "abort" assert result["reason"] == "already_configured"
[((104, 18, 108, 5), 'tests.common.MockConfigEntry', 'MockConfigEntry', (), '', False, 'from tests.common import MockConfigEntry\n'), ((28, 9, 28, 65), 'unittest.mock.patch', 'patch', (), '', False, 'from unittest.mock import patch\n'), ((28, 67, 31, 5), 'unittest.mock.patch', 'patch', (), '', False, 'from unittest.mock import patch\n'), ((50, 9, 53, 5), 'unittest.mock.patch', 'patch', (), '', False, 'from unittest.mock import patch\n'), ((69, 9, 72, 5), 'unittest.mock.patch', 'patch', (), '', False, 'from unittest.mock import patch\n'), ((88, 9, 91, 5), 'unittest.mock.patch', 'patch', (), '', False, 'from unittest.mock import patch\n'), ((111, 9, 111, 59), 'unittest.mock.patch', 'patch', (), '', False, 'from unittest.mock import patch\n')]
scomup/StereoNet-ActiveStereoNet
utils/utils.py
05994cf1eec4a109e095732fe01ecb5558880ba5
# ------------------------------------------------------------------------------ # Copyright (c) NKU # Licensed under the MIT License. # Written by Xuanyi Li ([email protected]) # ------------------------------------------------------------------------------ import os import torch import torch.nn.functional as F #import cv2 as cv import numpy as np def GERF_loss(GT, pred, args): # mask = (GT < args.maxdisp) & (GT >= 0) mask = GT > 0 mask.detach_() # print(mask.size(), GT.size(), pred.size()) count = len(torch.nonzero(mask)) # print(count) if count == 0: count = 1 return torch.sum(torch.sqrt(torch.pow(GT[mask] - pred[mask], 2) + 4) /2 - 1) / count def smooth_L1_loss(GT, pred, args): mask = GT < args.maxdisp mask.detach_() # loss = F.smooth_l1_loss(pred[mask], GT[mask], size_average=True) loss = (pred[mask] - GT[mask]).abs().mean() return loss if __name__ == '__main__': pass # import matplotlib.pyplot as plt # image = cv.imread('/media/lxy/sdd1/ActiveStereoNet/StereoNet_pytorch/results/forvideo/iter-122.jpg') #im_gray = cv.imread('/media/lxy/sdd1/ActiveStereoNet/StereoNet_pytorch/results/forvideo/iter-133.jpg', cv.IMREAD_GRAYSCALE) # print(im_gray.shape) #im_color = cv.applyColorMap(im_gray*2, cv.COLORMAP_JET) # cv.imshow('test', im_color) # cv.waitKey(0) #cv.imwrite('test.png',im_color) # print(image.shape) # plt.figure('Image') # sc =plt.imshow(image) # sc.set_cmap('hsv') # plt.colorbar() # plt.axis('off') # plt.show() # print('end') # image[:,:,0].save('/media/lxy/sdd1/ActiveStereoNet/StereoNet_pytorch/results/pretrained_StereoNet_single/it1er-151.jpg')
[((16, 16, 16, 35), 'torch.nonzero', 'torch.nonzero', ({(16, 30, 16, 34): 'mask'}, {}), '(mask)', False, 'import torch\n'), ((20, 32, 20, 67), 'torch.pow', 'torch.pow', ({(20, 42, 20, 63): '(GT[mask] - pred[mask])', (20, 65, 20, 66): '(2)'}, {}), '(GT[mask] - pred[mask], 2)', False, 'import torch\n')]
Devalent/facial-recognition-service
worker/main.py
342e31fa7d016992d938b0121b03f0e8fe776ea8
from aiohttp import web import base64 import io import face_recognition async def encode(request): request_data = await request.json() # Read base64 encoded image url = request_data['image'].split(',')[1] image = io.BytesIO(base64.b64decode(url)) # Load image data np_array = face_recognition.load_image_file(image) # Find face locations locations = face_recognition.face_locations(np_array) # Create face encodings encodings = face_recognition.face_encodings(np_array, locations) results = [] for i in range(len(locations)): top, right, bottom, left = locations[i] result = { 'x': left, 'y': top, 'width': right - left, 'height': bottom - top, 'encodings': encodings[i].tolist() } results.append(result) return web.json_response(results) def main(): app = web.Application() app.router.add_post('/encode', encode) web.run_app(app, host='0.0.0.0', port='3000') main()
[((14, 15, 14, 54), 'face_recognition.load_image_file', 'face_recognition.load_image_file', ({(14, 48, 14, 53): 'image'}, {}), '(image)', False, 'import face_recognition\n'), ((17, 16, 17, 57), 'face_recognition.face_locations', 'face_recognition.face_locations', ({(17, 48, 17, 56): 'np_array'}, {}), '(np_array)', False, 'import face_recognition\n'), ((20, 16, 20, 68), 'face_recognition.face_encodings', 'face_recognition.face_encodings', ({(20, 48, 20, 56): 'np_array', (20, 58, 20, 67): 'locations'}, {}), '(np_array, locations)', False, 'import face_recognition\n'), ((37, 11, 37, 37), 'aiohttp.web.json_response', 'web.json_response', ({(37, 29, 37, 36): 'results'}, {}), '(results)', False, 'from aiohttp import web\n'), ((40, 10, 40, 27), 'aiohttp.web.Application', 'web.Application', ({}, {}), '()', False, 'from aiohttp import web\n'), ((43, 4, 43, 49), 'aiohttp.web.run_app', 'web.run_app', (), '', False, 'from aiohttp import web\n'), ((11, 23, 11, 44), 'base64.b64decode', 'base64.b64decode', ({(11, 40, 11, 43): 'url'}, {}), '(url)', False, 'import base64\n')]
TiKeil/Two-scale-RBLOD
rblod/setup.py
23f17a3e4edf63ea5f208eca50ca90c19bf511a9
# ~~~ # This file is part of the paper: # # " An Online Efficient Two-Scale Reduced Basis Approach # for the Localized Orthogonal Decomposition " # # https://github.com/TiKeil/Two-scale-RBLOD.git # # Copyright 2019-2021 all developers. All rights reserved. # License: Licensed as BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause) # Authors: # Stephan Rave # Tim Keil # ~~~ from setuptools import setup setup(name='rblod', version='2021.1', description='Pymor support for RBLOD', author='Tim Keil', author_email='[email protected]', license='MIT', packages=['rblod'])
[((18, 0, 24, 25), 'setuptools.setup', 'setup', (), '', False, 'from setuptools import setup\n')]
ndeporzio/cosmicfish
bin/euclid_fine_plot_job_array.py
f68f779d73f039512a958d110bb44194d0daceec
import os import shutil import numpy as np import pandas as pd import seaborn as sns import cosmicfish as cf import matplotlib.pyplot as plt import dill # Instruct pyplot to use seaborn sns.set() # Set project, data, CLASS directories projectdir = os.environ['STORAGE_DIR'] datastore = os.environ['DATASTORE_DIR'] classpath = os.environ['CLASS_DIR'] fidx = int(os.environ['FORECAST_INDEX']) # Generate output paths fp_resultsdir = projectdir cf.makedirectory(fp_resultsdir) # Specify resolution of numerical integrals derivative_step = 0.008 # How much to vary parameter to calculate numerical derivative g_derivative_step = 0.1 mu_integral_step = 0.05 # For calculating numerical integral wrt mu between -1 and 1 # Linda Fiducial Cosmology fp_fid = { "A_s" : 2.2321e-9, "n_s" : 0.967, "omega_b" : 0.02226, "omega_cdm" : 0.1127, "tau_reio" : 0.0598, "h" : 0.701, "T_cmb" : 2.726, # Units [K] "N_ncdm" : 4., "deg_ncdm" : 1.0, "T_ncdm" : (0.79/2.726), # Units [T_cmb]. "m_ncdm" : 0.01, # Units [eV] "b0" : 1.0, "beta0" : 1.7, "beta1" : 1.0, "alphak2" : 1.0, "sigma_fog_0" : 250000, #Units [m s^-2] "N_eff" : 0.0064, #We allow relativistic neutrinos in addition to our DM relic "relic_vary" : "N_ncdm", # Fix T_ncdm or m_ncdm "m_nu" : 0.02 } # EUCLID values z_table = np.array([0.65, 0.75, 0.85, 0.95, 1.05, 1.15, 1.25, 1.35, 1.45, 1.55, 1.65, 1.75, 1.85, 1.95]) dNdz = np.array([2434.280, 4364.812, 4728.559, 4825.798, 4728.797, 4507.625, 4269.851, 3720.657, 3104.309, 2308.975, 1514.831, 1474.707, 893.716, 497.613]) skycover = 0.3636 # Run Fisher Forecast full_masses = np.geomspace(0.01, 10., 21) full_temps = np.array([0.79, 0.91, 0.94, 1.08]) mass_index=(fidx % 21) temp_index=(fidx // 21) masses = np.array([full_masses[mass_index]]) temps = np.array([full_temps[temp_index]]) omegacdm_set = np.array([ fp_fid['omega_cdm'] - ((masses/cf.NEUTRINO_SCALE_FACTOR)* np.power(tval / 1.95, 3.)) for tidx, tval in enumerate(temps)]) fp_fiducialset = [[ dict(fp_fid, **{ 'm_ncdm' : masses[midx], 'omega_cdm' : omegacdm_set[tidx, midx], 'T_ncdm' : temps[tidx]/2.726}) for midx, mval in enumerate(masses)] for tidx, tval in enumerate(temps)] fp_forecastset = [[cf.forecast( classpath, datastore, '2relic', fidval, z_table, "EUCLID", dNdz, fsky=skycover, dstep=derivative_step, gstep=g_derivative_step, RSD=True, FOG=True, AP=True, COV=True) for fididx, fidval in enumerate(fidrowvals)] for fidrowidx, fidrowvals in enumerate(fp_fiducialset)] #dill.load_session('') for frowidx, frowval in enumerate(fp_forecastset): for fidx, fcst in enumerate(frowval): if type(fcst.fisher)==type(None): fcst.gen_pm() fcst.gen_fisher( fisher_order=[ 'omega_b', 'omega_cdm', 'n_s', 'A_s', 'tau_reio', 'h', 'N_ncdm', 'M_ncdm', 'sigma_fog', 'beta0', 'beta1', 'alpha_k2'], mu_step=mu_integral_step, skipgen=False) print("Relic Forecast ", fidx, " complete...") dill.dump_session(os.path.join(fp_resultsdir, 'fp_'+str(temp_index)+'_'+str(mass_index)+'.db')) else: print('Fisher matrix already generated!')
[((11, 0, 11, 9), 'seaborn.set', 'sns.set', ({}, {}), '()', True, 'import seaborn as sns\n'), ((21, 0, 21, 31), 'cosmicfish.makedirectory', 'cf.makedirectory', ({(21, 17, 21, 30): 'fp_resultsdir'}, {}), '(fp_resultsdir)', True, 'import cosmicfish as cf\n'), ((52, 10, 52, 104), 'numpy.array', 'np.array', ({(52, 19, 52, 103): '[0.65, 0.75, 0.85, 0.95, 1.05, 1.15, 1.25, 1.35, 1.45, 1.55, 1.65, 1.75, \n 1.85, 1.95]'}, {}), '([0.65, 0.75, 0.85, 0.95, 1.05, 1.15, 1.25, 1.35, 1.45, 1.55, 1.65,\n 1.75, 1.85, 1.95])', True, 'import numpy as np\n'), ((53, 7, 54, 52), 'numpy.array', 'np.array', ({(53, 16, 54, 51): '[2434.28, 4364.812, 4728.559, 4825.798, 4728.797, 4507.625, 4269.851, \n 3720.657, 3104.309, 2308.975, 1514.831, 1474.707, 893.716, 497.613]'}, {}), '([2434.28, 4364.812, 4728.559, 4825.798, 4728.797, 4507.625, \n 4269.851, 3720.657, 3104.309, 2308.975, 1514.831, 1474.707, 893.716, \n 497.613])', True, 'import numpy as np\n'), ((58, 14, 58, 41), 'numpy.geomspace', 'np.geomspace', ({(58, 27, 58, 31): '0.01', (58, 33, 58, 36): '10.0', (58, 38, 58, 40): '21'}, {}), '(0.01, 10.0, 21)', True, 'import numpy as np\n'), ((59, 13, 59, 47), 'numpy.array', 'np.array', ({(59, 22, 59, 46): '[0.79, 0.91, 0.94, 1.08]'}, {}), '([0.79, 0.91, 0.94, 1.08])', True, 'import numpy as np\n'), ((64, 9, 64, 44), 'numpy.array', 'np.array', ({(64, 18, 64, 43): '[full_masses[mass_index]]'}, {}), '([full_masses[mass_index]])', True, 'import numpy as np\n'), ((65, 8, 65, 42), 'numpy.array', 'np.array', ({(65, 17, 65, 41): '[full_temps[temp_index]]'}, {}), '([full_temps[temp_index]])', True, 'import numpy as np\n'), ((80, 19, 94, 13), 'cosmicfish.forecast', 'cf.forecast', (), '', True, 'import cosmicfish as cf\n'), ((69, 42, 69, 67), 'numpy.power', 'np.power', ({(69, 51, 69, 62): 'tval / 1.95', (69, 64, 69, 66): '3.0'}, {}), '(tval / 1.95, 3.0)', True, 'import numpy as np\n')]
XDZhelheim/CS205_C_CPP_Lab
project4/test/test_arm.py
f585fd685a51e19fddc9c582846547d34442c6ef
import os if __name__ == "__main__": dims = ["32", "64", "128", "256", "512", "1024", "2048"] for dim in dims: os.system( f"perf stat -e r11 -x, -r 10 ../matmul.out ../data/mat-A-{dim}.txt ../data/mat-B-{dim}.txt ./out/out-{dim}.txt 2>>res_arm.csv" ) print(f"Finished {dim}") print("Finished.")
[((7, 8, 9, 9), 'os.system', 'os.system', ({(8, 12, 8, 138): 'f"""perf stat -e r11 -x, -r 10 ../matmul.out ../data/mat-A-{dim}.txt ../data/mat-B-{dim}.txt ./out/out-{dim}.txt 2>>res_arm.csv"""'}, {}), "(\n f'perf stat -e r11 -x, -r 10 ../matmul.out ../data/mat-A-{dim}.txt ../data/mat-B-{dim}.txt ./out/out-{dim}.txt 2>>res_arm.csv'\n )", False, 'import os\n')]