repo_name | repo_path | repo_head_hexsha | content | apis
stringlengths 7-94 | stringlengths 4-237 | stringlengths 40-40 | stringlengths 10-680k | stringlengths 2-840k
---|---|---|---|---|
Tranquant/tqcli | tqcli/config/config.py | 0cc12e0d80129a14cec8117cd73e2ca69fb25214 | import logging
from os.path import expanduser
#TQ_API_ROOT_URL = 'http://127.0.1.1:8090/dataset'
TQ_API_ROOT_URL = 'http://elb-tranquant-ecs-cluster-tqapi-1919110681.us-west-2.elb.amazonaws.com/dataset'
LOG_PATH = expanduser('~/tqcli.log')
# the chunk size must be at least 5MB for multipart upload
DEFAULT_CHUNK_SIZE = 1024 * 1024 * 5 # 5MB
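# Illustration only (editorial sketch, not part of the original tqcli module):
# a file can be streamed in DEFAULT_CHUNK_SIZE pieces so that every part
# except possibly the last satisfies the 5MB multipart-upload minimum.
def _iter_chunks(path, chunk_size=DEFAULT_CHUNK_SIZE):
    with open(path, 'rb') as f:
        while True:
            chunk = f.read(chunk_size)
            if not chunk:
                break
            yield chunk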
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
filename=LOG_PATH,
filemode='w'
)
# define a Handler which writes INFO messages or higher to the sys.stderr
console = logging.StreamHandler()
console.setLevel(logging.INFO)
# set a format which is simpler for console use
formatter = logging.Formatter('%(message)s')
# tell the handler to use this format
console.setFormatter(formatter)
# add the handler to the root logger
logging.getLogger('').addHandler(console)
| [((6, 11, 6, 36), 'os.path.expanduser', 'expanduser', ({(6, 22, 6, 35): '"""~/tqcli.log"""'}, {}), "('~/tqcli.log')", False, 'from os.path import expanduser\n'), ((12, 0, 17, 1), 'logging.basicConfig', 'logging.basicConfig', (), '', False, 'import logging\n'), ((19, 10, 19, 33), 'logging.StreamHandler', 'logging.StreamHandler', ({}, {}), '()', False, 'import logging\n'), ((22, 12, 22, 44), 'logging.Formatter', 'logging.Formatter', ({(22, 30, 22, 43): '"""%(message)s"""'}, {}), "('%(message)s')", False, 'import logging\n'), ((26, 0, 26, 21), 'logging.getLogger', 'logging.getLogger', ({(26, 18, 26, 20): '""""""'}, {}), "('')", False, 'import logging\n')] |
rainwangphy/fqf-iqn-qrdqn.pytorch | fqf_iqn_qrdqn/agent/base_agent.py | 351e9c4722c8b1ed411cd8c1bbf46c93c07f0893 | from abc import ABC, abstractmethod
import os
import numpy as np
import torch
from torch.utils.tensorboard import SummaryWriter
from fqf_iqn_qrdqn.memory import LazyMultiStepMemory, \
LazyPrioritizedMultiStepMemory
from fqf_iqn_qrdqn.utils import RunningMeanStats, LinearAnneaer
class BaseAgent(ABC):
def __init__(self, env, test_env, log_dir, num_steps=5 * (10 ** 7),
batch_size=32, memory_size=10 ** 6, gamma=0.99, multi_step=1,
update_interval=4, target_update_interval=10000,
start_steps=50000, epsilon_train=0.01, epsilon_eval=0.001,
epsilon_decay_steps=250000, double_q_learning=False,
dueling_net=False, noisy_net=False, use_per=False,
log_interval=100, eval_interval=250000, num_eval_steps=125000,
max_episode_steps=27000, grad_cliping=5.0, cuda=True, seed=0):
self.env = env
self.test_env = test_env
torch.manual_seed(seed)
np.random.seed(seed)
self.env.seed(seed)
self.test_env.seed(2 ** 31 - 1 - seed)
        # torch.backends.cudnn.deterministic = True  # It harms performance.
        # torch.backends.cudnn.benchmark = False  # It harms performance.
self.device = torch.device(
"cuda" if cuda and torch.cuda.is_available() else "cpu")
self.online_net = None
self.target_net = None
# Replay memory which is memory-efficient to store stacked frames.
if use_per:
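            # beta_steps: the number of learning updates over which the
            # prioritized replay buffer anneals its importance-sampling
            # exponent (beta) toward 1.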
beta_steps = (num_steps - start_steps) / update_interval
self.memory = LazyPrioritizedMultiStepMemory(
memory_size, self.env.observation_space.shape,
self.device, gamma, multi_step, beta_steps=beta_steps)
else:
self.memory = LazyMultiStepMemory(
memory_size, self.env.observation_space.shape,
self.device, gamma, multi_step)
self.log_dir = log_dir
self.model_dir = os.path.join(log_dir, 'model')
self.summary_dir = os.path.join(log_dir, 'summary')
if not os.path.exists(self.model_dir):
os.makedirs(self.model_dir)
if not os.path.exists(self.summary_dir):
os.makedirs(self.summary_dir)
self.writer = SummaryWriter(log_dir=self.summary_dir)
self.train_return = RunningMeanStats(log_interval)
self.steps = 0
self.learning_steps = 0
self.episodes = 0
self.best_eval_score = -np.inf
self.num_actions = self.env.action_space.n
self.num_steps = num_steps
self.batch_size = batch_size
self.double_q_learning = double_q_learning
self.dueling_net = dueling_net
self.noisy_net = noisy_net
self.use_per = use_per
self.log_interval = log_interval
self.eval_interval = eval_interval
self.num_eval_steps = num_eval_steps
self.gamma_n = gamma ** multi_step
self.start_steps = start_steps
self.epsilon_train = LinearAnneaer(
1.0, epsilon_train, epsilon_decay_steps)
self.epsilon_eval = epsilon_eval
self.update_interval = update_interval
self.target_update_interval = target_update_interval
self.max_episode_steps = max_episode_steps
self.grad_cliping = grad_cliping
def run(self):
while True:
self.train_episode()
if self.steps > self.num_steps:
break
def is_update(self):
return self.steps % self.update_interval == 0 \
and self.steps >= self.start_steps
def is_random(self, eval=False):
# Use e-greedy for evaluation.
if self.steps < self.start_steps:
return True
if eval:
return np.random.rand() < self.epsilon_eval
if self.noisy_net:
return False
return np.random.rand() < self.epsilon_train.get()
def update_target(self):
self.target_net.load_state_dict(
self.online_net.state_dict())
def explore(self):
# Act with randomness.
action = self.env.action_space.sample()
return action
def exploit(self, state):
# Act without randomness.
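        # Scale the raw uint8 frame to [0, 1] and add a batch dimension
        # before the forward pass through the online network.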
state = torch.ByteTensor(
state).unsqueeze(0).to(self.device).float() / 255.
with torch.no_grad():
action = self.online_net.calculate_q(states=state).argmax().item()
return action
@abstractmethod
def learn(self):
pass
def save_models(self, save_dir):
if not os.path.exists(save_dir):
os.makedirs(save_dir)
torch.save(
self.online_net.state_dict(),
os.path.join(save_dir, 'online_net.pth'))
torch.save(
self.target_net.state_dict(),
os.path.join(save_dir, 'target_net.pth'))
def load_models(self, save_dir):
self.online_net.load_state_dict(torch.load(
os.path.join(save_dir, 'online_net.pth')))
self.target_net.load_state_dict(torch.load(
os.path.join(save_dir, 'target_net.pth')))
def train_episode(self):
self.online_net.train()
self.target_net.train()
self.episodes += 1
episode_return = 0.
episode_steps = 0
done = False
state = self.env.reset()
while (not done) and episode_steps <= self.max_episode_steps:
            # NOTE: Noises can be sampled only after self.learn(). However, I
            # sample noises before every action, which seems to lead to better
            # performance.
self.online_net.sample_noise()
if self.is_random(eval=False):
action = self.explore()
else:
action = self.exploit(state)
next_state, reward, done, _ = self.env.step(action)
# To calculate efficiently, I just set priority=max_priority here.
self.memory.append(state, action, reward, next_state, done)
self.steps += 1
episode_steps += 1
episode_return += reward
state = next_state
self.train_step_interval()
# We log running mean of stats.
self.train_return.append(episode_return)
# We log evaluation results along with training frames = 4 * steps.
if self.episodes % self.log_interval == 0:
self.writer.add_scalar(
'return/train', self.train_return.get(), 4 * self.steps)
print(f'Episode: {self.episodes:<4} '
f'episode steps: {episode_steps:<4} '
f'return: {episode_return:<5.1f}')
def train_step_interval(self):
self.epsilon_train.step()
if self.steps % self.target_update_interval == 0:
self.update_target()
if self.is_update():
self.learn()
if self.steps % self.eval_interval == 0:
self.evaluate()
self.save_models(os.path.join(self.model_dir, 'final'))
self.online_net.train()
def evaluate(self):
self.online_net.eval()
num_episodes = 0
num_steps = 0
total_return = 0.0
while True:
state = self.test_env.reset()
episode_steps = 0
episode_return = 0.0
done = False
while (not done) and episode_steps <= self.max_episode_steps:
if self.is_random(eval=True):
action = self.explore()
else:
action = self.exploit(state)
next_state, reward, done, _ = self.test_env.step(action)
num_steps += 1
episode_steps += 1
episode_return += reward
state = next_state
num_episodes += 1
total_return += episode_return
if num_steps > self.num_eval_steps:
break
mean_return = total_return / num_episodes
if mean_return > self.best_eval_score:
self.best_eval_score = mean_return
self.save_models(os.path.join(self.model_dir, 'best'))
# We log evaluation results along with training frames = 4 * steps.
self.writer.add_scalar(
'return/test', mean_return, 4 * self.steps)
print('-' * 60)
print(f'Num steps: {self.steps:<5} '
f'return: {mean_return:<5.1f}')
print('-' * 60)
def __del__(self):
self.env.close()
self.test_env.close()
self.writer.close()
| [((26, 8, 26, 31), 'torch.manual_seed', 'torch.manual_seed', ({(26, 26, 26, 30): 'seed'}, {}), '(seed)', False, 'import torch\n'), ((27, 8, 27, 28), 'numpy.random.seed', 'np.random.seed', ({(27, 23, 27, 27): 'seed'}, {}), '(seed)', True, 'import numpy as np\n'), ((51, 25, 51, 55), 'os.path.join', 'os.path.join', ({(51, 38, 51, 45): 'log_dir', (51, 47, 51, 54): '"""model"""'}, {}), "(log_dir, 'model')", False, 'import os\n'), ((52, 27, 52, 59), 'os.path.join', 'os.path.join', ({(52, 40, 52, 47): 'log_dir', (52, 49, 52, 58): '"""summary"""'}, {}), "(log_dir, 'summary')", False, 'import os\n'), ((58, 22, 58, 61), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', (), '', False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((59, 28, 59, 58), 'fqf_iqn_qrdqn.utils.RunningMeanStats', 'RunningMeanStats', ({(59, 45, 59, 57): 'log_interval'}, {}), '(log_interval)', False, 'from fqf_iqn_qrdqn.utils import RunningMeanStats, LinearAnneaer\n'), ((79, 29, 80, 52), 'fqf_iqn_qrdqn.utils.LinearAnneaer', 'LinearAnneaer', ({(80, 12, 80, 15): '1.0', (80, 17, 80, 30): 'epsilon_train', (80, 32, 80, 51): 'epsilon_decay_steps'}, {}), '(1.0, epsilon_train, epsilon_decay_steps)', False, 'from fqf_iqn_qrdqn.utils import RunningMeanStats, LinearAnneaer\n'), ((42, 26, 44, 70), 'fqf_iqn_qrdqn.memory.LazyPrioritizedMultiStepMemory', 'LazyPrioritizedMultiStepMemory', (), '', False, 'from fqf_iqn_qrdqn.memory import LazyMultiStepMemory, LazyPrioritizedMultiStepMemory\n'), ((46, 26, 48, 47), 'fqf_iqn_qrdqn.memory.LazyMultiStepMemory', 'LazyMultiStepMemory', ({(47, 16, 47, 27): 'memory_size', (47, 29, 47, 61): 'self.env.observation_space.shape', (48, 16, 48, 27): 'self.device', (48, 29, 48, 34): 'gamma', (48, 36, 48, 46): 'multi_step'}, {}), '(memory_size, self.env.observation_space.shape, self.\n device, gamma, multi_step)', False, 'from fqf_iqn_qrdqn.memory import LazyMultiStepMemory, LazyPrioritizedMultiStepMemory\n'), ((53, 15, 53, 45), 'os.path.exists', 'os.path.exists', ({(53, 30, 53, 44): 'self.model_dir'}, {}), '(self.model_dir)', False, 'import os\n'), ((54, 12, 54, 39), 'os.makedirs', 'os.makedirs', ({(54, 24, 54, 38): 'self.model_dir'}, {}), '(self.model_dir)', False, 'import os\n'), ((55, 15, 55, 47), 'os.path.exists', 'os.path.exists', ({(55, 30, 55, 46): 'self.summary_dir'}, {}), '(self.summary_dir)', False, 'import os\n'), ((56, 12, 56, 41), 'os.makedirs', 'os.makedirs', ({(56, 24, 56, 40): 'self.summary_dir'}, {}), '(self.summary_dir)', False, 'import os\n'), ((105, 15, 105, 31), 'numpy.random.rand', 'np.random.rand', ({}, {}), '()', True, 'import numpy as np\n'), ((120, 13, 120, 28), 'torch.no_grad', 'torch.no_grad', ({}, {}), '()', False, 'import torch\n'), ((129, 15, 129, 39), 'os.path.exists', 'os.path.exists', ({(129, 30, 129, 38): 'save_dir'}, {}), '(save_dir)', False, 'import os\n'), ((130, 12, 130, 33), 'os.makedirs', 'os.makedirs', ({(130, 24, 130, 32): 'save_dir'}, {}), '(save_dir)', False, 'import os\n'), ((133, 12, 133, 52), 'os.path.join', 'os.path.join', ({(133, 25, 133, 33): 'save_dir', (133, 35, 133, 51): '"""online_net.pth"""'}, {}), "(save_dir, 'online_net.pth')", False, 'import os\n'), ((136, 12, 136, 52), 'os.path.join', 'os.path.join', ({(136, 25, 136, 33): 'save_dir', (136, 35, 136, 51): '"""target_net.pth"""'}, {}), "(save_dir, 'target_net.pth')", False, 'import os\n'), ((102, 19, 102, 35), 'numpy.random.rand', 'np.random.rand', ({}, {}), '()', True, 'import numpy as np\n'), ((140, 12, 140, 52), 'os.path.join', 'os.path.join', ({(140, 25, 140, 33): 'save_dir', (140, 
35, 140, 51): '"""online_net.pth"""'}, {}), "(save_dir, 'online_net.pth')", False, 'import os\n'), ((142, 12, 142, 52), 'os.path.join', 'os.path.join', ({(142, 25, 142, 33): 'save_dir', (142, 35, 142, 51): '"""target_net.pth"""'}, {}), "(save_dir, 'target_net.pth')", False, 'import os\n'), ((201, 29, 201, 66), 'os.path.join', 'os.path.join', ({(201, 42, 201, 56): 'self.model_dir', (201, 58, 201, 65): '"""final"""'}, {}), "(self.model_dir, 'final')", False, 'import os\n'), ((237, 29, 237, 65), 'os.path.join', 'os.path.join', ({(237, 42, 237, 56): 'self.model_dir', (237, 58, 237, 64): '"""best"""'}, {}), "(self.model_dir, 'best')", False, 'import os\n'), ((34, 31, 34, 56), 'torch.cuda.is_available', 'torch.cuda.is_available', ({}, {}), '()', False, 'import torch\n'), ((118, 16, 119, 18), 'torch.ByteTensor', 'torch.ByteTensor', ({(119, 12, 119, 17): 'state'}, {}), '(state)', False, 'import torch\n')] |
felaray/Recognizers-Text | Python/libraries/recognizers-date-time/recognizers_date_time/date_time/italian/timeperiod_extractor_config.py | f514fd61c8d472ed92565261162712409f655312 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from typing import List, Pattern
from recognizers_text.utilities import RegExpUtility
from recognizers_text.extractor import Extractor
from recognizers_number.number.italian.extractors import ItalianIntegerExtractor
from ...resources.italian_date_time import ItalianDateTime
from ..extractors import DateTimeExtractor
from ..base_timeperiod import TimePeriodExtractorConfiguration, MatchedIndex
from ..base_time import BaseTimeExtractor
from ..base_timezone import BaseTimeZoneExtractor
from .time_extractor_config import ItalianTimeExtractorConfiguration
from .base_configs import ItalianDateTimeUtilityConfiguration
from .timezone_extractor_config import ItalianTimeZoneExtractorConfiguration
class ItalianTimePeriodExtractorConfiguration(TimePeriodExtractorConfiguration):
@property
def check_both_before_after(self) -> bool:
return self._check_both_before_after
@property
def simple_cases_regex(self) -> List[Pattern]:
return self._simple_cases_regex
@property
def till_regex(self) -> Pattern:
return self._till_regex
@property
def time_of_day_regex(self) -> Pattern:
return self._time_of_day_regex
@property
def general_ending_regex(self) -> Pattern:
return self._general_ending_regex
@property
def single_time_extractor(self) -> DateTimeExtractor:
return self._single_time_extractor
@property
def integer_extractor(self) -> Extractor:
return self._integer_extractor
@property
def token_before_date(self) -> str:
return self._token_before_date
@property
def pure_number_regex(self) -> List[Pattern]:
return self._pure_number_regex
@property
def time_zone_extractor(self) -> DateTimeExtractor:
return self._time_zone_extractor
def __init__(self):
super().__init__()
self._check_both_before_after = ItalianDateTime.CheckBothBeforeAfter
self._single_time_extractor = BaseTimeExtractor(
ItalianTimeExtractorConfiguration())
self._integer_extractor = ItalianIntegerExtractor()
self.utility_configuration = ItalianDateTimeUtilityConfiguration()
self._simple_cases_regex: List[Pattern] = [
RegExpUtility.get_safe_reg_exp(ItalianDateTime.PureNumFromTo),
RegExpUtility.get_safe_reg_exp(ItalianDateTime.PureNumBetweenAnd),
RegExpUtility.get_safe_reg_exp(ItalianDateTime.PmRegex),
RegExpUtility.get_safe_reg_exp(ItalianDateTime.AmRegex)
]
self._till_regex: Pattern = RegExpUtility.get_safe_reg_exp(
ItalianDateTime.TillRegex)
self._time_of_day_regex: Pattern = RegExpUtility.get_safe_reg_exp(
ItalianDateTime.TimeOfDayRegex)
self._general_ending_regex: Pattern = RegExpUtility.get_safe_reg_exp(
ItalianDateTime.GeneralEndingRegex)
self.from_regex = RegExpUtility.get_safe_reg_exp(
ItalianDateTime.FromRegex2)
self.connector_and_regex = RegExpUtility.get_safe_reg_exp(
ItalianDateTime.ConnectorAndRegex)
self.before_regex = RegExpUtility.get_safe_reg_exp(
ItalianDateTime.BeforeRegex2)
self._token_before_date = ItalianDateTime.TokenBeforeDate
        self._pure_number_regex = [ItalianDateTime.PureNumFromTo, ItalianDateTime.PureNumBetweenAnd]
self._time_zone_extractor = BaseTimeZoneExtractor(
ItalianTimeZoneExtractorConfiguration())
def get_from_token_index(self, source: str) -> MatchedIndex:
match = self.from_regex.search(source)
if match:
return MatchedIndex(True, match.start())
return MatchedIndex(False, -1)
def get_between_token_index(self, source: str) -> MatchedIndex:
match = self.before_regex.search(source)
if match:
return MatchedIndex(True, match.start())
return MatchedIndex(False, -1)
def is_connector_token(self, source: str):
return self.connector_and_regex.match(source)
| [((65, 34, 65, 59), 'recognizers_number.number.italian.extractors.ItalianIntegerExtractor', 'ItalianIntegerExtractor', ({}, {}), '()', False, 'from recognizers_number.number.italian.extractors import ItalianIntegerExtractor\n'), ((75, 36, 76, 38), 'recognizers_text.utilities.RegExpUtility.get_safe_reg_exp', 'RegExpUtility.get_safe_reg_exp', ({(76, 12, 76, 37): 'ItalianDateTime.TillRegex'}, {}), '(ItalianDateTime.TillRegex)', False, 'from recognizers_text.utilities import RegExpUtility\n'), ((77, 43, 78, 43), 'recognizers_text.utilities.RegExpUtility.get_safe_reg_exp', 'RegExpUtility.get_safe_reg_exp', ({(78, 12, 78, 42): 'ItalianDateTime.TimeOfDayRegex'}, {}), '(ItalianDateTime.TimeOfDayRegex)', False, 'from recognizers_text.utilities import RegExpUtility\n'), ((79, 46, 80, 47), 'recognizers_text.utilities.RegExpUtility.get_safe_reg_exp', 'RegExpUtility.get_safe_reg_exp', ({(80, 12, 80, 46): 'ItalianDateTime.GeneralEndingRegex'}, {}), '(ItalianDateTime.GeneralEndingRegex)', False, 'from recognizers_text.utilities import RegExpUtility\n'), ((82, 26, 83, 39), 'recognizers_text.utilities.RegExpUtility.get_safe_reg_exp', 'RegExpUtility.get_safe_reg_exp', ({(83, 12, 83, 38): 'ItalianDateTime.FromRegex2'}, {}), '(ItalianDateTime.FromRegex2)', False, 'from recognizers_text.utilities import RegExpUtility\n'), ((84, 35, 85, 46), 'recognizers_text.utilities.RegExpUtility.get_safe_reg_exp', 'RegExpUtility.get_safe_reg_exp', ({(85, 12, 85, 45): 'ItalianDateTime.ConnectorAndRegex'}, {}), '(ItalianDateTime.ConnectorAndRegex)', False, 'from recognizers_text.utilities import RegExpUtility\n'), ((86, 28, 87, 41), 'recognizers_text.utilities.RegExpUtility.get_safe_reg_exp', 'RegExpUtility.get_safe_reg_exp', ({(87, 12, 87, 40): 'ItalianDateTime.BeforeRegex2'}, {}), '(ItalianDateTime.BeforeRegex2)', False, 'from recognizers_text.utilities import RegExpUtility\n'), ((69, 12, 69, 73), 'recognizers_text.utilities.RegExpUtility.get_safe_reg_exp', 'RegExpUtility.get_safe_reg_exp', ({(69, 43, 69, 72): 'ItalianDateTime.PureNumFromTo'}, {}), '(ItalianDateTime.PureNumFromTo)', False, 'from recognizers_text.utilities import RegExpUtility\n'), ((70, 12, 70, 77), 'recognizers_text.utilities.RegExpUtility.get_safe_reg_exp', 'RegExpUtility.get_safe_reg_exp', ({(70, 43, 70, 76): 'ItalianDateTime.PureNumBetweenAnd'}, {}), '(ItalianDateTime.PureNumBetweenAnd)', False, 'from recognizers_text.utilities import RegExpUtility\n'), ((71, 12, 71, 67), 'recognizers_text.utilities.RegExpUtility.get_safe_reg_exp', 'RegExpUtility.get_safe_reg_exp', ({(71, 43, 71, 66): 'ItalianDateTime.PmRegex'}, {}), '(ItalianDateTime.PmRegex)', False, 'from recognizers_text.utilities import RegExpUtility\n'), ((72, 12, 72, 67), 'recognizers_text.utilities.RegExpUtility.get_safe_reg_exp', 'RegExpUtility.get_safe_reg_exp', ({(72, 43, 72, 66): 'ItalianDateTime.AmRegex'}, {}), '(ItalianDateTime.AmRegex)', False, 'from recognizers_text.utilities import RegExpUtility\n')] |
BotanyHunter/QuartetAnalysis | quartet_condor.py | c9b21aac267718be5ea8a8a76632fc0a3feb8403 | #quartet_condor.py
#version 2.0.2
import random, sys
def addToDict(d):
'''
    Ensures each quartet has three concordance factors (CFs).
    If the dictionary d has fewer than three CFs, adds the missing splits with the value 0 until there are three.
    Input: a dictionary d containing CFs keyed by split
'''
if ("{1,2|3,4}" not in d):
d["{1,2|3,4}"] = 0.0
if ("{1,3|2,4}" not in d):
d["{1,3|2,4}"] = 0.0
if ("{1,4|2,3}" not in d):
d["{1,4|2,3}"] = 0.0
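# Example (editorial, not part of the original script): a dictionary holding a
# single observed split is padded so that all three possible splits exist.
#
#   d = {"{1,2|3,4}": 0.87}
#   addToDict(d)
#   # d == {"{1,2|3,4}": 0.87, "{1,3|2,4}": 0.0, "{1,4|2,3}": 0.0}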
class quartet:
'''
Picks individual quartets and isolates concordance factors
'''
def __init__(self):
#length of a split in *.concordance file
self.length_of_splits = 10
self.quartet_length = 4
#list to hold the 4 taxa
self.taxa = []
#dictionaries to hold cfs with splits
self.d = {}
self.d2 = {}
def pick_random_quartet(self, ntax):
'''
Randomly select the 4 taxa to be included in the quartet analysis
:Input: The total number of taxa in an analysis
:Return: A sorted list of 4 unique taxa
'''
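        # Editorial note: the selection loop below is equivalent to
        # sorted(random.sample(range(ntax), 4)).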
self.taxa = []
while len(self.taxa) < self.quartet_length:
num = random.randint(0, ntax-1)
if num not in self.taxa:
self.taxa.append(num)
self.taxa = sorted(self.taxa)
#return a sorted list of 4 random taxa
return self.taxa
def isolateCFs(self, file, num_genes):
'''
        Isolates the CFs within a *.concordance file along with their 95% confidence-interval bounds
        :Input: A *.concordance file and the number of genes in the analysis
        :Return: Three dictionaries keyed by split: the CFs and the lower and upper 95% CI bounds
'''
self.d = {}
self.ciLow = {}
self.ciHigh = {}
split = ""
cf = 0
#counter to ensure 3 entries
counter = 0
for line in file:
#finds all splits, which have CFs associated with them
if (line[0] == '{' and len(line) == self.length_of_splits):
split = line
#find CF associated with the split found above
if (line.startswith('mean')):
words = line.split()
                #CF guaranteed by BUCKy to be the 4th "word"
cf = float(words[3])
#add split/CF pair to dictionary
self.d[split.strip()] = cf
counter += 1
if( line.startswith('95% CI for CF')):
useline = line.translate(None,"()")
useline = useline.replace(","," ")
words = useline.split()
self.ciLow[split.strip()] = float(words[5]) / num_genes
self.ciHigh[split.strip()] = float(words[6]) / num_genes
#fill out dictionary if there were less than 3 splits
if (counter < 3):
addToDict(self.d)
addToDict(self.ciLow)
addToDict(self.ciHigh)
return self.d, self.ciLow, self.ciHigh
| [((43, 18, 43, 43), 'random.randint', 'random.randint', ({(43, 33, 43, 34): '0', (43, 36, 43, 42): 'ntax - 1'}, {}), '(0, ntax - 1)', False, 'import random, sys\n')] |
rahulroshan96/CloudVisual | src/profiles/forms.py | aa33709d88442bcdbe3229234b4eb4f9abb4481e | from django import forms
from models import UserInputModel
class UserInputForm(forms.ModelForm):
class Meta:
model = UserInputModel
fields = ['user_input'] | [] |
Honny1/oval-graph | tests_oval_graph/test_arf_xml_parser/test_arf_xml_parser.py | 96472a9d2b08c2afce620c54f229ce95ad019d1f | from pathlib import Path
import pytest
from oval_graph.arf_xml_parser.arf_xml_parser import ARFXMLParser
def get_arf_report_path(src="global_test_data/ssg-fedora-ds-arf.xml"):
return str(Path(__file__).parent.parent / src)
@pytest.mark.parametrize("rule_id, result", [
(
"xccdf_org.ssgproject.content_rule_accounts_passwords_pam_faillock_deny",
"false",
),
(
"xccdf_org.ssgproject.content_rule_sshd_disable_gssapi_auth",
"false",
),
(
"xccdf_org.ssgproject.content_rule_service_debug-shell_disabled",
"true",
),
(
"xccdf_org.ssgproject.content_rule_mount_option_dev_shm_noexec",
"false",
),
(
"xccdf_org.ssgproject.content_rule_audit_rules_unsuccessful_file_modification_creat",
"false",
),
(
"xccdf_org.ssgproject.content_rule_audit_rules_file_deletion_events_rmdir",
"false",
),
(
"xccdf_org.ssgproject.content_rule_require_singleuser_auth",
"true",
),
])
def test_parsing_and_evaluate_scan_rule(rule_id, result):
path = get_arf_report_path()
parser = ARFXMLParser(path)
oval_tree = parser.get_oval_tree(rule_id)
assert oval_tree.evaluate_tree() == result
def test_parsing_arf_report_without_system_data():
path = get_arf_report_path("global_test_data/arf_no_system_data.xml")
rule_id = "xccdf_com.example.www_rule_test-fail"
parser = ARFXMLParser(path)
oval_tree = parser.get_oval_tree(rule_id)
assert oval_tree.evaluate_tree() == "false"
@pytest.mark.parametrize("rule_id, pattern", [
("hello", "404 rule \"hello\" not found!"),
("xccdf_org.ssgproject.content_rule_ntpd_specify_remote_server", "notselected"),
("xccdf_org.ssgproject.content_rule_configure_bind_crypto_policy", "notchecked"),
("xccdf_org.ssgproject.content_rule_ensure_gpgcheck_local_packages", "notapplicable"),
])
def test_parsing_bad_rule(rule_id, pattern):
path = get_arf_report_path()
parser = ARFXMLParser(path)
with pytest.raises(Exception, match=pattern):
assert parser.get_oval_tree(rule_id)
def test_use_bad_report_file():
src = 'global_test_data/xccdf_org.ssgproject.content_profile_ospp-results-initial.xml'
path = get_arf_report_path(src)
with pytest.raises(Exception, match=r"arf\b|ARF\b"):
assert ARFXMLParser(path)
| [((12, 1, 41, 2), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(12, 25, 12, 42): '"""rule_id, result"""', (12, 44, 41, 1): "[('xccdf_org.ssgproject.content_rule_accounts_passwords_pam_faillock_deny',\n 'false'), ('xccdf_org.ssgproject.content_rule_sshd_disable_gssapi_auth',\n 'false'), (\n 'xccdf_org.ssgproject.content_rule_service_debug-shell_disabled',\n 'true'), (\n 'xccdf_org.ssgproject.content_rule_mount_option_dev_shm_noexec',\n 'false'), (\n 'xccdf_org.ssgproject.content_rule_audit_rules_unsuccessful_file_modification_creat'\n , 'false'), (\n 'xccdf_org.ssgproject.content_rule_audit_rules_file_deletion_events_rmdir',\n 'false'), ('xccdf_org.ssgproject.content_rule_require_singleuser_auth',\n 'true')]"}, {}), "('rule_id, result', [(\n 'xccdf_org.ssgproject.content_rule_accounts_passwords_pam_faillock_deny',\n 'false'), ('xccdf_org.ssgproject.content_rule_sshd_disable_gssapi_auth',\n 'false'), (\n 'xccdf_org.ssgproject.content_rule_service_debug-shell_disabled',\n 'true'), (\n 'xccdf_org.ssgproject.content_rule_mount_option_dev_shm_noexec',\n 'false'), (\n 'xccdf_org.ssgproject.content_rule_audit_rules_unsuccessful_file_modification_creat'\n , 'false'), (\n 'xccdf_org.ssgproject.content_rule_audit_rules_file_deletion_events_rmdir',\n 'false'), ('xccdf_org.ssgproject.content_rule_require_singleuser_auth',\n 'true')])", False, 'import pytest\n'), ((58, 1, 63, 2), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(58, 25, 58, 43): '"""rule_id, pattern"""', (58, 45, 63, 1): '[(\'hello\', \'404 rule "hello" not found!\'), (\n \'xccdf_org.ssgproject.content_rule_ntpd_specify_remote_server\',\n \'notselected\'), (\n \'xccdf_org.ssgproject.content_rule_configure_bind_crypto_policy\',\n \'notchecked\'), (\n \'xccdf_org.ssgproject.content_rule_ensure_gpgcheck_local_packages\',\n \'notapplicable\')]'}, {}), '(\'rule_id, pattern\', [(\'hello\',\n \'404 rule "hello" not found!\'), (\n \'xccdf_org.ssgproject.content_rule_ntpd_specify_remote_server\',\n \'notselected\'), (\n \'xccdf_org.ssgproject.content_rule_configure_bind_crypto_policy\',\n \'notchecked\'), (\n \'xccdf_org.ssgproject.content_rule_ensure_gpgcheck_local_packages\',\n \'notapplicable\')])', False, 'import pytest\n'), ((45, 13, 45, 31), 'oval_graph.arf_xml_parser.arf_xml_parser.ARFXMLParser', 'ARFXMLParser', ({(45, 26, 45, 30): 'path'}, {}), '(path)', False, 'from oval_graph.arf_xml_parser.arf_xml_parser import ARFXMLParser\n'), ((53, 13, 53, 31), 'oval_graph.arf_xml_parser.arf_xml_parser.ARFXMLParser', 'ARFXMLParser', ({(53, 26, 53, 30): 'path'}, {}), '(path)', False, 'from oval_graph.arf_xml_parser.arf_xml_parser import ARFXMLParser\n'), ((66, 13, 66, 31), 'oval_graph.arf_xml_parser.arf_xml_parser.ARFXMLParser', 'ARFXMLParser', ({(66, 26, 66, 30): 'path'}, {}), '(path)', False, 'from oval_graph.arf_xml_parser.arf_xml_parser import ARFXMLParser\n'), ((68, 9, 68, 48), 'pytest.raises', 'pytest.raises', (), '', False, 'import pytest\n'), ((75, 9, 75, 55), 'pytest.raises', 'pytest.raises', (), '', False, 'import pytest\n'), ((76, 15, 76, 33), 'oval_graph.arf_xml_parser.arf_xml_parser.ARFXMLParser', 'ARFXMLParser', ({(76, 28, 76, 32): 'path'}, {}), '(path)', False, 'from oval_graph.arf_xml_parser.arf_xml_parser import ARFXMLParser\n'), ((9, 15, 9, 29), 'pathlib.Path', 'Path', ({(9, 20, 9, 28): '__file__'}, {}), '(__file__)', False, 'from pathlib import Path\n')] |
scottjr632/trump-twitter-bot | main.py | 484b1324d752395338b0a9e5850acf294089b26f | import os
import logging
import argparse
import sys
import signal
import subprocess
from functools import wraps
from dotenv import load_dotenv
load_dotenv(verbose=True)
from app.config import configure_app
from app.bot import TrumpBotScheduler
from app.sentimentbot import SentimentBot
parser = argparse.ArgumentParser(description=r"""
""")
ROOT = os.getcwd()
PID_FILE_PATH = os.path.join(ROOT, 'var/run-dev.pid')
CMDS = []
FNCS = []
try:
os.setpgrp()
if not os.path.exists(os.path.dirname(PID_FILE_PATH)):
os.makedirs(os.path.dirname(PID_FILE_PATH))
with open(PID_FILE_PATH, 'w+') as file:
file.write(str(os.getpgrp()) + '\n')
except Exception as e:
logging.error(e)
def _file_path_sanity_check(*args):
for path in args:
if not os.path.exists(path):
raise Exception('Unable to find file %s' % path)
def _start_client_server(*args, **kwargs):
cmd = [
'npm', '--prefix', '%s/client' % ROOT, 'run', 'start'
]
CMDS.append(cmd)
def inject_file_paths(fn):
requests_path = os.environ.get('REQUESTS_FILE_PATH', 'requests/request.json')
auth_path = os.environ.get('AUTH_FILE_PATH', 'requests/auth.json')
_file_path_sanity_check(requests_path, auth_path)
@wraps(fn)
def wrapper(*args, **kwargs):
return fn(requests_path=requests_path, auth_path=auth_path, *args, **kwargs)
return wrapper
@inject_file_paths
def _initialize_trump_bot(auth_path, requests_path,
send_posts: bool=True,
*args, **kwargs) -> TrumpBotScheduler:
trump_bot: TrumpBotScheduler = None
if send_posts:
logging.info('Post requests are not being sent.')
class PostOverride(TrumpBotScheduler):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def __send_tweet_msg__(self, content, headers=None):
return 200
trump_bot = PostOverride(file_path=requests_path, auth_file_path=auth_path)
else:
trump_bot = TrumpBotScheduler(file_path=requests_path, auth_file_path=auth_path)
# this functions initialize the trump bot by getting the latest tweets
# and trying to send any tweets that contained errors
trump_bot.send_latest_tweets()
trump_bot.resend_bad_tweets()
logging.info('Trump bot initialization finished... please press ctrl-c to close program if finished.')
return trump_bot
@inject_file_paths
def _start_sentiment_bot(auth_path: str, requests_path: str,
trump_bot: TrumpBotScheduler,
send_posts: bool=True) -> SentimentBot:
bot: SentimentBot = None
if send_posts:
logging.info('Sentiment bot is not running')
class PostOverride(SentimentBot):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def __send_tweet_msg__(self, content) -> int:
return 200
bot = PostOverride(file_path=requests_path, auth_file_path=auth_path)
else:
bot = SentimentBot(auth_file_path=auth_path, file_path=requests_path)
trump_bot.add_job(bot.send_todays_tone, 'interval', hours=24, max_instances=1)
return bot
def _start_flask_server(*args, **kwargs):
from app import app
logging.info('Starting the flask server...')
level = os.environ.get('CONFIG_LEVEL')
configure_app(app, status='production' if level is None else level)
port = app.config.get('PORT')
app.run(host='0.0.0.0', port=port)
def _start_dev_server(*args, **kwargs):
_start_client_server()
FNCS.append(_start_flask_server)
def _start_prod_server(*args, **kwargs):
_start_trump_bot(*args, **kwargs)
_start_flask_server(*args, **kwargs)
def _start_trump_bot(send_posts=True, start_sentiment_bot=False, *args, **kwargs):
logging.info('Starting the trump bot...')
# requests_path = os.environ.get('REQUESTS_FILE_PATH', 'requests/request.json')
# auth_path = os.environ.get('AUTH_FILE_PATH', 'requests/auth.json')
# _file_path_sanity_check(requests_path, auth_path)
bot = _initialize_trump_bot(send_posts=send_posts)
if not start_sentiment_bot:
_start_sentiment_bot(trump_bot=bot, send_posts=send_posts)
bot.start()
ACTIONS = {
"initialize": _initialize_trump_bot,
"client": _start_client_server,
"trumpbot": _start_trump_bot,
"flask": _start_flask_server,
"dev": _start_dev_server,
"prod": _start_prod_server,
}
parser.add_argument('action',
help='start the Flask app',
type=str,
choices=[key for key, v in ACTIONS.items()])
parser.add_argument('-np', '--no-post',
dest='send_posts',
action='store_true',
help='Do not send post requests')
parser.add_argument('-nsb', '--no-sentiment-bot',
dest='start_sentiment_bot',
action='store_true',
help='Do not to start the sentiment bot')
def signal_handler(sig, frame):
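    # Send SIGTERM to our whole process group (created by os.setpgrp above) so
    # any npm/Flask child processes exit together, then remove the PID file.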
os.killpg(0, signal.SIGTERM)
os.remove(PID_FILE_PATH)
sys.exit(0)
def main():
options = parser.parse_args()
for s in (signal.SIGINT, signal.SIGTERM):
signal.signal(s, signal_handler)
ACTIONS.get(options.action)(**options.__dict__)
env = os.environ.copy()
for cmd in CMDS:
subprocess.Popen(cmd, env=env)
for fn in FNCS:
subprocess.Popen(fn(), env=env)
signal.pause()
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
main()
| [((10, 0, 10, 25), 'dotenv.load_dotenv', 'load_dotenv', (), '', False, 'from dotenv import load_dotenv\n'), ((16, 9, 17, 4), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((20, 7, 20, 18), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os\n'), ((21, 16, 21, 53), 'os.path.join', 'os.path.join', ({(21, 29, 21, 33): 'ROOT', (21, 35, 21, 52): '"""var/run-dev.pid"""'}, {}), "(ROOT, 'var/run-dev.pid')", False, 'import os\n'), ((26, 4, 26, 16), 'os.setpgrp', 'os.setpgrp', ({}, {}), '()', False, 'import os\n'), ((53, 20, 53, 81), 'os.environ.get', 'os.environ.get', ({(53, 35, 53, 55): '"""REQUESTS_FILE_PATH"""', (53, 57, 53, 80): '"""requests/request.json"""'}, {}), "('REQUESTS_FILE_PATH', 'requests/request.json')", False, 'import os\n'), ((54, 16, 54, 70), 'os.environ.get', 'os.environ.get', ({(54, 31, 54, 47): '"""AUTH_FILE_PATH"""', (54, 49, 54, 69): '"""requests/auth.json"""'}, {}), "('AUTH_FILE_PATH', 'requests/auth.json')", False, 'import os\n'), ((57, 5, 57, 14), 'functools.wraps', 'wraps', ({(57, 11, 57, 13): 'fn'}, {}), '(fn)', False, 'from functools import wraps\n'), ((90, 4, 90, 106), 'logging.info', 'logging.info', ({(90, 17, 90, 105): '"""Trump bot initialization finished... please press ctrl-c to close program if finished."""'}, {}), "(\n 'Trump bot initialization finished... please press ctrl-c to close program if finished.'\n )", False, 'import logging\n'), ((122, 4, 122, 48), 'logging.info', 'logging.info', ({(122, 17, 122, 47): '"""Starting the flask server..."""'}, {}), "('Starting the flask server...')", False, 'import logging\n'), ((123, 12, 123, 42), 'os.environ.get', 'os.environ.get', ({(123, 27, 123, 41): '"""CONFIG_LEVEL"""'}, {}), "('CONFIG_LEVEL')", False, 'import os\n'), ((125, 4, 125, 71), 'app.config.configure_app', 'configure_app', (), '', False, 'from app.config import configure_app\n'), ((126, 11, 126, 33), 'app.app.config.get', 'app.config.get', ({(126, 26, 126, 32): '"""PORT"""'}, {}), "('PORT')", False, 'from app import app\n'), ((127, 4, 127, 38), 'app.app.run', 'app.run', (), '', False, 'from app import app\n'), ((142, 4, 142, 45), 'logging.info', 'logging.info', ({(142, 17, 142, 44): '"""Starting the trump bot..."""'}, {}), "('Starting the trump bot...')", False, 'import logging\n'), ((181, 4, 181, 32), 'os.killpg', 'os.killpg', ({(181, 14, 181, 15): '(0)', (181, 17, 181, 31): 'signal.SIGTERM'}, {}), '(0, signal.SIGTERM)', False, 'import os\n'), ((182, 4, 182, 28), 'os.remove', 'os.remove', ({(182, 14, 182, 27): 'PID_FILE_PATH'}, {}), '(PID_FILE_PATH)', False, 'import os\n'), ((183, 4, 183, 15), 'sys.exit', 'sys.exit', ({(183, 13, 183, 14): '(0)'}, {}), '(0)', False, 'import sys\n'), ((192, 10, 192, 27), 'os.environ.copy', 'os.environ.copy', ({}, {}), '()', False, 'import os\n'), ((199, 4, 199, 18), 'signal.pause', 'signal.pause', ({}, {}), '()', False, 'import signal\n'), ((203, 4, 203, 43), 'logging.basicConfig', 'logging.basicConfig', (), '', False, 'import logging\n'), ((35, 4, 35, 20), 'logging.error', 'logging.error', ({(35, 18, 35, 19): 'e'}, {}), '(e)', False, 'import logging\n'), ((71, 8, 71, 57), 'logging.info', 'logging.info', ({(71, 21, 71, 56): '"""Post requests are not being sent."""'}, {}), "('Post requests are not being sent.')", False, 'import logging\n'), ((83, 20, 83, 88), 'app.bot.TrumpBotScheduler', 'TrumpBotScheduler', (), '', False, 'from app.bot import TrumpBotScheduler\n'), ((101, 8, 101, 52), 'logging.info', 'logging.info', ({(101, 21, 101, 51): '"""Sentiment bot is not 
running"""'}, {}), "('Sentiment bot is not running')", False, 'import logging\n'), ((113, 14, 113, 77), 'app.sentimentbot.SentimentBot', 'SentimentBot', (), '', False, 'from app.sentimentbot import SentimentBot\n'), ((189, 8, 189, 40), 'signal.signal', 'signal.signal', ({(189, 22, 189, 23): 's', (189, 25, 189, 39): 'signal_handler'}, {}), '(s, signal_handler)', False, 'import signal\n'), ((194, 8, 194, 38), 'subprocess.Popen', 'subprocess.Popen', (), '', False, 'import subprocess\n'), ((28, 26, 28, 56), 'os.path.dirname', 'os.path.dirname', ({(28, 42, 28, 55): 'PID_FILE_PATH'}, {}), '(PID_FILE_PATH)', False, 'import os\n'), ((29, 20, 29, 50), 'os.path.dirname', 'os.path.dirname', ({(29, 36, 29, 49): 'PID_FILE_PATH'}, {}), '(PID_FILE_PATH)', False, 'import os\n'), ((40, 15, 40, 35), 'os.path.exists', 'os.path.exists', ({(40, 30, 40, 34): 'path'}, {}), '(path)', False, 'import os\n'), ((32, 23, 32, 35), 'os.getpgrp', 'os.getpgrp', ({}, {}), '()', False, 'import os\n')] |
dendi239/euler | 010-summation-of-primes.py | 71fcdca4a80f9e586aab05eb8acadf1a296dda90 | #! /usr/bin/env python3
import itertools
import typing as tp
def primes() -> tp.Generator[int, None, None]:
primes_ = []
d = 2
while True:
is_prime = True
for p in primes_:
if p * p > d:
break
if d % p == 0:
is_prime = False
break
if is_prime:
primes_.append(d)
yield d
d += 1
def sum_primes_below(n: int) -> int:
return sum(itertools.takewhile(lambda x: x < n, primes()))
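# Alternative approach (editorial sketch, not part of the original solution):
# a sieve of Eratosthenes is considerably faster for a bound of two million.
def sum_primes_below_sieve(n: int) -> int:
    if n < 3:
        return 0
    is_prime = [True] * n
    is_prime[0] = is_prime[1] = False
    for p in range(2, int(n ** 0.5) + 1):
        if is_prime[p]:
            for multiple in range(p * p, n, p):
                is_prime[multiple] = False
    return sum(i for i, prime in enumerate(is_prime) if prime)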
def test_ten() -> None:
assert sum_primes_below(10) == 17
def main() -> None:
print(sum_primes_below(2_000_000))
if __name__ == '__main__':
main()
| [] |
letmaik/lensfunpy | setup.py | ddadb6bfd5f3acde5640210aa9f575501e5c0914 | from setuptools import setup, Extension, find_packages
import subprocess
import errno
import re
import os
import shutil
import sys
import zipfile
from urllib.request import urlretrieve
import numpy
from Cython.Build import cythonize
isWindows = os.name == 'nt'
isMac = sys.platform == 'darwin'
is64Bit = sys.maxsize > 2**32
# adapted from cffi's setup.py
# the following may be overridden if pkg-config exists
libraries = ['lensfun']
include_dirs = []
library_dirs = []
extra_compile_args = []
extra_link_args = []
def _ask_pkg_config(resultlist, option, result_prefix='', sysroot=False):
pkg_config = os.environ.get('PKG_CONFIG','pkg-config')
try:
p = subprocess.Popen([pkg_config, option, 'lensfun'],
stdout=subprocess.PIPE)
except OSError as e:
if e.errno != errno.ENOENT:
raise
else:
t = p.stdout.read().decode().strip()
if p.wait() == 0:
res = t.split()
# '-I/usr/...' -> '/usr/...'
for x in res:
assert x.startswith(result_prefix)
res = [x[len(result_prefix):] for x in res]
sysroot = sysroot and os.environ.get('PKG_CONFIG_SYSROOT_DIR', '')
if sysroot:
# old versions of pkg-config don't support this env var,
# so here we emulate its effect if needed
res = [path if path.startswith(sysroot)
else sysroot + path
for path in res]
resultlist[:] = res
def use_pkg_config():
_ask_pkg_config(include_dirs, '--cflags-only-I', '-I', sysroot=True)
_ask_pkg_config(extra_compile_args, '--cflags-only-other')
_ask_pkg_config(library_dirs, '--libs-only-L', '-L', sysroot=True)
_ask_pkg_config(extra_link_args, '--libs-only-other')
_ask_pkg_config(libraries, '--libs-only-l', '-l')
if isWindows or isMac:
cmake_build = os.path.abspath('external/lensfun/build')
install_dir = os.path.join(cmake_build, 'install')
include_dirs += [os.path.join(install_dir, 'include', 'lensfun')]
library_dirs += [os.path.join(install_dir, 'lib')]
else:
use_pkg_config()
# this must be after use_pkg_config()!
include_dirs += [numpy.get_include()]
# for version_helper.h
include_dirs += [os.path.abspath('lensfunpy')]
def clone_submodules():
if not os.path.exists('external/lensfun/README.md'):
print('lensfun git submodule not cloned yet, will invoke "git submodule update --init" now')
if os.system('git submodule update --init') != 0:
raise Exception('git failed')
def windows_lensfun_compile():
clone_submodules()
cwd = os.getcwd()
# Download cmake to build lensfun
cmake_version = '3.13.4'
cmake_url = 'https://github.com/Kitware/CMake/releases/download/v{v}/cmake-{v}-win32-x86.zip'.format(v=cmake_version)
cmake = os.path.abspath('external/cmake-{}-win32-x86/bin/cmake.exe'.format(cmake_version))
# Download vcpkg to build dependencies of lensfun
vcpkg_commit = '2021.05.12'
vcpkg_url = 'https://github.com/Microsoft/vcpkg/archive/{}.zip'.format(vcpkg_commit)
vcpkg_dir = os.path.abspath('external/vcpkg-{}'.format(vcpkg_commit))
vcpkg_bootstrap = os.path.join(vcpkg_dir, 'bootstrap-vcpkg.bat')
vcpkg = os.path.join(vcpkg_dir, 'vcpkg.exe')
files = [(cmake_url, 'external', cmake),
(vcpkg_url, 'external', vcpkg_bootstrap)]
for url, extractdir, extractcheck in files:
if not os.path.exists(extractcheck):
path = 'external/' + os.path.basename(url)
if not os.path.exists(path):
print('Downloading', url)
try:
urlretrieve(url, path)
except:
# repeat once in case of network issues
urlretrieve(url, path)
with zipfile.ZipFile(path) as z:
print('Extracting', path, 'into', extractdir)
z.extractall(extractdir)
if not os.path.exists(path):
raise RuntimeError(path + ' not found!')
# Bootstrap vcpkg
os.chdir(vcpkg_dir)
if not os.path.exists(vcpkg):
code = os.system(vcpkg_bootstrap)
if code != 0:
sys.exit(code)
# lensfun depends on glib2, so let's build it with vcpkg
vcpkg_arch = 'x64' if is64Bit else 'x86'
vcpkg_triplet = '{}-windows'.format(vcpkg_arch)
code = os.system(vcpkg + ' install glib:' + vcpkg_triplet)
if code != 0:
sys.exit(code)
vcpkg_install_dir = os.path.join(vcpkg_dir, 'installed', vcpkg_triplet)
# bundle runtime dlls
vcpkg_bin_dir = os.path.join(vcpkg_install_dir, 'bin')
glib2_dll = os.path.join(vcpkg_bin_dir, 'glib-2.0-0.dll')
# configure and compile lensfun
if not os.path.exists(cmake_build):
os.mkdir(cmake_build)
os.chdir(cmake_build)
# temporary hack to avoid https://stackoverflow.com/a/53547931
# (python module not needed here anyway)
patch_path = '../apps/CMakeLists.txt'
with open(patch_path) as f:
content = f.read()
content = content.replace('IF(PYTHON)', 'IF(FALSE)')
with open(patch_path, 'w') as f:
f.write(content)
cmds = [cmake + ' .. -G "NMake Makefiles" -DCMAKE_BUILD_TYPE=Release ' +\
'-DBUILD_TESTS=off -DINSTALL_HELPER_SCRIPTS=off ' +\
'-DCMAKE_TOOLCHAIN_FILE={}/scripts/buildsystems/vcpkg.cmake '.format(vcpkg_dir) +\
'-DGLIB2_BASE_DIR={} -DGLIB2_DLL={} -DCMAKE_INSTALL_PREFIX=install'.format(vcpkg_install_dir, glib2_dll),
cmake + ' --build .',
cmake + ' --build . --target install',
]
for cmd in cmds:
print(cmd)
code = os.system(cmd)
if code != 0:
sys.exit(code)
os.chdir(cwd)
dll_runtime_libs = [('lensfun.dll', os.path.join(install_dir, 'bin')),
('glib-2.0-0.dll', vcpkg_bin_dir),
# dependencies of glib
('pcre.dll', vcpkg_bin_dir),
('iconv-2.dll', vcpkg_bin_dir),
('charset-1.dll', vcpkg_bin_dir),
('intl-8.dll', vcpkg_bin_dir),
]
for filename, folder in dll_runtime_libs:
src = os.path.join(folder, filename)
dest = 'lensfunpy/' + filename
print('copying', src, '->', dest)
shutil.copyfile(src, dest)
def mac_lensfun_compile():
clone_submodules()
# configure and compile lensfun
cwd = os.getcwd()
if not os.path.exists(cmake_build):
os.mkdir(cmake_build)
os.chdir(cmake_build)
install_name_dir = os.path.join(install_dir, 'lib')
cmds = ['cmake .. -DCMAKE_BUILD_TYPE=Release ' +\
'-DBUILD_TESTS=off -DINSTALL_HELPER_SCRIPTS=off ' +\
'-DCMAKE_INSTALL_PREFIX=install ' +\
'-DCMAKE_INSTALL_NAME_DIR=' + install_name_dir,
'cmake --build .',
'cmake --build . --target install',
]
for cmd in cmds:
print(cmd)
code = os.system(cmd)
if code != 0:
sys.exit(code)
os.chdir(cwd)
def bundle_db_files():
import glob
db_files = 'lensfunpy/db_files'
if not os.path.exists(db_files):
os.makedirs(db_files)
for path in glob.glob('external/lensfun/data/db/*.xml'):
dest = os.path.join(db_files, os.path.basename(path))
print('copying', path, '->', dest)
shutil.copyfile(path, dest)
package_data = {'lensfunpy': []}
# evil hack, check cmd line for relevant commands
# custom cmdclasses didn't work out in this case
cmdline = ''.join(sys.argv[1:])
needsCompile = any(s in cmdline for s in ['install', 'bdist', 'build_ext', 'wheel', 'nosetests'])
if isWindows and needsCompile:
windows_lensfun_compile()
package_data['lensfunpy'].append('*.dll')
elif isMac and needsCompile:
mac_lensfun_compile()
if any(s in cmdline for s in ['clean', 'sdist']):
# When running sdist after a previous run of bdist or build_ext
# then even with the 'clean' command the .egg-info folder stays.
# This folder contains SOURCES.txt which in turn is used by sdist
# to include package data files, but we don't want .dll's and .xml
# files in our source distribution. Therefore, to prevent accidents,
# we help a little...
egg_info = 'lensfunpy.egg-info'
print('removing', egg_info)
shutil.rmtree(egg_info, ignore_errors=True)
if 'sdist' not in cmdline:
# This assumes that the lensfun version from external/lensfun was used.
# If that's not the case, the bundled files may fail to load, for example,
# if lensfunpy was linked against an older lensfun version already on
# the system (Linux mostly) and the database format changed in an incompatible way.
# In that case, loading of bundled files can still be disabled
# with Database(load_bundled=False).
package_data['lensfunpy'].append('db_files/*.xml')
bundle_db_files()
# Support for optional Cython line tracing
# run the following to generate a test coverage report:
# $ export LINETRACE=1
# $ python setup.py build_ext --inplace
# $ nosetests --with-coverage --cover-html --cover-package=lensfunpy
compdirectives = {}
macros = []
if (os.environ.get('LINETRACE', False)):
compdirectives['linetrace'] = True
macros.append(('CYTHON_TRACE', '1'))
extensions = cythonize([Extension("lensfunpy._lensfun",
include_dirs=include_dirs,
sources=[os.path.join('lensfunpy', '_lensfun.pyx')],
libraries=libraries,
library_dirs=library_dirs,
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args,
define_macros=macros
)],
compiler_directives=compdirectives)
# make __version__ available (https://stackoverflow.com/a/16084844)
exec(open('lensfunpy/_version.py').read())
setup(
name = 'lensfunpy',
version = __version__,
description = 'Lens distortion correction for Python, a wrapper for lensfun',
long_description = open('README.rst').read(),
author = 'Maik Riechert',
author_email = '[email protected]',
url = 'https://github.com/letmaik/lensfunpy',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Cython',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Operating System :: MacOS',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Topic :: Multimedia :: Graphics',
'Topic :: Software Development :: Libraries',
],
packages = find_packages(),
ext_modules = extensions,
package_data = package_data,
install_requires=['numpy']
)
| [((253, 4, 253, 38), 'os.environ.get', 'os.environ.get', ({(253, 19, 253, 30): '"""LINETRACE"""', (253, 32, 253, 37): '(False)'}, {}), "('LINETRACE', False)", False, 'import os\n'), ((27, 17, 27, 58), 'os.environ.get', 'os.environ.get', ({(27, 32, 27, 44): '"""PKG_CONFIG"""', (27, 45, 27, 57): '"""pkg-config"""'}, {}), "('PKG_CONFIG', 'pkg-config')", False, 'import os\n'), ((60, 18, 60, 59), 'os.path.abspath', 'os.path.abspath', ({(60, 34, 60, 58): '"""external/lensfun/build"""'}, {}), "('external/lensfun/build')", False, 'import os\n'), ((61, 18, 61, 54), 'os.path.join', 'os.path.join', ({(61, 31, 61, 42): 'cmake_build', (61, 44, 61, 53): '"""install"""'}, {}), "(cmake_build, 'install')", False, 'import os\n'), ((69, 17, 69, 36), 'numpy.get_include', 'numpy.get_include', ({}, {}), '()', False, 'import numpy\n'), ((72, 17, 72, 45), 'os.path.abspath', 'os.path.abspath', ({(72, 33, 72, 44): '"""lensfunpy"""'}, {}), "('lensfunpy')", False, 'import os\n'), ((83, 10, 83, 21), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os\n'), ((94, 22, 94, 68), 'os.path.join', 'os.path.join', ({(94, 35, 94, 44): 'vcpkg_dir', (94, 46, 94, 67): '"""bootstrap-vcpkg.bat"""'}, {}), "(vcpkg_dir, 'bootstrap-vcpkg.bat')", False, 'import os\n'), ((95, 12, 95, 48), 'os.path.join', 'os.path.join', ({(95, 25, 95, 34): 'vcpkg_dir', (95, 36, 95, 47): '"""vcpkg.exe"""'}, {}), "(vcpkg_dir, 'vcpkg.exe')", False, 'import os\n'), ((119, 4, 119, 23), 'os.chdir', 'os.chdir', ({(119, 13, 119, 22): 'vcpkg_dir'}, {}), '(vcpkg_dir)', False, 'import os\n'), ((128, 11, 128, 62), 'os.system', 'os.system', ({(128, 21, 128, 61): "vcpkg + ' install glib:' + vcpkg_triplet"}, {}), "(vcpkg + ' install glib:' + vcpkg_triplet)", False, 'import os\n'), ((131, 24, 131, 75), 'os.path.join', 'os.path.join', ({(131, 37, 131, 46): 'vcpkg_dir', (131, 48, 131, 59): '"""installed"""', (131, 61, 131, 74): 'vcpkg_triplet'}, {}), "(vcpkg_dir, 'installed', vcpkg_triplet)", False, 'import os\n'), ((134, 20, 134, 58), 'os.path.join', 'os.path.join', ({(134, 33, 134, 50): 'vcpkg_install_dir', (134, 52, 134, 57): '"""bin"""'}, {}), "(vcpkg_install_dir, 'bin')", False, 'import os\n'), ((135, 16, 135, 61), 'os.path.join', 'os.path.join', ({(135, 29, 135, 42): 'vcpkg_bin_dir', (135, 44, 135, 60): '"""glib-2.0-0.dll"""'}, {}), "(vcpkg_bin_dir, 'glib-2.0-0.dll')", False, 'import os\n'), ((140, 4, 140, 25), 'os.chdir', 'os.chdir', ({(140, 13, 140, 24): 'cmake_build'}, {}), '(cmake_build)', False, 'import os\n'), ((161, 4, 161, 17), 'os.chdir', 'os.chdir', ({(161, 13, 161, 16): 'cwd'}, {}), '(cwd)', False, 'import os\n'), ((183, 10, 183, 21), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os\n'), ((186, 4, 186, 25), 'os.chdir', 'os.chdir', ({(186, 13, 186, 24): 'cmake_build'}, {}), '(cmake_build)', False, 'import os\n'), ((187, 23, 187, 55), 'os.path.join', 'os.path.join', ({(187, 36, 187, 47): 'install_dir', (187, 49, 187, 54): '"""lib"""'}, {}), "(install_dir, 'lib')", False, 'import os\n'), ((200, 4, 200, 17), 'os.chdir', 'os.chdir', ({(200, 13, 200, 16): 'cwd'}, {}), '(cwd)', False, 'import os\n'), ((207, 16, 207, 59), 'glob.glob', 'glob.glob', ({(207, 26, 207, 58): '"""external/lensfun/data/db/*.xml"""'}, {}), "('external/lensfun/data/db/*.xml')", False, 'import glob\n'), ((234, 4, 234, 47), 'shutil.rmtree', 'shutil.rmtree', (), '', False, 'import shutil\n'), ((29, 12, 30, 52), 'subprocess.Popen', 'subprocess.Popen', (), '', False, 'import subprocess\n'), ((63, 21, 63, 68), 'os.path.join', 'os.path.join', ({(63, 34, 63, 45): 'install_dir', 
(63, 47, 63, 56): '"""include"""', (63, 58, 63, 67): '"""lensfun"""'}, {}), "(install_dir, 'include', 'lensfun')", False, 'import os\n'), ((64, 21, 64, 53), 'os.path.join', 'os.path.join', ({(64, 34, 64, 45): 'install_dir', (64, 47, 64, 52): '"""lib"""'}, {}), "(install_dir, 'lib')", False, 'import os\n'), ((75, 11, 75, 55), 'os.path.exists', 'os.path.exists', ({(75, 26, 75, 54): '"""external/lensfun/README.md"""'}, {}), "('external/lensfun/README.md')", False, 'import os\n'), ((120, 11, 120, 32), 'os.path.exists', 'os.path.exists', ({(120, 26, 120, 31): 'vcpkg'}, {}), '(vcpkg)', False, 'import os\n'), ((121, 15, 121, 41), 'os.system', 'os.system', ({(121, 25, 121, 40): 'vcpkg_bootstrap'}, {}), '(vcpkg_bootstrap)', False, 'import os\n'), ((130, 8, 130, 22), 'sys.exit', 'sys.exit', ({(130, 17, 130, 21): 'code'}, {}), '(code)', False, 'import sys\n'), ((138, 11, 138, 38), 'os.path.exists', 'os.path.exists', ({(138, 26, 138, 37): 'cmake_build'}, {}), '(cmake_build)', False, 'import os\n'), ((139, 8, 139, 29), 'os.mkdir', 'os.mkdir', ({(139, 17, 139, 28): 'cmake_build'}, {}), '(cmake_build)', False, 'import os\n'), ((158, 15, 158, 29), 'os.system', 'os.system', ({(158, 25, 158, 28): 'cmd'}, {}), '(cmd)', False, 'import os\n'), ((173, 14, 173, 44), 'os.path.join', 'os.path.join', ({(173, 27, 173, 33): 'folder', (173, 35, 173, 43): 'filename'}, {}), '(folder, filename)', False, 'import os\n'), ((176, 8, 176, 34), 'shutil.copyfile', 'shutil.copyfile', ({(176, 24, 176, 27): 'src', (176, 29, 176, 33): 'dest'}, {}), '(src, dest)', False, 'import shutil\n'), ((184, 11, 184, 38), 'os.path.exists', 'os.path.exists', ({(184, 26, 184, 37): 'cmake_build'}, {}), '(cmake_build)', False, 'import os\n'), ((185, 8, 185, 29), 'os.mkdir', 'os.mkdir', ({(185, 17, 185, 28): 'cmake_build'}, {}), '(cmake_build)', False, 'import os\n'), ((197, 15, 197, 29), 'os.system', 'os.system', ({(197, 25, 197, 28): 'cmd'}, {}), '(cmd)', False, 'import os\n'), ((205, 11, 205, 35), 'os.path.exists', 'os.path.exists', ({(205, 26, 205, 34): 'db_files'}, {}), '(db_files)', False, 'import os\n'), ((206, 8, 206, 29), 'os.makedirs', 'os.makedirs', ({(206, 20, 206, 28): 'db_files'}, {}), '(db_files)', False, 'import os\n'), ((210, 8, 210, 35), 'shutil.copyfile', 'shutil.copyfile', ({(210, 24, 210, 28): 'path', (210, 30, 210, 34): 'dest'}, {}), '(path, dest)', False, 'import shutil\n'), ((298, 17, 298, 32), 'setuptools.find_packages', 'find_packages', ({}, {}), '()', False, 'from setuptools import setup, Extension, find_packages\n'), ((77, 11, 77, 51), 'os.system', 'os.system', ({(77, 21, 77, 50): '"""git submodule update --init"""'}, {}), "('git submodule update --init')", False, 'import os\n'), ((101, 15, 101, 43), 'os.path.exists', 'os.path.exists', ({(101, 30, 101, 42): 'extractcheck'}, {}), '(extractcheck)', False, 'import os\n'), ((123, 12, 123, 26), 'sys.exit', 'sys.exit', ({(123, 21, 123, 25): 'code'}, {}), '(code)', False, 'import sys\n'), ((160, 12, 160, 26), 'sys.exit', 'sys.exit', ({(160, 21, 160, 25): 'code'}, {}), '(code)', False, 'import sys\n'), ((163, 40, 163, 72), 'os.path.join', 'os.path.join', ({(163, 53, 163, 64): 'install_dir', (163, 66, 163, 71): '"""bin"""'}, {}), "(install_dir, 'bin')", False, 'import os\n'), ((199, 12, 199, 26), 'sys.exit', 'sys.exit', ({(199, 21, 199, 25): 'code'}, {}), '(code)', False, 'import sys\n'), ((208, 38, 208, 60), 'os.path.basename', 'os.path.basename', ({(208, 55, 208, 59): 'path'}, {}), '(path)', False, 'import os\n'), ((43, 34, 43, 78), 'os.environ.get', 'os.environ.get', ({(43, 
49, 43, 73): '"""PKG_CONFIG_SYSROOT_DIR"""', (43, 75, 43, 77): '""""""'}, {}), "('PKG_CONFIG_SYSROOT_DIR', '')", False, 'import os\n'), ((102, 33, 102, 54), 'os.path.basename', 'os.path.basename', ({(102, 50, 102, 53): 'url'}, {}), '(url)', False, 'import os\n'), ((103, 19, 103, 39), 'os.path.exists', 'os.path.exists', ({(103, 34, 103, 38): 'path'}, {}), '(path)', False, 'import os\n'), ((111, 17, 111, 38), 'zipfile.ZipFile', 'zipfile.ZipFile', ({(111, 33, 111, 37): 'path'}, {}), '(path)', False, 'import zipfile\n'), ((115, 19, 115, 39), 'os.path.exists', 'os.path.exists', ({(115, 34, 115, 38): 'path'}, {}), '(path)', False, 'import os\n'), ((106, 20, 106, 42), 'urllib.request.urlretrieve', 'urlretrieve', ({(106, 32, 106, 35): 'url', (106, 37, 106, 41): 'path'}, {}), '(url, path)', False, 'from urllib.request import urlretrieve\n'), ((259, 23, 259, 64), 'os.path.join', 'os.path.join', ({(259, 36, 259, 47): '"""lensfunpy"""', (259, 49, 259, 63): '"""_lensfun.pyx"""'}, {}), "('lensfunpy', '_lensfun.pyx')", False, 'import os\n'), ((109, 20, 109, 42), 'urllib.request.urlretrieve', 'urlretrieve', ({(109, 32, 109, 35): 'url', (109, 37, 109, 41): 'path'}, {}), '(url, path)', False, 'from urllib.request import urlretrieve\n')] |
bimri/programming_python | chapter_13/mailtools/__init__.py | ba52ccd18b9b4e6c5387bf4032f381ae816b5e77 | "The mailtools Utility Package"
'Initialization File'
"""
##################################################################################
mailtools package: interface to mail server transfers, used by pymail2, PyMailGUI,
and PyMailCGI; does loads, sends, parsing, composing, and deleting, with part
attachments, encodings (of both the email and Unicode kind), etc.; the parser,
fetcher, and sender classes here are designed to be mixed-in to subclasses which
use their methods, or used as embedded or standalone objects;
this package also includes convenience subclasses for silent mode, and more;
loads all mail text if pop server doesn't do top; doesn't handle threads or UI
here, and allows askPassword to differ per subclass; progress callback funcs get
status; all calls raise exceptions on error--client must handle in GUI/other;
this changed from file to package: nested modules imported here for bw compat;
4E: need to use package-relative import syntax throughout, because in Py 3.X
package dir is no longer on the module import search path if the package is imported
elsewhere (from another directory which uses this package); also performs
Unicode decoding on mail text when fetched (see mailFetcher), as well as for
some text part payloads which might have been email-encoded (see mailParser);
TBD: in saveparts, should file be opened in text mode for text/ contypes?
TBD: in walkNamedParts, should we skip oddballs like message/delivery-status?
TBD: Unicode support has not been tested exhaustively: see Chapter 13 for more
on the Py3.1 email package and its limitations, and the policies used here;
##################################################################################
"""
# collect contents of all modules here, when package dir imported directly
from .mailFetcher import *
from .mailSender import * # 4E: package-relative
from .mailParser import *
# export nested modules here, when from mailtools import *
__all__ = 'mailFetcher', 'mailSender', 'mailParser'
# self-test code is in selftest.py to allow mailconfig's path
# to be set before running the nested module imports above
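# A subclassing sketch (hypothetical names -- the real fetcher/sender/parser classes
# live in the nested modules imported above; only the askPassword hook comes from the
# notes in the docstring):
#
#     class GUIFetcher(mailFetcher.MailFetcher):      # assumed base-class name
#         def askPassword(self):                      # per-subclass hook noted above
#             return promptUserForPassword()          # hypothetical GUI prompt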
| [] |
jvollhueter/pyMANGA-1 | TreeModelLib/BelowgroundCompetition/__init__.py | 414204a394d44405225b4b8224b19464c1006f1d | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 8 15:25:03 2018
@author: bathmann
"""
from .BelowgroundCompetition import BelowgroundCompetition
from .SimpleTest import SimpleTest
from .FON import FON
from .OGSWithoutFeedback import OGSWithoutFeedback
from .OGSLargeScale3D import OGSLargeScale3D
from .OGS.helpers import CellInformation
from .FixedSalinity import FixedSalinity
| [] |
SDelhey/websocket-chat | server.py | c7b83583007a723baee25acedbceddd55c12ffec | from flask import Flask, render_template
from flask_socketio import SocketIO, send, emit
app = Flask(__name__)
app.config['SECRET_KEY'] = 'secret!'
socketio = SocketIO(app)
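# A message handler could be registered like this (sketch only, not part of the original
# file; send(..., broadcast=True) is the usual Flask-SocketIO relay pattern and is
# presumably why send/emit are imported above):
#   @socketio.on('message')
#   def handle_message(msg):
#       send(msg, broadcast=True)   # relay the chat message to every connected client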
if __name__ == '__main__':
socketio.run(app) | [((4, 6, 4, 21), 'flask.Flask', 'Flask', ({(4, 12, 4, 20): '__name__'}, {}), '(__name__)', False, 'from flask import Flask, render_template\n'), ((6, 11, 6, 24), 'flask_socketio.SocketIO', 'SocketIO', ({(6, 20, 6, 23): 'app'}, {}), '(app)', False, 'from flask_socketio import SocketIO, send, emit\n')] |
hadarohana/myCosmos | services/postprocess/src/postprocess.py | 6e4682a2af822eb828180658aaa6d3e304cc85bf | """
Post processing on detected objects
"""
import pymongo
from pymongo import MongoClient
import time
import logging
logging.basicConfig(format='%(levelname)s :: %(asctime)s :: %(message)s', level=logging.DEBUG)
from joblib import Parallel, delayed
import click
from xgboost_model.inference import run_inference, PostprocessException
import os
def load_detected_pages(db, buffer_size):
"""
"""
current_docs = []
for doc in db.propose_pages.find({'postprocess': None, 'ocr': True}, no_cursor_timeout=True):
current_docs.append(doc)
if len(current_docs) == buffer_size:
yield current_docs
current_docs = []
yield current_docs
def do_skip(page, client):
db = client.pdfs
coll = db.postprocess_pages
return coll.count_documents({'pdf_name': page['pdf_name'], 'page_num': page['page_num']}, limit=1) != 0
def postprocess(db_insert_fn, num_processes, weights_pth, skip):
logging.info('Starting post-processing over detected objects')
start_time = time.time()
client = MongoClient(os.environ["DBCONNECT"])
logging.info(f'Connected to client: {client}.')
db = client.pdfs
for batch in load_detected_pages(db, 100):
logging.info('Loaded next batch. Running postprocessing')
try:
pages = Parallel(n_jobs=num_processes)(delayed(run_inference)(page, weights_pth) for page in batch)
except PostprocessException as e:
logging.error(f'Postprocessing error in referenced page: {e.page}')
logging.error(f'Original Exception: {e.original_exception}')
continue
db_insert_fn(pages, client)
end_time = time.time()
logging.info(f'Exiting post-processing. Time up: {end_time - start_time}')
def mongo_insert_fn(objs, client):
db = client.pdfs
for obj in objs:
try:
result = db.propose_pages.update_one({'_id': obj['_id']},
{'$set':
{
'pp_detected_objs': obj['pp_detected_objs'],
'postprocess': True
}
}, upsert=False)
logging.info(f'Updated result: {result}')
        except pymongo.errors.WriteError as e:  # pymongo raises WriteError on failed writes
logging.error(f'Document write error: {e}\n Document id: obj["_id"]')
@click.command()
@click.argument("num_processes")
@click.argument("weights_pth")
@click.option('--skip/--no-skip')
def click_wrapper(num_processes, weights_pth, skip):
postprocess(mongo_insert_fn, int(num_processes), weights_pth, skip)
if __name__ == '__main__':
click_wrapper()
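# Example invocation (the connection string and weights path below are placeholders):
#   export DBCONNECT='mongodb://user:pass@dbhost:27017'
#   python postprocess.py 8 xgboost_model/weights.pth --no-skip
# where 8 is num_processes and the second argument is weights_pth, as declared above.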
| [((8, 0, 8, 94), 'logging.basicConfig', 'logging.basicConfig', (), '', False, 'import logging\n'), ((65, 1, 65, 16), 'click.command', 'click.command', ({}, {}), '()', False, 'import click\n'), ((66, 1, 66, 32), 'click.argument', 'click.argument', ({(66, 16, 66, 31): '"""num_processes"""'}, {}), "('num_processes')", False, 'import click\n'), ((67, 1, 67, 30), 'click.argument', 'click.argument', ({(67, 16, 67, 29): '"""weights_pth"""'}, {}), "('weights_pth')", False, 'import click\n'), ((68, 1, 68, 33), 'click.option', 'click.option', ({(68, 14, 68, 32): '"""--skip/--no-skip"""'}, {}), "('--skip/--no-skip')", False, 'import click\n'), ((32, 4, 32, 66), 'logging.info', 'logging.info', ({(32, 17, 32, 65): '"""Starting post-processing over detected objects"""'}, {}), "('Starting post-processing over detected objects')", False, 'import logging\n'), ((33, 17, 33, 28), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((34, 13, 34, 49), 'pymongo.MongoClient', 'MongoClient', ({(34, 25, 34, 48): "os.environ['DBCONNECT']"}, {}), "(os.environ['DBCONNECT'])", False, 'from pymongo import MongoClient\n'), ((35, 4, 35, 51), 'logging.info', 'logging.info', ({(35, 17, 35, 50): 'f"""Connected to client: {client}."""'}, {}), "(f'Connected to client: {client}.')", False, 'import logging\n'), ((47, 15, 47, 26), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((48, 4, 48, 78), 'logging.info', 'logging.info', ({(48, 17, 48, 77): 'f"""Exiting post-processing. Time up: {end_time - start_time}"""'}, {}), "(f'Exiting post-processing. Time up: {end_time - start_time}')", False, 'import logging\n'), ((38, 8, 38, 65), 'logging.info', 'logging.info', ({(38, 21, 38, 64): '"""Loaded next batch. Running postprocessing"""'}, {}), "('Loaded next batch. Running postprocessing')", False, 'import logging\n'), ((61, 12, 61, 53), 'logging.info', 'logging.info', ({(61, 25, 61, 52): 'f"""Updated result: {result}"""'}, {}), "(f'Updated result: {result}')", False, 'import logging\n'), ((40, 20, 40, 50), 'joblib.Parallel', 'Parallel', (), '', False, 'from joblib import Parallel, delayed\n'), ((42, 12, 42, 79), 'logging.error', 'logging.error', ({(42, 26, 42, 78): 'f"""Postprocessing error in referenced page: {e.page}"""'}, {}), "(f'Postprocessing error in referenced page: {e.page}')", False, 'import logging\n'), ((43, 12, 43, 72), 'logging.error', 'logging.error', ({(43, 26, 43, 71): 'f"""Original Exception: {e.original_exception}"""'}, {}), "(f'Original Exception: {e.original_exception}')", False, 'import logging\n'), ((63, 12, 63, 81), 'logging.error', 'logging.error', ({(63, 26, 63, 80): 'f"""Document write error: {e}\n Document id: obj["_id"]"""'}, {}), '(f"""Document write error: {e}\n Document id: obj["_id"]""")', False, 'import logging\n'), ((40, 51, 40, 73), 'joblib.delayed', 'delayed', ({(40, 59, 40, 72): 'run_inference'}, {}), '(run_inference)', False, 'from joblib import Parallel, delayed\n')] |
calvinfeng/openvino | model-optimizer/mo/front/common/partial_infer/multi_box_prior_test.py | 11f591c16852637506b1b40d083b450e56d0c8ac | """
Copyright (C) 2018-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
from mo.front.common.partial_infer.multi_box_prior import multi_box_prior_infer_mxnet
from mo.graph.graph import Node
from mo.utils.unittest.graph import build_graph
nodes_attributes = {'node_1': {'value': None, 'kind': 'data'},
'node_2': {'value': None, 'kind': 'data'},
'prior_box_1': {'type': 'PriorBox', 'kind': 'op'},
'node_3': {'type': 'Identity', 'value': None, 'kind': 'data'}
}
class TestMultiBoxPriorInfer(unittest.TestCase):
def test_prior_box_infer_ideal(self):
graph = build_graph(nodes_attributes,
[('node_1', 'prior_box_1'),
('node_2', 'prior_box_1'),
('prior_box_1', 'node_3')],
{'node_1': {'shape': np.array([1, 1024, 19, 19])},
'node_2': {'shape': np.array([1, 3, 300, 300])},
'prior_box_1': {'aspect_ratio': [1.0, 2.0, 0.5, 3.0, 0.333333333333],
'min_size': [0.2, 0.272],
'max_size': '', 'offset': 0.5, 'step': 0.2, 'sizes': [0.2, 0.272]},
'node_3': {'shape': np.array([1, 2, 3])},
})
multi_box_prior_node = Node(graph, 'prior_box_1')
multi_box_prior_infer_mxnet(multi_box_prior_node)
exp_shape = np.array([1, 2, 8664])
res_shape = graph.node['node_3']['shape']
for i in range(0, len(exp_shape)):
self.assertEqual(exp_shape[i], res_shape[i])
self.assertEqual(multi_box_prior_node.min_size, [0.2, 0.272])
self.assertEqual(multi_box_prior_node.max_size, '')
self.assertEqual(multi_box_prior_node.aspect_ratio, [1.0, 2.0, 0.5, 3.0, 0.333333333333])
self.assertEqual(round(multi_box_prior_node.step, 1), 0.2)
self.assertEqual(round(multi_box_prior_node.offset, 1), 0.5)
| [((46, 31, 46, 57), 'mo.graph.graph.Node', 'Node', ({(46, 36, 46, 41): 'graph', (46, 43, 46, 56): '"""prior_box_1"""'}, {}), "(graph, 'prior_box_1')", False, 'from mo.graph.graph import Node\n'), ((48, 8, 48, 57), 'mo.front.common.partial_infer.multi_box_prior.multi_box_prior_infer_mxnet', 'multi_box_prior_infer_mxnet', ({(48, 36, 48, 56): 'multi_box_prior_node'}, {}), '(multi_box_prior_node)', False, 'from mo.front.common.partial_infer.multi_box_prior import multi_box_prior_infer_mxnet\n'), ((49, 20, 49, 42), 'numpy.array', 'np.array', ({(49, 29, 49, 41): '[1, 2, 8664]'}, {}), '([1, 2, 8664])', True, 'import numpy as np\n'), ((38, 49, 38, 76), 'numpy.array', 'np.array', ({(38, 58, 38, 75): '[1, 1024, 19, 19]'}, {}), '([1, 1024, 19, 19])', True, 'import numpy as np\n'), ((39, 49, 39, 75), 'numpy.array', 'np.array', ({(39, 58, 39, 74): '[1, 3, 300, 300]'}, {}), '([1, 3, 300, 300])', True, 'import numpy as np\n'), ((43, 49, 43, 68), 'numpy.array', 'np.array', ({(43, 58, 43, 67): '[1, 2, 3]'}, {}), '([1, 2, 3])', True, 'import numpy as np\n')] |
Samahu/ros-system-monitor | bin/mem_monitor.py | 5376eba046ac38cfe8fe9ff8b385fa2637015eda | #!/usr/bin/env python
############################################################################
# Copyright (C) 2009, Willow Garage, Inc. #
# Copyright (C) 2013 by Ralf Kaestner #
# [email protected] #
# Copyright (C) 2013 by Jerome Maye #
# [email protected] #
# #
# All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# 1. Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# 2. Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# 3. The name of the copyright holders may be used to endorse or #
# promote products derived from this software without specific #
# prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, #
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, #
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; #
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER #
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT #
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN #
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
############################################################################
from __future__ import with_statement
import rospy
import traceback
import threading
from threading import Timer
import sys, os, time
from time import sleep
import subprocess
import string
import socket
from diagnostic_msgs.msg import DiagnosticArray, DiagnosticStatus, KeyValue
mem_level_warn = 0.95
mem_level_error = 0.99
stat_dict = { 0: 'OK', 1: 'Warning', 2: 'Error' }
def update_status_stale(stat, last_update_time):
time_since_update = rospy.get_time() - last_update_time
stale_status = 'OK'
if time_since_update > 20 and time_since_update <= 35:
stale_status = 'Lagging'
if stat.level == DiagnosticStatus.OK:
stat.message = stale_status
elif stat.message.find(stale_status) < 0:
stat.message = ', '.join([stat.message, stale_status])
stat.level = max(stat.level, DiagnosticStatus.WARN)
if time_since_update > 35:
stale_status = 'Stale'
if stat.level == DiagnosticStatus.OK:
stat.message = stale_status
elif stat.message.find(stale_status) < 0:
stat.message = ', '.join([stat.message, stale_status])
stat.level = max(stat.level, DiagnosticStatus.ERROR)
stat.values.pop(0)
stat.values.pop(0)
stat.values.insert(0, KeyValue(key = 'Update Status', value = stale_status))
stat.values.insert(1, KeyValue(key = 'Time Since Update', value = str(time_since_update)))
class MemMonitor():
def __init__(self, hostname, diag_hostname):
self._diag_pub = rospy.Publisher('/diagnostics', DiagnosticArray, queue_size = 100)
self._mutex = threading.Lock()
self._mem_level_warn = rospy.get_param('~mem_level_warn', mem_level_warn)
self._mem_level_error = rospy.get_param('~mem_level_error', mem_level_error)
self._usage_timer = None
self._usage_stat = DiagnosticStatus()
self._usage_stat.name = 'Memory Usage (%s)' % diag_hostname
self._usage_stat.level = 1
self._usage_stat.hardware_id = hostname
self._usage_stat.message = 'No Data'
self._usage_stat.values = [ KeyValue(key = 'Update Status', value = 'No Data' ),
KeyValue(key = 'Time Since Last Update', value = 'N/A') ]
self._last_usage_time = 0
self._last_publish_time = 0
# Start checking everything
self.check_usage()
## Must have the lock to cancel everything
def cancel_timers(self):
if self._usage_timer:
self._usage_timer.cancel()
def check_memory(self):
values = []
level = DiagnosticStatus.OK
msg = ''
mem_dict = { 0: 'OK', 1: 'Low Memory', 2: 'Very Low Memory' }
try:
p = subprocess.Popen('free -tm',
stdout = subprocess.PIPE,
stderr = subprocess.PIPE, shell = True)
stdout, stderr = p.communicate()
retcode = p.returncode
if retcode != 0:
values.append(KeyValue(key = "\"free -tm\" Call Error", value = str(retcode)))
return DiagnosticStatus.ERROR, values
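            # With the -t flag, "free -tm" prints a header row followed by Mem:, Swap:
            # and Total: rows, each laid out roughly as "<label>  total  used  free ...";
            # rows[1]/rows[2]/rows[3] below are those three rows and columns 1-3 hold the
            # total/used/free figures in megabytes.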
rows = stdout.split('\n')
data = rows[1].split()
total_mem_physical = data[1]
used_mem_physical = data[2]
free_mem_physical = data[3]
data = rows[2].split()
total_mem_swap = data[1]
used_mem_swap = data[2]
free_mem_swap = data[3]
data = rows[3].split()
total_mem = data[1]
used_mem = data[2]
free_mem = data[3]
level = DiagnosticStatus.OK
mem_usage = float(used_mem_physical)/float(total_mem_physical)
if (mem_usage < self._mem_level_warn):
level = DiagnosticStatus.OK
elif (mem_usage < self._mem_level_error):
level = DiagnosticStatus.WARN
else:
level = DiagnosticStatus.ERROR
values.append(KeyValue(key = 'Memory Status', value = mem_dict[level]))
values.append(KeyValue(key = 'Total Memory (Physical)', value = total_mem_physical+"M"))
values.append(KeyValue(key = 'Used Memory (Physical)', value = used_mem_physical+"M"))
values.append(KeyValue(key = 'Free Memory (Physical)', value = free_mem_physical+"M"))
values.append(KeyValue(key = 'Total Memory (Swap)', value = total_mem_swap+"M"))
values.append(KeyValue(key = 'Used Memory (Swap)', value = used_mem_swap+"M"))
values.append(KeyValue(key = 'Free Memory (Swap)', value = free_mem_swap+"M"))
values.append(KeyValue(key = 'Total Memory', value = total_mem+"M"))
values.append(KeyValue(key = 'Used Memory', value = used_mem+"M"))
values.append(KeyValue(key = 'Free Memory', value = free_mem+"M"))
msg = mem_dict[level]
except Exception, e:
rospy.logerr(traceback.format_exc())
msg = 'Memory Usage Check Error'
values.append(KeyValue(key = msg, value = str(e)))
level = DiagnosticStatus.ERROR
return level, mem_dict[level], values
def check_usage(self):
if rospy.is_shutdown():
with self._mutex:
self.cancel_timers()
return
diag_level = 0
diag_vals = [ KeyValue(key = 'Update Status', value = 'OK' ),
KeyValue(key = 'Time Since Last Update', value = 0 )]
diag_msgs = []
# Check memory
mem_level, mem_msg, mem_vals = self.check_memory()
diag_vals.extend(mem_vals)
if mem_level > 0:
diag_msgs.append(mem_msg)
diag_level = max(diag_level, mem_level)
if diag_msgs and diag_level > 0:
usage_msg = ', '.join(set(diag_msgs))
else:
usage_msg = stat_dict[diag_level]
# Update status
with self._mutex:
self._last_usage_time = rospy.get_time()
self._usage_stat.level = diag_level
self._usage_stat.values = diag_vals
self._usage_stat.message = usage_msg
if not rospy.is_shutdown():
self._usage_timer = threading.Timer(5.0, self.check_usage)
self._usage_timer.start()
else:
self.cancel_timers()
def publish_stats(self):
with self._mutex:
# Update everything with last update times
update_status_stale(self._usage_stat, self._last_usage_time)
msg = DiagnosticArray()
msg.header.stamp = rospy.get_rostime()
msg.status.append(self._usage_stat)
if rospy.get_time() - self._last_publish_time > 0.5:
self._diag_pub.publish(msg)
self._last_publish_time = rospy.get_time()
if __name__ == '__main__':
hostname = socket.gethostname()
hostname = hostname.replace('-', '_')
import optparse
parser = optparse.OptionParser(usage="usage: mem_monitor.py [--diag-hostname=cX]")
parser.add_option("--diag-hostname", dest="diag_hostname",
help="Computer name in diagnostics output (ex: 'c1')",
metavar="DIAG_HOSTNAME",
action="store", default = hostname)
options, args = parser.parse_args(rospy.myargv())
try:
rospy.init_node('mem_monitor_%s' % hostname)
except rospy.exceptions.ROSInitException:
print >> sys.stderr, 'Memory monitor is unable to initialize node. Master may not be running.'
sys.exit(0)
mem_node = MemMonitor(hostname, options.diag_hostname)
rate = rospy.Rate(1.0)
try:
while not rospy.is_shutdown():
rate.sleep()
mem_node.publish_stats()
except KeyboardInterrupt:
pass
except Exception, e:
traceback.print_exc()
rospy.logerr(traceback.format_exc())
mem_node.cancel_timers()
sys.exit(0)
| [] |
stamhe/bitcoin-abc | cmake/utils/gen-ninja-deps.py | a1ba303c6b4f164ae94612e83b824e564405a96e | #!/usr/bin/env python3
import argparse
import os
import subprocess
parser = argparse.ArgumentParser(description='Produce a dep file from ninja.')
parser.add_argument(
'--build-dir',
help='The build directory.',
required=True)
parser.add_argument(
'--base-dir',
help='The directory for which dependencies are rewriten.',
required=True)
parser.add_argument('--ninja', help='The ninja executable to use.')
parser.add_argument(
'base_target',
help="The target from the base's perspective.")
parser.add_argument(
'targets', nargs='+',
help='The target for which dependencies are extracted.')
parser.add_argument(
'--extra-deps', nargs='+',
help='Extra dependencies.')
args = parser.parse_args()
build_dir = os.path.abspath(args.build_dir)
base_dir = os.path.abspath(args.base_dir)
ninja = args.ninja
base_target = args.base_target
targets = args.targets
extra_deps = args.extra_deps
# Make sure we operate in the right folder.
os.chdir(build_dir)
if ninja is None:
ninja = subprocess.check_output(['command', '-v', 'ninja'])[:-1]
# Construct the set of all targets
all_targets = set()
doto_targets = set()
for t in subprocess.check_output([ninja, '-t', 'targets', 'all']).splitlines():
t, r = t.split(b':')
all_targets.add(t)
if r[:13] == b' C_COMPILER__' or r[:15] == b' CXX_COMPILER__':
doto_targets.add(t)
def parse_ninja_query(query):
deps = dict()
lines = query.splitlines()
while len(lines):
line = lines.pop(0)
if line[0] == ord(' '):
continue
# We have a new target
target = line.split(b':')[0]
assert lines.pop(0)[:8] == b' input:'
inputs = set()
while True:
i = lines.pop(0)
if i[:4] != b' ':
break
'''
ninja has 3 types of input:
1. Explicit dependencies, no prefix;
2. Implicit dependencies, | prefix.
3. Order only dependencies, || prefix.
            Order-only dependencies do not require the target to be rebuilt,
            so we ignore them.
'''
i = i[4:]
if i[0] == ord('|'):
if i[1] == ord('|'):
# We reached the order only dependencies.
break
i = i[2:]
inputs.add(i)
deps[target] = inputs
return deps
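# For reference, each block printed by "ninja -t query <target>" looks roughly like
#   path/to/target.o:
#     input: CXX_COMPILER__objlib
#       explicit_dep.cpp
#       | implicit_dep.h
#       || order_only_dep
#     outputs:
#       ...
# which is the shape parse_ninja_query() above walks.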
def extract_deps(workset):
# Recursively extract the dependencies of the target.
deps = dict()
while len(workset) > 0:
query = subprocess.check_output([ninja, '-t', 'query'] + list(workset))
target_deps = parse_ninja_query(query)
deps.update(target_deps)
workset = set()
for d in target_deps.values():
workset.update(t for t in d if t in all_targets and t not in deps)
# Extract build time dependencies.
bt_targets = [t for t in deps if t in doto_targets]
if len(bt_targets) == 0:
return deps
ndeps = subprocess.check_output(
[ninja, '-t', 'deps'] + bt_targets,
stderr=subprocess.DEVNULL)
lines = ndeps.splitlines()
while len(lines) > 0:
line = lines.pop(0)
t, m = line.split(b':')
if m == b' deps not found':
continue
inputs = set()
while True:
i = lines.pop(0)
if i == b'':
break
assert i[:4] == b' '
inputs.add(i[4:])
deps[t] = inputs
return deps
base_dir = base_dir.encode()
def rebase_deps(deps):
rebased = dict()
cache = dict()
def rebase(path):
if path in cache:
return cache[path]
abspath = os.path.abspath(path)
newpath = path if path == abspath else os.path.relpath(
abspath, base_dir)
cache[path] = newpath
return newpath
for t, s in deps.items():
rebased[rebase(t)] = set(rebase(d) for d in s)
return rebased
deps = extract_deps(set(targets))
deps = rebase_deps(deps)
def dump(deps):
for t, d in deps.items():
if len(d) == 0:
continue
str = t.decode() + ": \\\n "
str += " \\\n ".join(sorted(map((lambda x: x.decode()), d)))
print(str)
# Collapse everything under the base target.
basedeps = set() if extra_deps is None else set(d.encode() for d in extra_deps)
for d in deps.values():
basedeps.update(d)
base_target = base_target.encode()
basedeps.discard(base_target)
dump({base_target: basedeps})
| [((7, 9, 7, 78), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((28, 12, 28, 43), 'os.path.abspath', 'os.path.abspath', ({(28, 28, 28, 42): 'args.build_dir'}, {}), '(args.build_dir)', False, 'import os\n'), ((29, 11, 29, 41), 'os.path.abspath', 'os.path.abspath', ({(29, 27, 29, 40): 'args.base_dir'}, {}), '(args.base_dir)', False, 'import os\n'), ((36, 0, 36, 19), 'os.chdir', 'os.chdir', ({(36, 9, 36, 18): 'build_dir'}, {}), '(build_dir)', False, 'import os\n'), ((110, 12, 112, 34), 'subprocess.check_output', 'subprocess.check_output', (), '', False, 'import subprocess\n'), ((39, 12, 39, 63), 'subprocess.check_output', 'subprocess.check_output', ({(39, 36, 39, 62): "['command', '-v', 'ninja']"}, {}), "(['command', '-v', 'ninja'])", False, 'import subprocess\n'), ((44, 9, 44, 65), 'subprocess.check_output', 'subprocess.check_output', ({(44, 33, 44, 64): "[ninja, '-t', 'targets', 'all']"}, {}), "([ninja, '-t', 'targets', 'all'])", False, 'import subprocess\n'), ((146, 18, 146, 39), 'os.path.abspath', 'os.path.abspath', ({(146, 34, 146, 38): 'path'}, {}), '(path)', False, 'import os\n'), ((147, 47, 148, 30), 'os.path.relpath', 'os.path.relpath', ({(148, 12, 148, 19): 'abspath', (148, 21, 148, 29): 'base_dir'}, {}), '(abspath, base_dir)', False, 'import os\n')] |
ptphp/PyLib | src/webpy1/src/manage/checkPic.py | 07ac99cf2deb725475f5771b123b9ea1375f5e65 | '''
Created on 2011-6-22
@author: dholer
'''
| [] |
coleb/sendoff | tests/__init__.py | fc1b38ba7571254a88ca457f6f618ae4572f30b6 | """Tests for the `sendoff` library.
The `sendoff` library tests validate the expected function of the library.
"""
| [] |
Wind-River/starlingx-config | sysinv/sysinv/sysinv/sysinv/helm/garbd.py | 96b92e5179d54dde10cb84c943eb239adf26b958 | #
# Copyright (c) 2018-2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from sysinv.common import constants
from sysinv.common import exception
from sysinv.common import utils
from sysinv.helm import common
from sysinv.helm import base
class GarbdHelm(base.BaseHelm):
"""Class to encapsulate helm operations for the galera arbitrator chart"""
# The service name is used to build the standard docker image location.
# It is intentionally "mariadb" and not "garbd" as they both use the
# same docker image.
SERVICE_NAME = common.HELM_CHART_MARIADB
CHART = common.HELM_CHART_GARBD
SUPPORTED_NAMESPACES = \
base.BaseHelm.SUPPORTED_NAMESPACES + [common.HELM_NS_OPENSTACK]
SUPPORTED_APP_NAMESPACES = {
constants.HELM_APP_OPENSTACK:
base.BaseHelm.SUPPORTED_NAMESPACES + [common.HELM_NS_OPENSTACK]
}
def _is_enabled(self, app_name, chart_name, namespace):
# First, see if this chart is enabled by the user then adjust based on
# system conditions
enabled = super(GarbdHelm, self)._is_enabled(
app_name, chart_name, namespace)
# If there are fewer than 2 controllers or we're on AIO-DX or we are on
# distributed cloud system controller, we'll use a single mariadb server
# and so we don't want to run garbd.
if enabled and (self._num_controllers() < 2 or
utils.is_aio_duplex_system(self.dbapi) or
(self._distributed_cloud_role() ==
constants.DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER)):
enabled = False
return enabled
def execute_manifest_updates(self, operator):
# On application load this chart is enabled in the mariadb chart group
if not self._is_enabled(operator.APP,
self.CHART, common.HELM_NS_OPENSTACK):
operator.chart_group_chart_delete(
operator.CHART_GROUPS_LUT[self.CHART],
operator.CHARTS_LUT[self.CHART])
def get_overrides(self, namespace=None):
overrides = {
common.HELM_NS_OPENSTACK: {
}
}
if namespace in self.SUPPORTED_NAMESPACES:
return overrides[namespace]
elif namespace:
raise exception.InvalidHelmNamespace(chart=self.CHART,
namespace=namespace)
else:
return overrides
| [((40, 24, 40, 62), 'sysinv.common.utils.is_aio_duplex_system', 'utils.is_aio_duplex_system', ({(40, 51, 40, 61): 'self.dbapi'}, {}), '(self.dbapi)', False, 'from sysinv.common import utils\n'), ((63, 18, 64, 69), 'sysinv.common.exception.InvalidHelmNamespace', 'exception.InvalidHelmNamespace', (), '', False, 'from sysinv.common import exception\n')] |
aaron-zou/pretraining-twostream | dataloader/frame_counter/frame_counter.py | 5aa2f4bafb731e61f8f671e2500a6dfa8436be57 | #!/usr/bin/env python
"""Generate frame counts dict for a dataset.
Usage:
frame_counter.py [options]
Options:
-h, --help Print help message
--root=<str> Path to root of dataset (should contain video folders that contain images)
[default: /vision/vision_users/azou/data/hmdb51_flow/u/]
--output=<str> Output filename [default: hmdb_frame_count.pickle]
"""
from __future__ import print_function
from docopt import docopt
import os
import sys
import pickle
if __name__ == '__main__':
args = docopt(__doc__)
print(args)
# Final counts
counts = {}
min_count = sys.maxint
# Generate list of video folders
for root, dirs, files in os.walk(args['--root']):
# Skip the root directory
if len(dirs) != 0:
continue
# Process a directory and frame count into a dictionary entry
name = os.path.basename(os.path.normpath(root))
print('{}: {} frames'.format(name, len(files)))
counts[name] = len(files)
# Track minimum count
if len(files) < min_count:
min_count = len(files)
with open(args['--output'], 'wb') as ofile:
pickle.dump(counts, ofile)
print('Minimum frame count = {}'.format(min_count))
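    # The pickle maps each video folder name to its frame count and can be reloaded
    # later with, e.g.:
    #   with open(args['--output'], 'rb') as ifile:
    #       counts = pickle.load(ifile)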
| [((20, 11, 20, 26), 'docopt.docopt', 'docopt', ({(20, 18, 20, 25): '__doc__'}, {}), '(__doc__)', False, 'from docopt import docopt\n'), ((28, 29, 28, 52), 'os.walk', 'os.walk', ({(28, 37, 28, 51): "args['--root']"}, {}), "(args['--root'])", False, 'import os\n'), ((43, 8, 43, 34), 'pickle.dump', 'pickle.dump', ({(43, 20, 43, 26): 'counts', (43, 28, 43, 33): 'ofile'}, {}), '(counts, ofile)', False, 'import pickle\n'), ((34, 32, 34, 54), 'os.path.normpath', 'os.path.normpath', ({(34, 49, 34, 53): 'root'}, {}), '(root)', False, 'import os\n')] |
jdalzatec/EulerProject | Problem_30/main.py | 2f2f4d9c009be7fd63bb229bb437ea75db77d891 | total = 0
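# Upper-bound note: a d-digit number is at least 10**(d - 1), but its digit fifth-power
# sum is at most d * 9**5; for d = 7 that is 413343 < 1000000, so no candidate has more
# than six digits and the range below is a safe search limit.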
for n in range(1000, 1000000):
suma = 0
for i in str(n):
suma += int(i)**5
if (n == suma):
total += n
print(total) | [] |
celikten/armi | armi/physics/fuelCycle/settings.py | 4e100dd514a59caa9c502bd5a0967fd77fdaf00e | """Settings for generic fuel cycle code."""
import re
import os
from armi.settings import setting
from armi.operators import settingsValidation
CONF_ASSEMBLY_ROTATION_ALG = "assemblyRotationAlgorithm"
CONF_ASSEM_ROTATION_STATIONARY = "assemblyRotationStationary"
CONF_CIRCULAR_RING_MODE = "circularRingMode"
CONF_CIRCULAR_RING_ORDER = "circularRingOrder"
CONF_CUSTOM_FUEL_MANAGEMENT_INDEX = "customFuelManagementIndex"
CONF_RUN_LATTICE_BEFORE_SHUFFLING = "runLatticePhysicsBeforeShuffling"
CONF_SHUFFLE_LOGIC = "shuffleLogic"
CONF_PLOT_SHUFFLE_ARROWS = "plotShuffleArrows"
CONF_FUEL_HANDLER_NAME = "fuelHandlerName"
CONF_JUMP_RING_NUM = "jumpRingNum"
CONF_LEVELS_PER_CASCADE = "levelsPerCascade"
def getFuelCycleSettings():
"""Define settings for fuel cycle."""
settings = [
setting.Setting(
CONF_ASSEMBLY_ROTATION_ALG,
default="",
label="Assembly Rotation Algorithm",
description="The algorithm to use to rotate the detail assemblies while shuffling",
options=["", "buReducingAssemblyRotation", "simpleAssemblyRotation"],
enforcedOptions=True,
),
setting.Setting(
CONF_ASSEM_ROTATION_STATIONARY,
default=False,
label="Rotate stationary assems",
description=(
"Whether or not to rotate assemblies that are not shuffled."
"This can only be True if 'rotation' is true."
),
),
setting.Setting(
CONF_CIRCULAR_RING_MODE,
default=False,
description="Toggle between circular ring definitions to hexagonal ring definitions",
label="Use Circular Rings",
),
setting.Setting(
CONF_CIRCULAR_RING_ORDER,
default="angle",
description="Order by which locations are sorted in circular rings for equilibrium shuffling",
label="Eq. circular sort type",
options=["angle", "distance", "distanceSmart"],
),
setting.Setting(
CONF_CUSTOM_FUEL_MANAGEMENT_INDEX,
default=0,
description=(
"An index that determines which of various options is used in management. "
"Useful for optimization sweeps. "
),
label="Custom Shuffling Index",
),
setting.Setting(
CONF_RUN_LATTICE_BEFORE_SHUFFLING,
default=False,
description=(
"Forces the Generation of Cross Sections Prior to Shuffling the Fuel Assemblies. "
"Note: This is recommended when performing equilibrium shuffling branching searches."
),
label="Generate XS Prior to Fuel Shuffling",
),
setting.Setting(
CONF_SHUFFLE_LOGIC,
default="",
label="Shuffle Logic",
description=(
"Python script written to handle the fuel shuffling for this case. "
"This is user-defined per run as a dynamic input."
),
# schema here could check if file exists, but this is a bit constraining in testing.
# For example, some tests have relative paths for this but aren't running in
# the right directory, and IsFile doesn't seem to work well with relative paths.
# This is left here as an FYI about how we could check existence of files if we get
# around these problem.
# schema=vol.All(
# vol.IsFile(), # pylint: disable=no-value-for-parameter
# msg="Shuffle logic input must be an existing file",
# ),
),
setting.Setting(
CONF_FUEL_HANDLER_NAME,
default="",
label="Fuel Handler Name",
description="The name of the FuelHandler class in the shuffle logic module to activate",
),
setting.Setting(
CONF_PLOT_SHUFFLE_ARROWS,
default=False,
description="Make plots with arrows showing each move.",
label="Plot shuffle arrows",
),
setting.Setting(
CONF_JUMP_RING_NUM, default=8, label="Jump Ring Number", description="None"
),
setting.Setting(
CONF_LEVELS_PER_CASCADE,
default=14,
label="Move per cascade",
description="None",
),
]
return settings
def getFuelCycleSettingValidators(inspector):
queries = []
queries.append(
settingsValidation.Query(
lambda: bool(inspector.cs["shuffleLogic"])
^ bool(inspector.cs["fuelHandlerName"]),
"A value was provided for `fuelHandlerName` or `shuffleLogic`, but not "
"the other. Either both `fuelHandlerName` and `shuffleLogic` should be "
"defined, or neither of them.",
"",
inspector.NO_ACTION,
)
)
    # Check whether the user's fuel-shuffling input code (which lives outside ARMI's version control) needs fixes.
# These are basically auto-migrations for untracked code using
# the ARMI API. (This may make sense at a higher level)
regex_solutions = [
(
r"(#{0,20}?)[^\s#]*output\s*?\((.*?)(,\s*[1-3]{1}\s*)\)",
r"\1runLog.important(\2)",
),
(
r"(#{0,20}?)[^\s#]*output\s*?\((.*?)(,\s*[4-5]{1,2}\s*)\)",
r"\1runLog.info(\2)",
),
(
r"(#{0,20}?)[^\s#]*output\s*?\((.*?)(,\s*[6-8]{1,2}\s*)\)",
r"\1runLog.extra(\2)",
),
(
r"(#{0,20}?)[^\s#]*output\s*?\((.*?)(,\s*\d{1,2}\s*)\)",
r"\1runLog.debug(\2)",
),
(r"(#{0,20}?)[^\s#]*output\s*?\((.*?)\)", r"\1runLog.important(\2)"),
(r"output = self.cs.output", r""),
(r"cs\.getSetting\(\s*([^\)]+)\s*\)", r"cs[\1]"),
(r"cs\.setSetting\(\s*([^\)]+)\s*,\s*([^\)]+)\s*\)", r"cs[\1] = \2"),
(
r"import\s*armi\.components\s*as\s*components",
r"from armi.reactor import components",
),
(r"\[['\"]caseTitle['\"]\]", r".caseTitle"),
(
r"self.r.core.bolAssems\['(.*?)'\]",
r"self.r.blueprints.assemblies['\1']",
),
(r"copyAssembly", r"duplicate"),
]
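    # For instance, the first rule above rewrites legacy ``output`` calls, e.g.
    #   re.sub(regex_solutions[0][0], regex_solutions[0][1],
    #          "self.output('moving assembly', 1)")
    #   -> "runLog.important('moving assembly')"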
def _locateRegexOccurences():
with open(inspector._csRelativePath(inspector.cs["shuffleLogic"])) as src:
src = src.read()
matches = []
for pattern, _sub in regex_solutions:
matches += re.findall(pattern, src)
return matches
def _applyRegexSolutions():
srcFile = inspector._csRelativePath(inspector.cs["shuffleLogic"])
destFile = os.path.splitext(srcFile)[0] + "migrated.py"
with open(srcFile) as src, open(destFile, "w") as dest:
srcContent = src.read() # get the buffer content
regexContent = srcContent # keep the before and after changes separate
for pattern, sub in regex_solutions:
regexContent = re.sub(pattern, sub, regexContent)
if regexContent != srcContent:
dest.write("from armi import runLog\n")
dest.write(regexContent)
inspector.cs["shuffleLogic"] = destFile
queries.append(
settingsValidation.Query(
lambda: " " in inspector.cs["shuffleLogic"],
"Spaces are not allowed in shuffleLogic file location. You have specified {0}. "
"Shuffling will not occur.".format(inspector.cs["shuffleLogic"]),
"",
inspector.NO_ACTION,
)
)
def _clearShufflingInput():
inspector._assignCS("shuffleLogic", "")
inspector._assignCS("fuelHandlerName", "")
queries.append(
settingsValidation.Query(
lambda: inspector.cs["shuffleLogic"]
and not inspector._csRelativePathExists(inspector.cs["shuffleLogic"]),
"The specified shuffle logic file '{0}' cannot be found. "
"Shuffling will not occur.".format(inspector.cs["shuffleLogic"]),
"Clear specified file value?",
_clearShufflingInput,
)
)
queries.append(
settingsValidation.Query(
lambda: inspector.cs["shuffleLogic"]
and inspector._csRelativePathExists(inspector.cs["shuffleLogic"])
and _locateRegexOccurences(),
"The shuffle logic file {} uses deprecated code."
" It will not work unless you permit some automated changes to occur."
" The logic file will be backed up to the current directory under a timestamped name"
"".format(inspector.cs["shuffleLogic"]),
"Proceed?",
_applyRegexSolutions,
)
)
return queries
| [((24, 8, 31, 9), 'armi.settings.setting.Setting', 'setting.Setting', (), '', False, 'from armi.settings import setting\n'), ((32, 8, 40, 9), 'armi.settings.setting.Setting', 'setting.Setting', (), '', False, 'from armi.settings import setting\n'), ((41, 8, 46, 9), 'armi.settings.setting.Setting', 'setting.Setting', (), '', False, 'from armi.settings import setting\n'), ((47, 8, 53, 9), 'armi.settings.setting.Setting', 'setting.Setting', (), '', False, 'from armi.settings import setting\n'), ((54, 8, 62, 9), 'armi.settings.setting.Setting', 'setting.Setting', (), '', False, 'from armi.settings import setting\n'), ((63, 8, 71, 9), 'armi.settings.setting.Setting', 'setting.Setting', (), '', False, 'from armi.settings import setting\n'), ((72, 8, 89, 9), 'armi.settings.setting.Setting', 'setting.Setting', (), '', False, 'from armi.settings import setting\n'), ((90, 8, 95, 9), 'armi.settings.setting.Setting', 'setting.Setting', (), '', False, 'from armi.settings import setting\n'), ((96, 8, 101, 9), 'armi.settings.setting.Setting', 'setting.Setting', (), '', False, 'from armi.settings import setting\n'), ((102, 8, 104, 9), 'armi.settings.setting.Setting', 'setting.Setting', (), '', False, 'from armi.settings import setting\n'), ((105, 8, 110, 9), 'armi.settings.setting.Setting', 'setting.Setting', (), '', False, 'from armi.settings import setting\n'), ((171, 27, 171, 51), 're.findall', 're.findall', ({(171, 38, 171, 45): 'pattern', (171, 47, 171, 50): 'src'}, {}), '(pattern, src)', False, 'import re\n'), ((176, 19, 176, 44), 'os.path.splitext', 'os.path.splitext', ({(176, 36, 176, 43): 'srcFile'}, {}), '(srcFile)', False, 'import os\n'), ((182, 31, 182, 65), 're.sub', 're.sub', ({(182, 38, 182, 45): 'pattern', (182, 47, 182, 50): 'sub', (182, 52, 182, 64): 'regexContent'}, {}), '(pattern, sub, regexContent)', False, 'import re\n')] |
jclosure/donkus | nl/predictor.py | b3384447094b2ecbaff5ee9d970818313b6ee8b0 |
from nltk.corpus import gutenberg
from nltk import ConditionalFreqDist
from random import choice
#create the distribution object
cfd = ConditionalFreqDist()
## for each token count the current word given the previous word
prev_word = None
for word in gutenberg.words('austen-persuasion.txt'):
cfd[prev_word][word] += 1
prev_word = word
## start predicting at given word, say "therefore"
word = "therefore"
i = 1
## find all words that can follow the given word and choose one at random
while i<20:
print word,
lwords = cfd.get(word).keys()
follower = choice(lwords)
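    # Note: choice() picks the next word uniformly from the distinct followers seen;
    # sampling in proportion to the cfd[word][follower] counts would match the true
    # bigram distribution.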
word = follower
i += 1
| [] |
mmg-3/cloudserver | eve/workers/pykmip/bin/run_server.py | 9ff6364b2ed4f33a5135d86311a72de4caff51c1 | #!/usr/bin/env python
# Copyright (c) 2016 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging # noqa: E402
logging.basicConfig(level=logging.DEBUG)
from kmip.services.server import server # noqa: E402
if __name__ == '__main__':
print('Starting PyKMIP server on 0.0.0.0:5696')
server.main()
| [((20, 0, 20, 40), 'logging.basicConfig', 'logging.basicConfig', (), '', False, 'import logging\n'), ((26, 4, 26, 17), 'kmip.services.server.server.main', 'server.main', ({}, {}), '()', False, 'from kmip.services.server import server\n')] |
yokaze/crest-python | tests/test_tempo_event.py | c246b16ade6fd706f0e18aae797660064bddd555 | #
# test_tempo_event.py
# crest-python
#
# Copyright (C) 2017 Rue Yokaze
# Distributed under the MIT License.
#
import crest_loader
import unittest
from crest.events.meta import TempoEvent
class TestTempoEvent(unittest.TestCase):
def test_ctor(self):
TempoEvent()
TempoEvent(120)
def test_message(self):
evt = TempoEvent(120)
self.assertEqual(evt.Message, [0xFF, 0x51, 0x03, 0x07, 0xA1, 0x20])
def test_property(self):
evt = TempoEvent(120)
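        # MIDI tempo meta events store microseconds per quarter note (60,000,000 / BPM),
        # so 120 BPM <-> 500000 us, 60 BPM <-> 1000000 us and 240 BPM <-> 250000 us below.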
self.assertEqual(evt.Tempo, 120)
self.assertEqual(evt.MicroSeconds, 500000)
evt.Tempo = 60
self.assertEqual(evt.Tempo, 60)
self.assertEqual(evt.MicroSeconds, 1000000)
evt.MicroSeconds = 250000
self.assertEqual(evt.Tempo, 240)
self.assertEqual(evt.MicroSeconds, 250000)
if (__name__ == '__main__'):
unittest.main()
| [((35, 4, 35, 19), 'unittest.main', 'unittest.main', ({}, {}), '()', False, 'import unittest\n'), ((15, 8, 15, 20), 'crest.events.meta.TempoEvent', 'TempoEvent', ({}, {}), '()', False, 'from crest.events.meta import TempoEvent\n'), ((16, 8, 16, 23), 'crest.events.meta.TempoEvent', 'TempoEvent', ({(16, 19, 16, 22): '(120)'}, {}), '(120)', False, 'from crest.events.meta import TempoEvent\n'), ((19, 14, 19, 29), 'crest.events.meta.TempoEvent', 'TempoEvent', ({(19, 25, 19, 28): '120'}, {}), '(120)', False, 'from crest.events.meta import TempoEvent\n'), ((23, 14, 23, 29), 'crest.events.meta.TempoEvent', 'TempoEvent', ({(23, 25, 23, 28): '120'}, {}), '(120)', False, 'from crest.events.meta import TempoEvent\n')] |
PyJedi/quantum | tensorflow_quantum/python/differentiators/__init__.py | 3f4a3c320e048b8a8faf3a10339975d2d5366fb6 | # Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module functions for tfq.differentiators.*"""
from tensorflow_quantum.python.differentiators.adjoint import (
Adjoint,)
from tensorflow_quantum.python.differentiators.linear_combination import (
ForwardDifference,
CentralDifference,
LinearCombination,
)
from tensorflow_quantum.python.differentiators.parameter_shift import (
ParameterShift,)
from tensorflow_quantum.python.differentiators.differentiator import (
Differentiator,)
| [] |
erykoff/redmapper | tests/test_color_background.py | 23fb66c7369de784c67ce6c41ada2f1f51a84acb | import unittest
import numpy.testing as testing
import numpy as np
import fitsio
import tempfile
import os
from redmapper import ColorBackground
from redmapper import ColorBackgroundGenerator
from redmapper import Configuration
class ColorBackgroundTestCase(unittest.TestCase):
"""
Tests for the redmapper.ColorBackground and
redmapper.ColorBackgroundGenerator classes.
"""
def runTest(self):
"""
Run the ColorBackground and ColorBackgroundGenerator tests.
"""
file_name = 'test_dr8_col_bkg.fit'
file_path = 'data_for_tests'
cbkg = ColorBackground('%s/%s' % (file_path, file_name))
col1 = np.array([0.572300, 1.39560])
col2 = np.array([0.7894, 0.9564])
refmags = np.array([17.587, 18.956])
refmagindex = np.array([258, 395])
col1index = np.array([1, 17])
col2index = np.array([15, 19])
# These are new values that are based on improvements in the binning.
idl_bkg1 = np.array([0.76778, 0.80049])
idl_bkg2 = np.array([0.04012, 0.10077])
idl_bkg12 = np.array([0.01085, 0.081])
# Test color1
py_outputs = cbkg.lookup_diagonal(1, col1, refmags)
testing.assert_almost_equal(py_outputs, idl_bkg1, decimal=5)
# Test color2
py_outputs = cbkg.lookup_diagonal(2, col2, refmags)
testing.assert_almost_equal(py_outputs, idl_bkg2, decimal=5)
# Test off-diagonal
py_outputs = cbkg.lookup_offdiag(1, 2, col1, col2, refmags)
testing.assert_almost_equal(py_outputs, idl_bkg12, decimal=5)
# And a test sigma_g with the usehdrarea=True
cbkg2 = ColorBackground('%s/%s' % (file_path, file_name), usehdrarea=True)
col1 = np.array([0.572300, 1.39560, 1.0])
col2 = np.array([0.7894, 0.9564, 1.0])
refmags = np.array([17.587, 18.956, 25.0])
idl_sigma_g1 = np.array([127.698, 591.112, np.inf])
idl_sigma_g2 = np.array([7.569, 82.8938, np.inf])
# Test color1
py_outputs = cbkg2.sigma_g_diagonal(1, col1, refmags)
testing.assert_almost_equal(py_outputs, idl_sigma_g1, decimal=3)
# Test color2
py_outputs = cbkg2.sigma_g_diagonal(2, col2, refmags)
testing.assert_almost_equal(py_outputs, idl_sigma_g2, decimal=3)
#####################################################
# Now a test of the generation of a color background
conf_filename = 'testconfig.yaml'
config = Configuration(file_path + "/" + conf_filename)
tfile = tempfile.mkstemp()
os.close(tfile[0])
config.bkgfile_color = tfile[1]
config.d.nside = 128
config.d.hpix = [8421]
config.border = 0.0
cbg = ColorBackgroundGenerator(config, minrangecheck=5)
# Need to set clobber=True because the tempfile was created
cbg.run(clobber=True)
fits = fitsio.FITS(config.bkgfile_color)
# Make sure we have 11 extensions
testing.assert_equal(len(fits), 11)
# These tests are obsolete, but could be refactored
# Check the 01_01 and 01_02
# bkg11 = fits['01_01_REF'].read()
# bkg11_compare = fitsio.read(file_path + "/test_dr8_bkg_zredc_sub.fits", ext='01_01_REF')
# testing.assert_almost_equal(bkg11['BC'], bkg11_compare['BC'], 3)
# testing.assert_almost_equal(bkg11['N'], bkg11_compare['N'], 3)
# bkg12 = fits['01_02_REF'].read()
# bkg12_compare = fitsio.read(file_path + "/test_dr8_bkg_zredc_sub.fits", ext='01_02_REF')
# testing.assert_almost_equal(bkg12['BC'], bkg12_compare['BC'], 2)
# testing.assert_almost_equal(bkg12['N'], bkg12_compare['N'], 4)
# And delete the tempfile
os.remove(config.bkgfile_color)
if __name__=='__main__':
unittest.main()
| [((109, 4, 109, 19), 'unittest.main', 'unittest.main', ({}, {}), '()', False, 'import unittest\n'), ((25, 15, 25, 64), 'redmapper.ColorBackground', 'ColorBackground', ({(25, 31, 25, 63): "'%s/%s' % (file_path, file_name)"}, {}), "('%s/%s' % (file_path, file_name))", False, 'from redmapper import ColorBackground\n'), ((27, 15, 27, 44), 'numpy.array', 'np.array', ({(27, 24, 27, 43): '[0.5723, 1.3956]'}, {}), '([0.5723, 1.3956])', True, 'import numpy as np\n'), ((28, 15, 28, 41), 'numpy.array', 'np.array', ({(28, 24, 28, 40): '[0.7894, 0.9564]'}, {}), '([0.7894, 0.9564])', True, 'import numpy as np\n'), ((29, 18, 29, 44), 'numpy.array', 'np.array', ({(29, 27, 29, 43): '[17.587, 18.956]'}, {}), '([17.587, 18.956])', True, 'import numpy as np\n'), ((31, 22, 31, 42), 'numpy.array', 'np.array', ({(31, 31, 31, 41): '[258, 395]'}, {}), '([258, 395])', True, 'import numpy as np\n'), ((32, 20, 32, 37), 'numpy.array', 'np.array', ({(32, 29, 32, 36): '[1, 17]'}, {}), '([1, 17])', True, 'import numpy as np\n'), ((33, 20, 33, 38), 'numpy.array', 'np.array', ({(33, 29, 33, 37): '[15, 19]'}, {}), '([15, 19])', True, 'import numpy as np\n'), ((36, 19, 36, 47), 'numpy.array', 'np.array', ({(36, 28, 36, 46): '[0.76778, 0.80049]'}, {}), '([0.76778, 0.80049])', True, 'import numpy as np\n'), ((37, 19, 37, 47), 'numpy.array', 'np.array', ({(37, 28, 37, 46): '[0.04012, 0.10077]'}, {}), '([0.04012, 0.10077])', True, 'import numpy as np\n'), ((38, 20, 38, 46), 'numpy.array', 'np.array', ({(38, 29, 38, 45): '[0.01085, 0.081]'}, {}), '([0.01085, 0.081])', True, 'import numpy as np\n'), ((42, 8, 42, 68), 'numpy.testing.assert_almost_equal', 'testing.assert_almost_equal', (), '', True, 'import numpy.testing as testing\n'), ((46, 8, 46, 68), 'numpy.testing.assert_almost_equal', 'testing.assert_almost_equal', (), '', True, 'import numpy.testing as testing\n'), ((50, 8, 50, 69), 'numpy.testing.assert_almost_equal', 'testing.assert_almost_equal', (), '', True, 'import numpy.testing as testing\n'), ((53, 16, 53, 82), 'redmapper.ColorBackground', 'ColorBackground', (), '', False, 'from redmapper import ColorBackground\n'), ((55, 15, 55, 49), 'numpy.array', 'np.array', ({(55, 24, 55, 48): '[0.5723, 1.3956, 1.0]'}, {}), '([0.5723, 1.3956, 1.0])', True, 'import numpy as np\n'), ((56, 15, 56, 46), 'numpy.array', 'np.array', ({(56, 24, 56, 45): '[0.7894, 0.9564, 1.0]'}, {}), '([0.7894, 0.9564, 1.0])', True, 'import numpy as np\n'), ((57, 18, 57, 50), 'numpy.array', 'np.array', ({(57, 27, 57, 49): '[17.587, 18.956, 25.0]'}, {}), '([17.587, 18.956, 25.0])', True, 'import numpy as np\n'), ((59, 23, 59, 59), 'numpy.array', 'np.array', ({(59, 32, 59, 58): '[127.698, 591.112, np.inf]'}, {}), '([127.698, 591.112, np.inf])', True, 'import numpy as np\n'), ((60, 23, 60, 57), 'numpy.array', 'np.array', ({(60, 32, 60, 56): '[7.569, 82.8938, np.inf]'}, {}), '([7.569, 82.8938, np.inf])', True, 'import numpy as np\n'), ((64, 8, 64, 72), 'numpy.testing.assert_almost_equal', 'testing.assert_almost_equal', (), '', True, 'import numpy.testing as testing\n'), ((68, 8, 68, 72), 'numpy.testing.assert_almost_equal', 'testing.assert_almost_equal', (), '', True, 'import numpy.testing as testing\n'), ((74, 17, 74, 63), 'redmapper.Configuration', 'Configuration', ({(74, 31, 74, 62): "file_path + '/' + conf_filename"}, {}), "(file_path + '/' + conf_filename)", False, 'from redmapper import Configuration\n'), ((76, 16, 76, 34), 'tempfile.mkstemp', 'tempfile.mkstemp', ({}, {}), '()', False, 'import tempfile\n'), ((77, 8, 77, 26), 'os.close', 'os.close', 
({(77, 17, 77, 25): 'tfile[0]'}, {}), '(tfile[0])', False, 'import os\n'), ((83, 14, 83, 63), 'redmapper.ColorBackgroundGenerator', 'ColorBackgroundGenerator', (), '', False, 'from redmapper import ColorBackgroundGenerator\n'), ((87, 15, 87, 48), 'fitsio.FITS', 'fitsio.FITS', ({(87, 27, 87, 47): 'config.bkgfile_color'}, {}), '(config.bkgfile_color)', False, 'import fitsio\n'), ((106, 8, 106, 39), 'os.remove', 'os.remove', ({(106, 18, 106, 38): 'config.bkgfile_color'}, {}), '(config.bkgfile_color)', False, 'import os\n')] |
Exi666/MetPy | src/metpy/calc/basic.py | c3cf8b9855e0ce7c14347e9d000fc3d531a18e1c | # Copyright (c) 2008,2015,2016,2017,2018,2019 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Contains a collection of basic calculations.
These include:
* wind components
* heat index
* windchill
"""
import warnings
import numpy as np
from scipy.ndimage import gaussian_filter
from .. import constants as mpconsts
from ..package_tools import Exporter
from ..units import atleast_1d, check_units, masked_array, units
from ..xarray import preprocess_xarray
exporter = Exporter(globals())
# The following variables are constants for a standard atmosphere
t0 = 288. * units.kelvin
p0 = 1013.25 * units.hPa
@exporter.export
@preprocess_xarray
@check_units('[speed]', '[speed]')
def wind_speed(u, v):
r"""Compute the wind speed from u and v-components.
Parameters
----------
u : `pint.Quantity`
Wind component in the X (East-West) direction
v : `pint.Quantity`
Wind component in the Y (North-South) direction
Returns
-------
wind speed: `pint.Quantity`
The speed of the wind
See Also
--------
wind_components
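    Examples
    --------
    >>> from metpy.units import units
    >>> metpy.calc.wind_speed(3. * units('m/s'), 4. * units('m/s'))
    <Quantity(5.0, 'meter / second')>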
"""
speed = np.sqrt(u * u + v * v)
return speed
@exporter.export
@preprocess_xarray
@check_units('[speed]', '[speed]')
def wind_direction(u, v, convention='from'):
r"""Compute the wind direction from u and v-components.
Parameters
----------
u : `pint.Quantity`
Wind component in the X (East-West) direction
v : `pint.Quantity`
Wind component in the Y (North-South) direction
convention : str
Convention to return direction. 'from' returns the direction the wind is coming from
(meteorological convention). 'to' returns the direction the wind is going towards
(oceanographic convention). Default is 'from'.
Returns
-------
direction: `pint.Quantity`
The direction of the wind in interval [0, 360] degrees, with 360 being North, with the
direction defined by the convention kwarg.
See Also
--------
wind_components
Notes
-----
In the case of calm winds (where `u` and `v` are zero), this function returns a direction
of 0.
"""
wdir = 90. * units.deg - np.arctan2(-v, -u)
origshape = wdir.shape
wdir = atleast_1d(wdir)
    # Handle oceanographic convention
if convention == 'to':
wdir -= 180 * units.deg
elif convention not in ('to', 'from'):
raise ValueError('Invalid kwarg for "convention". Valid options are "from" or "to".')
wdir[wdir <= 0] += 360. * units.deg
# avoid unintended modification of `pint.Quantity` by direct use of magnitude
calm_mask = (np.asarray(u.magnitude) == 0.) & (np.asarray(v.magnitude) == 0.)
# np.any check required for legacy numpy which treats 0-d False boolean index as zero
if np.any(calm_mask):
wdir[calm_mask] = 0. * units.deg
return wdir.reshape(origshape).to('degrees')
@exporter.export
@preprocess_xarray
@check_units('[speed]')
def wind_components(speed, wdir):
r"""Calculate the U, V wind vector components from the speed and direction.
Parameters
----------
speed : `pint.Quantity`
The wind speed (magnitude)
wdir : `pint.Quantity`
The wind direction, specified as the direction from which the wind is
blowing (0-2 pi radians or 0-360 degrees), with 360 degrees being North.
Returns
-------
u, v : tuple of `pint.Quantity`
The wind components in the X (East-West) and Y (North-South)
directions, respectively.
See Also
--------
wind_speed
wind_direction
Examples
--------
>>> from metpy.units import units
>>> metpy.calc.wind_components(10. * units('m/s'), 225. * units.deg)
(<Quantity(7.071067811865475, 'meter / second')>,
<Quantity(7.071067811865477, 'meter / second')>)
"""
wdir = _check_radians(wdir, max_radians=4 * np.pi)
u = -speed * np.sin(wdir)
v = -speed * np.cos(wdir)
return u, v
@exporter.export
@preprocess_xarray
@check_units(temperature='[temperature]', speed='[speed]')
def windchill(temperature, speed, face_level_winds=False, mask_undefined=True):
r"""Calculate the Wind Chill Temperature Index (WCTI).
Calculates WCTI from the current temperature and wind speed using the formula
outlined by the FCM [FCMR192003]_.
Specifically, these formulas assume that wind speed is measured at
10m. If, instead, the speeds are measured at face level, the winds
need to be multiplied by a factor of 1.5 (this can be done by specifying
`face_level_winds` as `True`.)
Parameters
----------
temperature : `pint.Quantity`
The air temperature
speed : `pint.Quantity`
The wind speed at 10m. If instead the winds are at face level,
`face_level_winds` should be set to `True` and the 1.5 multiplicative
correction will be applied automatically.
face_level_winds : bool, optional
A flag indicating whether the wind speeds were measured at facial
level instead of 10m, thus requiring a correction. Defaults to
`False`.
mask_undefined : bool, optional
A flag indicating whether a masked array should be returned with
values where wind chill is undefined masked. These are values where
the temperature > 50F or wind speed <= 3 miles per hour. Defaults
to `True`.
Returns
-------
`pint.Quantity`
The corresponding Wind Chill Temperature Index value(s)
See Also
--------
heat_index
"""
# Correct for lower height measurement of winds if necessary
if face_level_winds:
# No in-place so that we copy
# noinspection PyAugmentAssignment
speed = speed * 1.5
temp_limit, speed_limit = 10. * units.degC, 3 * units.mph
speed_factor = speed.to('km/hr').magnitude ** 0.16
wcti = units.Quantity((0.6215 + 0.3965 * speed_factor) * temperature.to('degC').magnitude
- 11.37 * speed_factor + 13.12, units.degC).to(temperature.units)
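    # For example, -5 degC air with a 35 km/h wind gives a wind chill of roughly -13.6 degC.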
# See if we need to mask any undefined values
if mask_undefined:
mask = np.array((temperature > temp_limit) | (speed <= speed_limit))
if mask.any():
wcti = masked_array(wcti, mask=mask)
return wcti
@exporter.export
@preprocess_xarray
@check_units('[temperature]')
def heat_index(temperature, rh, mask_undefined=True):
r"""Calculate the Heat Index from the current temperature and relative humidity.
The implementation uses the formula outlined in [Rothfusz1990]_, which is a
multi-variable least-squares regression of the values obtained in [Steadman1979]_.
Additional conditional corrections are applied to match what the National
Weather Service operationally uses. See Figure 3 of [Anderson2013]_ for a
depiction of this algorithm and further discussion.
Parameters
----------
temperature : `pint.Quantity`
Air temperature
rh : `pint.Quantity`
The relative humidity expressed as a unitless ratio in the range [0, 1].
Can also pass a percentage if proper units are attached.
Returns
-------
`pint.Quantity`
The corresponding Heat Index value(s)
Other Parameters
----------------
mask_undefined : bool, optional
A flag indicating whether a masked array should be returned with
values masked where the temperature < 80F. Defaults to `True`.
See Also
--------
windchill
"""
temperature = atleast_1d(temperature)
rh = atleast_1d(rh)
# assign units to rh if they currently are not present
if not hasattr(rh, 'units'):
rh = rh * units.dimensionless
delta = temperature.to(units.degF) - 0. * units.degF
rh2 = rh * rh
delta2 = delta * delta
    # Simplified Heat Index -- constants converted for RH in [0, 1]
a = -10.3 * units.degF + 1.1 * delta + 4.7 * units.delta_degF * rh
# More refined Heat Index -- constants converted for RH in [0, 1]
b = (-42.379 * units.degF
+ 2.04901523 * delta
+ 1014.333127 * units.delta_degF * rh
- 22.475541 * delta * rh
- 6.83783e-3 / units.delta_degF * delta2
- 5.481717e2 * units.delta_degF * rh2
+ 1.22874e-1 / units.delta_degF * delta2 * rh
+ 8.5282 * delta * rh2
- 1.99e-2 / units.delta_degF * delta2 * rh2)
# Create return heat index
hi = np.full(np.shape(temperature), np.nan) * units.degF
# Retain masked status of temperature with resulting heat index
if hasattr(temperature, 'mask'):
hi = masked_array(hi)
# If T <= 40F, Heat Index is T
sel = (temperature <= 40. * units.degF)
if np.any(sel):
hi[sel] = temperature[sel].to(units.degF)
# If a < 79F and hi is unset, Heat Index is a
sel = (a < 79. * units.degF) & np.isnan(hi)
if np.any(sel):
hi[sel] = a[sel]
# Use b now for anywhere hi has yet to be set
sel = np.isnan(hi)
if np.any(sel):
hi[sel] = b[sel]
# Adjustment for RH <= 13% and 80F <= T <= 112F
sel = ((rh <= 13. * units.percent) & (temperature >= 80. * units.degF)
& (temperature <= 112. * units.degF))
if np.any(sel):
rh15adj = ((13. - rh * 100.) / 4.
* ((17. * units.delta_degF - np.abs(delta - 95. * units.delta_degF))
/ 17. * units.delta_degF) ** 0.5)
hi[sel] = hi[sel] - rh15adj[sel]
# Adjustment for RH > 85% and 80F <= T <= 87F
sel = ((rh > 85. * units.percent) & (temperature >= 80. * units.degF)
& (temperature <= 87. * units.degF))
if np.any(sel):
rh85adj = 0.02 * (rh * 100. - 85.) * (87. * units.delta_degF - delta)
hi[sel] = hi[sel] + rh85adj[sel]
# See if we need to mask any undefined values
if mask_undefined:
mask = np.array(temperature < 80. * units.degF)
if mask.any():
hi = masked_array(hi, mask=mask)
return hi
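# Illustrative call pattern for heat_index (a sketch; the values are hypothetical):
#     >>> from metpy.units import units
#     >>> heat_index(95. * units.degF, 0.55)                  # rh as a unitless ratio
#     >>> heat_index(95. * units.degF, 55. * units.percent)   # equivalent, rh as a percentage
#     >>> heat_index(70. * units.degF, 0.55)                  # masked: T < 80F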
@exporter.export
@preprocess_xarray
@check_units(temperature='[temperature]', speed='[speed]')
def apparent_temperature(temperature, rh, speed, face_level_winds=False, mask_undefined=True):
r"""Calculate the current apparent temperature.
Calculates the current apparent temperature based on the wind chill or heat index
as appropriate for the current conditions. Follows [NWS10201]_.
Parameters
----------
temperature : `pint.Quantity`
The air temperature
rh : `pint.Quantity`
The relative humidity expressed as a unitless ratio in the range [0, 1].
Can also pass a percentage if proper units are attached.
speed : `pint.Quantity`
The wind speed at 10m. If instead the winds are at face level,
`face_level_winds` should be set to `True` and the 1.5 multiplicative
correction will be applied automatically.
face_level_winds : bool, optional
A flag indicating whether the wind speeds were measured at facial
level instead of 10m, thus requiring a correction. Defaults to
`False`.
mask_undefined : bool, optional
A flag indicating whether a masked array should be returned with
values where wind chill or heat_index is undefined masked. For wind
chill, these are values where the temperature > 50F or
wind speed <= 3 miles per hour. For heat index, these are values
where the temperature < 80F.
Defaults to `True`.
Returns
-------
`pint.Quantity`
The corresponding apparent temperature value(s)
See Also
--------
heat_index, windchill
"""
is_not_scalar = isinstance(temperature.m, (list, tuple, np.ndarray))
temperature = atleast_1d(temperature)
rh = atleast_1d(rh)
speed = atleast_1d(speed)
# NB: mask_defined=True is needed to know where computed values exist
wind_chill_temperature = windchill(temperature, speed, face_level_winds=face_level_winds,
mask_undefined=True).to(temperature.units)
heat_index_temperature = heat_index(temperature, rh,
mask_undefined=True).to(temperature.units)
# Combine the heat index and wind chill arrays (no point has a value in both)
# NB: older numpy.ma.where does not return a masked array
app_temperature = masked_array(
np.ma.where(masked_array(wind_chill_temperature).mask,
heat_index_temperature.to(temperature.units),
wind_chill_temperature.to(temperature.units)
), temperature.units)
# If mask_undefined is False, then set any masked values to the temperature
if not mask_undefined:
app_temperature[app_temperature.mask] = temperature[app_temperature.mask]
# If no values are masked and provided temperature does not have a mask
# we should return a non-masked array
if not np.any(app_temperature.mask) and not hasattr(temperature, 'mask'):
app_temperature = np.array(app_temperature.m) * temperature.units
if is_not_scalar:
return app_temperature
else:
return atleast_1d(app_temperature)[0]
@exporter.export
@preprocess_xarray
@check_units('[pressure]')
def pressure_to_height_std(pressure):
r"""Convert pressure data to heights using the U.S. standard atmosphere [NOAA1976]_.
The implementation uses the formula outlined in [Hobbs1977]_ pg.60-61.
Parameters
----------
pressure : `pint.Quantity`
Atmospheric pressure
Returns
-------
`pint.Quantity`
The corresponding height value(s)
Notes
-----
    .. math:: Z = \frac{T_0}{\Gamma}\left[1-\left(\frac{p}{p_0}\right)^\frac{R\Gamma}{g}\right]
"""
gamma = 6.5 * units('K/km')
return (t0 / gamma) * (1 - (pressure / p0).to('dimensionless')**(
mpconsts.Rd * gamma / mpconsts.g))
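# Illustrative round trip through the standard-atmosphere helpers (a sketch; values hypothetical):
#     >>> from metpy.units import units
#     >>> z = pressure_to_height_std(850. * units.hPa)   # roughly 1.5 km
#     >>> height_to_pressure_std(z)                       # recovers roughly 850 hPa
# height_to_pressure_std is defined later in this module.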
@exporter.export
@preprocess_xarray
@check_units('[length]')
def height_to_geopotential(height):
r"""Compute geopotential for a given height.
Calculates the geopotential from height using the following formula, which is derived from
the definition of geopotential as given in [Hobbs2006]_ Pg. 69 Eq 3.21:
.. math:: \Phi = G m_e \left( \frac{1}{R_e} - \frac{1}{R_e + z}\right)
(where :math:`\Phi` is geopotential, :math:`z` is height, :math:`R_e` is average Earth
radius, :math:`G` is the (universal) gravitational constant, and :math:`m_e` is the
approximate mass of Earth.)
Parameters
----------
height : `pint.Quantity`
Height above sea level
Returns
-------
`pint.Quantity`
The corresponding geopotential value(s)
Examples
--------
>>> import metpy.calc
>>> from metpy.units import units
>>> height = np.linspace(0, 10000, num=11) * units.m
>>> geopot = metpy.calc.height_to_geopotential(height)
>>> geopot
<Quantity([ 0. 9817.46806283 19631.85526579 29443.16305887
39251.39289118 49056.54621087 58858.62446524 68657.62910064
78453.56156252 88246.42329544 98036.21574305], 'meter ** 2 / second ** 2')>
"""
# Direct implementation of formula from Hobbs yields poor numerical results (see
# gh-1075), so was replaced with algebraic equivalent.
return (mpconsts.G * mpconsts.me / mpconsts.Re) * (height / (mpconsts.Re + height))
@exporter.export
@preprocess_xarray
def geopotential_to_height(geopot):
r"""Compute height from a given geopotential.
Calculates the height from geopotential using the following formula, which is derived from
the definition of geopotential as given in [Hobbs2006]_ Pg. 69 Eq 3.21:
.. math:: z = \frac{1}{\frac{1}{R_e} - \frac{\Phi}{G m_e}} - R_e
(where :math:`\Phi` is geopotential, :math:`z` is height, :math:`R_e` is average Earth
radius, :math:`G` is the (universal) gravitational constant, and :math:`m_e` is the
approximate mass of Earth.)
Parameters
----------
    geopot : `pint.Quantity`
Geopotential
Returns
-------
`pint.Quantity`
The corresponding height value(s)
Examples
--------
>>> import metpy.calc
>>> from metpy.units import units
>>> height = np.linspace(0, 10000, num=11) * units.m
>>> geopot = metpy.calc.height_to_geopotential(height)
>>> geopot
<Quantity([ 0. 9817.46806283 19631.85526579 29443.16305887
39251.39289118 49056.54621087 58858.62446524 68657.62910064
78453.56156252 88246.42329544 98036.21574305], 'meter ** 2 / second ** 2')>
>>> height = metpy.calc.geopotential_to_height(geopot)
>>> height
<Quantity([ 0. 1000. 2000. 3000. 4000. 5000. 6000. 7000. 8000.
9000. 10000.], 'meter')>
"""
# Direct implementation of formula from Hobbs yields poor numerical results (see
# gh-1075), so was replaced with algebraic equivalent.
scaled = geopot * mpconsts.Re
return scaled * mpconsts.Re / (mpconsts.G * mpconsts.me - scaled)
@exporter.export
@preprocess_xarray
@check_units('[length]')
def height_to_pressure_std(height):
r"""Convert height data to pressures using the U.S. standard atmosphere [NOAA1976]_.
The implementation inverts the formula outlined in [Hobbs1977]_ pg.60-61.
Parameters
----------
height : `pint.Quantity`
Atmospheric height
Returns
-------
`pint.Quantity`
The corresponding pressure value(s)
Notes
-----
    .. math:: p = p_0 e^{\frac{g}{R \Gamma} \ln\left(1-\frac{Z \Gamma}{T_0}\right)}
"""
gamma = 6.5 * units('K/km')
return p0 * (1 - (gamma / t0) * height) ** (mpconsts.g / (mpconsts.Rd * gamma))
@exporter.export
@preprocess_xarray
def coriolis_parameter(latitude):
r"""Calculate the coriolis parameter at each point.
The implementation uses the formula outlined in [Hobbs1977]_ pg.370-371.
Parameters
----------
latitude : array_like
Latitude at each point
Returns
-------
`pint.Quantity`
The corresponding coriolis force at each point
"""
latitude = _check_radians(latitude, max_radians=np.pi / 2)
return (2. * mpconsts.omega * np.sin(latitude)).to('1/s')
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[length]')
def add_height_to_pressure(pressure, height):
r"""Calculate the pressure at a certain height above another pressure level.
This assumes a standard atmosphere [NOAA1976]_.
Parameters
----------
pressure : `pint.Quantity`
Pressure level
height : `pint.Quantity`
Height above a pressure level
Returns
-------
`pint.Quantity`
The corresponding pressure value for the height above the pressure level
See Also
--------
pressure_to_height_std, height_to_pressure_std, add_pressure_to_height
"""
pressure_level_height = pressure_to_height_std(pressure)
return height_to_pressure_std(pressure_level_height + height)
@exporter.export
@preprocess_xarray
@check_units('[length]', '[pressure]')
def add_pressure_to_height(height, pressure):
r"""Calculate the height at a certain pressure above another height.
This assumes a standard atmosphere [NOAA1976]_.
Parameters
----------
height : `pint.Quantity`
Height level
pressure : `pint.Quantity`
Pressure above height level
Returns
-------
`pint.Quantity`
The corresponding height value for the pressure above the height level
See Also
--------
pressure_to_height_std, height_to_pressure_std, add_height_to_pressure
"""
pressure_at_height = height_to_pressure_std(height)
return pressure_to_height_std(pressure_at_height - pressure)
@exporter.export
@preprocess_xarray
@check_units('[dimensionless]', '[pressure]', '[pressure]')
def sigma_to_pressure(sigma, psfc, ptop):
r"""Calculate pressure from sigma values.
Parameters
----------
sigma : ndarray
The sigma levels to be converted to pressure levels.
psfc : `pint.Quantity`
The surface pressure value.
ptop : `pint.Quantity`
The pressure value at the top of the model domain.
Returns
-------
`pint.Quantity`
The pressure values at the given sigma levels.
Notes
-----
Sigma definition adapted from [Philips1957]_.
.. math:: p = \sigma * (p_{sfc} - p_{top}) + p_{top}
    * :math:`p` is pressure at a given :math:`\sigma` level
* :math:`\sigma` is non-dimensional, scaled pressure
* :math:`p_{sfc}` is pressure at the surface or model floor
* :math:`p_{top}` is pressure at the top of the model domain
"""
if np.any(sigma < 0) or np.any(sigma > 1):
raise ValueError('Sigma values should be bounded by 0 and 1')
if psfc.magnitude < 0 or ptop.magnitude < 0:
raise ValueError('Pressure input should be non-negative')
return sigma * (psfc - ptop) + ptop
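# Illustrative example for sigma_to_pressure (a sketch; the values are hypothetical):
#     >>> import numpy as np
#     >>> from metpy.units import units
#     >>> sigma_to_pressure(np.array([1.0, 0.9, 0.5, 0.0]), 1000. * units.hPa, 100. * units.hPa)
#     # -> [1000. 910. 550. 100.] hPa: sigma = 1 at the surface, sigma = 0 at the model top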
@exporter.export
@preprocess_xarray
def smooth_gaussian(scalar_grid, n):
"""Filter with normal distribution of weights.
Parameters
----------
scalar_grid : `pint.Quantity`
Some n-dimensional scalar grid. If more than two axes, smoothing
is only done across the last two.
n : int
Degree of filtering
Returns
-------
`pint.Quantity`
The filtered 2D scalar grid
Notes
-----
This function is a close replication of the GEMPAK function GWFS,
but is not identical. The following notes are incorporated from
the GEMPAK source code:
This function smoothes a scalar grid using a moving average
low-pass filter whose weights are determined by the normal
(Gaussian) probability distribution function for two dimensions.
The weight given to any grid point within the area covered by the
moving average for a target grid point is proportional to
EXP [ -( D ** 2 ) ],
where D is the distance from that point to the target point divided
by the standard deviation of the normal distribution. The value of
the standard deviation is determined by the degree of filtering
requested. The degree of filtering is specified by an integer.
This integer is the number of grid increments from crest to crest
of the wave for which the theoretical response is 1/e = .3679. If
the grid increment is called delta_x, and the value of this integer
is represented by N, then the theoretical filter response function
value for the N * delta_x wave will be 1/e. The actual response
function will be greater than the theoretical value.
The larger N is, the more severe the filtering will be, because the
response function for all wavelengths shorter than N * delta_x
will be less than 1/e. Furthermore, as N is increased, the slope
of the filter response function becomes more shallow; so, the
response at all wavelengths decreases, but the amount of decrease
lessens with increasing wavelength. (The theoretical response
function can be obtained easily--it is the Fourier transform of the
weight function described above.)
The area of the patch covered by the moving average varies with N.
As N gets bigger, the smoothing gets stronger, and weight values
farther from the target grid point are larger because the standard
deviation of the normal distribution is bigger. Thus, increasing
N has the effect of expanding the moving average window as well as
changing the values of weights. The patch is a square covering all
points whose weight values are within two standard deviations of the
mean of the two dimensional normal distribution.
The key difference between GEMPAK's GWFS and this function is that,
in GEMPAK, the leftover weight values representing the fringe of the
distribution are applied to the target grid point. In this
function, the leftover weights are not used.
When this function is invoked, the first argument is the grid to be
smoothed, the second is the value of N as described above:
GWFS ( S, N )
where N > 1. If N <= 1, N = 2 is assumed. For example, if N = 4,
then the 4 delta x wave length is passed with approximate response
1/e.
"""
# Compute standard deviation in a manner consistent with GEMPAK
n = int(round(n))
if n < 2:
n = 2
sgma = n / (2 * np.pi)
# Construct sigma sequence so smoothing occurs only in horizontal direction
nax = len(scalar_grid.shape)
# Assume the last two axes represent the horizontal directions
sgma_seq = [sgma if i > nax - 3 else 0 for i in range(nax)]
# Compute smoothed field and reattach units
res = gaussian_filter(scalar_grid, sgma_seq, truncate=2 * np.sqrt(2))
if hasattr(scalar_grid, 'units'):
res = res * scalar_grid.units
return res
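# Note on the degree-of-filtering parameter: n is converted above to a standard deviation
# of n / (2 * pi) grid increments, so a larger n widens the Gaussian and smooths more
# heavily.  A hypothetical call (any array-like grid works, units optional):
#     >>> import numpy as np
#     >>> smooth_gaussian(np.random.rand(50, 60), 8)   # ~1/e response at the 8 * delta_x wave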
@exporter.export
@preprocess_xarray
def smooth_n_point(scalar_grid, n=5, passes=1):
"""Filter with normal distribution of weights.
Parameters
----------
scalar_grid : array-like or `pint.Quantity`
Some 2D scalar grid to be smoothed.
n: int
The number of points to use in smoothing, only valid inputs
are 5 and 9. Defaults to 5.
passes : int
The number of times to apply the filter to the grid. Defaults
to 1.
Returns
-------
array-like or `pint.Quantity`
The filtered 2D scalar grid.
Notes
-----
This function is a close replication of the GEMPAK function SM5S
and SM9S depending on the choice of the number of points to use
for smoothing. This function can be applied multiple times to
create a more smoothed field and will only smooth the interior
points, leaving the end points with their original values. If a
masked value or NaN values exists in the array, it will propagate
to any point that uses that particular grid point in the smoothing
calculation. Applying the smoothing function multiple times will
    propagate NaNs further throughout the domain.
"""
if n == 9:
p = 0.25
q = 0.125
r = 0.0625
elif n == 5:
p = 0.5
q = 0.125
r = 0.0
else:
raise ValueError('The number of points to use in the smoothing '
'calculation must be either 5 or 9.')
smooth_grid = scalar_grid[:].copy()
for _i in range(passes):
smooth_grid[1:-1, 1:-1] = (p * smooth_grid[1:-1, 1:-1]
+ q * (smooth_grid[2:, 1:-1] + smooth_grid[1:-1, 2:]
+ smooth_grid[:-2, 1:-1] + smooth_grid[1:-1, :-2])
                                       + r * (smooth_grid[2:, 2:] + smooth_grid[2:, :-2]
+ smooth_grid[:-2, 2:] + smooth_grid[:-2, :-2]))
return smooth_grid
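# Note on the stencils above: the weights sum to one in both cases
# (5-point: 0.5 + 4 * 0.125 = 1; 9-point: 0.25 + 4 * 0.125 + 4 * 0.0625 = 1), so each pass
# is a pure weighted average of a point and its neighbors and leaves a uniform field unchanged.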
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[length]')
def altimeter_to_station_pressure(altimeter_value, height):
r"""Convert the altimeter measurement to station pressure.
    This function is useful for working with METARs since they do provide
    altimeter values, but not sea-level pressure or station pressure.
The following definitions of altimeter setting and station pressure
    are taken from [Smithsonian1951]_. Altimeter setting is the
pressure value to which an aircraft altimeter scale is set so that it will
indicate the altitude above mean sea-level of an aircraft on the ground at the
location for which the value is determined. It assumes a standard atmosphere [NOAA1976]_.
Station pressure is the atmospheric pressure at the designated station elevation.
Finding the station pressure can be helpful for calculating sea-level pressure
or other parameters.
Parameters
----------
altimeter_value : `pint.Quantity`
The altimeter setting value as defined by the METAR or other observation,
which can be measured in either inches of mercury (in. Hg) or millibars (mb)
height: `pint.Quantity`
Elevation of the station measuring pressure.
Returns
-------
`pint.Quantity`
The station pressure in hPa or in. Hg, which can be used to calculate sea-level
pressure
See Also
--------
altimeter_to_sea_level_pressure
Notes
-----
This function is implemented using the following equations from the
Smithsonian Handbook (1951) p. 269
Equation 1:
.. math:: A_{mb} = (p_{mb} - 0.3)F
Equation 3:
.. math:: F = \left [1 + \left(\frac{p_{0}^n a}{T_{0}} \right)
\frac{H_{b}}{p_{1}^n} \right ] ^ \frac{1}{n}
Where
:math:`p_{0}` = standard sea-level pressure = 1013.25 mb
:math:`p_{1} = p_{mb} - 0.3` when :math:`p_{0} = 1013.25 mb`
gamma = lapse rate in [NOAA1976]_ standard atmosphere below the isothermal layer
:math:`6.5^{\circ}C. km.^{-1}`
:math:`t_{0}` = standard sea-level temperature 288 K
:math:`H_{b} =` station elevation in meters (elevation for which station
pressure is given)
:math:`n = \frac{a R_{d}}{g} = 0.190284` where :math:`R_{d}` is the gas
constant for dry air
And solving for :math:`p_{mb}` results in the equation below, which is used to
calculate station pressure :math:`(p_{mb})`
.. math:: p_{mb} = \left [A_{mb} ^ n - \left (\frac{p_{0} a H_{b}}{T_0}
\right) \right] ^ \frac{1}{n} + 0.3
"""
# Gamma Value for this case
gamma = 0.0065 * units('K/m')
# N-Value
n = (mpconsts.Rd * gamma / mpconsts.g).to_base_units()
return ((altimeter_value ** n
- ((p0.to(altimeter_value.units) ** n * gamma * height) / t0)) ** (1 / n)
+ 0.3 * units.hPa)
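# Illustrative call pattern (a sketch; hypothetical station at 1000 m elevation):
#     >>> from metpy.units import units
#     >>> altimeter_to_station_pressure(1013. * units.hPa, 1000. * units.m)
#     # -> roughly 900 hPa (pressure falls by roughly 11 hPa per 100 m near sea level)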
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[length]', '[temperature]')
def altimeter_to_sea_level_pressure(altimeter_value, height, temperature):
r"""Convert the altimeter setting to sea-level pressure.
This function is useful for working with METARs since most provide
altimeter values, but not sea-level pressure, which is often plotted
on surface maps. The following definitions of altimeter setting, station pressure, and
    sea-level pressure are taken from [Smithsonian1951]_.
Altimeter setting is the pressure value to which an aircraft altimeter scale
is set so that it will indicate the altitude above mean sea-level of an aircraft
on the ground at the location for which the value is determined. It assumes a standard
atmosphere. Station pressure is the atmospheric pressure at the designated station
elevation. Sea-level pressure is a pressure value obtained by the theoretical reduction
of barometric pressure to sea level. It is assumed that atmosphere extends to sea level
below the station and that the properties of the atmosphere are related to conditions
observed at the station. This value is recorded by some surface observation stations,
but not all. If the value is recorded, it can be found in the remarks section. Finding
the sea-level pressure is helpful for plotting purposes and different calculations.
Parameters
----------
altimeter_value : 'pint.Quantity'
The altimeter setting value is defined by the METAR or other observation,
with units of inches of mercury (in Hg) or millibars (hPa)
height : 'pint.Quantity'
Elevation of the station measuring pressure. Often times measured in meters
temperature : 'pint.Quantity'
Temperature at the station
Returns
-------
'pint.Quantity'
The sea-level pressure in hPa and makes pressure values easier to compare
between different stations
See Also
--------
altimeter_to_station_pressure
Notes
-----
This function is implemented using the following equations from Wallace and Hobbs (1977)
Equation 2.29:
.. math::
\Delta z = Z_{2} - Z_{1}
= \frac{R_{d} \bar T_{v}}{g_0}ln\left(\frac{p_{1}}{p_{2}}\right)
= \bar H ln \left (\frac {p_{1}}{p_{2}} \right)
Equation 2.31:
.. math::
p_{0} = p_{g}exp \left(\frac{Z_{g}}{\bar H} \right) \\
= p_{g}exp \left(\frac{g_{0}Z_{g}}{R_{d}\bar T_{v}} \right)
    Then by substituting :math:`\Delta Z` for :math:`Z_{g}` in Equation 2.31:
.. math:: p_{sea_level} = p_{station} exp\left(\frac{\Delta z}{H}\right)
    where :math:`\Delta Z` is the elevation in meters and :math:`H = \frac{R_{d}T}{g}`
"""
# Calculate the station pressure using function altimeter_to_station_pressure()
psfc = altimeter_to_station_pressure(altimeter_value, height)
# Calculate the scale height
h = mpconsts.Rd * temperature / mpconsts.g
return psfc * np.exp(height / h)
def _check_radians(value, max_radians=2 * np.pi):
"""Input validation of values that could be in degrees instead of radians.
Parameters
----------
value : `pint.Quantity`
The input value to check.
max_radians : float
Maximum absolute value of radians before warning.
Returns
-------
`pint.Quantity`
The input value
"""
try:
value = value.to('radians').m
except AttributeError:
pass
if np.greater(np.nanmax(np.abs(value)), max_radians):
warnings.warn('Input over {} radians. '
'Ensure proper units are given.'.format(max_radians))
return value
| [((52, 12, 52, 34), 'numpy.sqrt', 'np.sqrt', ({(52, 20, 52, 33): 'u * u + v * v'}, {}), '(u * u + v * v)', True, 'import numpy as np\n'), ((103, 7, 103, 24), 'numpy.any', 'np.any', ({(103, 14, 103, 23): 'calm_mask'}, {}), '(calm_mask)', True, 'import numpy as np\n'), ((276, 7, 276, 18), 'numpy.any', 'np.any', ({(276, 14, 276, 17): 'sel'}, {}), '(sel)', True, 'import numpy as np\n'), ((281, 7, 281, 18), 'numpy.any', 'np.any', ({(281, 14, 281, 17): 'sel'}, {}), '(sel)', True, 'import numpy as np\n'), ((285, 10, 285, 22), 'numpy.isnan', 'np.isnan', ({(285, 19, 285, 21): 'hi'}, {}), '(hi)', True, 'import numpy as np\n'), ((286, 7, 286, 18), 'numpy.any', 'np.any', ({(286, 14, 286, 17): 'sel'}, {}), '(sel)', True, 'import numpy as np\n'), ((292, 7, 292, 18), 'numpy.any', 'np.any', ({(292, 14, 292, 17): 'sel'}, {}), '(sel)', True, 'import numpy as np\n'), ((301, 7, 301, 18), 'numpy.any', 'np.any', ({(301, 14, 301, 17): 'sel'}, {}), '(sel)', True, 'import numpy as np\n'), ((89, 29, 89, 47), 'numpy.arctan2', 'np.arctan2', ({(89, 40, 89, 42): '(-v)', (89, 44, 89, 46): '(-u)'}, {}), '(-v, -u)', True, 'import numpy as np\n'), ((142, 17, 142, 29), 'numpy.sin', 'np.sin', ({(142, 24, 142, 28): 'wdir'}, {}), '(wdir)', True, 'import numpy as np\n'), ((143, 17, 143, 29), 'numpy.cos', 'np.cos', ({(143, 24, 143, 28): 'wdir'}, {}), '(wdir)', True, 'import numpy as np\n'), ((202, 15, 202, 76), 'numpy.array', 'np.array', ({(202, 24, 202, 75): '(temperature > temp_limit) | (speed <= speed_limit)'}, {}), '((temperature > temp_limit) | (speed <= speed_limit))', True, 'import numpy as np\n'), ((280, 35, 280, 47), 'numpy.isnan', 'np.isnan', ({(280, 44, 280, 46): 'hi'}, {}), '(hi)', True, 'import numpy as np\n'), ((307, 15, 307, 55), 'numpy.array', 'np.array', ({(307, 24, 307, 54): 'temperature < 80.0 * units.degF'}, {}), '(temperature < 80.0 * units.degF)', True, 'import numpy as np\n'), ((648, 7, 648, 24), 'numpy.any', 'np.any', ({(648, 14, 648, 23): '(sigma < 0)'}, {}), '(sigma < 0)', True, 'import numpy as np\n'), ((648, 28, 648, 45), 'numpy.any', 'np.any', ({(648, 35, 648, 44): '(sigma > 1)'}, {}), '(sigma > 1)', True, 'import numpy as np\n'), ((960, 18, 960, 36), 'numpy.exp', 'np.exp', ({(960, 25, 960, 35): '(height / h)'}, {}), '(height / h)', True, 'import numpy as np\n'), ((101, 17, 101, 40), 'numpy.asarray', 'np.asarray', ({(101, 28, 101, 39): 'u.magnitude'}, {}), '(u.magnitude)', True, 'import numpy as np\n'), ((101, 51, 101, 74), 'numpy.asarray', 'np.asarray', ({(101, 62, 101, 73): 'v.magnitude'}, {}), '(v.magnitude)', True, 'import numpy as np\n'), ((269, 17, 269, 38), 'numpy.shape', 'np.shape', ({(269, 26, 269, 37): 'temperature'}, {}), '(temperature)', True, 'import numpy as np\n'), ((382, 11, 382, 39), 'numpy.any', 'np.any', ({(382, 18, 382, 38): 'app_temperature.mask'}, {}), '(app_temperature.mask)', True, 'import numpy as np\n'), ((383, 26, 383, 53), 'numpy.array', 'np.array', ({(383, 35, 383, 52): 'app_temperature.m'}, {}), '(app_temperature.m)', True, 'import numpy as np\n'), ((984, 28, 984, 41), 'numpy.abs', 'np.abs', ({(984, 35, 984, 40): 'value'}, {}), '(value)', True, 'import numpy as np\n'), ((553, 34, 553, 50), 'numpy.sin', 'np.sin', ({(553, 41, 553, 49): 'latitude'}, {}), '(latitude)', True, 'import numpy as np\n'), ((746, 62, 746, 72), 'numpy.sqrt', 'np.sqrt', ({(746, 70, 746, 71): '2'}, {}), '(2)', True, 'import numpy as np\n'), ((294, 48, 294, 86), 'numpy.abs', 'np.abs', ({(294, 55, 294, 85): '(delta - 95.0 * units.delta_degF)'}, {}), '(delta - 95.0 * units.delta_degF)', True, 'import 
numpy as np\n')] |
wryfi/burl | burl/core/api/views.py | 664878ce9a31695456be89c8e10e8bb612074ef6 | from rest_framework.response import Response
from rest_framework.decorators import api_view
from rest_framework.reverse import reverse
from rest_framework_simplejwt.tokens import RefreshToken
@api_view(['GET'])
def root(request, fmt=None):
return Response({
'v1': reverse('api_v1:root', request=request, format=fmt),
})
@api_view(['GET'])
def v1_root(request, fmt=None):
root_navigation = {
'redirects': reverse('api_v1:redirects:redirect-list', request=request, format=fmt),
'token': reverse('api_v1:token_root', request=request, format=fmt)
}
return Response(root_navigation)
@api_view(['GET'])
def token_root(request, fmt=None):
token_navigation = {
'auth': reverse('api_v1:token_auth', request=request, format=fmt),
'refresh': reverse('api_v1:token_refresh', request=request, format=fmt),
'verify': reverse('api_v1:token_verify', request=request, format=fmt),
}
return Response(token_navigation)
@api_view(['POST'])
def token_refresh(request):
token = request.COOKIES.get("burl_refresh_token")
if token:
refresh = RefreshToken(str(token))
access = str(refresh.access_token)
if access:
return Response({"access": access}, 200)
else:
            return Response("unauthorized", 401)
return Response("unauthorized", 401)
@api_view(['POST'])
def token_refresh_revoke(_request):
response = Response("ok")
response.delete_cookie("burl_refresh_token")
return response
| [((7, 1, 7, 18), 'rest_framework.decorators.api_view', 'api_view', ({(7, 10, 7, 17): "['GET']"}, {}), "(['GET'])", False, 'from rest_framework.decorators import api_view\n'), ((14, 1, 14, 18), 'rest_framework.decorators.api_view', 'api_view', ({(14, 10, 14, 17): "['GET']"}, {}), "(['GET'])", False, 'from rest_framework.decorators import api_view\n'), ((23, 1, 23, 18), 'rest_framework.decorators.api_view', 'api_view', ({(23, 10, 23, 17): "['GET']"}, {}), "(['GET'])", False, 'from rest_framework.decorators import api_view\n'), ((33, 1, 33, 19), 'rest_framework.decorators.api_view', 'api_view', ({(33, 10, 33, 18): "['POST']"}, {}), "(['POST'])", False, 'from rest_framework.decorators import api_view\n'), ((46, 1, 46, 19), 'rest_framework.decorators.api_view', 'api_view', ({(46, 10, 46, 18): "['POST']"}, {}), "(['POST'])", False, 'from rest_framework.decorators import api_view\n'), ((20, 11, 20, 36), 'rest_framework.response.Response', 'Response', ({(20, 20, 20, 35): 'root_navigation'}, {}), '(root_navigation)', False, 'from rest_framework.response import Response\n'), ((30, 11, 30, 37), 'rest_framework.response.Response', 'Response', ({(30, 20, 30, 36): 'token_navigation'}, {}), '(token_navigation)', False, 'from rest_framework.response import Response\n'), ((43, 11, 43, 40), 'rest_framework.response.Response', 'Response', ({(43, 20, 43, 34): '"""unauthorized"""', (43, 36, 43, 39): '(401)'}, {}), "('unauthorized', 401)", False, 'from rest_framework.response import Response\n'), ((48, 15, 48, 29), 'rest_framework.response.Response', 'Response', ({(48, 24, 48, 28): '"""ok"""'}, {}), "('ok')", False, 'from rest_framework.response import Response\n'), ((17, 21, 17, 91), 'rest_framework.reverse.reverse', 'reverse', (), '', False, 'from rest_framework.reverse import reverse\n'), ((18, 17, 18, 74), 'rest_framework.reverse.reverse', 'reverse', (), '', False, 'from rest_framework.reverse import reverse\n'), ((26, 16, 26, 73), 'rest_framework.reverse.reverse', 'reverse', (), '', False, 'from rest_framework.reverse import reverse\n'), ((27, 19, 27, 79), 'rest_framework.reverse.reverse', 'reverse', (), '', False, 'from rest_framework.reverse import reverse\n'), ((28, 18, 28, 77), 'rest_framework.reverse.reverse', 'reverse', (), '', False, 'from rest_framework.reverse import reverse\n'), ((10, 14, 10, 65), 'rest_framework.reverse.reverse', 'reverse', (), '', False, 'from rest_framework.reverse import reverse\n'), ((40, 19, 40, 52), 'rest_framework.response.Response', 'Response', ({(40, 28, 40, 46): "{'access': access}", (40, 48, 40, 51): '(200)'}, {}), "({'access': access}, 200)", False, 'from rest_framework.response import Response\n'), ((42, 19, 42, 50), 'rest_framework.response.Response', 'Response', ({(42, 28, 42, 44): "{'unauthorized'}", (42, 46, 42, 49): '(401)'}, {}), "({'unauthorized'}, 401)", False, 'from rest_framework.response import Response\n')] |
RomulusGwelt/AngularProject | ITmeetups_back/api/serializers.py | acc7083f30b1edf002da8d156be023d2432a05e4 | from rest_framework import serializers
from .models import Post, Comment, Like
from django.contrib.auth.models import User
class CurrentUserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('id', 'username', 'email')
class PostSerializer(serializers.ModelSerializer):
user = CurrentUserSerializer()
class Meta:
model = Post
fields = ('id', 'title', 'text', 'user', 'created_at')
class PostSerializer2(serializers.ModelSerializer):
user = CurrentUserSerializer
class Meta:
model = Post
fields = ('id', 'title', 'text', 'user', 'created_at')
class CommentSerializer(serializers.ModelSerializer):
user = CurrentUserSerializer()
post = PostSerializer()
class Meta:
model = Comment
fields = ('id', 'text', 'user', 'post', 'created_at')
class CommentSerializer2(serializers.ModelSerializer):
user = CurrentUserSerializer
post = PostSerializer
class Meta:
model = Comment
fields = ('id', 'text', 'user', 'post', 'created_at')
class LikeSerializer(serializers.ModelSerializer):
user = CurrentUserSerializer
post = PostSerializer
class Meta:
model = Like
fields = ('id', 'user', 'post', 'created_at')
| [] |
qurator-spk/sbb_ned | qurator/sbb_ned/embeddings/bert.py | d4cfe249f72e48913f254a58fbe0dbe6e47bd168 | from ..embeddings.base import Embeddings
from flair.data import Sentence
class BertEmbeddings(Embeddings):
def __init__(self, model_path,
layers="-1, -2, -3, -4", pooling_operation='first', use_scalar_mix=True, no_cuda=False, *args, **kwargs):
super(BertEmbeddings, self).__init__(*args, **kwargs)
self._path = model_path
self._embeddings = None
self._layers = layers
self._pooling_operation = pooling_operation
self._use_scalar_mix = use_scalar_mix
self._no_cuda = no_cuda
def get(self, keys):
if self._embeddings is None:
if self._no_cuda:
import flair
import torch
flair.device = torch.device('cpu')
from .flair_bert import BertEmbeddings
self._embeddings = BertEmbeddings(bert_model_or_path=self._path,
layers=self._layers,
pooling_operation=self._pooling_operation,
use_scalar_mix=self._use_scalar_mix)
sentences = [Sentence(key) for key in keys]
# noinspection PyUnresolvedReferences
self._embeddings.embed(sentences)
for s_idx, sentence in enumerate(sentences):
for t_idx, token in enumerate(sentence):
emb = token.embedding.cpu().numpy()
yield token.text, emb
del token
del sentence
def config(self):
return {'description': self.description()}
def description(self):
layer_str = self._layers
layer_str = layer_str.replace(' ', '')
layer_str = layer_str.replace(',', '_')
return "bert-layers_{}-pooling_{}-scalarmix_{}".format(layer_str, self._pooling_operation, self._use_scalar_mix)
| [((36, 21, 36, 34), 'flair.data.Sentence', 'Sentence', ({(36, 30, 36, 33): 'key'}, {}), '(key)', False, 'from flair.data import Sentence\n'), ((27, 31, 27, 50), 'torch.device', 'torch.device', ({(27, 44, 27, 49): '"""cpu"""'}, {}), "('cpu')", False, 'import torch\n')] |
ronaldzgithub/CryptoArbitrage | Arbitrage_Future/Arbitrage_Future/test.py | b4b7a12b7b11f3dcf950f9d2039dad4f1388530b | # !/usr/local/bin/python
# -*- coding:utf-8 -*-
import YunBi
import CNBTC
import json
import threading
import Queue
import time
import logging
import numpy
import message
import random
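# Platform on/off switches; the index order matches the open_* assignments near the
# bottom of the file: [0] cnbtc, [1] yunbi, [2] huobi, [3] okcoin.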
open_platform = [True,True,True,True]
numpy.set_printoptions(suppress=True)
# logging.basicConfig(level=logging.DEBUG,
# format="[%(asctime)20s] [%(levelname)8s] %(filename)10s:%(lineno)-5s --- %(message)s",
# datefmt="%Y-%m-%d %H:%M:%S",
# filename="log/%s.log"%time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time())),
# filemode='w')
# console = logging.StreamHandler()
# console.setLevel(logging.INFO)
# formatter = logging.Formatter("[%(asctime)20s] [%(levelname)8s] %(filename)10s:%(lineno)-5s --- %(message)s", "%Y-%m-%d %H:%M:%S")
# console.setFormatter(formatter)
# logging.getLogger('').addHandler(console)
coin_status = [-1,-1,-1,-1]
money_status = [-1,-1,-1,-1]
history = open("log/historyPrice_%s.txt"%time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime(time.time())),"a")
# output = open("journalist.txt",'a')
balance = open("log/balance%s.txt"%time.strftime('%Y_%m_%d %H_%M_%S', time.localtime(time.time())),'a')
ybQue1 = Queue.Queue()
ybQue2 = Queue.Queue()
hbQue1 = Queue.Queue()
hbQue2 = Queue.Queue()
okcQue1 = Queue.Queue()
okcQue2 = Queue.Queue()
cnbtcQue1 = Queue.Queue()
cnbtcQue2 = Queue.Queue()
ybTradeQue1 = Queue.Queue()
ybTradeQue2 = Queue.Queue()
cnbtcTradeQue1 = Queue.Queue()
cnbtcTradeQue2 = Queue.Queue()
hbTradeQue1 = Queue.Queue()
hbTradeQue2 = Queue.Queue()
okcTradeQue1 = Queue.Queue()
okcTradeQue2 = Queue.Queue()
ybAccountQue1 = Queue.Queue()
ybAccountQue2 = Queue.Queue()
cnbtcAccountQue1 = Queue.Queue()
cnbtcAccountQue2 = Queue.Queue()
hbAccountQue1 = Queue.Queue()
hbAccountQue2 = Queue.Queue()
okcAccountQue1 = Queue.Queue()
okcAccountQue2 = Queue.Queue()
alertQue = Queue.Queue()
total_trade_coin = 0
delay_time = 0.2
config = json.load(open("config.json","r"))
#####max coin # in each trade
maxTradeLimitation = float(config["MaxCoinTradeLimitation"])
tel_list = config["tel"]
# maxTradeLimitation_yb_buy_cnbtc_sell = float(config["MaxCoinTradeLimitation_yb_buy_cnbtc_sell"])
# maxTradeLimitation_yb_buy_hb_sell = float(config["MaxCoinTradeLimitation_yb_buy_hb_sell"])
# maxTradeLimitation_yb_sell_hb_buy = float(config["MaxCoinTradeLimitation_yb_sell_hb_buy"])
# maxTradeLimitation_hb_buy_cnbtc_sell = float(config["MaxCoinTradeLimitation_hb_buy_cnbtc_sell"])
# maxTradeLimitation_hb_sell_cnbtc_buy = float(config["MaxCoinTradeLimitation_hb_sell_cnbtc_buy"])
#####max coin # for each account
maxCoin = float(config["MaxCoinLimitation"])
#####if spread over this threshold, we trade
max_thres_limitation = float(config["max_thres_limitation"])
spread_threshold_yb_sell_cnbtc_buy = float(config["spread_threshold_yb_sell_cnbtc_buy"])
spread_threshold_yb_buy_cnbtc_sell = float(config["spread_threshold_yb_buy_cnbtc_sell"])
spread_threshold_yb_buy_hb_sell = float(config["spread_threshold_yb_buy_hb_sell"])
spread_threshold_yb_sell_hb_buy = float(config["spread_threshold_yb_sell_hb_buy"])
spread_threshold_hb_buy_cnbtc_sell = float(config["spread_threshold_hb_buy_cnbtc_sell"])
spread_threshold_hb_sell_cnbtc_buy = float(config["spread_threshold_hb_sell_cnbtc_buy"])
random_range = float(config["RandomRange"])
spread_threshold_yb_sell_okc_buy = float(config["spread_threshold_yb_sell_okc_buy"])
spread_threshold_yb_buy_okc_sell = float(config["spread_threshold_yb_buy_okc_sell"])
spread_threshold_okc_buy_hb_sell = float(config["spread_threshold_okc_buy_hb_sell"])
spread_threshold_okc_sell_hb_buy = float(config["spread_threshold_okc_sell_hb_buy"])
spread_threshold_okc_buy_cnbtc_sell = float(config["spread_threshold_okc_buy_cnbtc_sell"])
spread_threshold_okc_sell_cnbtc_buy = float(config["spread_threshold_okc_sell_cnbtc_buy"])
max_diff_thres = float(config["max_diff_thres"])
#######if coin # is lower than alert thres, it will increase the thres
alert_thres_coin = float(config["alert_thres_coin"])
alert_thres_money = float(config["alert_thres_money"])
thres_coin = float(config["thres_coin"])
thres_money = float(config["thres_money"])
#######max thres increase is slop*alert_thres
slope = float(config["alert_slope"])
# print max_diff_thres,alert_thres,slope
# spread_threshold = float(config["spread_threshold"])
# spread_threshold_minor = float(config["spread_threshold_minor"])
#####if we start a trade, we will accept all trade until spread reach lowest spread threshold, after that, we cancel all trade
lowest_spread_threshold = float(config["lowest_spread_threshold"])
trade_multiplier_ratio = float(config["TradeMultiplyRatio"])
# lowest_spread_threshold_minor = float(config["lowest_spread_threshold_minor"])
#####the trade price is max trade limitation*trade ratio behind the min/max price of ask/bid
trade_ratio = float(config["TradeAdvanceRatio"])
# trade_ratio_minor = float(config["TradeAdvanceRatio_minor"])
#####slippage
slippage = float(config["slippage"])
tmpThres = maxTradeLimitation*trade_ratio
# tmpThres_minor = maxTradeLimitation_minor*trade_ratio
offset_player = int(config["offset_player"])
# offset_player_minor = int(config["offset_player_minor"])
offset_coin = float(config["offset_coin"])
# offset_coin_minor = float(config["offset_coin_minor"])
######## Depth-walking helpers below: each returns a tuple of
######## [0] accumulated amount, [1] last price reached, [2] the raw depth list
def cnbtcThresCoin(thres,offset_coin,offset_player,list):
acc = 0
for i in range(offset_player,len(list)):
acc += list[i][1]
if acc > thres+offset_coin:
return (thres,list[i][0],list)
return (acc,list[-1][0],list)
def ybThresCoin(thres,offset_coin,offset_player,list):
acc = 0
for i in range(offset_player,len(list)):
acc += float(list[i][1])
if acc > thres+offset_coin:
return (thres,float(list[i][0]),list)
return (acc,float(list[-1][0]),list)
def hbThresCoin(thres,offset_coin,offset_player,list):
acc = 0
for i in range(offset_player,len(list)):
acc += float(list[i][1])
if acc > thres+offset_coin:
return (thres,float(list[i][0]),list)
return (acc,float(list[-1][0]),list)
def okcThresCoin(thres,offset_coin,offset_player,list):
acc = 0
for i in range(offset_player,len(list)):
acc += list[i][1]
if acc > thres+offset_coin:
return (thres,list[i][0],list)
return (acc,list[-1][0],list)
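# Worked example of the depth walk above (a sketch with a hypothetical order book):
#   bids = [[2000.0, 1.5], [1999.5, 2.0], [1999.0, 5.0]]   # [price, amount], best price first
#   cnbtcThresCoin(3.0, 0.0, 0, bids)  ->  (3.0, 1999.5, bids)
#   # the cumulative size 1.5 + 2.0 exceeds thres + offset_coin at the second level, so the
#   # walk stops there; offset_player skips that many levels before accumulating.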
def ybRun():
while True:
yb = ybQue1.get()
if yb == None:
ybQue1.task_done()
break
else:
while True:
depth = yb.getDepth()
if depth:
break
depth["asks"].reverse()
ybQue2.put((ybThresCoin(tmpThres,offset_coin,offset_player,depth["bids"]),depth["timestamp"]))
ybQue2.put((ybThresCoin(tmpThres,offset_coin,offset_player,depth["asks"]),depth["timestamp"]))
ybQue1.task_done()
def okcRun():
while True:
okc = okcQue1.get()
if okc == None:
okcQue1.task_done()
break
else:
while True:
depth = okc.getDepth()
if depth:
break
depth["asks"].reverse()
okcQue2.put((okcThresCoin(tmpThres,offset_coin,offset_player,depth["bids"]),"-99999999"))
okcQue2.put((okcThresCoin(tmpThres,offset_coin,offset_player,depth["asks"]),"-99999999"))
okcQue1.task_done()
def hbRun():
while True:
hb = hbQue1.get()
if hb == None:
hbQue1.task_done()
break
else:
while True:
depth = hb.getDepth()
if depth and depth["status"] == "ok":
break
# depth["tick"]["asks"].reverse()
hbQue2.put((hbThresCoin(tmpThres,offset_coin,offset_player,depth["tick"]["bids"]),depth["ts"]/1000))
hbQue2.put((hbThresCoin(tmpThres,offset_coin,offset_player,depth["tick"]["asks"]),depth["ts"]/1000))
hbQue1.task_done()
def cnbtcRun():
while True:
cnbtc = cnbtcQue1.get()
if cnbtc == None:
cnbtcQue1.task_done()
break
else:
while True:
depth = cnbtc.getDepth()
if depth:
break
depth["asks"].reverse()
cnbtcQue2.put((cnbtcThresCoin(tmpThres,offset_coin,offset_player,depth["bids"]),depth["timestamp"]))
cnbtcQue2.put((cnbtcThresCoin(tmpThres,offset_coin,offset_player,depth["asks"]),depth["timestamp"]))
cnbtcQue1.task_done()
####### Trade queue tuples: [0] exchange object, [1] "buy" or "sell",
####### [2] amount, [3] price, [4] limit price at which the order chase is abandoned
def ybTradeRun():
while True:
yb_tuple = ybTradeQue1.get()
money = 0
if yb_tuple == None:
ybTradeQue1.task_done()
break
yb = yb_tuple[0]
amount = yb_tuple[2]
remain = amount
price = yb_tuple[3]
if amount==0:
ybTradeQue2.put((0.0,0.0))
ybTradeQue1.task_done()
continue
sell = True
if yb_tuple[1] == "buy":
sell = False
times = 10
while True:
order = None
if sell:
order = yb.sell(volume = amount,price=price-slippage)
else:
order = yb.buy(volume = amount, price = price + slippage)
if order!= None:
if order.has_key("error"):
time.sleep(delay_time)
print "yb",order
continue
id = order["id"]
wait_times = 3
while wait_times>0:
wait_times-=1
time.sleep(1)
while True:
order = yb.getOrder(id)
if order!=None:
if order.has_key("error"):
time.sleep(delay_time)
print "yb",order
continue
break
print "yb",order
if order["state"] == "done":
break
if order["state"] == "done":
if sell:
print "yunbi remain sell %f"%0.0
money+=amount*(price-slippage)
ybTradeQue2.put((0.0,money))
break
else:
print "yunbi remain buy 0.0"
money-=amount*(price+slippage)
ybTradeQue2.put((0.0,money))
break
else:
# order["state"] == "wait":
while True:
order = yb.deleteOrder(id)
print "yb",order
if order!=None:
if order.has_key("error"):
print "yb,delete",order
time.sleep(delay_time)
continue
break
while True:
order = yb.getOrder(id)
print "yb",order
if order!=None:
if order.has_key("error"):
time.sleep(delay_time)
print "yb",order
continue
if order["state"] != "wait":
break
else:
time.sleep(delay_time)
# break
#todo judge whether has been deleted
if sell:
money+=float(order["executed_volume"])*(price-slippage)
remain = float(order["remaining_volume"])
print "yunbi remain sell %f"%float(order["remaining_volume"])
else:
money-=float(order["executed_volume"])*(price+slippage)
remain = float(order["remaining_volume"])
print "yunbi remain buy %f"%float(order["remaining_volume"])
if remain <=0:
ybTradeQue2.put((0.0,money))
break
print "get_price"
while True:
depth = yb.getDepth()
if depth:
depth["asks"].reverse()
break
if sell:
price_now = ybThresCoin(remain*trade_ratio,offset_coin,offset_player,depth["bids"])[1]
print "price_now yb",price_now,yb_tuple[4]
if price_now<yb_tuple[4]:
ybTradeQue2.put((remain,money))
break
else:
price_now = ybThresCoin(remain*trade_ratio,offset_coin,offset_player,depth["asks"])[1]
print "price_now yb",price_now
if price_now>yb_tuple[4]:
ybTradeQue2.put((remain,money))
break
price = price_now
amount = remain
times-=1
ybTradeQue1.task_done()
def okcTradeRun():
while True:
okc_tuple = okcTradeQue1.get()
money = 0
if okc_tuple == None:
okcTradeQue1.task_done()
break
okc = okc_tuple[0]
amount = okc_tuple[2]
remain = amount
price = okc_tuple[3]
if amount==0:
okcTradeQue2.put((0.0,0.0))
okcTradeQue1.task_done()
continue
sell = True
if okc_tuple[1] == "buy":
sell = False
times = 10
while True:
order = None
if sell:
order = okc.sell(volume = amount,price=price-slippage)
else:
order = okc.buy(volume = amount, price = price+slippage)
if order!= None:
if order["result"] != True:
print "okc",order
time.sleep(delay_time)
continue
id = order["order_id"]
wait_times = 3
while wait_times>0:
wait_times-=1
time.sleep(1)
while True:
order = okc.getOrder(id)
if order!=None:
if order["result"] != True:
time.sleep(delay_time)
print "okc",order
continue
break
print "okc",order
if order["orders"][0]["status"] == 2:
break
if order["orders"][0]["status"] == 2:
if sell:
print "okcoin remain sell %f"%0.0
money+=amount*(price-slippage)
okcTradeQue2.put((0.0,money))
break
else:
print "okcoin remain buy 0.0"
money-=amount*(price+slippage)
okcTradeQue2.put((0.0,money))
break
else:
# order["state"] == "wait":
while True:
order = okc.deleteOrder(id)
if order!=None:
if order["result"] != True:
time.sleep(delay_time)
print "okc",order
if order["error_code"]==10050:
break
continue
break
while True:
order = okc.getOrder(id)
if order!=None:
if order["result"] != True:
time.sleep(delay_time)
print "okc",order
continue
if order["orders"][0]["status"] == 2 or order["orders"][0]["status"]== -1:
break
else:
time.sleep(delay_time)
#todo judge whether has been deleted
if sell:
money+=float(order["orders"][0]["deal_amount"])*(price-slippage)
remain = float(order["orders"][0]["amount"]) - float(order["orders"][0]["deal_amount"])
print "okcoin remain sell %f"%remain
else:
money-=float(order["orders"][0]["deal_amount"])*(price+slippage)
remain = float(order["orders"][0]["amount"])-float(order["orders"][0]["deal_amount"])
print "okcoin remain buy %f"%remain
if remain<=0:
okcTradeQue2.put((0.0,money))
break
print "get_price"
while True:
depth = okc.getDepth()
if depth:
depth["asks"].reverse()
break
if sell:
price_now = okcThresCoin(remain*trade_ratio,offset_coin,offset_player,depth["bids"])[1]
print "price_now okc",price_now,okc_tuple[4]
if price_now<okc_tuple[4]:
okcTradeQue2.put((remain,money))
break
else:
price_now = okcThresCoin(remain*trade_ratio,offset_coin,offset_player,depth["asks"])[1]
print "price_now okc",price_now
if price_now>okc_tuple[4]:
okcTradeQue2.put((remain,money))
break
price = price_now
amount = remain
times-=1
okcTradeQue1.task_done()
def hbTradeRun():
while True:
hb_tuple = hbTradeQue1.get()
money = 0
if hb_tuple == None:
hbTradeQue1.task_done()
break
hb = hb_tuple[0]
amount = hb_tuple[2]
remain = amount
price = hb_tuple[3]
if amount==0:
hbTradeQue2.put((0.0,0.0))
hbTradeQue1.task_done()
continue
sell = True
if hb_tuple[1] == "buy":
sell = False
times = 10
while True:
order = None
if sell:
order = hb.sell(volume = amount,price=price-slippage)
#todo
if order!=None and order["status"] == "ok":
order = hb.place_order(order["data"])
else:
#todo
order = hb.buy(volume = amount, price = price + slippage)
if order!=None and order["status"] == "ok":
order = hb.place_order(order["data"])
if order!= None:
if order["status"]!="ok":
print "hb",order
time.sleep(delay_time)
continue
id = order["data"]
wait_times = 3
while wait_times>0:
wait_times-=1
time.sleep(1)
while True:
order = hb.getOrder(id)
if order!=None:
if order["status"]!="ok":
time.sleep(delay_time)
print "hb",order
continue
break
print "hb",order
if order["data"]["state"] == "filled":
break
#todo
if order["data"]["state"] == "filled":
if sell:
print "huobi remain sell %f"%0.0
money+=amount*(price-slippage)
hbTradeQue2.put((0.0,money))
break
else:
print "huobi remain buy 0.0"
money-=amount*(price+slippage)
hbTradeQue2.put((0.0,money))
break
else:
# order["state"] == "wait":
while True:
print id
order = hb.deleteOrder(id)
if order!=None:
if order["status"]!="ok":
if order['status'] == 'error' and order['err-code'] == 'order-orderstate-error':
break
print "hb",order
continue
break
while True:
order = hb.getOrder(id)
if order!=None:
if order["status"]!="ok":
time.sleep(delay_time)
print "hb",order
continue
print "hb",order
if order["data"]["state"] == "canceled" or order["data"]["state"] == "filled" or order["data"]["state"] == "partial-canceled" or order["data"]["state"] == "partial-filled":
break
else:
time.sleep(delay_time)
#todo judge whether has been deleted
if sell:
money+=float(order["data"]["field-amount"])*(price-slippage)
remain = float(order["data"]["amount"])-float(order["data"]["field-amount"])
print "huobi remain sell %f"%remain
else:
money-=float(order["data"]["field-amount"])*(price+slippage)
remain = float(order["data"]["amount"])-float(order["data"]["field-amount"])
print "huobi remain buy %f"%remain
if remain<=0:
hbTradeQue2.put((0.0,money))
break
print "get_price"
while True:
depth = hb.getDepth()
if depth:
break
if sell:
price_now = hbThresCoin(remain*trade_ratio,offset_coin,offset_player,depth['tick']["bids"])[1]
print "price_now hb",price_now,hb_tuple[4]
if price_now<hb_tuple[4]:
hbTradeQue2.put((remain,money))
break
else:
price_now = hbThresCoin(remain*trade_ratio,offset_coin,offset_player,depth['tick']["asks"])[1]
print "price_now hb",price_now
if price_now>hb_tuple[4]:
hbTradeQue2.put((remain,money))
break
price = price_now
amount = remain
times-=1
hbTradeQue1.task_done()
def cnbtcTradeRun():
while True:
cnbtc_tuple = cnbtcTradeQue1.get()
if cnbtc_tuple == None:
cnbtcTradeQue1.task_done()
break
# print cnbtc_tuple
money = 0;
cnbtc = cnbtc_tuple[0]
amount = cnbtc_tuple[2]
remain = amount
price = cnbtc_tuple[3]
if amount==0:
cnbtcTradeQue2.put((0.0,0.0))
cnbtcTradeQue1.task_done()
continue
buy = True
if cnbtc_tuple[1] == "sell":
buy = False
times = 10
while True:
if buy:
order = cnbtc.buy(volume = amount,price=price+slippage)
else:
order = cnbtc.sell(volume=amount,price=price-slippage)
if order!= None:
if order.has_key("code") and order["code"] != 1000:
time.sleep(delay_time)
print "cnbtc",order
continue
id = order["id"]
wait_times = 5
while wait_times>0:
wait_times-=1
time.sleep(1)
while True:
order = cnbtc.getOrder(id)
if order!=None:
break
print "cnbtc",order
####2 is done
####
if order["status"] == 2:
break
if order["status"] == 2:
if buy:
print "cnbtc remain buy ",0.0
money-=amount*(price+slippage)
cnbtcTradeQue2.put((0.0,money))
else:
print "cnbtc remain sell 0.0"
money+=amount*(price-slippage)
cnbtcTradeQue2.put((0.0,money))
break
elif order["status"] == 0 or order["status"] == 3:
while True:
order = cnbtc.deleteOrder(id)
if order!=None:
if order.has_key("code") and order["code"] != 1000:
print json.dumps(order,ensure_ascii=False)
if order["code"] == 3001:
break
time.sleep(delay_time)
continue
break
while True:
order = cnbtc.getOrder(id)
if order!=None:
# print order
if order.has_key("code") and order["code"] != 1000:
print "cnbtc",order
time.sleep(delay_time)
continue
#todo judge whether is deleted
if order["status"]==1 or order["status"] == 2:
break
else:
time.sleep(delay_time)
print "cnbtc",order
if buy:
money-=float(order["trade_amount"])*(price+slippage)
remain = float(order["total_amount"]) - float(order["trade_amount"])
print "cnbtc remain buy %f/%f"%(remain,float(order["total_amount"]))
else:
money+=float(order["trade_amount"])*(price-slippage)
remain = float(order["total_amount"]) - float(order["trade_amount"])
print "cnbtc remain sell %f/%f"%(remain,float(order["total_amount"]))
if remain<=0:
cnbtcTradeQue2.put((0.0,money))
break
else:
if buy:
money-=float(order["trade_amount"])*(price+slippage)
remain = float(order["total_amount"]) - float(order["trade_amount"])
print "cnbtc remain buy %f/%f"%(remain,float(order["total_amount"]))
else:
money+=float(order["trade_amount"])*(price-slippage)
remain = float(order["total_amount"]) - float(order["trade_amount"])
print "cnbtc remain sell %f/%f"%(remain,float(order["total_amount"]))
if remain<=0:
cnbtcTradeQue2.put((0.0,money))
break
print "get_depth"
while True:
depth = cnbtc.getDepth()
depth["asks"].reverse()
if depth:
break
if buy:
price_now = cnbtcThresCoin(remain*trade_ratio,offset_coin,offset_player,depth["asks"])[1]
print "prince_now cnbtc",price_now
if price_now>cnbtc_tuple[4]:
cnbtcTradeQue2.put((remain,money))
break
else:
price_now = cnbtcThresCoin(remain*trade_ratio,offset_coin,offset_player,depth["bids"])[1]
print "prince_now cnbtc",price_now
if price_now<cnbtc_tuple[4]:
cnbtcTradeQue2.put((remain,money))
break
price = price_now
amount = remain
times-=1
cnbtcTradeQue1.task_done()
def ybAccountRun():
while True:
yb = ybAccountQue1.get()
yb_cny = 0
yb_eth = 0
while True:
yb_acc = yb.get_account()
if yb_acc!= None:
if yb_acc.has_key("error"):
time.sleep(delay_time)
print yb_acc
continue
break
for acc in yb_acc["accounts"]:
if acc["currency"] == "cny":
yb_cny=float(acc["balance"])
elif acc["currency"] == "eth":
yb_eth= float(acc["balance"])
ybAccountQue1.task_done()
ybAccountQue2.put((yb_cny,yb_eth))
def cnbtcAccountRun():
while True:
cnbtc = cnbtcAccountQue1.get()
cnbtc_cny = 0
cnbtc_eth = 0
while True:
cnbtc_acc = cnbtc.get_account()
if cnbtc_acc!= None:
if cnbtc_acc.has_key("code") and cnbtc_acc["code"] != 1000:
time.sleep(delay_time)
print cnbtc_acc
continue
break
cnbtc_eth=cnbtc_acc["result"]["balance"]["ETH"]["amount"]
cnbtc_cny+=cnbtc_acc["result"]["balance"]["CNY"]["amount"]
cnbtcAccountQue1.task_done()
cnbtcAccountQue2.put((cnbtc_cny,cnbtc_eth))
def okcAccountRun():
while True:
time.sleep(delay_time)
okc = okcAccountQue1.get()
okc_cny = 0
okc_eth = 0
while True:
okc_acc = okc.get_account()
if okc_acc!= None:
if okc_acc["result"]!=True:
time.sleep(delay_time)
print "okc",okc_acc
continue
break
okc_eth = float(okc_acc["info"]["funds"]["free"]["eth"])
okc_cny = float(okc_acc["info"]["funds"]["free"]["cny"])
# print okc_acc
okcAccountQue1.task_done()
okcAccountQue2.put((okc_cny,okc_eth))
def hbAccountRun():
while True:
hb = hbAccountQue1.get()
hb_cny = 0
hb_eth = 0
while True:
hb_acc = hb.get_account()
if hb_acc!= None:
if hb_acc["status"]!="ok":
print hb_acc
continue
break
for mon in hb_acc["data"]["list"]:
if mon["currency"]=="cny" and mon["type"] == "trade":
hb_cny = float(mon["balance"])
if mon["currency"] == "eth" and mon["type"] == "trade":
hb_eth = float(mon["balance"])
hbAccountQue1.task_done()
hbAccountQue2.put((hb_cny,hb_eth))
import sys
import numpy.matlib
def setThreshold(cny_list,eth_list,brokerage_fee,cash_fee,thres_list_now,thres_list_origin,number,price,tick_coin,name_list):
trade_multiplier = numpy.ones([number,number])
thres_list = thres_list_origin.copy()
sell_times = eth_list/tick_coin
buy_times = cny_list/price/tick_coin
trade_broker = numpy.add.outer(brokerage_fee,brokerage_fee)*price*1.1
trade_cash = numpy.add.outer(cash_fee,numpy.zeros(cash_fee.shape[0]))*price*1.05
length = cny_list.shape[0]
print "buy_times",buy_times
print "sell_times",sell_times
tmp = buy_times.copy()
tmp[tmp>thres_money] = thres_money
tmp = (-tmp+thres_money)*slope
tmp[tmp>max_thres_limitation] = max_thres_limitation
offset = numpy.matlib.repmat(tmp,length,1)
tmp = buy_times.copy()
tmp[tmp>thres_money] = thres_money
tmp = (-tmp+thres_money)*5/thres_money
tmp[tmp>1] = 1
max_diff_thres_tmp = max(0,max_diff_thres)
tmp_mul = numpy.matlib.repmat(tmp.reshape(length,1),1,length)
trade_multiplier+=tmp_mul*trade_multiplier_ratio
tmp = numpy.matlib.repmat(tmp.reshape(length,1),1,length)
# print 123
offset_cash = -numpy.multiply(tmp,numpy.add.outer(cash_fee,numpy.zeros(cash_fee.shape[0]))*price*1.05)
# print tmp
# tmp = numpy.matlib.repmat(tmp.reshape(length,1),1,length)
# print tmp
tmp = sell_times.copy()
tmp[tmp>thres_coin] = thres_coin
tmp = (-tmp+thres_coin)*slope
tmp[tmp>max_thres_limitation] = max_thres_limitation
offset += numpy.matlib.repmat(tmp.reshape(length,1),1,length)
tmp = sell_times.copy()
tmp[tmp>thres_coin] = thres_coin
tmp = (-tmp+thres_coin)*5/thres_coin
tmp[tmp>1] = 1
tmp_mul = numpy.matlib.repmat(tmp,length,1)
trade_multiplier+=tmp_mul*trade_multiplier_ratio
tmp = numpy.matlib.repmat(tmp,length,1)
# print 123
offset_cash -= numpy.multiply(tmp,numpy.add.outer(cash_fee,numpy.zeros(cash_fee.shape[0]))*price*1.05)
# print offset
# buy_times<100
alertQue.put((buy_times,sell_times,number))
# offset[offset<max_diff_thres_tmp] = max_diff_thres_tmp
offset[offset>max_thres_limitation] = max_thres_limitation
print offset
# print offset
# print trade_broker,trade_cash,offset_cash
thres_list = trade_broker+trade_cash+offset_cash+max_diff_thres_tmp+offset+thres_list_origin
# print thres_list
thres_list[:,buy_times<=8] = 999999
thres_list[sell_times<=8,:] = 999999
buy_tmp = (thres_money-buy_times.copy())*slope
buy_tmp[buy_tmp<0] = 0
buy_tmp[buy_tmp>max_diff_thres_tmp] = max_diff_thres_tmp
buy_tmp_n_n = numpy.matlib.repmat(buy_tmp.reshape(length, 1), 1, length)
sell_tmp = (thres_coin-sell_times.copy())*slope
sell_tmp[sell_tmp<0] = 0
sell_tmp[sell_tmp>max_diff_thres_tmp] = max_diff_thres_tmp
sell_tmp_n_n = numpy.matlib.repmat(sell_tmp,length,1)
tmp_n_n = numpy.maximum(sell_tmp_n_n,buy_tmp_n_n)
# print thres_list
# print tmp_n_n
thres_list -= tmp_n_n
# thres_list -= sell_tmp
numpy.fill_diagonal(thres_list,999999)
numpy.fill_diagonal(trade_multiplier,0)
trade_multiplier[trade_multiplier>2] = 2
# print trade_multiplier
# print thres_list
# thres_list = numpy.maximum.reduce([thres_list,(trade_broker+trade_cash)])
# print buy_times<=1
# print thres_list
# result = thres_list_origin.copy()
# result[:number,:number] = thres_list
# thres_list[2,0] = 0
# thres_list[2,1] = 0
# thres_list[1,2] = 0
# thres_list[0,2] = 0
# print thres_list
return thres_list,trade_multiplier
def alert():
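    # Consumes (buy_times, sell_times, number) tuples from alertQue and sends SMS warnings via message.send_sms when a
    # platform is running low on, or completely out of, cash or coins; money_status/coin_status prevent duplicate alerts.
    # The SMS texts below are Chinese: "完全没钱了"/"完全没币了" = "completely out of money/coins",
    # "快没钱了"/"快没币了" = "almost out of money/coins".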
while True:
alertTuple = alertQue.get()
buy_times = alertTuple[0]
sell_times = alertTuple[1]
number = alertTuple[2]
for i in range(number):
if open_platform[i]:
if buy_times[i] <= 8:
if money_status[i] == 0 or money_status[i] == 1:
for tel in tel_list:
res = message.send_sms("提醒:%s的账户完全没钱了" % name_list[i], tel)
print res
money_status[i] = 2
print >> sys.stderr, "%s has no money!!!!!!!!!!!!!!!!!!!!!" % name_list[i]
elif buy_times[i] < alert_thres_money:
if money_status[i] == 0:
for tel in tel_list:
message.send_sms("提醒:%s快没钱了,只能买%f次了" % (name_list[i],buy_times[i]), tel)
money_status[i] = 1
print >> sys.stderr, "%s is low money!!!!!!!!!!!!!!!!!!!!!!" % name_list[i]
else:
money_status[i] = 0
if sell_times[i] <= 8:
if coin_status[i] == 0 or coin_status[i] == 1:
for tel in tel_list:
message.send_sms("提醒:%s的账户完全没币了" % name_list[i], tel)
coin_status[i] = 2
print >> sys.stderr, "%s has no coin!!!!!!!!!!!!!!!!!!!!!!" % name_list[i]
elif sell_times[i] < alert_thres_coin:
if coin_status[i] == 0:
for tel in tel_list:
message.send_sms("提醒:%s快没币了,只能卖%f次了" % (name_list[i],sell_times[i]), tel)
coin_status[i] = 1
print >> sys.stderr, "%s is low coin!!!!!!!!!!!!!!!!!!!!!!" % name_list[i]
else:
coin_status[i] = 0
alertQue.task_done()
import HuoBi
import OKCoin
open_okc = open_platform[3]
open_yb = open_platform[1]
open_cnbtc = open_platform[0]
open_hb = open_platform[2]
if open_yb:
yb = YunBi.Yunbi(config,"LiChen")
print yb.get_account()
else:
yb = None
# import gzip
# from StringIO import StringIO
#
# buf = StringIO(acc["name"])
# f = gzip.GzipFile(fileobj=buf)
# print f.read()
# sss = acc["name"].encode("raw_unicode_escape").decode()
# print ss
# logging.info("YB Account "+json.dumps(yb.get_account(),ensure_ascii=False))
if open_cnbtc:
cnbtc = CNBTC.CNBTC(config)
print("cnbtc Account "+str(cnbtc.get_account()))
else:
cnbtc = None
if open_hb:
hb = HuoBi.HuoBi(config)
print("HB Account "+str(hb.get_account()))
else:
hb = None
if open_okc:
okc = OKCoin.OKCoin(config)
print("OKCoin Account "+str(okc.get_account()))
okc_thread = threading.Thread(target=okcRun)
okc_thread.setDaemon(True)
okc_thread.start()
else:
okc = None
if open_yb:
yb_thread = threading.Thread(target=ybRun)
yb_thread.setDaemon(True)
yb_thread.start()
if open_cnbtc:
cnbtc_thread = threading.Thread(target=cnbtcRun)
cnbtc_thread.setDaemon(True)
cnbtc_thread.start()
if open_hb:
hb_thread = threading.Thread(target=hbRun)
hb_thread.setDaemon(True)
hb_thread.start()
if open_okc:
okc_trade_thread = threading.Thread(target=okcTradeRun)
okc_trade_thread.setDaemon(True)
okc_trade_thread.start()
if open_yb:
yb_trade_thread = threading.Thread(target=ybTradeRun)
yb_trade_thread.setDaemon(True)
yb_trade_thread.start()
if open_cnbtc:
cnbtc_trade_thread = threading.Thread(target = cnbtcTradeRun)
cnbtc_trade_thread.setDaemon(True)
cnbtc_trade_thread.start()
if open_hb:
hb_trade_thread = threading.Thread(target=hbTradeRun)
hb_trade_thread.setDaemon(True)
hb_trade_thread.start()
if open_okc:
okc_account_thread = threading.Thread(target=okcAccountRun)
okc_account_thread.setDaemon(True)
okc_account_thread.start()
if open_yb:
yb_account_thread = threading.Thread(target=ybAccountRun)
yb_account_thread.setDaemon(True)
yb_account_thread.start()
if open_cnbtc:
cnbtc_account_thread = threading.Thread(target = cnbtcAccountRun)
cnbtc_account_thread.setDaemon(True)
cnbtc_account_thread.start()
if open_hb:
hb_account_thread = threading.Thread(target=hbAccountRun)
hb_account_thread.setDaemon(True)
hb_account_thread.start()
alertThread = threading.Thread(target=alert)
alertThread.setDaemon(True)
alertThread.start()
total_coin = 0
total_money = 0
tick = 0
last_total_eth = 0
last_total_cny = 0
first_total_eth = 0
first_total_cny = 0
first = True
platform_number = 4
name_list = ["CNBTC","YunBi","HuoBi","OKCoin"]
obj_list = [cnbtc,yb,hb,okc]
que1_list = [cnbtcQue1,ybQue1,hbQue1,okcQue1]
que2_list = [cnbtcQue2,ybQue2,hbQue2,okcQue2]
trade_que1_list = [cnbtcTradeQue1,ybTradeQue1,hbTradeQue1,okcTradeQue1]
trade_que2_list = [cnbtcTradeQue2,ybTradeQue2,hbTradeQue2,okcTradeQue2]
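# Minimum-spread thresholds per ordered pair: thres_list[i][j] applies when platform i sells and platform j buys,
# with indices following name_list = [CNBTC, YunBi, HuoBi, OKCoin]; the diagonal is 999999 so a platform never trades with itself.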
thres_list = numpy.array([[999999,spread_threshold_yb_buy_cnbtc_sell,spread_threshold_hb_buy_cnbtc_sell,spread_threshold_okc_buy_cnbtc_sell],
[spread_threshold_yb_sell_cnbtc_buy,999999,spread_threshold_yb_sell_hb_buy,spread_threshold_yb_sell_okc_buy],
[spread_threshold_hb_sell_cnbtc_buy,spread_threshold_yb_buy_hb_sell,9999999,spread_threshold_okc_buy_hb_sell],
[spread_threshold_okc_sell_cnbtc_buy,spread_threshold_yb_buy_okc_sell,spread_threshold_okc_sell_hb_buy,999999]])
thres_list_origin = thres_list.copy()
has_ts = [True,True,True,False]
platform_list = []
for i in range(platform_number):
platform_list.append(
{
"name":name_list[i],
"obj":obj_list[i],
"que1":que1_list[i],
"que2":que2_list[i],
"trade_que1":trade_que1_list[i],
"trade_que2":trade_que2_list[i],
"depth_buy":None,
"depth_sell":None,
"has_ts":has_ts[i]
}
)
brokerage_fee = numpy.asarray([0.0004,0.001,0.002,0.001])
cash_fee = numpy.asarray([0.001,0.001,0.002,0.002])
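# Main arbitrage loop: hand each exchange object to its depth/account worker queues, collect order-book depths and
# balances, log them, rebuild the spread thresholds, then place a paired sell/buy on the best qualifying platform pair.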
while True:
print 'tick',tick
for platform in platform_list:
if platform["obj"]!=None:
platform["que1"].put(platform["obj"])
if open_yb:
ybAccountQue1.put(yb)
if open_okc:
okcAccountQue1.put(okc)
if open_cnbtc:
cnbtcAccountQue1.put(cnbtc)
if open_hb:
hbAccountQue1.put(hb)
for platform in platform_list:
if platform["obj"]!=None:
platform["depth_sell"] = platform["que2"].get()
platform["depth_buy"] = platform["que2"].get()
    ### each depth entry is ((amount, price, order list), timestamp):
    ### depth[0][0] is amount, depth[0][1] is price, depth[0][2] is the order list, depth[1] is the timestamp
max_diff = -1000
trade_info = dict()
average_price = 0
open_num = 0
for i in range(platform_number):
if platform_list[i]["obj"]!=None:
open_num+=1
average_price+=platform_list[i]["depth_buy"][0][1]+platform_list[i]["depth_sell"][0][1]
average_price /= open_num*2.0/1.01
print 'average_price %f'%average_price
brokerage_trade = numpy.add.outer(brokerage_fee,brokerage_fee)*average_price
cash_trade = numpy.add.outer(cash_fee,numpy.zeros(cash_fee.shape[0]))*average_price
tick+=1
if tick % 1 == 0:
total_cny = 0
total_eth = 0
yb_cny = 0
yb_eth = 0
cnbtc_cny = 0
cnbtc_eth = 0
hb_cny = 0
hb_eth = 0
okc_cny = 0
okc_eth = 0
if open_yb:
yb_cny,yb_eth = ybAccountQue2.get()
print "yb_balance:%f %f"%(yb_eth,yb_cny)
if open_okc:
okc_cny,okc_eth = okcAccountQue2.get()
print "okc_balance:%f %f"%(okc_eth,okc_cny)
if open_hb:
hb_cny,hb_eth = hbAccountQue2.get()
print "hb balance:%f %f"%(hb_eth,hb_cny)
if open_cnbtc:
cnbtc_cny,cnbtc_eth = cnbtcAccountQue2.get()
print "cnbtc balance:%f %f"%(cnbtc_eth,cnbtc_cny)
total_cny = yb_cny+hb_cny+cnbtc_cny+okc_cny
total_eth = yb_eth+hb_eth+cnbtc_eth+okc_eth
balance.write("%s %f %f %f %f %f %f %f %f %f %f\n"%(time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time())),
cnbtc_eth,cnbtc_cny,yb_eth,yb_cny,hb_eth,hb_cny,okc_eth,okc_cny,total_eth,total_cny))
history.write("%s "%time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time())))
for i in range(platform_number):
if platform_list[i]["obj"]!=None:
history.write("%f %f "%(platform_list[i]["depth_buy"][0][1],platform_list[i]["depth_sell"][0][1]))
else:
history.write('0 0 ')
history.write('\n')
cny_list = numpy.asarray([cnbtc_cny,yb_cny,hb_cny,okc_cny])
eth_list = numpy.asarray([cnbtc_eth,yb_eth,hb_eth,okc_eth])
last_total_eth = total_eth
last_total_cny = total_cny
if first:
first_total_cny = total_cny
first_total_eth = total_eth
first = False
# history.write("%s %f %f %f %f %f %f\n" % (time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())),
# yb_depth[0][1], cnbtc_depth[0][1], yb_depth[0][1] - cnbtc_depth[0][1],
# yb_depth_minor[0][1], cnbtc_depth_minor[0][1],
# cnbtc_depth_minor[0][1] - yb_depth_minor[0][1]))
balance.flush()
history.flush()
if tick%1 == 0:
thres_list,trade_multiplier = setThreshold(cny_list,eth_list,brokerage_fee,cash_fee,thres_list,thres_list_origin,platform_number,average_price,maxTradeLimitation,name_list)
# print thres_list
i1 = None
j1 = None
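        # Scan every ordered pair (i sells, j buys) and keep the pair whose spread exceeds its threshold by the most.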
for i in range(platform_number):
for j in range(platform_number):
if i!=j and platform_list[i]["obj"]!=None and platform_list[j]["obj"]!=None:
# if platform_list[i]["has_ts"] and platform_list[j]["has_ts"]:
# print i,j,int(platform_list[i]["depth_sell"][1]),int(platform_list[j]["depth_buy"][1])
# if (int(platform_list[i]["depth_sell"][1])-int(platform_list[j]["depth_buy"][1]))>5:
# continue
# print platform_list[i],platform_list[j]
if platform_list[i]["depth_sell"][0][1] - platform_list[j]["depth_buy"][0][1]>thres_list[i,j] and platform_list[i]["depth_sell"][0][1] - platform_list[j]["depth_buy"][0][1]-thres_list[i,j]>max_diff:
max_diff = platform_list[i]["depth_sell"][0][1]-platform_list[j]["depth_buy"][0][1]-thres_list[i,j]
trade_info["sell_depth"] = platform_list[i]["depth_sell"]
trade_info["buy_depth"] = platform_list[j]["depth_buy"]
trade_info["sell_name"] = platform_list[i]["name"]
trade_info["buy_name"] = platform_list[j]["name"]
trade_info["sell_que1"] = platform_list[i]["trade_que1"]
trade_info["sell_que2"] = platform_list[i]["trade_que2"]
trade_info["buy_que1"] = platform_list[j]["trade_que1"]
trade_info["buy_que2"] = platform_list[j]["trade_que2"]
trade_info["sell_obj"] = platform_list[i]["obj"]
trade_info["buy_obj"]=platform_list[j]["obj"]
i1 = i
j1 = j
if max_diff>0:
print "max_diff %f"%max_diff
buy_depth = trade_info["buy_depth"]
sell_depth = trade_info["sell_depth"]
# print("BuySide:%s timestamp:%s amount:\t%f price:\t%f"%(trade_info["buy_name"],buy_depth[1],buy_depth[0][0],buy_depth[0][1],str(buy_depth[0][2])))
# print('SellSide:%s timestamp:%s amount:\t%f price:\t%f'%(trade_info["sell_name"],sell_depth[1],sell_depth[0][0],sell_depth[0][1],str(sell_depth[0][2])))
# print 'BuySide:%s timestamp:%s amount:\t%f price:\t%f asks:%s'%(trade_info["buy_name"],buy_depth[1],buy_depth[0][0],buy_depth[0][1],str(buy_depth[0][2]))
# print 'SellSide:%s timestamp:%s amount:\t%f price:\t%f bids:%s'%(trade_info["sell_name"],sell_depth[1],sell_depth[0][0],sell_depth[0][1],str(sell_depth[0][2]))
amount = int(min(buy_depth[0][0],sell_depth[0][0])*1.0/trade_ratio*trade_multiplier[i1,j1]*100)/100.0
amount +=int((random.random()-0.5)*2*(random_range+0.01)*100)/100.0
if amount<0:
amount = 0
amount_buy=amount
amount_sell=amount_buy
limit = (buy_depth[0][1]+sell_depth[0][1])*1.0/2.0
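            # Keep net inventory near zero: if we are already long coins, shrink the buy leg; if short, shrink the sell leg.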
if total_coin>0.0001:
amount_buy = max(amount_buy-total_coin,0)
elif total_coin<-0.0001:
amount_sell = max(amount_sell+total_coin,0)
print "%s buy %f coins at %f and limit %f" %(trade_info["buy_name"],amount_buy,buy_depth[0][1],limit-lowest_spread_threshold/2.0)
trade_info["buy_que1"].put((trade_info["buy_obj"],"buy",amount_buy,buy_depth[0][1],limit-lowest_spread_threshold/2.0))
print "%s sell %f coins at %f and limit %f" %(trade_info["sell_name"],amount_sell,sell_depth[0][1],limit+lowest_spread_threshold/2.0)
trade_info["sell_que1"].put((trade_info["sell_obj"],"sell",amount_sell,sell_depth[0][1],limit+lowest_spread_threshold/2.0))
sell_remain = trade_info["sell_que2"].get()
buy_remain = trade_info["buy_que2"].get()
# output.write('%f, %f, %f, %f\n'%(sell_remain[0]-amount_sell,amount_buy-buy_remain[0],buy_remain[1],sell_remain[1]))
# output.flush()
total_coin+=sell_remain[0]-amount_sell-buy_remain[0]+amount_buy
total_money+=sell_remain[1]+buy_remain[1]
print "%s_remain:%f\t %s_remain:%f,total_remain:%f"%(trade_info["buy_name"],buy_remain[0],trade_info["sell_name"],sell_remain[0],maxCoin)
print"coin:%f,money:%f"%(total_coin,total_money)
maxCoin-=max(sell_remain[0],buy_remain[0])
# if maxCoin<0:
# hbQue1.put(None)
# cnbtcQue1.put(None)
# hbTradeQue1.put(None)
# cnbtcTradeQue1.put(None)
# break
else:
# average_price = 0
for i in range(platform_number):
for j in range(platform_number):
if i!=j and platform_list[i]["obj"]!=None and platform_list[j]["obj"]!=None:
print "no trade %s sell:%f %s buy:%f diff:%15f thres:%20f diff_brokerage:%20f"%(platform_list[i]["name"],platform_list[i]["depth_sell"][0][1],platform_list[j]["name"],platform_list[j]["depth_buy"][0][1],
platform_list[i]["depth_sell"][0][1]-platform_list[j]["depth_buy"][0][1],thres_list[i,j],platform_list[i]["depth_sell"][0][1]-platform_list[j]["depth_buy"][0][1]-thres_list[i,j])
# average_price+=platform_list[i]["depth_buy"][0][1]+platform_list[i]["depth_sell"][0][1]
# average_price/=2.0*platform_number
print average_price
# print "no trade yb sell:%f cnbtc buy:%f diff:%f"%(yb_depth_sell[0][1],cnbtc_depth_buy[0][1],yb_depth_sell[0][1]-cnbtc_depth_buy[0][1])
# print "no trade hb sell:%f cnbtc buy:%f diff:%f"%(hb_depth_sell[0][1],cnbtc_depth_buy[0][1],hb_depth_sell[0][1]-cnbtc_depth_buy[0][1])
# print "no trade yb buy:%f cnbtc sell:%f diff:%f"%(yb_depth_buy[0][1],cnbtc_depth_sell[0][1],cnbtc_depth_sell[0][1]-yb_depth_buy[0][1])
# print "no trade hb buy:%f cnbtc sell:%f diff:%f"%(hb_depth_buy[0][1],cnbtc_depth_sell[0][1],cnbtc_depth_sell[0][1]-hb_depth_buy[0][1])
# print "no trade yb buy:%f hb sell:%f diff:%f"%(yb_depth_buy[0][1],hb_depth_sell[0][1],hb_depth_sell[0][1]-yb_depth_buy[0][1])
# print "no trade hb buy:%f yb sell:%f diff:%f"%(hb_depth_buy[0][1],yb_depth_sell[0][1],yb_depth_sell[0][1]-hb_depth_buy[0][1])
print "balance %f %f diff: %f %f %f first:%f %f"%(total_eth,total_cny, total_eth - last_total_eth,total_cny - last_total_cny,(total_eth - last_total_eth)*2000.0,
total_eth - first_total_eth,total_cny - first_total_cny)
print '\n'
#
# if hb_depth_sell[0][1]-cnbtc_depth_buy[0][1]>spread_threshold_hb_sell_cnbtc_buy and abs(int(cnbtc_depth_buy[1])-int(hb_depth_sell[1])<=3) and hb_depth_sell[0][1]-cnbtc_depth_buy[0][1]>max_diff:
# if cnbtc_depth_sell[0][1]-hb_depth_buy[0][1]>spread_threshold_hb_buy_cnbtc_sell and abs(int(hb_depth_buy[1])-int(cnbtc_depth_sell[1])<=3) and cnbtc_depth_sell[0][1]-hb_depth_buy[0][1]>max_diff:
# max_diff = cnbtc_depth_sell[0][1]-hb_depth_buy[0][1]
# trade_info["sell_depth"] = cnbtc_depth_sell
# trade_info["buy_depth"] = hb_depth_buy
# trade_info["sell_name"] = "CNBTC"
# trade_info["buy_name"] = "HuoBi"
# trade_info["sell_que1"] = cnbtcTradeQue1
# trade_info["sell_que2"] = cnbtcTradeQue2
# trade_info["buy_que1"] = hbTradeQue1
# trade_info["buy_que2"] = hbTradeQue2
# trade_info["buy_obj"] = hb
# trade_info["sell_obj"]=cnbtc
# if hb_depth_sell[0][1]-yb_depth_buy[0][1]>spread_threshold_yb_buy_hb_sell and abs(int(yb_depth_buy[1])-int(hb_depth_sell[1])<=3) and hb_depth_sell[0][1]-yb_depth_buy[0][1]>max_diff:
# max_diff = hb_depth_sell[0][1]-yb_depth_buy[0][1]
# trade_info["sell_depth"] = hb_depth_sell
# trade_info["buy_depth"] = yb_depth_buy
# trade_info["sell_name"] = "HuoBi"
# trade_info["buy_name"] = "YunBi"
# trade_info["sell_que1"] = hbTradeQue1
# trade_info["sell_que2"] = hbTradeQue2
# trade_info["buy_que1"] = ybTradeQue1
# trade_info["buy_que2"] = ybTradeQue2
# trade_info["sell_obj"] = hb
# trade_info["buy_obj"]=yb
# if yb_depth_sell[0][1]-hb_depth_buy[0][1]>spread_threshold_yb_sell_hb_buy and abs(int(hb_depth_buy[1])-int(yb_depth_sell[1])<=3) and yb_depth_sell[0][1]-hb_depth_buy[0][1]>max_diff:
# max_diff = yb_depth_sell[0][1]-hb_depth_buy[0][1]
# trade_info["sell_depth"] = yb_depth_sell
# trade_info["buy_depth"] = hb_depth_buy
# trade_info["sell_name"] = "YunBi"
# trade_info["buy_name"] = "HuoBi"
# trade_info["sell_que1"] = ybTradeQue1
# trade_info["sell_que2"] = ybTradeQue2
# trade_info["buy_que1"] = hbTradeQue1
# trade_info["buy_que2"] = hbTradeQue2
# trade_info["sell_obj"] = yb
# trade_info["buy_obj"]=hb
# if yb_depth_sell[0][1]-cnbtc_depth_buy[0][1]>spread_threshold_yb_sell_cnbtc_buy and abs(int(cnbtc_depth_buy[1])-int(yb_depth_sell[1])<=3) and yb_depth_sell[0][1]-cnbtc_depth_buy[0][1]>max_diff:
# max_diff = yb_depth_sell[0][1]-cnbtc_depth_buy[0][1]
# trade_info["sell_depth"] = yb_depth_sell
# trade_info["buy_depth"] = cnbtc_depth_buy
# trade_info["sell_name"] = "YunBi"
# trade_info["buy_name"] = "CNBTC"
# trade_info["sell_que1"] = ybTradeQue1
# trade_info["sell_que2"] = ybTradeQue2
# trade_info["buy_que1"] = cnbtcTradeQue1
# trade_info["buy_que2"] = cnbtcTradeQue2
# trade_info["sell_obj"] = yb
# trade_info["buy_obj"]=cnbtc
# if cnbtc_depth_sell[0][1]-yb_depth_buy[0][1]>spread_threshold_yb_sell_cnbtc_buy and abs(int(cnbtc_depth_sell[1])-int(yb_depth_buy[1])<=3) and cnbtc_depth_sell[0][1]-yb_depth_buy[0][1]>max_diff:
# max_diff = cnbtc_depth_sell[0][1]-yb_depth_buy[0][1]
# trade_info["sell_depth"] = cnbtc_depth_sell
# trade_info["buy_depth"] = yb_depth_buy
# trade_info["sell_name"] = "CNBTC"
# trade_info["buy_name"] = "YunBi"
# trade_info["sell_que1"] = cnbtcTradeQue1
# trade_info["sell_que2"] = cnbtcTradeQue2
# trade_info["buy_que1"] = ybTradeQue1
# trade_info["buy_que2"] = ybTradeQue2
# trade_info["sell_obj"] = cnbtc
# trade_info["buy_obj"]=yb
# if open_okc:
# if okc_depth_sell[0][1]-cnbtc_depth_buy[0][1]>spread_threshold_okc_sell_cnbtc_buy and okc_depth_sell[0][1]-cnbtc_depth_buy[0][1]>max_diff:
# max_diff = okc_depth_sell[0][1]-cnbtc_depth_buy[0][1]
# trade_info["sell_depth"] = okc_depth_sell
# trade_info["buy_depth"] = cnbtc_depth_buy
# trade_info["sell_name"] = "OKCoin"
# trade_info["buy_name"] = "CNBTC"
# trade_info["sell_que1"] = okcTradeQue1
# trade_info["sell_que2"] = okcTradeQue2
# trade_info["buy_que1"] = cnbtcTradeQue1
# trade_info["buy_que2"] = cnbtcTradeQue2
# trade_info["sell_obj"] = okc
# trade_info["buy_obj"]=cnbtc
# if cnbtc_depth_sell[0][1]-okc_depth_buy[0][1]>spread_threshold_okc_buy_cnbtc_sell and cnbtc_depth_sell[0][1]-okc_depth_buy[0][1]>max_diff:
# max_diff = cnbtc_depth_sell[0][1]-okc_depth_buy[0][1]
# trade_info["sell_depth"] = cnbtc_depth_sell
# trade_info["buy_depth"] = okc_depth_buy
# trade_info["sell_name"] = "CNBTC"
# trade_info["buy_name"] = "OKCoin"
# trade_info["sell_que1"] = cnbtcTradeQue1
# trade_info["sell_que2"] = cnbtcTradeQue2
# trade_info["buy_que1"] = okcTradeQue1
# trade_info["buy_que2"] = okcTradeQue2
# trade_info["buy_obj"] = okc
# trade_info["sell_obj"]=cnbtc
# if hb_depth_sell[0][1]-okc_depth_buy[0][1]>spread_threshold_okc_buy_hb_sell and hb_depth_sell[0][1]-okc_depth_buy[0][1]>max_diff:
# max_diff = hb_depth_sell[0][1]-okc_depth_buy[0][1]
# trade_info["sell_depth"] = hb_depth_sell
# trade_info["buy_depth"] = okc_depth_buy
# trade_info["sell_name"] = "HuoBi"
# trade_info["buy_name"] = "OKCoin"
# trade_info["sell_que1"] = hbTradeQue1
# trade_info["sell_que2"] = hbTradeQue2
# trade_info["buy_que1"] = okcTradeQue1
# trade_info["buy_que2"] = okcTradeQue2
# trade_info["sell_obj"] = hb
# trade_info["buy_obj"]=okc
# if okc_depth_sell[0][1]-hb_depth_buy[0][1]>spread_threshold_okc_sell_hb_buy and okc_depth_sell[0][1]-hb_depth_buy[0][1]>max_diff:
# max_diff = okc_depth_sell[0][1]-hb_depth_buy[0][1]
# trade_info["sell_depth"] = okc_depth_sell
# trade_info["buy_depth"] = hb_depth_buy
# trade_info["sell_name"] = "OKCoin"
# trade_info["buy_name"] = "HuoBi"
# trade_info["sell_que1"] = okcTradeQue1
# trade_info["sell_que2"] = okcTradeQue2
# trade_info["buy_que1"] = hbTradeQue1
# trade_info["buy_que2"] = hbTradeQue2
# trade_info["sell_obj"] = okc
# trade_info["buy_obj"]=hb
# if yb_depth_sell[0][1]-okc_buy[0][1]>spread_threshold_yb_sell_cnbtc_buy and yb_depth_sell[0][1]-cnbtc_depth_buy[0][1]>max_diff:
# max_diff = yb_depth_sell[0][1]-cnbtc_depth_buy[0][1]
# trade_info["sell_depth"] = yb_depth_sell
# trade_info["buy_depth"] = cnbtc_depth_buy
# trade_info["sell_name"] = "YunBi"
# trade_info["buy_name"] = "CNBTC"
# trade_info["sell_que1"] = ybTradeQue1
# trade_info["sell_que2"] = ybTradeQue2
# trade_info["buy_que1"] = cnbtcTradeQue1
# trade_info["buy_que2"] = cnbtcTradeQue2
# trade_info["sell_obj"] = yb
# trade_info["buy_obj"]=cnbtc
# if cnbtc_depth_sell[0][1]-yb_depth_buy[0][1]>spread_threshold_yb_sell_cnbtc_buy and cnbtc_depth_sell[0][1]-yb_depth_buy[0][1]>max_diff:
# max_diff = cnbtc_depth_sell[0][1]-yb_depth_buy[0][1]
# trade_info["sell_depth"] = cnbtc_depth_sell
# trade_info["buy_depth"] = yb_depth_buy
# trade_info["sell_name"] = "CNBTC"
# trade_info["buy_name"] = "YunBi"
# trade_info["sell_que1"] = cnbtcTradeQue1
# trade_info["sell_que2"] = cnbtcTradeQue2
# trade_info["buy_que1"] = ybTradeQue1
# trade_info["buy_que2"] = ybTradeQue2
# trade_info["sell_obj"] = cnbtc
# trade_info["buy_obj"]=yb
# if hb_depth_sell[0][1]-cnbtc_depth_buy[0][1]>spread_threshold_hb_sell_cnbtc_buy and abs(int(cnbtc_depth_buy[1])-int(hb_depth_sell[1])<=3) and hb_depth_sell[0][1]-cnbtc_depth_buy[0][1]>max_diff:
# print "start trade major"
#
# elif yb_depth_sell[0][1]-cnbtc_depth_buy[0][1]>spread_threshold_yb_sell_cnbtc_buy and abs(int(cnbtc_depth_buy[1])-int(yb_depth_sell[1])<=3):
# print 'CNBTC: timestamp:%s amount:\t%f price:\t%f asks:%s'%(cnbtc_depth_buy[1],cnbtc_depth_buy[0][0],cnbtc_depth_buy[0][1],str(cnbtc_depth_buy[0][2]))
# print 'YUNBI: timestamp:%s amount:\t%f price:\t%f bids:%s'%(yb_depth_sell[1],yb_depth_sell[0][0],yb_depth_sell[0][1],str(yb_depth_sell[0][2]))
# print "start trade major"
# amount = min(cnbtc_depth_buy[0][0],yb_depth_sell[0][0])*1.0/trade_ratio
# amount_buy=amount
# amount_sell=amount_buy
# limit = (cnbtc_depth_buy[0][1]+yb_depth_sell[0][1])*1.0/2.0
# if total_coin>0.0001:
# amount_buy = max(amount_buy-total_coin,0)
# elif total_coin<-0.0001:
# amount_sell = max(amount_sell+total_coin,0)
# print "cnbtc buy %f coins at %f and limit %f" %(amount_buy,cnbtc_depth_buy[0][1],limit-lowest_spread_threshold/2.0)
# cnbtcTradeQue1.put((cnbtc,"buy",amount_buy,cnbtc_depth_buy[0][1],limit-lowest_spread_threshold/2.0))
# print "yb sell %f coins at %f and limit %f" %(amount_sell,yb_depth_sell[0][1],limit+lowest_spread_threshold/2.0)
# ybTradeQue1.put((yb,"sell",amount_sell,yb_depth_sell[0][1],limit+lowest_spread_threshold/2.0))
# cnbtc_remain = cnbtcTradeQue2.get()
# yb_remain = ybTradeQue2.get()
# output.write('%f, %f, %f, %f\n'%(yb_remain[0]-amount_sell,amount_buy-cnbtc_remain[0],yb_remain[1],cnbtc_remain[1]))
# output.flush()
# total_coin+=yb_remain[0]-amount_sell-cnbtc_remain[0]+amount_buy
# total_money+=yb_remain[1]+cnbtc_remain[1]
# print "cnbtc_remain:%f\t yb_remain:%f,total_remain:%f"%(cnbtc_remain[0],yb_remain[0],maxCoin)
# print"coin:%f,money:%f"%(total_coin,total_money)
# maxCoin-=max(yb_remain[0],cnbtc_remain[0])
# if maxCoin<0:
# ybQue1.put(None)
# cnbtcQue1.put(None)
# ybTradeQue1.put(None)
# cnbtcTradeQue1.put(None)
# break
#
# # elif False:
# elif cnbtc_depth_sell[0][1]-yb_depth_buy[0][1]>spread_threshold_yb_buy_cnbtc_sell and abs(int(cnbtc_depth_sell[1])-int(yb_depth_buy[1])<=3):
# print 'CNBTC: timestamp:%s amount:\t%f price:\t%f bids:%s'%(cnbtc_depth_sell[1],cnbtc_depth_sell[0][0],cnbtc_depth_sell[0][1],str(cnbtc_depth_sell[0][2]))
# print 'YUNBI: timestamp:%s amount:\t%f price:\t%f asks:%s'%(yb_depth_buy[1],yb_depth_buy[0][0],yb_depth_buy[0][1],str(yb_depth_buy[0][2]))
# print "start trade minor"
# amount = min(cnbtc_depth_sell[0][0], yb_depth_buy[0][0]) * 1.0 / trade_ratio
# amount_buy = amount
# amount_sell = amount_buy
# limit = (cnbtc_depth_sell[0][1] + yb_depth_buy[0][1]) * 1.0 / 2.0
# if total_coin > 0.01:
# amount_buy = max(amount_buy - total_coin, 0)
# elif total_coin < -0.01:
# amount_sell = max(amount_sell + total_coin, 0)
# print "cnbtc sell %f coins at %f and limit %f" % (amount_sell, cnbtc_depth_sell[0][1], limit + lowest_spread_threshold/ 2.0)
# cnbtcTradeQue1.put((cnbtc, "sell", amount_sell, cnbtc_depth_sell[0][1], limit + lowest_spread_threshold / 2.0))
# print "yb buy %f coins at %f and limit %f" % (amount_buy, yb_depth_buy[0][1], limit - lowest_spread_threshold / 2.0)
# ybTradeQue1.put(
# (yb, "buy", amount_buy, yb_depth_buy[0][1], limit - lowest_spread_threshold / 2.0))
# cnbtc_remain = cnbtcTradeQue2.get()
# yb_remain = ybTradeQue2.get()
# output.write('%f, %f, %f, %f\n' % (
# amount_buy - yb_remain[0], cnbtc_remain[0] - amount_sell, yb_remain[1], cnbtc_remain[1]))
# total_coin += -yb_remain[0] - amount_sell + cnbtc_remain[0] + amount_buy
# total_money += yb_remain[1] + cnbtc_remain[1]
# print "cnbtc_remain:%f\t yb_remain:%f,total_remain:%f" % (cnbtc_remain[0], yb_remain[0], maxCoin)
# print"coin:%f,money:%f" % (total_coin, total_money)
# maxCoin -= max(yb_remain[0], cnbtc_remain[0])
# if maxCoin < 0:
# ybQue1.put(None)
# cnbtcQue1.put(None)
# ybTradeQue1.put(None)
# cnbtcTradeQue1.put(None)
# break
# # elif False:
# elif cnbtc_depth_sell[0][1]-hb_depth_buy[0][1]>spread_threshold_hb_buy_cnbtc_sell and abs(int(cnbtc_depth_sell[1])-int(hb_depth_buy[1])<=3):
# print 'CNBTC: timestamp:%s amount:\t%f price:\t%f bids:%s'%(cnbtc_depth_sell[1],cnbtc_depth_sell[0][0],cnbtc_depth_sell[0][1],str(cnbtc_depth_sell[0][2]))
# print 'HuoBI: timestamp:%s amount:\t%f price:\t%f asks:%s'%(hb_depth_buy[1],hb_depth_buy[0][0],hb_depth_buy[0][1],str(hb_depth_buy[0][2]))
# print "start trade minor"
# amount = min(cnbtc_depth_sell[0][0], hb_depth_buy[0][0]) * 1.0 / trade_ratio
# amount_buy = amount
# amount_sell = amount_buy
# limit = (cnbtc_depth_sell[0][1] + hb_depth_buy[0][1]) * 1.0 / 2.0
# if total_coin > 0.01:
# amount_buy = max(amount_buy - total_coin, 0)
# elif total_coin < -0.01:
# amount_sell = max(amount_sell + total_coin, 0)
# print "cnbtc sell %f coins at %f and limit %f" % (amount_sell, cnbtc_depth_sell[0][1], limit + lowest_spread_threshold/ 2.0)
# cnbtcTradeQue1.put((cnbtc, "sell", amount_sell, cnbtc_depth_sell[0][1], limit + lowest_spread_threshold / 2.0))
# print "hb buy %f coins at %f and limit %f" % (amount_buy, hb_depth_buy[0][1], limit - lowest_spread_threshold / 2.0)
# hbTradeQue1.put(
# (hb, "buy", amount_buy, hb_depth_buy[0][1], limit - lowest_spread_threshold / 2.0))
# cnbtc_remain = cnbtcTradeQue2.get()
# hb_remain = hbTradeQue2.get()
# output.write('%f, %f, %f, %f\n' % (
# amount_buy - hb_remain[0], cnbtc_remain[0] - amount_sell, hb_remain[1], cnbtc_remain[1]))
# total_coin += -hb_remain[0] - amount_sell + cnbtc_remain[0] + amount_buy
# total_money += hb_remain[1] + cnbtc_remain[1]
# print "cnbtc_remain:%f\t hb_remain:%f,total_remain:%f" % (cnbtc_remain[0], hb_remain[0], maxCoin)
# print"coin:%f,money:%f" % (total_coin, total_money)
# maxCoin -= max(hb_remain[0], cnbtc_remain[0])
# if maxCoin < 0:
# hbQue1.put(None)
# cnbtcQue1.put(None)
# hbTradeQue1.put(None)
# cnbtcTradeQue1.put(None)
# break
# else:
# # print "total coin: %f total_cny %f"%(total_eth,total_cny)
# # print "yunbi ",str(yb.get_account())
# # print "cnbtc ",str(cnbtc.get_account())
# print cnbtc.get_account()
# cnbtc.getDepth()
# print cnbtc.buy(volume=0.01,price=1461)
# print cnbtc.get_account()
# hft = HaiFengTeng.HaiFengTeng(config)
# hft.login()
# yb = YunBi.Yunbi(config,"YunBi2")
# yb.get_account()
# yb.buy(volume=0.001,price=9999.0)
# yb.getOrder()
# print yb.getDepth()
| [] |
cudmore/startupnotify | startuptweet.py | 76b61b295ae7049e597fa05457a6696e624c4955 | #!/usr/bin/python3
"""
Author: Robert Cudmore
Date: 20181013
Purpose: Send a Tweet with IP and MAC address of a Raspberry Pi
Install:
pip3 install tweepy
Usage:
python3 startuptweet.py 'this is my tweet'
"""
import tweepy
import sys
import socket
import subprocess
from uuid import getnode as get_mac
from datetime import datetime
# Create variables for each key, secret, token
from my_config import hash_tag
from my_config import consumer_key
from my_config import consumer_secret
from my_config import access_token
from my_config import access_token_secret
message = ''
if len( sys.argv ) > 1:
message = sys.argv[1]
# Set up OAuth and integrate with API
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
#
thetime = datetime.now().strftime('%Y%m%d %H:%M:%S')
ip = subprocess.check_output(['hostname', '--all-ip-addresses'])
ip = ip.decode('utf-8').strip()
hostname = socket.gethostname()
mac = get_mac()
mac = hex(mac)
tweet = thetime + ' ' + hostname + ' ' + ip + ' ' + mac + ' ' + message + ' ' + hash_tag
print('tweeting:', tweet)
api.update_status(status=tweet)
| [((33, 7, 33, 57), 'tweepy.OAuthHandler', 'tweepy.OAuthHandler', ({(33, 27, 33, 39): 'consumer_key', (33, 41, 33, 56): 'consumer_secret'}, {}), '(consumer_key, consumer_secret)', False, 'import tweepy\n'), ((35, 6, 35, 22), 'tweepy.API', 'tweepy.API', ({(35, 17, 35, 21): 'auth'}, {}), '(auth)', False, 'import tweepy\n'), ((41, 5, 41, 64), 'subprocess.check_output', 'subprocess.check_output', ({(41, 29, 41, 63): "['hostname', '--all-ip-addresses']"}, {}), "(['hostname', '--all-ip-addresses'])", False, 'import subprocess\n'), ((44, 11, 44, 31), 'socket.gethostname', 'socket.gethostname', ({}, {}), '()', False, 'import socket\n'), ((46, 6, 46, 15), 'uuid.getnode', 'get_mac', ({}, {}), '()', True, 'from uuid import getnode as get_mac\n'), ((39, 10, 39, 24), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime\n')] |
VW-Stephen/pySpiderScrape | distributed/db.py | 861d7289743d5b65916310448526a58b381fde8d | #!/usr/bin/python
from bs4 import BeautifulSoup
import sqlite3
class DB:
"""
Abstraction for the profile database
"""
def __init__(self, filename):
"""
Creates a new connection to the database
filename - The name of the database file to use
"""
self.Filename = filename
self.Connection = sqlite3.connect(filename)
self.Cursor = self.Connection.cursor()
def SaveProfile(self, data):
"""
Saves the profile to the database
data - A dictionary of profile information
"""
self.Cursor.execute("INSERT INTO profiles (url, e0, e1, e2, e3, e4, e5, e6, e7, e8, gender, age, orientation, status, location) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", (data['url'], data['e0'], data['e1'], data['e2'], data['e3'], data['e4'], data['e5'], data['e6'], data['e7'], data['e8'], data['gender'], data['age'], data['orientation'], data['status'], data['location']))
self.Connection.commit()
def HasVisited(self, url):
"""
Returns true if the given URL is in the database, false otherwise
url - The URL to check
"""
self.Cursor.execute("SELECT 1 FROM profiles WHERE url = ? LIMIT 1", (url,))
return self.Cursor.fetchone() is not None
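
# Example usage (illustrative; assumes a 'profiles' table with the columns referenced above already exists):
#   db = DB('profiles.db')
#   url = 'http://example.com/profile/123'
#   if not db.HasVisited(url):
#       db.SaveProfile({'url': url, 'e0': '', 'e1': '', 'e2': '', 'e3': '', 'e4': '', 'e5': '', 'e6': '', 'e7': '',
#                       'e8': '', 'gender': 'F', 'age': 30, 'orientation': 'straight', 'status': 'single',
#                       'location': 'Nowhere, USA'})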
| [((17, 26, 17, 51), 'sqlite3.connect', 'sqlite3.connect', ({(17, 42, 17, 50): 'filename'}, {}), '(filename)', False, 'import sqlite3\n')] |
bansal-shubham/stopstalk-deployment | private/scripts/recheck-invalid-handles.py | 6392eace490311be103292fdaff9ae215e4db7e6 | """
Copyright (c) 2015-2019 Raj Patel([email protected]), StopStalk
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import requests, bs4
import sites
# Constants to be used in case of request failures
SERVER_FAILURE = "SERVER_FAILURE"
NOT_FOUND = "NOT_FOUND"
OTHER_FAILURE = "OTHER_FAILURE"
REQUEST_FAILURES = (SERVER_FAILURE, NOT_FOUND, OTHER_FAILURE)
def get_invalid_handle_method(site):
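    # e.g. get_invalid_handle_method("CodeChef") resolves to sites.codechef.Profile.is_invalid_handle
    # ("CodeChef" is an illustrative key; the real keys come from current.SITES)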
site_class = getattr(sites, site.lower())
invalid_handle_method = getattr(site_class.Profile, "is_invalid_handle")
return invalid_handle_method
if __name__ == "__main__":
ihtable = db.invalid_handle
atable = db.auth_user
cftable = db.custom_friend
stable = db.submission
nrtable = db.next_retrieval
mapping = {}
handle_to_row = {}
for site in current.SITES:
mapping[site] = get_invalid_handle_method(site)
handle_to_row[site] = {}
impossiblehandle = "thisreallycantbeahandle308"
assert(all(map(lambda site: get_invalid_handle_method(site)(impossiblehandle), current.SITES.keys())))
def populate_handle_to_row(table):
for row in db(table).select():
for site in current.SITES:
site_handle = row[site.lower() + "_handle"]
if site_handle:
if handle_to_row[site].has_key(site_handle):
handle_to_row[site][site_handle].append(row)
else:
handle_to_row[site][site_handle] = [row]
populate_handle_to_row(atable)
populate_handle_to_row(cftable)
# for site in current.SITES:
# print site
# for site_handle in handle_to_row[site]:
# print "\t", site_handle
# for row in handle_to_row[site][site_handle]:
# print "\t\t", row.first_name, row.last_name, row.stopstalk_handle
update_dict = {"stopstalk_rating": 0,
"stopstalk_prev_rating": 0,
"per_day": 0.0,
"per_day_change": "0.0",
"authentic": False}
final_delete_query = False
cnt = 0
for row in db(ihtable).iterselect():
# If not an invalid handle anymore
if handle_to_row[row.site].has_key(row.handle) and mapping[row.site](row.handle) is False:
cnt += 1
print row.site, row.handle, "deleted"
for row_obj in handle_to_row[row.site][row.handle]:
print "\t", row_obj.stopstalk_handle, "updated"
update_dict[row.site.lower() + "_lr"] = current.INITIAL_DATE
row_obj.update_record(**update_dict)
if "user_id" in row_obj:
# Custom user
db(nrtable.custom_user_id == row_obj.id).update(**{row.site.lower() + "_delay": 0})
else:
db(nrtable.user_id == row_obj.id).update(**{row.site.lower() + "_delay": 0})
final_delete_query |= ((stable.site == row.site) & \
(stable.stopstalk_handle == row_obj.stopstalk_handle))
del update_dict[row.site.lower() + "_lr"]
row.delete_record()
if cnt >= 10:
if final_delete_query:
db(final_delete_query).delete()
cnt = 0
final_delete_query = False
if final_delete_query:
db(final_delete_query).delete()
| [] |
gpminsuk/onnxmltools | onnxmltools/convert/keras/_parse.py | 4e88929a79a1018183f58e2d5e032dd639839dd2 | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import numbers  # numbers.Integral is used below when normalizing batch dimensions

import tensorflow as tf
from keras.models import Model
from keras.layers import Layer, InputLayer
from ...proto import onnx
from ..common._container import KerasModelContainer
from ..common._topology import Topology
from ..common.data_types import *
def _extract_inbound_nodes(model):
if hasattr(model, 'inbound_nodes'):
return model.inbound_nodes
elif hasattr(model, '_inbound_nodes'):
return model._inbound_nodes
else:
raise ValueError('Failed to find inbound_nodes and _inbound_nodes when parsing Keras model')
def extract_model_input_and_output_shapes(model, default_batch_size):
if hasattr(model, 'input_shape'):
if not isinstance(model.input_shape, list):
input_shapes = [list(model.input_shape)]
else:
input_shapes = [list(shape) for shape in model.input_shape]
elif hasattr(model, 'input_shapes'):
input_shapes = [list(shape) for shape in model.input_shapes]
else:
raise ValueError('Fail to extract model input shape(s)')
for shape in input_shapes:
if not isinstance(shape[0], numbers.Integral):
shape[0] = default_batch_size
if hasattr(model, 'output_shape'):
if not isinstance(model.output_shape, list):
output_shapes = [list(model.output_shape)]
else:
output_shapes = [list(shape) for shape in model.output_shape]
elif hasattr(model, 'output_shapes'):
output_shapes = [list(shape) for shape in model.output_shapes]
else:
raise ValueError('Fail to extract model output shape(s)')
for shape in output_shapes:
if not isinstance(shape[0], numbers.Integral):
shape[0] = default_batch_size
return input_shapes, output_shapes
def determine_tensor_type(tensor, default_batch_size, keras_shape=None):
# keras_shape can overwrite the shaped defined in Tensorflow tensor
if keras_shape is None:
tensor_shape = [d.value if d.value is not None else 'None' for d in tensor.shape]
else:
tensor_shape = [d if d is not None else 'None' for d in keras_shape]
# Adjust batch size if needed
if tensor_shape[0] == 'None':
tensor_shape[0] = default_batch_size
# Determine the tensor's element type
tensor_type = tensor.dtype
if tensor_type in [tf.int8, tf.int16, tf.int32, tf.int64]:
return Int64TensorType(shape=tensor_shape)
elif tensor_type in [tf.float16, tf.float32, tf.float64]:
return FloatTensorType(shape=tensor_shape)
else:
raise ValueError('Unable to find out a correct type for tensor %s' % tensor)
def parse_keras(model, initial_types=None, targeted_onnx=onnx.__version__):
'''
The main parsing function of Keras Model and Sequential objects.
:param model: A Keras Model or Sequential object
:param initial_types: A list providing some types for some root variables. Each element is a tuple of a variable
name and a type defined in data_types.py.
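                           For example (illustrative variable name): initial_types=[('input_1', FloatTensorType([1, 784]))]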
:param targeted_onnx: a version string such as `1.1.2` or `1.2.1` for specifying the ONNX version used to produce
the output model.
    :return: a Topology object. It's an intermediate representation of the input Keras model
'''
raw_model_container = KerasModelContainer(model)
topology = Topology(raw_model_container, default_batch_size=1, initial_types=initial_types,
targeted_onnx=targeted_onnx)
scope = topology.declare_scope('__root__')
# Each inbound node defines an evaluation of the underlining model (if the model is called multiple times, it may
# contain several inbound nodes). According to the tensors specified in those inbound nodes, we declare the roots
# and leaves of the computational graph described by the Keras input model.
for node in _extract_inbound_nodes(model):
input_shapes, output_shapes = extract_model_input_and_output_shapes(model, topology.default_batch_size)
# Declare inputs for a specific model execution
for tensor, shape in zip(node.input_tensors, input_shapes):
raw_model_container.add_input_name(tensor.name)
tensor_type = determine_tensor_type(tensor, topology.default_batch_size, list(shape))
scope.get_local_variable_or_declare_one(tensor.name, tensor_type)
# Declare outputs for a specific model execution
for tensor, shape in zip(node.output_tensors, output_shapes):
raw_model_container.add_output_name(tensor.name)
tensor_type = determine_tensor_type(tensor, topology.default_batch_size, list(shape))
scope.get_local_variable_or_declare_one(tensor.name, tensor_type)
# For each model execution, we call a parsing function to create a computational (sub-)graph because ONNX has no
# model/layer sharing.
for node in _extract_inbound_nodes(model):
_parse_keras(topology, scope, model, node)
topology.root_names = [variable.onnx_name for variable in scope.variables.values()]
return topology
def _parse_keras(topology, parent_scope, model, inbound_node):
if isinstance(model, Model):
scope = topology.declare_scope('scope')
# Declare output variables so that they can be connected with the variables produced in layers and sub-models
for layer in model.layers:
for node in _extract_inbound_nodes(layer):
for tensor in node.output_tensors:
tensor_type = determine_tensor_type(tensor, topology.default_batch_size)
scope.declare_local_variable(tensor.name, tensor_type)
# Recursively call the parsing function
for layer in model.layers:
for node in _extract_inbound_nodes(layer):
_parse_keras(topology, scope, layer, node)
        # Connect the variables declared when parsing the input model with the actual model inputs. inbound_node has the
        # actual inputs, while the whole graph is declared only via the first inbound node of the input model.
        # That is, for a shared (sub-)model, we may declare it several times, and each time we connect its I/O with
        # the I/O specified in an inbound node.
for parent_tensor, local_tensor in zip(inbound_node.input_tensors, _extract_inbound_nodes(model)[0].input_tensors):
parent_tensor_type = determine_tensor_type(parent_tensor, topology.default_batch_size)
local_tensor_type = determine_tensor_type(local_tensor, topology.default_batch_size)
parent_variable = parent_scope.get_local_variable_or_declare_one(parent_tensor.name, parent_tensor_type)
local_variable = scope.get_local_variable_or_declare_one(local_tensor.name, local_tensor_type)
operator = scope.declare_local_operator('identity')
operator.inputs.append(parent_variable)
operator.outputs.append(local_variable)
        # Connect the variables declared when parsing the input model with the actual model outputs. inbound_node has the
        # actual outputs, while the whole graph is declared only via the first inbound node of the input model.
for parent_tensor, local_tensor in zip(inbound_node.output_tensors, _extract_inbound_nodes(model)[0].output_tensors):
parent_tensor_type = determine_tensor_type(parent_tensor, topology.default_batch_size)
local_tensor_type = determine_tensor_type(local_tensor, topology.default_batch_size)
parent_variable = parent_scope.get_local_variable_or_declare_one(parent_tensor.name, parent_tensor_type)
local_variable = scope.get_local_variable_or_declare_one(local_tensor.name, local_tensor_type)
operator = scope.declare_local_operator('identity')
operator.inputs.append(local_variable)
operator.outputs.append(parent_variable)
elif isinstance(model, Layer):
if isinstance(model, InputLayer):
return
operator = parent_scope.declare_local_operator(type(model), raw_model=model)
# Simply connect the layer's I/O with variables declared in the parent scope. Note that it may create input
# variables in the parent scope because we only declare output variables in the beginning of _parse_keras(...)
for parent_tensor in inbound_node.input_tensors:
tensor_type = determine_tensor_type(parent_tensor, topology.default_batch_size)
operator.inputs.append(parent_scope.get_local_variable_or_declare_one(parent_tensor.name, tensor_type))
for parent_tensor in inbound_node.output_tensors:
tensor_type = determine_tensor_type(parent_tensor, topology.default_batch_size)
operator.outputs.append(parent_scope.get_local_variable_or_declare_one(parent_tensor.name, tensor_type))
else:
raise RuntimeError('Unsupported Keras component %s' % type(model))
| [] |
cahartsell/Scenic | src/scenic/core/regions.py | 2e7979011aef426108687947668d9ba6f5439136 | """Objects representing regions in space."""
import math
import random
import itertools
import numpy
import scipy.spatial
import shapely.geometry
import shapely.ops
from scenic.core.distributions import Samplable, RejectionException, needsSampling
from scenic.core.lazy_eval import valueInContext
from scenic.core.vectors import Vector, OrientedVector, VectorDistribution
from scenic.core.geometry import RotatedRectangle
from scenic.core.geometry import sin, cos, hypot, findMinMax, pointIsInCone, averageVectors
from scenic.core.geometry import headingOfSegment, triangulatePolygon, plotPolygon, polygonUnion
from scenic.core.type_support import toVector
from scenic.core.utils import cached, areEquivalent
def toPolygon(thing):
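	# Return the Shapely geometry underlying `thing` (polygon(s) or line string), or None if it is random or has none.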
if needsSampling(thing):
return None
if hasattr(thing, 'polygon'):
return thing.polygon
if hasattr(thing, 'polygons'):
return thing.polygons
if hasattr(thing, 'lineString'):
return thing.lineString
return None
def regionFromShapelyObject(obj, orientation=None):
"""Build a 'Region' from Shapely geometry."""
assert obj.is_valid, obj
if obj.is_empty:
return nowhere
elif isinstance(obj, (shapely.geometry.Polygon, shapely.geometry.MultiPolygon)):
return PolygonalRegion(polygon=obj, orientation=orientation)
elif isinstance(obj, (shapely.geometry.LineString, shapely.geometry.MultiLineString)):
return PolylineRegion(polyline=obj, orientation=orientation)
else:
raise RuntimeError(f'unhandled type of Shapely geometry: {obj}')
class PointInRegionDistribution(VectorDistribution):
"""Uniform distribution over points in a Region"""
def __init__(self, region):
super().__init__(region)
self.region = region
def sampleGiven(self, value):
return value[self.region].uniformPointInner()
def __str__(self):
return f'PointIn({self.region})'
class Region(Samplable):
"""Abstract class for regions."""
def __init__(self, name, *dependencies, orientation=None):
super().__init__(dependencies)
self.name = name
self.orientation = orientation
def sampleGiven(self, value):
return self
def intersect(self, other, triedReversed=False):
"""Get a `Region` representing the intersection of this one with another."""
if triedReversed:
return IntersectionRegion(self, other)
else:
return other.intersect(self, triedReversed=True)
@staticmethod
def uniformPointIn(region):
"""Get a uniform `Distribution` over points in a `Region`."""
return PointInRegionDistribution(region)
def uniformPoint(self):
"""Sample a uniformly-random point in this `Region`.
Can only be called on fixed Regions with no random parameters.
"""
assert not needsSampling(self)
return self.uniformPointInner()
def uniformPointInner(self):
"""Do the actual random sampling. Implemented by subclasses."""
raise NotImplementedError()
def containsPoint(self, point):
"""Check if the `Region` contains a point. Implemented by subclasses."""
raise NotImplementedError()
def containsObject(self, obj):
"""Check if the `Region` contains an :obj:`~scenic.core.object_types.Object`.
The default implementation assumes the `Region` is convex; subclasses must
override the method if this is not the case.
"""
for corner in obj.corners:
if not self.containsPoint(corner):
return False
return True
def __contains__(self, thing):
"""Check if this `Region` contains an object or vector."""
from scenic.core.object_types import Object
if isinstance(thing, Object):
return self.containsObject(thing)
vec = toVector(thing, '"X in Y" with X not an Object or a vector')
return self.containsPoint(vec)
def getAABB(self):
"""Axis-aligned bounding box for this `Region`. Implemented by some subclasses."""
raise NotImplementedError()
def orient(self, vec):
"""Orient the given vector along the region's orientation, if any."""
if self.orientation is None:
return vec
else:
return OrientedVector(vec.x, vec.y, self.orientation[vec])
def __str__(self):
return f'<Region {self.name}>'
class AllRegion(Region):
"""Region consisting of all space."""
def intersect(self, other, triedReversed=False):
return other
def containsPoint(self, point):
return True
def containsObject(self, obj):
return True
def __eq__(self, other):
return type(other) is AllRegion
def __hash__(self):
return hash(AllRegion)
class EmptyRegion(Region):
"""Region containing no points."""
def intersect(self, other, triedReversed=False):
return self
def uniformPointInner(self):
raise RejectionException(f'sampling empty Region')
def containsPoint(self, point):
return False
def containsObject(self, obj):
return False
def show(self, plt, style=None):
pass
def __eq__(self, other):
return type(other) is EmptyRegion
def __hash__(self):
return hash(EmptyRegion)
everywhere = AllRegion('everywhere')
nowhere = EmptyRegion('nowhere')
class CircularRegion(Region):
def __init__(self, center, radius, resolution=32):
super().__init__('Circle', center, radius)
self.center = center.toVector()
self.radius = radius
self.circumcircle = (self.center, self.radius)
if not (needsSampling(self.center) or needsSampling(self.radius)):
ctr = shapely.geometry.Point(self.center)
self.polygon = ctr.buffer(self.radius, resolution=resolution)
def sampleGiven(self, value):
return CircularRegion(value[self.center], value[self.radius])
def evaluateInner(self, context):
center = valueInContext(self.center, context)
radius = valueInContext(self.radius, context)
return CircularRegion(center, radius)
def containsPoint(self, point):
point = point.toVector()
return point.distanceTo(self.center) <= self.radius
def uniformPointInner(self):
x, y = self.center
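		# triangular(0, R, R) has density proportional to r, so the (r, t) pair below is uniform over the disk's area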
r = random.triangular(0, self.radius, self.radius)
t = random.uniform(-math.pi, math.pi)
pt = Vector(x + (r * cos(t)), y + (r * sin(t)))
return self.orient(pt)
def getAABB(self):
x, y = self.center
r = self.radius
return ((x - r, y - r), (x + r, y + r))
def isEquivalentTo(self, other):
if type(other) is not CircularRegion:
return False
return (areEquivalent(other.center, self.center)
and areEquivalent(other.radius, self.radius))
def __str__(self):
return f'CircularRegion({self.center}, {self.radius})'
class SectorRegion(Region):
def __init__(self, center, radius, heading, angle, resolution=32):
super().__init__('Sector', center, radius, heading, angle)
self.center = center.toVector()
self.radius = radius
self.heading = heading
self.angle = angle
r = (radius / 2) * cos(angle / 2)
self.circumcircle = (self.center.offsetRadially(r, heading), r)
if not any(needsSampling(x) for x in (self.center, radius, heading, angle)):
ctr = shapely.geometry.Point(self.center)
circle = ctr.buffer(self.radius, resolution=resolution)
if angle >= math.tau - 0.001:
self.polygon = circle
else:
mask = shapely.geometry.Polygon([
self.center,
self.center.offsetRadially(radius, heading + angle/2),
self.center.offsetRadially(2*radius, heading),
self.center.offsetRadially(radius, heading - angle/2)
])
self.polygon = circle & mask
def sampleGiven(self, value):
return SectorRegion(value[self.center], value[self.radius],
value[self.heading], value[self.angle])
def evaluateInner(self, context):
center = valueInContext(self.center, context)
radius = valueInContext(self.radius, context)
heading = valueInContext(self.heading, context)
angle = valueInContext(self.angle, context)
return SectorRegion(center, radius, heading, angle)
def containsPoint(self, point):
point = point.toVector()
if not pointIsInCone(tuple(point), tuple(self.center), self.heading, self.angle):
return False
return point.distanceTo(self.center) <= self.radius
def uniformPointInner(self):
x, y = self.center
heading, angle, maxDist = self.heading, self.angle, self.radius
r = random.triangular(0, maxDist, maxDist)
ha = angle / 2.0
t = random.uniform(-ha, ha) + (heading + (math.pi / 2))
pt = Vector(x + (r * cos(t)), y + (r * sin(t)))
return self.orient(pt)
def isEquivalentTo(self, other):
if type(other) is not SectorRegion:
return False
return (areEquivalent(other.center, self.center)
and areEquivalent(other.radius, self.radius)
and areEquivalent(other.heading, self.heading)
and areEquivalent(other.angle, self.angle))
def __str__(self):
return f'SectorRegion({self.center},{self.radius},{self.heading},{self.angle})'
class RectangularRegion(RotatedRectangle, Region):
def __init__(self, position, heading, width, height):
super().__init__('Rectangle', position, heading, width, height)
self.position = position.toVector()
self.heading = heading
self.width = width
self.height = height
self.hw = hw = width / 2
self.hh = hh = height / 2
self.radius = hypot(hw, hh) # circumcircle; for collision detection
self.corners = tuple(position.offsetRotated(heading, Vector(*offset))
for offset in ((hw, hh), (-hw, hh), (-hw, -hh), (hw, -hh)))
self.circumcircle = (self.position, self.radius)
def sampleGiven(self, value):
return RectangularRegion(value[self.position], value[self.heading],
value[self.width], value[self.height])
def evaluateInner(self, context):
position = valueInContext(self.position, context)
heading = valueInContext(self.heading, context)
width = valueInContext(self.width, context)
height = valueInContext(self.height, context)
return RectangularRegion(position, heading, width, height)
def uniformPointInner(self):
hw, hh = self.hw, self.hh
rx = random.uniform(-hw, hw)
ry = random.uniform(-hh, hh)
pt = self.position.offsetRotated(self.heading, Vector(rx, ry))
return self.orient(pt)
def getAABB(self):
x, y = zip(*self.corners)
minx, maxx = findMinMax(x)
miny, maxy = findMinMax(y)
return ((minx, miny), (maxx, maxy))
def isEquivalentTo(self, other):
if type(other) is not RectangularRegion:
return False
return (areEquivalent(other.position, self.position)
and areEquivalent(other.heading, self.heading)
and areEquivalent(other.width, self.width)
and areEquivalent(other.height, self.height))
def __str__(self):
return f'RectangularRegion({self.position},{self.heading},{self.width},{self.height})'
class PolylineRegion(Region):
"""Region given by one or more polylines (chain of line segments)"""
def __init__(self, points=None, polyline=None, orientation=True):
super().__init__('Polyline', orientation=orientation)
if points is not None:
points = tuple(points)
if len(points) < 2:
raise RuntimeError('tried to create PolylineRegion with < 2 points')
self.points = points
self.lineString = shapely.geometry.LineString(points)
elif polyline is not None:
if isinstance(polyline, shapely.geometry.LineString):
if len(polyline.coords) < 2:
raise RuntimeError('tried to create PolylineRegion with <2-point LineString')
elif isinstance(polyline, shapely.geometry.MultiLineString):
if len(polyline) == 0:
raise RuntimeError('tried to create PolylineRegion from empty MultiLineString')
for line in polyline:
assert len(line.coords) >= 2
else:
raise RuntimeError('tried to create PolylineRegion from non-LineString')
self.lineString = polyline
else:
raise RuntimeError('must specify points or polyline for PolylineRegion')
if not self.lineString.is_valid:
raise RuntimeError('tried to create PolylineRegion with '
f'invalid LineString {self.lineString}')
self.segments = self.segmentsOf(self.lineString)
cumulativeLengths = []
total = 0
for p, q in self.segments:
dx, dy = p[0] - q[0], p[1] - q[1]
total += math.hypot(dx, dy)
cumulativeLengths.append(total)
self.cumulativeLengths = cumulativeLengths
@classmethod
def segmentsOf(cls, lineString):
if isinstance(lineString, shapely.geometry.LineString):
segments = []
points = list(lineString.coords)
if len(points) < 2:
raise RuntimeError('LineString has fewer than 2 points')
last = points[0]
for point in points[1:]:
segments.append((last, point))
last = point
return segments
elif isinstance(lineString, shapely.geometry.MultiLineString):
allSegments = []
for line in lineString:
allSegments.extend(cls.segmentsOf(line))
return allSegments
else:
raise RuntimeError('called segmentsOf on non-linestring')
def uniformPointInner(self):
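		# pick a segment with probability proportional to its length, then interpolate a uniform point along it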
pointA, pointB = random.choices(self.segments,
cum_weights=self.cumulativeLengths)[0]
interpolation = random.random()
x, y = averageVectors(pointA, pointB, weight=interpolation)
if self.orientation is True:
return OrientedVector(x, y, headingOfSegment(pointA, pointB))
else:
return self.orient(Vector(x, y))
def intersect(self, other, triedReversed=False):
poly = toPolygon(other)
if poly is not None:
intersection = self.lineString & poly
if (intersection.is_empty or
not isinstance(intersection, (shapely.geometry.LineString,
shapely.geometry.MultiLineString))):
# TODO handle points!
return nowhere
return PolylineRegion(polyline=intersection)
return super().intersect(other, triedReversed)
def containsPoint(self, point):
return self.lineString.intersects(shapely.geometry.Point(point))
def containsObject(self, obj):
return False
def getAABB(self):
xmin, ymin, xmax, ymax = self.lineString.bounds
return ((xmin, ymin), (xmax, ymax))
def show(self, plt, style='r-'):
for pointA, pointB in self.segments:
plt.plot([pointA[0], pointB[0]], [pointA[1], pointB[1]], style)
def __str__(self):
return f'PolylineRegion({self.lineString})'
def __eq__(self, other):
if type(other) is not PolylineRegion:
return NotImplemented
return (other.lineString == self.lineString)
@cached
def __hash__(self):
return hash(str(self.lineString))
class PolygonalRegion(Region):
"""Region given by one or more polygons (possibly with holes)"""
def __init__(self, points=None, polygon=None, orientation=None):
super().__init__('Polygon', orientation=orientation)
if polygon is None and points is None:
raise RuntimeError('must specify points or polygon for PolygonalRegion')
if polygon is None:
points = tuple(points)
if len(points) == 0:
raise RuntimeError('tried to create PolygonalRegion from empty point list!')
for point in points:
if needsSampling(point):
raise RuntimeError('only fixed PolygonalRegions are supported')
self.points = points
polygon = shapely.geometry.Polygon(points)
if isinstance(polygon, shapely.geometry.Polygon):
self.polygons = shapely.geometry.MultiPolygon([polygon])
elif isinstance(polygon, shapely.geometry.MultiPolygon):
self.polygons = polygon
else:
raise RuntimeError(f'tried to create PolygonalRegion from non-polygon {polygon}')
if not self.polygons.is_valid:
raise RuntimeError('tried to create PolygonalRegion with '
f'invalid polygon {self.polygons}')
if points is None and len(self.polygons) == 1 and len(self.polygons[0].interiors) == 0:
self.points = tuple(self.polygons[0].exterior.coords[:-1])
if self.polygons.is_empty:
raise RuntimeError('tried to create empty PolygonalRegion')
triangles = []
for polygon in self.polygons:
triangles.extend(triangulatePolygon(polygon))
assert len(triangles) > 0, self.polygons
self.trianglesAndBounds = tuple((tri, tri.bounds) for tri in triangles)
areas = (triangle.area for triangle in triangles)
self.cumulativeTriangleAreas = tuple(itertools.accumulate(areas))
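	# Sampling strategy for uniformPointInner below: pick a triangle of the triangulation
	# with probability proportional to its area, then rejection-sample points uniformly from
	# that triangle's bounding box until one falls inside the triangle, giving a uniform
	# draw over the whole polygonal region.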
def uniformPointInner(self):
triangle, bounds = random.choices(
self.trianglesAndBounds,
cum_weights=self.cumulativeTriangleAreas)[0]
minx, miny, maxx, maxy = bounds
# TODO improve?
while True:
x, y = random.uniform(minx, maxx), random.uniform(miny, maxy)
if triangle.intersects(shapely.geometry.Point(x, y)):
return self.orient(Vector(x, y))
def intersect(self, other, triedReversed=False):
poly = toPolygon(other)
orientation = other.orientation if self.orientation is None else self.orientation
if poly is not None:
intersection = self.polygons & poly
if intersection.is_empty:
return nowhere
elif isinstance(intersection, (shapely.geometry.Polygon,
shapely.geometry.MultiPolygon)):
return PolygonalRegion(polygon=intersection, orientation=orientation)
elif isinstance(intersection, shapely.geometry.GeometryCollection):
polys = []
for geom in intersection:
if isinstance(geom, shapely.geometry.Polygon):
polys.append(geom)
if len(polys) == 0:
# TODO handle points, lines
raise RuntimeError('unhandled type of polygon intersection')
intersection = shapely.geometry.MultiPolygon(polys)
return PolygonalRegion(polygon=intersection, orientation=orientation)
else:
# TODO handle points, lines
raise RuntimeError('unhandled type of polygon intersection')
return super().intersect(other, triedReversed)
def union(self, other):
poly = toPolygon(other)
if not poly:
raise RuntimeError(f'cannot take union of PolygonalRegion with {other}')
union = polygonUnion((self.polygons, poly))
return PolygonalRegion(polygon=union)
def containsPoint(self, point):
return self.polygons.intersects(shapely.geometry.Point(point))
def containsObject(self, obj):
objPoly = obj.polygon
if objPoly is None:
raise RuntimeError('tried to test containment of symbolic Object!')
# TODO improve boundary handling?
return self.polygons.contains(objPoly)
def getAABB(self):
		xmin, ymin, xmax, ymax = self.polygons.bounds
return ((xmin, ymin), (xmax, ymax))
def show(self, plt, style='r-'):
plotPolygon(self.polygons, plt, style=style)
def __str__(self):
return '<PolygonalRegion>'
def __eq__(self, other):
if type(other) is not PolygonalRegion:
return NotImplemented
return (other.polygons == self.polygons
and other.orientation == self.orientation)
@cached
def __hash__(self):
# TODO better way to hash mutable Shapely geometries? (also for PolylineRegion)
return hash((str(self.polygons), self.orientation))
class PointSetRegion(Region):
"""Region consisting of a set of discrete points.
No :obj:`~scenic.core.object_types.Object` can be contained in a `PointSetRegion`,
since the latter is discrete. (This may not be true for subclasses, e.g.
`GridRegion`.)
Args:
name (str): name for debugging
points (iterable): set of points comprising the region
		kdTree (:obj:`scipy.spatial.KDTree`, optional): k-D tree for the points (one will
be computed if none is provided)
orientation (:obj:`~scenic.core.vectors.VectorField`, optional): orientation for
the region
tolerance (float, optional): distance tolerance for checking whether a point lies
in the region
"""
def __init__(self, name, points, kdTree=None, orientation=None, tolerance=1e-6):
super().__init__(name, orientation=orientation)
self.points = tuple(points)
for point in self.points:
if needsSampling(point):
raise RuntimeError('only fixed PointSetRegions are supported')
self.kdTree = scipy.spatial.cKDTree(self.points) if kdTree is None else kdTree
self.orientation = orientation
self.tolerance = tolerance
def uniformPointInner(self):
return self.orient(Vector(*random.choice(self.points)))
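	# Intersection sampling below: the k-d tree is queried for every stored point inside the
	# other region's circumscribed circle, the points actually contained in that region are
	# kept, and one of them is returned uniformly at random.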
def intersect(self, other, triedReversed=False):
def sampler(intRegion):
o = intRegion.regions[1]
center, radius = o.circumcircle
possibles = (Vector(*self.kdTree.data[i])
for i in self.kdTree.query_ball_point(center, radius))
intersection = [p for p in possibles if o.containsPoint(p)]
if len(intersection) == 0:
raise RejectionException(f'empty intersection of Regions {self} and {o}')
return self.orient(random.choice(intersection))
return IntersectionRegion(self, other, sampler=sampler, orientation=self.orientation)
def containsPoint(self, point):
distance, location = self.kdTree.query(point)
return (distance <= self.tolerance)
def containsObject(self, obj):
raise NotImplementedError()
def __eq__(self, other):
if type(other) is not PointSetRegion:
return NotImplemented
return (other.name == self.name
and other.points == self.points
and other.orientation == self.orientation)
def __hash__(self):
return hash((self.name, self.points, self.orientation))
class GridRegion(PointSetRegion):
"""A Region given by an obstacle grid.
A point is considered to be in a `GridRegion` if the nearest grid point is
not an obstacle.
Args:
name (str): name for debugging
grid: 2D list, tuple, or NumPy array of 0s and 1s, where 1 indicates an obstacle
and 0 indicates free space
Ax (float): spacing between grid points along X axis
Ay (float): spacing between grid points along Y axis
Bx (float): X coordinate of leftmost grid column
By (float): Y coordinate of lowest grid row
orientation (:obj:`~scenic.core.vectors.VectorField`, optional): orientation of region
"""
def __init__(self, name, grid, Ax, Ay, Bx, By, orientation=None):
self.grid = numpy.array(grid)
self.sizeY, self.sizeX = self.grid.shape
self.Ax, self.Ay = Ax, Ay
self.Bx, self.By = Bx, By
y, x = numpy.where(self.grid == 0)
points = [self.gridToPoint(point) for point in zip(x, y)]
super().__init__(name, points, orientation=orientation)
def gridToPoint(self, gp):
x, y = gp
return ((self.Ax * x) + self.Bx, (self.Ay * y) + self.By)
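	# pointToGrid below inverts the affine map of gridToPoint by rounding to the nearest
	# grid cell (illustrative, assumed values: with Ax=2 and Bx=10, world x = 24 maps back
	# to column round((24 - 10) / 2) = 7).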
def pointToGrid(self, point):
x, y = point
x = (x - self.Bx) / self.Ax
y = (y - self.By) / self.Ay
nx = int(round(x))
if nx < 0 or nx >= self.sizeX:
return None
ny = int(round(y))
if ny < 0 or ny >= self.sizeY:
return None
return (nx, ny)
def containsPoint(self, point):
gp = self.pointToGrid(point)
if gp is None:
return False
x, y = gp
return (self.grid[y, x] == 0)
def containsObject(self, obj):
# TODO improve this procedure!
# Fast check
for c in obj.corners:
if not self.containsPoint(c):
return False
# Slow check
gps = [self.pointToGrid(corner) for corner in obj.corners]
x, y = zip(*gps)
minx, maxx = findMinMax(x)
miny, maxy = findMinMax(y)
for x in range(minx, maxx+1):
for y in range(miny, maxy+1):
p = self.gridToPoint((x, y))
if self.grid[y, x] == 1 and obj.containsPoint(p):
return False
return True
class IntersectionRegion(Region):
def __init__(self, *regions, orientation=None, sampler=None):
self.regions = tuple(regions)
if len(self.regions) < 2:
raise RuntimeError('tried to take intersection of fewer than 2 regions')
super().__init__('Intersection', *self.regions, orientation=orientation)
if sampler is None:
sampler = self.genericSampler
self.sampler = sampler
def sampleGiven(self, value):
regs = [value[reg] for reg in self.regions]
# Now that regions have been sampled, attempt intersection again in the hopes
# there is a specialized sampler to handle it (unless we already have one)
if self.sampler is self.genericSampler:
failed = False
intersection = regs[0]
for region in regs[1:]:
intersection = intersection.intersect(region)
if isinstance(intersection, IntersectionRegion):
failed = True
break
if not failed:
intersection.orientation = value[self.orientation]
return intersection
return IntersectionRegion(*regs, orientation=value[self.orientation],
sampler=self.sampler)
def evaluateInner(self, context):
regs = (valueInContext(reg, context) for reg in self.regions)
orientation = valueInContext(self.orientation, context)
return IntersectionRegion(*regs, orientation=orientation, sampler=self.sampler)
def containsPoint(self, point):
return all(region.containsPoint(point) for region in self.regions)
def uniformPointInner(self):
return self.orient(self.sampler(self))
@staticmethod
def genericSampler(intersection):
regs = intersection.regions
point = regs[0].uniformPointInner()
for region in regs[1:]:
if not region.containsPoint(point):
raise RejectionException(
f'sampling intersection of Regions {regs[0]} and {region}')
return point
def isEquivalentTo(self, other):
if type(other) is not IntersectionRegion:
return False
return (areEquivalent(set(other.regions), set(self.regions))
and other.orientation == self.orientation)
def __str__(self):
return f'IntersectionRegion({self.regions})'
| [((22, 4, 22, 24), 'scenic.core.distributions.needsSampling', 'needsSampling', ({(22, 18, 22, 23): 'thing'}, {}), '(thing)', False, 'from scenic.core.distributions import Samplable, RejectionException, needsSampling\n'), ((110, 8, 110, 68), 'scenic.core.type_support.toVector', 'toVector', ({(110, 17, 110, 22): 'thing', (110, 24, 110, 67): '""""X in Y" with X not an Object or a vector"""'}, {}), '(thing, \'"X in Y" with X not an Object or a vector\')', False, 'from scenic.core.type_support import toVector\n'), ((150, 8, 150, 52), 'scenic.core.distributions.RejectionException', 'RejectionException', ({(150, 27, 150, 51): 'f"""sampling empty Region"""'}, {}), "(f'sampling empty Region')", False, 'from scenic.core.distributions import Samplable, RejectionException, needsSampling\n'), ((185, 11, 185, 47), 'scenic.core.lazy_eval.valueInContext', 'valueInContext', ({(185, 26, 185, 37): 'self.center', (185, 39, 185, 46): 'context'}, {}), '(self.center, context)', False, 'from scenic.core.lazy_eval import valueInContext\n'), ((186, 11, 186, 47), 'scenic.core.lazy_eval.valueInContext', 'valueInContext', ({(186, 26, 186, 37): 'self.radius', (186, 39, 186, 46): 'context'}, {}), '(self.radius, context)', False, 'from scenic.core.lazy_eval import valueInContext\n'), ((195, 6, 195, 52), 'random.triangular', 'random.triangular', ({(195, 24, 195, 25): '0', (195, 27, 195, 38): 'self.radius', (195, 40, 195, 51): 'self.radius'}, {}), '(0, self.radius, self.radius)', False, 'import random\n'), ((196, 6, 196, 39), 'random.uniform', 'random.uniform', ({(196, 21, 196, 29): '-math.pi', (196, 31, 196, 38): 'math.pi'}, {}), '(-math.pi, math.pi)', False, 'import random\n'), ((243, 11, 243, 47), 'scenic.core.lazy_eval.valueInContext', 'valueInContext', ({(243, 26, 243, 37): 'self.center', (243, 39, 243, 46): 'context'}, {}), '(self.center, context)', False, 'from scenic.core.lazy_eval import valueInContext\n'), ((244, 11, 244, 47), 'scenic.core.lazy_eval.valueInContext', 'valueInContext', ({(244, 26, 244, 37): 'self.radius', (244, 39, 244, 46): 'context'}, {}), '(self.radius, context)', False, 'from scenic.core.lazy_eval import valueInContext\n'), ((245, 12, 245, 49), 'scenic.core.lazy_eval.valueInContext', 'valueInContext', ({(245, 27, 245, 39): 'self.heading', (245, 41, 245, 48): 'context'}, {}), '(self.heading, context)', False, 'from scenic.core.lazy_eval import valueInContext\n'), ((246, 10, 246, 45), 'scenic.core.lazy_eval.valueInContext', 'valueInContext', ({(246, 25, 246, 35): 'self.angle', (246, 37, 246, 44): 'context'}, {}), '(self.angle, context)', False, 'from scenic.core.lazy_eval import valueInContext\n'), ((258, 6, 258, 44), 'random.triangular', 'random.triangular', ({(258, 24, 258, 25): '0', (258, 27, 258, 34): 'maxDist', (258, 36, 258, 43): 'maxDist'}, {}), '(0, maxDist, maxDist)', False, 'import random\n'), ((284, 16, 284, 29), 'scenic.core.geometry.hypot', 'hypot', ({(284, 22, 284, 24): 'hw', (284, 26, 284, 28): 'hh'}, {}), '(hw, hh)', False, 'from scenic.core.geometry import sin, cos, hypot, findMinMax, pointIsInCone, averageVectors\n'), ((294, 13, 294, 51), 'scenic.core.lazy_eval.valueInContext', 'valueInContext', ({(294, 28, 294, 41): 'self.position', (294, 43, 294, 50): 'context'}, {}), '(self.position, context)', False, 'from scenic.core.lazy_eval import valueInContext\n'), ((295, 12, 295, 49), 'scenic.core.lazy_eval.valueInContext', 'valueInContext', ({(295, 27, 295, 39): 'self.heading', (295, 41, 295, 48): 'context'}, {}), '(self.heading, context)', False, 'from scenic.core.lazy_eval import 
valueInContext\n'), ((296, 10, 296, 45), 'scenic.core.lazy_eval.valueInContext', 'valueInContext', ({(296, 25, 296, 35): 'self.width', (296, 37, 296, 44): 'context'}, {}), '(self.width, context)', False, 'from scenic.core.lazy_eval import valueInContext\n'), ((297, 11, 297, 47), 'scenic.core.lazy_eval.valueInContext', 'valueInContext', ({(297, 26, 297, 37): 'self.height', (297, 39, 297, 46): 'context'}, {}), '(self.height, context)', False, 'from scenic.core.lazy_eval import valueInContext\n'), ((302, 7, 302, 30), 'random.uniform', 'random.uniform', ({(302, 22, 302, 25): '-hw', (302, 27, 302, 29): 'hw'}, {}), '(-hw, hw)', False, 'import random\n'), ((303, 7, 303, 30), 'random.uniform', 'random.uniform', ({(303, 22, 303, 25): '-hh', (303, 27, 303, 29): 'hh'}, {}), '(-hh, hh)', False, 'import random\n'), ((309, 15, 309, 28), 'scenic.core.geometry.findMinMax', 'findMinMax', ({(309, 26, 309, 27): 'x'}, {}), '(x)', False, 'from scenic.core.geometry import sin, cos, hypot, findMinMax, pointIsInCone, averageVectors\n'), ((310, 15, 310, 28), 'scenic.core.geometry.findMinMax', 'findMinMax', ({(310, 26, 310, 27): 'y'}, {}), '(y)', False, 'from scenic.core.geometry import sin, cos, hypot, findMinMax, pointIsInCone, averageVectors\n'), ((383, 18, 383, 33), 'random.random', 'random.random', ({}, {}), '()', False, 'import random\n'), ((384, 9, 384, 61), 'scenic.core.geometry.averageVectors', 'averageVectors', (), '', False, 'from scenic.core.geometry import sin, cos, hypot, findMinMax, pointIsInCone, averageVectors\n'), ((508, 10, 508, 45), 'scenic.core.geometry.polygonUnion', 'polygonUnion', ({(508, 23, 508, 44): '(self.polygons, poly)'}, {}), '((self.polygons, poly))', False, 'from scenic.core.geometry import headingOfSegment, triangulatePolygon, plotPolygon, polygonUnion\n'), ((526, 2, 526, 46), 'scenic.core.geometry.plotPolygon', 'plotPolygon', (), '', False, 'from scenic.core.geometry import headingOfSegment, triangulatePolygon, plotPolygon, polygonUnion\n'), ((619, 14, 619, 31), 'numpy.array', 'numpy.array', ({(619, 26, 619, 30): 'grid'}, {}), '(grid)', False, 'import numpy\n'), ((623, 9, 623, 36), 'numpy.where', 'numpy.where', ({(623, 21, 623, 35): 'self.grid == 0'}, {}), '(self.grid == 0)', False, 'import numpy\n'), ((659, 15, 659, 28), 'scenic.core.geometry.findMinMax', 'findMinMax', ({(659, 26, 659, 27): 'x'}, {}), '(x)', False, 'from scenic.core.geometry import sin, cos, hypot, findMinMax, pointIsInCone, averageVectors\n'), ((660, 15, 660, 28), 'scenic.core.geometry.findMinMax', 'findMinMax', ({(660, 26, 660, 27): 'y'}, {}), '(y)', False, 'from scenic.core.geometry import sin, cos, hypot, findMinMax, pointIsInCone, averageVectors\n'), ((698, 16, 698, 57), 'scenic.core.lazy_eval.valueInContext', 'valueInContext', ({(698, 31, 698, 47): 'self.orientation', (698, 49, 698, 56): 'context'}, {}), '(self.orientation, context)', False, 'from scenic.core.lazy_eval import valueInContext\n'), ((83, 13, 83, 32), 'scenic.core.distributions.needsSampling', 'needsSampling', ({(83, 27, 83, 31): 'self'}, {}), '(self)', False, 'from scenic.core.distributions import Samplable, RejectionException, needsSampling\n'), ((122, 10, 122, 61), 'scenic.core.vectors.OrientedVector', 'OrientedVector', ({(122, 25, 122, 30): 'vec.x', (122, 32, 122, 37): 'vec.y', (122, 39, 122, 60): 'self.orientation[vec]'}, {}), '(vec.x, vec.y, self.orientation[vec])', False, 'from scenic.core.vectors import Vector, OrientedVector, VectorDistribution\n'), ((208, 10, 208, 50), 'scenic.core.utils.areEquivalent', 'areEquivalent', ({(208, 24, 
208, 36): 'other.center', (208, 38, 208, 49): 'self.center'}, {}), '(other.center, self.center)', False, 'from scenic.core.utils import cached, areEquivalent\n'), ((209, 14, 209, 54), 'scenic.core.utils.areEquivalent', 'areEquivalent', ({(209, 28, 209, 40): 'other.radius', (209, 42, 209, 53): 'self.radius'}, {}), '(other.radius, self.radius)', False, 'from scenic.core.utils import cached, areEquivalent\n'), ((221, 21, 221, 35), 'scenic.core.geometry.cos', 'cos', ({(221, 25, 221, 34): '(angle / 2)'}, {}), '(angle / 2)', False, 'from scenic.core.geometry import sin, cos, hypot, findMinMax, pointIsInCone, averageVectors\n'), ((260, 6, 260, 29), 'random.uniform', 'random.uniform', ({(260, 21, 260, 24): '(-ha)', (260, 26, 260, 28): 'ha'}, {}), '(-ha, ha)', False, 'import random\n'), ((267, 10, 267, 50), 'scenic.core.utils.areEquivalent', 'areEquivalent', ({(267, 24, 267, 36): 'other.center', (267, 38, 267, 49): 'self.center'}, {}), '(other.center, self.center)', False, 'from scenic.core.utils import cached, areEquivalent\n'), ((268, 14, 268, 54), 'scenic.core.utils.areEquivalent', 'areEquivalent', ({(268, 28, 268, 40): 'other.radius', (268, 42, 268, 53): 'self.radius'}, {}), '(other.radius, self.radius)', False, 'from scenic.core.utils import cached, areEquivalent\n'), ((269, 14, 269, 56), 'scenic.core.utils.areEquivalent', 'areEquivalent', ({(269, 28, 269, 41): 'other.heading', (269, 43, 269, 55): 'self.heading'}, {}), '(other.heading, self.heading)', False, 'from scenic.core.utils import cached, areEquivalent\n'), ((270, 14, 270, 52), 'scenic.core.utils.areEquivalent', 'areEquivalent', ({(270, 28, 270, 39): 'other.angle', (270, 41, 270, 51): 'self.angle'}, {}), '(other.angle, self.angle)', False, 'from scenic.core.utils import cached, areEquivalent\n'), ((304, 49, 304, 63), 'scenic.core.vectors.Vector', 'Vector', ({(304, 56, 304, 58): 'rx', (304, 60, 304, 62): 'ry'}, {}), '(rx, ry)', False, 'from scenic.core.vectors import Vector, OrientedVector, VectorDistribution\n'), ((316, 10, 316, 54), 'scenic.core.utils.areEquivalent', 'areEquivalent', ({(316, 24, 316, 38): 'other.position', (316, 40, 316, 53): 'self.position'}, {}), '(other.position, self.position)', False, 'from scenic.core.utils import cached, areEquivalent\n'), ((317, 14, 317, 56), 'scenic.core.utils.areEquivalent', 'areEquivalent', ({(317, 28, 317, 41): 'other.heading', (317, 43, 317, 55): 'self.heading'}, {}), '(other.heading, self.heading)', False, 'from scenic.core.utils import cached, areEquivalent\n'), ((318, 14, 318, 52), 'scenic.core.utils.areEquivalent', 'areEquivalent', ({(318, 28, 318, 39): 'other.width', (318, 41, 318, 51): 'self.width'}, {}), '(other.width, self.width)', False, 'from scenic.core.utils import cached, areEquivalent\n'), ((319, 14, 319, 54), 'scenic.core.utils.areEquivalent', 'areEquivalent', ({(319, 28, 319, 40): 'other.height', (319, 42, 319, 53): 'self.height'}, {}), '(other.height, self.height)', False, 'from scenic.core.utils import cached, areEquivalent\n'), ((356, 12, 356, 30), 'math.hypot', 'math.hypot', ({(356, 23, 356, 25): 'dx', (356, 27, 356, 29): 'dy'}, {}), '(dx, dy)', False, 'import math\n'), ((381, 19, 382, 69), 'random.choices', 'random.choices', (), '', False, 'import random\n'), ((466, 39, 466, 66), 'itertools.accumulate', 'itertools.accumulate', ({(466, 60, 466, 65): 'areas'}, {}), '(areas)', False, 'import itertools\n'), ((469, 21, 471, 44), 'random.choices', 'random.choices', (), '', False, 'import random\n'), ((564, 6, 564, 26), 'scenic.core.distributions.needsSampling', 
'needsSampling', ({(564, 20, 564, 25): 'point'}, {}), '(point)', False, 'from scenic.core.distributions import Samplable, RejectionException, needsSampling\n'), ((697, 10, 697, 38), 'scenic.core.lazy_eval.valueInContext', 'valueInContext', ({(697, 25, 697, 28): 'reg', (697, 30, 697, 37): 'context'}, {}), '(reg, context)', False, 'from scenic.core.lazy_eval import valueInContext\n'), ((177, 10, 177, 36), 'scenic.core.distributions.needsSampling', 'needsSampling', ({(177, 24, 177, 35): 'self.center'}, {}), '(self.center)', False, 'from scenic.core.distributions import Samplable, RejectionException, needsSampling\n'), ((177, 40, 177, 66), 'scenic.core.distributions.needsSampling', 'needsSampling', ({(177, 54, 177, 65): 'self.radius'}, {}), '(self.radius)', False, 'from scenic.core.distributions import Samplable, RejectionException, needsSampling\n'), ((386, 31, 386, 63), 'scenic.core.geometry.headingOfSegment', 'headingOfSegment', ({(386, 48, 386, 54): 'pointA', (386, 56, 386, 62): 'pointB'}, {}), '(pointA, pointB)', False, 'from scenic.core.geometry import headingOfSegment, triangulatePolygon, plotPolygon, polygonUnion\n'), ((388, 22, 388, 34), 'scenic.core.vectors.Vector', 'Vector', ({(388, 29, 388, 30): 'x', (388, 32, 388, 33): 'y'}, {}), '(x, y)', False, 'from scenic.core.vectors import Vector, OrientedVector, VectorDistribution\n'), ((439, 7, 439, 27), 'scenic.core.distributions.needsSampling', 'needsSampling', ({(439, 21, 439, 26): 'point'}, {}), '(point)', False, 'from scenic.core.distributions import Samplable, RejectionException, needsSampling\n'), ((462, 20, 462, 47), 'scenic.core.geometry.triangulatePolygon', 'triangulatePolygon', ({(462, 39, 462, 46): 'polygon'}, {}), '(polygon)', False, 'from scenic.core.geometry import headingOfSegment, triangulatePolygon, plotPolygon, polygonUnion\n'), ((475, 10, 475, 36), 'random.uniform', 'random.uniform', ({(475, 25, 475, 29): 'minx', (475, 31, 475, 35): 'maxx'}, {}), '(minx, maxx)', False, 'import random\n'), ((475, 38, 475, 64), 'random.uniform', 'random.uniform', ({(475, 53, 475, 57): 'miny', (475, 59, 475, 63): 'maxy'}, {}), '(miny, maxy)', False, 'import random\n'), ((577, 16, 577, 44), 'scenic.core.vectors.Vector', 'Vector', ({(577, 23, 577, 43): '*self.kdTree.data[i]'}, {}), '(*self.kdTree.data[i])', False, 'from scenic.core.vectors import Vector, OrientedVector, VectorDistribution\n'), ((581, 10, 581, 77), 'scenic.core.distributions.RejectionException', 'RejectionException', ({(581, 29, 581, 76): 'f"""empty intersection of Regions {self} and {o}"""'}, {}), "(f'empty intersection of Regions {self} and {o}')", False, 'from scenic.core.distributions import Samplable, RejectionException, needsSampling\n'), ((582, 22, 582, 49), 'random.choice', 'random.choice', ({(582, 36, 582, 48): 'intersection'}, {}), '(intersection)', False, 'import random\n'), ((713, 10, 714, 67), 'scenic.core.distributions.RejectionException', 'RejectionException', ({(714, 8, 714, 66): 'f"""sampling intersection of Regions {regs[0]} and {region}"""'}, {}), "(f'sampling intersection of Regions {regs[0]} and {region}')", False, 'from scenic.core.distributions import Samplable, RejectionException, needsSampling\n'), ((197, 23, 197, 29), 'scenic.core.geometry.cos', 'cos', ({(197, 27, 197, 28): 't'}, {}), '(t)', False, 'from scenic.core.geometry import sin, cos, hypot, findMinMax, pointIsInCone, averageVectors\n'), ((197, 41, 197, 47), 'scenic.core.geometry.sin', 'sin', ({(197, 45, 197, 46): 't'}, {}), '(t)', False, 'from scenic.core.geometry import sin, cos, hypot, 
findMinMax, pointIsInCone, averageVectors\n'), ((224, 13, 224, 29), 'scenic.core.distributions.needsSampling', 'needsSampling', ({(224, 27, 224, 28): 'x'}, {}), '(x)', False, 'from scenic.core.distributions import Samplable, RejectionException, needsSampling\n'), ((261, 23, 261, 29), 'scenic.core.geometry.cos', 'cos', ({(261, 27, 261, 28): 't'}, {}), '(t)', False, 'from scenic.core.geometry import sin, cos, hypot, findMinMax, pointIsInCone, averageVectors\n'), ((261, 41, 261, 47), 'scenic.core.geometry.sin', 'sin', ({(261, 45, 261, 46): 't'}, {}), '(t)', False, 'from scenic.core.geometry import sin, cos, hypot, findMinMax, pointIsInCone, averageVectors\n'), ((285, 55, 285, 70), 'scenic.core.vectors.Vector', 'Vector', ({(285, 62, 285, 69): '*offset'}, {}), '(*offset)', False, 'from scenic.core.vectors import Vector, OrientedVector, VectorDistribution\n'), ((477, 23, 477, 35), 'scenic.core.vectors.Vector', 'Vector', ({(477, 30, 477, 31): 'x', (477, 33, 477, 34): 'y'}, {}), '(x, y)', False, 'from scenic.core.vectors import Vector, OrientedVector, VectorDistribution\n'), ((571, 29, 571, 55), 'random.choice', 'random.choice', ({(571, 43, 571, 54): 'self.points'}, {}), '(self.points)', False, 'import random\n')] |
mrahnis/orangery | orangery/cli/cutfill.py | 69afe0057bd61163eb8e026e58d648dfa1e73b94 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import logging
import time
import json
import click
import matplotlib.pyplot as plt
import orangery as o
from orangery.cli import defaults, util
from orangery.tools.plotting import get_scale_factor
@click.command(options_metavar='<options>')
@click.argument('file1', nargs=1, type=click.Path(exists=True), metavar='<file_t0>') # help="survey representing the initial condition"
@click.argument('file2', nargs=1, type=click.Path(exists=True), metavar='<file_t1>') # help="survey representing the final condition"
@click.argument('fields', nargs=1, metavar='<fields>') # help="character string identifying the columns"
@click.argument('xs_name', nargs=1, metavar='<name>') # help="name of the cross-section to plot"
@click.option('--codes', 'codes_f', nargs=1, type=click.Path(exists=True), metavar='<codes_file>', help="JSON file representing the usage intent of a set of survey codes")
@click.option('--show/--save', is_flag=True, default=True, help="Show the plot or save to files; --show is the default")
@click.option('--summary/--no-summary', default=True, help="Print summary information; --summary is the default")
@click.option('--units', type=click.Choice(['m','sft','ft']), default='m', help="Unit to show in axis labels")
@click.option('--labels', nargs=2, metavar='<text text>', help="Labels to display in the legend")
@click.option('--exaggeration', metavar='<int>', default=3, help="Vertical exaggeration of plot")
@click.option('--scale', nargs=2, metavar='<float int>', type=click.Tuple([float, int]), default=(10, 300), help="Scale where first argument is units per-inch on the horizontal axis and second argument is output DPI")
@click.option('--close/--no-close', default=True, help="Close the line ends; --close is the default")
@click.option('--reverse', type=click.Choice(['t0','t1','tx']), help="Reverse a line or lines of section (t0=initial, t1=final, tx=both)")
@click.option('--exclude', nargs=2, type=click.Tuple([str, click.Choice(['t0','t1','tx'])]), multiple=True, metavar='<str choice>', help="Exclude a survey code from a line or lines of section (t0=initial, t1=final, tx=both)")
@click.option('--overlay', nargs=1, type=click.Path(exists=True))
@click.option('-v', '--verbose', is_flag=True, help="Enables verbose mode")
def cutfill(file1, file2, fields, xs_name, codes_f, show, summary, units, labels, exaggeration, scale, close, reverse, exclude, overlay, verbose):
"""Displays a plot of a repeat survey with cut and fill.
\b
The cutfill subcommand takes four arguments:
<file_t0> : survey data representing the initial condition in csv format
<file_t1> : survey data representing the final condition in csv format
<fields> : series of characters describing the data columns
<name> : name of cross-section to plot
    Options let you set various properties of the plot. The default is to --show the plot.
    With the --save option the plot will be saved as an image along with a csv file containing
    data about cross-sectional cut-and-fill areas along the line of section.
\b
Example:
orangery cutfill file_2004.csv file_2010.csv pxyzctr XS-7 --reverse t0
"""
if verbose is True:
loglevel = 2
else:
loglevel = 0
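    # loglevel is 0 (falsy) without -v, so `loglevel or logging.INFO` falls back to INFO;
    # with -v it is 2, which is below DEBUG and therefore lets every message through.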
logging.basicConfig(stream=sys.stderr, level=loglevel or logging.INFO)
# load the configuration
codes = defaults.codes.copy()
if codes_f:
user_codes = util.load_config(codes_f)
codes.update(user_codes)
# load the survey data
s1 = o.Survey(file1, fields, codes, 0)
s2 = o.Survey(file2, fields, codes, 0)
if overlay:
s3 = o.Survey(overlay, fields, codes, 0)
exclude_t0 = []
exclude_t1 = []
for code in exclude:
if code[1] in ('t0', 'tx'):
exclude_t0.append(code[0])
if code[1] in ('t1', 'tx'):
exclude_t1.append(code[0])
# select a group of points, in this case a cross section
xs_pts1 = o.group(s1.data, s1.code_table, group=xs_name, exclude=exclude_t0)
xs_pts2 = o.group(s2.data, s2.code_table, group=xs_name, exclude=exclude_t1)
# xs_pts_overlay = o.group(s3.data, s3.code_table, group=xs_name)
# get the endpoints of the group
p1, p2 = o.endpoints(xs_pts1, reverse=reverse in ('t0','tx'))
# make the sections
xs1 = o.Section(xs_pts1, p1, p2, reverse=reverse in ('t0','tx'))
xs2 = o.Section(xs_pts2, p1, p2, reverse=reverse in ('t1','tx'))
# xs_overlay = o.Section(xs_pts_overlay, p1, p2)
if labels:
label_t0 = labels[0]
label_t1 = labels[1]
        # label_overlay = labels[3]  # disabled: --labels accepts exactly two values, and the overlay plot below is commented out
elif 't' in fields:
label_t0 = (xs1.data.iloc[0]['t']).split('T')[0]
label_t1 = (xs2.data.iloc[0]['t']).split('T')[0]
# label_overlay = (xs_overlay.data.iloc[0]['t']).split('T')[0]
else:
label_t0 = 't0'
label_t1 = 't1'
# label_overlay = 'pre-restoration'
# calculate the change
chg = o.Change(xs1, xs2, close_ends=close)
if summary:
chg.summarize()
import matplotlib
font = {'family':'normal','weight':'normal','size':16}
matplotlib.rc('font', **font)
# plot the change between two cross-sections
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_aspect(exaggeration)
# xs_overlay.plot(ax=ax, marker='None', linestyle='-', linewidth=3, color='tab:red', label=label_overlay)
xs1.plot(ax=ax, marker='o', markersize=4, markerfacecolor='white', markeredgecolor='black', linestyle='-', color='gray', label=label_t0)
xs2.plot(ax=ax, marker='o', markersize=4, markerfacecolor='black', markeredgecolor='black', linestyle='-', color='black', label=label_t1)
chg.polygon_plot(ax=ax, fill_label='Fill', cut_label='Cut')
chg.annotate_plot(ax=ax)
ax.set_xlabel('Distance ({0})'.format(units))
ax.set_ylabel('Elevation ({0}), {1}x exaggeration'.format(units, exaggeration))
plt.legend(loc='best')
plt.title('Cross-section {0}'.format(xs_name))
if show:
plt.show()
else:
fname = xs_name + '-' + label_t0.replace('-', '') + '-' + label_t1.replace('-', '')
scale_factor = get_scale_factor(fig, ax, scale[0])
dims = fig.get_size_inches()
fig.set_size_inches(dims[0]*scale_factor, dims[1]*scale_factor)
fig.savefig(fname+'.png', dpi=scale[1])
click.echo('Figure saved to: {}'.format(fname+'.png'))
chg.save(fname+'.csv')
click.echo('Data saved to: {}'.format(fname+'.csv'))
| [((17, 1, 17, 43), 'click.command', 'click.command', (), '', False, 'import click\n'), ((20, 1, 20, 54), 'click.argument', 'click.argument', (), '', False, 'import click\n'), ((21, 1, 21, 53), 'click.argument', 'click.argument', (), '', False, 'import click\n'), ((23, 1, 23, 120), 'click.option', 'click.option', (), '', False, 'import click\n'), ((24, 1, 24, 113), 'click.option', 'click.option', (), '', False, 'import click\n'), ((26, 1, 26, 97), 'click.option', 'click.option', (), '', False, 'import click\n'), ((27, 1, 27, 97), 'click.option', 'click.option', (), '', False, 'import click\n'), ((29, 1, 29, 101), 'click.option', 'click.option', (), '', False, 'import click\n'), ((33, 1, 33, 75), 'click.option', 'click.option', (), '', False, 'import click\n'), ((58, 4, 58, 74), 'logging.basicConfig', 'logging.basicConfig', (), '', False, 'import logging\n'), ((61, 12, 61, 33), 'orangery.cli.defaults.codes.copy', 'defaults.codes.copy', ({}, {}), '()', False, 'from orangery.cli import defaults, util\n'), ((67, 9, 67, 42), 'orangery.Survey', 'o.Survey', ({(67, 18, 67, 23): 'file1', (67, 25, 67, 31): 'fields', (67, 33, 67, 38): 'codes', (67, 40, 67, 41): '0'}, {}), '(file1, fields, codes, 0)', True, 'import orangery as o\n'), ((68, 9, 68, 42), 'orangery.Survey', 'o.Survey', ({(68, 18, 68, 23): 'file2', (68, 25, 68, 31): 'fields', (68, 33, 68, 38): 'codes', (68, 40, 68, 41): '0'}, {}), '(file2, fields, codes, 0)', True, 'import orangery as o\n'), ((82, 14, 82, 80), 'orangery.group', 'o.group', (), '', True, 'import orangery as o\n'), ((83, 14, 83, 80), 'orangery.group', 'o.group', (), '', True, 'import orangery as o\n'), ((88, 13, 88, 65), 'orangery.endpoints', 'o.endpoints', (), '', True, 'import orangery as o\n'), ((91, 10, 91, 68), 'orangery.Section', 'o.Section', (), '', True, 'import orangery as o\n'), ((92, 10, 92, 68), 'orangery.Section', 'o.Section', (), '', True, 'import orangery as o\n'), ((110, 10, 110, 46), 'orangery.Change', 'o.Change', (), '', True, 'import orangery as o\n'), ((116, 4, 116, 33), 'matplotlib.rc', 'matplotlib.rc', ({(116, 18, 116, 24): '"""font"""'}, {}), "('font', **font)", False, 'import matplotlib\n'), ((118, 10, 118, 22), 'matplotlib.pyplot.figure', 'plt.figure', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((129, 4, 129, 26), 'matplotlib.pyplot.legend', 'plt.legend', (), '', True, 'import matplotlib.pyplot as plt\n'), ((63, 21, 63, 46), 'orangery.cli.util.load_config', 'util.load_config', ({(63, 38, 63, 45): 'codes_f'}, {}), '(codes_f)', False, 'from orangery.cli import defaults, util\n'), ((71, 13, 71, 48), 'orangery.Survey', 'o.Survey', ({(71, 22, 71, 29): 'overlay', (71, 31, 71, 37): 'fields', (71, 39, 71, 44): 'codes', (71, 46, 71, 47): '0'}, {}), '(overlay, fields, codes, 0)', True, 'import orangery as o\n'), ((133, 8, 133, 18), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((137, 23, 137, 58), 'orangery.tools.plotting.get_scale_factor', 'get_scale_factor', ({(137, 40, 137, 43): 'fig', (137, 45, 137, 47): 'ax', (137, 49, 137, 57): 'scale[0]'}, {}), '(fig, ax, scale[0])', False, 'from orangery.tools.plotting import get_scale_factor\n'), ((18, 39, 18, 62), 'click.Path', 'click.Path', (), '', False, 'import click\n'), ((19, 39, 19, 62), 'click.Path', 'click.Path', (), '', False, 'import click\n'), ((22, 50, 22, 73), 'click.Path', 'click.Path', (), '', False, 'import click\n'), ((25, 30, 25, 60), 'click.Choice', 'click.Choice', ({(25, 43, 25, 59): "['m', 'sft', 'ft']"}, {}), "(['m', 'sft', 
'ft'])", False, 'import click\n'), ((28, 62, 28, 87), 'click.Tuple', 'click.Tuple', ({(28, 74, 28, 86): '[float, int]'}, {}), '([float, int])', False, 'import click\n'), ((30, 32, 30, 62), 'click.Choice', 'click.Choice', ({(30, 45, 30, 61): "['t0', 't1', 'tx']"}, {}), "(['t0', 't1', 'tx'])", False, 'import click\n'), ((32, 41, 32, 64), 'click.Path', 'click.Path', (), '', False, 'import click\n'), ((31, 59, 31, 89), 'click.Choice', 'click.Choice', ({(31, 72, 31, 88): "['t0', 't1', 'tx']"}, {}), "(['t0', 't1', 'tx'])", False, 'import click\n')] |
cadia-lvl/ice-g2p | src/ice_g2p/dictionaries.py | 5a6cc55f45282e8a656ea0742e2f373189c9a912 | import os, sys
DICTIONARY_FILE = os.path.join(sys.prefix, 'dictionaries/ice_pron_dict_standard_clear.csv')
HEAD_FILE = os.path.join(sys.prefix, 'data/head_map.csv')
MODIFIER_FILE = os.path.join(sys.prefix, 'data/modifier_map.csv')
VOWELS_FILE = os.path.join(sys.prefix, 'data/vowels_sampa.txt')
CONS_CLUSTERS_FILE = os.path.join(sys.prefix, 'data/cons_clusters_sampa.txt')
def read_map(filename):
with open(filename) as f:
file_content = f.read().splitlines()
dict_map = {}
for line in file_content:
arr = line.split('\t')
if len(arr) > 1:
values = arr[1:]
else:
values = []
key = arr[0]
dict_map[key] = values
return dict_map
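# Assumed input formats (illustrative): read_map above expects tab-separated lines of
# "key<TAB>value1<TAB>value2...", while read_dictionary below expects exactly one
# "word<TAB>transcription" pair per line.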
def read_dictionary(filename):
with open(filename) as f:
file_content = f.read().splitlines()
pronDict = {}
for line in file_content:
word, transcr = line.split('\t')
pronDict[word] = transcr
return pronDict
def read_list(filename):
with open(filename) as f:
file_content = f.read().splitlines()
return file_content
def get_head_map():
return read_map(HEAD_FILE)
def get_modifier_map():
return read_map(MODIFIER_FILE)
def get_dictionary():
return read_dictionary(DICTIONARY_FILE)
def get_vowels():
return read_list(VOWELS_FILE)
def get_cons_clusters():
return read_list(CONS_CLUSTERS_FILE)
| [((2, 18, 2, 91), 'os.path.join', 'os.path.join', ({(2, 31, 2, 41): 'sys.prefix', (2, 43, 2, 90): '"""dictionaries/ice_pron_dict_standard_clear.csv"""'}, {}), "(sys.prefix, 'dictionaries/ice_pron_dict_standard_clear.csv')", False, 'import os, sys\n'), ((3, 12, 3, 57), 'os.path.join', 'os.path.join', ({(3, 25, 3, 35): 'sys.prefix', (3, 37, 3, 56): '"""data/head_map.csv"""'}, {}), "(sys.prefix, 'data/head_map.csv')", False, 'import os, sys\n'), ((4, 16, 4, 65), 'os.path.join', 'os.path.join', ({(4, 29, 4, 39): 'sys.prefix', (4, 41, 4, 64): '"""data/modifier_map.csv"""'}, {}), "(sys.prefix, 'data/modifier_map.csv')", False, 'import os, sys\n'), ((5, 14, 5, 63), 'os.path.join', 'os.path.join', ({(5, 27, 5, 37): 'sys.prefix', (5, 39, 5, 62): '"""data/vowels_sampa.txt"""'}, {}), "(sys.prefix, 'data/vowels_sampa.txt')", False, 'import os, sys\n'), ((6, 21, 6, 77), 'os.path.join', 'os.path.join', ({(6, 34, 6, 44): 'sys.prefix', (6, 46, 6, 76): '"""data/cons_clusters_sampa.txt"""'}, {}), "(sys.prefix, 'data/cons_clusters_sampa.txt')", False, 'import os, sys\n')] |
jeromedockes/pylabelbuddy | tests/test_annotations_notebook.py | 26be00db679e94117968387aa7010dab2739b517 | from pylabelbuddy import _annotations_notebook
def test_annotations_notebook(root, annotations_mock, dataset_mock):
nb = _annotations_notebook.AnnotationsNotebook(
root, annotations_mock, dataset_mock
)
nb.change_database()
assert nb.notebook.index(nb.notebook.select()) == 2
nb.go_to_annotations()
assert nb.notebook.index(nb.notebook.select()) == 0
| [((5, 9, 7, 5), 'pylabelbuddy._annotations_notebook.AnnotationsNotebook', '_annotations_notebook.AnnotationsNotebook', ({(6, 8, 6, 12): 'root', (6, 14, 6, 30): 'annotations_mock', (6, 32, 6, 44): 'dataset_mock'}, {}), '(root, annotations_mock, dataset_mock)', False, 'from pylabelbuddy import _annotations_notebook\n')] |
zcemycl/algoTest | py/solns/wordSearch/wordSearch.py | 9518fb2b60fd83c85aeb2ab809ff647aaf643f0a | class Solution:
@staticmethod
def naive(board,word):
rows,cols,n = len(board),len(board[0]),len(word)
visited = set()
        def dfs(i,j,k):
            idf = str(i)+','+str(j)
            if i<0 or j<0 or i>cols-1 or j>rows-1 or \
                board[j][i]!=word[k] or idf in visited:
                return False
            if k==n-1 and word[k]==board[j][i]:
                return True
            visited.add(idf)
            # explore the four neighbours, then unmark this cell so other paths may reuse it
            found = dfs(i+1,j,k+1) or dfs(i-1,j,k+1) or \
                dfs(i,j+1,k+1) or dfs(i,j-1,k+1)
            visited.discard(idf)
            return found
for j in range(rows):
for i in range(cols):
if board[j][i]==word[0]:
if dfs(i,j,0): return True
return False
@staticmethod
def quick(board,word):
        ''' Improvements over the naive version:
            1. No separate visited set; visited cells are marked in place with '#'.
            2. No indexing into the original word; the remaining suffix is passed down.
            3. Early exit as soon as one of the 4 directions succeeds.
'''
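        # Illustrative example (made-up input): board=[["A","B"],["C","D"]], word="ABD"
        # returns True via A(0,0) -> B(0,1) -> D(1,1), while word="ABCD" returns False.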
rows,cols,n = len(board),len(board[0]),len(word)
def dfs(i,j,remain):
if len(remain)==0: return True
if i<0 or j<0 or i>cols-1 or j>rows-1 or \
board[j][i]!=remain[0]: return False
board[j][i]="#"
ret = False
for rowOff,colOff in [(1,0),(-1,0),(0,1),(0,-1)]:
ret = dfs(i+colOff,j+rowOff,remain[1:])
if ret: break
board[j][i]=remain[0]
return ret
for j in range(rows):
for i in range(cols):
if board[j][i]==word[0]:
if dfs(i,j,word): return True
return False | [] |
natedogg484/react-flask-authentication | middleware/run.py | 5000685d35471b03f72e0b07dfbdbf6d5fc296d2 | from flask import Flask
from flask_cors import CORS
from flask_restful import Api
from flask_sqlalchemy import SQLAlchemy
from flask_jwt_extended import JWTManager
app = Flask(__name__)
CORS(app)
api = Api(app)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///app.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SECRET_KEY'] = 'some-secret-string'
app.config['JWT_SECRET_KEY'] = 'jwt-secret-string'
app.config['JWT_BLACKLIST_ENABLED'] = True
app.config['JWT_BLACKLIST_TOKEN_CHECKS'] = ['access', 'refresh']
db = SQLAlchemy(app)
jwt = JWTManager(app)
@app.before_first_request
def create_tables():
db.create_all()
import models, resources, views
api.add_resource(resources.UserRegistration, '/registration')
api.add_resource(resources.UserLogin, '/login')
api.add_resource(resources.UserLogoutAccess, '/logout/access')
api.add_resource(resources.UserLogoutRefresh, '/logout/refresh')
api.add_resource(resources.TokenRefresh, '/token/refresh')
api.add_resource(resources.AllUsers, '/users')
api.add_resource(resources.SecretResource, '/secret')
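# Callback used by flask-jwt-extended on protected requests: returning True for a token
# whose jti is stored in the revoked-token table causes the request to be rejected.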
@jwt.token_in_blacklist_loader
def check_if_token_in_blacklist(decrypted_token):
jti = decrypted_token['jti']
return models.RevokedTokenModel.is_jti_blacklisted(jti) | [((8, 6, 8, 21), 'flask.Flask', 'Flask', ({(8, 12, 8, 20): '__name__'}, {}), '(__name__)', False, 'from flask import Flask\n'), ((9, 0, 9, 9), 'flask_cors.CORS', 'CORS', ({(9, 5, 9, 8): 'app'}, {}), '(app)', False, 'from flask_cors import CORS\n'), ((10, 6, 10, 14), 'flask_restful.Api', 'Api', ({(10, 10, 10, 13): 'app'}, {}), '(app)', False, 'from flask_restful import Api\n'), ((21, 5, 21, 20), 'flask_sqlalchemy.SQLAlchemy', 'SQLAlchemy', ({(21, 16, 21, 19): 'app'}, {}), '(app)', False, 'from flask_sqlalchemy import SQLAlchemy\n'), ((22, 6, 22, 21), 'flask_jwt_extended.JWTManager', 'JWTManager', ({(22, 17, 22, 20): 'app'}, {}), '(app)', False, 'from flask_jwt_extended import JWTManager\n'), ((45, 11, 45, 59), 'models.RevokedTokenModel.is_jti_blacklisted', 'models.RevokedTokenModel.is_jti_blacklisted', ({(45, 55, 45, 58): 'jti'}, {}), '(jti)', False, 'import models, resources, views\n')] |
carvalhopedro22/Programas-em-python-cursos-e-geral- | Programas do Curso/Desafio 2.py | 970e1ebe6cdd1e31f52dfd60328c2203d4de3ef1 | nome = input('What is your name? ')
dia = input('What day of the month were you born? ')
mes = input('What month were you born in? ')
ano = input('What year were you born in? ')
print(nome, 'was born on', dia, 'of', mes, 'of the year', ano) | []
prorevizor/noc | cmibs/cisco_vlan_membership_mib.py | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | # ----------------------------------------------------------------------
# CISCO-VLAN-MEMBERSHIP-MIB
# Compiled MIB
# Do not modify this file directly
# Run ./noc mib make-cmib instead
# ----------------------------------------------------------------------
# Copyright (C) 2007-2020 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# MIB Name
NAME = "CISCO-VLAN-MEMBERSHIP-MIB"
# Metadata
LAST_UPDATED = "2007-12-14"
COMPILED = "2020-01-19"
# MIB Data: name -> oid
MIB = {
"CISCO-VLAN-MEMBERSHIP-MIB::ciscoVlanMembershipMIB": "1.3.6.1.4.1.9.9.68",
"CISCO-VLAN-MEMBERSHIP-MIB::ciscoVlanMembershipMIBObjects": "1.3.6.1.4.1.9.9.68.1",
"CISCO-VLAN-MEMBERSHIP-MIB::vmVmps": "1.3.6.1.4.1.9.9.68.1.1",
"CISCO-VLAN-MEMBERSHIP-MIB::vmVmpsVQPVersion": "1.3.6.1.4.1.9.9.68.1.1.1",
"CISCO-VLAN-MEMBERSHIP-MIB::vmVmpsRetries": "1.3.6.1.4.1.9.9.68.1.1.2",
"CISCO-VLAN-MEMBERSHIP-MIB::vmVmpsReconfirmInterval": "1.3.6.1.4.1.9.9.68.1.1.3",
"CISCO-VLAN-MEMBERSHIP-MIB::vmVmpsReconfirm": "1.3.6.1.4.1.9.9.68.1.1.4",
"CISCO-VLAN-MEMBERSHIP-MIB::vmVmpsReconfirmResult": "1.3.6.1.4.1.9.9.68.1.1.5",
"CISCO-VLAN-MEMBERSHIP-MIB::vmVmpsCurrent": "1.3.6.1.4.1.9.9.68.1.1.6",
"CISCO-VLAN-MEMBERSHIP-MIB::vmVmpsTable": "1.3.6.1.4.1.9.9.68.1.1.7",
"CISCO-VLAN-MEMBERSHIP-MIB::vmVmpsEntry": "1.3.6.1.4.1.9.9.68.1.1.7.1",
"CISCO-VLAN-MEMBERSHIP-MIB::vmVmpsIpAddress": "1.3.6.1.4.1.9.9.68.1.1.7.1.1",
"CISCO-VLAN-MEMBERSHIP-MIB::vmVmpsPrimary": "1.3.6.1.4.1.9.9.68.1.1.7.1.2",
"CISCO-VLAN-MEMBERSHIP-MIB::vmVmpsRowStatus": "1.3.6.1.4.1.9.9.68.1.1.7.1.3",
"CISCO-VLAN-MEMBERSHIP-MIB::vmMembership": "1.3.6.1.4.1.9.9.68.1.2",
"CISCO-VLAN-MEMBERSHIP-MIB::vmMembershipSummaryTable": "1.3.6.1.4.1.9.9.68.1.2.1",
"CISCO-VLAN-MEMBERSHIP-MIB::vmMembershipSummaryEntry": "1.3.6.1.4.1.9.9.68.1.2.1.1",
"CISCO-VLAN-MEMBERSHIP-MIB::vmMembershipSummaryVlanIndex": "1.3.6.1.4.1.9.9.68.1.2.1.1.1",
"CISCO-VLAN-MEMBERSHIP-MIB::vmMembershipSummaryMemberPorts": "1.3.6.1.4.1.9.9.68.1.2.1.1.2",
"CISCO-VLAN-MEMBERSHIP-MIB::vmMembershipSummaryMember2kPorts": "1.3.6.1.4.1.9.9.68.1.2.1.1.3",
"CISCO-VLAN-MEMBERSHIP-MIB::vmMembershipTable": "1.3.6.1.4.1.9.9.68.1.2.2",
"CISCO-VLAN-MEMBERSHIP-MIB::vmMembershipEntry": "1.3.6.1.4.1.9.9.68.1.2.2.1",
"CISCO-VLAN-MEMBERSHIP-MIB::vmVlanType": "1.3.6.1.4.1.9.9.68.1.2.2.1.1",
"CISCO-VLAN-MEMBERSHIP-MIB::vmVlan": "1.3.6.1.4.1.9.9.68.1.2.2.1.2",
"CISCO-VLAN-MEMBERSHIP-MIB::vmPortStatus": "1.3.6.1.4.1.9.9.68.1.2.2.1.3",
"CISCO-VLAN-MEMBERSHIP-MIB::vmVlans": "1.3.6.1.4.1.9.9.68.1.2.2.1.4",
"CISCO-VLAN-MEMBERSHIP-MIB::vmVlans2k": "1.3.6.1.4.1.9.9.68.1.2.2.1.5",
"CISCO-VLAN-MEMBERSHIP-MIB::vmVlans3k": "1.3.6.1.4.1.9.9.68.1.2.2.1.6",
"CISCO-VLAN-MEMBERSHIP-MIB::vmVlans4k": "1.3.6.1.4.1.9.9.68.1.2.2.1.7",
"CISCO-VLAN-MEMBERSHIP-MIB::vmMembershipSummaryExtTable": "1.3.6.1.4.1.9.9.68.1.2.3",
"CISCO-VLAN-MEMBERSHIP-MIB::vmMembershipSummaryExtEntry": "1.3.6.1.4.1.9.9.68.1.2.3.1",
"CISCO-VLAN-MEMBERSHIP-MIB::vmMembershipPortRangeIndex": "1.3.6.1.4.1.9.9.68.1.2.3.1.1",
"CISCO-VLAN-MEMBERSHIP-MIB::vmMembershipSummaryExtPorts": "1.3.6.1.4.1.9.9.68.1.2.3.1.2",
"CISCO-VLAN-MEMBERSHIP-MIB::vmVlanCreationMode": "1.3.6.1.4.1.9.9.68.1.2.4",
"CISCO-VLAN-MEMBERSHIP-MIB::vmStatistics": "1.3.6.1.4.1.9.9.68.1.3",
"CISCO-VLAN-MEMBERSHIP-MIB::vmVQPQueries": "1.3.6.1.4.1.9.9.68.1.3.1",
"CISCO-VLAN-MEMBERSHIP-MIB::vmVQPResponses": "1.3.6.1.4.1.9.9.68.1.3.2",
"CISCO-VLAN-MEMBERSHIP-MIB::vmVmpsChanges": "1.3.6.1.4.1.9.9.68.1.3.3",
"CISCO-VLAN-MEMBERSHIP-MIB::vmVQPShutdown": "1.3.6.1.4.1.9.9.68.1.3.4",
"CISCO-VLAN-MEMBERSHIP-MIB::vmVQPDenied": "1.3.6.1.4.1.9.9.68.1.3.5",
"CISCO-VLAN-MEMBERSHIP-MIB::vmVQPWrongDomain": "1.3.6.1.4.1.9.9.68.1.3.6",
"CISCO-VLAN-MEMBERSHIP-MIB::vmVQPWrongVersion": "1.3.6.1.4.1.9.9.68.1.3.7",
"CISCO-VLAN-MEMBERSHIP-MIB::vmInsufficientResources": "1.3.6.1.4.1.9.9.68.1.3.8",
"CISCO-VLAN-MEMBERSHIP-MIB::vmStatus": "1.3.6.1.4.1.9.9.68.1.4",
"CISCO-VLAN-MEMBERSHIP-MIB::vmNotificationsEnabled": "1.3.6.1.4.1.9.9.68.1.4.1",
"CISCO-VLAN-MEMBERSHIP-MIB::vmVoiceVlan": "1.3.6.1.4.1.9.9.68.1.5",
"CISCO-VLAN-MEMBERSHIP-MIB::vmVoiceVlanTable": "1.3.6.1.4.1.9.9.68.1.5.1",
"CISCO-VLAN-MEMBERSHIP-MIB::vmVoiceVlanEntry": "1.3.6.1.4.1.9.9.68.1.5.1.1",
"CISCO-VLAN-MEMBERSHIP-MIB::vmVoiceVlanId": "1.3.6.1.4.1.9.9.68.1.5.1.1.1",
"CISCO-VLAN-MEMBERSHIP-MIB::vmVoiceVlanCdpVerifyEnable": "1.3.6.1.4.1.9.9.68.1.5.1.1.2",
"CISCO-VLAN-MEMBERSHIP-MIB::vmNotifications": "1.3.6.1.4.1.9.9.68.2",
"CISCO-VLAN-MEMBERSHIP-MIB::vmNotificationsPrefix": "1.3.6.1.4.1.9.9.68.2.0",
"CISCO-VLAN-MEMBERSHIP-MIB::vmVmpsChange": "1.3.6.1.4.1.9.9.68.2.0.1",
"CISCO-VLAN-MEMBERSHIP-MIB::vmMIBConformance": "1.3.6.1.4.1.9.9.68.3",
"CISCO-VLAN-MEMBERSHIP-MIB::vmMIBCompliances": "1.3.6.1.4.1.9.9.68.3.1",
"CISCO-VLAN-MEMBERSHIP-MIB::vmMIBGroups": "1.3.6.1.4.1.9.9.68.3.2",
}
DISPLAY_HINTS = {}
| [] |
tdimnet/integrations-core | harbor/tests/test_unit.py | a78133a3b71a1b8377fa214d121a98647031ab06 | # (C) Datadog, Inc. 2019-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import pytest
from mock import MagicMock
from requests import HTTPError
from datadog_checks.base import AgentCheck
from datadog_checks.dev.http import MockResponse
from .common import HARBOR_COMPONENTS, HARBOR_VERSION, VERSION_1_5, VERSION_1_6, VERSION_1_8
@pytest.mark.usefixtures("patch_requests")
def test_check_health(aggregator, harbor_check, harbor_api):
base_tags = ['tag1:val1', 'tag2']
harbor_check._check_health(harbor_api, base_tags)
if harbor_api.harbor_version >= VERSION_1_8:
components = HARBOR_COMPONENTS
for c in components:
aggregator.assert_service_check('harbor.status', AgentCheck.OK, tags=base_tags + ['component:{}'.format(c)])
elif harbor_api.harbor_version >= VERSION_1_6:
aggregator.assert_service_check('harbor.status', AgentCheck.OK, tags=base_tags + ['component:chartmuseum'])
aggregator.assert_service_check('harbor.status', AgentCheck.OK, tags=base_tags)
elif harbor_api.harbor_version >= VERSION_1_5:
aggregator.assert_service_check('harbor.status', AgentCheck.OK, tags=base_tags)
else:
aggregator.assert_service_check('harbor.status', AgentCheck.UNKNOWN, tags=base_tags)
@pytest.mark.usefixtures("patch_requests")
def test_check_registries_health(aggregator, harbor_check, harbor_api):
tags = ['tag1:val1', 'tag2']
harbor_check._check_registries_health(harbor_api, tags)
tags.append('registry:demo')
aggregator.assert_service_check('harbor.registry.status', AgentCheck.OK, tags=tags)
@pytest.mark.usefixtures("patch_requests")
def test_submit_project_metrics(aggregator, harbor_check, harbor_api):
tags = ['tag1:val1', 'tag2']
harbor_check._submit_project_metrics(harbor_api, tags)
aggregator.assert_metric('harbor.projects.count', 2, tags=tags)
@pytest.mark.usefixtures("patch_requests")
def test_submit_disk_metrics(aggregator, harbor_check, harbor_api):
tags = ['tag1:val1', 'tag2']
harbor_check._submit_disk_metrics(harbor_api, tags)
aggregator.assert_metric('harbor.disk.free', 5e5, tags=tags)
aggregator.assert_metric('harbor.disk.total', 1e6, tags=tags)
@pytest.mark.usefixtures("patch_requests")
@pytest.mark.skipif(HARBOR_VERSION < VERSION_1_5, reason="The registry.read_only metric is submitted for Harbor 1.5+")
def test_submit_read_only_status(aggregator, harbor_check, harbor_api):
tags = ['tag1:val1', 'tag2']
harbor_check._submit_read_only_status(harbor_api, tags)
aggregator.assert_metric('harbor.registry.read_only', 0, tags=tags)
def test_api__make_get_request(harbor_api):
harbor_api.http = MagicMock()
harbor_api.http.get = MagicMock(return_value=MockResponse(json_data={'json': True}))
assert harbor_api._make_get_request('{base_url}/api/path') == {"json": True}
harbor_api.http.get = MagicMock(return_value=MockResponse(status_code=500))
with pytest.raises(HTTPError):
harbor_api._make_get_request('{base_url}/api/path')
def test_api__make_paginated_get_request(harbor_api):
expected_result = [{'item': i} for i in range(20)]
paginated_result = [[expected_result[i], expected_result[i + 1]] for i in range(0, len(expected_result) - 1, 2)]
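    # Build one mocked response per two-item page; each advertises a 'link ... rel=next'
    # header so the client keeps paginating, and the header is dropped from the last page
    # below so that pagination stops.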
values = []
for r in paginated_result:
values.append(MockResponse(json_data=r, headers={'link': 'Link: <unused_url>; rel=next; type="text/plain"'}))
values[-1].headers.pop('link')
harbor_api.http = MagicMock()
harbor_api.http.get = MagicMock(side_effect=values)
assert harbor_api._make_paginated_get_request('{base_url}/api/path') == expected_result
def test_api__make_post_request(harbor_api):
harbor_api.http = MagicMock()
harbor_api.http.post = MagicMock(return_value=MockResponse(json_data={'json': True}))
assert harbor_api._make_post_request('{base_url}/api/path') == {"json": True}
harbor_api.http.post = MagicMock(return_value=MockResponse(status_code=500))
with pytest.raises(HTTPError):
harbor_api._make_post_request('{base_url}/api/path')
| [((14, 1, 14, 42), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', ({(14, 25, 14, 41): '"""patch_requests"""'}, {}), "('patch_requests')", False, 'import pytest\n'), ((32, 1, 32, 42), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', ({(32, 25, 32, 41): '"""patch_requests"""'}, {}), "('patch_requests')", False, 'import pytest\n'), ((40, 1, 40, 42), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', ({(40, 25, 40, 41): '"""patch_requests"""'}, {}), "('patch_requests')", False, 'import pytest\n'), ((47, 1, 47, 42), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', ({(47, 25, 47, 41): '"""patch_requests"""'}, {}), "('patch_requests')", False, 'import pytest\n'), ((55, 1, 55, 42), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', ({(55, 25, 55, 41): '"""patch_requests"""'}, {}), "('patch_requests')", False, 'import pytest\n'), ((56, 1, 56, 118), 'pytest.mark.skipif', 'pytest.mark.skipif', (), '', False, 'import pytest\n'), ((64, 22, 64, 33), 'mock.MagicMock', 'MagicMock', ({}, {}), '()', False, 'from mock import MagicMock\n'), ((81, 22, 81, 33), 'mock.MagicMock', 'MagicMock', ({}, {}), '()', False, 'from mock import MagicMock\n'), ((82, 26, 82, 55), 'mock.MagicMock', 'MagicMock', (), '', False, 'from mock import MagicMock\n'), ((88, 22, 88, 33), 'mock.MagicMock', 'MagicMock', ({}, {}), '()', False, 'from mock import MagicMock\n'), ((69, 9, 69, 33), 'pytest.raises', 'pytest.raises', ({(69, 23, 69, 32): 'HTTPError'}, {}), '(HTTPError)', False, 'import pytest\n'), ((93, 9, 93, 33), 'pytest.raises', 'pytest.raises', ({(93, 23, 93, 32): 'HTTPError'}, {}), '(HTTPError)', False, 'import pytest\n'), ((65, 49, 65, 87), 'datadog_checks.dev.http.MockResponse', 'MockResponse', (), '', False, 'from datadog_checks.dev.http import MockResponse\n'), ((68, 49, 68, 78), 'datadog_checks.dev.http.MockResponse', 'MockResponse', (), '', False, 'from datadog_checks.dev.http import MockResponse\n'), ((78, 22, 78, 116), 'datadog_checks.dev.http.MockResponse', 'MockResponse', (), '', False, 'from datadog_checks.dev.http import MockResponse\n'), ((89, 50, 89, 88), 'datadog_checks.dev.http.MockResponse', 'MockResponse', (), '', False, 'from datadog_checks.dev.http import MockResponse\n'), ((92, 50, 92, 79), 'datadog_checks.dev.http.MockResponse', 'MockResponse', (), '', False, 'from datadog_checks.dev.http import MockResponse\n')] |
CN-UPB/SPRING | M-SPRING/template/adapter.py | 1cb74919689e832987cb2c9b490eec7f09a64f52 | # module for adapting templates on the fly if components are reused
# check that all reused components are defined consistently -> else: exception
def check_consistency(components):
for j1 in components:
for j2 in components: # compare all components
if j1 == j2 and j1.__dict__ != j2.__dict__: # same name and reuseID but different other attributes
raise ValueError("Inconsistent definition of reused component {}.".format(j1))
# check and return number of reuses
def reuses(component, arcs):
# count number of reuses for each port
times = set() # set => no duplicates
for k in range(component.inputs):
times.add(len([a for a in arcs if a.ends_in(k, component)]))
for k in range(component.outputs):
times.add(len([a for a in arcs if a.starts_at(k, component)]))
# check if each port was reused the same number of times (requirement/assumption)
if len(times) != 1:
raise ValueError("Not all ports of {} are (re-)used the same number of times (required).".format(component))
return times.pop()
# return adapted templates with adapted reused components and exactly one arc per port (allows proportional output)
def adapt_for_reuse(templates):
# create set of components and arcs
arcs = []
for t in templates:
arcs += t.arcs
# find reused components and adapt them
component_reuses = {} # dictionary with components-#reuses
reused_components = [] # list of all reused components (contains duplicates) for consistency check
for t in templates:
for j in t.components:
uses = reuses(j, arcs)
if uses > 1: # used by >1 => reuse
if j.source:
raise ValueError("Source component {} cannot be reused".format(j))
j.adapt(uses) # add ports and functions on the fly
component_reuses[j] = uses
reused_components.append(j)
check_consistency(reused_components) # check consistent def of reused components
# adjust arcs to use new ports
for j in component_reuses:
uses = component_reuses[j]
port_offset = 0
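        # Each template whose arcs touch the reused component gets its own block of the newly
        # added ports: the first such template keeps the original port numbers (offset 0), the
        # next one is shifted by 1, and so on up to #reuses - 1.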
for t in templates:
# adjust/shift ingoing arcs by offset to correct port
arc_shifted = False
for a in t.arcs:
if a.dest == j:
a.dest_in += port_offset
arc_shifted = True
if a.source == j:
a.src_out += port_offset
arc_shifted = True
# increase the offset for the next template if an arc was shifted
if arc_shifted:
if port_offset >= uses: # arc was shifted too often: something went wrong
raise ValueError("Port offset {} too high. Should be < {} (#reuses).".format(port_offset, uses))
port_offset += 1
return templates
| [] |
AllanLRH/column_completer | column_completer.py | c1a0e1915256a4e3825c5c3b9863d78fdaf50be1 | class ColumnCompleter(object):
"""Complete Pandas DataFrame column names"""
def __init__(self, df, space_filler='_', silence_warnings=False):
"""
Once instantiated with a Pandas DataFrame, it will expose the column
        names as attributes which map to their string counterparts.
        Autocompletion is supported.
        Spaces in the column names are by default replaced with underscores, though
        these still map to the original column names; the replacement is necessary to
        conform to valid Python syntax.
Parameters
----------
df : pd.DataFrame
DataFrame whose column names to expose.
space_filler : str, optional
            String to replace spaces in column names, by default '_'.
silence_warnings : bool, optional
            Set to True to disable warnings about column names which start or end
            with spaces, which is hard to detect by visual inspection, by default False.
"""
super(ColumnCompleter, self).__init__()
        # We copy the columns so that we don't hold a reference that would prevent an
        # otherwise unused DataFrame from being garbage collected.
self.columns = df.columns.copy()
self.space_filler = space_filler
self.silence_warnings = silence_warnings
if not self.silence_warnings:
self._warn_about_column_names_edge_spaces()
self._set_columns()
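    # Illustrative usage (assumed column name): for a df with a column "mean temp",
    #   cols = ColumnCompleter(df)
    #   df[cols.mean_temp]   # tab-completable attribute; resolves to the original "mean temp"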
def _warn_about_column_names_edge_spaces(self):
if not hasattr(self.columns, 'str'): # the column names are not strings
return None
if self.columns.str.startswith(' ').any():
            raise Warning("The following columns start with one or more spaces: " +
                          ", ".join(self.columns[self.columns.str.startswith(' ')]))
        if self.columns.str.endswith(' ').any():
            raise Warning("The following columns end with one or more spaces: " +
                          ", ".join(self.columns[self.columns.str.endswith(' ')]))
def _set_columns(self):
if not hasattr(self.columns, 'str'): # the column names are not strings
self.mapping = {col: col for col in self.columns}
elif self.space_filler is None:
self.mapping = {col: col for col in self.columns if ' ' not in col}
else:
self.mapping = {col.replace(
' ', self.space_filler): col for col in self.columns}
if len(self.mapping) < len(self.columns):
                raise ValueError("Using {} as a replacement for".format(repr(self.space_filler)) +
                                 " spaces causes a collision of column names, please choose another.")
self.keys = self.mapping.keys()
if len(self.keys) < len(self.columns) and not self.silence_warnings:
raise Warning("Without a space_filler specified, you're only able to autocomplete " +
"{} of {} column names.".format(len(self.keys), len(self.columns)))
@staticmethod
def replace_df_column_spaces(df, rep, capatilize_first_letter=False):
"""
Return a DataFrame with the spaces in the column names replaced with a custom string.
Parameters
----------
df : pd.DataFrame
            DataFrame whose columns to rename.
rep : str
String to replace spaces with.
capatilize_first_letter : bool, optional
            If True, the first letter of the renamed columns will be capitalized, by default False.
Returns
-------
pd.DataFrame
DataFrame with renamed columns.
Raises
------
ValueError
If the renaming of the columns causes one or more column names to be identical.
"""
rename_dict = {col: col.replace(' ', rep) for col in df.columns}
if len(set(rename_dict.values())) < len(df.columns.unique()):
raise ValueError("Renaming the columns in such a way would cause a " +
"collision of column names.")
if capatilize_first_letter:
rename_dict = {k: v[0].upper() + v[1:]
for k, v in rename_dict.items()}
return df.rename(columns=rename_dict)
def __dir__(self):
return self.keys
def __getattr__(self, key):
return self.mapping[key]
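# Illustrative usage sketch (not part of the original module); assumes pandas is
# installed and importable as pd.
if __name__ == "__main__":
    import pandas as pd
    df = pd.DataFrame({"first name": ["Ada", "Linus"], "age": [36, 52]})
    cols = ColumnCompleter(df)
    print(cols.first_name)           # -> 'first name' (the original column label)
    print(df[cols.first_name])       # the attribute can be used to index the DataFrame
    renamed = ColumnCompleter.replace_df_column_spaces(df, "_")
    print(renamed.columns.tolist())  # -> ['first_name', 'age']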
| [] |
ramkrsna/virtual-storage-manager | source/vsm-dashboard/vsm_dashboard/test/test_data/swift_data.py | 78125bfb4dd4d78ff96bc3274c8919003769c545 | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from vsm_dashboard.api import swift
from .utils import TestDataContainer
def data(TEST):
TEST.containers = TestDataContainer()
TEST.objects = TestDataContainer()
container_1 = swift.Container(dict(name=u"container_one\u6346"))
container_2 = swift.Container(dict(name=u"container_two\u6346"))
TEST.containers.add(container_1, container_2)
object_dict = {"name": u"test_object\u6346",
"content_type": u"text/plain",
"bytes": 128,
"last_modified": None,
"hash": u"object_hash"}
obj_dicts = [object_dict]
obj_data = "Fake Data"
for obj_dict in obj_dicts:
swift_object = swift.StorageObject(obj_dict,
container_1.name,
data=obj_data)
TEST.objects.add(swift_object)
| [((35, 23, 37, 57), 'vsm_dashboard.api.swift.StorageObject', 'swift.StorageObject', (), '', False, 'from vsm_dashboard.api import swift\n')] |
liangintel/stx-cinder | cinder/backup/driver.py | f4c43797a3f8c0caebfd8fb67244c084d26d9741 | # Copyright (C) 2013 Deutsche Telekom AG
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base class for all backup drivers."""
import abc
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
import six
from cinder.db import base
from cinder import exception
from cinder.i18n import _
from cinder import keymgr as key_manager
service_opts = [
cfg.IntOpt('backup_metadata_version', default=2,
help='Backup metadata version to be used when backing up '
'volume metadata. If this number is bumped, make sure the '
'service doing the restore supports the new version.'),
cfg.IntOpt('backup_object_number_per_notification',
default=10,
help='The number of chunks or objects, for which one '
'Ceilometer notification will be sent'),
cfg.IntOpt('backup_timer_interval',
default=120,
help='Interval, in seconds, between two progress notifications '
'reporting the backup status'),
]
CONF = cfg.CONF
CONF.register_opts(service_opts)
LOG = logging.getLogger(__name__)
class BackupMetadataAPI(base.Base):
TYPE_TAG_VOL_BASE_META = 'volume-base-metadata'
TYPE_TAG_VOL_META = 'volume-metadata'
TYPE_TAG_VOL_GLANCE_META = 'volume-glance-metadata'
def __init__(self, context, db=None):
super(BackupMetadataAPI, self).__init__(db)
self.context = context
@staticmethod
def _is_serializable(value):
"""Returns True if value is serializable."""
try:
jsonutils.dumps(value)
except TypeError:
LOG.info("Value with type=%s is not serializable",
type(value))
return False
return True
def _save_vol_base_meta(self, container, volume_id):
"""Save base volume metadata to container.
This will fetch all fields from the db Volume object for volume_id and
save them in the provided container dictionary.
"""
type_tag = self.TYPE_TAG_VOL_BASE_META
LOG.debug("Getting metadata type '%s'", type_tag)
meta = self.db.volume_get(self.context, volume_id)
if meta:
container[type_tag] = {}
for key, value in meta:
# Exclude fields that are "not JSON serializable"
if not self._is_serializable(value):
LOG.info("Unable to serialize field '%s' - excluding "
"from backup", key)
continue
# Copy the encryption key UUID for backup
                if key == 'encryption_key_id' and value is not None:
km = key_manager.API(CONF)
value = km.store(self.context, km.get(self.context, value))
LOG.debug("Copying encryption key UUID for backup.")
container[type_tag][key] = value
LOG.debug("Completed fetching metadata type '%s'", type_tag)
else:
LOG.debug("No metadata type '%s' available", type_tag)
def _save_vol_meta(self, container, volume_id):
"""Save volume metadata to container.
This will fetch all fields from the db VolumeMetadata object for
volume_id and save them in the provided container dictionary.
"""
type_tag = self.TYPE_TAG_VOL_META
LOG.debug("Getting metadata type '%s'", type_tag)
meta = self.db.volume_metadata_get(self.context, volume_id)
if meta:
container[type_tag] = {}
for entry in meta:
# Exclude fields that are "not JSON serializable"
if not self._is_serializable(meta[entry]):
LOG.info("Unable to serialize field '%s' - excluding "
"from backup", entry)
continue
container[type_tag][entry] = meta[entry]
LOG.debug("Completed fetching metadata type '%s'", type_tag)
else:
LOG.debug("No metadata type '%s' available", type_tag)
def _save_vol_glance_meta(self, container, volume_id):
"""Save volume Glance metadata to container.
This will fetch all fields from the db VolumeGlanceMetadata object for
volume_id and save them in the provided container dictionary.
"""
type_tag = self.TYPE_TAG_VOL_GLANCE_META
LOG.debug("Getting metadata type '%s'", type_tag)
try:
meta = self.db.volume_glance_metadata_get(self.context, volume_id)
if meta:
container[type_tag] = {}
for entry in meta:
# Exclude fields that are "not JSON serializable"
if not self._is_serializable(entry.value):
LOG.info("Unable to serialize field '%s' - "
"excluding from backup", entry)
continue
container[type_tag][entry.key] = entry.value
LOG.debug("Completed fetching metadata type '%s'", type_tag)
except exception.GlanceMetadataNotFound:
LOG.debug("No metadata type '%s' available", type_tag)
@staticmethod
def _filter(metadata, fields, excludes=None):
"""Returns set of metadata restricted to required fields.
If fields is empty list, the full set is returned.
:param metadata: master set of metadata
:param fields: list of fields we want to extract
:param excludes: fields to be excluded
:returns: filtered metadata
"""
if not fields:
return metadata
if not excludes:
excludes = []
subset = {}
for field in fields:
if field in metadata and field not in excludes:
subset[field] = metadata[field]
else:
LOG.debug("Excluding field '%s'", field)
return subset
def _restore_vol_base_meta(self, metadata, volume_id, fields):
"""Restore values to Volume object for provided fields."""
LOG.debug("Restoring volume base metadata")
excludes = []
# Ignore unencrypted backups.
key = 'encryption_key_id'
if key in fields and key in metadata and metadata[key] is not None:
self._restore_vol_encryption_meta(volume_id,
metadata['volume_type_id'])
# NOTE(dosaboy): if the target volume looks like it was auto-created
# as part of this restore operation and we have a name to restore
# then apply the name to the target volume. However, if that target
# volume already existed and it has a name or we do not have a name to
# restore, then ignore this key. This is intended to be a less drastic
# solution than commit 7ee80f7.
key = 'display_name'
if key in fields and key in metadata:
target_vol = self.db.volume_get(self.context, volume_id)
name = target_vol.get(key, '')
if (not metadata.get(key) or name and
not name.startswith('restore_backup_')):
excludes.append(key)
excludes.append('display_description')
metadata = self._filter(metadata, fields, excludes=excludes)
self.db.volume_update(self.context, volume_id, metadata)
def _restore_vol_encryption_meta(self, volume_id, src_volume_type_id):
"""Restores the volume_type_id for encryption if needed.
Only allow restoration of an encrypted backup if the destination
volume has the same volume type as the source volume. Otherwise
encryption will not work. If volume types are already the same,
no action is needed.
"""
dest_vol = self.db.volume_get(self.context, volume_id)
if dest_vol['volume_type_id'] != src_volume_type_id:
LOG.debug("Volume type id's do not match.")
# If the volume types do not match, and the destination volume
# does not have a volume type, force the destination volume
# to have the encrypted volume type, provided it still exists.
if dest_vol['volume_type_id'] is None:
try:
self.db.volume_type_get(
self.context, src_volume_type_id)
except exception.VolumeTypeNotFound:
LOG.debug("Volume type of source volume has been "
"deleted. Encrypted backup restore has "
"failed.")
msg = _("The source volume type '%s' is not "
"available.") % (src_volume_type_id)
raise exception.EncryptedBackupOperationFailed(msg)
# Update dest volume with src volume's volume_type_id.
LOG.debug("The volume type of the destination volume "
"will become the volume type of the source "
"volume.")
self.db.volume_update(self.context, volume_id,
{'volume_type_id': src_volume_type_id})
else:
# Volume type id's do not match, and destination volume
# has a volume type. Throw exception.
LOG.warning("Destination volume type is different from "
"source volume type for an encrypted volume. "
"Encrypted backup restore has failed.")
msg = (_("The source volume type '%(src)s' is different "
"than the destination volume type '%(dest)s'.") %
{'src': src_volume_type_id,
'dest': dest_vol['volume_type_id']})
raise exception.EncryptedBackupOperationFailed(msg)
def _restore_vol_meta(self, metadata, volume_id, fields):
"""Restore values to VolumeMetadata object for provided fields."""
LOG.debug("Restoring volume metadata")
metadata = self._filter(metadata, fields)
self.db.volume_metadata_update(self.context, volume_id, metadata, True)
def _restore_vol_glance_meta(self, metadata, volume_id, fields):
"""Restore values to VolumeGlanceMetadata object for provided fields.
First delete any existing metadata then save new values.
"""
LOG.debug("Restoring volume glance metadata")
metadata = self._filter(metadata, fields)
self.db.volume_glance_metadata_delete_by_volume(self.context,
volume_id)
for key, value in metadata.items():
self.db.volume_glance_metadata_create(self.context,
volume_id,
key, value)
# Now mark the volume as bootable
self.db.volume_update(self.context, volume_id,
{'bootable': True})
def _v1_restore_factory(self):
"""All metadata is backed up but we selectively restore.
Returns a dictionary of the form:
{<type tag>: (<restore function>, <fields list>)}
Empty field list indicates that all backed up fields should be
restored.
"""
return {self.TYPE_TAG_VOL_BASE_META:
(self._restore_vol_base_meta,
['display_name', 'display_description']),
self.TYPE_TAG_VOL_META:
(self._restore_vol_meta, []),
self.TYPE_TAG_VOL_GLANCE_META:
(self._restore_vol_glance_meta, [])}
def _v2_restore_factory(self):
"""All metadata is backed up but we selectively restore.
Returns a dictionary of the form:
{<type tag>: (<restore function>, <fields list>)}
Empty field list indicates that all backed up fields should be
restored.
"""
return {self.TYPE_TAG_VOL_BASE_META:
(self._restore_vol_base_meta,
['display_name', 'display_description', 'encryption_key_id']),
self.TYPE_TAG_VOL_META:
(self._restore_vol_meta, []),
self.TYPE_TAG_VOL_GLANCE_META:
(self._restore_vol_glance_meta, [])}
def get(self, volume_id):
"""Get volume metadata.
Returns a json-encoded dict containing all metadata and the restore
version i.e. the version used to decide what actually gets restored
from this container when doing a backup restore.
"""
container = {'version': CONF.backup_metadata_version}
self._save_vol_base_meta(container, volume_id)
self._save_vol_meta(container, volume_id)
self._save_vol_glance_meta(container, volume_id)
if container:
return jsonutils.dumps(container)
else:
return None
def put(self, volume_id, json_metadata):
"""Restore volume metadata to a volume.
The json container should contain a version that is supported here.
"""
meta_container = jsonutils.loads(json_metadata)
version = meta_container['version']
if version == 1:
factory = self._v1_restore_factory()
elif version == 2:
factory = self._v2_restore_factory()
else:
msg = (_("Unsupported backup metadata version (%s)") % (version))
raise exception.BackupMetadataUnsupportedVersion(msg)
for type in factory:
func = factory[type][0]
fields = factory[type][1]
if type in meta_container:
func(meta_container[type], volume_id, fields)
else:
LOG.debug("No metadata of type '%s' to restore", type)
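# Illustrative sketch (not part of the original module): the container produced by
# get() and consumed by put() looks roughly like this (field values hypothetical):
#     {"version": 2,
#      "volume-base-metadata": {"display_name": "vol-1", "size": 10},
#      "volume-metadata": {"attached_mode": "rw"},
#      "volume-glance-metadata": {"image_name": "cirros"}}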
@six.add_metaclass(abc.ABCMeta)
class BackupDriver(base.Base):
def __init__(self, context, db=None):
super(BackupDriver, self).__init__(db)
self.context = context
self.backup_meta_api = BackupMetadataAPI(context, db)
# This flag indicates if backup driver supports force
# deletion. So it should be set to True if the driver that inherits
# from BackupDriver supports the force deletion function.
self.support_force_delete = False
def get_metadata(self, volume_id):
return self.backup_meta_api.get(volume_id)
def put_metadata(self, volume_id, json_metadata):
self.backup_meta_api.put(volume_id, json_metadata)
@abc.abstractmethod
def backup(self, backup, volume_file, backup_metadata=False):
"""Start a backup of a specified volume.
        Some I/O operations may block greenthreads, so in order to prevent
        starvation, the volume_file parameter will be a proxy that executes all
        methods in native threads, so the method implementation doesn't need to
        worry about that.
"""
return
@abc.abstractmethod
def restore(self, backup, volume_id, volume_file):
"""Restore a saved backup.
        Some I/O operations may block greenthreads, so in order to prevent
        starvation, the volume_file parameter will be a proxy that executes all
        methods in native threads, so the method implementation doesn't need to
        worry about that.
"""
return
@abc.abstractmethod
def delete_backup(self, backup):
"""Delete a saved backup."""
return
def export_record(self, backup):
"""Export driver specific backup record information.
        If the backup backend needs additional driver-specific information to
        import the backup record back into the system, it must override this method
        and return that information here as a dictionary so it can be serialized
        into a string.
Default backup driver implementation has no extra information.
:param backup: backup object to export
:returns: driver_info - dictionary with extra information
"""
return {}
def import_record(self, backup, driver_info):
"""Import driver specific backup record information.
        If the backup backend needs additional driver-specific information to
        import the backup record back into the system, it must override this method,
        since it will be called with the extra information that was provided by
        export_record when exporting the backup.
Default backup driver implementation does nothing since it didn't
export any specific data in export_record.
:param backup: backup object to export
:param driver_info: dictionary with driver specific backup record
information
:returns: nothing
"""
return
def check_for_setup_error(self):
"""Method for checking if backup backend is successfully installed."""
return
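# Illustrative sketch (not part of the original module): a concrete driver only has to
# implement the three abstract methods above. The class below is hypothetical, with
# error handling and chunking omitted.
#     class NoopBackupDriver(BackupDriver):
#         def backup(self, backup, volume_file, backup_metadata=False):
#             volume_file.read()      # drain the volume proxy and discard the data
#         def restore(self, backup, volume_id, volume_file):
#             volume_file.write(b'')  # nothing was stored, so nothing is restored
#         def delete_backup(self, backup):
#             pass                    # nothing to clean up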
@six.add_metaclass(abc.ABCMeta)
class BackupDriverWithVerify(BackupDriver):
@abc.abstractmethod
def verify(self, backup):
"""Verify that the backup exists on the backend.
Verify that the backup is OK, possibly following an import record
operation.
:param backup: backup id of the backup to verify
:raises InvalidBackup, NotImplementedError:
"""
return
| [((48, 6, 48, 33), 'oslo_log.log.getLogger', 'logging.getLogger', ({(48, 24, 48, 32): '__name__'}, {}), '(__name__)', True, 'from oslo_log import log as logging\n'), ((347, 1, 347, 31), 'six.add_metaclass', 'six.add_metaclass', ({(347, 19, 347, 30): 'abc.ABCMeta'}, {}), '(abc.ABCMeta)', False, 'import six\n'), ((430, 1, 430, 31), 'six.add_metaclass', 'six.add_metaclass', ({(430, 19, 430, 30): 'abc.ABCMeta'}, {}), '(abc.ABCMeta)', False, 'import six\n'), ((31, 4, 34, 74), 'oslo_config.cfg.IntOpt', 'cfg.IntOpt', (), '', False, 'from oslo_config import cfg\n'), ((35, 4, 38, 59), 'oslo_config.cfg.IntOpt', 'cfg.IntOpt', (), '', False, 'from oslo_config import cfg\n'), ((39, 4, 42, 50), 'oslo_config.cfg.IntOpt', 'cfg.IntOpt', (), '', False, 'from oslo_config import cfg\n'), ((328, 25, 328, 55), 'oslo_serialization.jsonutils.loads', 'jsonutils.loads', ({(328, 41, 328, 54): 'json_metadata'}, {}), '(json_metadata)', False, 'from oslo_serialization import jsonutils\n'), ((65, 12, 65, 34), 'oslo_serialization.jsonutils.dumps', 'jsonutils.dumps', ({(65, 28, 65, 33): 'value'}, {}), '(value)', False, 'from oslo_serialization import jsonutils\n'), ((319, 19, 319, 45), 'oslo_serialization.jsonutils.dumps', 'jsonutils.dumps', ({(319, 35, 319, 44): 'container'}, {}), '(container)', False, 'from oslo_serialization import jsonutils\n'), ((244, 22, 244, 67), 'cinder.exception.EncryptedBackupOperationFailed', 'exception.EncryptedBackupOperationFailed', ({(244, 63, 244, 66): 'msg'}, {}), '(msg)', False, 'from cinder import exception\n'), ((336, 18, 336, 65), 'cinder.exception.BackupMetadataUnsupportedVersion', 'exception.BackupMetadataUnsupportedVersion', ({(336, 61, 336, 64): 'msg'}, {}), '(msg)', False, 'from cinder import exception\n'), ((92, 25, 92, 46), 'cinder.keymgr.API', 'key_manager.API', ({(92, 41, 92, 45): 'CONF'}, {}), '(CONF)', True, 'from cinder import keymgr as key_manager\n'), ((240, 23, 241, 72), 'cinder.i18n._', '_', ({(240, 25, 241, 71): '"""The source volume type \'%(src)s\' is different than the destination volume type \'%(dest)s\'."""'}, {}), '("The source volume type \'%(src)s\' is different than the destination volume type \'%(dest)s\'."\n )', False, 'from cinder.i18n import _\n'), ((335, 19, 335, 64), 'cinder.i18n._', '_', ({(335, 21, 335, 63): '"""Unsupported backup metadata version (%s)"""'}, {}), "('Unsupported backup metadata version (%s)')", False, 'from cinder.i18n import _\n'), ((227, 26, 227, 71), 'cinder.exception.EncryptedBackupOperationFailed', 'exception.EncryptedBackupOperationFailed', ({(227, 67, 227, 70): 'msg'}, {}), '(msg)', False, 'from cinder import exception\n'), ((225, 26, 226, 41), 'cinder.i18n._', '_', ({(225, 28, 226, 40): '"""The source volume type \'%s\' is not available."""'}, {}), '("The source volume type \'%s\' is not available.")', False, 'from cinder.i18n import _\n')] |
ENDERZOMBI102/chained | __init__.py | d01f04d1eb9a913f64cea9da52e61d91300315ff | from .chainOpen import chainOpen
__all__ = [
'chainOpen'
] | [] |
andrewsu/RTX | code/reasoningtool/tests/QuerySciGraphTests.py | dd1de262d0817f7e6d2f64e5bec7d5009a3a2740 | import unittest
from QuerySciGraph import QuerySciGraph
class QuerySciGraphTestCase(unittest.TestCase):
def test_get_disont_ids_for_mesh_id(self):
disont_ids = QuerySciGraph.get_disont_ids_for_mesh_id('MESH:D005199')
known_ids = {'DOID:13636'}
self.assertSetEqual(disont_ids, known_ids)
def test_query_sub_phenotypes_for_phenotype(self):
sub_phenotypes = QuerySciGraph.query_sub_phenotypes_for_phenotype("HP:0000107") # Renal cyst
known_phenotypes = {'HP:0100877': 'Renal diverticulum',
'HP:0000108': 'Renal corticomedullary cysts',
'HP:0000803': 'Renal cortical cysts',
'HP:0000003': 'Multicystic kidney dysplasia',
'HP:0008659': 'Multiple small medullary renal cysts',
'HP:0005562': 'Multiple renal cysts',
'HP:0000800': 'Cystic renal dysplasia',
'HP:0012581': 'Solitary renal cyst'}
self.assertDictEqual(sub_phenotypes, known_phenotypes)
if __name__ == '__main__':
unittest.main()
| [((25, 4, 25, 19), 'unittest.main', 'unittest.main', ({}, {}), '()', False, 'import unittest\n'), ((7, 21, 7, 77), 'QuerySciGraph.QuerySciGraph.get_disont_ids_for_mesh_id', 'QuerySciGraph.get_disont_ids_for_mesh_id', ({(7, 62, 7, 76): '"""MESH:D005199"""'}, {}), "('MESH:D005199')", False, 'from QuerySciGraph import QuerySciGraph\n'), ((12, 25, 12, 87), 'QuerySciGraph.QuerySciGraph.query_sub_phenotypes_for_phenotype', 'QuerySciGraph.query_sub_phenotypes_for_phenotype', ({(12, 74, 12, 86): '"""HP:0000107"""'}, {}), "('HP:0000107')", False, 'from QuerySciGraph import QuerySciGraph\n')] |
gianghta/Ledis | ledis/cli.py | a6b31617621746344408ee411cf510ef3cfb2e7b | from typing import Any
from ledis import Ledis
from ledis.exceptions import InvalidUsage
class CLI:
__slots__ = {"ledis", "commands"}
def __init__(self):
self.ledis = Ledis()
self.commands = {
"set": self.ledis.set,
"get": self.ledis.get,
"sadd": self.ledis.sadd,
"srem": self.ledis.srem,
"smembers": self.ledis.smembers,
"sinter": self.ledis.sinter,
"keys": self.ledis.keys,
"del": self.ledis.delete,
"expire": self.ledis.expire,
"ttl": self.ledis.ttl,
"save": self.ledis.save,
"restore": self.ledis.restore,
}
def call(self, query: str) -> Any:
if " " in query:
command, data = query.split(" ", 1)
data = data.split()
else:
command = query
data = []
if command.lower() not in self.commands:
allowed_commands = ", ".join(key.upper() for key in self.commands)
raise InvalidUsage(
f"Command '{command}' is invalid. "
f"Allowed commands are {allowed_commands}."
)
try:
return self.commands[command.lower()](*data)
except TypeError:
raise InvalidUsage("Invalid command format")
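# Illustrative usage sketch (not part of the original module); assumes the Ledis
# backend accepts the positional arguments exactly as they are dispatched above.
if __name__ == "__main__":
    cli = CLI()
    cli.call("set greeting hello")   # dispatched to self.ledis.set("greeting", "hello")
    print(cli.call("get greeting"))  # dispatched to self.ledis.get("greeting")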
| [((11, 21, 11, 28), 'ledis.Ledis', 'Ledis', ({}, {}), '()', False, 'from ledis import Ledis\n'), ((37, 18, 40, 13), 'ledis.exceptions.InvalidUsage', 'InvalidUsage', ({(38, 16, 39, 59): 'f"""Command \'{command}\' is invalid. Allowed commands are {allowed_commands}."""'}, {}), '(\n f"Command \'{command}\' is invalid. Allowed commands are {allowed_commands}."\n )', False, 'from ledis.exceptions import InvalidUsage\n'), ((45, 18, 45, 56), 'ledis.exceptions.InvalidUsage', 'InvalidUsage', ({(45, 31, 45, 55): '"""Invalid command format"""'}, {}), "('Invalid command format')", False, 'from ledis.exceptions import InvalidUsage\n')] |
nazhanshaberi/miniature-octo-barnacle | ClosedLoopTF.py | eb1a8b5366003bf2d0f7e89af9d9dea120965f4f | #group 1: Question 1(b)
# A control system for positioning the head of a laser printer has the closed loop transfer function:
# !pip install control
import matplotlib.pyplot as plt
import control
a=10 #Value for a
b=50 #value for b
sys1 = control.tf(20*b,[1,20+a,b+20*a,20*b])
print('3rd order system transfer function T1(s)=',sys1)
sys2=control.tf(b,[1,a,b])
print('2nd order system transfer function T2(s)',sys2)
value = sys1.pole()
list_of_poles = [pole.round(2) for pole in value]
print('poles',list_of_poles)
y1=control.step_response(sys1)
y2=control.step_response(sys2)
plt.plot(y1[0],y1[1],'r--', label='3rd order actual system')
plt.plot(y2[0],y2[1],'g', label='2nd order approximation system')
plt.legend()
plt.grid()
plt.xlabel('time (s)')
plt.ylabel('step response y(t)')
plt.title('step response comparison of 3rd and 2nd order system')
plt.show()
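# Illustrative addition (not in the original script): the 2nd order model is a good
# approximation when its poles match the dominant (slowest) poles listed above.
print('2nd order poles', [pole.round(2) for pole in sys2.pole()])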
| [((10, 7, 10, 44), 'control.tf', 'control.tf', ({(10, 18, 10, 22): '20 * b', (10, 23, 10, 43): '[1, 20 + a, b + 20 * a, 20 * b]'}, {}), '(20 * b, [1, 20 + a, b + 20 * a, 20 * b])', False, 'import control\n'), ((12, 5, 12, 26), 'control.tf', 'control.tf', ({(12, 16, 12, 17): 'b', (12, 18, 12, 25): '[1, a, b]'}, {}), '(b, [1, a, b])', False, 'import control\n'), ((19, 3, 19, 30), 'control.step_response', 'control.step_response', ({(19, 25, 19, 29): 'sys1'}, {}), '(sys1)', False, 'import control\n'), ((20, 3, 20, 30), 'control.step_response', 'control.step_response', ({(20, 25, 20, 29): 'sys2'}, {}), '(sys2)', False, 'import control\n'), ((21, 0, 21, 60), 'matplotlib.pyplot.plot', 'plt.plot', (), '', True, 'import matplotlib.pyplot as plt\n'), ((22, 0, 22, 65), 'matplotlib.pyplot.plot', 'plt.plot', (), '', True, 'import matplotlib.pyplot as plt\n'), ((23, 0, 23, 12), 'matplotlib.pyplot.legend', 'plt.legend', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((24, 0, 24, 10), 'matplotlib.pyplot.grid', 'plt.grid', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((25, 0, 25, 22), 'matplotlib.pyplot.xlabel', 'plt.xlabel', ({(25, 11, 25, 21): '"""time (s)"""'}, {}), "('time (s)')", True, 'import matplotlib.pyplot as plt\n'), ((26, 0, 26, 32), 'matplotlib.pyplot.ylabel', 'plt.ylabel', ({(26, 11, 26, 31): '"""step response y(t)"""'}, {}), "('step response y(t)')", True, 'import matplotlib.pyplot as plt\n'), ((27, 0, 27, 65), 'matplotlib.pyplot.title', 'plt.title', ({(27, 10, 27, 64): '"""step response comparison of 3rd and 2nd order system"""'}, {}), "('step response comparison of 3rd and 2nd order system')", True, 'import matplotlib.pyplot as plt\n'), ((28, 0, 28, 10), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n')] |
bastiedotorg/django-precise-bbcode | example_project/test_messages/bbcode_tags.py | 567a8a7f104fb7f2c9d59f304791e53d2d8f4dea | import re
from precise_bbcode.bbcode.tag import BBCodeTag
from precise_bbcode.tag_pool import tag_pool
color_re = re.compile(r'^([a-z]+|#[0-9abcdefABCDEF]{3,6})$')
class SubTag(BBCodeTag):
name = 'sub'
def render(self, value, option=None, parent=None):
return '<sub>%s</sub>' % value
class PreTag(BBCodeTag):
name = 'pre'
render_embedded = False
def render(self, value, option=None, parent=None):
return '<pre>%s</pre>' % value
class SizeTag(BBCodeTag):
name = 'size'
definition_string = '[size={RANGE=4,7}]{TEXT}[/size]'
format_string = '<span style="font-size:{RANGE=4,7}px;">{TEXT}</span>'
class FruitTag(BBCodeTag):
name = 'fruit'
definition_string = '[fruit]{CHOICE=tomato,orange,apple}[/fruit]'
format_string = '<h5>{CHOICE=tomato,orange,apple}</h5>'
class PhoneLinkTag(BBCodeTag):
name = 'phone'
definition_string = '[phone]{PHONENUMBER}[/phone]'
format_string = '<a href="tel:{PHONENUMBER}">{PHONENUMBER}</a>'
def render(self, value, option=None, parent=None):
href = 'tel:{}'.format(value)
        return '<a href="{0}">{1}</a>'.format(href, value)
class StartsWithATag(BBCodeTag):
name = 'startswitha'
definition_string = '[startswitha]{STARTSWITH=a}[/startswitha]'
format_string = '<span>{STARTSWITH=a}</span>'
class RoundedBBCodeTag(BBCodeTag):
name = 'rounded'
class Options:
strip = False
def render(self, value, option=None, parent=None):
if option and re.search(color_re, option) is not None:
return '<div class="rounded" style="border-color:{};">{}</div>'.format(option, value)
return '<div class="rounded">{}</div>'.format(value)
tag_pool.register_tag(SubTag)
tag_pool.register_tag(PreTag)
tag_pool.register_tag(SizeTag)
tag_pool.register_tag(FruitTag)
tag_pool.register_tag(PhoneLinkTag)
tag_pool.register_tag(StartsWithATag)
tag_pool.register_tag(RoundedBBCodeTag)
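# Illustrative sketch (not part of the original module): the render() methods defined
# above can be exercised directly; hypothetical inputs and expected output:
#     SubTag().render('2') -> '<sub>2</sub>'
#     PhoneLinkTag().render('5551234') -> '<a href="tel:5551234">5551234</a>'
#     RoundedBBCodeTag().render('hi', option='#fff') -> '<div class="rounded" style="border-color:#fff;">hi</div>'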
| [((7, 11, 7, 60), 're.compile', 're.compile', ({(7, 22, 7, 59): '"""^([a-z]+|#[0-9abcdefABCDEF]{3,6})$"""'}, {}), "('^([a-z]+|#[0-9abcdefABCDEF]{3,6})$')", False, 'import re\n'), ((65, 0, 65, 29), 'precise_bbcode.tag_pool.tag_pool.register_tag', 'tag_pool.register_tag', ({(65, 22, 65, 28): 'SubTag'}, {}), '(SubTag)', False, 'from precise_bbcode.tag_pool import tag_pool\n'), ((66, 0, 66, 29), 'precise_bbcode.tag_pool.tag_pool.register_tag', 'tag_pool.register_tag', ({(66, 22, 66, 28): 'PreTag'}, {}), '(PreTag)', False, 'from precise_bbcode.tag_pool import tag_pool\n'), ((67, 0, 67, 30), 'precise_bbcode.tag_pool.tag_pool.register_tag', 'tag_pool.register_tag', ({(67, 22, 67, 29): 'SizeTag'}, {}), '(SizeTag)', False, 'from precise_bbcode.tag_pool import tag_pool\n'), ((68, 0, 68, 31), 'precise_bbcode.tag_pool.tag_pool.register_tag', 'tag_pool.register_tag', ({(68, 22, 68, 30): 'FruitTag'}, {}), '(FruitTag)', False, 'from precise_bbcode.tag_pool import tag_pool\n'), ((69, 0, 69, 35), 'precise_bbcode.tag_pool.tag_pool.register_tag', 'tag_pool.register_tag', ({(69, 22, 69, 34): 'PhoneLinkTag'}, {}), '(PhoneLinkTag)', False, 'from precise_bbcode.tag_pool import tag_pool\n'), ((70, 0, 70, 37), 'precise_bbcode.tag_pool.tag_pool.register_tag', 'tag_pool.register_tag', ({(70, 22, 70, 36): 'StartsWithATag'}, {}), '(StartsWithATag)', False, 'from precise_bbcode.tag_pool import tag_pool\n'), ((71, 0, 71, 39), 'precise_bbcode.tag_pool.tag_pool.register_tag', 'tag_pool.register_tag', ({(71, 22, 71, 38): 'RoundedBBCodeTag'}, {}), '(RoundedBBCodeTag)', False, 'from precise_bbcode.tag_pool import tag_pool\n'), ((60, 22, 60, 49), 're.search', 're.search', ({(60, 32, 60, 40): 'color_re', (60, 42, 60, 48): 'option'}, {}), '(color_re, option)', False, 'import re\n')] |
ramtingh/vmtk | tests/test_vmtkScripts/test_vmtksurfacescaling.py | 4d6f58ce65d73628353ba2b110cbc29a2e7aa7b3 | ## Program: VMTK
## Language: Python
## Date: January 10, 2018
## Version: 1.4
## Copyright (c) Richard Izzo, Luca Antiga, All rights reserved.
## See LICENSE file for details.
## This software is distributed WITHOUT ANY WARRANTY; without even
## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
## PURPOSE. See the above copyright notices for more information.
## Note: this code was contributed by
## Richard Izzo (Github @rlizzo)
## University at Buffalo
import pytest
import vmtk.vmtksurfacescaling as scaling
def test_isotropic_scale(aorta_surface, compare_surfaces):
name = __name__ + '_test_isotropic_scale.vtp'
scaler = scaling.vmtkSurfaceScaling()
scaler.Surface = aorta_surface
scaler.ScaleFactor = 2
scaler.Execute()
assert compare_surfaces(scaler.Surface, name, tolerance=1.0) == True
@pytest.mark.parametrize('xfactor,yfactor,zfactor,paramid', [
(2, None, None, '0'),
(None, 2, None, '1'),
(None, None, 2, '2'),
(2, 2, None, '3'),
(2, None, 2, '4'),
(None, 2, 2, '5'),
])
def test_xyz_scale_factors(aorta_surface, compare_surfaces, xfactor,
yfactor, zfactor, paramid):
name = __name__ + '_test_xyz_scale_factors_' + paramid + '.vtp'
scaler = scaling.vmtkSurfaceScaling()
scaler.Surface = aorta_surface
scaler.ScaleFactorX = xfactor
scaler.ScaleFactorY = yfactor
scaler.ScaleFactorZ = zfactor
scaler.Execute()
assert compare_surfaces(scaler.Surface, name, tolerance=1.0) == True
| [((31, 1, 38, 2), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(31, 25, 31, 58): '"""xfactor,yfactor,zfactor,paramid"""', (31, 60, 38, 1): "[(2, None, None, '0'), (None, 2, None, '1'), (None, None, 2, '2'), (2, 2,\n None, '3'), (2, None, 2, '4'), (None, 2, 2, '5')]"}, {}), "('xfactor,yfactor,zfactor,paramid', [(2, None, None,\n '0'), (None, 2, None, '1'), (None, None, 2, '2'), (2, 2, None, '3'), (2,\n None, 2, '4'), (None, 2, 2, '5')])", False, 'import pytest\n'), ((23, 13, 23, 41), 'vmtk.vmtksurfacescaling.vmtkSurfaceScaling', 'scaling.vmtkSurfaceScaling', ({}, {}), '()', True, 'import vmtk.vmtksurfacescaling as scaling\n'), ((42, 13, 42, 41), 'vmtk.vmtksurfacescaling.vmtkSurfaceScaling', 'scaling.vmtkSurfaceScaling', ({}, {}), '()', True, 'import vmtk.vmtksurfacescaling as scaling\n')] |
yangyimincn/tencentcloud-sdk-python | tencentcloud/vpc/v20170312/models.py | 1d4f1bd83bb57a91bb6d2631131a339bc1f9b91d | # -*- coding: utf8 -*-
# Copyright 1999-2017 Tencent Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tencentcloud.common.abstract_model import AbstractModel
class AccountAttribute(AbstractModel):
"""账户属性对象
"""
def __init__(self):
"""
:param AttributeName: 属性名
:type AttributeName: str
:param AttributeValues: 属性值
:type AttributeValues: list of str
"""
self.AttributeName = None
self.AttributeValues = None
def _deserialize(self, params):
self.AttributeName = params.get("AttributeName")
self.AttributeValues = params.get("AttributeValues")
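# Illustrative sketch (not part of the original module): every model in this file is
# populated the same way, by passing the matching API response dict to _deserialize();
# hypothetical values:
#     attr = AccountAttribute()
#     attr._deserialize({"AttributeName": "max-instances", "AttributeValues": ["10"]})
#     attr.AttributeName   -> "max-instances"
#     attr.AttributeValues -> ["10"]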
class Address(AbstractModel):
"""描述 EIP 信息
"""
def __init__(self):
"""
:param AddressId: `EIP`的`ID`,是`EIP`的唯一标识。
:type AddressId: str
:param AddressName: `EIP`名称。
:type AddressName: str
:param AddressStatus: `EIP`状态。
:type AddressStatus: str
:param AddressIp: 弹性外网IP
:type AddressIp: str
:param BindedResourceId: 绑定的资源实例`ID`。可能是一个`CVM`,`NAT`,或是弹性网卡。
:type BindedResourceId: str
:param CreatedTime: 创建时间。按照`ISO8601`标准表示,并且使用`UTC`时间。格式为:`YYYY-MM-DDThh:mm:ssZ`。
:type CreatedTime: str
"""
self.AddressId = None
self.AddressName = None
self.AddressStatus = None
self.AddressIp = None
self.BindedResourceId = None
self.CreatedTime = None
def _deserialize(self, params):
self.AddressId = params.get("AddressId")
self.AddressName = params.get("AddressName")
self.AddressStatus = params.get("AddressStatus")
self.AddressIp = params.get("AddressIp")
self.BindedResourceId = params.get("BindedResourceId")
self.CreatedTime = params.get("CreatedTime")
class AddressTemplate(AbstractModel):
"""IP地址模板
"""
def __init__(self):
"""
:param AddressTemplateName: IP地址模板名称。
:type AddressTemplateName: str
:param AddressTemplateId: IP地址模板实例唯一ID。
:type AddressTemplateId: str
:param AddressSet: IP地址信息。
:type AddressSet: list of str
:param CreatedTime: 创建时间。
:type CreatedTime: str
"""
self.AddressTemplateName = None
self.AddressTemplateId = None
self.AddressSet = None
self.CreatedTime = None
def _deserialize(self, params):
self.AddressTemplateName = params.get("AddressTemplateName")
self.AddressTemplateId = params.get("AddressTemplateId")
self.AddressSet = params.get("AddressSet")
self.CreatedTime = params.get("CreatedTime")
class AddressTemplateGroup(AbstractModel):
"""IP地址模板集合
"""
def __init__(self):
"""
:param AddressTemplateGroupName: IP地址模板集合名称。
:type AddressTemplateGroupName: str
:param AddressTemplateGroupId: IP地址模板集合实例ID,例如:ipmg-dih8xdbq。
:type AddressTemplateGroupId: str
:param AddressTemplateIdSet: IP地址模板ID。
:type AddressTemplateIdSet: list of str
:param CreatedTime: 创建时间。
:type CreatedTime: str
"""
self.AddressTemplateGroupName = None
self.AddressTemplateGroupId = None
self.AddressTemplateIdSet = None
self.CreatedTime = None
def _deserialize(self, params):
self.AddressTemplateGroupName = params.get("AddressTemplateGroupName")
self.AddressTemplateGroupId = params.get("AddressTemplateGroupId")
self.AddressTemplateIdSet = params.get("AddressTemplateIdSet")
self.CreatedTime = params.get("CreatedTime")
class AllocateAddressesRequest(AbstractModel):
"""AllocateAddresses请求参数结构体
"""
def __init__(self):
"""
:param AddressCount: 申请 EIP 数量,默认值为1。
:type AddressCount: int
"""
self.AddressCount = None
def _deserialize(self, params):
self.AddressCount = params.get("AddressCount")
class AllocateAddressesResponse(AbstractModel):
"""AllocateAddresses返回参数结构体
"""
def __init__(self):
"""
:param AddressSet: 申请到的 EIP 的唯一 ID 列表。
:type AddressSet: list of str
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.AddressSet = None
self.RequestId = None
def _deserialize(self, params):
self.AddressSet = params.get("AddressSet")
self.RequestId = params.get("RequestId")
class AssignPrivateIpAddressesRequest(AbstractModel):
"""AssignPrivateIpAddresses请求参数结构体
"""
def __init__(self):
"""
:param NetworkInterfaceId: 弹性网卡实例ID,例如:eni-m6dyj72l。
:type NetworkInterfaceId: str
:param PrivateIpAddresses: 指定的内网IP信息。
:type PrivateIpAddresses: list of PrivateIpAddressSpecification
:param SecondaryPrivateIpAddressCount: 新申请的内网IP地址个数。
:type SecondaryPrivateIpAddressCount: int
"""
self.NetworkInterfaceId = None
self.PrivateIpAddresses = None
self.SecondaryPrivateIpAddressCount = None
def _deserialize(self, params):
self.NetworkInterfaceId = params.get("NetworkInterfaceId")
if params.get("PrivateIpAddresses") is not None:
self.PrivateIpAddresses = []
for item in params.get("PrivateIpAddresses"):
obj = PrivateIpAddressSpecification()
obj._deserialize(item)
self.PrivateIpAddresses.append(obj)
self.SecondaryPrivateIpAddressCount = params.get("SecondaryPrivateIpAddressCount")
class AssignPrivateIpAddressesResponse(AbstractModel):
"""AssignPrivateIpAddresses返回参数结构体
"""
def __init__(self):
"""
:param PrivateIpAddressSet: 内网IP详细信息。
:type PrivateIpAddressSet: list of PrivateIpAddressSpecification
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.PrivateIpAddressSet = None
self.RequestId = None
def _deserialize(self, params):
if params.get("PrivateIpAddressSet") is not None:
self.PrivateIpAddressSet = []
for item in params.get("PrivateIpAddressSet"):
obj = PrivateIpAddressSpecification()
obj._deserialize(item)
self.PrivateIpAddressSet.append(obj)
self.RequestId = params.get("RequestId")
class AssociateAddressRequest(AbstractModel):
"""AssociateAddress请求参数结构体
"""
def __init__(self):
"""
:param AddressId: 标识 EIP 的唯一 ID。EIP 唯一 ID 形如:`eip-11112222`。
:type AddressId: str
:param InstanceId: 要绑定的实例 ID。实例 ID 形如:`ins-11112222`。可通过登录[控制台](https://console.cloud.tencent.com/cvm)查询,也可通过 [DescribeInstances](https://cloud.tencent.com/document/api/213/9389) 接口返回值中的`InstanceId`获取。
:type InstanceId: str
:param NetworkInterfaceId: 要绑定的弹性网卡 ID。 弹性网卡 ID 形如:`eni-11112222`。`NetworkInterfaceId` 与 `InstanceId` 不可同时指定。弹性网卡 ID 可通过登录[控制台](https://console.cloud.tencent.com/vpc/eni)查询,也可通过[DescribeNetworkInterfaces](https://cloud.tencent.com/document/api/215/4814)接口返回值中的`networkInterfaceId`获取。
:type NetworkInterfaceId: str
:param PrivateIpAddress: 要绑定的内网 IP。如果指定了 `NetworkInterfaceId` 则也必须指定 `PrivateIpAddress` ,表示将 EIP 绑定到指定弹性网卡的指定内网 IP 上。同时要确保指定的 `PrivateIpAddress` 是指定的 `NetworkInterfaceId` 上的一个内网 IP。指定弹性网卡的内网 IP 可通过登录[控制台](https://console.cloud.tencent.com/vpc/eni)查询,也可通过[DescribeNetworkInterfaces](https://cloud.tencent.com/document/api/215/4814)接口返回值中的`privateIpAddress`获取。
:type PrivateIpAddress: str
"""
self.AddressId = None
self.InstanceId = None
self.NetworkInterfaceId = None
self.PrivateIpAddress = None
def _deserialize(self, params):
self.AddressId = params.get("AddressId")
self.InstanceId = params.get("InstanceId")
self.NetworkInterfaceId = params.get("NetworkInterfaceId")
self.PrivateIpAddress = params.get("PrivateIpAddress")
class AssociateAddressResponse(AbstractModel):
"""AssociateAddress返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class AttachClassicLinkVpcRequest(AbstractModel):
"""AttachClassicLinkVpc请求参数结构体
"""
def __init__(self):
"""
:param VpcId: VPC实例ID
:type VpcId: str
:param InstanceIds: CVM实例ID
:type InstanceIds: list of str
"""
self.VpcId = None
self.InstanceIds = None
def _deserialize(self, params):
self.VpcId = params.get("VpcId")
self.InstanceIds = params.get("InstanceIds")
class AttachClassicLinkVpcResponse(AbstractModel):
"""AttachClassicLinkVpc返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class AttachNetworkInterfaceRequest(AbstractModel):
"""AttachNetworkInterface请求参数结构体
"""
def __init__(self):
"""
:param NetworkInterfaceId: 弹性网卡实例ID,例如:eni-m6dyj72l。
:type NetworkInterfaceId: str
:param InstanceId: CVM实例ID。形如:ins-r8hr2upy。
:type InstanceId: str
"""
self.NetworkInterfaceId = None
self.InstanceId = None
def _deserialize(self, params):
self.NetworkInterfaceId = params.get("NetworkInterfaceId")
self.InstanceId = params.get("InstanceId")
class AttachNetworkInterfaceResponse(AbstractModel):
"""AttachNetworkInterface返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class ClassicLinkInstance(AbstractModel):
"""私有网络和基础网络互通设备
"""
def __init__(self):
"""
:param VpcId: VPC实例ID
:type VpcId: str
:param InstanceId: 云服务器实例唯一ID
:type InstanceId: str
"""
self.VpcId = None
self.InstanceId = None
def _deserialize(self, params):
self.VpcId = params.get("VpcId")
self.InstanceId = params.get("InstanceId")
class CreateAddressTemplateGroupRequest(AbstractModel):
"""CreateAddressTemplateGroup请求参数结构体
"""
def __init__(self):
"""
:param AddressTemplateGroupName: IP地址模版集合名称。
:type AddressTemplateGroupName: str
:param AddressTemplateIds: IP地址模版实例ID,例如:ipm-mdunqeb6。
:type AddressTemplateIds: list of str
"""
self.AddressTemplateGroupName = None
self.AddressTemplateIds = None
def _deserialize(self, params):
self.AddressTemplateGroupName = params.get("AddressTemplateGroupName")
self.AddressTemplateIds = params.get("AddressTemplateIds")
class CreateAddressTemplateGroupResponse(AbstractModel):
"""CreateAddressTemplateGroup返回参数结构体
"""
def __init__(self):
"""
:param AddressTemplateGroup: IP地址模板集合对象。
:type AddressTemplateGroup: :class:`tencentcloud.vpc.v20170312.models.AddressTemplateGroup`
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.AddressTemplateGroup = None
self.RequestId = None
def _deserialize(self, params):
if params.get("AddressTemplateGroup") is not None:
self.AddressTemplateGroup = AddressTemplateGroup()
self.AddressTemplateGroup._deserialize(params.get("AddressTemplateGroup"))
self.RequestId = params.get("RequestId")
class CreateAddressTemplateRequest(AbstractModel):
"""CreateAddressTemplate请求参数结构体
"""
def __init__(self):
"""
:param AddressTemplateName: IP地址模版名称
:type AddressTemplateName: str
:param Addresses: 地址信息,支持 IP、CIDR、IP 范围。
:type Addresses: list of str
"""
self.AddressTemplateName = None
self.Addresses = None
def _deserialize(self, params):
self.AddressTemplateName = params.get("AddressTemplateName")
self.Addresses = params.get("Addresses")
class CreateAddressTemplateResponse(AbstractModel):
"""CreateAddressTemplate返回参数结构体
"""
def __init__(self):
"""
:param AddressTemplate: IP地址模板对象。
:type AddressTemplate: :class:`tencentcloud.vpc.v20170312.models.AddressTemplate`
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.AddressTemplate = None
self.RequestId = None
def _deserialize(self, params):
if params.get("AddressTemplate") is not None:
self.AddressTemplate = AddressTemplate()
self.AddressTemplate._deserialize(params.get("AddressTemplate"))
self.RequestId = params.get("RequestId")
class CreateCustomerGatewayRequest(AbstractModel):
"""CreateCustomerGateway请求参数结构体
"""
def __init__(self):
"""
:param CustomerGatewayName: 对端网关名称,可任意命名,但不得超过60个字符。
:type CustomerGatewayName: str
:param IpAddress: 对端网关公网IP。
:type IpAddress: str
"""
self.CustomerGatewayName = None
self.IpAddress = None
def _deserialize(self, params):
self.CustomerGatewayName = params.get("CustomerGatewayName")
self.IpAddress = params.get("IpAddress")
class CreateCustomerGatewayResponse(AbstractModel):
"""CreateCustomerGateway返回参数结构体
"""
def __init__(self):
"""
:param CustomerGateway: 对端网关对象
:type CustomerGateway: :class:`tencentcloud.vpc.v20170312.models.CustomerGateway`
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.CustomerGateway = None
self.RequestId = None
def _deserialize(self, params):
if params.get("CustomerGateway") is not None:
self.CustomerGateway = CustomerGateway()
self.CustomerGateway._deserialize(params.get("CustomerGateway"))
self.RequestId = params.get("RequestId")
class CreateDefaultVpcRequest(AbstractModel):
"""CreateDefaultVpc请求参数结构体
"""
def __init__(self):
"""
:param Zone: 子网所在的可用区ID,不指定将随机选择可用区
:type Zone: str
:param Force: 是否强制返回默认VPC
:type Force: bool
"""
self.Zone = None
self.Force = None
def _deserialize(self, params):
self.Zone = params.get("Zone")
self.Force = params.get("Force")
class CreateDefaultVpcResponse(AbstractModel):
"""CreateDefaultVpc返回参数结构体
"""
def __init__(self):
"""
:param Vpc: 默认VPC和子网ID
:type Vpc: :class:`tencentcloud.vpc.v20170312.models.DefaultVpcSubnet`
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.Vpc = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Vpc") is not None:
self.Vpc = DefaultVpcSubnet()
self.Vpc._deserialize(params.get("Vpc"))
self.RequestId = params.get("RequestId")
class CreateNetworkInterfaceRequest(AbstractModel):
"""CreateNetworkInterface请求参数结构体
"""
def __init__(self):
"""
:param VpcId: VPC实例ID。可通过DescribeVpcs接口返回值中的VpcId获取。
:type VpcId: str
:param NetworkInterfaceName: 弹性网卡名称,最大长度不能超过60个字节。
:type NetworkInterfaceName: str
:param SubnetId: 弹性网卡所在的子网实例ID,例如:subnet-0ap8nwca。
:type SubnetId: str
:param NetworkInterfaceDescription: 弹性网卡描述,可任意命名,但不得超过60个字符。
:type NetworkInterfaceDescription: str
:param SecondaryPrivateIpAddressCount: 新申请的内网IP地址个数。
:type SecondaryPrivateIpAddressCount: int
:param SecurityGroupIds: 指定绑定的安全组,例如:['sg-1dd51d']。
:type SecurityGroupIds: list of str
:param PrivateIpAddresses: 指定内网IP信息。
:type PrivateIpAddresses: list of PrivateIpAddressSpecification
"""
self.VpcId = None
self.NetworkInterfaceName = None
self.SubnetId = None
self.NetworkInterfaceDescription = None
self.SecondaryPrivateIpAddressCount = None
self.SecurityGroupIds = None
self.PrivateIpAddresses = None
def _deserialize(self, params):
self.VpcId = params.get("VpcId")
self.NetworkInterfaceName = params.get("NetworkInterfaceName")
self.SubnetId = params.get("SubnetId")
self.NetworkInterfaceDescription = params.get("NetworkInterfaceDescription")
self.SecondaryPrivateIpAddressCount = params.get("SecondaryPrivateIpAddressCount")
self.SecurityGroupIds = params.get("SecurityGroupIds")
if params.get("PrivateIpAddresses") is not None:
self.PrivateIpAddresses = []
for item in params.get("PrivateIpAddresses"):
obj = PrivateIpAddressSpecification()
obj._deserialize(item)
self.PrivateIpAddresses.append(obj)
class CreateNetworkInterfaceResponse(AbstractModel):
"""CreateNetworkInterface返回参数结构体
"""
def __init__(self):
"""
:param NetworkInterface: 弹性网卡实例。
:type NetworkInterface: :class:`tencentcloud.vpc.v20170312.models.NetworkInterface`
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.NetworkInterface = None
self.RequestId = None
def _deserialize(self, params):
if params.get("NetworkInterface") is not None:
self.NetworkInterface = NetworkInterface()
self.NetworkInterface._deserialize(params.get("NetworkInterface"))
self.RequestId = params.get("RequestId")
class CreateRouteTableRequest(AbstractModel):
"""CreateRouteTable请求参数结构体
"""
def __init__(self):
"""
:param VpcId: 待操作的VPC实例ID。可通过DescribeVpcs接口返回值中的VpcId获取。
:type VpcId: str
:param RouteTableName: 路由表名称,最大长度不能超过60个字节。
:type RouteTableName: str
"""
self.VpcId = None
self.RouteTableName = None
def _deserialize(self, params):
self.VpcId = params.get("VpcId")
self.RouteTableName = params.get("RouteTableName")
class CreateRouteTableResponse(AbstractModel):
"""CreateRouteTable返回参数结构体
"""
def __init__(self):
"""
:param RouteTable: 路由表对象。
:type RouteTable: :class:`tencentcloud.vpc.v20170312.models.RouteTable`
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.RouteTable = None
self.RequestId = None
def _deserialize(self, params):
if params.get("RouteTable") is not None:
self.RouteTable = RouteTable()
self.RouteTable._deserialize(params.get("RouteTable"))
self.RequestId = params.get("RequestId")
class CreateRoutesRequest(AbstractModel):
"""CreateRoutes请求参数结构体
"""
def __init__(self):
"""
:param RouteTableId: 路由表实例ID。
:type RouteTableId: str
:param Routes: 路由策略对象。
:type Routes: list of Route
"""
self.RouteTableId = None
self.Routes = None
def _deserialize(self, params):
self.RouteTableId = params.get("RouteTableId")
if params.get("Routes") is not None:
self.Routes = []
for item in params.get("Routes"):
obj = Route()
obj._deserialize(item)
self.Routes.append(obj)
class CreateRoutesResponse(AbstractModel):
"""CreateRoutes返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class CreateSecurityGroupPoliciesRequest(AbstractModel):
"""CreateSecurityGroupPolicies请求参数结构体
"""
def __init__(self):
"""
:param SecurityGroupId: 安全组实例ID,例如sg-33ocnj9n,可通过DescribeSecurityGroups获取。
:type SecurityGroupId: str
:param SecurityGroupPolicySet: 安全组规则集合。
:type SecurityGroupPolicySet: :class:`tencentcloud.vpc.v20170312.models.SecurityGroupPolicySet`
"""
self.SecurityGroupId = None
self.SecurityGroupPolicySet = None
def _deserialize(self, params):
self.SecurityGroupId = params.get("SecurityGroupId")
if params.get("SecurityGroupPolicySet") is not None:
self.SecurityGroupPolicySet = SecurityGroupPolicySet()
self.SecurityGroupPolicySet._deserialize(params.get("SecurityGroupPolicySet"))
class CreateSecurityGroupPoliciesResponse(AbstractModel):
"""CreateSecurityGroupPolicies返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class CreateSecurityGroupRequest(AbstractModel):
"""CreateSecurityGroup请求参数结构体
"""
def __init__(self):
"""
:param GroupName: 安全组名称,可任意命名,但不得超过60个字符。
:type GroupName: str
:param GroupDescription: 安全组备注,最多100个字符。
:type GroupDescription: str
:param ProjectId: 项目id,默认0。可在qcloud控制台项目管理页面查询到。
:type ProjectId: str
"""
self.GroupName = None
self.GroupDescription = None
self.ProjectId = None
def _deserialize(self, params):
self.GroupName = params.get("GroupName")
self.GroupDescription = params.get("GroupDescription")
self.ProjectId = params.get("ProjectId")
class CreateSecurityGroupResponse(AbstractModel):
"""CreateSecurityGroup返回参数结构体
"""
def __init__(self):
"""
:param SecurityGroup: 安全组对象。
:type SecurityGroup: :class:`tencentcloud.vpc.v20170312.models.SecurityGroup`
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.SecurityGroup = None
self.RequestId = None
def _deserialize(self, params):
if params.get("SecurityGroup") is not None:
self.SecurityGroup = SecurityGroup()
self.SecurityGroup._deserialize(params.get("SecurityGroup"))
self.RequestId = params.get("RequestId")
class CreateServiceTemplateGroupRequest(AbstractModel):
"""CreateServiceTemplateGroup请求参数结构体
"""
def __init__(self):
"""
:param ServiceTemplateGroupName: 协议端口模板集合名称
:type ServiceTemplateGroupName: str
:param ServiceTemplateIds: 协议端口模板实例ID,例如:ppm-4dw6agho。
:type ServiceTemplateIds: list of str
"""
self.ServiceTemplateGroupName = None
self.ServiceTemplateIds = None
def _deserialize(self, params):
self.ServiceTemplateGroupName = params.get("ServiceTemplateGroupName")
self.ServiceTemplateIds = params.get("ServiceTemplateIds")
class CreateServiceTemplateGroupResponse(AbstractModel):
"""CreateServiceTemplateGroup返回参数结构体
"""
def __init__(self):
"""
:param ServiceTemplateGroup: 协议端口模板集合对象。
:type ServiceTemplateGroup: :class:`tencentcloud.vpc.v20170312.models.ServiceTemplateGroup`
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.ServiceTemplateGroup = None
self.RequestId = None
def _deserialize(self, params):
if params.get("ServiceTemplateGroup") is not None:
self.ServiceTemplateGroup = ServiceTemplateGroup()
self.ServiceTemplateGroup._deserialize(params.get("ServiceTemplateGroup"))
self.RequestId = params.get("RequestId")
class CreateServiceTemplateRequest(AbstractModel):
"""CreateServiceTemplate请求参数结构体
"""
def __init__(self):
"""
:param ServiceTemplateName: 协议端口模板名称
:type ServiceTemplateName: str
:param Services: 支持单个端口、多个端口、连续端口及所有端口,协议支持:TCP、UDP、ICMP、GRE 协议。
:type Services: list of str
"""
self.ServiceTemplateName = None
self.Services = None
def _deserialize(self, params):
self.ServiceTemplateName = params.get("ServiceTemplateName")
self.Services = params.get("Services")
class CreateServiceTemplateResponse(AbstractModel):
"""CreateServiceTemplate返回参数结构体
"""
def __init__(self):
"""
:param ServiceTemplate: 协议端口模板对象。
:type ServiceTemplate: :class:`tencentcloud.vpc.v20170312.models.ServiceTemplate`
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.ServiceTemplate = None
self.RequestId = None
def _deserialize(self, params):
if params.get("ServiceTemplate") is not None:
self.ServiceTemplate = ServiceTemplate()
self.ServiceTemplate._deserialize(params.get("ServiceTemplate"))
self.RequestId = params.get("RequestId")
class CreateSubnetRequest(AbstractModel):
"""CreateSubnet请求参数结构体
"""
def __init__(self):
"""
:param VpcId: 待操作的VPC实例ID。可通过DescribeVpcs接口返回值中的VpcId获取。
:type VpcId: str
:param SubnetName: 子网名称,最大长度不能超过60个字节。
:type SubnetName: str
:param CidrBlock: 子网网段,子网网段必须在VPC网段内,相同VPC内子网网段不能重叠。
:type CidrBlock: str
:param Zone: 子网所在的可用区ID,不同子网选择不同可用区可以做跨可用区灾备。
:type Zone: str
"""
self.VpcId = None
self.SubnetName = None
self.CidrBlock = None
self.Zone = None
def _deserialize(self, params):
self.VpcId = params.get("VpcId")
self.SubnetName = params.get("SubnetName")
self.CidrBlock = params.get("CidrBlock")
self.Zone = params.get("Zone")
class CreateSubnetResponse(AbstractModel):
"""CreateSubnet返回参数结构体
"""
def __init__(self):
"""
:param Subnet: 子网对象。
:type Subnet: :class:`tencentcloud.vpc.v20170312.models.Subnet`
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.Subnet = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Subnet") is not None:
self.Subnet = Subnet()
self.Subnet._deserialize(params.get("Subnet"))
self.RequestId = params.get("RequestId")
class CreateVpcRequest(AbstractModel):
"""CreateVpc请求参数结构体
"""
def __init__(self):
"""
:param VpcName: vpc名称,最大长度不能超过60个字节。
:type VpcName: str
:param CidrBlock: vpc的cidr,只能为10.0.0.0/16,172.16.0.0/12,192.168.0.0/16这三个内网网段内。
:type CidrBlock: str
:param EnableMulticast: 是否开启组播。true: 开启, false: 不开启。
:type EnableMulticast: str
:param DnsServers: DNS地址,最多支持4个
:type DnsServers: list of str
:param DomainName: 域名
:type DomainName: str
"""
self.VpcName = None
self.CidrBlock = None
self.EnableMulticast = None
self.DnsServers = None
self.DomainName = None
def _deserialize(self, params):
self.VpcName = params.get("VpcName")
self.CidrBlock = params.get("CidrBlock")
self.EnableMulticast = params.get("EnableMulticast")
self.DnsServers = params.get("DnsServers")
self.DomainName = params.get("DomainName")
class CreateVpcResponse(AbstractModel):
"""CreateVpc返回参数结构体
"""
def __init__(self):
"""
:param Vpc: Vpc对象。
:type Vpc: :class:`tencentcloud.vpc.v20170312.models.Vpc`
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.Vpc = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Vpc") is not None:
self.Vpc = Vpc()
self.Vpc._deserialize(params.get("Vpc"))
self.RequestId = params.get("RequestId")
class CreateVpnConnectionRequest(AbstractModel):
"""CreateVpnConnection请求参数结构体
"""
def __init__(self):
"""
:param VpcId: VPC实例ID。可通过DescribeVpcs接口返回值中的VpcId获取。
:type VpcId: str
:param VpnGatewayId: VPN网关实例ID。
:type VpnGatewayId: str
:param CustomerGatewayId: 对端网关ID,例如:cgw-2wqq41m9,可通过DescribeCustomerGateways接口查询对端网关。
:type CustomerGatewayId: str
:param VpnConnectionName: 通道名称,可任意命名,但不得超过60个字符。
:type VpnConnectionName: str
:param PreShareKey: 预共享密钥。
:type PreShareKey: str
:param SecurityPolicyDatabases: SPD策略组,例如:{"10.0.0.5/24":["172.123.10.5/16"]},10.0.0.5/24是vpc内网段172.123.10.5/16是IDC网段。用户指定VPC内哪些网段可以和您IDC中哪些网段通信。
:type SecurityPolicyDatabases: list of SecurityPolicyDatabase
:param IKEOptionsSpecification: IKE配置(Internet Key Exchange,因特网密钥交换),IKE具有一套自保护机制,用户配置网络安全协议
:type IKEOptionsSpecification: :class:`tencentcloud.vpc.v20170312.models.IKEOptionsSpecification`
:param IPSECOptionsSpecification: IPSec配置,腾讯云提供IPSec安全会话设置
:type IPSECOptionsSpecification: :class:`tencentcloud.vpc.v20170312.models.IPSECOptionsSpecification`
"""
self.VpcId = None
self.VpnGatewayId = None
self.CustomerGatewayId = None
self.VpnConnectionName = None
self.PreShareKey = None
self.SecurityPolicyDatabases = None
self.IKEOptionsSpecification = None
self.IPSECOptionsSpecification = None
def _deserialize(self, params):
self.VpcId = params.get("VpcId")
self.VpnGatewayId = params.get("VpnGatewayId")
self.CustomerGatewayId = params.get("CustomerGatewayId")
self.VpnConnectionName = params.get("VpnConnectionName")
self.PreShareKey = params.get("PreShareKey")
if params.get("SecurityPolicyDatabases") is not None:
self.SecurityPolicyDatabases = []
for item in params.get("SecurityPolicyDatabases"):
obj = SecurityPolicyDatabase()
obj._deserialize(item)
self.SecurityPolicyDatabases.append(obj)
if params.get("IKEOptionsSpecification") is not None:
self.IKEOptionsSpecification = IKEOptionsSpecification()
self.IKEOptionsSpecification._deserialize(params.get("IKEOptionsSpecification"))
if params.get("IPSECOptionsSpecification") is not None:
self.IPSECOptionsSpecification = IPSECOptionsSpecification()
self.IPSECOptionsSpecification._deserialize(params.get("IPSECOptionsSpecification"))
class CreateVpnConnectionResponse(AbstractModel):
"""CreateVpnConnection返回参数结构体
"""
def __init__(self):
"""
:param VpnConnection: 通道实例对象。
:type VpnConnection: :class:`tencentcloud.vpc.v20170312.models.VpnConnection`
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.VpnConnection = None
self.RequestId = None
def _deserialize(self, params):
if params.get("VpnConnection") is not None:
self.VpnConnection = VpnConnection()
self.VpnConnection._deserialize(params.get("VpnConnection"))
self.RequestId = params.get("RequestId")
class CreateVpnGatewayRequest(AbstractModel):
"""CreateVpnGateway请求参数结构体
"""
def __init__(self):
"""
:param VpcId: VPC实例ID。可通过DescribeVpcs接口返回值中的VpcId获取。
:type VpcId: str
:param VpnGatewayName: VPN网关名称,最大长度不能超过60个字节。
:type VpnGatewayName: str
:param InternetMaxBandwidthOut: 公网带宽设置。可选带宽规格:5, 10, 20, 50, 100;单位:Mbps
:type InternetMaxBandwidthOut: int
:param InstanceChargeType: VPN网关计费模式,PREPAID:表示预付费,即包年包月,POSTPAID_BY_HOUR:表示后付费,即按量计费。默认:POSTPAID_BY_HOUR,如果指定预付费模式,参数InstanceChargePrepaid必填。
:type InstanceChargeType: str
:param InstanceChargePrepaid: 预付费模式,即包年包月相关参数设置。通过该参数可以指定包年包月实例的购买时长、是否设置自动续费等属性。若指定实例的付费模式为预付费则该参数必传。
:type InstanceChargePrepaid: :class:`tencentcloud.vpc.v20170312.models.InstanceChargePrepaid`
"""
self.VpcId = None
self.VpnGatewayName = None
self.InternetMaxBandwidthOut = None
self.InstanceChargeType = None
self.InstanceChargePrepaid = None
def _deserialize(self, params):
self.VpcId = params.get("VpcId")
self.VpnGatewayName = params.get("VpnGatewayName")
self.InternetMaxBandwidthOut = params.get("InternetMaxBandwidthOut")
self.InstanceChargeType = params.get("InstanceChargeType")
if params.get("InstanceChargePrepaid") is not None:
self.InstanceChargePrepaid = InstanceChargePrepaid()
self.InstanceChargePrepaid._deserialize(params.get("InstanceChargePrepaid"))
class CreateVpnGatewayResponse(AbstractModel):
"""CreateVpnGateway返回参数结构体
"""
def __init__(self):
"""
:param VpnGateway: VPN网关对象
:type VpnGateway: :class:`tencentcloud.vpc.v20170312.models.VpnGateway`
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.VpnGateway = None
self.RequestId = None
def _deserialize(self, params):
if params.get("VpnGateway") is not None:
self.VpnGateway = VpnGateway()
self.VpnGateway._deserialize(params.get("VpnGateway"))
self.RequestId = params.get("RequestId")
class CustomerGateway(AbstractModel):
"""对端网关
"""
def __init__(self):
"""
:param CustomerGatewayId: 用户网关唯一ID
:type CustomerGatewayId: str
:param CustomerGatewayName: 网关名称
:type CustomerGatewayName: str
:param IpAddress: 公网地址
:type IpAddress: str
:param CreatedTime: 创建时间
:type CreatedTime: str
"""
self.CustomerGatewayId = None
self.CustomerGatewayName = None
self.IpAddress = None
self.CreatedTime = None
def _deserialize(self, params):
self.CustomerGatewayId = params.get("CustomerGatewayId")
self.CustomerGatewayName = params.get("CustomerGatewayName")
self.IpAddress = params.get("IpAddress")
self.CreatedTime = params.get("CreatedTime")
class CustomerGatewayVendor(AbstractModel):
"""对端网关厂商信息对象。
"""
def __init__(self):
"""
:param Platform: 平台。
:type Platform: str
:param SoftwareVersion: 软件版本。
:type SoftwareVersion: str
:param VendorName: 供应商名称。
:type VendorName: str
"""
self.Platform = None
self.SoftwareVersion = None
self.VendorName = None
def _deserialize(self, params):
self.Platform = params.get("Platform")
self.SoftwareVersion = params.get("SoftwareVersion")
self.VendorName = params.get("VendorName")
class DefaultVpcSubnet(AbstractModel):
"""默认VPC和子网
"""
def __init__(self):
"""
:param VpcId: 默认VpcId
:type VpcId: str
:param SubnetId: 默认SubnetId
:type SubnetId: str
"""
self.VpcId = None
self.SubnetId = None
def _deserialize(self, params):
self.VpcId = params.get("VpcId")
self.SubnetId = params.get("SubnetId")
class DeleteAddressTemplateGroupRequest(AbstractModel):
"""DeleteAddressTemplateGroup请求参数结构体
"""
def __init__(self):
"""
:param AddressTemplateGroupId: IP地址模板集合实例ID,例如:ipmg-90cex8mq。
:type AddressTemplateGroupId: str
"""
self.AddressTemplateGroupId = None
def _deserialize(self, params):
self.AddressTemplateGroupId = params.get("AddressTemplateGroupId")
class DeleteAddressTemplateGroupResponse(AbstractModel):
"""DeleteAddressTemplateGroup返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class DeleteAddressTemplateRequest(AbstractModel):
"""DeleteAddressTemplate请求参数结构体
"""
def __init__(self):
"""
:param AddressTemplateId: IP地址模板实例ID,例如:ipm-09o5m8kc。
:type AddressTemplateId: str
"""
self.AddressTemplateId = None
def _deserialize(self, params):
self.AddressTemplateId = params.get("AddressTemplateId")
class DeleteAddressTemplateResponse(AbstractModel):
"""DeleteAddressTemplate返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class DeleteCustomerGatewayRequest(AbstractModel):
"""DeleteCustomerGateway请求参数结构体
"""
def __init__(self):
"""
:param CustomerGatewayId: 对端网关ID,例如:cgw-2wqq41m9,可通过DescribeCustomerGateways接口查询对端网关。
:type CustomerGatewayId: str
"""
self.CustomerGatewayId = None
def _deserialize(self, params):
self.CustomerGatewayId = params.get("CustomerGatewayId")
class DeleteCustomerGatewayResponse(AbstractModel):
"""DeleteCustomerGateway返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class DeleteNetworkInterfaceRequest(AbstractModel):
"""DeleteNetworkInterface请求参数结构体
"""
def __init__(self):
"""
:param NetworkInterfaceId: 弹性网卡实例ID,例如:eni-m6dyj72l。
:type NetworkInterfaceId: str
"""
self.NetworkInterfaceId = None
def _deserialize(self, params):
self.NetworkInterfaceId = params.get("NetworkInterfaceId")
class DeleteNetworkInterfaceResponse(AbstractModel):
"""DeleteNetworkInterface返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class DeleteRouteTableRequest(AbstractModel):
"""DeleteRouteTable请求参数结构体
"""
def __init__(self):
"""
:param RouteTableId: 路由表实例ID,例如:rtb-azd4dt1c。
:type RouteTableId: str
"""
self.RouteTableId = None
def _deserialize(self, params):
self.RouteTableId = params.get("RouteTableId")
class DeleteRouteTableResponse(AbstractModel):
"""DeleteRouteTable返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class DeleteRoutesRequest(AbstractModel):
"""DeleteRoutes请求参数结构体
"""
def __init__(self):
"""
:param RouteTableId: 路由表实例ID。
:type RouteTableId: str
:param Routes: 路由策略对象。
:type Routes: list of Route
"""
self.RouteTableId = None
self.Routes = None
def _deserialize(self, params):
self.RouteTableId = params.get("RouteTableId")
if params.get("Routes") is not None:
self.Routes = []
for item in params.get("Routes"):
obj = Route()
obj._deserialize(item)
self.Routes.append(obj)
class DeleteRoutesResponse(AbstractModel):
"""DeleteRoutes返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class DeleteSecurityGroupPoliciesRequest(AbstractModel):
"""DeleteSecurityGroupPolicies请求参数结构体
"""
def __init__(self):
"""
:param SecurityGroupId: 安全组实例ID,例如sg-33ocnj9n,可通过DescribeSecurityGroups获取。
:type SecurityGroupId: str
:param SecurityGroupPolicySet: 安全组规则集合。一个请求中只能删除单个方向的一条或多条规则。支持指定索引(PolicyIndex) 匹配删除和安全组规则匹配删除两种方式,一个请求中只能使用一种匹配方式。
:type SecurityGroupPolicySet: :class:`tencentcloud.vpc.v20170312.models.SecurityGroupPolicySet`
"""
self.SecurityGroupId = None
self.SecurityGroupPolicySet = None
def _deserialize(self, params):
self.SecurityGroupId = params.get("SecurityGroupId")
if params.get("SecurityGroupPolicySet") is not None:
self.SecurityGroupPolicySet = SecurityGroupPolicySet()
self.SecurityGroupPolicySet._deserialize(params.get("SecurityGroupPolicySet"))
class DeleteSecurityGroupPoliciesResponse(AbstractModel):
"""DeleteSecurityGroupPolicies返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class DeleteSecurityGroupRequest(AbstractModel):
"""DeleteSecurityGroup请求参数结构体
"""
def __init__(self):
"""
:param SecurityGroupId: 安全组实例ID,例如sg-33ocnj9n,可通过DescribeSecurityGroups获取。
:type SecurityGroupId: str
"""
self.SecurityGroupId = None
def _deserialize(self, params):
self.SecurityGroupId = params.get("SecurityGroupId")
class DeleteSecurityGroupResponse(AbstractModel):
"""DeleteSecurityGroup返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class DeleteServiceTemplateGroupRequest(AbstractModel):
"""DeleteServiceTemplateGroup请求参数结构体
"""
def __init__(self):
"""
:param ServiceTemplateGroupId: 协议端口模板集合实例ID,例如:ppmg-n17uxvve。
:type ServiceTemplateGroupId: str
"""
self.ServiceTemplateGroupId = None
def _deserialize(self, params):
self.ServiceTemplateGroupId = params.get("ServiceTemplateGroupId")
class DeleteServiceTemplateGroupResponse(AbstractModel):
"""DeleteServiceTemplateGroup返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class DeleteServiceTemplateRequest(AbstractModel):
"""DeleteServiceTemplate请求参数结构体
"""
def __init__(self):
"""
:param ServiceTemplateId: 协议端口模板实例ID,例如:ppm-e6dy460g。
:type ServiceTemplateId: str
"""
self.ServiceTemplateId = None
def _deserialize(self, params):
self.ServiceTemplateId = params.get("ServiceTemplateId")
class DeleteServiceTemplateResponse(AbstractModel):
"""DeleteServiceTemplate返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class DeleteSubnetRequest(AbstractModel):
"""DeleteSubnet请求参数结构体
"""
def __init__(self):
"""
:param SubnetId: 子网实例ID。可通过DescribeSubnets接口返回值中的SubnetId获取。
:type SubnetId: str
"""
self.SubnetId = None
def _deserialize(self, params):
self.SubnetId = params.get("SubnetId")
class DeleteSubnetResponse(AbstractModel):
"""DeleteSubnet返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class DeleteVpcRequest(AbstractModel):
"""DeleteVpc请求参数结构体
"""
def __init__(self):
"""
:param VpcId: VPC实例ID。可通过DescribeVpcs接口返回值中的VpcId获取。
:type VpcId: str
"""
self.VpcId = None
def _deserialize(self, params):
self.VpcId = params.get("VpcId")
class DeleteVpcResponse(AbstractModel):
"""DeleteVpc返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class DeleteVpnConnectionRequest(AbstractModel):
"""DeleteVpnConnection请求参数结构体
"""
def __init__(self):
"""
:param VpnGatewayId: VPN网关实例ID。
:type VpnGatewayId: str
:param VpnConnectionId: VPN通道实例ID。形如:vpnx-f49l6u0z。
:type VpnConnectionId: str
"""
self.VpnGatewayId = None
self.VpnConnectionId = None
def _deserialize(self, params):
self.VpnGatewayId = params.get("VpnGatewayId")
self.VpnConnectionId = params.get("VpnConnectionId")
class DeleteVpnConnectionResponse(AbstractModel):
"""DeleteVpnConnection返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class DeleteVpnGatewayRequest(AbstractModel):
"""DeleteVpnGateway请求参数结构体
"""
def __init__(self):
"""
:param VpnGatewayId: VPN网关实例ID。
:type VpnGatewayId: str
"""
self.VpnGatewayId = None
def _deserialize(self, params):
self.VpnGatewayId = params.get("VpnGatewayId")
class DeleteVpnGatewayResponse(AbstractModel):
"""DeleteVpnGateway返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class DescribeAccountAttributesRequest(AbstractModel):
"""DescribeAccountAttributes请求参数结构体
"""
class DescribeAccountAttributesResponse(AbstractModel):
"""DescribeAccountAttributes返回参数结构体
"""
def __init__(self):
"""
:param AccountAttributeSet: 用户账号属性对象
:type AccountAttributeSet: list of AccountAttribute
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.AccountAttributeSet = None
self.RequestId = None
def _deserialize(self, params):
if params.get("AccountAttributeSet") is not None:
self.AccountAttributeSet = []
for item in params.get("AccountAttributeSet"):
obj = AccountAttribute()
obj._deserialize(item)
self.AccountAttributeSet.append(obj)
self.RequestId = params.get("RequestId")
class DescribeAddressQuotaRequest(AbstractModel):
"""DescribeAddressQuota请求参数结构体
"""
class DescribeAddressQuotaResponse(AbstractModel):
"""DescribeAddressQuota返回参数结构体
"""
def __init__(self):
"""
:param QuotaSet: 账户 EIP 配额信息。
:type QuotaSet: list of Quota
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.QuotaSet = None
self.RequestId = None
def _deserialize(self, params):
if params.get("QuotaSet") is not None:
self.QuotaSet = []
for item in params.get("QuotaSet"):
obj = Quota()
obj._deserialize(item)
self.QuotaSet.append(obj)
self.RequestId = params.get("RequestId")
class DescribeAddressTemplateGroupsRequest(AbstractModel):
"""DescribeAddressTemplateGroups请求参数结构体
"""
def __init__(self):
"""
:param Filters: 过滤条件。
<li>address-template-group-name - String - (过滤条件)IP地址模板集合名称。</li>
<li>address-template-group-id - String - (过滤条件)IP地址模板实集合例ID,例如:ipmg-mdunqeb6。</li>
:type Filters: list of Filter
:param Offset: 偏移量,默认为0。
:type Offset: str
:param Limit: 返回数量,默认为20,最大值为100。
:type Limit: str
"""
self.Filters = None
self.Offset = None
self.Limit = None
def _deserialize(self, params):
if params.get("Filters") is not None:
self.Filters = []
for item in params.get("Filters"):
obj = Filter()
obj._deserialize(item)
self.Filters.append(obj)
self.Offset = params.get("Offset")
self.Limit = params.get("Limit")
class DescribeAddressTemplateGroupsResponse(AbstractModel):
"""DescribeAddressTemplateGroups返回参数结构体
"""
def __init__(self):
"""
:param TotalCount: 符合条件的实例数量。
:type TotalCount: int
:param AddressTemplateGroupSet: IP地址模板。
:type AddressTemplateGroupSet: list of AddressTemplateGroup
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.TotalCount = None
self.AddressTemplateGroupSet = None
self.RequestId = None
def _deserialize(self, params):
self.TotalCount = params.get("TotalCount")
if params.get("AddressTemplateGroupSet") is not None:
self.AddressTemplateGroupSet = []
for item in params.get("AddressTemplateGroupSet"):
obj = AddressTemplateGroup()
obj._deserialize(item)
self.AddressTemplateGroupSet.append(obj)
self.RequestId = params.get("RequestId")
class DescribeAddressTemplatesRequest(AbstractModel):
"""DescribeAddressTemplates请求参数结构体
"""
def __init__(self):
"""
:param Filters: 过滤条件。
<li>address-template-name - String - (过滤条件)IP地址模板名称。</li>
<li>address-template-id - String - (过滤条件)IP地址模板实例ID,例如:ipm-mdunqeb6。</li>
:type Filters: list of Filter
:param Offset: 偏移量,默认为0。
:type Offset: str
:param Limit: 返回数量,默认为20,最大值为100。
:type Limit: str
"""
self.Filters = None
self.Offset = None
self.Limit = None
def _deserialize(self, params):
if params.get("Filters") is not None:
self.Filters = []
for item in params.get("Filters"):
obj = Filter()
obj._deserialize(item)
self.Filters.append(obj)
self.Offset = params.get("Offset")
self.Limit = params.get("Limit")
class DescribeAddressTemplatesResponse(AbstractModel):
"""DescribeAddressTemplates返回参数结构体
"""
def __init__(self):
"""
:param TotalCount: 符合条件的实例数量。
:type TotalCount: int
:param AddressTemplateSet: IP地址模版。
:type AddressTemplateSet: list of AddressTemplate
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.TotalCount = None
self.AddressTemplateSet = None
self.RequestId = None
def _deserialize(self, params):
self.TotalCount = params.get("TotalCount")
if params.get("AddressTemplateSet") is not None:
self.AddressTemplateSet = []
for item in params.get("AddressTemplateSet"):
obj = AddressTemplate()
obj._deserialize(item)
self.AddressTemplateSet.append(obj)
self.RequestId = params.get("RequestId")
class DescribeAddressesRequest(AbstractModel):
"""DescribeAddresses请求参数结构体
"""
def __init__(self):
"""
:param AddressIds: 标识 EIP 的唯一 ID 列表。EIP 唯一 ID 形如:`eip-11112222`。参数不支持同时指定`AddressIds`和`Filters`。
:type AddressIds: list of str
:param Filters: 每次请求的`Filters`的上限为10,`Filter.Values`的上限为5。参数不支持同时指定`AddressIds`和`Filters`。详细的过滤条件如下:
<li> address-id - String - 是否必填:否 - (过滤条件)按照 EIP 的唯一 ID 过滤。EIP 唯一 ID 形如:eip-11112222。</li>
<li> address-name - String - 是否必填:否 - (过滤条件)按照 EIP 名称过滤。不支持模糊过滤。</li>
<li> address-ip - String - 是否必填:否 - (过滤条件)按照 EIP 的 IP 地址过滤。</li>
<li> address-status - String - 是否必填:否 - (过滤条件)按照 EIP 的状态过滤。取值范围:[详见EIP状态列表](https://cloud.tencent.com/document/api/213/9452#eip_state)。</li>
<li> instance-id - String - 是否必填:否 - (过滤条件)按照 EIP 绑定的实例 ID 过滤。实例 ID 形如:ins-11112222。</li>
<li> private-ip-address - String - 是否必填:否 - (过滤条件)按照 EIP 绑定的内网 IP 过滤。</li>
<li> network-interface-id - String - 是否必填:否 - (过滤条件)按照 EIP 绑定的弹性网卡 ID 过滤。弹性网卡 ID 形如:eni-11112222。</li>
<li> is-arrears - String - 是否必填:否 - (过滤条件)按照 EIP 是否欠费进行过滤。(TRUE:EIP 处于欠费状态|FALSE:EIP 费用状态正常)</li>
:type Filters: list of Filter
:param Offset: 偏移量,默认为0。关于`Offset`的更进一步介绍请参考 API [简介](https://cloud.tencent.com/document/api/213/11646)中的相关小节。
:type Offset: int
:param Limit: 返回数量,默认为20,最大值为100。关于`Limit`的更进一步介绍请参考 API [简介](https://cloud.tencent.com/document/api/213/11646)中的相关小节。
:type Limit: int
"""
self.AddressIds = None
self.Filters = None
self.Offset = None
self.Limit = None
def _deserialize(self, params):
self.AddressIds = params.get("AddressIds")
if params.get("Filters") is not None:
self.Filters = []
for item in params.get("Filters"):
obj = Filter()
obj._deserialize(item)
self.Filters.append(obj)
self.Offset = params.get("Offset")
self.Limit = params.get("Limit")
class DescribeAddressesResponse(AbstractModel):
"""DescribeAddresses返回参数结构体
"""
def __init__(self):
"""
:param TotalCount: 符合条件的 EIP 数量。
:type TotalCount: int
:param AddressSet: EIP 详细信息列表。
:type AddressSet: list of Address
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.TotalCount = None
self.AddressSet = None
self.RequestId = None
def _deserialize(self, params):
self.TotalCount = params.get("TotalCount")
if params.get("AddressSet") is not None:
self.AddressSet = []
for item in params.get("AddressSet"):
obj = Address()
obj._deserialize(item)
self.AddressSet.append(obj)
self.RequestId = params.get("RequestId")
class DescribeClassicLinkInstancesRequest(AbstractModel):
"""DescribeClassicLinkInstances请求参数结构体
"""
def __init__(self):
"""
:param Filters: 过滤条件。
<li>vpc-id - String - (过滤条件)VPC实例ID。</li>
<li>vm-ip - String - (过滤条件)基础网络云主机IP。</li>
:type Filters: list of FilterObject
:param Offset: 偏移量
:type Offset: str
:param Limit: 返回数量
:type Limit: str
"""
self.Filters = None
self.Offset = None
self.Limit = None
def _deserialize(self, params):
if params.get("Filters") is not None:
self.Filters = []
for item in params.get("Filters"):
obj = FilterObject()
obj._deserialize(item)
self.Filters.append(obj)
self.Offset = params.get("Offset")
self.Limit = params.get("Limit")
class DescribeClassicLinkInstancesResponse(AbstractModel):
"""DescribeClassicLinkInstances返回参数结构体
"""
def __init__(self):
"""
:param TotalCount: 符合条件的实例数量。
:type TotalCount: int
:param ClassicLinkInstanceSet: 私有网络和基础网络互通设备。
:type ClassicLinkInstanceSet: list of ClassicLinkInstance
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.TotalCount = None
self.ClassicLinkInstanceSet = None
self.RequestId = None
def _deserialize(self, params):
self.TotalCount = params.get("TotalCount")
if params.get("ClassicLinkInstanceSet") is not None:
self.ClassicLinkInstanceSet = []
for item in params.get("ClassicLinkInstanceSet"):
obj = ClassicLinkInstance()
obj._deserialize(item)
self.ClassicLinkInstanceSet.append(obj)
self.RequestId = params.get("RequestId")
class DescribeCustomerGatewayVendorsRequest(AbstractModel):
"""DescribeCustomerGatewayVendors请求参数结构体
"""
class DescribeCustomerGatewayVendorsResponse(AbstractModel):
"""DescribeCustomerGatewayVendors返回参数结构体
"""
def __init__(self):
"""
:param CustomerGatewayVendorSet: 对端网关厂商信息对象。
:type CustomerGatewayVendorSet: list of CustomerGatewayVendor
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.CustomerGatewayVendorSet = None
self.RequestId = None
def _deserialize(self, params):
if params.get("CustomerGatewayVendorSet") is not None:
self.CustomerGatewayVendorSet = []
for item in params.get("CustomerGatewayVendorSet"):
obj = CustomerGatewayVendor()
obj._deserialize(item)
self.CustomerGatewayVendorSet.append(obj)
self.RequestId = params.get("RequestId")
class DescribeCustomerGatewaysRequest(AbstractModel):
"""DescribeCustomerGateways请求参数结构体
"""
def __init__(self):
"""
:param CustomerGatewayIds: 对端网关ID,例如:cgw-2wqq41m9。每次请求的实例的上限为100。参数不支持同时指定CustomerGatewayIds和Filters。
:type CustomerGatewayIds: list of str
:param Filters: 过滤条件,详见下表:实例过滤条件表。每次请求的Filters的上限为10,Filter.Values的上限为5。参数不支持同时指定CustomerGatewayIds和Filters。
:type Filters: list of Filter
:param Offset: 偏移量,默认为0。关于Offset的更进一步介绍请参考 API 简介中的相关小节。
:type Offset: int
:param Limit: 返回数量,默认为20,最大值为100。
:type Limit: int
"""
self.CustomerGatewayIds = None
self.Filters = None
self.Offset = None
self.Limit = None
def _deserialize(self, params):
self.CustomerGatewayIds = params.get("CustomerGatewayIds")
if params.get("Filters") is not None:
self.Filters = []
for item in params.get("Filters"):
obj = Filter()
obj._deserialize(item)
self.Filters.append(obj)
self.Offset = params.get("Offset")
self.Limit = params.get("Limit")
class DescribeCustomerGatewaysResponse(AbstractModel):
"""DescribeCustomerGateways返回参数结构体
"""
def __init__(self):
"""
:param CustomerGatewaySet: 对端网关对象列表
:type CustomerGatewaySet: list of CustomerGateway
:param TotalCount: 符合条件的实例数量。
:type TotalCount: int
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.CustomerGatewaySet = None
self.TotalCount = None
self.RequestId = None
def _deserialize(self, params):
if params.get("CustomerGatewaySet") is not None:
self.CustomerGatewaySet = []
for item in params.get("CustomerGatewaySet"):
obj = CustomerGateway()
obj._deserialize(item)
self.CustomerGatewaySet.append(obj)
self.TotalCount = params.get("TotalCount")
self.RequestId = params.get("RequestId")
class DescribeNetworkInterfacesRequest(AbstractModel):
"""DescribeNetworkInterfaces请求参数结构体
"""
def __init__(self):
"""
:param NetworkInterfaceIds: 弹性网卡实例ID查询。形如:eni-pxir56ns。每次请求的实例的上限为100。参数不支持同时指定NetworkInterfaceIds和Filters。
:type NetworkInterfaceIds: list of str
:param Filters: 过滤条件,参数不支持同时指定NetworkInterfaceIds和Filters。
<li>vpc-id - String - (过滤条件)VPC实例ID,形如:vpc-f49l6u0z。</li>
<li>subnet-id - String - (过滤条件)所属子网实例ID,形如:subnet-f49l6u0z。</li>
<li>network-interface-id - String - (过滤条件)弹性网卡实例ID,形如:eni-5k56k7k7。</li>
<li>attachment.instance-id - String - (过滤条件)绑定的云服务器实例ID,形如:ins-3nqpdn3i。</li>
<li>groups.security-group-id - String - (过滤条件)绑定的安全组实例ID,例如:sg-f9ekbxeq。</li>
<li>network-interface-name - String - (过滤条件)网卡实例名称。</li>
<li>network-interface-description - String - (过滤条件)网卡实例描述。</li>
:type Filters: list of Filter
:param Offset: 偏移量,默认为0。
:type Offset: int
:param Limit: 返回数量,默认为20,最大值为100。
:type Limit: int
"""
self.NetworkInterfaceIds = None
self.Filters = None
self.Offset = None
self.Limit = None
def _deserialize(self, params):
self.NetworkInterfaceIds = params.get("NetworkInterfaceIds")
if params.get("Filters") is not None:
self.Filters = []
for item in params.get("Filters"):
obj = Filter()
obj._deserialize(item)
self.Filters.append(obj)
self.Offset = params.get("Offset")
self.Limit = params.get("Limit")
class DescribeNetworkInterfacesResponse(AbstractModel):
"""DescribeNetworkInterfaces返回参数结构体
"""
def __init__(self):
"""
:param NetworkInterfaceSet: 实例详细信息列表。
:type NetworkInterfaceSet: list of NetworkInterface
:param TotalCount: 符合条件的实例数量。
:type TotalCount: int
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.NetworkInterfaceSet = None
self.TotalCount = None
self.RequestId = None
def _deserialize(self, params):
if params.get("NetworkInterfaceSet") is not None:
self.NetworkInterfaceSet = []
for item in params.get("NetworkInterfaceSet"):
obj = NetworkInterface()
obj._deserialize(item)
self.NetworkInterfaceSet.append(obj)
self.TotalCount = params.get("TotalCount")
self.RequestId = params.get("RequestId")
class DescribeRouteTablesRequest(AbstractModel):
"""DescribeRouteTables请求参数结构体
"""
def __init__(self):
"""
:param RouteTableIds: 路由表实例ID,例如:rtb-azd4dt1c。
:type RouteTableIds: list of str
:param Filters: 过滤条件,参数不支持同时指定RouteTableIds和Filters。
<li>route-table-id - String - (过滤条件)路由表实例ID。</li>
<li>route-table-name - String - (过滤条件)路由表名称。</li>
<li>vpc-id - String - (过滤条件)VPC实例ID,形如:vpc-f49l6u0z。</li>
<li>association.main - String - (过滤条件)是否主路由表。</li>
:type Filters: list of Filter
:param Offset: 偏移量。
:type Offset: str
:param Limit: 请求对象个数。
:type Limit: str
"""
self.RouteTableIds = None
self.Filters = None
self.Offset = None
self.Limit = None
def _deserialize(self, params):
self.RouteTableIds = params.get("RouteTableIds")
if params.get("Filters") is not None:
self.Filters = []
for item in params.get("Filters"):
obj = Filter()
obj._deserialize(item)
self.Filters.append(obj)
self.Offset = params.get("Offset")
self.Limit = params.get("Limit")
class DescribeRouteTablesResponse(AbstractModel):
"""DescribeRouteTables返回参数结构体
"""
def __init__(self):
"""
:param TotalCount: 符合条件的实例数量。
:type TotalCount: int
:param RouteTableSet: 路由表对象。
:type RouteTableSet: list of RouteTable
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.TotalCount = None
self.RouteTableSet = None
self.RequestId = None
def _deserialize(self, params):
self.TotalCount = params.get("TotalCount")
if params.get("RouteTableSet") is not None:
self.RouteTableSet = []
for item in params.get("RouteTableSet"):
obj = RouteTable()
obj._deserialize(item)
self.RouteTableSet.append(obj)
self.RequestId = params.get("RequestId")
class DescribeSecurityGroupAssociationStatisticsRequest(AbstractModel):
"""DescribeSecurityGroupAssociationStatistics请求参数结构体
"""
def __init__(self):
"""
:param SecurityGroupIds: 安全实例ID,例如sg-33ocnj9n,可通过DescribeSecurityGroups获取。
:type SecurityGroupIds: list of str
"""
self.SecurityGroupIds = None
def _deserialize(self, params):
self.SecurityGroupIds = params.get("SecurityGroupIds")
class DescribeSecurityGroupAssociationStatisticsResponse(AbstractModel):
"""DescribeSecurityGroupAssociationStatistics返回参数结构体
"""
def __init__(self):
"""
:param SecurityGroupAssociationStatisticsSet: 安全组关联实例统计。
:type SecurityGroupAssociationStatisticsSet: list of SecurityGroupAssociationStatistics
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.SecurityGroupAssociationStatisticsSet = None
self.RequestId = None
def _deserialize(self, params):
if params.get("SecurityGroupAssociationStatisticsSet") is not None:
self.SecurityGroupAssociationStatisticsSet = []
for item in params.get("SecurityGroupAssociationStatisticsSet"):
obj = SecurityGroupAssociationStatistics()
obj._deserialize(item)
self.SecurityGroupAssociationStatisticsSet.append(obj)
self.RequestId = params.get("RequestId")
class DescribeSecurityGroupPoliciesRequest(AbstractModel):
"""DescribeSecurityGroupPolicies请求参数结构体
"""
def __init__(self):
"""
:param SecurityGroupId: 安全组实例ID,例如:sg-33ocnj9n,可通过DescribeSecurityGroups获取。
:type SecurityGroupId: str
"""
self.SecurityGroupId = None
def _deserialize(self, params):
self.SecurityGroupId = params.get("SecurityGroupId")
class DescribeSecurityGroupPoliciesResponse(AbstractModel):
"""DescribeSecurityGroupPolicies返回参数结构体
"""
def __init__(self):
"""
:param SecurityGroupPolicySet: 安全组规则集合。
:type SecurityGroupPolicySet: :class:`tencentcloud.vpc.v20170312.models.SecurityGroupPolicySet`
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.SecurityGroupPolicySet = None
self.RequestId = None
def _deserialize(self, params):
if params.get("SecurityGroupPolicySet") is not None:
self.SecurityGroupPolicySet = SecurityGroupPolicySet()
self.SecurityGroupPolicySet._deserialize(params.get("SecurityGroupPolicySet"))
self.RequestId = params.get("RequestId")
class DescribeSecurityGroupsRequest(AbstractModel):
"""DescribeSecurityGroups请求参数结构体
"""
def __init__(self):
"""
:param SecurityGroupIds: 安全组实例ID,例如:sg-33ocnj9n,可通过DescribeSecurityGroups获取。每次请求的实例的上限为100。参数不支持同时指定SecurityGroupIds和Filters。
:type SecurityGroupIds: list of str
:param Filters: 过滤条件,参数不支持同时指定SecurityGroupIds和Filters。
<li>project-id - Integer - (过滤条件)项目id。</li>
<li>security-group-name - String - (过滤条件)安全组名称。</li>
:type Filters: list of Filter
:param Offset: 偏移量。
:type Offset: str
:param Limit: 返回数量。
:type Limit: str
"""
self.SecurityGroupIds = None
self.Filters = None
self.Offset = None
self.Limit = None
def _deserialize(self, params):
self.SecurityGroupIds = params.get("SecurityGroupIds")
if params.get("Filters") is not None:
self.Filters = []
for item in params.get("Filters"):
obj = Filter()
obj._deserialize(item)
self.Filters.append(obj)
self.Offset = params.get("Offset")
self.Limit = params.get("Limit")
class DescribeSecurityGroupsResponse(AbstractModel):
"""DescribeSecurityGroups返回参数结构体
"""
def __init__(self):
"""
:param SecurityGroupSet: 安全组对象。
:type SecurityGroupSet: list of SecurityGroup
:param TotalCount: 符合条件的实例数量。
:type TotalCount: int
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.SecurityGroupSet = None
self.TotalCount = None
self.RequestId = None
def _deserialize(self, params):
if params.get("SecurityGroupSet") is not None:
self.SecurityGroupSet = []
for item in params.get("SecurityGroupSet"):
obj = SecurityGroup()
obj._deserialize(item)
self.SecurityGroupSet.append(obj)
self.TotalCount = params.get("TotalCount")
self.RequestId = params.get("RequestId")
class DescribeServiceTemplateGroupsRequest(AbstractModel):
"""DescribeServiceTemplateGroups请求参数结构体
"""
def __init__(self):
"""
:param Filters: 过滤条件。
<li>service-template-group-name - String - (过滤条件)协议端口模板集合名称。</li>
<li>service-template-group-id - String - (过滤条件)协议端口模板集合实例ID,例如:ppmg-e6dy460g。</li>
:type Filters: list of Filter
:param Offset: 偏移量,默认为0。
:type Offset: str
:param Limit: 返回数量,默认为20,最大值为100。
:type Limit: str
"""
self.Filters = None
self.Offset = None
self.Limit = None
def _deserialize(self, params):
if params.get("Filters") is not None:
self.Filters = []
for item in params.get("Filters"):
obj = Filter()
obj._deserialize(item)
self.Filters.append(obj)
self.Offset = params.get("Offset")
self.Limit = params.get("Limit")
class DescribeServiceTemplateGroupsResponse(AbstractModel):
"""DescribeServiceTemplateGroups返回参数结构体
"""
def __init__(self):
"""
:param TotalCount: 符合条件的实例数量。
:type TotalCount: int
:param ServiceTemplateGroupSet: 协议端口模板集合。
:type ServiceTemplateGroupSet: list of ServiceTemplateGroup
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.TotalCount = None
self.ServiceTemplateGroupSet = None
self.RequestId = None
def _deserialize(self, params):
self.TotalCount = params.get("TotalCount")
if params.get("ServiceTemplateGroupSet") is not None:
self.ServiceTemplateGroupSet = []
for item in params.get("ServiceTemplateGroupSet"):
obj = ServiceTemplateGroup()
obj._deserialize(item)
self.ServiceTemplateGroupSet.append(obj)
self.RequestId = params.get("RequestId")
class DescribeServiceTemplatesRequest(AbstractModel):
"""DescribeServiceTemplates请求参数结构体
"""
def __init__(self):
"""
:param Filters: 过滤条件。
<li>service-template-name - String - (过滤条件)协议端口模板名称。</li>
<li>service-template-id - String - (过滤条件)协议端口模板实例ID,例如:ppm-e6dy460g。</li>
:type Filters: list of Filter
:param Offset: 偏移量,默认为0。
:type Offset: str
:param Limit: 返回数量,默认为20,最大值为100。
:type Limit: str
"""
self.Filters = None
self.Offset = None
self.Limit = None
def _deserialize(self, params):
if params.get("Filters") is not None:
self.Filters = []
for item in params.get("Filters"):
obj = Filter()
obj._deserialize(item)
self.Filters.append(obj)
self.Offset = params.get("Offset")
self.Limit = params.get("Limit")
class DescribeServiceTemplatesResponse(AbstractModel):
"""DescribeServiceTemplates返回参数结构体
"""
def __init__(self):
"""
:param TotalCount: 符合条件的实例数量。
:type TotalCount: int
:param ServiceTemplateSet: 协议端口模板对象。
:type ServiceTemplateSet: list of ServiceTemplate
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.TotalCount = None
self.ServiceTemplateSet = None
self.RequestId = None
def _deserialize(self, params):
self.TotalCount = params.get("TotalCount")
if params.get("ServiceTemplateSet") is not None:
self.ServiceTemplateSet = []
for item in params.get("ServiceTemplateSet"):
obj = ServiceTemplate()
obj._deserialize(item)
self.ServiceTemplateSet.append(obj)
self.RequestId = params.get("RequestId")
class DescribeSubnetsRequest(AbstractModel):
"""DescribeSubnets请求参数结构体
"""
def __init__(self):
"""
:param SubnetIds: 子网实例ID查询。形如:subnet-pxir56ns。每次请求的实例的上限为100。参数不支持同时指定SubnetIds和Filters。
:type SubnetIds: list of str
:param Filters: 过滤条件,参数不支持同时指定SubnetIds和Filters。
<li>subnet-id - String - (过滤条件)Subnet实例名称。</li>
<li>vpc-id - String - (过滤条件)VPC实例ID,形如:vpc-f49l6u0z。</li>
<li>cidr-block - String - (过滤条件)vpc的cidr。</li>
<li>is-default - Boolean - (过滤条件)是否是默认子网。</li>
<li>subnet-name - String - (过滤条件)子网名称。</li>
<li>zone - String - (过滤条件)可用区。</li>
:type Filters: list of Filter
:param Offset: 偏移量
:type Offset: str
:param Limit: 返回数量
:type Limit: str
"""
self.SubnetIds = None
self.Filters = None
self.Offset = None
self.Limit = None
def _deserialize(self, params):
self.SubnetIds = params.get("SubnetIds")
if params.get("Filters") is not None:
self.Filters = []
for item in params.get("Filters"):
obj = Filter()
obj._deserialize(item)
self.Filters.append(obj)
self.Offset = params.get("Offset")
self.Limit = params.get("Limit")
class DescribeSubnetsResponse(AbstractModel):
"""DescribeSubnets返回参数结构体
"""
def __init__(self):
"""
:param TotalCount: 符合条件的实例数量。
:type TotalCount: int
:param SubnetSet: 子网对象。
:type SubnetSet: list of Subnet
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.TotalCount = None
self.SubnetSet = None
self.RequestId = None
def _deserialize(self, params):
self.TotalCount = params.get("TotalCount")
if params.get("SubnetSet") is not None:
self.SubnetSet = []
for item in params.get("SubnetSet"):
obj = Subnet()
obj._deserialize(item)
self.SubnetSet.append(obj)
self.RequestId = params.get("RequestId")
class DescribeVpcsRequest(AbstractModel):
"""DescribeVpcs请求参数结构体
"""
def __init__(self):
"""
:param VpcIds: VPC实例ID。形如:vpc-f49l6u0z。每次请求的实例的上限为100。参数不支持同时指定VpcIds和Filters。
:type VpcIds: list of str
:param Filters: 过滤条件,参数不支持同时指定VpcIds和Filters。
<li>vpc-name - String - (过滤条件)VPC实例名称。</li>
<li>is-default - String - (过滤条件)是否默认VPC。</li>
<li>vpc-id - String - (过滤条件)VPC实例ID形如:vpc-f49l6u0z。</li>
<li>cidr-block - String - (过滤条件)vpc的cidr。</li>
:type Filters: list of Filter
:param Offset: 偏移量
:type Offset: str
:param Limit: 返回数量
:type Limit: str
"""
self.VpcIds = None
self.Filters = None
self.Offset = None
self.Limit = None
def _deserialize(self, params):
self.VpcIds = params.get("VpcIds")
if params.get("Filters") is not None:
self.Filters = []
for item in params.get("Filters"):
obj = Filter()
obj._deserialize(item)
self.Filters.append(obj)
self.Offset = params.get("Offset")
self.Limit = params.get("Limit")
class DescribeVpcsResponse(AbstractModel):
"""DescribeVpcs返回参数结构体
"""
def __init__(self):
"""
:param TotalCount: 符合条件的对象数。
:type TotalCount: int
:param VpcSet: VPC对象。
:type VpcSet: list of Vpc
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.TotalCount = None
self.VpcSet = None
self.RequestId = None
def _deserialize(self, params):
self.TotalCount = params.get("TotalCount")
if params.get("VpcSet") is not None:
self.VpcSet = []
for item in params.get("VpcSet"):
obj = Vpc()
obj._deserialize(item)
self.VpcSet.append(obj)
self.RequestId = params.get("RequestId")
class DescribeVpnConnectionsRequest(AbstractModel):
"""DescribeVpnConnections请求参数结构体
"""
def __init__(self):
"""
:param VpnConnectionIds: VPN通道实例ID。形如:vpnx-f49l6u0z。每次请求的实例的上限为100。参数不支持同时指定VpnConnectionIds和Filters。
:type VpnConnectionIds: list of str
:param Filters: 过滤条件,详见下表:实例过滤条件表。每次请求的Filters的上限为10,Filter.Values的上限为5。参数不支持同时指定VpnConnectionIds和Filters。
:type Filters: list of Filter
:param Offset: 偏移量,默认为0。关于Offset的更进一步介绍请参考 API 简介中的相关小节。
:type Offset: int
:param Limit: 返回数量,默认为20,最大值为100。
:type Limit: int
"""
self.VpnConnectionIds = None
self.Filters = None
self.Offset = None
self.Limit = None
def _deserialize(self, params):
self.VpnConnectionIds = params.get("VpnConnectionIds")
if params.get("Filters") is not None:
self.Filters = []
for item in params.get("Filters"):
obj = Filter()
obj._deserialize(item)
self.Filters.append(obj)
self.Offset = params.get("Offset")
self.Limit = params.get("Limit")
class DescribeVpnConnectionsResponse(AbstractModel):
"""DescribeVpnConnections返回参数结构体
"""
def __init__(self):
"""
:param TotalCount: 符合条件的实例数量。
:type TotalCount: int
:param VpnConnectionSet: VPN通道实例。
:type VpnConnectionSet: list of VpnConnection
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.TotalCount = None
self.VpnConnectionSet = None
self.RequestId = None
def _deserialize(self, params):
self.TotalCount = params.get("TotalCount")
if params.get("VpnConnectionSet") is not None:
self.VpnConnectionSet = []
for item in params.get("VpnConnectionSet"):
obj = VpnConnection()
obj._deserialize(item)
self.VpnConnectionSet.append(obj)
self.RequestId = params.get("RequestId")
class DescribeVpnGatewaysRequest(AbstractModel):
"""DescribeVpnGateways请求参数结构体
"""
def __init__(self):
"""
:param VpnGatewayIds: VPN网关实例ID。形如:vpngw-f49l6u0z。每次请求的实例的上限为100。参数不支持同时指定VpnGatewayIds和Filters。
:type VpnGatewayIds: list of str
:param Filters: 过滤器对象属性
:type Filters: list of FilterObject
:param Offset: 偏移量
:type Offset: int
:param Limit: 请求对象个数
:type Limit: int
"""
self.VpnGatewayIds = None
self.Filters = None
self.Offset = None
self.Limit = None
def _deserialize(self, params):
self.VpnGatewayIds = params.get("VpnGatewayIds")
if params.get("Filters") is not None:
self.Filters = []
for item in params.get("Filters"):
obj = FilterObject()
obj._deserialize(item)
self.Filters.append(obj)
self.Offset = params.get("Offset")
self.Limit = params.get("Limit")
class DescribeVpnGatewaysResponse(AbstractModel):
"""DescribeVpnGateways返回参数结构体
"""
def __init__(self):
"""
:param TotalCount: 符合条件的实例数量。
:type TotalCount: int
:param VpnGatewaySet: VPN网关实例详细信息列表。
:type VpnGatewaySet: list of VpnGateway
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.TotalCount = None
self.VpnGatewaySet = None
self.RequestId = None
def _deserialize(self, params):
self.TotalCount = params.get("TotalCount")
if params.get("VpnGatewaySet") is not None:
self.VpnGatewaySet = []
for item in params.get("VpnGatewaySet"):
obj = VpnGateway()
obj._deserialize(item)
self.VpnGatewaySet.append(obj)
self.RequestId = params.get("RequestId")
class DetachClassicLinkVpcRequest(AbstractModel):
"""DetachClassicLinkVpc请求参数结构体
"""
def __init__(self):
"""
:param VpcId: VPC实例ID。可通过DescribeVpcs接口返回值中的VpcId获取。
:type VpcId: str
:param InstanceIds: CVM实例ID查询。形如:ins-r8hr2upy。
:type InstanceIds: list of str
"""
self.VpcId = None
self.InstanceIds = None
def _deserialize(self, params):
self.VpcId = params.get("VpcId")
self.InstanceIds = params.get("InstanceIds")
class DetachClassicLinkVpcResponse(AbstractModel):
"""DetachClassicLinkVpc返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class DetachNetworkInterfaceRequest(AbstractModel):
"""DetachNetworkInterface请求参数结构体
"""
def __init__(self):
"""
:param NetworkInterfaceId: 弹性网卡实例ID,例如:eni-m6dyj72l。
:type NetworkInterfaceId: str
:param InstanceId: CVM实例ID。形如:ins-r8hr2upy。
:type InstanceId: str
"""
self.NetworkInterfaceId = None
self.InstanceId = None
def _deserialize(self, params):
self.NetworkInterfaceId = params.get("NetworkInterfaceId")
self.InstanceId = params.get("InstanceId")
class DetachNetworkInterfaceResponse(AbstractModel):
"""DetachNetworkInterface返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class DisassociateAddressRequest(AbstractModel):
"""DisassociateAddress请求参数结构体
"""
def __init__(self):
"""
:param AddressId: 标识 EIP 的唯一 ID。EIP 唯一 ID 形如:`eip-11112222`。
:type AddressId: str
:param ReallocateNormalPublicIp: 表示解绑 EIP 之后是否分配普通公网 IP。取值范围:<br><li>TRUE:表示解绑 EIP 之后分配普通公网 IP。<br><li>FALSE:表示解绑 EIP 之后不分配普通公网 IP。<br>默认取值:FALSE。<br><br>只有满足以下条件时才能指定该参数:<br><li> 只有在解绑主网卡的主内网 IP 上的 EIP 时才能指定该参数。<br><li>解绑 EIP 后重新分配普通公网 IP 操作一个账号每天最多操作 10 次;详情可通过 [DescribeAddressQuota](https://cloud.tencent.com/document/api/213/1378) 接口获取。
:type ReallocateNormalPublicIp: bool
"""
self.AddressId = None
self.ReallocateNormalPublicIp = None
def _deserialize(self, params):
self.AddressId = params.get("AddressId")
self.ReallocateNormalPublicIp = params.get("ReallocateNormalPublicIp")
class DisassociateAddressResponse(AbstractModel):
"""DisassociateAddress返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class DownloadCustomerGatewayConfigurationRequest(AbstractModel):
"""DownloadCustomerGatewayConfiguration请求参数结构体
"""
def __init__(self):
"""
:param VpnGatewayId: VPN网关实例ID。
:type VpnGatewayId: str
:param VpnConnectionId: VPN通道实例ID。形如:vpnx-f49l6u0z。
:type VpnConnectionId: str
:param CustomerGatewayVendor: 对端网关厂商信息对象,可通过DescribeCustomerGatewayVendors获取。
:type CustomerGatewayVendor: :class:`tencentcloud.vpc.v20170312.models.CustomerGatewayVendor`
:param InterfaceName: 通道接入设备物理接口名称。
:type InterfaceName: str
"""
self.VpnGatewayId = None
self.VpnConnectionId = None
self.CustomerGatewayVendor = None
self.InterfaceName = None
def _deserialize(self, params):
self.VpnGatewayId = params.get("VpnGatewayId")
self.VpnConnectionId = params.get("VpnConnectionId")
if params.get("CustomerGatewayVendor") is not None:
self.CustomerGatewayVendor = CustomerGatewayVendor()
self.CustomerGatewayVendor._deserialize(params.get("CustomerGatewayVendor"))
self.InterfaceName = params.get("InterfaceName")
class DownloadCustomerGatewayConfigurationResponse(AbstractModel):
"""DownloadCustomerGatewayConfiguration返回参数结构体
"""
def __init__(self):
"""
:param CustomerGatewayConfiguration: XML格式配置信息。
:type CustomerGatewayConfiguration: str
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.CustomerGatewayConfiguration = None
self.RequestId = None
def _deserialize(self, params):
self.CustomerGatewayConfiguration = params.get("CustomerGatewayConfiguration")
self.RequestId = params.get("RequestId")
class Filter(AbstractModel):
"""过滤器
"""
def __init__(self):
"""
:param Name: 属性名称, 若存在多个Filter时,Filter间的关系为逻辑与(AND)关系。
:type Name: str
:param Values: 属性值, 若同一个Filter存在多个Values,同一Filter下Values间的关系为逻辑或(OR)关系。
:type Values: list of str
"""
self.Name = None
self.Values = None
def _deserialize(self, params):
self.Name = params.get("Name")
self.Values = params.get("Values")
class FilterObject(AbstractModel):
"""过滤器键值对
"""
def __init__(self):
"""
:param Name: 属性名称, 若存在多个Filter时,Filter间的关系为逻辑与(AND)关系。
:type Name: str
:param Values: 属性值, 若同一个Filter存在多个Values,同一Filter下Values间的关系为逻辑或(OR)关系。
:type Values: list of str
"""
self.Name = None
self.Values = None
def _deserialize(self, params):
self.Name = params.get("Name")
self.Values = params.get("Values")
class IKEOptionsSpecification(AbstractModel):
"""IKE配置(Internet Key Exchange,因特网密钥交换),IKE具有一套自保护机制,用户配置网络安全协议
"""
def __init__(self):
"""
:param PropoEncryAlgorithm: 加密算法,可选值:'3DES-CBC', 'AES-CBC-128', 'AES-CBS-192', 'AES-CBC-256', 'DES-CBC',默认为3DES-CBC
:type PropoEncryAlgorithm: str
:param PropoAuthenAlgorithm: 认证算法:可选值:'MD5', 'SHA1',默认为MD5
:type PropoAuthenAlgorithm: str
:param ExchangeMode: 协商模式:可选值:'AGGRESSIVE', 'MAIN',默认为MAIN
:type ExchangeMode: str
:param LocalIdentity: 本端标识类型:可选值:'ADDRESS', 'FQDN',默认为ADDRESS
:type LocalIdentity: str
:param RemoteIdentity: 对端标识类型:可选值:'ADDRESS', 'FQDN',默认为ADDRESS
:type RemoteIdentity: str
:param LocalAddress: 本端标识,当LocalIdentity选为ADDRESS时,LocalAddress必填。localAddress默认为vpn网关公网IP
:type LocalAddress: str
:param RemoteAddress: 对端标识,当RemoteIdentity选为ADDRESS时,RemoteAddress必填
:type RemoteAddress: str
:param LocalFqdnName: 本端标识,当LocalIdentity选为FQDN时,LocalFqdnName必填
:type LocalFqdnName: str
:param RemoteFqdnName: 对端标识,当remoteIdentity选为FQDN时,RemoteFqdnName必填
:type RemoteFqdnName: str
:param DhGroupName: DH group,指定IKE交换密钥时使用的DH组,可选值:'GROUP1', 'GROUP2', 'GROUP5', 'GROUP14', 'GROUP24',
:type DhGroupName: str
:param IKESaLifetimeSeconds: IKE SA Lifetime,单位:秒,设置IKE SA的生存周期,取值范围:60-604800
:type IKESaLifetimeSeconds: int
:param IKEVersion: IKE版本
:type IKEVersion: str
"""
self.PropoEncryAlgorithm = None
self.PropoAuthenAlgorithm = None
self.ExchangeMode = None
self.LocalIdentity = None
self.RemoteIdentity = None
self.LocalAddress = None
self.RemoteAddress = None
self.LocalFqdnName = None
self.RemoteFqdnName = None
self.DhGroupName = None
self.IKESaLifetimeSeconds = None
self.IKEVersion = None
def _deserialize(self, params):
self.PropoEncryAlgorithm = params.get("PropoEncryAlgorithm")
self.PropoAuthenAlgorithm = params.get("PropoAuthenAlgorithm")
self.ExchangeMode = params.get("ExchangeMode")
self.LocalIdentity = params.get("LocalIdentity")
self.RemoteIdentity = params.get("RemoteIdentity")
self.LocalAddress = params.get("LocalAddress")
self.RemoteAddress = params.get("RemoteAddress")
self.LocalFqdnName = params.get("LocalFqdnName")
self.RemoteFqdnName = params.get("RemoteFqdnName")
self.DhGroupName = params.get("DhGroupName")
self.IKESaLifetimeSeconds = params.get("IKESaLifetimeSeconds")
self.IKEVersion = params.get("IKEVersion")
class IPSECOptionsSpecification(AbstractModel):
"""IPSec配置,腾讯云提供IPSec安全会话设置
"""
def __init__(self):
"""
:param EncryptAlgorithm: 加密算法,可选值:'3DES-CBC', 'AES-CBC-128', 'AES-CBC-192', 'AES-CBC-256', 'DES-CBC', 'NULL', 默认为AES-CBC-128
:type EncryptAlgorithm: str
:param IntegrityAlgorith: 认证算法:可选值:'MD5', 'SHA1',默认为
:type IntegrityAlgorith: str
:param IPSECSaLifetimeSeconds: IPsec SA lifetime(s):单位秒,取值范围:180-604800
:type IPSECSaLifetimeSeconds: int
:param PfsDhGroup: PFS:可选值:'NULL', 'DH-GROUP1', 'DH-GROUP2', 'DH-GROUP5', 'DH-GROUP14', 'DH-GROUP24',默认为NULL
:type PfsDhGroup: str
:param IPSECSaLifetimeTraffic: IPsec SA lifetime(KB):单位KB,取值范围:2560-604800
:type IPSECSaLifetimeTraffic: int
"""
self.EncryptAlgorithm = None
self.IntegrityAlgorith = None
self.IPSECSaLifetimeSeconds = None
self.PfsDhGroup = None
self.IPSECSaLifetimeTraffic = None
def _deserialize(self, params):
self.EncryptAlgorithm = params.get("EncryptAlgorithm")
self.IntegrityAlgorith = params.get("IntegrityAlgorith")
self.IPSECSaLifetimeSeconds = params.get("IPSECSaLifetimeSeconds")
self.PfsDhGroup = params.get("PfsDhGroup")
self.IPSECSaLifetimeTraffic = params.get("IPSECSaLifetimeTraffic")
class InquiryPriceCreateVpnGatewayRequest(AbstractModel):
"""InquiryPriceCreateVpnGateway请求参数结构体
"""
def __init__(self):
"""
:param InternetMaxBandwidthOut: 公网带宽设置。可选带宽规格:5, 10, 20, 50, 100;单位:Mbps。
:type InternetMaxBandwidthOut: int
:param InstanceChargeType: VPN网关计费模式,PREPAID:表示预付费,即包年包月,POSTPAID_BY_HOUR:表示后付费,即按量计费。默认:POSTPAID_BY_HOUR,如果指定预付费模式,参数InstanceChargePrepaid必填。
:type InstanceChargeType: str
:param InstanceChargePrepaid: 预付费模式,即包年包月相关参数设置。通过该参数可以指定包年包月实例的购买时长、是否设置自动续费等属性。若指定实例的付费模式为预付费则该参数必传。
:type InstanceChargePrepaid: :class:`tencentcloud.vpc.v20170312.models.InstanceChargePrepaid`
"""
self.InternetMaxBandwidthOut = None
self.InstanceChargeType = None
self.InstanceChargePrepaid = None
def _deserialize(self, params):
self.InternetMaxBandwidthOut = params.get("InternetMaxBandwidthOut")
self.InstanceChargeType = params.get("InstanceChargeType")
if params.get("InstanceChargePrepaid") is not None:
self.InstanceChargePrepaid = InstanceChargePrepaid()
self.InstanceChargePrepaid._deserialize(params.get("InstanceChargePrepaid"))
class InquiryPriceCreateVpnGatewayResponse(AbstractModel):
"""InquiryPriceCreateVpnGateway返回参数结构体
"""
def __init__(self):
"""
:param Price: 商品价格。
:type Price: :class:`tencentcloud.vpc.v20170312.models.Price`
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.Price = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Price") is not None:
self.Price = Price()
self.Price._deserialize(params.get("Price"))
self.RequestId = params.get("RequestId")
class InquiryPriceRenewVpnGatewayRequest(AbstractModel):
"""InquiryPriceRenewVpnGateway请求参数结构体
"""
def __init__(self):
"""
:param VpnGatewayId: VPN网关实例ID。
:type VpnGatewayId: str
:param InstanceChargePrepaid: 预付费模式,即包年包月相关参数设置。通过该参数可以指定包年包月实例的购买时长、是否设置自动续费等属性。若指定实例的付费模式为预付费则该参数必传。
:type InstanceChargePrepaid: :class:`tencentcloud.vpc.v20170312.models.InstanceChargePrepaid`
"""
self.VpnGatewayId = None
self.InstanceChargePrepaid = None
def _deserialize(self, params):
self.VpnGatewayId = params.get("VpnGatewayId")
if params.get("InstanceChargePrepaid") is not None:
self.InstanceChargePrepaid = InstanceChargePrepaid()
self.InstanceChargePrepaid._deserialize(params.get("InstanceChargePrepaid"))
class InquiryPriceRenewVpnGatewayResponse(AbstractModel):
"""InquiryPriceRenewVpnGateway返回参数结构体
"""
def __init__(self):
"""
:param Price: 商品价格。
:type Price: :class:`tencentcloud.vpc.v20170312.models.Price`
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.Price = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Price") is not None:
self.Price = Price()
self.Price._deserialize(params.get("Price"))
self.RequestId = params.get("RequestId")
class InquiryPriceResetVpnGatewayInternetMaxBandwidthRequest(AbstractModel):
"""InquiryPriceResetVpnGatewayInternetMaxBandwidth请求参数结构体
"""
def __init__(self):
"""
:param VpnGatewayId: VPN网关实例ID。
:type VpnGatewayId: str
:param InternetMaxBandwidthOut: 公网带宽设置。可选带宽规格:5, 10, 20, 50, 100;单位:Mbps。
:type InternetMaxBandwidthOut: int
"""
self.VpnGatewayId = None
self.InternetMaxBandwidthOut = None
def _deserialize(self, params):
self.VpnGatewayId = params.get("VpnGatewayId")
self.InternetMaxBandwidthOut = params.get("InternetMaxBandwidthOut")
class InquiryPriceResetVpnGatewayInternetMaxBandwidthResponse(AbstractModel):
"""InquiryPriceResetVpnGatewayInternetMaxBandwidth返回参数结构体
"""
def __init__(self):
"""
:param Price: 商品价格。
:type Price: :class:`tencentcloud.vpc.v20170312.models.Price`
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.Price = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Price") is not None:
self.Price = Price()
self.Price._deserialize(params.get("Price"))
self.RequestId = params.get("RequestId")
class InstanceChargePrepaid(AbstractModel):
"""预付费(包年包月)计费对象。
"""
def __init__(self):
"""
:param Period: 购买实例的时长,单位:月。取值范围:1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 24, 36。
:type Period: int
:param RenewFlag: 自动续费标识。取值范围: NOTIFY_AND_AUTO_RENEW:通知过期且自动续费, NOTIFY_AND_MANUAL_RENEW:通知过期不自动续费。默认:NOTIFY_AND_MANUAL_RENEW
:type RenewFlag: str
"""
self.Period = None
self.RenewFlag = None
def _deserialize(self, params):
self.Period = params.get("Period")
self.RenewFlag = params.get("RenewFlag")
class ItemPrice(AbstractModel):
"""单项计费价格信息
"""
def __init__(self):
"""
:param UnitPrice: 按量计费后付费单价,单位:元。
:type UnitPrice: float
:param ChargeUnit: 按量计费后付费计价单元,可取值范围: HOUR:表示计价单元是按每小时来计算。当前涉及该计价单元的场景有:实例按小时后付费(POSTPAID_BY_HOUR)、带宽按小时后付费(BANDWIDTH_POSTPAID_BY_HOUR): GB:表示计价单元是按每GB来计算。当前涉及该计价单元的场景有:流量按小时后付费(TRAFFIC_POSTPAID_BY_HOUR)。
:type ChargeUnit: str
:param OriginalPrice: 预付费商品的原价,单位:元。
:type OriginalPrice: float
:param DiscountPrice: 预付费商品的折扣价,单位:元。
:type DiscountPrice: float
"""
self.UnitPrice = None
self.ChargeUnit = None
self.OriginalPrice = None
self.DiscountPrice = None
def _deserialize(self, params):
self.UnitPrice = params.get("UnitPrice")
self.ChargeUnit = params.get("ChargeUnit")
self.OriginalPrice = params.get("OriginalPrice")
self.DiscountPrice = params.get("DiscountPrice")
class MigrateNetworkInterfaceRequest(AbstractModel):
"""MigrateNetworkInterface请求参数结构体
"""
def __init__(self):
"""
:param NetworkInterfaceId: 弹性网卡实例ID,例如:eni-m6dyj72l。
:type NetworkInterfaceId: str
:param SourceInstanceId: 弹性网卡当前绑定的CVM实例ID。形如:ins-r8hr2upy。
:type SourceInstanceId: str
:param DestinationInstanceId: 待迁移的目的CVM实例ID。
:type DestinationInstanceId: str
"""
self.NetworkInterfaceId = None
self.SourceInstanceId = None
self.DestinationInstanceId = None
def _deserialize(self, params):
self.NetworkInterfaceId = params.get("NetworkInterfaceId")
self.SourceInstanceId = params.get("SourceInstanceId")
self.DestinationInstanceId = params.get("DestinationInstanceId")
class MigrateNetworkInterfaceResponse(AbstractModel):
"""MigrateNetworkInterface返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class MigratePrivateIpAddressRequest(AbstractModel):
"""MigratePrivateIpAddress请求参数结构体
"""
def __init__(self):
"""
:param SourceNetworkInterfaceId: 当内网IP绑定的弹性网卡实例ID,例如:eni-m6dyj72l。
:type SourceNetworkInterfaceId: str
:param DestinationNetworkInterfaceId: 待迁移的目的弹性网卡实例ID。
:type DestinationNetworkInterfaceId: str
:param PrivateIpAddress: 迁移的内网IP地址,例如:10.0.0.6。
:type PrivateIpAddress: str
"""
self.SourceNetworkInterfaceId = None
self.DestinationNetworkInterfaceId = None
self.PrivateIpAddress = None
def _deserialize(self, params):
self.SourceNetworkInterfaceId = params.get("SourceNetworkInterfaceId")
self.DestinationNetworkInterfaceId = params.get("DestinationNetworkInterfaceId")
self.PrivateIpAddress = params.get("PrivateIpAddress")
class MigratePrivateIpAddressResponse(AbstractModel):
"""MigratePrivateIpAddress返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class ModifyAddressAttributeRequest(AbstractModel):
"""ModifyAddressAttribute请求参数结构体
"""
def __init__(self):
"""
:param AddressId: 标识 EIP 的唯一 ID。EIP 唯一 ID 形如:`eip-11112222`。
:type AddressId: str
:param AddressName: 修改后的 EIP 名称。长度上限为20个字符。
:type AddressName: str
"""
self.AddressId = None
self.AddressName = None
def _deserialize(self, params):
self.AddressId = params.get("AddressId")
self.AddressName = params.get("AddressName")
class ModifyAddressAttributeResponse(AbstractModel):
"""ModifyAddressAttribute返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class ModifyAddressTemplateAttributeRequest(AbstractModel):
"""ModifyAddressTemplateAttribute请求参数结构体
"""
def __init__(self):
"""
:param AddressTemplateId: IP地址模板实例ID,例如:ipm-mdunqeb6。
:type AddressTemplateId: str
:param AddressTemplateName: IP地址模板名称。
:type AddressTemplateName: str
:param Addresses: 地址信息,支持 IP、CIDR、IP 范围。
:type Addresses: list of str
"""
self.AddressTemplateId = None
self.AddressTemplateName = None
self.Addresses = None
def _deserialize(self, params):
self.AddressTemplateId = params.get("AddressTemplateId")
self.AddressTemplateName = params.get("AddressTemplateName")
self.Addresses = params.get("Addresses")
class ModifyAddressTemplateAttributeResponse(AbstractModel):
"""ModifyAddressTemplateAttribute返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class ModifyAddressTemplateGroupAttributeRequest(AbstractModel):
"""ModifyAddressTemplateGroupAttribute请求参数结构体
"""
def __init__(self):
"""
:param AddressTemplateGroupId: IP地址模板集合实例ID,例如:ipmg-2uw6ujo6。
:type AddressTemplateGroupId: str
:param AddressTemplateGroupName: IP地址模板集合名称。
:type AddressTemplateGroupName: str
:param AddressTemplateIds: IP地址模板实例ID, 例如:ipm-mdunqeb6。
:type AddressTemplateIds: list of str
"""
self.AddressTemplateGroupId = None
self.AddressTemplateGroupName = None
self.AddressTemplateIds = None
def _deserialize(self, params):
self.AddressTemplateGroupId = params.get("AddressTemplateGroupId")
self.AddressTemplateGroupName = params.get("AddressTemplateGroupName")
self.AddressTemplateIds = params.get("AddressTemplateIds")
class ModifyAddressTemplateGroupAttributeResponse(AbstractModel):
"""ModifyAddressTemplateGroupAttribute返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class ModifyCustomerGatewayAttributeRequest(AbstractModel):
"""ModifyCustomerGatewayAttribute请求参数结构体
"""
def __init__(self):
"""
:param CustomerGatewayId: 对端网关ID,例如:cgw-2wqq41m9,可通过DescribeCustomerGateways接口查询对端网关。
:type CustomerGatewayId: str
:param CustomerGatewayName: 对端网关名称,可任意命名,但不得超过60个字符。
:type CustomerGatewayName: str
"""
self.CustomerGatewayId = None
self.CustomerGatewayName = None
def _deserialize(self, params):
self.CustomerGatewayId = params.get("CustomerGatewayId")
self.CustomerGatewayName = params.get("CustomerGatewayName")
class ModifyCustomerGatewayAttributeResponse(AbstractModel):
"""ModifyCustomerGatewayAttribute返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class ModifyNetworkInterfaceAttributeRequest(AbstractModel):
"""ModifyNetworkInterfaceAttribute请求参数结构体
"""
def __init__(self):
"""
:param NetworkInterfaceId: 弹性网卡实例ID,例如:eni-pxir56ns。
:type NetworkInterfaceId: str
:param NetworkInterfaceName: 弹性网卡名称,最大长度不能超过60个字节。
:type NetworkInterfaceName: str
:param NetworkInterfaceDescription: 弹性网卡描述,可任意命名,但不得超过60个字符。
:type NetworkInterfaceDescription: str
:param SecurityGroupIds: 指定绑定的安全组,例如:['sg-1dd51d']。
:type SecurityGroupIds: list of str
"""
self.NetworkInterfaceId = None
self.NetworkInterfaceName = None
self.NetworkInterfaceDescription = None
self.SecurityGroupIds = None
def _deserialize(self, params):
self.NetworkInterfaceId = params.get("NetworkInterfaceId")
self.NetworkInterfaceName = params.get("NetworkInterfaceName")
self.NetworkInterfaceDescription = params.get("NetworkInterfaceDescription")
self.SecurityGroupIds = params.get("SecurityGroupIds")
class ModifyNetworkInterfaceAttributeResponse(AbstractModel):
"""ModifyNetworkInterfaceAttribute返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class ModifyPrivateIpAddressesAttributeRequest(AbstractModel):
"""ModifyPrivateIpAddressesAttribute请求参数结构体
"""
def __init__(self):
"""
:param NetworkInterfaceId: 弹性网卡实例ID,例如:eni-m6dyj72l。
:type NetworkInterfaceId: str
:param PrivateIpAddresses: 指定的内网IP信息。
:type PrivateIpAddresses: list of PrivateIpAddressSpecification
"""
self.NetworkInterfaceId = None
self.PrivateIpAddresses = None
def _deserialize(self, params):
self.NetworkInterfaceId = params.get("NetworkInterfaceId")
if params.get("PrivateIpAddresses") is not None:
self.PrivateIpAddresses = []
for item in params.get("PrivateIpAddresses"):
obj = PrivateIpAddressSpecification()
obj._deserialize(item)
self.PrivateIpAddresses.append(obj)
class ModifyPrivateIpAddressesAttributeResponse(AbstractModel):
"""ModifyPrivateIpAddressesAttribute返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class ModifyRouteTableAttributeRequest(AbstractModel):
"""ModifyRouteTableAttribute请求参数结构体
"""
def __init__(self):
"""
:param RouteTableId: 路由表实例ID,例如:rtb-azd4dt1c。
:type RouteTableId: str
:param RouteTableName: 路由表名称。
:type RouteTableName: str
"""
self.RouteTableId = None
self.RouteTableName = None
def _deserialize(self, params):
self.RouteTableId = params.get("RouteTableId")
self.RouteTableName = params.get("RouteTableName")
class ModifyRouteTableAttributeResponse(AbstractModel):
"""ModifyRouteTableAttribute返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class ModifySecurityGroupAttributeRequest(AbstractModel):
"""ModifySecurityGroupAttribute请求参数结构体
"""
def __init__(self):
"""
:param SecurityGroupId: 安全组实例ID,例如sg-33ocnj9n,可通过DescribeSecurityGroups获取。
:type SecurityGroupId: str
:param GroupName: 安全组名称,可任意命名,但不得超过60个字符。
:type GroupName: str
:param GroupDescription: 安全组备注,最多100个字符。
:type GroupDescription: str
"""
self.SecurityGroupId = None
self.GroupName = None
self.GroupDescription = None
def _deserialize(self, params):
self.SecurityGroupId = params.get("SecurityGroupId")
self.GroupName = params.get("GroupName")
self.GroupDescription = params.get("GroupDescription")
class ModifySecurityGroupAttributeResponse(AbstractModel):
"""ModifySecurityGroupAttribute返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class ModifySecurityGroupPoliciesRequest(AbstractModel):
"""ModifySecurityGroupPolicies请求参数结构体
"""
def __init__(self):
"""
:param SecurityGroupId: 安全组实例ID,例如sg-33ocnj9n,可通过DescribeSecurityGroups获取。
:type SecurityGroupId: str
:param SecurityGroupPolicySet: 安全组规则集合。 SecurityGroupPolicySet对象必须同时指定新的出(Egress)入(Ingress)站规则。 SecurityGroupPolicy对象不支持自定义索引(PolicyIndex)。
:type SecurityGroupPolicySet: :class:`tencentcloud.vpc.v20170312.models.SecurityGroupPolicySet`
"""
self.SecurityGroupId = None
self.SecurityGroupPolicySet = None
def _deserialize(self, params):
self.SecurityGroupId = params.get("SecurityGroupId")
if params.get("SecurityGroupPolicySet") is not None:
self.SecurityGroupPolicySet = SecurityGroupPolicySet()
self.SecurityGroupPolicySet._deserialize(params.get("SecurityGroupPolicySet"))
class ModifySecurityGroupPoliciesResponse(AbstractModel):
"""ModifySecurityGroupPolicies返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class ModifyServiceTemplateAttributeRequest(AbstractModel):
"""ModifyServiceTemplateAttribute请求参数结构体
"""
def __init__(self):
"""
:param ServiceTemplateId: 协议端口模板实例ID,例如:ppm-529nwwj8。
:type ServiceTemplateId: str
:param ServiceTemplateName: 协议端口模板名称。
:type ServiceTemplateName: str
:param Services: 支持单个端口、多个端口、连续端口及所有端口,协议支持:TCP、UDP、ICMP、GRE 协议。
:type Services: list of str
"""
self.ServiceTemplateId = None
self.ServiceTemplateName = None
self.Services = None
def _deserialize(self, params):
self.ServiceTemplateId = params.get("ServiceTemplateId")
self.ServiceTemplateName = params.get("ServiceTemplateName")
self.Services = params.get("Services")
class ModifyServiceTemplateAttributeResponse(AbstractModel):
"""ModifyServiceTemplateAttribute返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class ModifyServiceTemplateGroupAttributeRequest(AbstractModel):
"""ModifyServiceTemplateGroupAttribute请求参数结构体
"""
def __init__(self):
"""
:param ServiceTemplateGroupId: 协议端口模板集合实例ID,例如:ppmg-ei8hfd9a。
:type ServiceTemplateGroupId: str
:param ServiceTemplateGroupName: 协议端口模板集合名称。
:type ServiceTemplateGroupName: str
:param ServiceTemplateIds: 协议端口模板实例ID,例如:ppm-4dw6agho。
:type ServiceTemplateIds: list of str
"""
self.ServiceTemplateGroupId = None
self.ServiceTemplateGroupName = None
self.ServiceTemplateIds = None
def _deserialize(self, params):
self.ServiceTemplateGroupId = params.get("ServiceTemplateGroupId")
self.ServiceTemplateGroupName = params.get("ServiceTemplateGroupName")
self.ServiceTemplateIds = params.get("ServiceTemplateIds")
class ModifyServiceTemplateGroupAttributeResponse(AbstractModel):
"""ModifyServiceTemplateGroupAttribute返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class ModifySubnetAttributeRequest(AbstractModel):
"""ModifySubnetAttribute请求参数结构体
"""
def __init__(self):
"""
:param SubnetId: 子网实例ID。形如:subnet-pxir56ns。
:type SubnetId: str
:param SubnetName: 子网名称,最大长度不能超过60个字节。
:type SubnetName: str
:param EnableBroadcast: 子网是否开启广播。
:type EnableBroadcast: str
"""
self.SubnetId = None
self.SubnetName = None
self.EnableBroadcast = None
def _deserialize(self, params):
self.SubnetId = params.get("SubnetId")
self.SubnetName = params.get("SubnetName")
self.EnableBroadcast = params.get("EnableBroadcast")
class ModifySubnetAttributeResponse(AbstractModel):
"""ModifySubnetAttribute返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class ModifyVpcAttributeRequest(AbstractModel):
"""ModifyVpcAttribute请求参数结构体
"""
def __init__(self):
"""
:param VpcId: VPC实例ID。形如:vpc-f49l6u0z。每次请求的实例的上限为100。参数不支持同时指定VpcIds和Filters。
:type VpcId: str
:param VpcName: 私有网络名称,可任意命名,但不得超过60个字符。
:type VpcName: str
:param EnableMulticast: 是否开启组播。true: 开启, false: 关闭。
:type EnableMulticast: str
:param DnsServers: DNS地址,最多支持4个,第1个默认为主,其余为备
:type DnsServers: list of str
:param DomainName: 域名
:type DomainName: str
"""
self.VpcId = None
self.VpcName = None
self.EnableMulticast = None
self.DnsServers = None
self.DomainName = None
def _deserialize(self, params):
self.VpcId = params.get("VpcId")
self.VpcName = params.get("VpcName")
self.EnableMulticast = params.get("EnableMulticast")
self.DnsServers = params.get("DnsServers")
self.DomainName = params.get("DomainName")
class ModifyVpcAttributeResponse(AbstractModel):
"""ModifyVpcAttribute返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class ModifyVpnConnectionAttributeRequest(AbstractModel):
"""ModifyVpnConnectionAttribute请求参数结构体
"""
def __init__(self):
"""
:param VpnConnectionId: VPN通道实例ID。形如:vpnx-f49l6u0z。
:type VpnConnectionId: str
:param VpnConnectionName: VPN通道名称,可任意命名,但不得超过60个字符。
:type VpnConnectionName: str
:param PreShareKey: 预共享密钥。
:type PreShareKey: str
:param SecurityPolicyDatabases: SPD策略组,例如:{"10.0.0.5/24":["172.123.10.5/16"]},10.0.0.5/24是vpc内网段172.123.10.5/16是IDC网段。用户指定VPC内哪些网段可以和您IDC中哪些网段通信。
:type SecurityPolicyDatabases: list of SecurityPolicyDatabase
:param IKEOptionsSpecification: IKE配置(Internet Key Exchange,因特网密钥交换),IKE具有一套自保护机制,用户配置网络安全协议。
:type IKEOptionsSpecification: :class:`tencentcloud.vpc.v20170312.models.IKEOptionsSpecification`
:param IPSECOptionsSpecification: IPSec配置,腾讯云提供IPSec安全会话设置。
:type IPSECOptionsSpecification: :class:`tencentcloud.vpc.v20170312.models.IPSECOptionsSpecification`
"""
self.VpnConnectionId = None
self.VpnConnectionName = None
self.PreShareKey = None
self.SecurityPolicyDatabases = None
self.IKEOptionsSpecification = None
self.IPSECOptionsSpecification = None
def _deserialize(self, params):
self.VpnConnectionId = params.get("VpnConnectionId")
self.VpnConnectionName = params.get("VpnConnectionName")
self.PreShareKey = params.get("PreShareKey")
if params.get("SecurityPolicyDatabases") is not None:
self.SecurityPolicyDatabases = []
for item in params.get("SecurityPolicyDatabases"):
obj = SecurityPolicyDatabase()
obj._deserialize(item)
self.SecurityPolicyDatabases.append(obj)
if params.get("IKEOptionsSpecification") is not None:
self.IKEOptionsSpecification = IKEOptionsSpecification()
self.IKEOptionsSpecification._deserialize(params.get("IKEOptionsSpecification"))
if params.get("IPSECOptionsSpecification") is not None:
self.IPSECOptionsSpecification = IPSECOptionsSpecification()
self.IPSECOptionsSpecification._deserialize(params.get("IPSECOptionsSpecification"))
class ModifyVpnConnectionAttributeResponse(AbstractModel):
"""ModifyVpnConnectionAttribute返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class ModifyVpnGatewayAttributeRequest(AbstractModel):
"""ModifyVpnGatewayAttribute请求参数结构体
"""
def __init__(self):
"""
:param VpnGatewayId: VPN网关实例ID。
:type VpnGatewayId: str
:param VpnGatewayName: VPN网关名称,最大长度不能超过60个字节。
:type VpnGatewayName: str
:param InstanceChargeType: VPN网关计费模式,目前只支持预付费(即包年包月)到后付费(即按量计费)的转换。即参数只支持:POSTPAID_BY_HOUR。
:type InstanceChargeType: str
"""
self.VpnGatewayId = None
self.VpnGatewayName = None
self.InstanceChargeType = None
def _deserialize(self, params):
self.VpnGatewayId = params.get("VpnGatewayId")
self.VpnGatewayName = params.get("VpnGatewayName")
self.InstanceChargeType = params.get("InstanceChargeType")
class ModifyVpnGatewayAttributeResponse(AbstractModel):
"""ModifyVpnGatewayAttribute返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class NetworkInterface(AbstractModel):
"""弹性网卡
"""
def __init__(self):
"""
:param NetworkInterfaceId: 弹性网卡实例ID,例如:eni-f1xjkw1b。
:type NetworkInterfaceId: str
:param NetworkInterfaceName: 弹性网卡名称。
:type NetworkInterfaceName: str
:param NetworkInterfaceDescription: 弹性网卡描述。
:type NetworkInterfaceDescription: str
:param SubnetId: 子网实例ID。
:type SubnetId: str
:param VpcId: VPC实例ID。
:type VpcId: str
:param GroupSet: 绑定的安全组。
:type GroupSet: list of str
:param Primary: 是否是主网卡。
:type Primary: bool
:param MacAddress: MAC地址。
:type MacAddress: str
:param State: 取值范围:PENDING|AVAILABLE|ATTACHING|DETACHING|DELETING。
:type State: str
:param PrivateIpAddressSet: 内网IP信息。
:type PrivateIpAddressSet: list of PrivateIpAddressSpecification
:param Attachment: 绑定的云服务器对象。
:type Attachment: :class:`tencentcloud.vpc.v20170312.models.NetworkInterfaceAttachment`
:param Zone: 可用区。
:type Zone: str
:param CreatedTime: 创建时间。
:type CreatedTime: str
"""
self.NetworkInterfaceId = None
self.NetworkInterfaceName = None
self.NetworkInterfaceDescription = None
self.SubnetId = None
self.VpcId = None
self.GroupSet = None
self.Primary = None
self.MacAddress = None
self.State = None
self.PrivateIpAddressSet = None
self.Attachment = None
self.Zone = None
self.CreatedTime = None
def _deserialize(self, params):
self.NetworkInterfaceId = params.get("NetworkInterfaceId")
self.NetworkInterfaceName = params.get("NetworkInterfaceName")
self.NetworkInterfaceDescription = params.get("NetworkInterfaceDescription")
self.SubnetId = params.get("SubnetId")
self.VpcId = params.get("VpcId")
self.GroupSet = params.get("GroupSet")
self.Primary = params.get("Primary")
self.MacAddress = params.get("MacAddress")
self.State = params.get("State")
if params.get("PrivateIpAddressSet") is not None:
self.PrivateIpAddressSet = []
for item in params.get("PrivateIpAddressSet"):
obj = PrivateIpAddressSpecification()
obj._deserialize(item)
self.PrivateIpAddressSet.append(obj)
if params.get("Attachment") is not None:
self.Attachment = NetworkInterfaceAttachment()
self.Attachment._deserialize(params.get("Attachment"))
self.Zone = params.get("Zone")
self.CreatedTime = params.get("CreatedTime")
class NetworkInterfaceAttachment(AbstractModel):
"""弹性网卡绑定关系
"""
def __init__(self):
"""
:param InstanceId: 云主机实例ID。
:type InstanceId: str
:param DeviceIndex: 网卡在云主机实例内的序号。
:type DeviceIndex: int
:param InstanceAccountId: 云主机所有者账户信息。
:type InstanceAccountId: str
:param AttachTime: 绑定时间。
:type AttachTime: str
"""
self.InstanceId = None
self.DeviceIndex = None
self.InstanceAccountId = None
self.AttachTime = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.DeviceIndex = params.get("DeviceIndex")
self.InstanceAccountId = params.get("InstanceAccountId")
self.AttachTime = params.get("AttachTime")
class Price(AbstractModel):
"""价格
"""
def __init__(self):
"""
:param InstancePrice: 实例价格。
:type InstancePrice: :class:`tencentcloud.vpc.v20170312.models.ItemPrice`
:param BandwidthPrice: 网络价格。
:type BandwidthPrice: :class:`tencentcloud.vpc.v20170312.models.ItemPrice`
"""
self.InstancePrice = None
self.BandwidthPrice = None
def _deserialize(self, params):
if params.get("InstancePrice") is not None:
self.InstancePrice = ItemPrice()
self.InstancePrice._deserialize(params.get("InstancePrice"))
if params.get("BandwidthPrice") is not None:
self.BandwidthPrice = ItemPrice()
self.BandwidthPrice._deserialize(params.get("BandwidthPrice"))
class PrivateIpAddressSpecification(AbstractModel):
"""内网IP信息
"""
def __init__(self):
"""
:param PrivateIpAddress: 内网IP地址。
:type PrivateIpAddress: str
:param Primary: 是否是主IP。
:type Primary: bool
:param PublicIpAddress: 公网IP地址。
:type PublicIpAddress: str
:param AddressId: EIP实例ID,例如:eip-11112222。
:type AddressId: str
:param Description: 内网IP描述信息。
:type Description: str
:param IsWanIpBlocked: 公网IP是否被封堵。
:type IsWanIpBlocked: bool
"""
self.PrivateIpAddress = None
self.Primary = None
self.PublicIpAddress = None
self.AddressId = None
self.Description = None
self.IsWanIpBlocked = None
def _deserialize(self, params):
self.PrivateIpAddress = params.get("PrivateIpAddress")
self.Primary = params.get("Primary")
self.PublicIpAddress = params.get("PublicIpAddress")
self.AddressId = params.get("AddressId")
self.Description = params.get("Description")
self.IsWanIpBlocked = params.get("IsWanIpBlocked")
class Quota(AbstractModel):
"""描述了配额信息
"""
def __init__(self):
"""
:param QuotaId: 配额名称,取值范围:<br><li>`TOTAL_EIP_QUOTA`:用户当前地域下EIP的配额数;<br><li>`DAILY_EIP_APPLY`:用户当前地域下今日申购次数;<br><li>`DAILY_PUBLIC_IP_ASSIGN`:用户当前地域下,重新分配公网 IP次数。
:type QuotaId: str
:param QuotaCurrent: 当前数量
:type QuotaCurrent: int
:param QuotaLimit: 配额数量
:type QuotaLimit: int
"""
self.QuotaId = None
self.QuotaCurrent = None
self.QuotaLimit = None
def _deserialize(self, params):
self.QuotaId = params.get("QuotaId")
self.QuotaCurrent = params.get("QuotaCurrent")
self.QuotaLimit = params.get("QuotaLimit")
class ReleaseAddressesRequest(AbstractModel):
"""ReleaseAddresses请求参数结构体
"""
def __init__(self):
"""
:param AddressIds: 标识 EIP 的唯一 ID 列表。EIP 唯一 ID 形如:`eip-11112222`。
:type AddressIds: list of str
"""
self.AddressIds = None
def _deserialize(self, params):
self.AddressIds = params.get("AddressIds")
class ReleaseAddressesResponse(AbstractModel):
"""ReleaseAddresses返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class RenewVpnGatewayRequest(AbstractModel):
"""RenewVpnGateway请求参数结构体
"""
def __init__(self):
"""
:param VpnGatewayId: VPN网关实例ID。
:type VpnGatewayId: str
:param InstanceChargePrepaid: 预付费计费模式。
:type InstanceChargePrepaid: :class:`tencentcloud.vpc.v20170312.models.InstanceChargePrepaid`
"""
self.VpnGatewayId = None
self.InstanceChargePrepaid = None
def _deserialize(self, params):
self.VpnGatewayId = params.get("VpnGatewayId")
if params.get("InstanceChargePrepaid") is not None:
self.InstanceChargePrepaid = InstanceChargePrepaid()
self.InstanceChargePrepaid._deserialize(params.get("InstanceChargePrepaid"))
class RenewVpnGatewayResponse(AbstractModel):
"""RenewVpnGateway返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class ReplaceRouteTableAssociationRequest(AbstractModel):
"""ReplaceRouteTableAssociation请求参数结构体
"""
def __init__(self):
"""
:param SubnetId: 子网实例ID,例如:subnet-3x5lf5q0。可通过DescribeSubnets接口查询。
:type SubnetId: str
:param RouteTableId: 路由表实例ID,例如:rtb-azd4dt1c。
:type RouteTableId: str
"""
self.SubnetId = None
self.RouteTableId = None
def _deserialize(self, params):
self.SubnetId = params.get("SubnetId")
self.RouteTableId = params.get("RouteTableId")
class ReplaceRouteTableAssociationResponse(AbstractModel):
"""ReplaceRouteTableAssociation返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class ReplaceRoutesRequest(AbstractModel):
"""ReplaceRoutes请求参数结构体
"""
def __init__(self):
"""
:param RouteTableId: 路由表实例ID,例如:rtb-azd4dt1c。
:type RouteTableId: str
:param Routes: 路由策略对象。只需要指定路由策略ID(RouteId)。
:type Routes: list of Route
"""
self.RouteTableId = None
self.Routes = None
def _deserialize(self, params):
self.RouteTableId = params.get("RouteTableId")
if params.get("Routes") is not None:
self.Routes = []
for item in params.get("Routes"):
obj = Route()
obj._deserialize(item)
self.Routes.append(obj)
class ReplaceRoutesResponse(AbstractModel):
"""ReplaceRoutes返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class ReplaceSecurityGroupPolicyRequest(AbstractModel):
"""ReplaceSecurityGroupPolicy请求参数结构体
"""
def __init__(self):
"""
:param SecurityGroupId: 安全组实例ID,例如sg-33ocnj9n,可通过DescribeSecurityGroups获取。
:type SecurityGroupId: str
:param SecurityGroupPolicySet: 安全组规则集合对象。
:type SecurityGroupPolicySet: :class:`tencentcloud.vpc.v20170312.models.SecurityGroupPolicySet`
"""
self.SecurityGroupId = None
self.SecurityGroupPolicySet = None
def _deserialize(self, params):
self.SecurityGroupId = params.get("SecurityGroupId")
if params.get("SecurityGroupPolicySet") is not None:
self.SecurityGroupPolicySet = SecurityGroupPolicySet()
self.SecurityGroupPolicySet._deserialize(params.get("SecurityGroupPolicySet"))
class ReplaceSecurityGroupPolicyResponse(AbstractModel):
"""ReplaceSecurityGroupPolicy返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class ResetRoutesRequest(AbstractModel):
"""ResetRoutes请求参数结构体
"""
def __init__(self):
"""
:param RouteTableId: 路由表实例ID,例如:rtb-azd4dt1c。
:type RouteTableId: str
:param RouteTableName: 路由表名称,最大长度不能超过60个字节。
:type RouteTableName: str
:param Routes: 路由策略。
:type Routes: list of Route
"""
self.RouteTableId = None
self.RouteTableName = None
self.Routes = None
def _deserialize(self, params):
self.RouteTableId = params.get("RouteTableId")
self.RouteTableName = params.get("RouteTableName")
if params.get("Routes") is not None:
self.Routes = []
for item in params.get("Routes"):
obj = Route()
obj._deserialize(item)
self.Routes.append(obj)
class ResetRoutesResponse(AbstractModel):
"""ResetRoutes返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class ResetVpnConnectionRequest(AbstractModel):
"""ResetVpnConnection请求参数结构体
"""
def __init__(self):
"""
:param VpnGatewayId: VPN网关实例ID。
:type VpnGatewayId: str
:param VpnConnectionId: VPN通道实例ID。形如:vpnx-f49l6u0z。
:type VpnConnectionId: str
"""
self.VpnGatewayId = None
self.VpnConnectionId = None
def _deserialize(self, params):
self.VpnGatewayId = params.get("VpnGatewayId")
self.VpnConnectionId = params.get("VpnConnectionId")
class ResetVpnConnectionResponse(AbstractModel):
"""ResetVpnConnection返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class ResetVpnGatewayInternetMaxBandwidthRequest(AbstractModel):
"""ResetVpnGatewayInternetMaxBandwidth请求参数结构体
"""
def __init__(self):
"""
:param VpnGatewayId: VPN网关实例ID。
:type VpnGatewayId: str
:param InternetMaxBandwidthOut: 公网带宽设置。可选带宽规格:5, 10, 20, 50, 100;单位:Mbps。
:type InternetMaxBandwidthOut: int
"""
self.VpnGatewayId = None
self.InternetMaxBandwidthOut = None
def _deserialize(self, params):
self.VpnGatewayId = params.get("VpnGatewayId")
self.InternetMaxBandwidthOut = params.get("InternetMaxBandwidthOut")
class ResetVpnGatewayInternetMaxBandwidthResponse(AbstractModel):
"""ResetVpnGatewayInternetMaxBandwidth返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class Route(AbstractModel):
"""路由策略对象
"""
def __init__(self):
"""
:param DestinationCidrBlock: 目的网段,取值不能在私有网络网段内,例如:112.20.51.0/24。
:type DestinationCidrBlock: str
:param GatewayType: 下一跳类型,目前我们支持的类型有:CVM:公网网关类型的云主机;VPN:vpn网关; DIRECTCONNECT:专线网关;PEERCONNECTION:对等连接;SSLVPN:sslvpn网关;NAT:nat网关; NORMAL_CVM:普通云主机。
:type GatewayType: str
:param GatewayId: 下一跳地址,这里只需要指定不同下一跳类型的网关ID,系统会自动匹配到下一跳地址。
:type GatewayId: str
:param RouteId: 路由策略ID。
:type RouteId: int
:param RouteDescription: 路由策略描述。
:type RouteDescription: str
:param Enabled: 是否启用
:type Enabled: bool
"""
self.DestinationCidrBlock = None
self.GatewayType = None
self.GatewayId = None
self.RouteId = None
self.RouteDescription = None
self.Enabled = None
def _deserialize(self, params):
self.DestinationCidrBlock = params.get("DestinationCidrBlock")
self.GatewayType = params.get("GatewayType")
self.GatewayId = params.get("GatewayId")
self.RouteId = params.get("RouteId")
self.RouteDescription = params.get("RouteDescription")
self.Enabled = params.get("Enabled")
class RouteTable(AbstractModel):
"""路由表对象
"""
def __init__(self):
"""
:param VpcId: VPC实例ID。
:type VpcId: str
:param RouteTableId: 路由表实例ID,例如:rtb-azd4dt1c。
:type RouteTableId: str
:param RouteTableName: 路由表名称。
:type RouteTableName: str
:param AssociationSet: 路由表关联关系。
:type AssociationSet: list of RouteTableAssociation
:param RouteSet: 路由表策略集合。
:type RouteSet: list of Route
:param Main: 是否默认路由表。
:type Main: bool
:param CreatedTime: 创建时间。
:type CreatedTime: str
"""
self.VpcId = None
self.RouteTableId = None
self.RouteTableName = None
self.AssociationSet = None
self.RouteSet = None
self.Main = None
self.CreatedTime = None
def _deserialize(self, params):
self.VpcId = params.get("VpcId")
self.RouteTableId = params.get("RouteTableId")
self.RouteTableName = params.get("RouteTableName")
if params.get("AssociationSet") is not None:
self.AssociationSet = []
for item in params.get("AssociationSet"):
obj = RouteTableAssociation()
obj._deserialize(item)
self.AssociationSet.append(obj)
if params.get("RouteSet") is not None:
self.RouteSet = []
for item in params.get("RouteSet"):
obj = Route()
obj._deserialize(item)
self.RouteSet.append(obj)
self.Main = params.get("Main")
self.CreatedTime = params.get("CreatedTime")
class RouteTableAssociation(AbstractModel):
"""路由表关联关系
"""
def __init__(self):
"""
:param SubnetId: 子网实例ID。
:type SubnetId: str
:param RouteTableId: 路由表实例ID。
:type RouteTableId: str
"""
self.SubnetId = None
self.RouteTableId = None
def _deserialize(self, params):
self.SubnetId = params.get("SubnetId")
self.RouteTableId = params.get("RouteTableId")
class SecurityGroup(AbstractModel):
"""安全组对象
"""
def __init__(self):
"""
:param SecurityGroupId: 安全组实例ID,例如:sg-ohuuioma。
:type SecurityGroupId: str
:param SecurityGroupName: 安全组名称,可任意命名,但不得超过60个字符。
:type SecurityGroupName: str
:param SecurityGroupDesc: 安全组备注,最多100个字符。
:type SecurityGroupDesc: str
:param ProjectId: 项目id,默认0。可在qcloud控制台项目管理页面查询到。
:type ProjectId: str
:param IsDefault: 是否是默认安全组,默认安全组不支持删除。
:type IsDefault: bool
:param CreatedTime: 安全组创建时间。
:type CreatedTime: str
"""
self.SecurityGroupId = None
self.SecurityGroupName = None
self.SecurityGroupDesc = None
self.ProjectId = None
self.IsDefault = None
self.CreatedTime = None
def _deserialize(self, params):
self.SecurityGroupId = params.get("SecurityGroupId")
self.SecurityGroupName = params.get("SecurityGroupName")
self.SecurityGroupDesc = params.get("SecurityGroupDesc")
self.ProjectId = params.get("ProjectId")
self.IsDefault = params.get("IsDefault")
self.CreatedTime = params.get("CreatedTime")
class SecurityGroupAssociationStatistics(AbstractModel):
"""安全组关联的实例统计
"""
def __init__(self):
"""
:param SecurityGroupId: 安全组实例ID。
:type SecurityGroupId: str
:param CVM: 云主机实例数。
:type CVM: int
:param CDB: 数据库实例数。
:type CDB: int
:param ENI: 弹性网卡实例数。
:type ENI: int
:param SG: 被安全组引用数。
:type SG: int
:param CLB: 负载均衡实例数。
:type CLB: int
"""
self.SecurityGroupId = None
self.CVM = None
self.CDB = None
self.ENI = None
self.SG = None
self.CLB = None
def _deserialize(self, params):
self.SecurityGroupId = params.get("SecurityGroupId")
self.CVM = params.get("CVM")
self.CDB = params.get("CDB")
self.ENI = params.get("ENI")
self.SG = params.get("SG")
self.CLB = params.get("CLB")
class SecurityGroupPolicy(AbstractModel):
"""安全组规则对象
"""
def __init__(self):
"""
:param PolicyIndex: 安全组规则索引号。
:type PolicyIndex: int
:param Protocol: 协议, 取值: TCP,UDP, ICMP。
:type Protocol: str
:param Port: 端口(all, 离散port, range)。
:type Port: str
:param ServiceTemplate: 协议端口ID或者协议端口组ID。ServiceTemplate和Protocol+Port互斥。
:type ServiceTemplate: list of str
:param CidrBlock: 网段或IP(互斥)。
:type CidrBlock: str
:param SecurityGroupId: 已绑定安全组的网段或IP。
:type SecurityGroupId: str
:param AddressTemplate: IP地址ID或者ID地址组ID。
:type AddressTemplate: str
:param Action: ACCEPT 或 DROP。
:type Action: str
:param PolicyDescription: 安全组规则描述。
:type PolicyDescription: str
"""
self.PolicyIndex = None
self.Protocol = None
self.Port = None
self.ServiceTemplate = None
self.CidrBlock = None
self.SecurityGroupId = None
self.AddressTemplate = None
self.Action = None
self.PolicyDescription = None
def _deserialize(self, params):
self.PolicyIndex = params.get("PolicyIndex")
self.Protocol = params.get("Protocol")
self.Port = params.get("Port")
self.ServiceTemplate = params.get("ServiceTemplate")
self.CidrBlock = params.get("CidrBlock")
self.SecurityGroupId = params.get("SecurityGroupId")
self.AddressTemplate = params.get("AddressTemplate")
self.Action = params.get("Action")
self.PolicyDescription = params.get("PolicyDescription")
class SecurityGroupPolicySet(AbstractModel):
"""安全组规则集合
"""
def __init__(self):
"""
:param Version: 安全组规则当前版本。用户每次更新安全规则版本会自动加1,防止更新的路由规则已过期,不填不考虑冲突。
:type Version: str
:param Egress: 出站规则。
:type Egress: list of SecurityGroupPolicy
:param Ingress: 入站规则。
:type Ingress: list of SecurityGroupPolicy
"""
self.Version = None
self.Egress = None
self.Ingress = None
def _deserialize(self, params):
self.Version = params.get("Version")
if params.get("Egress") is not None:
self.Egress = []
for item in params.get("Egress"):
obj = SecurityGroupPolicy()
obj._deserialize(item)
self.Egress.append(obj)
if params.get("Ingress") is not None:
self.Ingress = []
for item in params.get("Ingress"):
obj = SecurityGroupPolicy()
obj._deserialize(item)
self.Ingress.append(obj)
class SecurityPolicyDatabase(AbstractModel):
"""SecurityPolicyDatabase策略
"""
def __init__(self):
"""
:param LocalCidrBlock: 本端网段
:type LocalCidrBlock: str
:param RemoteCidrBlock: 对端网段
:type RemoteCidrBlock: list of str
"""
self.LocalCidrBlock = None
self.RemoteCidrBlock = None
def _deserialize(self, params):
self.LocalCidrBlock = params.get("LocalCidrBlock")
self.RemoteCidrBlock = params.get("RemoteCidrBlock")
class ServiceTemplate(AbstractModel):
"""协议端口模板
"""
def __init__(self):
"""
:param ServiceTemplateId: 协议端口实例ID,例如:ppm-f5n1f8da。
:type ServiceTemplateId: str
:param ServiceTemplateName: 模板名称。
:type ServiceTemplateName: str
:param ServiceSet: 协议端口信息。
:type ServiceSet: list of str
:param CreatedTime: 创建时间。
:type CreatedTime: str
"""
self.ServiceTemplateId = None
self.ServiceTemplateName = None
self.ServiceSet = None
self.CreatedTime = None
def _deserialize(self, params):
self.ServiceTemplateId = params.get("ServiceTemplateId")
self.ServiceTemplateName = params.get("ServiceTemplateName")
self.ServiceSet = params.get("ServiceSet")
self.CreatedTime = params.get("CreatedTime")
class ServiceTemplateGroup(AbstractModel):
"""协议端口模板集合
"""
def __init__(self):
"""
:param ServiceTemplateGroupId: 协议端口模板集合实例ID,例如:ppmg-2klmrefu。
:type ServiceTemplateGroupId: str
:param ServiceTemplateGroupName: 协议端口模板集合名称。
:type ServiceTemplateGroupName: str
:param ServiceTemplateIdSet: 协议端口模板实例ID。
:type ServiceTemplateIdSet: list of str
:param CreatedTime: 创建时间。
:type CreatedTime: str
"""
self.ServiceTemplateGroupId = None
self.ServiceTemplateGroupName = None
self.ServiceTemplateIdSet = None
self.CreatedTime = None
def _deserialize(self, params):
self.ServiceTemplateGroupId = params.get("ServiceTemplateGroupId")
self.ServiceTemplateGroupName = params.get("ServiceTemplateGroupName")
self.ServiceTemplateIdSet = params.get("ServiceTemplateIdSet")
self.CreatedTime = params.get("CreatedTime")
class Subnet(AbstractModel):
"""子网对象
"""
def __init__(self):
"""
:param VpcId: VPC实例ID。
:type VpcId: str
:param SubnetId: 子网实例ID,例如:subnet-bthucmmy。
:type SubnetId: str
:param SubnetName: 子网名称。
:type SubnetName: str
:param CidrBlock: 子网的CIDR。
:type CidrBlock: str
:param IsDefault: 是否默认子网。
:type IsDefault: bool
:param EnableBroadcast: 是否开启广播。
:type EnableBroadcast: bool
:param Zone: 可用区。
:type Zone: str
:param RouteTableId: 路由表实例ID,例如:rtb-l2h8d7c2。
:type RouteTableId: str
:param CreatedTime: 创建时间。
:type CreatedTime: str
:param AvailableIpAddressCount: 可用IP数。
:type AvailableIpAddressCount: int
"""
self.VpcId = None
self.SubnetId = None
self.SubnetName = None
self.CidrBlock = None
self.IsDefault = None
self.EnableBroadcast = None
self.Zone = None
self.RouteTableId = None
self.CreatedTime = None
self.AvailableIpAddressCount = None
def _deserialize(self, params):
self.VpcId = params.get("VpcId")
self.SubnetId = params.get("SubnetId")
self.SubnetName = params.get("SubnetName")
self.CidrBlock = params.get("CidrBlock")
self.IsDefault = params.get("IsDefault")
self.EnableBroadcast = params.get("EnableBroadcast")
self.Zone = params.get("Zone")
self.RouteTableId = params.get("RouteTableId")
self.CreatedTime = params.get("CreatedTime")
self.AvailableIpAddressCount = params.get("AvailableIpAddressCount")
class TransformAddressRequest(AbstractModel):
"""TransformAddress请求参数结构体
"""
def __init__(self):
"""
:param InstanceId: 待操作有普通公网 IP 的实例 ID。实例 ID 形如:`ins-11112222`。可通过登录[控制台](https://console.cloud.tencent.com/cvm)查询,也可通过 [DescribeInstances](https://cloud.tencent.com/document/api/213/9389) 接口返回值中的`InstanceId`获取。
:type InstanceId: str
"""
self.InstanceId = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
class TransformAddressResponse(AbstractModel):
"""TransformAddress返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class UnassignPrivateIpAddressesRequest(AbstractModel):
"""UnassignPrivateIpAddresses请求参数结构体
"""
def __init__(self):
"""
:param NetworkInterfaceId: 弹性网卡实例ID,例如:eni-m6dyj72l。
:type NetworkInterfaceId: str
:param PrivateIpAddresses: 指定的内网IP信息。
:type PrivateIpAddresses: list of PrivateIpAddressSpecification
"""
self.NetworkInterfaceId = None
self.PrivateIpAddresses = None
def _deserialize(self, params):
self.NetworkInterfaceId = params.get("NetworkInterfaceId")
if params.get("PrivateIpAddresses") is not None:
self.PrivateIpAddresses = []
for item in params.get("PrivateIpAddresses"):
obj = PrivateIpAddressSpecification()
obj._deserialize(item)
self.PrivateIpAddresses.append(obj)
class UnassignPrivateIpAddressesResponse(AbstractModel):
"""UnassignPrivateIpAddresses返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求ID,每次请求都会返回。定位问题时需要提供该次请求的RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class Vpc(AbstractModel):
"""私有网络(VPC)对象。
"""
def __init__(self):
"""
:param VpcName: Vpc名称。
:type VpcName: str
:param VpcId: VPC实例ID,例如:vpc-azd4dt1c。
:type VpcId: str
:param CidrBlock: VPC的cidr,只能为10.0.0.0/16,172.16.0.0/12,192.168.0.0/16这三个内网网段内。
:type CidrBlock: str
:param IsDefault: 是否默认VPC。
:type IsDefault: bool
:param EnableMulticast: 是否开启组播。
:type EnableMulticast: bool
:param CreatedTime: 创建时间。
:type CreatedTime: str
:param DnsServerSet: DNS列表
:type DnsServerSet: list of str
:param DomainName: DHCP域名选项值
:type DomainName: str
:param DhcpOptionsId: DHCP选项集ID
:type DhcpOptionsId: str
"""
self.VpcName = None
self.VpcId = None
self.CidrBlock = None
self.IsDefault = None
self.EnableMulticast = None
self.CreatedTime = None
self.DnsServerSet = None
self.DomainName = None
self.DhcpOptionsId = None
def _deserialize(self, params):
self.VpcName = params.get("VpcName")
self.VpcId = params.get("VpcId")
self.CidrBlock = params.get("CidrBlock")
self.IsDefault = params.get("IsDefault")
self.EnableMulticast = params.get("EnableMulticast")
self.CreatedTime = params.get("CreatedTime")
self.DnsServerSet = params.get("DnsServerSet")
self.DomainName = params.get("DomainName")
self.DhcpOptionsId = params.get("DhcpOptionsId")
class VpnConnection(AbstractModel):
"""VPN通道对象。
"""
def __init__(self):
"""
:param VpnConnectionId: 通道实例ID。
:type VpnConnectionId: str
:param VpnConnectionName: 通道名称。
:type VpnConnectionName: str
:param VpcId: VPC实例ID。
:type VpcId: str
:param VpnGatewayId: VPN网关实例ID。
:type VpnGatewayId: str
:param CustomerGatewayId: 对端网关实例ID。
:type CustomerGatewayId: str
:param PreShareKey: 预共享密钥。
:type PreShareKey: str
:param VpnProto: 通道传输协议。
:type VpnProto: str
:param EncryptProto: 通道加密协议。
:type EncryptProto: str
:param RouteType: 路由类型。
:type RouteType: str
:param CreatedTime: 创建时间。
:type CreatedTime: str
:param State: 通道的生产状态,PENDING:生产中,AVAILABLE:运行中,DELETING:删除中。
:type State: str
:param NetStatus: 通道连接状态,AVAILABLE:已连接。
:type NetStatus: str
:param SecurityPolicyDatabaseSet: SPD。
:type SecurityPolicyDatabaseSet: list of SecurityPolicyDatabase
:param IKEOptionsSpecification: IKE选项。
:type IKEOptionsSpecification: :class:`tencentcloud.vpc.v20170312.models.IKEOptionsSpecification`
:param IPSECOptionsSpecification: IPSEC选择。
:type IPSECOptionsSpecification: :class:`tencentcloud.vpc.v20170312.models.IPSECOptionsSpecification`
"""
self.VpnConnectionId = None
self.VpnConnectionName = None
self.VpcId = None
self.VpnGatewayId = None
self.CustomerGatewayId = None
self.PreShareKey = None
self.VpnProto = None
self.EncryptProto = None
self.RouteType = None
self.CreatedTime = None
self.State = None
self.NetStatus = None
self.SecurityPolicyDatabaseSet = None
self.IKEOptionsSpecification = None
self.IPSECOptionsSpecification = None
def _deserialize(self, params):
self.VpnConnectionId = params.get("VpnConnectionId")
self.VpnConnectionName = params.get("VpnConnectionName")
self.VpcId = params.get("VpcId")
self.VpnGatewayId = params.get("VpnGatewayId")
self.CustomerGatewayId = params.get("CustomerGatewayId")
self.PreShareKey = params.get("PreShareKey")
self.VpnProto = params.get("VpnProto")
self.EncryptProto = params.get("EncryptProto")
self.RouteType = params.get("RouteType")
self.CreatedTime = params.get("CreatedTime")
self.State = params.get("State")
self.NetStatus = params.get("NetStatus")
if params.get("SecurityPolicyDatabaseSet") is not None:
self.SecurityPolicyDatabaseSet = []
for item in params.get("SecurityPolicyDatabaseSet"):
obj = SecurityPolicyDatabase()
obj._deserialize(item)
self.SecurityPolicyDatabaseSet.append(obj)
if params.get("IKEOptionsSpecification") is not None:
self.IKEOptionsSpecification = IKEOptionsSpecification()
self.IKEOptionsSpecification._deserialize(params.get("IKEOptionsSpecification"))
if params.get("IPSECOptionsSpecification") is not None:
self.IPSECOptionsSpecification = IPSECOptionsSpecification()
self.IPSECOptionsSpecification._deserialize(params.get("IPSECOptionsSpecification"))
class VpnGateway(AbstractModel):
"""VPN网关对象。
"""
def __init__(self):
"""
:param VpnGatewayId: 网关实例ID。
:type VpnGatewayId: str
:param VpcId: VPC实例ID。
:type VpcId: str
:param VpnGatewayName: 网关实例名称。
:type VpnGatewayName: str
:param Type: 网关实例类型:'IPSEC', 'SSL'。
:type Type: str
:param State: 网关实例状态, 'PENDING':生产中,'DELETING':删除中,'AVAILABLE':运行中。
:type State: str
:param PublicIpAddress: 网关公网IP。
:type PublicIpAddress: str
:param RenewFlag: 网关续费类型:'NOTIFY_AND_MANUAL_RENEW':手动续费,'NOTIFY_AND_AUTO_RENEW':自动续费
:type RenewFlag: str
:param InstanceChargeType: 网关付费类型:POSTPAID_BY_HOUR:按小时后付费,PREPAID:包年包月预付费,
:type InstanceChargeType: str
:param InternetMaxBandwidthOut: 网关出带宽。
:type InternetMaxBandwidthOut: int
:param CreatedTime: 创建时间。
:type CreatedTime: str
:param ExpiredTime: 预付费网关过期时间。
:type ExpiredTime: str
:param IsAddressBlocked: 公网IP是否被封堵。
:type IsAddressBlocked: bool
:param NewPurchasePlan: 计费模式变更,PREPAID_TO_POSTPAID:包年包月预付费到期转按小时后付费。
:type NewPurchasePlan: str
:param RestrictState: 网关计费装,PROTECTIVELY_ISOLATED:被安全隔离的实例,NORMAL:正常。
:type RestrictState: str
"""
self.VpnGatewayId = None
self.VpcId = None
self.VpnGatewayName = None
self.Type = None
self.State = None
self.PublicIpAddress = None
self.RenewFlag = None
self.InstanceChargeType = None
self.InternetMaxBandwidthOut = None
self.CreatedTime = None
self.ExpiredTime = None
self.IsAddressBlocked = None
self.NewPurchasePlan = None
self.RestrictState = None
def _deserialize(self, params):
self.VpnGatewayId = params.get("VpnGatewayId")
self.VpcId = params.get("VpcId")
self.VpnGatewayName = params.get("VpnGatewayName")
self.Type = params.get("Type")
self.State = params.get("State")
self.PublicIpAddress = params.get("PublicIpAddress")
self.RenewFlag = params.get("RenewFlag")
self.InstanceChargeType = params.get("InstanceChargeType")
self.InternetMaxBandwidthOut = params.get("InternetMaxBandwidthOut")
self.CreatedTime = params.get("CreatedTime")
self.ExpiredTime = params.get("ExpiredTime")
self.IsAddressBlocked = params.get("IsAddressBlocked")
self.NewPurchasePlan = params.get("NewPurchasePlan")
self.RestrictState = params.get("RestrictState") | [] |
nobodywasishere/MLCSim | mlcsim/dist.py | a3eb3d39b6970a4e706e292c6a283531fb44350c | #!/usr/bin/env python
"""Distribution functions
This module provides functions for dealing with normal distributions
and generating error maps.
When called directly as main, it allows for converting a threshold map
into an error map.
```
$ python -m mlcsim.dist --help
usage: dist.py [-h] [-b {1,2,3,4}] -f F [-o O]
options:
-h, --help show this help message and exit
-b {1,2,3,4} bits per cell
-f F Threshold map json to convert
-o O output to file
```
"""
import argparse
import json
from pprint import pprint
from typing import Dict, List
import numpy as np
from scipy import stats as ss # type: ignore
# https://stackoverflow.com/a/32574638/9047818
# https://stackoverflow.com/a/13072714/9047818
def normalMidpoint(mean_a: float, mean_b: float, std_a: float, std_b: float) -> float:
"""Find the midpoint between two normal distributions
Args:
mean_a (float): Mean of first distribution
mean_b (float): Mean of second distribution
std_a (float): Std dev of first distribution
std_b (float): Std dev of second distribution
Returns:
float: Midpoint between distributions
"""
a = 1 / (2 * std_a**2) - 1 / (2 * std_b**2)
b = mean_b / (std_b**2) - mean_a / (std_a**2)
c = (
mean_a**2 / (2 * std_a**2)
- mean_b**2 / (2 * std_b**2)
- np.log(std_b / std_a)
)
roots = np.roots([a, b, c])
masked = np.ma.masked_outside(roots, mean_a, mean_b)
return float(masked[~masked.mask][0][0])
# https://www.askpython.com/python/normal-distribution
def normalChance(mean: float, stdev: float, thr: float) -> float:
"""Find the chance of a normal distribution above/below a given value
Args:
mean (float): Mean of the distribution
stdev (float): Std dev of the distribution
thr (float): Threshold to check above/below
Returns:
float: Chance for threshold to end up above/below the given point in the distribution
"""
chance = ss.norm(loc=mean, scale=stdev).cdf(thr)
return float(chance if mean > thr else 1 - chance)
def genErrorMap(thr_maps: Dict[str, List[List[float]]], bpc: int) -> List[List[float]]:
"""Generate an error map from a threshold map
Args:
thr_maps (dict): Threshold map
bpc (int): Bits per cell
Raises:
ValueError: if the given bpc is not in the threshold map
Returns:
list: Error map from the threshold map
"""
if str(bpc) not in thr_maps.keys():
raise ValueError(f"Threshold map does not have values for {bpc} levels")
thr_map: List[List[float]] = thr_maps[str(bpc)]
err_map = [[0.0]]
for i in range(len(thr_map) - 1):
mid = normalMidpoint(
thr_map[i][0], thr_map[i + 1][0], thr_map[i][1], thr_map[i + 1][1]
)
up = normalChance(thr_map[i][0], thr_map[i][1], mid)
dn = normalChance(thr_map[i + 1][0], thr_map[i + 1][1], mid)
err_map[i].append(up)
err_map.append([dn])
err_map[-1].append(0.0)
return err_map
def _main():
parser = argparse.ArgumentParser()
parser.add_argument(
"-b", type=int, default=2, choices=range(1, 5), help="bits per cell"
)
parser.add_argument("-f", required=True, help="Threshold map json to convert")
parser.add_argument("-o", type=str, help="output to file")
args = parser.parse_args()
with open(args.f) as f:
thr_map = json.load(f)
err_map = genErrorMap(thr_map, args.b)
if args.o:
with open(args.o, "w") as f:
json.dump(err_map, f)
else:
pprint(err_map)
if __name__ == "__main__":
_main()
| [((53, 12, 53, 31), 'numpy.roots', 'np.roots', ({(53, 21, 53, 30): '[a, b, c]'}, {}), '([a, b, c])', True, 'import numpy as np\n'), ((54, 13, 54, 56), 'numpy.ma.masked_outside', 'np.ma.masked_outside', ({(54, 34, 54, 39): 'roots', (54, 41, 54, 47): 'mean_a', (54, 49, 54, 55): 'mean_b'}, {}), '(roots, mean_a, mean_b)', True, 'import numpy as np\n'), ((108, 13, 108, 38), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ({}, {}), '()', False, 'import argparse\n'), ((51, 10, 51, 31), 'numpy.log', 'np.log', ({(51, 17, 51, 30): '(std_b / std_a)'}, {}), '(std_b / std_a)', True, 'import numpy as np\n'), ((119, 18, 119, 30), 'json.load', 'json.load', ({(119, 28, 119, 29): 'f'}, {}), '(f)', False, 'import json\n'), ((127, 8, 127, 23), 'pprint.pprint', 'pprint', ({(127, 15, 127, 22): 'err_map'}, {}), '(err_map)', False, 'from pprint import pprint\n'), ((70, 13, 70, 43), 'scipy.stats.norm', 'ss.norm', (), '', True, 'from scipy import stats as ss\n'), ((125, 12, 125, 33), 'json.dump', 'json.dump', ({(125, 22, 125, 29): 'err_map', (125, 31, 125, 32): 'f'}, {}), '(err_map, f)', False, 'import json\n')] |
JackShen1/pr-labs | Pr-Lab5/lab5.py | c84df379d8f7b26ccff30248dfb23ae38e0ce7c2 | earth = {
"Asia":
{'Japan': ("Tokyo", 377975, 125620000)},
"Europe":
{'Austria': ("Vienna", 83800, 8404000),
'Germany': ("Berlin", 357000, 81751000),
'Great Britain': ("London", 244800, 62700000),
'Iceland': ("Reykjavík", 103000, 317630),
'Italy': ("Rome", 301400, 60605000),
'Spain': ("Madrid", 506000, 46162000),
'Ukraine': ("Kyiv", 603700, 45562000)}
}
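# Earth wraps the nested continent -> country data above and pretty-prints it.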
class Earth:
def __init__(self, continent):
self.dictionary = earth
self.continent = continent
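    # Print an aligned table of capital, area and population for every country on the given continent.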
def continent_out(self, a):
print(
" Country " + " " * 20 + " Capital " + " " * 15 + " Area (km²) " + " " * 7 + " Population " + "\n" +
"-----------" + " " * 20 + "-----------" + " " * 15 + "-------------------" + " " * 7 + "--------------")
for x in self.dictionary.get(a.title()):
print("{:30}".format(x),
"{:<30}{:<25}{:<25}".format(self.dictionary.get(a.title())[x][0],
str(self.dictionary.get(a.title())[x][1]) + " km²",
str(self.dictionary.get(a.title())[x][2])))
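    # Format a list of (continent, (capital, area, population)) entries as an aligned text table and return it.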
def country_out(self, a):
a.insert(0, ('Continent', ('Capital', 'Area (km²)', 'Population')))
b = []
for i in a:
b.extend((i[0], i[1][0], str(i[1][1]), str(i[1][2])))
return ("{:<20}{:<20}{:<25}{:<25}\n" * len(a)).format(*b)
def print_continent(self):
return self.continent_out(self.continent)
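    # Search every continent for the given country and return its formatted table.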
def print_country(self, a):
for i in self.dictionary.keys():
continent = i
country_describe = self.dictionary.get(continent).get(a.title())
if country_describe is None: continue
return self.country_out([(continent, country_describe)])
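# Read a continent or country name and print the corresponding table.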
input_str = input("Enter the name of the continent or country: ")
if input_str.title() in earth.keys():
Earth(input_str).print_continent()
else:
print(Earth(continent=None).print_country(input_str))
| [] |
ubiquitoustech/rules_vue | vue/repositories.bzl | 759786eae1b6caf647b1c6018e16030a66e486e2 | """Declare runtime dependencies
These are needed for local dev, and users must install them as well.
See https://docs.bazel.build/versions/main/skylark/deploying.html#dependencies
"""
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe")
# WARNING: any changes in this function may be BREAKING CHANGES for users
# because we'll fetch a dependency which may be different from one that
# they were previously fetching later in their WORKSPACE setup, and now
# ours took precedence. Such breakages are challenging for users, so any
# changes in this function should be marked as BREAKING in the commit message
# and released only in semver majors.
def rules_vue_dependencies():
# The minimal version of bazel_skylib we require
maybe(
http_archive,
name = "bazel_skylib",
sha256 = "c6966ec828da198c5d9adbaa94c05e3a1c7f21bd012a0b29ba8ddbccb2c93b0d",
urls = [
"https://github.com/bazelbuild/bazel-skylib/releases/download/1.1.1/bazel-skylib-1.1.1.tar.gz",
"https://mirror.bazel.build/github.com/bazelbuild/bazel-skylib/releases/download/1.1.1/bazel-skylib-1.1.1.tar.gz",
],
)
maybe(
http_archive,
name = "build_bazel_rules_nodejs",
sha256 = "4913ea835810c195df24d3a929315c29a64566cc48e409d8b0f35008b4e02e59",
urls = ["https://github.com/bazelbuild/rules_nodejs/releases/download/4.4.4/rules_nodejs-4.4.4.tar.gz"],
)
| [] |
kwestpharedhat/quay | endpoints/api/test/test_tag.py | a0df895005bcd3e53847046f69f6a7add87c88fd | import pytest
from playhouse.test_utils import assert_query_count
from data.registry_model import registry_model
from data.database import Manifest
from endpoints.api.test.shared import conduct_api_call
from endpoints.test.shared import client_with_identity
from endpoints.api.tag import RepositoryTag, RestoreTag, ListRepositoryTags
from test.fixtures import *
@pytest.mark.parametrize(
"expiration_time, expected_status",
[
(None, 201),
("aksdjhasd", 400),
],
)
def test_change_tag_expiration_default(expiration_time, expected_status, client, app):
with client_with_identity("devtable", client) as cl:
params = {
"repository": "devtable/simple",
"tag": "latest",
}
request_body = {
"expiration": expiration_time,
}
conduct_api_call(cl, RepositoryTag, "put", params, request_body, expected_status)
def test_change_tag_expiration(client, app):
with client_with_identity("devtable", client) as cl:
params = {
"repository": "devtable/simple",
"tag": "latest",
}
repo_ref = registry_model.lookup_repository("devtable", "simple")
tag = registry_model.get_repo_tag(repo_ref, "latest")
updated_expiration = tag.lifetime_start_ts + 60 * 60 * 24
request_body = {
"expiration": updated_expiration,
}
conduct_api_call(cl, RepositoryTag, "put", params, request_body, 201)
tag = registry_model.get_repo_tag(repo_ref, "latest")
assert tag.lifetime_end_ts == updated_expiration
@pytest.mark.parametrize(
"manifest_exists,test_tag,expected_status",
[
(True, "-INVALID-TAG-NAME", 400),
(True, ".INVALID-TAG-NAME", 400),
(
True,
"INVALID-TAG_NAME-BECAUSE-THIS-IS-WAY-WAY-TOO-LOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOONG",
400,
),
(False, "newtag", 404),
(True, "generatemanifestfail", None),
(True, "latest", 201),
(True, "newtag", 201),
],
)
def test_move_tag(manifest_exists, test_tag, expected_status, client, app):
with client_with_identity("devtable", client) as cl:
test_image = "unknown"
if manifest_exists:
repo_ref = registry_model.lookup_repository("devtable", "simple")
tag_ref = registry_model.get_repo_tag(repo_ref, "latest")
assert tag_ref
test_image = tag_ref.manifest.digest
params = {"repository": "devtable/simple", "tag": test_tag}
request_body = {"manifest_digest": test_image}
if expected_status is None:
with pytest.raises(Exception):
conduct_api_call(cl, RepositoryTag, "put", params, request_body, expected_status)
else:
conduct_api_call(cl, RepositoryTag, "put", params, request_body, expected_status)
@pytest.mark.parametrize(
"repo_namespace, repo_name, query_count",
[
("devtable", "simple", 4),
("devtable", "history", 4),
("devtable", "complex", 4),
("devtable", "gargantuan", 4),
("buynlarge", "orgrepo", 6), # +2 for permissions checks.
("buynlarge", "anotherorgrepo", 6), # +2 for permissions checks.
],
)
def test_list_repo_tags(repo_namespace, repo_name, client, query_count, app):
# Pre-cache media type loads to ensure consistent query count.
Manifest.media_type.get_name(1)
params = {"repository": repo_namespace + "/" + repo_name}
with client_with_identity("devtable", client) as cl:
with assert_query_count(query_count):
tags = conduct_api_call(cl, ListRepositoryTags, "get", params).json["tags"]
repo_ref = registry_model.lookup_repository(repo_namespace, repo_name)
history, _ = registry_model.list_repository_tag_history(repo_ref)
assert len(tags) == len(history)
| [((15, 1, 21, 1), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(16, 4, 16, 38): '"""expiration_time, expected_status"""', (17, 4, 20, 5): "[(None, 201), ('aksdjhasd', 400)]"}, {}), "('expiration_time, expected_status', [(None, 201), (\n 'aksdjhasd', 400)])", False, 'import pytest\n'), ((57, 1, 72, 1), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(58, 4, 58, 46): '"""manifest_exists,test_tag,expected_status"""', (59, 4, 71, 5): "[(True, '-INVALID-TAG-NAME', 400), (True, '.INVALID-TAG-NAME', 400), (True,\n 'INVALID-TAG_NAME-BECAUSE-THIS-IS-WAY-WAY-TOO-LOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOONG'\n , 400), (False, 'newtag', 404), (True, 'generatemanifestfail', None), (\n True, 'latest', 201), (True, 'newtag', 201)]"}, {}), "('manifest_exists,test_tag,expected_status', [(True,\n '-INVALID-TAG-NAME', 400), (True, '.INVALID-TAG-NAME', 400), (True,\n 'INVALID-TAG_NAME-BECAUSE-THIS-IS-WAY-WAY-TOO-LOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOONG'\n , 400), (False, 'newtag', 404), (True, 'generatemanifestfail', None), (\n True, 'latest', 201), (True, 'newtag', 201)])", False, 'import pytest\n'), ((92, 1, 102, 1), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(93, 4, 93, 44): '"""repo_namespace, repo_name, query_count"""', (94, 4, 101, 5): "[('devtable', 'simple', 4), ('devtable', 'history', 4), ('devtable',\n 'complex', 4), ('devtable', 'gargantuan', 4), ('buynlarge', 'orgrepo', \n 6), ('buynlarge', 'anotherorgrepo', 6)]"}, {}), "('repo_namespace, repo_name, query_count', [(\n 'devtable', 'simple', 4), ('devtable', 'history', 4), ('devtable',\n 'complex', 4), ('devtable', 'gargantuan', 4), ('buynlarge', 'orgrepo', \n 6), ('buynlarge', 'anotherorgrepo', 6)])", False, 'import pytest\n'), ((105, 4, 105, 35), 'data.database.Manifest.media_type.get_name', 'Manifest.media_type.get_name', ({(105, 33, 105, 34): '(1)'}, {}), '(1)', False, 'from data.database import Manifest\n'), ((23, 9, 23, 49), 'endpoints.test.shared.client_with_identity', 'client_with_identity', ({(23, 30, 23, 40): '"""devtable"""', (23, 42, 23, 48): 'client'}, {}), "('devtable', client)", False, 'from endpoints.test.shared import client_with_identity\n'), ((33, 8, 33, 89), 'endpoints.api.test.shared.conduct_api_call', 'conduct_api_call', ({(33, 25, 33, 27): 'cl', (33, 29, 33, 42): 'RepositoryTag', (33, 44, 33, 49): '"""put"""', (33, 51, 33, 57): 'params', (33, 59, 33, 71): 'request_body', (33, 73, 33, 88): 'expected_status'}, {}), "(cl, RepositoryTag, 'put', params, request_body,\n expected_status)", False, 'from endpoints.api.test.shared import conduct_api_call\n'), ((37, 9, 37, 49), 'endpoints.test.shared.client_with_identity', 'client_with_identity', ({(37, 30, 37, 40): '"""devtable"""', (37, 42, 37, 48): 'client'}, {}), "('devtable', client)", False, 'from endpoints.test.shared import client_with_identity\n'), ((43, 19, 43, 73), 'data.registry_model.registry_model.lookup_repository', 'registry_model.lookup_repository', ({(43, 52, 43, 62): '"""devtable"""', (43, 64, 43, 72): '"""simple"""'}, {}), "('devtable', 'simple')", False, 'from data.registry_model import registry_model\n'), ((44, 14, 44, 61), 'data.registry_model.registry_model.get_repo_tag', 'registry_model.get_repo_tag', ({(44, 42, 44, 50): 'repo_ref', (44, 52, 44, 60): '"""latest"""'}, {}), "(repo_ref, 'latest')", False, 'from data.registry_model import registry_model\n'), ((52, 8, 52, 77), 'endpoints.api.test.shared.conduct_api_call', 'conduct_api_call', ({(52, 
25, 52, 27): 'cl', (52, 29, 52, 42): 'RepositoryTag', (52, 44, 52, 49): '"""put"""', (52, 51, 52, 57): 'params', (52, 59, 52, 71): 'request_body', (52, 73, 52, 76): '(201)'}, {}), "(cl, RepositoryTag, 'put', params, request_body, 201)", False, 'from endpoints.api.test.shared import conduct_api_call\n'), ((53, 14, 53, 61), 'data.registry_model.registry_model.get_repo_tag', 'registry_model.get_repo_tag', ({(53, 42, 53, 50): 'repo_ref', (53, 52, 53, 60): '"""latest"""'}, {}), "(repo_ref, 'latest')", False, 'from data.registry_model import registry_model\n'), ((74, 9, 74, 49), 'endpoints.test.shared.client_with_identity', 'client_with_identity', ({(74, 30, 74, 40): '"""devtable"""', (74, 42, 74, 48): 'client'}, {}), "('devtable', client)", False, 'from endpoints.test.shared import client_with_identity\n'), ((108, 9, 108, 49), 'endpoints.test.shared.client_with_identity', 'client_with_identity', ({(108, 30, 108, 40): '"""devtable"""', (108, 42, 108, 48): 'client'}, {}), "('devtable', client)", False, 'from endpoints.test.shared import client_with_identity\n'), ((112, 19, 112, 78), 'data.registry_model.registry_model.lookup_repository', 'registry_model.lookup_repository', ({(112, 52, 112, 66): 'repo_namespace', (112, 68, 112, 77): 'repo_name'}, {}), '(repo_namespace, repo_name)', False, 'from data.registry_model import registry_model\n'), ((113, 21, 113, 73), 'data.registry_model.registry_model.list_repository_tag_history', 'registry_model.list_repository_tag_history', ({(113, 64, 113, 72): 'repo_ref'}, {}), '(repo_ref)', False, 'from data.registry_model import registry_model\n'), ((77, 23, 77, 77), 'data.registry_model.registry_model.lookup_repository', 'registry_model.lookup_repository', ({(77, 56, 77, 66): '"""devtable"""', (77, 68, 77, 76): '"""simple"""'}, {}), "('devtable', 'simple')", False, 'from data.registry_model import registry_model\n'), ((78, 22, 78, 69), 'data.registry_model.registry_model.get_repo_tag', 'registry_model.get_repo_tag', ({(78, 50, 78, 58): 'repo_ref', (78, 60, 78, 68): '"""latest"""'}, {}), "(repo_ref, 'latest')", False, 'from data.registry_model import registry_model\n'), ((89, 12, 89, 93), 'endpoints.api.test.shared.conduct_api_call', 'conduct_api_call', ({(89, 29, 89, 31): 'cl', (89, 33, 89, 46): 'RepositoryTag', (89, 48, 89, 53): '"""put"""', (89, 55, 89, 61): 'params', (89, 63, 89, 75): 'request_body', (89, 77, 89, 92): 'expected_status'}, {}), "(cl, RepositoryTag, 'put', params, request_body,\n expected_status)", False, 'from endpoints.api.test.shared import conduct_api_call\n'), ((109, 13, 109, 44), 'playhouse.test_utils.assert_query_count', 'assert_query_count', ({(109, 32, 109, 43): 'query_count'}, {}), '(query_count)', False, 'from playhouse.test_utils import assert_query_count\n'), ((86, 17, 86, 41), 'pytest.raises', 'pytest.raises', ({(86, 31, 86, 40): 'Exception'}, {}), '(Exception)', False, 'import pytest\n'), ((87, 16, 87, 97), 'endpoints.api.test.shared.conduct_api_call', 'conduct_api_call', ({(87, 33, 87, 35): 'cl', (87, 37, 87, 50): 'RepositoryTag', (87, 52, 87, 57): '"""put"""', (87, 59, 87, 65): 'params', (87, 67, 87, 79): 'request_body', (87, 81, 87, 96): 'expected_status'}, {}), "(cl, RepositoryTag, 'put', params, request_body,\n expected_status)", False, 'from endpoints.api.test.shared import conduct_api_call\n'), ((110, 19, 110, 74), 'endpoints.api.test.shared.conduct_api_call', 'conduct_api_call', ({(110, 36, 110, 38): 'cl', (110, 40, 110, 58): 'ListRepositoryTags', (110, 60, 110, 65): '"""get"""', (110, 67, 110, 73): 'params'}, {}), "(cl, 
ListRepositoryTags, 'get', params)", False, 'from endpoints.api.test.shared import conduct_api_call\n')] |
Jongerr/vendor_receiving | inventory.py | f69f09a5b41d38b45e9ea0bf82590bb27ce913f6 | import json
import os
import random
import requests
from passlib.hash import pbkdf2_sha256 as pbk
from PyQt5.QtSql import QSqlDatabase, QSqlQuery
from pprint import pprint
ENCODING = 'utf-8'
DB_PATH = os.path.join(os.path.curdir, 'inventory.db')
def scrambleWord(word):
"""Randomize the letters in word and return the resulting string."""
word_list = list(word)
random.shuffle(word_list)
word = ''.join(word_list)
return word
def generateItems():
"""Generate a dictionary of retail products and store the data in items.json.
Pulls a list of items and artificially doubles it with scrambled item names.
Each item is given a random PLU, UPC, and department number.
Each dictionary key is the item's PLU.
"""
response = requests.get('https://www.randomlists.com/data/things.json')
json_data = response.json()
items = json_data['RandL']['items']
#double sample size by scrambling item names
scrambled_list = []
for item in items:
scrambled_item = scrambleWord(item)
scrambled_list.append(scrambled_item)
items = items + scrambled_list
data = {}
for item in items:
random.seed(item)
upc = random.randint(100000000000, 999999999999)
plu = random.randint(1000, 9999999)
department = (plu % 7) + 1
print('UPC:{0} | PLU:{1} | Item:{2} | D{3}'.format(upc, plu, item, department))
if plu in data:
print('Duplicate found: {}'.format(plu))
continue
data[plu] = {'upc':upc, 'department':department, 'model':item}
with open('items.json', 'w') as f:
json.dump(data, f)
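# Illustrative shape of one items.json record written above (all values made up):
# {"1234567": {"upc": 123456789012, "department": 6, "model": "chair"}}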
def generatePO():
"""Create dumby Purchase Orders and store them in pos.json.
    Each PO is assigned one random vendor and department number,
along with a random length list of items belonging to said department.
Returns: True if items.json successfully opens, False otherwise.
"""
try:
with open('items.json', 'r') as f:
items_dict = json.load(f)
except FileNotFoundError:
return False
vendors = ['Dyson', 'Ingrammicro', 'LKG', 'Inland', 'Sandisk', 'Seagate', 'Hasbro', 'Mattel',\
'Gear Head', 'Logitech', 'NTE', 'Dell', 'Microsoft', 'Right Stuff', 'Alliance', 'Energizer']
po_dict = {}
for i in range(50):
po_num = 24000000 + random.randint(1, 999999)
if po_num in po_dict:
continue
po_dict[po_num] = {'department': (po_num % 7) + 1, 'items': {}, 'vendor': random.choice(vendors)}
for key in items_dict:
match_found = False
loops = 0
while not match_found:
loops += 1
if loops > 200:
print('\n\nToo many loops.\n\n')
break
po, department = random.choice(list(po_dict.items()))
department = department['department']
print('PO department: {}'.format(department))
print('item plu: {} department: {}'.format(key, items_dict[key]['department']))
if items_dict[key]['department'] == department:
max_count = random.randint(1, 20)
po_dict[po]['items'][key] = max_count
match_found = True
with open('pos.json', 'w') as f:
json.dump(po_dict, f)
return True
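# Sketch of a single pos.json entry produced above (illustrative values):
# {"24000002": {"department": 6, "items": {"1234567": 12}, "vendor": "Dyson"}}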
def fillDB():
"""Create a database and populate two tables(named items and purchase_order).
The 'items' and 'purchase_order' tables are populated with the data from items.json
and pos.json respectively.
"""
with open('items.json') as f:
data = json.load(f)
db = QSqlDatabase.addDatabase('QSQLITE')
db.setDatabaseName(DB_PATH)
if not db.open():
print('DB could not be opened')
error = QSqlDatabase.lastError()
print(error.text())
return False
query = QSqlQuery()
if query.exec_("drop table items"):
print('successfully dropped table')
else:
print('unsuccessfully dropped table')
print(query.lastError().text())
if query.exec_("create table items(plu int primary key, upc varchar(12) unique, "
"model varchar(20), department int)"):
print('success')
else:
print('failure')
print(query.lastError().text())
for key in data:
if query.exec_("insert into items values({}, '{}', '{}', {})".format(key, data[key]['upc'],
data[key]['model'], data[key]['department'])):
print("values({}, {}, {}, {}) successfully inserted.".format(key, data[key]['upc'], data[key]['model'], data[key]['department']))
else:
print("values({}, {}, {}, {}) unsuccessfully inserted.".format(key, data[key]['upc'], data[key]['model'], data[key]['department']))
print(query.lastError().text())
with open('pos.json') as f:
po_dict = json.load(f)
if query.exec_("drop table purchase_order"):
print('successfully dropped table')
else:
print('unsuccessfully dropped table')
print(query.lastError().text())
if query.exec_("create table purchase_order(po int primary key, vendor varchar(30), "
"department int, items blob)"):
print('success')
else:
print('failure')
print(query.lastError().text())
for key in po_dict:
item_string = json.dumps(po_dict[key]['items'])
item_blob = item_string.encode(ENCODING)
if query.exec_("insert into purchase_order values({}, '{}', {}, '{}')"\
.format(key, po_dict[key]['vendor'], po_dict[key]['department'], item_string)):
print("values({}, {}, {}, {}) successfully inserted."\
.format(key, po_dict[key]['vendor'], po_dict[key]['department'], item_string))
else:
print("values({}, {}, {}, {}) unsuccessfully inserted."\
.format(key, po_dict[key]['vendor'], po_dict[key]['department'], item_blob))
print(query.lastError().text())
def createEmployeeTable():
db = QSqlDatabase.addDatabase('QSQLITE')
db.setDatabaseName(DB_PATH)
if not db.open():
print('DB could not be opened')
error = QSqlDatabase.lastError()
print(error.text())
return False
query = QSqlQuery()
if not query.exec_("drop table employee"):
print(query.lastError().text())
if not query.exec_("create table employee(id int primary key, first_name varchar(10), "\
"last_name varchar(10), posistion int, pass_hash varchar(200))"):
print(query.lastError().text())
if not query.exec_("insert into employee values({}, '{}', '{}', {}, '{}')".\
format(162973, 'Jon', 'Michie', 2, pbk.hash('Michie'))):
print(query.lastError().text())
query.exec_("insert into employee values({}, '{}', '{}', {}, '{}')".\
format(131901, 'Ben', 'Terry', 3, pbk.hash('Terry')))
query.exec_("insert into employee values({}, '{}', '{}', {}, '{}')".\
format(150697, 'Daniel', 'Silva', 2, pbk.hash('Silva')))
query.exec_("insert into employee values({}, '{}', '{}', {}, '{}')".\
format(68412, 'James', 'Hutchetson', 2, pbk.hash('Hutchetson')))
query.exec_("insert into employee values({}, '{}', '{}', {}, '{}')".\
format(161844, 'MacKenly', 'Gamble', 1, pbk.hash('Gamble')))
query.exec_("insert into employee values({}, '{}', '{}', {}, '{}')".\
format(141047, 'George', 'Huston', 1, pbk.hash('Huston')))
query.exec_("insert into employee values({}, '{}', '{}', {}, '{}')".\
format(46045, 'Arthur', 'Art', 1, pbk.hash('Art')))
def testHashVerification(name):
db = QSqlDatabase.addDatabase('QSQLITE')
db.setDatabaseName(DB_PATH)
if not db.open():
print('DB could not be opened')
error = QSqlDatabase.lastError()
print(error.text())
return False
query = QSqlQuery()
if not query.exec_("select pass_hash from employee where last_name = '{}'".format(name)):
print(query.lastError().text())
elif not query.next():
print('Table values not found')
else:
pass_hash = query.value(0)
if pbk.verify(name, pass_hash):
print('It\'s a match!')
else:
print('Match not found.')
if __name__ == '__main__':
generateItems()
generatePO()
fillDB()
createEmployeeTable()
testHashVerification('Terry')
| [((11, 10, 11, 54), 'os.path.join', 'os.path.join', ({(11, 23, 11, 37): 'os.path.curdir', (11, 39, 11, 53): '"""inventory.db"""'}, {}), "(os.path.curdir, 'inventory.db')", False, 'import os\n'), ((17, 4, 17, 29), 'random.shuffle', 'random.shuffle', ({(17, 19, 17, 28): 'word_list'}, {}), '(word_list)', False, 'import random\n'), ((29, 15, 29, 75), 'requests.get', 'requests.get', ({(29, 28, 29, 74): '"""https://www.randomlists.com/data/things.json"""'}, {}), "('https://www.randomlists.com/data/things.json')", False, 'import requests\n'), ((114, 9, 114, 44), 'PyQt5.QtSql.QSqlDatabase.addDatabase', 'QSqlDatabase.addDatabase', ({(114, 34, 114, 43): '"""QSQLITE"""'}, {}), "('QSQLITE')", False, 'from PyQt5.QtSql import QSqlDatabase, QSqlQuery\n'), ((122, 12, 122, 23), 'PyQt5.QtSql.QSqlQuery', 'QSqlQuery', ({}, {}), '()', False, 'from PyQt5.QtSql import QSqlDatabase, QSqlQuery\n'), ((170, 9, 170, 44), 'PyQt5.QtSql.QSqlDatabase.addDatabase', 'QSqlDatabase.addDatabase', ({(170, 34, 170, 43): '"""QSQLITE"""'}, {}), "('QSQLITE')", False, 'from PyQt5.QtSql import QSqlDatabase, QSqlQuery\n'), ((177, 12, 177, 23), 'PyQt5.QtSql.QSqlQuery', 'QSqlQuery', ({}, {}), '()', False, 'from PyQt5.QtSql import QSqlDatabase, QSqlQuery\n'), ((201, 9, 201, 44), 'PyQt5.QtSql.QSqlDatabase.addDatabase', 'QSqlDatabase.addDatabase', ({(201, 34, 201, 43): '"""QSQLITE"""'}, {}), "('QSQLITE')", False, 'from PyQt5.QtSql import QSqlDatabase, QSqlQuery\n'), ((208, 12, 208, 23), 'PyQt5.QtSql.QSqlQuery', 'QSqlQuery', ({}, {}), '()', False, 'from PyQt5.QtSql import QSqlDatabase, QSqlQuery\n'), ((43, 8, 43, 25), 'random.seed', 'random.seed', ({(43, 20, 43, 24): 'item'}, {}), '(item)', False, 'import random\n'), ((44, 14, 44, 56), 'random.randint', 'random.randint', ({(44, 29, 44, 41): '100000000000', (44, 43, 44, 55): '999999999999'}, {}), '(100000000000, 999999999999)', False, 'import random\n'), ((45, 14, 45, 43), 'random.randint', 'random.randint', ({(45, 29, 45, 33): '1000', (45, 35, 45, 42): '9999999'}, {}), '(1000, 9999999)', False, 'import random\n'), ((56, 8, 56, 26), 'json.dump', 'json.dump', ({(56, 18, 56, 22): 'data', (56, 24, 56, 25): 'f'}, {}), '(data, f)', False, 'import json\n'), ((101, 8, 101, 29), 'json.dump', 'json.dump', ({(101, 18, 101, 25): 'po_dict', (101, 27, 101, 28): 'f'}, {}), '(po_dict, f)', False, 'import json\n'), ((112, 15, 112, 27), 'json.load', 'json.load', ({(112, 25, 112, 26): 'f'}, {}), '(f)', False, 'import json\n'), ((118, 16, 118, 40), 'PyQt5.QtSql.QSqlDatabase.lastError', 'QSqlDatabase.lastError', ({}, {}), '()', False, 'from PyQt5.QtSql import QSqlDatabase, QSqlQuery\n'), ((143, 18, 143, 30), 'json.load', 'json.load', ({(143, 28, 143, 29): 'f'}, {}), '(f)', False, 'import json\n'), ((157, 22, 157, 55), 'json.dumps', 'json.dumps', ({(157, 33, 157, 54): "po_dict[key]['items']"}, {}), "(po_dict[key]['items'])", False, 'import json\n'), ((174, 16, 174, 40), 'PyQt5.QtSql.QSqlDatabase.lastError', 'QSqlDatabase.lastError', ({}, {}), '()', False, 'from PyQt5.QtSql import QSqlDatabase, QSqlQuery\n'), ((205, 16, 205, 40), 'PyQt5.QtSql.QSqlDatabase.lastError', 'QSqlDatabase.lastError', ({}, {}), '()', False, 'from PyQt5.QtSql import QSqlDatabase, QSqlQuery\n'), ((69, 25, 69, 37), 'json.load', 'json.load', ({(69, 35, 69, 36): 'f'}, {}), '(f)', False, 'import json\n'), ((78, 28, 78, 53), 'random.randint', 'random.randint', ({(78, 43, 78, 44): '(1)', (78, 46, 78, 52): '(999999)'}, {}), '(1, 999999)', False, 'import random\n'), ((81, 82, 81, 104), 'random.choice', 'random.choice', ({(81, 96, 81, 103): 
'vendors'}, {}), '(vendors)', False, 'import random\n'), ((187, 50, 187, 67), 'passlib.hash.pbkdf2_sha256.hash', 'pbk.hash', ({(187, 59, 187, 66): '"""Terry"""'}, {}), "('Terry')", True, 'from passlib.hash import pbkdf2_sha256 as pbk\n'), ((189, 53, 189, 70), 'passlib.hash.pbkdf2_sha256.hash', 'pbk.hash', ({(189, 62, 189, 69): '"""Silva"""'}, {}), "('Silva')", True, 'from passlib.hash import pbkdf2_sha256 as pbk\n'), ((191, 56, 191, 78), 'passlib.hash.pbkdf2_sha256.hash', 'pbk.hash', ({(191, 65, 191, 77): '"""Hutchetson"""'}, {}), "('Hutchetson')", True, 'from passlib.hash import pbkdf2_sha256 as pbk\n'), ((193, 56, 193, 74), 'passlib.hash.pbkdf2_sha256.hash', 'pbk.hash', ({(193, 65, 193, 73): '"""Gamble"""'}, {}), "('Gamble')", True, 'from passlib.hash import pbkdf2_sha256 as pbk\n'), ((195, 54, 195, 72), 'passlib.hash.pbkdf2_sha256.hash', 'pbk.hash', ({(195, 63, 195, 71): '"""Huston"""'}, {}), "('Huston')", True, 'from passlib.hash import pbkdf2_sha256 as pbk\n'), ((197, 50, 197, 65), 'passlib.hash.pbkdf2_sha256.hash', 'pbk.hash', ({(197, 59, 197, 64): '"""Art"""'}, {}), "('Art')", True, 'from passlib.hash import pbkdf2_sha256 as pbk\n'), ((216, 11, 216, 38), 'passlib.hash.pbkdf2_sha256.verify', 'pbk.verify', ({(216, 22, 216, 26): 'name', (216, 28, 216, 37): 'pass_hash'}, {}), '(name, pass_hash)', True, 'from passlib.hash import pbkdf2_sha256 as pbk\n'), ((96, 28, 96, 49), 'random.randint', 'random.randint', ({(96, 43, 96, 44): '1', (96, 46, 96, 48): '20'}, {}), '(1, 20)', False, 'import random\n'), ((184, 51, 184, 69), 'passlib.hash.pbkdf2_sha256.hash', 'pbk.hash', ({(184, 60, 184, 68): '"""Michie"""'}, {}), "('Michie')", True, 'from passlib.hash import pbkdf2_sha256 as pbk\n')] |
frennkie/lnbits | lnbits/core/views/lnurl.py | 5fe64d324dc7ac05d1d0fc25eb5ad6a5a414ea8a | import requests
from flask import abort, redirect, request, url_for
from lnurl import LnurlWithdrawResponse, handle as handle_lnurl
from lnurl.exceptions import LnurlException
from time import sleep
from lnbits.core import core_app
from lnbits.helpers import Status
from lnbits.settings import WALLET
from ..crud import create_account, get_user, create_wallet, create_payment
@core_app.route("/lnurlwallet")
def lnurlwallet():
memo = "LNbits LNURL funding"
try:
withdraw_res = handle_lnurl(request.args.get("lightning"), response_class=LnurlWithdrawResponse)
except LnurlException:
abort(Status.INTERNAL_SERVER_ERROR, "Could not process withdraw LNURL.")
try:
ok, checking_id, payment_request, error_message = WALLET.create_invoice(withdraw_res.max_sats, memo)
except Exception as e:
ok, error_message = False, str(e)
if not ok:
abort(Status.INTERNAL_SERVER_ERROR, error_message)
r = requests.get(
withdraw_res.callback.base,
params={**withdraw_res.callback.query_params, **{"k1": withdraw_res.k1, "pr": payment_request}},
)
if not r.ok:
abort(Status.INTERNAL_SERVER_ERROR, "Could not process withdraw LNURL.")
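    # Poll the invoice status up to 10 times, sleeping i seconds between checks,
    # before crediting the freshly created wallet below.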
for i in range(10):
invoice_status = WALLET.get_invoice_status(checking_id)
sleep(i)
if not invoice_status.paid:
continue
break
user = get_user(create_account().id)
wallet = create_wallet(user_id=user.id)
create_payment(
wallet_id=wallet.id,
checking_id=checking_id,
amount=withdraw_res.max_sats * 1000,
memo=memo,
pending=invoice_status.pending,
)
return redirect(url_for("core.wallet", usr=user.id, wal=wallet.id))
| [((15, 1, 15, 31), 'lnbits.core.core_app.route', 'core_app.route', ({(15, 16, 15, 30): '"""/lnurlwallet"""'}, {}), "('/lnurlwallet')", False, 'from lnbits.core import core_app\n'), ((32, 8, 35, 5), 'requests.get', 'requests.get', (), '', False, 'import requests\n'), ((25, 58, 25, 108), 'lnbits.settings.WALLET.create_invoice', 'WALLET.create_invoice', ({(25, 80, 25, 101): 'withdraw_res.max_sats', (25, 103, 25, 107): 'memo'}, {}), '(withdraw_res.max_sats, memo)', False, 'from lnbits.settings import WALLET\n'), ((30, 8, 30, 58), 'flask.abort', 'abort', ({(30, 14, 30, 42): 'Status.INTERNAL_SERVER_ERROR', (30, 44, 30, 57): 'error_message'}, {}), '(Status.INTERNAL_SERVER_ERROR, error_message)', False, 'from flask import abort, redirect, request, url_for\n'), ((38, 8, 38, 80), 'flask.abort', 'abort', ({(38, 14, 38, 42): 'Status.INTERNAL_SERVER_ERROR', (38, 44, 38, 79): '"""Could not process withdraw LNURL."""'}, {}), "(Status.INTERNAL_SERVER_ERROR, 'Could not process withdraw LNURL.')", False, 'from flask import abort, redirect, request, url_for\n'), ((41, 25, 41, 63), 'lnbits.settings.WALLET.get_invoice_status', 'WALLET.get_invoice_status', ({(41, 51, 41, 62): 'checking_id'}, {}), '(checking_id)', False, 'from lnbits.settings import WALLET\n'), ((42, 8, 42, 16), 'time.sleep', 'sleep', ({(42, 14, 42, 15): 'i'}, {}), '(i)', False, 'from time import sleep\n'), ((57, 20, 57, 70), 'flask.url_for', 'url_for', (), '', False, 'from flask import abort, redirect, request, url_for\n'), ((20, 36, 20, 65), 'flask.request.args.get', 'request.args.get', ({(20, 53, 20, 64): '"""lightning"""'}, {}), "('lightning')", False, 'from flask import abort, redirect, request, url_for\n'), ((22, 8, 22, 80), 'flask.abort', 'abort', ({(22, 14, 22, 42): 'Status.INTERNAL_SERVER_ERROR', (22, 44, 22, 79): '"""Could not process withdraw LNURL."""'}, {}), "(Status.INTERNAL_SERVER_ERROR, 'Could not process withdraw LNURL.')", False, 'from flask import abort, redirect, request, url_for\n')] |
munishm/MLOpsPython | driver_training/driver_training.py | e3ee31f6a0cac645a2b3ad945b8263e07d3085e4 | # Import libraries
import argparse
from azureml.core import Run
import joblib
import json
import os
import pandas as pd
import shutil
# Import functions from train.py
from train import split_data, train_model, get_model_metrics
# Get the output folder for the model from the '--output_folder' parameter
parser = argparse.ArgumentParser()
parser.add_argument('--output_folder', type=str, dest='output_folder', default="outputs")
args = parser.parse_args()
print(args)
output_folder = args.output_folder
# Get the experiment run context
run = Run.get_context()
# load the safe driver prediction dataset
train_df = pd.read_csv('porto_seguro_safe_driver_prediction_input.csv')
# Load the parameters for training the model from the file
with open("parameters.json") as f:
pars = json.load(f)
parameters = pars["training"]
# Log each of the parameters to the run
for param_name, param_value in parameters.items():
run.log(param_name, param_value)
# Call the functions defined in this file
train_data, valid_data = split_data(train_df)
data = [train_data, valid_data]
model = train_model(data, parameters)
# Print the resulting metrics for the model
model_metrics = get_model_metrics(model, data)
print(model_metrics)
for k, v in model_metrics.items():
run.log(k, v)
# Save the trained model to the output folder
os.makedirs(output_folder, exist_ok=True)
output_path = output_folder + "/porto_seguro_safe_driver_model.pkl"
joblib.dump(value=model, filename=output_path)
run.complete()
| [((14, 9, 14, 34), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ({}, {}), '()', False, 'import argparse\n'), ((23, 6, 23, 23), 'azureml.core.Run.get_context', 'Run.get_context', ({}, {}), '()', False, 'from azureml.core import Run\n'), ((26, 11, 26, 71), 'pandas.read_csv', 'pd.read_csv', ({(26, 23, 26, 70): '"""porto_seguro_safe_driver_prediction_input.csv"""'}, {}), "('porto_seguro_safe_driver_prediction_input.csv')", True, 'import pandas as pd\n'), ((38, 25, 38, 45), 'train.split_data', 'split_data', ({(38, 36, 38, 44): 'train_df'}, {}), '(train_df)', False, 'from train import split_data, train_model, get_model_metrics\n'), ((40, 8, 40, 37), 'train.train_model', 'train_model', ({(40, 20, 40, 24): 'data', (40, 26, 40, 36): 'parameters'}, {}), '(data, parameters)', False, 'from train import split_data, train_model, get_model_metrics\n'), ((43, 16, 43, 46), 'train.get_model_metrics', 'get_model_metrics', ({(43, 34, 43, 39): 'model', (43, 41, 43, 45): 'data'}, {}), '(model, data)', False, 'from train import split_data, train_model, get_model_metrics\n'), ((50, 0, 50, 41), 'os.makedirs', 'os.makedirs', (), '', False, 'import os\n'), ((52, 0, 52, 46), 'joblib.dump', 'joblib.dump', (), '', False, 'import joblib\n'), ((30, 11, 30, 23), 'json.load', 'json.load', ({(30, 21, 30, 22): 'f'}, {}), '(f)', False, 'import json\n')] |
madman-bob/python-lua-imports | tests/__init__.py | 76d3765b03a0478544214022118e5a4a13f6e36a | from lua_imports import lua_importer
lua_importer.register()
| [((3, 0, 3, 23), 'lua_imports.lua_importer.register', 'lua_importer.register', ({}, {}), '()', False, 'from lua_imports import lua_importer\n')] |
VyachAp/SalesFABackend | app/models/product.py | dcbe1b5106c030ee07535795dfd7b97613a1203d | from sqlalchemy import Column, Integer, String, Float
from app.database.base_class import Base
class Product(Base):
id = Column(Integer, primary_key=True, index=True)
name = Column(String, nullable=False)
price = Column(Float, nullable=False)
| [((7, 9, 7, 54), 'sqlalchemy.Column', 'Column', (), '', False, 'from sqlalchemy import Column, Integer, String, Float\n'), ((8, 11, 8, 41), 'sqlalchemy.Column', 'Column', (), '', False, 'from sqlalchemy import Column, Integer, String, Float\n'), ((9, 12, 9, 41), 'sqlalchemy.Column', 'Column', (), '', False, 'from sqlalchemy import Column, Integer, String, Float\n')] |
warriorframework/Katanaframework | katana/utils/directory_traversal_utils.py | 9dc78df9d0c8f19ef5eaaa8690fbfa1ad885b323 | import glob
import os
import re
import errno
import shutil
def get_sub_dirs_and_files(path, abs_path=False):
"""
Gets the direct child sub-files and sub-folders of the given directory
Args:
path: Absolute path to the directory
abs_path: If set to True, it returns a list of absolute paths to the sub-directories and
sub-files instead of directory names only
Returns:
dict: {"folders": [list of (if abs_path is True, then path to) sub-folders],
"files": [list of (if abs_path is True, then path to) sub-files]}
"""
folders = get_sub_folders(path, abs_path=abs_path)
files = get_sub_files(path, abs_path=abs_path)
return {"folders": folders, "files": files}
def get_sub_folders(path, abs_path=False):
"""
Gets the direct child sub-folders of the given directory
Args:
path: Absolute path to the directory
abs_path: If set to True, it returns a list of absolute paths to the sub-directories
instead of directory names only
Returns:
only_folders: [list of sub-folders]
"""
folders = []
temp = glob.glob(path + os.sep + "*")
for folder in temp:
if os.path.isdir(folder) and not folder.endswith('__pycache__'):
folders.append(folder)
only_folders = [f.replace("\\", '/') for f in folders]
if not abs_path:
only_folders = [f.rpartition('/')[2] for f in only_folders]
return only_folders
def get_sub_files(path, abs_path=False):
"""
Gets the direct child sub-files of the given directory
Args:
path: Absolute path to the directory
abs_path: If set to True, it returns a list of absolute paths to the sub-files instead of
file names only
Returns:
only_files: [list of sub-files]
"""
files = glob.glob(path + os.sep + "*.*")
only_files = [f.replace("\\", '/') for f in files]
if not abs_path:
only_files = [f.rpartition('/')[2] for f in only_files]
return only_files
def get_abs_path(relative_path, base_path=None, silence_error=False):
"""
Gets the absolute path from the given relative_path and base_path
Args:
relative_path: relative path to the file/directory
base_path: absolute path from where the relative path should be traced. If not provided, the
current working directory path will be used.
silence_error: Setting this to True would not verify if the directory exists
Returns:
path: absolute path derived from relative_path and base_path
"""
if base_path is None:
base_path = os.getcwd()
path = os.path.join(base_path.strip(), relative_path.strip())
if not silence_error and not os.path.exists(path):
print("An Error Occurred: {0} does not exist".format(path))
path = None
return path
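# Illustrative call (hypothetical paths): get_abs_path("data/items.json", "/home/user")
# returns "/home/user/data/items.json" when it exists, and None (with an error print) otherwise.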
def get_parent_directory(directory_path, level=1):
"""
Gets the parent directory
Args:
        directory_path: Absolute path to the file/dir whose parent needs to be returned
level: Indicates how many levels up to go to find the parent
eg: default of 1 goes one level up (to the parent directory)
level=2 would get the grandparent directory
    Returns:
        directory_path: Absolute path of the directory 'level' levels above the given path
"""
if directory_path.endswith(os.sep):
directory_path = directory_path[:-1]
for i in range(0, level):
directory_path = os.path.dirname(directory_path)
return directory_path
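# Example (illustrative path): get_parent_directory("/a/b/c", level=2) returns "/a".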
def get_paths_of_subfiles(parent_dir, extension=re.compile(r"\..*")):
"""
This function returns a list of all the sub-files inside the given directory
Args:
parent_dir: Absolute path to the directory
        extension: Regular Expression that would match a file extension. If not provided, file paths
                   of all extensions will be returned
Returns:
file_path: Returns a list of paths to sub-files inside the parent_dir
"""
file_paths = []
sub_files_and_folders = get_sub_dirs_and_files(parent_dir, abs_path=True)
for sub_file in sub_files_and_folders["files"]:
if extension.match(os.path.splitext(sub_file)[1]):
file_paths.append(sub_file)
for sub_folder in sub_files_and_folders["folders"]:
file_paths.extend(get_paths_of_subfiles(sub_folder, extension=extension))
return file_paths
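# Illustrative call: get_paths_of_subfiles("/data", re.compile(r"\.json")) recursively collects
# the absolute paths of every file under /data whose extension matches ".json".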
def get_dir_from_path(path):
"""
    This function is a wrapper function for os.path.basename.
Args:
path: a file path [Eg: /home/user/Documents/GitHub/warriorframework]
Returns:
The base directory name: [Eg: warriorframework]
"""
return os.path.basename(path)
def get_parent_dir_path(path):
"""
    This function is a wrapper function for os.path.dirname(os.path.normpath(<path>)).
Args:
path: a file path [Eg: /home/user/Documents/GitHub/warriorframework]
Returns:
The parent directory path: [Eg: /home/user/Documents/GitHub]
"""
return os.path.dirname(os.path.normpath(path))
def join_path(path, *paths):
"""
    This function is a wrapper function for os.path.join.
Args:
path: a file path
*paths: paths to be joined to the file path above
Returns:
Joined path
"""
return os.path.join(path, *paths)
def get_relative_path(path, start_directory):
"""
This is a wrapper function for the os.path.relpath
Args:
        path: Absolute path to the file/dir to which the relative path needs to be calculated.
start_directory: The absolute path to the starting directory
Returns:
rel_path: A relative path from start_directory
"""
if start_directory == "":
print("-- Error -- start_directory is empty.")
relpath = path
else:
try:
relpath = os.path.relpath(path, start_directory)
except Exception as e:
print("-- Error -- {0}".format(e))
relpath = None
else:
if not relpath.startswith(".") and not relpath.startswith(os.sep):
relpath = os.sep + relpath
return relpath
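# Example (POSIX-style, illustrative): get_relative_path("/a/b/c", "/a") returns "/b/c";
# os.path.relpath gives "b/c" and the leading os.sep is prepended by the check above.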
def create_dir(path):
output = path
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
output = False
print("-- A Error Occurred -- {0}".format(exception))
return output
def delete_dir(src):
output = True
try:
shutil.rmtree(src)
except Exception as e:
print(e)
output = False
return output
def file_or_dir_exists(filepath):
output = False
if os.path.exists(filepath):
output = True
return output
def get_direct_sub_files(path, abs_path=False, extension=re.compile(r"\..*")):
"""
Gets the direct child sub-files of the given directory
Args:
path: Absolute path to the directory
abs_path: If set to True, it returns a list of absolute paths to the sub-files instead of
file names only
Returns:
only_files: [list of sub-files]
"""
files = glob.glob(path + os.sep + "*.*")
only_files = [f.replace("\\", '/') for f in files]
if not abs_path:
only_files = [f.rpartition('/')[2] for f in only_files]
final_files = []
for sub_file in only_files:
if extension.match(os.path.splitext(sub_file)[1]):
final_files.append(sub_file)
return final_files
| [((41, 11, 41, 41), 'glob.glob', 'glob.glob', ({(41, 21, 41, 40): "path + os.sep + '*'"}, {}), "(path + os.sep + '*')", False, 'import glob\n'), ((63, 12, 63, 44), 'glob.glob', 'glob.glob', ({(63, 22, 63, 43): "path + os.sep + '*.*'"}, {}), "(path + os.sep + '*.*')", False, 'import glob\n'), ((114, 48, 114, 66), 're.compile', 're.compile', ({(114, 59, 114, 65): '"""\\\\..*"""'}, {}), "('\\\\..*')", False, 'import re\n'), ((147, 11, 147, 33), 'os.path.basename', 'os.path.basename', ({(147, 28, 147, 32): 'path'}, {}), '(path)', False, 'import os\n'), ((174, 11, 174, 37), 'os.path.join', 'os.path.join', ({(174, 24, 174, 28): 'path', (174, 30, 174, 36): '*paths'}, {}), '(path, *paths)', False, 'import os\n'), ((227, 7, 227, 31), 'os.path.exists', 'os.path.exists', ({(227, 22, 227, 30): 'filepath'}, {}), '(filepath)', False, 'import os\n'), ((232, 57, 232, 75), 're.compile', 're.compile', ({(232, 68, 232, 74): '"""\\\\..*"""'}, {}), "('\\\\..*')", False, 'import re\n'), ((244, 12, 244, 44), 'glob.glob', 'glob.glob', ({(244, 22, 244, 43): "path + os.sep + '*.*'"}, {}), "(path + os.sep + '*.*')", False, 'import glob\n'), ((84, 20, 84, 31), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os\n'), ((110, 25, 110, 56), 'os.path.dirname', 'os.path.dirname', ({(110, 41, 110, 55): 'directory_path'}, {}), '(directory_path)', False, 'import os\n'), ((160, 27, 160, 49), 'os.path.normpath', 'os.path.normpath', ({(160, 44, 160, 48): 'path'}, {}), '(path)', False, 'import os\n'), ((207, 8, 207, 25), 'os.makedirs', 'os.makedirs', ({(207, 20, 207, 24): 'path'}, {}), '(path)', False, 'import os\n'), ((218, 8, 218, 26), 'shutil.rmtree', 'shutil.rmtree', ({(218, 22, 218, 25): 'src'}, {}), '(src)', False, 'import shutil\n'), ((43, 11, 43, 32), 'os.path.isdir', 'os.path.isdir', ({(43, 25, 43, 31): 'folder'}, {}), '(folder)', False, 'import os\n'), ((88, 33, 88, 53), 'os.path.exists', 'os.path.exists', ({(88, 48, 88, 52): 'path'}, {}), '(path)', False, 'import os\n'), ((194, 22, 194, 60), 'os.path.relpath', 'os.path.relpath', ({(194, 38, 194, 42): 'path', (194, 44, 194, 59): 'start_directory'}, {}), '(path, start_directory)', False, 'import os\n'), ((130, 27, 130, 53), 'os.path.splitext', 'os.path.splitext', ({(130, 44, 130, 52): 'sub_file'}, {}), '(sub_file)', False, 'import os\n'), ((250, 27, 250, 53), 'os.path.splitext', 'os.path.splitext', ({(250, 44, 250, 52): 'sub_file'}, {}), '(sub_file)', False, 'import os\n')] |
LucaOnline/theanine-synthetase | alignment.py | 75a9d1f6d853409e12bf9f3b6e5948b594a03217 | """The `alignment` module provides an implementation of the Needleman-Wunsch alignment algorithm."""
from typing import Tuple, Literal, List
from math import floor
import numpy as np
from stats import variance
MOVE_DIAGONAL = 0
MOVE_RIGHT = 1
MOVE_DOWN = 2
EditMove = Literal[MOVE_DIAGONAL, MOVE_RIGHT, MOVE_DOWN]
CHEMICAL_CLASS = {
"A": "Purine",
"G": "Purine",
"T": "Pyrimidine",
"C": "Pyrimidine",
}
class AlignmentResult:
"""
AlignmentResult represents the result of performing an alignment on two sequences.
"""
def __init__(self, alignment_1: str, alignment_2: str):
"""
Produces a new AlignmentResult representing the result of performing an alignment on
two sequences.
"""
if len(alignment_1) != len(alignment_2):
raise ValueError("input strings have differing lengths")
self.alignment_1 = alignment_1
self.alignment_2 = alignment_2
def get_alignment_length(self) -> int:
"""Returns the length of the alignment."""
return len(self.alignment_1)
def get_alignment_1(self) -> str:
"""Returns the first alignment string."""
return self.alignment_1
def get_alignment_2(self) -> str:
"""Returns the second alignment string."""
return self.alignment_2
def get_match_string(self) -> str:
"""Returns the match string for the alignment."""
return "".join(
[
"|" if self.alignment_1[i] == self.alignment_2[i] else " "
for i in range(len(self.alignment_1))
]
)
def clustered_mismatches(self, cluster_count: int) -> List[int]:
"""
Breaks the alignment into `cluster_count` clusters and
returns the number of mismatches in each cluster. If the
alignment cannot be equally divided into the number of
clusters, this leaves the last cluster with the remainder
of the mismatches.
"""
if cluster_count < 1:
raise ValueError("cluster count must be greater than or equal to 1")
match_string = self.get_match_string()
cluster_size = floor(len(match_string) / cluster_count)
return [
match_string[i * cluster_size : i * cluster_size + cluster_size].count(" ")
for i in range(0, len(match_string) // cluster_size)
]
def clustered_mismatch_variance(self, cluster_count: int) -> float:
"""
Returns the variance between the mismatch clusters. The
raw cluster mismatches can be retrieved with the
`clustered_mismatches` method. `cluster_count` controls
the number of clusters used.
"""
return variance(
np.array(self.clustered_mismatches(cluster_count=cluster_count)),
sample=False,
)
def matches(self) -> int:
"""Returns the number of matching elements for the alignment."""
return self.get_match_string().count("|")
def hamming_distance(self) -> int:
"""Returns the Hamming distance of the alignment."""
return len(self.alignment_1) - self.matches()
def largest_mismatch(self) -> Tuple[int, int]:
"""Returns the position and size of the largest mismatch in the alignment."""
matches = self.get_match_string()
found_mismatch = False
largest_mismatch = 0
largest_mismatch_pos = 0
current_mismatch = 0
for i, c in enumerate(matches):
if c == " ":
found_mismatch = True
current_mismatch += 1
if current_mismatch > largest_mismatch:
largest_mismatch = current_mismatch
largest_mismatch_pos = i - largest_mismatch + 1
else:
current_mismatch = 0
if found_mismatch:
return (largest_mismatch_pos, largest_mismatch)
return (-1, 0)
def format_result(self, line_length: int = 80):
"""
Formats the found alignment with pipes between
matching elements. The optional `line_length` parameter
allows for adjusting the number of elements on each set of
lines.
"""
matches = self.get_match_string()
# Chunk lines
alignment_1_lines = [
self.alignment_1[i : i + line_length]
for i in range(0, len(self.alignment_1), line_length)
]
alignment_2_lines = [
self.alignment_2[i : i + line_length]
for i in range(0, len(self.alignment_2), line_length)
]
match_lines = [
matches[i : i + line_length] for i in range(0, len(matches), line_length)
]
# Output line chunks in order
return "\n".join(
[
"\n".join(
[alignment_1_lines[i], match_lines[i], alignment_2_lines[i], ""]
)
for i in range(len(match_lines))
]
)
def examine(self, line_length: int = 80):
"""
Formats and prints the found alignment with pipes between
matching elements. The optional `line_length` parameter
allows for adjusting the number of elements on each set of
lines.
"""
print(self.format_result(line_length=line_length))
def backtrack(quad: np.ndarray) -> EditMove:
"""Trace one step back through an edit matrix."""
if quad.shape == (0, 2):
return MOVE_DOWN
elif quad.shape == (2, 0):
return MOVE_RIGHT
# numpy's argmax doesn't allow for prioritizing non-indels
next_pos = (0, 0)
if quad[0, 1] > quad[next_pos]:
next_pos = (0, 1)
if quad[1, 0] > quad[next_pos]:
next_pos = (1, 0)
if next_pos == (0, 0):
return MOVE_DIAGONAL
elif next_pos == (0, 1):
return MOVE_RIGHT
else:
return MOVE_DOWN
def score_cell(
quad: np.ndarray,
top_char: str,
left_char: str,
nucleotides: bool,
chemical_classes: dict,
) -> np.int:
"""Calculate the Needleman-Wunsch score for a cell."""
down_score = quad[0, 1] - 1
right_score = quad[1, 0] - 1
# Penalize transversions more heavily
if nucleotides and chemical_classes[top_char] != chemical_classes[left_char]:
down_score -= 1
right_score -= 1
diag_score = quad[0, 0] - 1
if top_char == left_char:
diag_score += 2
return max([down_score, right_score, diag_score])
def align_sequences(
top_seq: str, left_seq: str, nucleotides: bool = True
) -> AlignmentResult:
"""
This function aligns the two provided sequences using Needleman-Wunsch
alignment. It uses a scoring scheme with a gap penalty of -1, a match
bonus of 1, and a mismatch penalty of -1. If the two sequences are
`nucleotides`, then an additional -1 penalty is applied to transversions.
"""
size1 = len(top_seq) + 1
size2 = len(left_seq) + 1
chemical_classes = CHEMICAL_CLASS # Copy this into the local scope so it can be accessed more quickly
# Build search matrix
search = np.zeros((size2, size1), dtype=np.int)
search[0] = [i for i in range(0, -size1, -1)]
search[:, 0] = [i for i in range(0, -size2, -1)]
# Do scoring
for x in range(1, size2):
for y in range(1, size1):
search[x, y] = score_cell(
search[x - 1 : x + 1, y - 1 : y + 1],
top_seq[y - 1],
left_seq[x - 1],
nucleotides,
chemical_classes,
)
search = search.T
# Unwind result
final_top = ""
final_left = ""
bt_x, bt_y = (size1 - 1, size2 - 1)
while bt_x != 0 or bt_y != 0:
next_move = backtrack(search[bt_x - 1 : bt_x + 1, bt_y - 1 : bt_y + 1])
if next_move == MOVE_DIAGONAL:
final_top = top_seq[bt_x - 1] + final_top
final_left = left_seq[bt_y - 1] + final_left
bt_x -= 1
bt_y -= 1
elif next_move == MOVE_DOWN:
final_top = "-" + final_top
final_left = left_seq[bt_y - 1] + final_left
bt_y -= 1
elif next_move == MOVE_RIGHT:
final_top = top_seq[bt_x - 1] + final_top
final_left = "-" + final_left
bt_x -= 1
return AlignmentResult(final_top, final_left)
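# Illustrative usage (made-up sequences): align_sequences("GATTACA", "GCATGCT").examine()
# prints both aligned strings with '|' marking matching positions.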
| [((223, 13, 223, 51), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n')] |
yujialuo/erdos | examples/the-feeling-of-success/mock_grasp_object_op.py | 7a631b55895f1a473b0f4d38a0d6053851e65b5d | from mock_gripper_op import MockGripType
from std_msgs.msg import Bool
from erdos.op import Op
from erdos.data_stream import DataStream
from erdos.message import Message
class MockGraspObjectOperator(Op):
"""
Sends a "close" action to the gripper.
"""
gripper_stream = "gripper-output-stream"
action_complete_stream_name = "grasp-action-complete-stream"
def __init__(self, name):
"""
        Initializes a lock which blocks future actions from being sent until
        the past actions are completed.
"""
super(MockGraspObjectOperator, self).__init__(name)
self.move_ahead_lock = True
@staticmethod
def setup_streams(input_streams, trigger_stream_name, gripper_stream_name):
"""
Registers callbacks on the given streams and returns two streams, one
of which sends the action to the gripper and the other returns a
message upon the completion of the action.
"""
input_streams.filter_name(trigger_stream_name)\
.add_callback(MockGraspObjectOperator.grasp_object)
input_streams.filter_name(gripper_stream_name)\
.add_callback(MockGraspObjectOperator.release_lock)
return [
DataStream(
data_type=MockGripType,
name=MockGraspObjectOperator.gripper_stream),
DataStream(
data_type=Bool,
name=MockGraspObjectOperator.action_complete_stream_name)
]
def grasp_object(self, msg):
"""
Sends a close action to the gripper and waits for its completion.
"""
mock_grasp_object = MockGripType("close")
mock_grasp_msg = Message(mock_grasp_object, msg.timestamp)
self.move_ahead_lock = False
self.get_output_stream(
MockGraspObjectOperator.gripper_stream).send(mock_grasp_msg)
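        # Busy-wait until release_lock() is triggered by a message on the gripper
        # output stream, signalling that the close action has completed.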
while not self.move_ahead_lock:
pass
action_complete_msg = Message(True, msg.timestamp)
self.get_output_stream(
MockGraspObjectOperator.action_complete_stream_name).send(
action_complete_msg)
def release_lock(self, msg):
"""
Releases the lock so that new actions can be sent to the gripper.
"""
self.move_ahead_lock = True
def execute(self):
self.spin()
| [((48, 28, 48, 49), 'mock_gripper_op.MockGripType', 'MockGripType', ({(48, 41, 48, 48): '"""close"""'}, {}), "('close')", False, 'from mock_gripper_op import MockGripType\n'), ((49, 25, 49, 66), 'erdos.message.Message', 'Message', ({(49, 33, 49, 50): 'mock_grasp_object', (49, 52, 49, 65): 'msg.timestamp'}, {}), '(mock_grasp_object, msg.timestamp)', False, 'from erdos.message import Message\n'), ((56, 30, 56, 58), 'erdos.message.Message', 'Message', ({(56, 38, 56, 42): 'True', (56, 44, 56, 57): 'msg.timestamp'}, {}), '(True, msg.timestamp)', False, 'from erdos.message import Message\n'), ((36, 12, 38, 60), 'erdos.data_stream.DataStream', 'DataStream', (), '', False, 'from erdos.data_stream import DataStream\n'), ((39, 12, 41, 73), 'erdos.data_stream.DataStream', 'DataStream', (), '', False, 'from erdos.data_stream import DataStream\n')] |
taliamax/pyfsa | src/pyfsa/lib/fsa.py | d92faa96c1e17e4016df7b367c7d405a07f1253b | # -*- coding: utf-8 -*-
import pygraphviz as gv # type: ignore
import itertools as it
from typing import (
List,
Optional,
)
from pyfsa.lib.types import TransitionsTable
def get_state_graph(
transitions: TransitionsTable,
start: Optional[str] = None,
end: Optional[str] = None,
nodes: Optional[List[str]] = None,
name: str = 'output.png',
draw: bool = True,
engine: str = 'circo',
) -> gv.AGraph:
'''
From a transition dictionary, creates a pygraphviz graph
of all the possible states and how to reach the given state.
Returns the resulting graph.
'''
graph = gv.AGraph(directed=True, strict=False, ranksep='1')
key_num = it.count()
if nodes is not None:
graph.add_nodes_from(nodes)
else:
graph.add_nodes_from(transitions.keys())
for node, transition_row in transitions.items():
for label, targets in transition_row.items():
for target in targets:
graph.add_edge(
node,
target,
key=f'{next(key_num)}',
label=label,
weight=1,
)
if start:
n: gv.Node = graph.get_node(start)
n.attr['color'] = '#0000FF'
n.attr['style'] = 'filled'
if end:
n = graph.get_node(end)
n.attr['color'] = '#00FF00'
n.attr['style'] = 'filled'
if draw:
graph.layout(prog=engine)
graph.draw(name)
return graph
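# Illustrative usage (hypothetical machine): get_state_graph({'q0': {'a': ['q1']}, 'q1': {}},
# start='q0', end='q1', name='fsa.png') draws the two-state machine with q0 and q1 highlighted.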
def verify_string(
string: str,
starting_state: str,
final_state: str,
transitions: TransitionsTable,
) -> bool:
'''
Given a transitions table, a start and end state, and
some string, verifies that executing the finite state machine
on the given string produces the desired final state.
'''
current_state = starting_state
for letter in string:
transition = transitions[current_state]
current_state = transition[letter][0]
return current_state == final_state
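# Minimal illustrative check (hypothetical table):
# verify_string('aa', 'q0', 'q1', {'q0': {'a': ['q1']}, 'q1': {'a': ['q1']}}) returns True.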
def render_string_graph(
string: str,
start: str,
end: str,
transitions: TransitionsTable,
name: str = 'output.png',
draw: bool = True,
engine: str = 'circo'
) -> gv.AGraph:
'''
    Given a string, a start state, an end state, and a
    transitions table, produces the graph resulting from
the traversal of the string through the states defined
in the transitions table. By default, it will
output a png file of the result, but that can be
suppressed.
'''
graph = gv.AGraph(directed=True)
graph.graph_attr['label'] = f'Evaluating {string}'
node_names = it.count()
current_state = start
node_name = next(node_names)
graph.add_node(node_name)
current_node = gv.Node(graph, node_name)
current_node.attr['label'] = current_state
current_node.attr['fillcolor'] = '#0000FF'
current_node.attr['style'] = 'filled'
for letter in string:
node_name = next(node_names)
graph.add_node(node_name)
next_node = gv.Node(graph, node_name)
# TODO: The algorithm prioritizes just the first
# found state, which may not produce a correct
# answer. Needs to fix this
next_state = transitions[current_state][letter][0]
next_node.attr['label'] = next_state
graph.add_edge(current_node, next_node, label=letter)
current_node = next_node
current_state = next_state
if current_state == end:
current_node.attr['style'] = 'filled'
current_node.attr['fillcolor'] = '#00FF00'
if draw:
graph.layout(prog=engine)
graph.draw(name)
return graph
| [((28, 12, 28, 63), 'pygraphviz.AGraph', 'gv.AGraph', (), '', True, 'import pygraphviz as gv\n'), ((29, 14, 29, 24), 'itertools.count', 'it.count', ({}, {}), '()', True, 'import itertools as it\n'), ((99, 12, 99, 36), 'pygraphviz.AGraph', 'gv.AGraph', (), '', True, 'import pygraphviz as gv\n'), ((103, 17, 103, 27), 'itertools.count', 'it.count', ({}, {}), '()', True, 'import itertools as it\n'), ((107, 19, 107, 44), 'pygraphviz.Node', 'gv.Node', ({(107, 27, 107, 32): 'graph', (107, 34, 107, 43): 'node_name'}, {}), '(graph, node_name)', True, 'import pygraphviz as gv\n'), ((115, 20, 115, 45), 'pygraphviz.Node', 'gv.Node', ({(115, 28, 115, 33): 'graph', (115, 35, 115, 44): 'node_name'}, {}), '(graph, node_name)', True, 'import pygraphviz as gv\n')] |
kapikantzari/MultiBench | examples/multimedia/mmimdb_MFM.py | 44ab6ea028682040a0c04de68239ce5cdf15123f | import torch
import sys
import os
sys.path.append(os.getcwd())
from utils.helper_modules import Sequential2
from unimodals.common_models import Linear, MLP, MaxOut_MLP
from datasets.imdb.get_data import get_dataloader
from fusions.common_fusions import Concat
from objective_functions.objectives_for_supervised_learning import MFM_objective
from objective_functions.recon import sigmloss1d
from training_structures.Supervised_Learning import train, test
filename = "best_mfm.pt"
traindata, validdata, testdata = get_dataloader(
"../video/multimodal_imdb.hdf5", "../video/mmimdb", vgg=True, batch_size=128)
classes = 23
n_latent = 512
fuse = Sequential2(Concat(), MLP(2*n_latent, n_latent, n_latent//2)).cuda()
encoders = [MaxOut_MLP(512, 512, 300, n_latent, False).cuda(
), MaxOut_MLP(512, 1024, 4096, n_latent, False).cuda()]
head = Linear(n_latent//2, classes).cuda()
decoders = [MLP(n_latent, 600, 300).cuda(), MLP(n_latent, 2048, 4096).cuda()]
intermediates = [MLP(n_latent, n_latent//2, n_latent//2).cuda(),
MLP(n_latent, n_latent//2, n_latent//2).cuda()]
recon_loss = MFM_objective(2.0, [sigmloss1d, sigmloss1d], [
1.0, 1.0], criterion=torch.nn.BCEWithLogitsLoss())
train(encoders, fuse, head, traindata, validdata, 1000, decoders+intermediates, early_stop=True, task="multilabel",
objective_args_dict={"decoders": decoders, "intermediates": intermediates}, save=filename, optimtype=torch.optim.AdamW, lr=5e-3, weight_decay=0.01, objective=recon_loss)
print("Testing:")
model = torch.load(filename).cuda()
test(model, testdata, method_name="MFM", dataset="imdb",
criterion=torch.nn.BCEWithLogitsLoss(), task="multilabel")
| [((16, 33, 17, 81), 'datasets.imdb.get_data.get_dataloader', 'get_dataloader', (), '', False, 'from datasets.imdb.get_data import get_dataloader\n'), ((33, 0, 34, 175), 'training_structures.Supervised_Learning.train', 'train', (), '', False, 'from training_structures.Supervised_Learning import train, test\n'), ((5, 16, 5, 27), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os\n'), ((24, 7, 24, 35), 'unimodals.common_models.Linear', 'Linear', ({(24, 14, 24, 25): 'n_latent // 2', (24, 27, 24, 34): 'classes'}, {}), '(n_latent // 2, classes)', False, 'from unimodals.common_models import Linear, MLP, MaxOut_MLP\n'), ((31, 48, 31, 76), 'torch.nn.BCEWithLogitsLoss', 'torch.nn.BCEWithLogitsLoss', ({}, {}), '()', False, 'import torch\n'), ((37, 8, 37, 28), 'torch.load', 'torch.load', ({(37, 19, 37, 27): 'filename'}, {}), '(filename)', False, 'import torch\n'), ((39, 15, 39, 43), 'torch.nn.BCEWithLogitsLoss', 'torch.nn.BCEWithLogitsLoss', ({}, {}), '()', False, 'import torch\n'), ((21, 19, 21, 27), 'fusions.common_fusions.Concat', 'Concat', ({}, {}), '()', False, 'from fusions.common_fusions import Concat\n'), ((21, 29, 21, 67), 'unimodals.common_models.MLP', 'MLP', ({(21, 33, 21, 43): '2 * n_latent', (21, 45, 21, 53): 'n_latent', (21, 55, 21, 66): 'n_latent // 2'}, {}), '(2 * n_latent, n_latent, n_latent // 2)', False, 'from unimodals.common_models import Linear, MLP, MaxOut_MLP\n'), ((22, 12, 22, 54), 'unimodals.common_models.MaxOut_MLP', 'MaxOut_MLP', ({(22, 23, 22, 26): '(512)', (22, 28, 22, 31): '(512)', (22, 33, 22, 36): '(300)', (22, 38, 22, 46): 'n_latent', (22, 48, 22, 53): '(False)'}, {}), '(512, 512, 300, n_latent, False)', False, 'from unimodals.common_models import Linear, MLP, MaxOut_MLP\n'), ((23, 3, 23, 47), 'unimodals.common_models.MaxOut_MLP', 'MaxOut_MLP', ({(23, 14, 23, 17): '(512)', (23, 19, 23, 23): '(1024)', (23, 25, 23, 29): '(4096)', (23, 31, 23, 39): 'n_latent', (23, 41, 23, 46): '(False)'}, {}), '(512, 1024, 4096, n_latent, False)', False, 'from unimodals.common_models import Linear, MLP, MaxOut_MLP\n'), ((26, 12, 26, 35), 'unimodals.common_models.MLP', 'MLP', ({(26, 16, 26, 24): 'n_latent', (26, 26, 26, 29): '(600)', (26, 31, 26, 34): '(300)'}, {}), '(n_latent, 600, 300)', False, 'from unimodals.common_models import Linear, MLP, MaxOut_MLP\n'), ((26, 44, 26, 69), 'unimodals.common_models.MLP', 'MLP', ({(26, 48, 26, 56): 'n_latent', (26, 58, 26, 62): '(2048)', (26, 64, 26, 68): '(4096)'}, {}), '(n_latent, 2048, 4096)', False, 'from unimodals.common_models import Linear, MLP, MaxOut_MLP\n'), ((27, 17, 27, 56), 'unimodals.common_models.MLP', 'MLP', ({(27, 21, 27, 29): 'n_latent', (27, 31, 27, 42): '(n_latent // 2)', (27, 44, 27, 55): '(n_latent // 2)'}, {}), '(n_latent, n_latent // 2, n_latent // 2)', False, 'from unimodals.common_models import Linear, MLP, MaxOut_MLP\n'), ((28, 17, 28, 56), 'unimodals.common_models.MLP', 'MLP', ({(28, 21, 28, 29): 'n_latent', (28, 31, 28, 42): '(n_latent // 2)', (28, 44, 28, 55): '(n_latent // 2)'}, {}), '(n_latent, n_latent // 2, n_latent // 2)', False, 'from unimodals.common_models import Linear, MLP, MaxOut_MLP\n')] |
izumin2000/izuminapp | subeana/migrations/0001_initial.py | 3464cebe1d98c85c2cd95c6fac779ec1f42ef930 | # Generated by Django 4.0.2 on 2022-06-01 04:43
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Channel',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='', max_length=50)),
('isexist', models.BooleanField(default=True)),
],
),
migrations.CreateModel(
name='Song',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(default='', max_length=50)),
('lyrics', models.CharField(default='', max_length=5000)),
('url', models.CharField(blank=True, default='', max_length=50, null=True)),
('isexist', models.BooleanField(default=True)),
('channel', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='song_channel', to='subeana.channel')),
('imitate', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='song_imitate', to='subeana.song')),
],
),
]
| [((18, 23, 18, 115), 'django.db.models.BigAutoField', 'models.BigAutoField', (), '', False, 'from django.db import migrations, models\n'), ((19, 25, 19, 68), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((20, 28, 20, 61), 'django.db.models.BooleanField', 'models.BooleanField', (), '', False, 'from django.db import migrations, models\n'), ((26, 23, 26, 115), 'django.db.models.BigAutoField', 'models.BigAutoField', (), '', False, 'from django.db import migrations, models\n'), ((27, 26, 27, 69), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((28, 27, 28, 72), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((29, 24, 29, 90), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((30, 28, 30, 61), 'django.db.models.BooleanField', 'models.BooleanField', (), '', False, 'from django.db import migrations, models\n'), ((31, 28, 31, 167), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n'), ((32, 28, 32, 164), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n')] |
shirubana/bifacialvf | bifacialvf/vf.py | 7cd1c4c658bb7a68f0815b2bd1a6d5c492ca7300 | # -*- coding: utf-8 -*-
"""
ViewFactor module - VF calculation helper files for bifacial-viewfactor
@author Bill Marion
@translated to python by sayala 06/09/17
"""
# ensure python3 compatible division and printing
from __future__ import division, print_function, absolute_import
import math
import numpy as np
from sun import solarPos, sunIncident, perezComp, aOIcorrection
import logging
# TODO: set level or add formatters if more advanced logging required
LOGGER = logging.getLogger(__name__) # only used to raise errors
DTOR = math.pi / 180.0 # Factor for converting from degrees to radians
def getBackSurfaceIrradiances(rowType, maxShadow, PVbackSurface, beta, sazm,
dni, dhi, C, D, albedo, zen, azm, cellRows,
pvBackSH, rearGroundGHI, frontGroundGHI,
frontReflected, offset=0):
"""
This method calculates the AOI corrected irradiance on the back of the PV
module/panel. 11/19/2015
Added rowType and other changes to distinguish between types of rows.
4/19/2016
Added input of offset of reference cell from PV module back (in PV panel
slope lengths) for modeling Sara's reference cell measurements, should be
set to zero for PV module cell irradiances.
Added while loop so projected Xs aren't too negative causing array index
problems (<0) 12/13/2016::
        while (projectedX1 < -100.0 or projectedX2 < -100.0):
            # Offset so array indexes are >= -100.0 12/13/2016
            projectedX1 += 100.0
            projectedX2 += 100.0
Parameters
----------
rowType : str
Type of row: "first", "interior", "last", or "single"
maxShadow
Maximum shadow length projected to the front(-) or rear (+) from the
front of the module
PVbackSurface
PV module back surface material type, either "glass" or "ARglass"
beta
Tilt from horizontal of the PV modules/panels (deg) (for front surface)
sazm
Surface azimuth of PV panels (deg) (for front surface)
dni
Direct normal irradiance (W/m2)
dhi
Diffuse horizontal irradiance (W/m2)
C
Ground clearance of PV panel (in PV panel slope lengths)
D
Horizontal distance between rows of PV panels (in PV panel slope
lengths)
albedo
Ground albedo
zen
Sun zenith (in radians)
azm
        Sun azimuth (in radians)
    cellRows
        Number of cell rows per PV module/panel
    pvBackSH
Decimal fraction of the back surface of the PV panel that is shaded,
0.0 to 1.0
rearGroundGHI : array of size [100]
Global horizontal irradiance for each of 100 ground segments (W/m2)
frontGroundGHI : array of size [100]
Global horizontal irradiance for each of 100 ground segments (W/m2)
frontReflected : array of size [cellRows]
Irradiance reflected from the front of the PV module/panel (W/m2) in
the row behind the one of interest
offset
Offset of reference cell from PV module back (in PV panel slope
lengths), set to zero for PV module cell irradiances
Returns
-------
backGTI : array of size [cellRows]
AOI corrected irradiance on back side of PV module/panel, one for each
cell row (W/m2)
aveGroundGHI : numeric
Average GHI on ground under PV array
Notes
-----
1-degree hemispherical segment AOI correction factor for glass (index=0)
and ARglass (index=1)
"""
backGTI = []
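    # SegAOIcor holds AOI correction factors for 180 one-degree hemispherical segments:
    # row 0 is for a glass back surface, row 1 for ARglass (see Notes in the docstring above).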
SegAOIcor = [
[0.057563, 0.128570, 0.199651, 0.265024, 0.324661, 0.378968, 0.428391, 0.473670, 0.514788, 0.552454,
0.586857, 0.618484, 0.647076, 0.673762, 0.698029, 0.720118, 0.740726, 0.759671, 0.776946, 0.792833,
0.807374, 0.821010, 0.833534, 0.845241, 0.855524, 0.865562, 0.874567, 0.882831, 0.890769, 0.897939,
0.904373, 0.910646, 0.916297, 0.921589, 0.926512, 0.930906, 0.935179, 0.939074, 0.942627, 0.946009,
0.949096, 0.952030, 0.954555, 0.957157, 0.959669, 0.961500, 0.963481, 0.965353, 0.967387, 0.968580,
0.970311, 0.971567, 0.972948, 0.974114, 0.975264, 0.976287, 0.977213, 0.978142, 0.979057, 0.979662,
0.980460, 0.981100, 0.981771, 0.982459, 0.982837, 0.983199, 0.983956, 0.984156, 0.984682, 0.985026,
0.985364, 0.985645, 0.985954, 0.986241, 0.986484, 0.986686, 0.986895, 0.987043, 0.987287, 0.987388,
0.987541, 0.987669, 0.987755, 0.987877, 0.987903, 0.987996, 0.988022, 0.988091, 0.988104, 0.988114,
0.988114, 0.988104, 0.988091, 0.988022, 0.987996, 0.987903, 0.987877, 0.987755, 0.987669, 0.987541,
0.987388, 0.987287, 0.987043, 0.986895, 0.986686, 0.986484, 0.986240, 0.985954, 0.985645, 0.985364,
0.985020, 0.984676, 0.984156, 0.983956, 0.983199, 0.982837, 0.982459, 0.981771, 0.981100, 0.980460,
0.979662, 0.979057, 0.978142, 0.977213, 0.976287, 0.975264, 0.974114, 0.972947, 0.971567, 0.970311,
0.968580, 0.967387, 0.965353, 0.963481, 0.961501, 0.959671, 0.957157, 0.954555, 0.952030, 0.949096,
0.946009, 0.942627, 0.939074, 0.935179, 0.930906, 0.926512, 0.921589, 0.916297, 0.910646, 0.904373,
0.897939, 0.890769, 0.882831, 0.874567, 0.865562, 0.855524, 0.845241, 0.833534, 0.821010, 0.807374,
0.792833, 0.776946, 0.759671, 0.740726, 0.720118, 0.698029, 0.673762, 0.647076, 0.618484, 0.586857,
0.552454, 0.514788, 0.473670, 0.428391, 0.378968, 0.324661, 0.265024, 0.199651, 0.128570, 0.057563],
[0.062742, 0.139913, 0.216842, 0.287226, 0.351055, 0.408796, 0.460966, 0.508397, 0.551116, 0.589915,
0.625035, 0.657029, 0.685667, 0.712150, 0.735991, 0.757467, 0.777313, 0.795374, 0.811669, 0.826496,
0.839932, 0.852416, 0.863766, 0.874277, 0.883399, 0.892242, 0.900084, 0.907216, 0.914023, 0.920103,
0.925504, 0.930744, 0.935424, 0.939752, 0.943788, 0.947313, 0.950768, 0.953860, 0.956675, 0.959339,
0.961755, 0.964039, 0.965984, 0.967994, 0.969968, 0.971283, 0.972800, 0.974223, 0.975784, 0.976647,
0.977953, 0.978887, 0.979922, 0.980773, 0.981637, 0.982386, 0.983068, 0.983759, 0.984436, 0.984855,
0.985453, 0.985916, 0.986417, 0.986934, 0.987182, 0.987435, 0.988022, 0.988146, 0.988537, 0.988792,
0.989043, 0.989235, 0.989470, 0.989681, 0.989857, 0.990006, 0.990159, 0.990263, 0.990455, 0.990515,
0.990636, 0.990731, 0.990787, 0.990884, 0.990900, 0.990971, 0.990986, 0.991042, 0.991048, 0.991057,
0.991057, 0.991048, 0.991042, 0.990986, 0.990971, 0.990900, 0.990884, 0.990787, 0.990731, 0.990636,
0.990515, 0.990455, 0.990263, 0.990159, 0.990006, 0.989857, 0.989681, 0.989470, 0.989235, 0.989043,
0.988787, 0.988532, 0.988146, 0.988022, 0.987435, 0.987182, 0.986934, 0.986417, 0.985916, 0.985453,
0.984855, 0.984436, 0.983759, 0.983068, 0.982386, 0.981637, 0.980773, 0.979920, 0.978887, 0.977953,
0.976647, 0.975784, 0.974223, 0.972800, 0.971284, 0.969970, 0.967994, 0.965984, 0.964039, 0.961755,
0.959339, 0.956675, 0.953860, 0.950768, 0.947313, 0.943788, 0.939752, 0.935424, 0.930744, 0.925504,
0.920103, 0.914023, 0.907216, 0.900084, 0.892242, 0.883399, 0.874277, 0.863766, 0.852416, 0.839932,
0.826496, 0.811669, 0.795374, 0.777313, 0.757467, 0.735991, 0.712150, 0.685667, 0.657029, 0.625035,
0.589915, 0.551116, 0.508397, 0.460966, 0.408796, 0.351055, 0.287226, 0.216842, 0.139913, 0.062742]]
# Tilt from horizontal of the PV modules/panels, in radians
beta = beta * DTOR
sazm = sazm * DTOR # Surface azimuth of PV module/panels, in radians
    # 1. Calculate and assign various parameters to be used for modeling
# irradiances
# For calling PerezComp to break diffuse into components for zero tilt
# (horizontal)
iso_dif = 0.0; circ_dif = 0.0; horiz_dif = 0.0; grd_dif = 0.0; beam = 0.0
# Call to get iso_dif for horizontal surface
ghi, iso_dif, circ_dif, horiz_dif, grd_dif, beam = perezComp(
dni, dhi, albedo, zen, 0.0, zen)
# Isotropic irradiance from sky on horizontal surface, used later for
# determining isotropic sky component
iso_sky_dif = iso_dif
# For calling PerezComp to break diffuse into components for 90 degree tilt
# (vertical)
inc, tiltr, sazmr = sunIncident(0, 90.0, 180.0, 45.0, zen, azm)
# Call to get horiz_dif for vertical surface
vti, iso_dif, circ_dif, horiz_dif, grd_dif, beam = perezComp(
dni, dhi, albedo, inc, tiltr, zen)
# Horizon diffuse irradiance on a vertical surface, used later for
# determining horizon brightening irradiance component
F2DHI = horiz_dif
index = -99
n2 = -99.9
if (PVbackSurface == "glass"):
# Index to use with 1-degree hemispherical segment AOI correction
# factor array
index = 0
n2 = 1.526 # Index of refraction for glass
elif (PVbackSurface == "ARglass"):
# Index to use with 1-degree hemispherical segment AOI correction
# factor array
index = 1
n2 = 1.300 # Index of refraction for ARglass
else:
raise Exception(
"Incorrect text input for PVbackSurface."
" Must be glass or ARglass.")
# Reflectance at normal incidence, Duffie and Beckman p217
Ro = math.pow((n2 - 1.0) / (n2 + 1.0), 2.0)
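    # Worked numbers for reference: with n2 = 1.526 (glass),
    # Ro = ((1.526 - 1.0) / (1.526 + 1.0))**2 ~ 0.043; with n2 = 1.300 (ARglass),
    # Ro = ((1.3 - 1.0) / (1.3 + 1.0))**2 ~ 0.017.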
    # Average GHI on ground under PV array for cases when x projection exceeds
# 2*rtr
aveGroundGHI = 0.0
for i in range(0,100):
aveGroundGHI += rearGroundGHI[i] / 100.0
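    # aveGroundGHI is the simple mean of the 100 rear ground-segment GHIs; it is
    # used further below as a fallback whenever a 1-degree view band projects onto
    # a ground span comparable to a full row-to-row distance.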
# Calculate x,y coordinates of bottom and top edges of PV row in back of desired PV row so that portions of sky and ground viewed by the
    # PV cell may be determined. Origin of x-y axis is the ground point below the lower front edge of the desired PV row. The row in back of
# the desired row is in the positive x direction.
h = math.sin(beta); # Vertical height of sloped PV panel (in PV panel slope lengths)
x1 = math.cos(beta); # Horizontal distance from front of panel to rear of panel (in PV panel slope lengths)
rtr = D + x1; # Row-to-row distance (in PV panel slope lengths)
    PbotX = rtr;       # x value for point on bottom edge of PV module/panel of the row behind (in PV panel slope lengths)
    PbotY = C;         # y value for point on bottom edge of PV module/panel of the row behind (in PV panel slope lengths)
    PtopX = rtr + x1;  # x value for point on top edge of PV module/panel of the row behind (in PV panel slope lengths)
    PtopY = h + C;     # y value for point on top edge of PV module/panel of the row behind (in PV panel slope lengths)
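    # Example geometry (illustrative values only): for beta = 30 deg, C = 0.5 and
    # D = 0.5 (all in slope lengths), h = sin(30 deg) = 0.5, x1 = cos(30 deg) ~ 0.866
    # and rtr ~ 1.366, so the row behind spans x in [1.366, 2.232] and y in [0.5, 1.0].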
# 2. Calculate diffuse and direct component irradiances for each cell row
for i in range (0, cellRows):
        # Calculate diffuse irradiances and reflected amounts for each cell row over its field of view of 180 degrees,
        # beginning with the angle providing the uppermost view of the sky (j=0)
#PcellX = x1 * (i + 0.5) / ((double)cellRows); # x value for location of PV cell
#PcellY = C + h * (i + 0.5) / ((double)cellRows); # y value for location of PV cell
PcellX = x1 * (i + 0.5) / (cellRows) + offset * math.sin(beta); # x value for location of PV cell with OFFSET FOR SARA REFERENCE CELLS 4/26/2016
PcellY = C + h * (i + 0.5) / (cellRows) - offset * math.cos(beta); # y value for location of PV cell with OFFSET FOR SARA REFERENCE CELLS 4/26/2016
elvUP = math.atan((PtopY - PcellY) / (PtopX - PcellX)); # Elevation angle up from PV cell to top of PV module/panel, radians
elvDOWN = math.atan((PcellY - PbotY) / (PbotX - PcellX)); # Elevation angle down from PV cell to bottom of PV module/panel, radians
if (rowType == "last" or rowType == "single"): # 4/19/16 No array to the rear for these cases
elvUP = 0.0;
elvDOWN = 0.0;
#Console.WriteLine("ElvUp = 0", elvUP / DTOR);
#if (i == 0)
# Console.WriteLine("ElvDown = 0", elvDOWN / DTOR);
#123
#iStopIso = Convert.ToInt32((beta - elvUP) / DTOR); # Last whole degree in arc range that sees sky, first is 0
#Console.WriteLine("iStopIso = 0", iStopIso);
#iHorBright = Convert.ToInt32(max(0.0, 6.0 - elvUP / DTOR)); # Number of whole degrees for which horizon brightening occurs
#iStartGrd = Convert.ToInt32((beta + elvDOWN) / DTOR); # First whole degree in arc range that sees ground, last is 180
iStopIso = int(round((beta - elvUP) / DTOR)); # Last whole degree in arc range that sees sky, first is 0
#Console.WriteLine("iStopIso = 0", iStopIso);
iHorBright = int(round(max(0.0, 6.0 - elvUP / DTOR))); # Number of whole degrees for which horizon brightening occurs
iStartGrd = int(round((beta + elvDOWN) / DTOR)); # First whole degree in arc range that sees ground, last is 180
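        # The 180-degree sweep seen by this cell is split into three bands:
        # j in [0, iStopIso) sees sky (plus horizon brightening near the end of that
        # band), j in [iStopIso, iStartGrd) sees the front of the row behind (handled
        # via frontReflected), and j in [iStartGrd, 180) sees the ground.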
        backGTI.append(0.0)       # Initialize back GTI
for j in range (0, iStopIso): # Add sky diffuse component and horizon brightening if present
backGTI[i] += 0.5 * (math.cos(j * DTOR) - math.cos((j + 1) * DTOR)) * SegAOIcor[index][j] * iso_sky_dif; # Sky radiation
# backGTI[i] += 0.5 * (math.cos(j * DTOR) - math.cos((j + 1) * DTOR)) * iso_sky_dif; # Sky radiation
if ((iStopIso - j) <= iHorBright): # Add horizon brightening term if seen
                backGTI[i] += 0.5 * (math.cos(j * DTOR) - math.cos((j + 1) * DTOR)) * SegAOIcor[index][j] * F2DHI / 0.052264;          # 0.052264 = 0.5 * [cos(84) - cos(90)]
#backGTI[i] += 0.5 * (math.cos(j * DTOR) - math.cos((j + 1) * DTOR)) * F2DHI / 0.052264; # 0.052246 = 0.5 * [cos(84) - cos(90)]
if (rowType == "interior" or rowType == "first"): # 4/19/16 Only add reflections from PV modules for these cases
for j in range (iStopIso, iStartGrd): #j = iStopIso; j < iStartGrd; j++) # Add relections from PV module front surfaces
L = (PbotX - PcellX) / math.cos(elvDOWN); # Diagonal distance from cell to bottom of module in row behind
startAlpha = -(j - iStopIso) * DTOR + elvUP + elvDOWN;
stopAlpha = -(j + 1 - iStopIso) * DTOR + elvUP + elvDOWN;
m = L * math.sin(startAlpha);
theta = math.pi - elvDOWN - (math.pi / 2.0 - startAlpha) - beta;
projectedX2 = m / math.cos(theta); # Projected distance on sloped PV module
m = L * math.sin(stopAlpha);
theta = math.pi - elvDOWN - (math.pi / 2.0 - stopAlpha) - beta;
projectedX1 = m / math.cos(theta); # Projected distance on sloped PV module
projectedX1 = max(0.0, projectedX1);
#Console.WriteLine("j= 0 projected X1 = 1,6:0.000 projected X2 = 2,6:0.000", j, projectedX1, projectedX2);
PVreflectedIrr = 0.0; # Irradiance from PV module front cover reflections
deltaCell = 1.0 / cellRows; # Length of cell in sloped direction in module/panel units (dimensionless)
for k in range (0, cellRows): # Determine which cells in behind row are seen, and their reflected irradiance
cellBot = k * deltaCell; # Position of bottom of cell along PV module/panel
cellTop = (k + 1) * deltaCell; # Position of top of cell along PV module/panel
cellLengthSeen = 0.0; # Length of cell seen for this row, start with zero
if (cellBot >= projectedX1 and cellTop <= projectedX2):
cellLengthSeen = cellTop - cellBot; # Sees the whole cell
elif (cellBot <= projectedX1 and cellTop >= projectedX2):
cellLengthSeen = projectedX2 - projectedX1; # Sees portion in the middle of cell
elif (cellBot >= projectedX1 and projectedX2 > cellBot and cellTop >= projectedX2):
cellLengthSeen = projectedX2 - cellBot; # Sees bottom of cell
elif (cellBot <= projectedX1 and projectedX1 < cellTop and cellTop <= projectedX2):
cellLengthSeen = cellTop - projectedX1; # Sees top of cell
#Console.WriteLine("cell= 0 cellBot = 1,5:0.00 cellTop = 2,5:0.00 Cell length seen = 3,5:0.00", k, cellBot, cellTop, cellLengthSeen);
PVreflectedIrr += cellLengthSeen * frontReflected[k]; # Add reflected radiation for this PV cell, if seen, weight by cell length seen
PVreflectedIrr /= projectedX2 - projectedX1; # Reflected irradiance from PV modules (W/m2)
backGTI[i] += 0.5 * (math.cos(j * DTOR) - math.cos((j + 1) * DTOR)) * SegAOIcor[index][j] * PVreflectedIrr; # Radiation reflected from PV module surfaces onto back surface of module
# End of adding reflections from PV module surfaces
#Console.WriteLine("");
#if (i == 0)
#Console.WriteLine("iStartGrd = 0", iStartGrd);
for j in range (iStartGrd, 180): # Add ground reflected component
startElvDown = (j - iStartGrd) * DTOR + elvDOWN; # Start and ending down elevations for this j loop
stopElvDown = (j + 1 - iStartGrd) * DTOR + elvDOWN;
projectedX2 = PcellX + np.float64(PcellY) / math.tan(startElvDown); # Projection of ElvDown to ground in +x direction (X1 and X2 opposite nomenclature for front irradiance method)
projectedX1 = PcellX + PcellY / math.tan(stopElvDown);
            actualGroundGHI = 0.0;         # Actual ground GHI from summing array values
#if (i == 0)
# Console.WriteLine("j= 0 projected X1 = 1,6:0.0", j, 100 * projectedX1 / rtr);
if (abs(projectedX1 - projectedX2) > 0.99 * rtr):
if (rowType == "last" or rowType == "single"): # 4/19/16 No array to rear for these cases
actualGroundGHI = ghi; # Use total value if projection approximates the rtr
else:
actualGroundGHI = aveGroundGHI; # Use average value if projection approximates the rtr
else:
projectedX1 = 100.0 * projectedX1 / rtr; # Normalize projections and multiply by 100
projectedX2 = 100.0 * projectedX2 / rtr;
#Console.WriteLine("projectedX1 = 0 projectedX2 = 1", projectedX1, projectedX2);
if ((rowType == "last" or rowType == "single") and (abs(projectedX1) > 99.0 or abs(projectedX2) > 99.0)): #4/19/2016
actualGroundGHI = ghi; # Use total value if projection > rtr for "last" or "single"
else:
while (projectedX1 >= 100.0 or projectedX2 >= 100.0): # Offset so array indexes are less than 100
projectedX1 -= 100.0;
projectedX2 -= 100.0;
while (projectedX1 < -100.0 or projectedX2 < -100.0): # Offset so array indexes are >= -100.0 12/13/2016
projectedX1 += 100.0;
projectedX2 += 100.0;
#Console.WriteLine("projectedX1 = 0 projectedX2 = 1", projectedX1, projectedX2);
index1 = (int)(projectedX1 + 100.0) - 100; # Determine indexes for use with rearGroundGHI array and frontGroundGHI array(truncates values)
index2 = (int)(projectedX2 + 100.0) - 100; # (int)(1.9) = 1 and (int)(-1.9) = -1; (int)(1.9+100) - 100 = 1 and (int)(-1.9+100) - 100 = -2
#Console.WriteLine("index1=0 index2=1", index1, index2);
if (index1 == index2):
if (index1 < 0):
actualGroundGHI = frontGroundGHI[index1 + 100];
#actualGroundGHI = 0.0;
else:
actualGroundGHI = rearGroundGHI[index1]; # x projections in same groundGHI element THIS SEEMS TO ADD HICCUP 4/26/2016 ***************************
#actualGroundGHI = 0.0;
else:
for k in range (index1, index2+1): #for (k = index1; k <= index2; k++) # Sum the irradiances on the ground if projections are in different groundGHI elements
if (k == index1):
if (k < 0):
actualGroundGHI += frontGroundGHI[k + 100] * (k + 1.0 - projectedX1);
else:
actualGroundGHI += rearGroundGHI[k] * (k + 1.0 - projectedX1);
elif (k == index2):
if (k < 0):
actualGroundGHI += frontGroundGHI[k + 100] * (projectedX2 - k);
else:
actualGroundGHI += rearGroundGHI[k] * (projectedX2 - k);
else:
if (k < 0):
actualGroundGHI += frontGroundGHI[k + 100];
else:
actualGroundGHI += rearGroundGHI[k];
actualGroundGHI /= projectedX2 - projectedX1; # Irradiance on ground in the 1 degree field of view
#if (i == 0)
# Console.WriteLine("j=0 index1=1 index2=2 projectX1=3,5:0.0 projectX2=4,5:0.0 actualGrdGHI=5,6:0.0", j, index1, index2, projectedX1, projectedX2, actualGroundGHI);
# End of if looping to determine actualGroundGHI
backGTI[i] += 0.5 * (math.cos(j * DTOR) - math.cos((j + 1) * DTOR)) * SegAOIcor[index][j] * actualGroundGHI * albedo; # Add ground reflected component
#Console.WriteLine("actualGroundGHI = 0,6:0.0 inputGHI = 1,6:0.0 aveArrayGroundGHI = 2,6:0.0", actualGroundGHI, dhi + dni * math.cos(zen), aveGroundGHI);
# End of j loop for adding ground reflected componenet
# Calculate and add direct and circumsolar irradiance components
inc, tiltr, sazmr = sunIncident(0, 180-beta / DTOR, sazm / DTOR - 180, 45.0, zen, azm) # For calling PerezComp to break diffuse into components for downward facing tilt
gtiAllpc, iso_dif, circ_dif, horiz_dif, grd_dif, beam = perezComp(dni, dhi, albedo, inc, tiltr, zen) # Call to get components for the tilt
cellShade = pvBackSH * cellRows - i;
if (cellShade > 1.0): # Fully shaded if > 1, no shade if < 0, otherwise fractionally shaded
cellShade = 1.0;
elif (cellShade < 0.0):
cellShade = 0.0;
if (cellShade < 1.0 and inc < math.pi / 2.0): # Cell not shaded entirely and inc < 90 deg
cor = aOIcorrection(n2, inc); # Get AOI correction for beam and circumsolar
backGTI[i] += (1.0 - cellShade) * (beam + circ_dif) * cor; # Add beam and circumsolar radiation
# End of for i = 0; i < cellRows loop
return backGTI, aveGroundGHI;
# End of GetBackSurfaceIrradiances
def getFrontSurfaceIrradiances(rowType, maxShadow, PVfrontSurface, beta, sazm,
dni, dhi, C, D, albedo, zen, azm, cellRows,
pvFrontSH, frontGroundGHI):
"""
This method calculates the AOI corrected irradiance on the front of the PV
module/panel and the irradiance reflected from the the front of the PV
module/panel. 11/12/2015
Added row type and MaxShadow and changed code to accommodate 4/19/2015
Parameters
----------
rowType : str
Type of row: "first", "interior", "last", or "single"
maxShadow
Maximum shadow length projected to the front (-) or rear (+) from the
front of the module row (in PV panel slope lengths), only used for
`rowTypes` other than "interior"
PVfrontSurface
PV module front surface material type, either "glass" or "ARglass"
beta
Tilt from horizontal of the PV modules/panels (deg)
sazm
Surface azimuth of PV panels (deg)
dni
Direct normal irradiance (W/m2)
dhi
Diffuse horizontal irradiance (W/m2)
C
Ground clearance of PV panel (in PV panel slope lengths)
D
Horizontal distance between rows of PV panels (in PV panel slope
lengths)
albedo
Ground albedo
zen
Sun zenith (in radians)
azm
Sun azimuth (in radians)
pvFrontSH
Decimal fraction of the front surface of the PV panel that is shaded,
0.0 to 1.0
    frontGroundGHI : array of size [100]
Global horizontal irradiance for each of 100 ground segments in front
of the module row
Returns
-------
frontGTI : array of size [cellRows]
AOI corrected irradiance on front side of PV module/panel, one for each
cell row (W/m2)
frontReflected : array of size [cellRows]
Irradiance reflected from the front of the PV module/panel (W/m2)
aveGroundGHI : numeric
Average GHI on the ground (includes effects of shading by array) from
the array frontGroundGHI[100]
Notes
-----
    1-degree hemispherical segment AOI correction factor for glass (index=0)
    and ARglass (index=1): two lists of 180 factors, one per 1-degree segment.
"""
frontGTI = []
frontReflected = []
#w, h = 2, 180;
#SegAOIcor = [[0 for x in range(w)] for y in range(h)]
SegAOIcor = ([[0.057563, 0.128570, 0.199651, 0.265024, 0.324661, 0.378968, 0.428391, 0.473670, 0.514788, 0.552454,
0.586857, 0.618484, 0.647076, 0.673762, 0.698029, 0.720118, 0.740726, 0.759671, 0.776946, 0.792833,
0.807374, 0.821010, 0.833534, 0.845241, 0.855524, 0.865562, 0.874567, 0.882831, 0.890769, 0.897939,
0.904373, 0.910646, 0.916297, 0.921589, 0.926512, 0.930906, 0.935179, 0.939074, 0.942627, 0.946009,
0.949096, 0.952030, 0.954555, 0.957157, 0.959669, 0.961500, 0.963481, 0.965353, 0.967387, 0.968580,
0.970311, 0.971567, 0.972948, 0.974114, 0.975264, 0.976287, 0.977213, 0.978142, 0.979057, 0.979662,
0.980460, 0.981100, 0.981771, 0.982459, 0.982837, 0.983199, 0.983956, 0.984156, 0.984682, 0.985026,
0.985364, 0.985645, 0.985954, 0.986241, 0.986484, 0.986686, 0.986895, 0.987043, 0.987287, 0.987388,
0.987541, 0.987669, 0.987755, 0.987877, 0.987903, 0.987996, 0.988022, 0.988091, 0.988104, 0.988114,
0.988114, 0.988104, 0.988091, 0.988022, 0.987996, 0.987903, 0.987877, 0.987755, 0.987669, 0.987541,
0.987388, 0.987287, 0.987043, 0.986895, 0.986686, 0.986484, 0.986240, 0.985954, 0.985645, 0.985364,
0.985020, 0.984676, 0.984156, 0.983956, 0.983199, 0.982837, 0.982459, 0.981771, 0.981100, 0.980460,
0.979662, 0.979057, 0.978142, 0.977213, 0.976287, 0.975264, 0.974114, 0.972947, 0.971567, 0.970311,
0.968580, 0.967387, 0.965353, 0.963481, 0.961501, 0.959671, 0.957157, 0.954555, 0.952030, 0.949096,
0.946009, 0.942627, 0.939074, 0.935179, 0.930906, 0.926512, 0.921589, 0.916297, 0.910646, 0.904373,
0.897939, 0.890769, 0.882831, 0.874567, 0.865562, 0.855524, 0.845241, 0.833534, 0.821010, 0.807374,
0.792833, 0.776946, 0.759671, 0.740726, 0.720118, 0.698029, 0.673762, 0.647076, 0.618484, 0.586857,
0.552454, 0.514788, 0.473670, 0.428391, 0.378968, 0.324661, 0.265024, 0.199651, 0.128570, 0.057563],
[0.062742, 0.139913, 0.216842, 0.287226, 0.351055, 0.408796, 0.460966, 0.508397, 0.551116, 0.589915,
0.625035, 0.657029, 0.685667, 0.712150, 0.735991, 0.757467, 0.777313, 0.795374, 0.811669, 0.826496,
0.839932, 0.852416, 0.863766, 0.874277, 0.883399, 0.892242, 0.900084, 0.907216, 0.914023, 0.920103,
0.925504, 0.930744, 0.935424, 0.939752, 0.943788, 0.947313, 0.950768, 0.953860, 0.956675, 0.959339,
0.961755, 0.964039, 0.965984, 0.967994, 0.969968, 0.971283, 0.972800, 0.974223, 0.975784, 0.976647,
0.977953, 0.978887, 0.979922, 0.980773, 0.981637, 0.982386, 0.983068, 0.983759, 0.984436, 0.984855,
0.985453, 0.985916, 0.986417, 0.986934, 0.987182, 0.987435, 0.988022, 0.988146, 0.988537, 0.988792,
0.989043, 0.989235, 0.989470, 0.989681, 0.989857, 0.990006, 0.990159, 0.990263, 0.990455, 0.990515,
0.990636, 0.990731, 0.990787, 0.990884, 0.990900, 0.990971, 0.990986, 0.991042, 0.991048, 0.991057,
0.991057, 0.991048, 0.991042, 0.990986, 0.990971, 0.990900, 0.990884, 0.990787, 0.990731, 0.990636,
0.990515, 0.990455, 0.990263, 0.990159, 0.990006, 0.989857, 0.989681, 0.989470, 0.989235, 0.989043,
0.988787, 0.988532, 0.988146, 0.988022, 0.987435, 0.987182, 0.986934, 0.986417, 0.985916, 0.985453,
0.984855, 0.984436, 0.983759, 0.983068, 0.982386, 0.981637, 0.980773, 0.979920, 0.978887, 0.977953,
0.976647, 0.975784, 0.974223, 0.972800, 0.971284, 0.969970, 0.967994, 0.965984, 0.964039, 0.961755,
0.959339, 0.956675, 0.953860, 0.950768, 0.947313, 0.943788, 0.939752, 0.935424, 0.930744, 0.925504,
0.920103, 0.914023, 0.907216, 0.900084, 0.892242, 0.883399, 0.874277, 0.863766, 0.852416, 0.839932,
0.826496, 0.811669, 0.795374, 0.777313, 0.757467, 0.735991, 0.712150, 0.685667, 0.657029, 0.625035,
0.589915, 0.551116, 0.508397, 0.460966, 0.408796, 0.351055, 0.287226, 0.216842, 0.139913, 0.062742]]);
beta = beta * DTOR # Tilt from horizontal of the PV modules/panels, in radians
sazm = sazm * DTOR # Surface azimuth of PV module/panels, in radians
    # 1. Calculate and assign various parameters to be used for modeling irradiances
iso_dif = 0.0; circ_dif = 0.0; horiz_dif = 0.0; grd_dif = 0.0; beam = 0.0; # For calling PerezComp to break diffuse into components for zero tilt (horizontal)
ghi, iso_dif, circ_dif, horiz_dif, grd_dif, beam = perezComp(dni, dhi, albedo, zen, 0.0, zen) # Call to get iso_dif for horizontal surface
# print "PEREZCOMP1 = "
# print "ghi = ", ghi
# print "iso_dif = ", iso_dif
# print "circ_dif = ", circ_dif
# print "horiz_dif = ", horiz_dif
# print "grd_dif = ", grd_dif
# print "beam = ", beam
iso_sky_dif = iso_dif; # Isotropic irradiance from sky on horizontal surface, used later for determining isotropic sky component
inc, tiltr, sazmr = sunIncident(0, 90.0, 180.0, 45.0, zen, azm) # For calling PerezComp to break diffuse into components for 90 degree tilt (vertical)
# print "sunIncident 1."
# print "inc = ", inc
# print "tiltr = ", tiltr
# print "sazmr = ", sazmr
vti, iso_dif, circ_dif, horiz_dif, grd_dif, beam = perezComp(dni, dhi, albedo, inc, tiltr, zen) # Call to get horiz_dif for vertical surface
# print "PEREZCOMP1 = "
# print "vti = ", vti
# print "iso_dif = ", iso_dif
# print "circ_dif = ", circ_dif
# print "horiz_dif = ", horiz_dif
# print "grd_dif = ", grd_dif
# print "beam = ", beam
F2DHI = horiz_dif; # Horizon diffuse irradiance on a vertical surface, used later for determining horizon brightening irradiance component
index = -99;
n2 = -99.9;
if (PVfrontSurface == "glass"):
index = 0; # Index to use with 1-degree hemispherical segment AOI correction factor array
n2 = 1.526; # Index of refraction for glass
elif (PVfrontSurface == "ARglass"):
index = 1; # Index to use with 1-degree hemispherical segment AOI correction factor array
n2 = 1.300; # Index of refraction for ARglass
else:
raise Exception("Incorrect text input for PVfrontSurface. Must be glass or ARglass.")
Ro = math.pow((n2 - 1.0) / (n2 + 1.0), 2.0); # Reflectance at normal incidence, Duffie and Beckman p217
    aveGroundGHI = 0.0;        # Average GHI on ground under PV array for cases when x projection exceeds 2*rtr
for i in range (0,100):
aveGroundGHI += frontGroundGHI[i] / 100.0;
# Calculate x,y coordinates of bottom and top edges of PV row in front of desired PV row so that portions of sky and ground viewed by the
    # PV cell may be determined. Origin of x-y axis is the ground point below the lower front edge of the desired PV row. The row in front of
# the desired row is in the negative x direction.
h = math.sin(beta); # Vertical height of sloped PV panel (in PV panel slope lengths)
x1 = math.cos(beta); # Horizontal distance from front of panel to rear of panel (in PV panel slope lengths)
rtr = D + x1; # Row-to-row distance (in PV panel slope lengths)
    PbotX = -rtr;      # x value for point on bottom edge of PV module/panel of the row in front (in PV panel slope lengths)
    PbotY = C;         # y value for point on bottom edge of PV module/panel of the row in front (in PV panel slope lengths)
    PtopX = -D;        # x value for point on top edge of PV module/panel of the row in front (in PV panel slope lengths)
    PtopY = h + C;     # y value for point on top edge of PV module/panel of the row in front (in PV panel slope lengths)
# 2. Calculate diffuse and direct component irradiances for each cell row
for i in range (0, cellRows):
        # Calculate diffuse irradiances and reflected amounts for each cell row over its field of view of 180 degrees,
        # beginning with the angle providing the uppermost view of the sky (j=0)
PcellX = x1 * (i + 0.5) / (cellRows); # x value for location of PV cell
PcellY = C + h * (i + 0.5) / (cellRows); # y value for location of PV cell
elvUP = math.atan((PtopY - PcellY) / (PcellX - PtopX)); # Elevation angle up from PV cell to top of PV module/panel, radians
elvDOWN = math.atan((PcellY - PbotY) / (PcellX - PbotX)); # Elevation angle down from PV cell to bottom of PV module/panel, radians
if (rowType == "first" or rowType == "single"): # 4/19/16 No array in front for these cases
elvUP = 0.0;
elvDOWN = 0.0;
#Console.WriteLine("ElvUp = 0", elvUP / DTOR);
#if (i == 0)
# Console.WriteLine("ElvDown = 0", elvDOWN / DTOR);
if math.isnan(beta):
print( "Beta is Nan")
if math.isnan(elvUP):
print( "elvUP is Nan")
if math.isnan((math.pi - beta - elvUP) / DTOR):
print( "division is Nan")
iStopIso = int(round(np.float64((math.pi - beta - elvUP)) / DTOR)) # Last whole degree in arc range that sees sky, first is 0
#Console.WriteLine("iStopIso = 0", iStopIso);
iHorBright = int(round(max(0.0, 6.0 - elvUP / DTOR))); # Number of whole degrees for which horizon brightening occurs
iStartGrd = int(round((math.pi - beta + elvDOWN) / DTOR)); # First whole degree in arc range that sees ground, last is 180
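        # The 180-degree sweep for the front surface has two contributing bands:
        # j in [0, iStopIso) sees sky (plus horizon brightening near the end of that
        # band) and j in [iStartGrd, 180) sees the ground; the band in between is
        # blocked by the row in front and adds nothing in this function.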
# print "iStopIso = ", iStopIso
# print "iHorBright = ", iHorBright
# print "iStartGrd = ", iStartGrd
        frontGTI.append(0.0)   # Initialize front GTI
frontReflected.append(0.0); # Initialize reflected amount from front
for j in range (0, iStopIso): # Add sky diffuse component and horizon brightening if present
#for (j = 0; j < iStopIso; j++)
frontGTI[i] += 0.5 * (math.cos(j * DTOR) - math.cos((j + 1) * DTOR)) * SegAOIcor[index][j] * iso_sky_dif; # Sky radiation
frontReflected[i] += 0.5 * (math.cos(j * DTOR) - math.cos((j + 1) * DTOR)) * iso_sky_dif * (1.0 - SegAOIcor[index][j] * (1.0 - Ro)); # Reflected radiation from module
if ((iStopIso - j) <= iHorBright): # Add horizon brightening term if seen
                frontGTI[i] += 0.5 * (math.cos(j * DTOR) - math.cos((j + 1) * DTOR)) * SegAOIcor[index][j] * F2DHI / 0.052264;   # 0.052264 = 0.5 * [cos(84) - cos(90)]
frontReflected[i] += 0.5 * (math.cos(j * DTOR) - math.cos((j + 1) * DTOR)) * (F2DHI / 0.052264) * (1.0 - SegAOIcor[index][j] * (1.0 - Ro)); # Reflected radiation from module
#if (i == 0)
# Console.WriteLine("iStartGrd = 0", iStartGrd);
for j in range (iStartGrd, 180): # Add ground reflected component
#(j = iStartGrd; j < 180; j++)
startElvDown = (j - iStartGrd) * DTOR + elvDOWN; # Start and ending down elevations for this j loop
stopElvDown = (j + 1 - iStartGrd) * DTOR + elvDOWN;
projectedX1 = PcellX - np.float64(PcellY) / math.tan(startElvDown); # Projection of ElvDown to ground in -x direction
projectedX2 = PcellX - PcellY / math.tan(stopElvDown);
            actualGroundGHI = 0.0;        # Actual ground GHI from summing array values
#if (i == 0)
# Console.WriteLine("j= 0 projected X1 = 1,6:0.0", j, 100 * projectedX1 / rtr);
if (abs(projectedX1 - projectedX2) > 0.99 * rtr):
if (rowType == "first" or rowType == "single"): # 4/19/16 No array in front for these cases
actualGroundGHI = ghi; # Use total value if projection approximates the rtr
else:
actualGroundGHI = aveGroundGHI; # Use average value if projection approximates the rtr
else:
projectedX1 = 100.0 * projectedX1 / rtr; # Normalize projections and multiply by 100
projectedX2 = 100.0 * projectedX2 / rtr;
if ((rowType == "first" or rowType == "single") and (abs(projectedX1) > rtr or abs(projectedX2) > rtr)): #4/19/2016
actualGroundGHI = ghi; # Use total value if projection > rtr for "first" or "single"
else:
while (projectedX1 < 0.0 or projectedX2 < 0.0): # Offset so array indexes are positive
projectedX1 += 100.0;
projectedX2 += 100.0;
index1 = int(projectedX1); # Determine indexes for use with groundGHI array (truncates values)
index2 = int(projectedX2);
if (index1 == index2):
actualGroundGHI = frontGroundGHI[index1]; # x projections in same groundGHI element
else:
for k in range (index1, index2+1): # Sum the irradiances on the ground if projections are in different groundGHI elements
#for (k = index1; k <= index2; k++)
#Console.WriteLine("index1=0 index2=1", index1,index2);
if (k == index1):
actualGroundGHI += frontGroundGHI[k] * (k + 1.0 - projectedX1);
elif (k == index2):
if (k < 100):
actualGroundGHI += frontGroundGHI[k] * (projectedX2 - k);
else:
actualGroundGHI += frontGroundGHI[k - 100] * (projectedX2 - k);
else:
if (k < 100):
actualGroundGHI += frontGroundGHI[k];
else:
actualGroundGHI += frontGroundGHI[k - 100];
actualGroundGHI /= projectedX2 - projectedX1; # Irradiance on ground in the 1 degree field of view
#if (i == 0)
# Console.WriteLine("j=0 index1=1 index2=2 projectX1=3,5:0.0 projectX2=4,5:0.0 actualGrdGHI=5,6:0.0", j, index1, index2, projectedX1, projectedX2, actualGroundGHI);
frontGTI[i] += 0.5 * (math.cos(j * DTOR) - math.cos((j + 1) * DTOR)) * SegAOIcor[index][j] * actualGroundGHI * albedo; # Add ground reflected component
frontReflected[i] += 0.5 * (math.cos(j * DTOR) - math.cos((j + 1) * DTOR)) * actualGroundGHI * albedo * (1.0 - SegAOIcor[index][j] * (1.0 - Ro)); # Reflected ground radiation from module
#Console.WriteLine("actualGroundGHI = 0,6:0.0 inputGHI = 1,6:0.0 aveArrayGroundGHI = 2,6:0.0", actualGroundGHI, dhi + dni * math.cos(zen), aveGroundGHI);
# End of j loop for adding ground reflected componenet
# Calculate and add direct and circumsolar irradiance components
inc, tiltr, sazmr = sunIncident(0, beta / DTOR, sazm / DTOR, 45.0, zen, azm) # For calling PerezComp to break diffuse into components for 90 degree tilt (vertical)
# print "sunIncident 2."
# print "inc = ", inc
# print "tiltr = ", tiltr
# print "sazmr = ", sazmr
# print " INCIDENT REALY NEEDED for AOI ", inc
gtiAllpc, iso_dif, circ_dif, horiz_dif, grd_dif, beam = perezComp(dni, dhi, albedo, inc, tiltr, zen) # Call to get components for the tilt
# print "PEREZCOMP 2 = "
# print "gtiAllpc = ", vti
# print "iso_dif = ", iso_dif
# print "circ_dif = ", circ_dif
# print "horiz_dif = ", horiz_dif
# print "grd_dif = ", grd_dif
# print "beam = ", beam
cellShade = pvFrontSH * cellRows - i;
if (cellShade > 1.0): # Fully shaded if > 1, no shade if < 0, otherwise fractionally shaded
cellShade = 1.0;
elif (cellShade < 0.0):
cellShade = 0.0;
if (cellShade < 1.0 and inc < math.pi / 2.0): # Cell not shaded entirely and inc < 90 deg
cor = aOIcorrection(n2, inc); # Get AOI correction for beam and circumsolar
frontGTI[i] += (1.0 - cellShade) * (beam + circ_dif) * cor; # Add beam and circumsolar radiation
#frontReflected[i] += (1.0 - cellShade) * (beam + circ_dif) * (1.0 - cor * (1.0 - Ro)); # Reflected beam and circumsolar radiation from module
# End of for i = 0; i < cellRows loop
return aveGroundGHI, frontGTI, frontReflected;
# End of GetFrontSurfaceIrradiances
def getGroundShadeFactors(rowType, beta, C, D, elv, azm, sazm):
"""
This method determines if the ground is shaded from direct beam radiation
for points on the ground from the leading edge of one row of PV panels to
the leading edge of the next row of PV panels behind it. This row-to-row
dimension is divided into 100 ground segments and a ground shade factor is
returned for each ground segment, with values of 1 for shaded segments and
values of 0 for non shaded segments. The fractional amounts of shading of
the front and back surfaces of the PV panel are also returned. 8/20/2015
4/18/2016 - Modified to account for different row types. Because the ground
factors may now be different depending on row, they are calculated for the
row-to-row dimension to the rear of the leading module edge and to the
front of the leading edge. Also returned is the maximum shadow length
projected to the front or rear from the front of the module row
Parameters
----------
rowType : str
"first", "interior", "last", or "single"
beta
Tilt from horizontal of the PV modules/panels (deg)
C
Ground clearance of PV panel (in PV panel slope lengths)
D
Horizontal distance between rows of PV panels (in PV panel slope
lengths)
elv
Sun elevation (in radians)
azm
Sun azimuth (in radians)
sazm
Surface azimuth of PV panels (deg)
Returns
-------
pvFrontSH : numeric
Decimal fraction of the front surface of the PV panel that is shaded,
0.0 to 1.0
pvBackSH : numeric
Decimal fraction of the back surface of the PV panel that is shaded,
0.0 to 1.0
rearGroundSH : array of size [100]
Ground shade factors for ground segments to the rear, 0 = not shaded,
1 = shaded
frontGroundSH : array of size [100]
Ground shade factors for ground segments to the front, 0 = not shaded,
1 = shaded
maxShadow : numeric
Maximum shadow length projected to the front(-) or rear (+) from the
front of the module row (in PV panel slope lengths), only used later
for rowTypes other than "interior"
"""
rearGroundSH = []
frontGroundSH = []
beta = beta * DTOR # Tilt from horizontal of the PV modules/panels, in radians
    sazm = sazm * DTOR      # Surface azimuth of PV module/panels, in radians
h = math.sin(beta); # Vertical height of sloped PV panel (in PV panel slope lengths)
x1 = math.cos(beta); # Horizontal distance from front of panel to rear of panel (in PV panel slope lengths)
rtr = D + x1; # Row-to-row distance (in PV panel slope lengths)
# Divide the row-to-row spacing into 100 intervals for calculating ground shade factors
delta = rtr / 100.0;
    x = -delta / 2.0;    # Initialize horizontal dimension x to provide midpoint of intervals
    Lh = (h / math.tan(elv)) * math.cos(sazm - azm);         # Horizontal length of shadow perpendicular to row from top of module to bottom of module
    Lhc = ((h + C) / math.tan(elv)) * math.cos(sazm - azm);  # Horizontal length of shadow perpendicular to row from top of module to ground level
    Lc = (C / math.tan(elv)) * math.cos(sazm - azm);         # Horizontal length of shadow perpendicular to row from bottom of module to ground level
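    # Shadow-length geometry: a point at height y above the ground casts a shadow of
    # horizontal length (y / tan(elv)) * cos(sazm - azm) measured perpendicular to the
    # row (positive toward the rear). Illustrative numbers: for h = 0.5, C = 0.5,
    # elv = 40 deg and the sun normal to the row, Lh ~ 0.60, Lhc ~ 1.19, Lc ~ 0.60.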
ss1 = 0.0; se1 = 0.0; ss2 = 0.0; se2 = 0.0; # Initialize shading start (s) and end (e) to zeros for two potential shading segments
pvFrontSH = 0.0;
pvBackSH = 0.0;
if (rowType == "interior"):
if (Lh > D): # Front side of PV module partially shaded, back completely shaded, ground completely shaded
pvFrontSH = (Lh - D) / (Lh + x1);
pvBackSH = 1.0;
ss1 = 0.0; # Ground shaded from 0.0 to rtr
se1 = rtr;
elif (Lh < -(rtr + x1)): # Back side of PV module partially shaded, front completely shaded, ground completely shaded
pvFrontSH = 1.0;
pvBackSH = (Lh + rtr + x1) / (Lh + x1);
ss1 = 0.0; # Ground shaded from 0.0 to rtr
se1 = rtr;
else: # Ground is partially shaded (I assume)
if (Lhc >= 0.0): # Shadow to rear of row, module front unshaded, back shaded
pvFrontSH = 0.0;
pvBackSH = 1.0;
Ss = Lc; # Shadow starts at Lc
Se = Lhc + x1; # Shadow ends here
while (Ss > rtr):
Ss -= rtr; # Put shadow in correct rtr space if needed
Se -= rtr;
ss1 = Ss;
se1 = Se;
if (se1 > rtr): # then need to use two shade areas
se1 = rtr;
ss2 = 0.0;
se2 = Se - rtr;
if (se2 > ss1):
# This would mean ground completely shaded, does this occur?
ss1 = 0.0; # Ground shaded from 0.0 to rtr
se1 = rtr;
else: # Shadow to front of row, either front or back might be shaded, depending on tilt and other factors
Ss = 0.0; # Shadow starts at Lc, initialize
Se = 0.0; # Shadow ends here, initialize
if (Lc < Lhc + x1):
pvFrontSH = 0.0;
pvBackSH = 1.0;
Ss = Lc; # Shadow starts at Lc
Se = Lhc + x1; # Shadow ends here
else:
pvFrontSH = 1.0;
pvBackSH = 0.0;
Ss = Lhc + x1; # Shadow starts at Lhc + x1
Se = Lc; # Shadow ends here
while (Ss < 0.0):
Ss += rtr; # Put shadow in correct rtr space if needed
Se += rtr;
ss1 = Ss;
se1 = Se;
if (se1 > rtr): # then need to use two shade areas
se1 = rtr;
ss2 = 0.0;
se2 = Se - rtr;
if (se2 > ss1):
# This would mean ground completely shaded, does this occur?
ss1 = 0.0; # Ground shaded from 0.0 to rtr
se1 = rtr;
# End of if (Lh > D) else branching
delta = rtr / 100.0;
        x = -delta / 2.0;    # Initialize horizontal dimension x to provide midpoint of intervals
#for (i = 0; i <= 99; i++)
for i in range(0,100):
x += delta;
#if ((x >= ss1 && x < se1) || (x >= ss2 && x < se2)):
if ((x >= ss1 and x < se1) or (x >= ss2 and x < se2)):
rearGroundSH.append(1); # x within a shaded interval, set groundSH to 1 to indicate shaded
frontGroundSH.append(1); # same for both front and rear
else:
                rearGroundSH.append(0);      # x not within a shaded interval, set groundSH to 0 to indicate not shaded, i.e. sunny
frontGroundSH.append(0); # same for both front and rear
#Console.WriteLine("x = 0,6:0.0000 groundSH = 1", x, groundSH[i]);
# End of if row type == "interior"
elif (rowType == "first"):
if (Lh > 0.0): # Sun is on front side of PV module
pvFrontSH = 0.0;
pvBackSH = 1.0;
ss1 = Lc; # Ground shaded from shadow of lower edge
se1 = x1 + Lhc; # to shadow of upper edge
# End of if sun on front side of PV module
elif (Lh < -(rtr + x1)): # Back side of PV module partially shaded from row to rear, front completely shaded, ground completely shaded
pvFrontSH = 1.0;
pvBackSH = (Lh + rtr + x1) / (Lh + x1);
ss1 = -rtr; # Ground shaded from -rtr to rtr
se1 = rtr;
# End of if back side of PV module partially shaded, front completely shaded, ground completely shaded
else: # Shadow to frontside of row, either front or back might be shaded, depending on tilt and other factors
if (Lc < Lhc + x1):
pvFrontSH = 0.0;
pvBackSH = 1.0;
ss1 = Lc; # Shadow starts at Lc
se1 = Lhc + x1; # Shadow ends here
else:
pvFrontSH = 1.0;
pvBackSH = 0.0;
ss1 = Lhc + x1; # Shadow starts at Lhc + x1
se1 = Lc; # Shadow ends here
# End of shadow to front of row
delta = rtr / 100.0;
        x = -delta / 2.0;    # Initialize horizontal dimension x to provide midpoint of intervals
for i in range(0,100):
x += delta;
if (x >= ss1 and x < se1):
rearGroundSH.append(1) # x within a shaded interval, set groundSH to 1 to indicate shaded
else:
                rearGroundSH.append(0)  # x not within a shaded interval, set groundSH to 0 to indicate not shaded, i.e. sunny
        x = -rtr - delta / 2.0;    # Initialize horizontal dimension x to provide midpoint of intervals for front interval
for i in range(0,100):
x += delta;
if (x >= ss1 and x < se1):
frontGroundSH.append(1) # x within a shaded interval, set groundSH to 1 to indicate shaded
else:
                frontGroundSH.append(0)  # x not within a shaded interval, set groundSH to 0 to indicate not shaded, i.e. sunny
# End of if row type == "first"
elif (rowType == "last"):
if (Lh > D): # Front side of PV module partially shaded, back completely shaded, ground completely shaded
pvFrontSH = (Lh - D) / (Lh + x1);
pvBackSH = 1.0;
ss1 = -rtr; # Ground shaded from -rtr to rtr
se1 = rtr;
else: # Shadow to frontside of row, either front or back might be shaded, depending on tilt and other factors
if (Lc < Lhc + x1):
pvFrontSH = 0.0;
pvBackSH = 1.0;
ss1 = Lc; # Shadow starts at Lc
se1 = Lhc + x1; # Shadow ends here
else:
pvFrontSH = 1.0;
pvBackSH = 0.0;
ss1 = Lhc + x1; # Shadow starts at Lhc + x1
se1 = Lc; # Shadow ends here
# End of shadow to front of row
delta = rtr / 100.0;
        x = -delta / 2.0;    # Initialize horizontal dimension x to provide midpoint of intervals
for i in range(0,100):
x += delta;
if (x >= ss1 and x < se1):
rearGroundSH.append(1); # x within a shaded interval, set groundSH to 1 to indicate shaded
else:
                rearGroundSH.append(0);  # x not within a shaded interval, set groundSH to 0 to indicate not shaded, i.e. sunny
        x = -rtr - delta / 2.0;    # Initialize horizontal dimension x to provide midpoint of intervals for front interval
for i in range(0,100):
x += delta;
if (x >= ss1 and x < se1):
frontGroundSH.append(1); # x within a shaded interval, set groundSH to 1 to indicate shaded
else:
                frontGroundSH.append(0);  # x not within a shaded interval, set groundSH to 0 to indicate not shaded, i.e. sunny
# End of if row type == "last"
elif (rowType == "single"):
if (Lh > 0.0): # Shadow to the rear
pvFrontSH = 0.0;
pvBackSH = 1.0;
ss1 = Lc; # Ground shaded from shadow of lower edge
se1 = x1 + Lhc; # to shadow of upper edge
# End of if sun on front side of PV module
else: # Shadow to frontside of row, either front or back might be shaded, depending on tilt and other factors
if (Lc < Lhc + x1):
pvFrontSH = 0.0;
pvBackSH = 1.0;
ss1 = Lc; # Shadow starts at Lc
se1 = Lhc + x1; # Shadow ends here
else:
pvFrontSH = 1.0;
pvBackSH = 0.0;
ss1 = Lhc + x1; # Shadow starts at Lhc + x1
se1 = Lc; # Shadow ends here
# End of shadow to front of row
delta = rtr / 100.0;
        x = -delta / 2.0;    # Initialize horizontal dimension x to provide midpoint of intervals
for i in range(0,100):
x += delta;
if (x >= ss1 and x < se1):
rearGroundSH.append(1); # x within a shaded interval, set groundSH to 1 to indicate shaded
else:
                rearGroundSH.append(0);   # x not within a shaded interval, set groundSH to 0 to indicate not shaded, i.e. sunny
        x = -rtr - delta / 2.0;   # Initialize horizontal dimension x to provide midpoint of intervals for front interval
for i in range(0,100):
x += delta;
if (x >= ss1 and x < se1):
frontGroundSH.append(1); # x within a shaded interval, set groundSH to 1 to indicate shaded
else:
                frontGroundSH.append(0);   # x not within a shaded interval, set groundSH to 0 to indicate not shaded, i.e. sunny
# End of if row type == "single"
else:
print ("ERROR: Incorrect row type not passed to function GetGroundShadedFactors ");
if (abs(ss1) > abs(se1)): # Maximum shadow length projected from the front of the PV module row
maxShadow = ss1;
else:
maxShadow = se1;
#Console.WriteLine("elv = 0,6:0.00 azm = 1,6:0.00 sazm = 2,6:0.00", elv * 180.0 / math.pi, azm * 180.0 / math.pi, sazm * 180.0 / math.pi);
#Console.WriteLine("ss1 = 0,6:0.0000 se1 = 1,6:0.0000 ss2 = 2,6:0.0000 se2 = 3,6:0.0000 rtr = 4,6:0.000", ss1, se1, ss2, se2, rtr);
#Console.WriteLine("pvFrontSH = 0,6:0.00 pvBackSH = 1,6:0.00", pvFrontSH, pvBackSH);
# End of GetGroundShadedFactors
#print "rearGroundSH", rearGroundSH[0]
return pvFrontSH, pvBackSH, maxShadow, rearGroundSH, frontGroundSH;
# End of getGroundShadeFactors
def getSkyConfigurationFactors(rowType, beta, C, D):
"""
This method determines the sky configuration factors for points on the
ground from the leading edge of one row of PV panels to the leading edge of
the next row of PV panels behind it. This row-to-row dimension is divided
into 100 ground segments and a sky configuration factor is returned for
each ground segment. The sky configuration factor represents the fraction
of the isotropic diffuse sky radiation (unobstructed) that is present on
the ground when partially obstructed by the rows of PV panels. The
equations follow that on pages in the notebook dated 8/12/2015. 8/20/2015
    4/15/2016 Modified for calculations other than just the interior rows. Row
    type is identified with the string `rowType`, with the possible values:
* first = first row of the array
* interior = interior row of array
* last = last row of the array
* single = a single row array
Because the sky configuration factors may now be different depending on
row, they are calculated for the row-to-row dimension to the rear of the
leading module edge and to the front of the leading edge.
Parameters
----------
rowType : str
"first", "interior", "last", or "single"
beta : float
Tilt from horizontal of the PV modules/panels (deg)
C : float
Ground clearance of PV panel (in PV module/panel slope lengths)
D : float
Horizontal distance between rows of PV panels (in PV module/panel slope
lengths)
Returns
-------
    rearSkyConfigFactors : array of size [100]
        Sky configuration factors to rear of leading PV module edge (decimal
        fraction)
    frontSkyConfigFactors : array of size [100]
        Sky configuration factors to front of leading PV module edge (decimal
        fraction)
Notes
-----
The horizontal distance between rows, `D`, is from the back edge of one row
to the front edge of the next, and it is not the row-to-row spacing.
"""
rearSkyConfigFactors = []
frontSkyConfigFactors = []
# Tilt from horizontal of the PV modules/panels, in radians
beta = beta * DTOR
# Vertical height of sloped PV panel (in PV panel slope lengths)
h = math.sin(beta)
# Horizontal distance from front of panel to rear of panel (in PV panel
# slope lengths)
x1 = math.cos(beta)
rtr = D + x1 # Row-to-row distance (in PV panel slope lengths)
# Forced fix for case of C = 0
# FIXME: for some reason the Config Factors go from 1 to 2 and not 0 to 1.
# TODO: investigate why this is happening in the code.
if C==0:
C=0.0000000001
if C < 0:
LOGGER.error(
"Height is below ground level. Function GetSkyConfigurationFactors"
" will continue but results might be unreliable")
# Divide the row-to-row spacing into 100 intervals and calculate
# configuration factors
delta = rtr / 100.0
if (rowType == "interior"):
# Initialize horizontal dimension x to provide midpoint of intervals
x = -delta / 2.0
for i in range(0,100):
x += delta
# <--rtr=x1+D--><--rtr=x1+D--><--rtr=x1+D-->
# |\ |\ |\ |\
# | \ ` | \ | \ /| \
# h \ ` h \ h \ / h \
# | \ ` | \ | \ / | \
# |_x1_\____D__`|_x1_\____D___|_x1_\_/_D____|_x1_\_
# | ` <------x-----/|
# C ` /
# | angA ` / angB
# *------------------------`-/---------------------
# x
# use ATAN2: 4-quadrant tangent instead of ATAN
# check 2 rows away
angA = math.atan2(h + C, (2.0 * rtr + x1 - x))
angB = math.atan2(C, (2.0 * rtr - x))
beta1 = max(angA, angB)
# check 1 rows away
angA = math.atan2(h + C, (rtr + x1 - x))
angB = math.atan2(C, (rtr - x))
beta2 = min(angA, angB)
# check 0 rows away
beta3 = max(angA, angB)
beta4 = math.atan2(h + C, (x1 - x))
beta5 = math.atan2(C, (-x))
beta6 = math.atan2(h + C, (-D - x))
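            # Each visible sky band between elevation angles b_lo and b_hi (measured
            # from the ground point, 0 = horizon toward the rear, pi = horizon toward
            # the front) contributes a 2-D view factor of 0.5 * (cos(b_lo) - cos(b_hi)),
            # i.e. the configuration factor from a point to an infinitely long strip.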
sky1 =0; sky2 =0; sky3 =0
if (beta2 > beta1):
sky1 = 0.5 * (math.cos(beta1) - math.cos(beta2))
if (beta4 > beta3):
sky2 = 0.5 * (math.cos(beta3) - math.cos(beta4))
if (beta6 > beta5):
sky3 = 0.5 * (math.cos(beta5) - math.cos(beta6))
skyAll = sky1 + sky2 + sky3
# Save as arrays of values, same for both to the rear and front
rearSkyConfigFactors.append(skyAll)
frontSkyConfigFactors.append(skyAll)
# End of if "interior"
elif (rowType == "first"):
# RearSkyConfigFactors don't have a row in front, calculation of sky3
# changed, beta6 = 180 degrees
x = -delta / 2.0; # Initialize horizontal dimension x to provide midpoint of intervals
for i in range(0,100):
x += delta;
angA = math.atan((h + C) / (2.0 * rtr + x1 - x));
if (angA < 0.0):
angA += math.pi;
angB = math.atan(C / (2.0 * rtr - x));
if (angB < 0.0):
angB += math.pi;
beta1 = max(angA, angB);
angA = math.atan((h + C) / (rtr + x1 - x));
if (angA < 0.0):
angA += math.pi;
angB = math.atan(C / (rtr - x));
if (angB < 0.0):
angB += math.pi;
beta2 = min(angA, angB);
beta3 = max(angA, angB);
beta4 = math.atan((h + C) / (x1 - x));
if (beta4 < 0.0):
beta4 += math.pi;
beta5 = math.atan(C / (-x));
if (beta5 < 0.0):
beta5 += math.pi;
beta6 = math.pi;
sky1 = 0.0; sky2 = 0.0; sky3 = 0.0;
if (beta2 > beta1):
sky1 = 0.5 * (math.cos(beta1) - math.cos(beta2));
if (beta4 > beta3):
sky2 = 0.5 * (math.cos(beta3) - math.cos(beta4));
if (beta6 > beta5):
sky3 = 0.5 * (math.cos(beta5) - math.cos(beta6));
skyAll = sky1 + sky2 + sky3;
rearSkyConfigFactors.append(skyAll); # Save as arrays of values
#Console.WriteLine("0,5:0.000,1,5:0.000,2,5:0.000,3,5:0.000,4,5:0.000", x, sky1, sky2, sky3, skyAll);
#sw.WriteLine("0,5:0.000,1,5:0.000,2,5:0.000,3,5:0.000,4,5:0.000", x, sky1, sky2, sky3, skyAll);
# frontSkyConfigFactors don't have a row in front, calculation of sky3 included as part of revised sky2,
# beta 4 set to 180 degrees
x = -delta / 2.0; # Initialize horizontal dimension x to provide midpoint of intervals
for i in range(0,100):
x += delta;
angA = math.atan((h + C) / (2.0 * rtr + x1 - x));
if (angA < 0.0):
angA += math.pi;
angB = math.atan(C / (2.0 * rtr - x));
if (angB < 0.0):
angB += math.pi;
beta1 = max(angA, angB);
angA = math.atan((h + C) / (rtr + x1 - x));
if (angA < 0.0):
angA += math.pi;
angB = math.atan(C / (rtr - x));
if (angB < 0.0):
angB += math.pi;
beta2 = min(angA, angB);
beta3 = max(angA, angB);
beta4 = math.pi;
sky1 = 0.0; sky2 = 0.0;
if (beta2 > beta1):
sky1 = 0.5 * (math.cos(beta1) - math.cos(beta2));
if (beta4 > beta3):
sky2 = 0.5 * (math.cos(beta3) - math.cos(beta4));
skyAll = sky1 + sky2;
frontSkyConfigFactors.append(skyAll); # Save as arrays of values
#Console.WriteLine("0,5:0.000,1,5:0.000,2,5:0.000,3,5:0.000,4,5:0.000", x, sky1, sky2, sky3, skyAll);
#sw.WriteLine("0,5:0.000,1,5:0.000,2,5:0.000,3,5:0.000,4,5:0.000", x, sky1, sky2, sky3, skyAll);
# End of if "first"
elif (rowType == "last"):
# RearSkyConfigFactors don't have a row to the rear, combine sky1 into sky 2, set beta 3 = 0.0
x = -delta / 2.0; # Initialize horizontal dimension x to provide midpoint of intervals
for i in range(0,100):
x += delta;
beta3 = 0.0;
beta4 = math.atan((h + C) / (x1 - x));
if (beta4 < 0.0):
beta4 += math.pi;
beta5 = math.atan(C / (-x));
if (beta5 < 0.0):
beta5 += math.pi;
beta6 = math.atan((h + C) / (-D - x));
if (beta6 < 0.0):
beta6 += math.pi;
sky2 = 0.0; sky3 = 0.0;
if (beta4 > beta3):
sky2 = 0.5 * (math.cos(beta3) - math.cos(beta4));
if (beta6 > beta5):
sky3 = 0.5 * (math.cos(beta5) - math.cos(beta6));
skyAll = sky2 + sky3;
rearSkyConfigFactors.append(skyAll); # Save as arrays of values
#Console.WriteLine("0,5:0.000,1,5:0.000,2,5:0.000,3,5:0.000,4,5:0.000", x, sky1, sky2, sky3, skyAll);
#sw.WriteLine("0,5:0.000,1,5:0.000,2,5:0.000,3,5:0.000,4,5:0.000", x, sky1, sky2, sky3, skyAll);
# FrontSkyConfigFactors have beta1 = 0.0
x = -delta / 2.0; # Initialize horizontal dimension x to provide midpoint of intervals
for i in range(0,100):
x += delta;
angA = math.atan((h + C) / (2.0 * rtr + x1 - x));
if (angA < 0.0):
angA += math.pi;
angB = math.atan(C / (2.0 * rtr - x));
if (angB < 0.0):
angB += math.pi;
beta1 = max(angA, angB);
beta1 = 0.0;
angA = math.atan((h + C) / (rtr + x1 - x));
if (angA < 0.0):
angA += math.pi;
angB = math.atan(C / (rtr - x));
if (angB < 0.0):
angB += math.pi;
beta2 = min(angA, angB);
beta3 = max(angA, angB);
beta4 = math.atan((h + C) / (x1 - x));
if (beta4 < 0.0):
beta4 += math.pi;
beta5 = math.atan(C / (-x));
if (beta5 < 0.0):
beta5 += math.pi;
beta6 = math.atan((h + C) / (-D - x));
if (beta6 < 0.0):
beta6 += math.pi;
sky1 = 0.0; sky2 = 0.0; sky3 = 0.0;
if (beta2 > beta1):
sky1 = 0.5 * (math.cos(beta1) - math.cos(beta2));
if (beta4 > beta3):
sky2 = 0.5 * (math.cos(beta3) - math.cos(beta4));
if (beta6 > beta5):
sky3 = 0.5 * (math.cos(beta5) - math.cos(beta6));
skyAll = sky1 + sky2 + sky3;
frontSkyConfigFactors.append(skyAll); # Save as arrays of values,
#Console.WriteLine("0,5:0.000,1,5:0.000,2,5:0.000,3,5:0.000,4,5:0.000", x, sky1, sky2, sky3, skyAll);
#sw.WriteLine("0,5:0.000,1,5:0.000,2,5:0.000,3,5:0.000,4,5:0.000", x, sky1, sky2, sky3, skyAll);
# End of if "last" row
elif (rowType == "single"):
        # RearSkyConfigFactors don't have a row to the rear or front, combine sky1 into sky 2, set beta 3 = 0.0,
# for sky3, beta6 = 180.0.
x = -delta / 2.0; # Initialize horizontal dimension x to provide midpoint of intervals
for i in range(0,100):
x += delta;
beta3 = 0.0;
beta4 = math.atan((h + C) / (x1 - x));
if (beta4 < 0.0):
beta4 += math.pi;
beta5 = math.atan(C / (-x));
if (beta5 < 0.0):
beta5 += math.pi;
beta6 = math.pi;
sky2 = 0.0; sky3 = 0.0;
if (beta4 > beta3):
sky2 = 0.5 * (math.cos(beta3) - math.cos(beta4));
if (beta6 > beta5):
sky3 = 0.5 * (math.cos(beta5) - math.cos(beta6));
skyAll = sky2 + sky3;
rearSkyConfigFactors.append(skyAll); # Save as arrays of values
#Console.WriteLine("0,5:0.000,1,5:0.000,2,5:0.000,3,5:0.000,4,5:0.000", x, sky1, sky2, sky3, skyAll);
#sw.WriteLine("0,5:0.000,1,5:0.000,2,5:0.000,3,5:0.000,4,5:0.000", x, sky1, sky2, sky3, skyAll);
# FrontSkyConfigFactors have only a row to the rear, combine sky3 into sky2, set beta1 = 0, beta4 = 180
x = -delta / 2.0; # Initialize horizontal dimension x to provide midpoint of intervals
for i in range(0,100):
x += delta;
angA = math.atan((h + C) / (2.0 * rtr + x1 - x));
if (angA < 0.0):
angA += math.pi;
angB = math.atan(C / (2.0 * rtr - x));
if (angB < 0.0):
angB += math.pi;
beta1 = max(angA, angB);
beta1 = 0.0;
angA = math.atan((h + C) / (rtr + x1 - x));
if (angA < 0.0):
angA += math.pi;
angB = math.atan(C / (rtr - x));
if (angB < 0.0):
angB += math.pi;
beta2 = min(angA, angB);
beta3 = max(angA, angB);
beta4 = math.pi;
sky1 = 0.0; sky2 = 0.0;
if (beta2 > beta1):
sky1 = 0.5 * (math.cos(beta1) - math.cos(beta2));
if (beta4 > beta3):
sky2 = 0.5 * (math.cos(beta3) - math.cos(beta4));
skyAll = sky1 + sky2;
frontSkyConfigFactors.append(skyAll); # Save as arrays of values
#Console.WriteLine("0,5:0.000,1,5:0.000,2,5:0.000,3,5:0.000,4,5:0.000", x, sky1, sky2, sky3, skyAll);
#sw.WriteLine("0,5:0.000,1,5:0.000,2,5:0.000,3,5:0.000,4,5:0.000", x, sky1, sky2, sky3, skyAll);
# End of if "single"
else:
print("ERROR: Incorrect row type not passed to function GetSkyConfigurationFactors ");
return rearSkyConfigFactors, frontSkyConfigFactors;
# End of GetSkyConfigurationFactors
def rowSpacing(beta, sazm, lat, lng, tz, hour, minute):
"""
This method determines the horizontal distance D between rows of PV panels
(in PV module/panel slope lengths) for no shading on December 21 (north
hemisphere) June 21 (south hemisphere) for a module tilt angle beta and
surface azimuth sazm, and a given latitude, longitude, and time zone and
for the time passed to the method (typically 9 am).
(Ref: the row-to-row spacing is then ``D + cos(beta)``)
8/21/2015
Parameters
----------
beta : double
Tilt from horizontal of the PV modules/panels (deg)
sazm : double
Surface azimuth of the PV modules/panels (deg)
lat : double
Site latitude (deg)
lng : double
Site longitude (deg)
tz : double
Time zone (hrs)
hour : int
hour for no shading criteria
minute: double
minute for no shading
Returns
-------
D : numeric
Horizontal distance between rows of PV panels (in PV panel slope
lengths)
"""
beta = beta * DTOR # Tilt from horizontal of the PV modules/panels, in radians
    sazm = sazm * DTOR # Surface azimuth of PV module/panels, in radians
if lat >= 0:
[azm, zen, elv, dec, sunrise, sunset, Eo, tst] = solarPos (2014, 12, 21, hour, minute, lat, lng, tz)
else:
[azm, zen, elv, dec, sunrise, sunset, Eo, tst] = solarPos (2014, 6, 21, hour, minute, lat, lng, tz)
tst = 8.877 ##DLL Forced value
minute -= 60.0 * (tst - hour); # Adjust minute so sun position is calculated for a tst equal to the
# time passed to the function
if lat >= 0:
[azm, zen, elv, dec, sunrise, sunset, Eo, tst] = solarPos(2014, 12, 21, hour, minute, lat, lng, tz)
else:
[azm, zen, elv, dec, sunrise, sunset, Eo, tst] = solarPos(2014, 6, 21, hour, minute, lat, lng, tz)
# Console.WriteLine("tst = {0} azm = {1} elv = {2}", tst, azm * 180.0 / Math.PI, elv * 180.0 / Math.PI);
D = math.cos(sazm - azm) * math.sin(beta) / math.tan(elv)
return D
# End of RowSpacing
def trackingBFvaluescalculator(beta, hub_height, r2r):
'''
1-axis tracking helper file
Parameters
----------
beta : float
Tilt from horizontal of the PV modules/panels, in radians
hub_height : float
tracker hub height
r2r : float
Row-to-row distance (in PV panel slope lengths)
Returns
-------
C : float
ground clearance of PV panel
D : float
row-to-row distance (each in PV panel slope lengths)
'''
# Created on Tue Jun 13 08:01:56 2017
# @author: sayala
beta = beta * DTOR # Tilt from horizontal of the PV modules/panels, in radians
x1 = math.cos(beta); # Horizontal distance from front of panel to rear of panel (in PV panel slope lengths)
#rtr = D + x1; # Row-to-row distance (in PV panel slope lengths)
    D = r2r - x1;              # Horizontal distance D between rows (in PV panel slope lengths)
    hm = 0.5*math.sin(beta);   # Vertical distance from the panel's middle (rotation axis) to its top edge (in PV panel slope lengths)
    #C = 0.5+Cv-hm             # Ground clearance of PV panel (in PV panel slope lengths).
    C = hub_height - hm        # Ground clearance of the panel's lower edge: hub height minus half the panel's vertical extent, assuming rotation about the middle axis
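    # Illustrative numbers: for beta = 20 deg, hub_height = 1.5 and r2r = 2.0 (all in
    # slope lengths), x1 = cos(20 deg) ~ 0.940, D ~ 1.060, hm = 0.5*sin(20 deg) ~ 0.171
    # and C ~ 1.329.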
return C, D
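

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only). The inputs below (tilt, clearance,
# row gap, sun position, irradiances, albedo, number of cell rows) are
# arbitrary example values, and the per-segment ground GHI arrays are a crude
# stand-in for the values the full bifacialvf simulation derives from the sky
# configuration and ground shade factors.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    rowType = "interior"                  # interior row of a large array (assumed)
    beta_deg, sazm_deg = 30.0, 180.0      # module tilt and surface azimuth (deg)
    C_, D_ = 0.5, 0.5                     # clearance and row gap (slope lengths)
    cellRows = 6                          # number of cell rows modeled along the slope
    dni_, dhi_, albedo_ = 800.0, 150.0, 0.2
    elv_ = 55.0 * DTOR                    # sun elevation (rad), assumed
    zen_ = 90.0 * DTOR - elv_             # sun zenith (rad)
    azm_ = 180.0 * DTOR                   # sun azimuth (rad), assumed

    rearSCF, frontSCF = getSkyConfigurationFactors(rowType, beta_deg, C_, D_)
    pvFrontSH, pvBackSH, maxShadow, rearSH, frontSH = getGroundShadeFactors(
        rowType, beta_deg, C_, D_, elv_, azm_, sazm_deg)

    # Crude per-segment ground GHI: diffuse scaled by the sky configuration
    # factor plus direct beam where the segment is unshaded (simplified).
    rearGroundGHI = [dhi_ * rearSCF[i] + (0.0 if rearSH[i] else dni_ * math.cos(zen_))
                     for i in range(100)]
    frontGroundGHI = [dhi_ * frontSCF[i] + (0.0 if frontSH[i] else dni_ * math.cos(zen_))
                      for i in range(100)]

    aveFrontGroundGHI, frontGTI, frontReflected = getFrontSurfaceIrradiances(
        rowType, maxShadow, "glass", beta_deg, sazm_deg, dni_, dhi_, C_, D_,
        albedo_, zen_, azm_, cellRows, pvFrontSH, frontGroundGHI)
    backGTI, aveRearGroundGHI = getBackSurfaceIrradiances(
        rowType, maxShadow, "glass", beta_deg, sazm_deg, dni_, dhi_, C_, D_,
        albedo_, zen_, azm_, cellRows, pvBackSH, rearGroundGHI, frontGroundGHI,
        frontReflected)

    print("front GTI per cell row:", [round(v, 1) for v in frontGTI])
    print("back GTI per cell row: ", [round(v, 1) for v in backGTI])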
| [((18, 9, 18, 36), 'logging.getLogger', 'logging.getLogger', ({(18, 27, 18, 35): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((152, 55, 153, 40), 'sun.perezComp', 'perezComp', ({(153, 8, 153, 11): 'dni', (153, 13, 153, 16): 'dhi', (153, 18, 153, 24): 'albedo', (153, 26, 153, 29): 'zen', (153, 31, 153, 34): '0.0', (153, 36, 153, 39): 'zen'}, {}), '(dni, dhi, albedo, zen, 0.0, zen)', False, 'from sun import solarPos, sunIncident, perezComp, aOIcorrection\n'), ((161, 24, 161, 67), 'sun.sunIncident', 'sunIncident', ({(161, 36, 161, 37): '0', (161, 39, 161, 43): '90.0', (161, 45, 161, 50): '180.0', (161, 52, 161, 56): '45.0', (161, 58, 161, 61): 'zen', (161, 63, 161, 66): 'azm'}, {}), '(0, 90.0, 180.0, 45.0, zen, azm)', False, 'from sun import solarPos, sunIncident, perezComp, aOIcorrection\n'), ((164, 55, 165, 42), 'sun.perezComp', 'perezComp', ({(165, 8, 165, 11): 'dni', (165, 13, 165, 16): 'dhi', (165, 18, 165, 24): 'albedo', (165, 26, 165, 29): 'inc', (165, 31, 165, 36): 'tiltr', (165, 38, 165, 41): 'zen'}, {}), '(dni, dhi, albedo, inc, tiltr, zen)', False, 'from sun import solarPos, sunIncident, perezComp, aOIcorrection\n'), ((189, 9, 189, 47), 'math.pow', 'math.pow', ({(189, 18, 189, 41): '(n2 - 1.0) / (n2 + 1.0)', (189, 43, 189, 46): '2.0'}, {}), '((n2 - 1.0) / (n2 + 1.0), 2.0)', False, 'import math\n'), ((201, 8, 201, 22), 'math.sin', 'math.sin', ({(201, 17, 201, 21): 'beta'}, {}), '(beta)', False, 'import math\n'), ((202, 9, 202, 23), 'math.cos', 'math.cos', ({(202, 18, 202, 22): 'beta'}, {}), '(beta)', False, 'import math\n'), ((522, 55, 522, 97), 'sun.perezComp', 'perezComp', ({(522, 65, 522, 68): 'dni', (522, 70, 522, 73): 'dhi', (522, 75, 522, 81): 'albedo', (522, 83, 522, 86): 'zen', (522, 88, 522, 91): '0.0', (522, 93, 522, 96): 'zen'}, {}), '(dni, dhi, albedo, zen, 0.0, zen)', False, 'from sun import solarPos, sunIncident, perezComp, aOIcorrection\n'), ((533, 24, 533, 67), 'sun.sunIncident', 'sunIncident', ({(533, 36, 533, 37): '0', (533, 39, 533, 43): '90.0', (533, 45, 533, 50): '180.0', (533, 52, 533, 56): '45.0', (533, 58, 533, 61): 'zen', (533, 63, 533, 66): 'azm'}, {}), '(0, 90.0, 180.0, 45.0, zen, azm)', False, 'from sun import solarPos, sunIncident, perezComp, aOIcorrection\n'), ((539, 55, 539, 99), 'sun.perezComp', 'perezComp', ({(539, 65, 539, 68): 'dni', (539, 70, 539, 73): 'dhi', (539, 75, 539, 81): 'albedo', (539, 83, 539, 86): 'inc', (539, 88, 539, 93): 'tiltr', (539, 95, 539, 98): 'zen'}, {}), '(dni, dhi, albedo, inc, tiltr, zen)', False, 'from sun import solarPos, sunIncident, perezComp, aOIcorrection\n'), ((565, 9, 565, 47), 'math.pow', 'math.pow', ({(565, 18, 565, 41): '(n2 - 1.0) / (n2 + 1.0)', (565, 43, 565, 46): '2.0'}, {}), '((n2 - 1.0) / (n2 + 1.0), 2.0)', False, 'import math\n'), ((575, 8, 575, 22), 'math.sin', 'math.sin', ({(575, 17, 575, 21): 'beta'}, {}), '(beta)', False, 'import math\n'), ((576, 9, 576, 23), 'math.cos', 'math.cos', ({(576, 18, 576, 22): 'beta'}, {}), '(beta)', False, 'import math\n'), ((802, 8, 802, 22), 'math.sin', 'math.sin', ({(802, 17, 802, 21): 'beta'}, {}), '(beta)', False, 'import math\n'), ((803, 9, 803, 23), 'math.cos', 'math.cos', ({(803, 18, 803, 22): 'beta'}, {}), '(beta)', False, 'import math\n'), ((1158, 8, 1158, 22), 'math.sin', 'math.sin', ({(1158, 17, 1158, 21): 'beta'}, {}), '(beta)', False, 'import math\n'), ((1161, 9, 1161, 23), 'math.cos', 'math.cos', ({(1161, 18, 1161, 22): 'beta'}, {}), '(beta)', False, 'import math\n'), ((1557, 9, 1557, 23), 'math.cos', 'math.cos', ({(1557, 18, 1557, 22): 
'beta'}, {}), '(beta)', False, 'import math\n'), ((218, 16, 218, 62), 'math.atan', 'math.atan', ({(218, 26, 218, 61): '(PtopY - PcellY) / (PtopX - PcellX)'}, {}), '((PtopY - PcellY) / (PtopX - PcellX))', False, 'import math\n'), ((219, 18, 219, 64), 'math.atan', 'math.atan', ({(219, 28, 219, 63): '(PcellY - PbotY) / (PbotX - PcellX)'}, {}), '((PcellY - PbotY) / (PbotX - PcellX))', False, 'import math\n'), ((392, 28, 392, 94), 'sun.sunIncident', 'sunIncident', ({(392, 40, 392, 41): '0', (392, 43, 392, 58): '180 - beta / DTOR', (392, 60, 392, 77): 'sazm / DTOR - 180', (392, 79, 392, 83): '45.0', (392, 85, 392, 88): 'zen', (392, 90, 392, 93): 'azm'}, {}), '(0, 180 - beta / DTOR, sazm / DTOR - 180, 45.0, zen, azm)', False, 'from sun import solarPos, sunIncident, perezComp, aOIcorrection\n'), ((394, 64, 394, 108), 'sun.perezComp', 'perezComp', ({(394, 74, 394, 77): 'dni', (394, 79, 394, 82): 'dhi', (394, 84, 394, 90): 'albedo', (394, 92, 394, 95): 'inc', (394, 97, 394, 102): 'tiltr', (394, 104, 394, 107): 'zen'}, {}), '(dni, dhi, albedo, inc, tiltr, zen)', False, 'from sun import solarPos, sunIncident, perezComp, aOIcorrection\n'), ((592, 16, 592, 62), 'math.atan', 'math.atan', ({(592, 26, 592, 61): '(PtopY - PcellY) / (PcellX - PtopX)'}, {}), '((PtopY - PcellY) / (PcellX - PtopX))', False, 'import math\n'), ((593, 18, 593, 64), 'math.atan', 'math.atan', ({(593, 28, 593, 63): '(PcellY - PbotY) / (PcellX - PbotX)'}, {}), '((PcellY - PbotY) / (PcellX - PbotX))', False, 'import math\n'), ((603, 11, 603, 27), 'math.isnan', 'math.isnan', ({(603, 22, 603, 26): 'beta'}, {}), '(beta)', False, 'import math\n'), ((605, 11, 605, 28), 'math.isnan', 'math.isnan', ({(605, 22, 605, 27): 'elvUP'}, {}), '(elvUP)', False, 'import math\n'), ((607, 11, 607, 54), 'math.isnan', 'math.isnan', ({(607, 22, 607, 53): '((math.pi - beta - elvUP) / DTOR)'}, {}), '((math.pi - beta - elvUP) / DTOR)', False, 'import math\n'), ((710, 28, 710, 84), 'sun.sunIncident', 'sunIncident', ({(710, 40, 710, 41): '0', (710, 43, 710, 54): 'beta / DTOR', (710, 56, 710, 67): 'sazm / DTOR', (710, 69, 710, 73): '45.0', (710, 75, 710, 78): 'zen', (710, 80, 710, 83): 'azm'}, {}), '(0, beta / DTOR, sazm / DTOR, 45.0, zen, azm)', False, 'from sun import solarPos, sunIncident, perezComp, aOIcorrection\n'), ((717, 64, 717, 108), 'sun.perezComp', 'perezComp', ({(717, 74, 717, 77): 'dni', (717, 79, 717, 82): 'dhi', (717, 84, 717, 90): 'albedo', (717, 92, 717, 95): 'inc', (717, 97, 717, 102): 'tiltr', (717, 104, 717, 107): 'zen'}, {}), '(dni, dhi, albedo, inc, tiltr, zen)', False, 'from sun import solarPos, sunIncident, perezComp, aOIcorrection\n'), ((810, 31, 810, 51), 'math.cos', 'math.cos', ({(810, 40, 810, 50): '(sazm - azm)'}, {}), '(sazm - azm)', False, 'import math\n'), ((811, 38, 811, 58), 'math.cos', 'math.cos', ({(811, 47, 811, 57): '(sazm - azm)'}, {}), '(sazm - azm)', False, 'import math\n'), ((812, 31, 812, 51), 'math.cos', 'math.cos', ({(812, 40, 812, 50): '(sazm - azm)'}, {}), '(sazm - azm)', False, 'import math\n'), ((1515, 57, 1515, 108), 'sun.solarPos', 'solarPos', ({(1515, 67, 1515, 71): '2014', (1515, 73, 1515, 75): '12', (1515, 77, 1515, 79): '21', (1515, 81, 1515, 85): 'hour', (1515, 87, 1515, 93): 'minute', (1515, 95, 1515, 98): 'lat', (1515, 100, 1515, 103): 'lng', (1515, 105, 1515, 107): 'tz'}, {}), '(2014, 12, 21, hour, minute, lat, lng, tz)', False, 'from sun import solarPos, sunIncident, perezComp, aOIcorrection\n'), ((1517, 57, 1517, 107), 'sun.solarPos', 'solarPos', ({(1517, 67, 1517, 71): '2014', (1517, 73, 1517, 74): 
'6', (1517, 76, 1517, 78): '21', (1517, 80, 1517, 84): 'hour', (1517, 86, 1517, 92): 'minute', (1517, 94, 1517, 97): 'lat', (1517, 99, 1517, 102): 'lng', (1517, 104, 1517, 106): 'tz'}, {}), '(2014, 6, 21, hour, minute, lat, lng, tz)', False, 'from sun import solarPos, sunIncident, perezComp, aOIcorrection\n'), ((1523, 57, 1523, 107), 'sun.solarPos', 'solarPos', ({(1523, 66, 1523, 70): '2014', (1523, 72, 1523, 74): '12', (1523, 76, 1523, 78): '21', (1523, 80, 1523, 84): 'hour', (1523, 86, 1523, 92): 'minute', (1523, 94, 1523, 97): 'lat', (1523, 99, 1523, 102): 'lng', (1523, 104, 1523, 106): 'tz'}, {}), '(2014, 12, 21, hour, minute, lat, lng, tz)', False, 'from sun import solarPos, sunIncident, perezComp, aOIcorrection\n'), ((1525, 57, 1525, 106), 'sun.solarPos', 'solarPos', ({(1525, 66, 1525, 70): '2014', (1525, 72, 1525, 73): '6', (1525, 75, 1525, 77): '21', (1525, 79, 1525, 83): 'hour', (1525, 85, 1525, 91): 'minute', (1525, 93, 1525, 96): 'lat', (1525, 98, 1525, 101): 'lng', (1525, 103, 1525, 105): 'tz'}, {}), '(2014, 6, 21, hour, minute, lat, lng, tz)', False, 'from sun import solarPos, sunIncident, perezComp, aOIcorrection\n'), ((1528, 48, 1528, 61), 'math.tan', 'math.tan', ({(1528, 57, 1528, 60): 'elv'}, {}), '(elv)', False, 'import math\n'), ((1560, 13, 1560, 27), 'math.sin', 'math.sin', ({(1560, 22, 1560, 26): 'beta'}, {}), '(beta)', False, 'import math\n'), ((404, 18, 404, 40), 'sun.aOIcorrection', 'aOIcorrection', ({(404, 32, 404, 34): 'n2', (404, 36, 404, 39): 'inc'}, {}), '(n2, inc)', False, 'from sun import solarPos, sunIncident, perezComp, aOIcorrection\n'), ((734, 18, 734, 40), 'sun.aOIcorrection', 'aOIcorrection', ({(734, 32, 734, 34): 'n2', (734, 36, 734, 39): 'inc'}, {}), '(n2, inc)', False, 'from sun import solarPos, sunIncident, perezComp, aOIcorrection\n'), ((810, 14, 810, 27), 'math.tan', 'math.tan', ({(810, 23, 810, 26): 'elv'}, {}), '(elv)', False, 'import math\n'), ((811, 21, 811, 34), 'math.tan', 'math.tan', ({(811, 30, 811, 33): 'elv'}, {}), '(elv)', False, 'import math\n'), ((812, 14, 812, 27), 'math.tan', 'math.tan', ({(812, 23, 812, 26): 'elv'}, {}), '(elv)', False, 'import math\n'), ((1198, 19, 1198, 58), 'math.atan2', 'math.atan2', ({(1198, 30, 1198, 35): 'h + C', (1198, 38, 1198, 56): '2.0 * rtr + x1 - x'}, {}), '(h + C, 2.0 * rtr + x1 - x)', False, 'import math\n'), ((1199, 19, 1199, 49), 'math.atan2', 'math.atan2', ({(1199, 30, 1199, 31): 'C', (1199, 34, 1199, 47): '2.0 * rtr - x'}, {}), '(C, 2.0 * rtr - x)', False, 'import math\n'), ((1202, 19, 1202, 52), 'math.atan2', 'math.atan2', ({(1202, 30, 1202, 35): 'h + C', (1202, 38, 1202, 50): 'rtr + x1 - x'}, {}), '(h + C, rtr + x1 - x)', False, 'import math\n'), ((1203, 19, 1203, 43), 'math.atan2', 'math.atan2', ({(1203, 30, 1203, 31): 'C', (1203, 34, 1203, 41): 'rtr - x'}, {}), '(C, rtr - x)', False, 'import math\n'), ((1207, 20, 1207, 47), 'math.atan2', 'math.atan2', ({(1207, 31, 1207, 36): 'h + C', (1207, 39, 1207, 45): 'x1 - x'}, {}), '(h + C, x1 - x)', False, 'import math\n'), ((1208, 20, 1208, 39), 'math.atan2', 'math.atan2', ({(1208, 31, 1208, 32): 'C', (1208, 35, 1208, 37): '-x'}, {}), '(C, -x)', False, 'import math\n'), ((1209, 20, 1209, 47), 'math.atan2', 'math.atan2', ({(1209, 31, 1209, 36): 'h + C', (1209, 39, 1209, 45): '-D - x'}, {}), '(h + C, -D - x)', False, 'import math\n'), ((1528, 8, 1528, 28), 'math.cos', 'math.cos', ({(1528, 17, 1528, 27): '(sazm - azm)'}, {}), '(sazm - azm)', False, 'import math\n'), ((1528, 31, 1528, 45), 'math.sin', 'math.sin', ({(1528, 40, 1528, 44): 'beta'}, {}), 
'(beta)', False, 'import math\n'), ((216, 56, 216, 70), 'math.sin', 'math.sin', ({(216, 65, 216, 69): 'beta'}, {}), '(beta)', False, 'import math\n'), ((217, 59, 217, 73), 'math.cos', 'math.cos', ({(217, 68, 217, 72): 'beta'}, {}), '(beta)', False, 'import math\n'), ((1233, 19, 1233, 60), 'math.atan', 'math.atan', ({(1233, 29, 1233, 59): '(h + C) / (2.0 * rtr + x1 - x)'}, {}), '((h + C) / (2.0 * rtr + x1 - x))', False, 'import math\n'), ((1236, 19, 1236, 49), 'math.atan', 'math.atan', ({(1236, 29, 1236, 48): 'C / (2.0 * rtr - x)'}, {}), '(C / (2.0 * rtr - x))', False, 'import math\n'), ((1241, 19, 1241, 54), 'math.atan', 'math.atan', ({(1241, 29, 1241, 53): '(h + C) / (rtr + x1 - x)'}, {}), '((h + C) / (rtr + x1 - x))', False, 'import math\n'), ((1244, 19, 1244, 43), 'math.atan', 'math.atan', ({(1244, 29, 1244, 42): 'C / (rtr - x)'}, {}), '(C / (rtr - x))', False, 'import math\n'), ((1251, 20, 1251, 49), 'math.atan', 'math.atan', ({(1251, 30, 1251, 48): '(h + C) / (x1 - x)'}, {}), '((h + C) / (x1 - x))', False, 'import math\n'), ((1255, 20, 1255, 39), 'math.atan', 'math.atan', ({(1255, 30, 1255, 38): 'C / -x'}, {}), '(C / -x)', False, 'import math\n'), ((1281, 19, 1281, 60), 'math.atan', 'math.atan', ({(1281, 29, 1281, 59): '(h + C) / (2.0 * rtr + x1 - x)'}, {}), '((h + C) / (2.0 * rtr + x1 - x))', False, 'import math\n'), ((1284, 19, 1284, 49), 'math.atan', 'math.atan', ({(1284, 29, 1284, 48): 'C / (2.0 * rtr - x)'}, {}), '(C / (2.0 * rtr - x))', False, 'import math\n'), ((1289, 19, 1289, 54), 'math.atan', 'math.atan', ({(1289, 29, 1289, 53): '(h + C) / (rtr + x1 - x)'}, {}), '((h + C) / (rtr + x1 - x))', False, 'import math\n'), ((1292, 19, 1292, 43), 'math.atan', 'math.atan', ({(1292, 29, 1292, 42): 'C / (rtr - x)'}, {}), '(C / (rtr - x))', False, 'import math\n'), ((259, 39, 259, 56), 'math.cos', 'math.cos', ({(259, 48, 259, 55): 'elvDOWN'}, {}), '(elvDOWN)', False, 'import math\n'), ((262, 24, 262, 44), 'math.sin', 'math.sin', ({(262, 33, 262, 43): 'startAlpha'}, {}), '(startAlpha)', False, 'import math\n'), ((264, 34, 264, 49), 'math.cos', 'math.cos', ({(264, 43, 264, 48): 'theta'}, {}), '(theta)', False, 'import math\n'), ((265, 24, 265, 43), 'math.sin', 'math.sin', ({(265, 33, 265, 42): 'stopAlpha'}, {}), '(stopAlpha)', False, 'import math\n'), ((267, 34, 267, 49), 'math.cos', 'math.cos', ({(267, 43, 267, 48): 'theta'}, {}), '(theta)', False, 'import math\n'), ((300, 35, 300, 53), 'numpy.float64', 'np.float64', ({(300, 46, 300, 52): 'PcellY'}, {}), '(PcellY)', True, 'import numpy as np\n'), ((300, 56, 300, 78), 'math.tan', 'math.tan', ({(300, 65, 300, 77): 'startElvDown'}, {}), '(startElvDown)', False, 'import math\n'), ((301, 44, 301, 65), 'math.tan', 'math.tan', ({(301, 53, 301, 64): 'stopElvDown'}, {}), '(stopElvDown)', False, 'import math\n'), ((611, 29, 611, 65), 'numpy.float64', 'np.float64', ({(611, 41, 611, 63): 'math.pi - beta - elvUP'}, {}), '(math.pi - beta - elvUP)', True, 'import numpy as np\n'), ((640, 35, 640, 53), 'numpy.float64', 'np.float64', ({(640, 46, 640, 52): 'PcellY'}, {}), '(PcellY)', True, 'import numpy as np\n'), ((640, 56, 640, 78), 'math.tan', 'math.tan', ({(640, 65, 640, 77): 'startElvDown'}, {}), '(startElvDown)', False, 'import math\n'), ((641, 44, 641, 65), 'math.tan', 'math.tan', ({(641, 53, 641, 64): 'stopElvDown'}, {}), '(stopElvDown)', False, 'import math\n'), ((1325, 20, 1325, 49), 'math.atan', 'math.atan', ({(1325, 30, 1325, 48): '(h + C) / (x1 - x)'}, {}), '((h + C) / (x1 - x))', False, 'import math\n'), ((1329, 20, 1329, 39), 'math.atan', 
'math.atan', ({(1329, 30, 1329, 38): 'C / -x'}, {}), '(C / -x)', False, 'import math\n'), ((1333, 20, 1333, 49), 'math.atan', 'math.atan', ({(1333, 30, 1333, 48): '(h + C) / (-D - x)'}, {}), '((h + C) / (-D - x))', False, 'import math\n'), ((1354, 19, 1354, 60), 'math.atan', 'math.atan', ({(1354, 29, 1354, 59): '(h + C) / (2.0 * rtr + x1 - x)'}, {}), '((h + C) / (2.0 * rtr + x1 - x))', False, 'import math\n'), ((1357, 19, 1357, 49), 'math.atan', 'math.atan', ({(1357, 29, 1357, 48): 'C / (2.0 * rtr - x)'}, {}), '(C / (2.0 * rtr - x))', False, 'import math\n'), ((1363, 19, 1363, 54), 'math.atan', 'math.atan', ({(1363, 29, 1363, 53): '(h + C) / (rtr + x1 - x)'}, {}), '((h + C) / (rtr + x1 - x))', False, 'import math\n'), ((1366, 19, 1366, 43), 'math.atan', 'math.atan', ({(1366, 29, 1366, 42): 'C / (rtr - x)'}, {}), '(C / (rtr - x))', False, 'import math\n'), ((1373, 20, 1373, 49), 'math.atan', 'math.atan', ({(1373, 30, 1373, 48): '(h + C) / (x1 - x)'}, {}), '((h + C) / (x1 - x))', False, 'import math\n'), ((1377, 20, 1377, 39), 'math.atan', 'math.atan', ({(1377, 30, 1377, 38): 'C / -x'}, {}), '(C / -x)', False, 'import math\n'), ((1381, 20, 1381, 49), 'math.atan', 'math.atan', ({(1381, 30, 1381, 48): '(h + C) / (-D - x)'}, {}), '((h + C) / (-D - x))', False, 'import math\n'), ((1212, 30, 1212, 45), 'math.cos', 'math.cos', ({(1212, 39, 1212, 44): 'beta1'}, {}), '(beta1)', False, 'import math\n'), ((1212, 48, 1212, 63), 'math.cos', 'math.cos', ({(1212, 57, 1212, 62): 'beta2'}, {}), '(beta2)', False, 'import math\n'), ((1214, 30, 1214, 45), 'math.cos', 'math.cos', ({(1214, 39, 1214, 44): 'beta3'}, {}), '(beta3)', False, 'import math\n'), ((1214, 48, 1214, 63), 'math.cos', 'math.cos', ({(1214, 57, 1214, 62): 'beta4'}, {}), '(beta4)', False, 'import math\n'), ((1216, 30, 1216, 45), 'math.cos', 'math.cos', ({(1216, 39, 1216, 44): 'beta5'}, {}), '(beta5)', False, 'import math\n'), ((1216, 48, 1216, 63), 'math.cos', 'math.cos', ({(1216, 57, 1216, 62): 'beta6'}, {}), '(beta6)', False, 'import math\n'), ((1412, 20, 1412, 49), 'math.atan', 'math.atan', ({(1412, 30, 1412, 48): '(h + C) / (x1 - x)'}, {}), '((h + C) / (x1 - x))', False, 'import math\n'), ((1416, 20, 1416, 39), 'math.atan', 'math.atan', ({(1416, 30, 1416, 38): 'C / -x'}, {}), '(C / -x)', False, 'import math\n'), ((1439, 19, 1439, 60), 'math.atan', 'math.atan', ({(1439, 29, 1439, 59): '(h + C) / (2.0 * rtr + x1 - x)'}, {}), '((h + C) / (2.0 * rtr + x1 - x))', False, 'import math\n'), ((1442, 19, 1442, 49), 'math.atan', 'math.atan', ({(1442, 29, 1442, 48): 'C / (2.0 * rtr - x)'}, {}), '(C / (2.0 * rtr - x))', False, 'import math\n'), ((1448, 19, 1448, 54), 'math.atan', 'math.atan', ({(1448, 29, 1448, 53): '(h + C) / (rtr + x1 - x)'}, {}), '((h + C) / (rtr + x1 - x))', False, 'import math\n'), ((1451, 19, 1451, 43), 'math.atan', 'math.atan', ({(1451, 29, 1451, 42): 'C / (rtr - x)'}, {}), '(C / (rtr - x))', False, 'import math\n'), ((244, 33, 244, 51), 'math.cos', 'math.cos', ({(244, 42, 244, 50): '(j * DTOR)'}, {}), '(j * DTOR)', False, 'import math\n'), ((244, 54, 244, 78), 'math.cos', 'math.cos', ({(244, 63, 244, 77): '((j + 1) * DTOR)'}, {}), '((j + 1) * DTOR)', False, 'import math\n'), ((625, 34, 625, 52), 'math.cos', 'math.cos', ({(625, 43, 625, 51): '(j * DTOR)'}, {}), '(j * DTOR)', False, 'import math\n'), ((625, 55, 625, 79), 'math.cos', 'math.cos', ({(625, 64, 625, 78): '((j + 1) * DTOR)'}, {}), '((j + 1) * DTOR)', False, 'import math\n'), ((626, 40, 626, 58), 'math.cos', 'math.cos', ({(626, 49, 626, 57): '(j * DTOR)'}, {}), '(j * 
DTOR)', False, 'import math\n'), ((626, 61, 626, 85), 'math.cos', 'math.cos', ({(626, 70, 626, 84): '((j + 1) * DTOR)'}, {}), '((j + 1) * DTOR)', False, 'import math\n'), ((1263, 30, 1263, 45), 'math.cos', 'math.cos', ({(1263, 39, 1263, 44): 'beta1'}, {}), '(beta1)', False, 'import math\n'), ((1263, 48, 1263, 63), 'math.cos', 'math.cos', ({(1263, 57, 1263, 62): 'beta2'}, {}), '(beta2)', False, 'import math\n'), ((1265, 30, 1265, 45), 'math.cos', 'math.cos', ({(1265, 39, 1265, 44): 'beta3'}, {}), '(beta3)', False, 'import math\n'), ((1265, 48, 1265, 63), 'math.cos', 'math.cos', ({(1265, 57, 1265, 62): 'beta4'}, {}), '(beta4)', False, 'import math\n'), ((1267, 30, 1267, 45), 'math.cos', 'math.cos', ({(1267, 39, 1267, 44): 'beta5'}, {}), '(beta5)', False, 'import math\n'), ((1267, 48, 1267, 63), 'math.cos', 'math.cos', ({(1267, 57, 1267, 62): 'beta6'}, {}), '(beta6)', False, 'import math\n'), ((1303, 30, 1303, 45), 'math.cos', 'math.cos', ({(1303, 39, 1303, 44): 'beta1'}, {}), '(beta1)', False, 'import math\n'), ((1303, 48, 1303, 63), 'math.cos', 'math.cos', ({(1303, 57, 1303, 62): 'beta2'}, {}), '(beta2)', False, 'import math\n'), ((1305, 30, 1305, 45), 'math.cos', 'math.cos', ({(1305, 39, 1305, 44): 'beta3'}, {}), '(beta3)', False, 'import math\n'), ((1305, 48, 1305, 63), 'math.cos', 'math.cos', ({(1305, 57, 1305, 62): 'beta4'}, {}), '(beta4)', False, 'import math\n'), ((290, 37, 290, 55), 'math.cos', 'math.cos', ({(290, 46, 290, 54): '(j * DTOR)'}, {}), '(j * DTOR)', False, 'import math\n'), ((290, 58, 290, 82), 'math.cos', 'math.cos', ({(290, 67, 290, 81): '((j + 1) * DTOR)'}, {}), '((j + 1) * DTOR)', False, 'import math\n'), ((385, 33, 385, 51), 'math.cos', 'math.cos', ({(385, 42, 385, 50): '(j * DTOR)'}, {}), '(j * DTOR)', False, 'import math\n'), ((385, 54, 385, 78), 'math.cos', 'math.cos', ({(385, 63, 385, 77): '((j + 1) * DTOR)'}, {}), '((j + 1) * DTOR)', False, 'import math\n'), ((630, 44, 630, 62), 'math.cos', 'math.cos', ({(630, 53, 630, 61): '(j * DTOR)'}, {}), '(j * DTOR)', False, 'import math\n'), ((630, 65, 630, 89), 'math.cos', 'math.cos', ({(630, 74, 630, 88): '((j + 1) * DTOR)'}, {}), '((j + 1) * DTOR)', False, 'import math\n'), ((704, 34, 704, 52), 'math.cos', 'math.cos', ({(704, 43, 704, 51): '(j * DTOR)'}, {}), '(j * DTOR)', False, 'import math\n'), ((704, 55, 704, 79), 'math.cos', 'math.cos', ({(704, 64, 704, 78): '((j + 1) * DTOR)'}, {}), '((j + 1) * DTOR)', False, 'import math\n'), ((705, 40, 705, 58), 'math.cos', 'math.cos', ({(705, 49, 705, 57): '(j * DTOR)'}, {}), '(j * DTOR)', False, 'import math\n'), ((705, 61, 705, 85), 'math.cos', 'math.cos', ({(705, 70, 705, 84): '((j + 1) * DTOR)'}, {}), '((j + 1) * DTOR)', False, 'import math\n'), ((1339, 30, 1339, 45), 'math.cos', 'math.cos', ({(1339, 39, 1339, 44): 'beta3'}, {}), '(beta3)', False, 'import math\n'), ((1339, 48, 1339, 63), 'math.cos', 'math.cos', ({(1339, 57, 1339, 62): 'beta4'}, {}), '(beta4)', False, 'import math\n'), ((1341, 30, 1341, 45), 'math.cos', 'math.cos', ({(1341, 39, 1341, 44): 'beta5'}, {}), '(beta5)', False, 'import math\n'), ((1341, 48, 1341, 63), 'math.cos', 'math.cos', ({(1341, 57, 1341, 62): 'beta6'}, {}), '(beta6)', False, 'import math\n'), ((1387, 30, 1387, 45), 'math.cos', 'math.cos', ({(1387, 39, 1387, 44): 'beta1'}, {}), '(beta1)', False, 'import math\n'), ((1387, 48, 1387, 63), 'math.cos', 'math.cos', ({(1387, 57, 1387, 62): 'beta2'}, {}), '(beta2)', False, 'import math\n'), ((1389, 30, 1389, 45), 'math.cos', 'math.cos', ({(1389, 39, 1389, 44): 'beta3'}, {}), '(beta3)', False, 'import 
math\n'), ((1389, 48, 1389, 63), 'math.cos', 'math.cos', ({(1389, 57, 1389, 62): 'beta4'}, {}), '(beta4)', False, 'import math\n'), ((1391, 30, 1391, 45), 'math.cos', 'math.cos', ({(1391, 39, 1391, 44): 'beta5'}, {}), '(beta5)', False, 'import math\n'), ((1391, 48, 1391, 63), 'math.cos', 'math.cos', ({(1391, 57, 1391, 62): 'beta6'}, {}), '(beta6)', False, 'import math\n'), ((249, 37, 249, 55), 'math.cos', 'math.cos', ({(249, 46, 249, 54): '(j * DTOR)'}, {}), '(j * DTOR)', False, 'import math\n'), ((249, 58, 249, 82), 'math.cos', 'math.cos', ({(249, 67, 249, 81): '((j + 1) * DTOR)'}, {}), '((j + 1) * DTOR)', False, 'import math\n'), ((629, 38, 629, 56), 'math.cos', 'math.cos', ({(629, 47, 629, 55): '(j * DTOR)'}, {}), '(j * DTOR)', False, 'import math\n'), ((629, 59, 629, 83), 'math.cos', 'math.cos', ({(629, 68, 629, 82): '((j + 1) * DTOR)'}, {}), '((j + 1) * DTOR)', False, 'import math\n'), ((1424, 30, 1424, 45), 'math.cos', 'math.cos', ({(1424, 39, 1424, 44): 'beta3'}, {}), '(beta3)', False, 'import math\n'), ((1424, 48, 1424, 63), 'math.cos', 'math.cos', ({(1424, 57, 1424, 62): 'beta4'}, {}), '(beta4)', False, 'import math\n'), ((1426, 30, 1426, 45), 'math.cos', 'math.cos', ({(1426, 39, 1426, 44): 'beta5'}, {}), '(beta5)', False, 'import math\n'), ((1426, 48, 1426, 63), 'math.cos', 'math.cos', ({(1426, 57, 1426, 62): 'beta6'}, {}), '(beta6)', False, 'import math\n'), ((1462, 30, 1462, 45), 'math.cos', 'math.cos', ({(1462, 39, 1462, 44): 'beta1'}, {}), '(beta1)', False, 'import math\n'), ((1462, 48, 1462, 63), 'math.cos', 'math.cos', ({(1462, 57, 1462, 62): 'beta2'}, {}), '(beta2)', False, 'import math\n'), ((1464, 30, 1464, 45), 'math.cos', 'math.cos', ({(1464, 39, 1464, 44): 'beta3'}, {}), '(beta3)', False, 'import math\n'), ((1464, 48, 1464, 63), 'math.cos', 'math.cos', ({(1464, 57, 1464, 62): 'beta4'}, {}), '(beta4)', False, 'import math\n')] |
Habidatum/slisonner | tests/test_slison.py | 488be30a199a5d29271e24377c37a7ad83d52e3e | from slisonner import decoder, encoder
from tests import mocker
from tempfile import mkdtemp
from shutil import rmtree
def test_full_encode_decode_cycle():
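    # Round-trip: encode a mocked slice to a .slison file, decode it back, and verify the metadata survives.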
temp_out_dir = mkdtemp()
slice_id = '2015-01-02 00:00:00'
x_size, y_size = 10, 16
temp_slice_path = mocker.generate_slice(x_size, y_size, 'float32')
slice_meta_encoded, slison_filepath = encoder.encode_slice_file(
filepath=temp_slice_path,
slice_duration=300,
timestamp=slice_id,
layer_id='london',
x_size=x_size,
y_size=y_size,
value_type='float32',
out_dir=temp_out_dir)
slice_data, slice_meta_decoded = decoder.decode_slison(slison_filepath)
for key, encoded_value in slice_meta_encoded.items():
assert encoded_value == slice_meta_decoded[key]
rmtree(temp_out_dir)
| [((8, 19, 8, 28), 'tempfile.mkdtemp', 'mkdtemp', ({}, {}), '()', False, 'from tempfile import mkdtemp\n'), ((12, 22, 12, 70), 'tests.mocker.generate_slice', 'mocker.generate_slice', ({(12, 44, 12, 50): 'x_size', (12, 52, 12, 58): 'y_size', (12, 60, 12, 69): '"""float32"""'}, {}), "(x_size, y_size, 'float32')", False, 'from tests import mocker\n'), ((14, 42, 22, 29), 'slisonner.encoder.encode_slice_file', 'encoder.encode_slice_file', (), '', False, 'from slisonner import decoder, encoder\n'), ((23, 37, 23, 75), 'slisonner.decoder.decode_slison', 'decoder.decode_slison', ({(23, 59, 23, 74): 'slison_filepath'}, {}), '(slison_filepath)', False, 'from slisonner import decoder, encoder\n'), ((27, 4, 27, 24), 'shutil.rmtree', 'rmtree', ({(27, 11, 27, 23): 'temp_out_dir'}, {}), '(temp_out_dir)', False, 'from shutil import rmtree\n')] |
vignesharumainayagam/cartrade | cartrade/cartrade/doctype/category/category.py | 81349bc3cd9dbd441491304734077aab10dca56f | # -*- coding: utf-8 -*-
# Copyright (c) 2018, Tridots Tech Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.website.website_generator import WebsiteGenerator
class Category(WebsiteGenerator):
def validate(self):
if ' ' in self.category_name:
self.route = self.category_name.replace(" ", "").lower() | [] |
TUDelft-AE-Python/ae1205-exercises | exercises/pyfiles/ex812_polarsincos.py | 342d1d567b64d3ccb3371ce9826c02a87a155fa8 | import matplotlib.pyplot as plt
import math
xtab = []
ytab = []
for i in range(0, 628):
# Calculate polar coordinates for provided equation
phi = float(i) / 100.0
r = 4 * math.cos(2 * phi)
# Convert to Cartesian and store in lists
x = r * math.cos(phi)
y = r * math.sin(phi)
xtab.append(x)
ytab.append(y)
plt.plot(xtab, ytab)
plt.show() | [((18, 0, 18, 20), 'matplotlib.pyplot.plot', 'plt.plot', ({(18, 9, 18, 13): 'xtab', (18, 15, 18, 19): 'ytab'}, {}), '(xtab, ytab)', True, 'import matplotlib.pyplot as plt\n'), ((19, 0, 19, 10), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((11, 12, 11, 29), 'math.cos', 'math.cos', ({(11, 21, 11, 28): '(2 * phi)'}, {}), '(2 * phi)', False, 'import math\n'), ((13, 12, 13, 25), 'math.cos', 'math.cos', ({(13, 21, 13, 24): 'phi'}, {}), '(phi)', False, 'import math\n'), ((14, 12, 14, 25), 'math.sin', 'math.sin', ({(14, 21, 14, 24): 'phi'}, {}), '(phi)', False, 'import math\n')] |
jgmize/kitsune | vendor/packages/logilab-astng/__pkginfo__.py | 8f23727a9c7fcdd05afc86886f0134fb08d9a2f0 | # Copyright (c) 2003-2010 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:[email protected]
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:[email protected]
# copyright 2003-2010 Sylvain Thenault, all rights reserved.
# contact mailto:[email protected]
#
# This file is part of logilab-astng.
#
# logilab-astng is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 2.1 of the License, or (at your
# option) any later version.
#
# logilab-astng is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-astng. If not, see <http://www.gnu.org/licenses/>.
"""
logilab.astng packaging information
"""
distname = 'logilab-astng'
modname = 'astng'
subpackage_of = 'logilab'
numversion = (0, 20, 1)
version = '.'.join([str(num) for num in numversion])
install_requires = ['logilab-common >= 0.49.0']
pyversions = ["2.3", "2.4", "2.5", '2.6']
license = 'LGPL'
author = 'Logilab'
author_email = '[email protected]'
mailinglist = "mailto://%s" % author_email
web = "http://www.logilab.org/project/%s" % distname
ftp = "ftp://ftp.logilab.org/pub/%s" % modname
short_desc = "rebuild a new abstract syntax tree from Python's ast"
long_desc = """The aim of this module is to provide a common base \
representation of python source code for projects such as pychecker, pyreverse,
pylint... Well, actually the development of this library is essentially
governed by pylint's needs.
It rebuilds the tree generated by the compiler.ast [1] module (python <= 2.4)
or by the builtin _ast module (python >= 2.5) by recursively walking down the
AST and building an extended ast (let's call it astng ;). The new node classes
have additional methods and attributes for different usages.
Furthermore, astng builds partial trees by inspecting living objects."""
from os.path import join
include_dirs = [join('test', 'regrtest_data'),
join('test', 'data'), join('test', 'data2')]
| [((75, 16, 75, 45), 'os.path.join', 'join', ({(75, 21, 75, 27): '"""test"""', (75, 29, 75, 44): '"""regrtest_data"""'}, {}), "('test', 'regrtest_data')", False, 'from os.path import join\n'), ((76, 16, 76, 36), 'os.path.join', 'join', ({(76, 21, 76, 27): '"""test"""', (76, 29, 76, 35): '"""data"""'}, {}), "('test', 'data')", False, 'from os.path import join\n'), ((76, 38, 76, 59), 'os.path.join', 'join', ({(76, 43, 76, 49): '"""test"""', (76, 51, 76, 58): '"""data2"""'}, {}), "('test', 'data2')", False, 'from os.path import join\n')] |
lmyybh/pytorch-networks | W-DCGAN/model.py | 8da055f5042c3803b275734afc89d33d239d7585 | import torch
import torch.nn as nn
class Generator(nn.Module):
def __init__(self, signal_size, out_channels=3):
super(Generator, self).__init__()
self.linear = nn.Linear(signal_size, 1024*4*4)
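        # project the input signal vector to a 1024-channel 4x4 feature map (reshaped in forward)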
convs = []
channels = [1024, 512, 256, 128]
for i in range(1, len(channels)):
convs.append(nn.ConvTranspose2d(channels[i-1], channels[i], 2, stride=2))
convs.append(nn.BatchNorm2d(channels[i]))
convs.append(nn.LeakyReLU(0.2, inplace=True))
convs.append(nn.ConvTranspose2d(channels[-1], out_channels, 2, stride=2))
convs.append(nn.Tanh())
self.convs = nn.Sequential(*convs)
def forward(self, x):
x = self.linear(x)
x = x.view(x.size(0), 1024, 4, 4)
x = self.convs(x)
return x
class Discriminator(nn.Module):
def __init__(self):
super(Discriminator, self).__init__()
channels = [3, 32, 64, 128, 256, 512, 1024]
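        # six stride-2 convolutions; assuming 64x64 inputs this reduces to 1x1, matching the 1024*1*1 linear layer below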
convs = []
for i in range(1, len(channels)):
convs.append(nn.Conv2d(channels[i-1], channels[i], 3, padding=1, stride=2))
convs.append(nn.BatchNorm2d(channels[i]))
convs.append(nn.LeakyReLU(0.2, inplace=True))
self.convs = nn.Sequential(*convs)
self.linear = nn.Linear(1024*1*1, 1)
def forward(self, x):
x = self.convs(x)
x = x.view(x.size(0), -1)
x = self.linear(x)
# x = torch.sigmoid(x)
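        # WGAN critic: return the raw, unbounded score rather than a sigmoid probability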
return x
| [((7, 22, 7, 54), 'torch.nn.Linear', 'nn.Linear', ({(7, 32, 7, 43): 'signal_size', (7, 45, 7, 53): '1024 * 4 * 4'}, {}), '(signal_size, 1024 * 4 * 4)', True, 'import torch.nn as nn\n'), ((18, 21, 18, 42), 'torch.nn.Sequential', 'nn.Sequential', ({(18, 35, 18, 41): '*convs'}, {}), '(*convs)', True, 'import torch.nn as nn\n'), ((35, 21, 35, 42), 'torch.nn.Sequential', 'nn.Sequential', ({(35, 35, 35, 41): '*convs'}, {}), '(*convs)', True, 'import torch.nn as nn\n'), ((36, 22, 36, 44), 'torch.nn.Linear', 'nn.Linear', ({(36, 32, 36, 40): '1024 * 1 * 1', (36, 42, 36, 43): '1'}, {}), '(1024 * 1 * 1, 1)', True, 'import torch.nn as nn\n'), ((15, 21, 15, 80), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (), '', True, 'import torch.nn as nn\n'), ((16, 21, 16, 30), 'torch.nn.Tanh', 'nn.Tanh', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((12, 25, 12, 84), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (), '', True, 'import torch.nn as nn\n'), ((13, 25, 13, 52), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ({(13, 40, 13, 51): 'channels[i]'}, {}), '(channels[i])', True, 'import torch.nn as nn\n'), ((14, 25, 14, 56), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (), '', True, 'import torch.nn as nn\n'), ((32, 25, 32, 86), 'torch.nn.Conv2d', 'nn.Conv2d', (), '', True, 'import torch.nn as nn\n'), ((33, 25, 33, 52), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ({(33, 40, 33, 51): 'channels[i]'}, {}), '(channels[i])', True, 'import torch.nn as nn\n'), ((34, 25, 34, 56), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (), '', True, 'import torch.nn as nn\n')] |
bioShaun/omsCabinet | bioinformatics/analysis/rnaseq/prepare/split_gtf_by_type.py | 741179a06cbd5200662cd03bc2e0115f4ad06917 | import fire
import gtfparse
from pathlib import Path
GENCODE_CATEGORY_MAP = {
'IG_C_gene': 'protein_coding',
'IG_D_gene': 'protein_coding',
'IG_J_gene': 'protein_coding',
'IG_V_gene': 'protein_coding',
'IG_LV_gene': 'protein_coding',
'TR_C_gene': 'protein_coding',
'TR_J_gene': 'protein_coding',
'TR_V_gene': 'protein_coding',
'TR_D_gene': 'protein_coding',
'TEC': 'protein_coding',
'nonsense_mediated_decay': 'protein_coding',
'non_stop_decay': 'protein_coding',
'retained_intron': 'lncRNA',
'protein_coding': 'protein_coding',
'ambiguous_orf': 'lncRNA',
'Mt_rRNA': 'ncRNA',
'Mt_tRNA': 'ncRNA',
'miRNA': 'ncRNA',
'misc_RNA': 'ncRNA',
'rRNA': 'ncRNA',
'snRNA': 'ncRNA',
'snoRNA': 'ncRNA',
'ribozyme': 'ncRNA',
'sRNA': 'ncRNA',
'scaRNA': 'ncRNA',
'scRNA': 'ncRNA',
'non_coding': 'lncRNA',
'known_ncrna': 'ncRNA',
'3prime_overlapping_ncrna': 'lncRNA',
'3prime_overlapping_ncRNA': 'lncRNA',
'vaultRNA': 'ncRNA',
'processed_transcript': 'lncRNA',
'lincRNA': 'lncRNA',
'macro_lncRNA': 'lncRNA',
'sense_intronic': 'lncRNA',
'sense_overlapping': 'lncRNA',
'antisense': 'lncRNA',
'antisense_RNA': 'lncRNA',
'bidirectional_promoter_lncRNA': 'lncRNA',
'IG_pseudogene': 'pseudogene',
'IG_D_pseudogene': 'pseudogene',
'IG_C_pseudogene': 'pseudogene',
'IG_J_pseudogene': 'pseudogene',
'IG_V_pseudogene': 'pseudogene',
'TR_V_pseudogene': 'pseudogene',
'TR_J_pseudogene': 'pseudogene',
'Mt_tRNA_pseudogene': 'pseudogene',
'tRNA_pseudogene': 'pseudogene',
'snoRNA_pseudogene': 'pseudogene',
'snRNA_pseudogene': 'pseudogene',
'scRNA_pseudogene': 'pseudogene',
'rRNA_pseudogene': 'pseudogene',
'misc_RNA_pseudogene': 'pseudogene',
'miRNA_pseudogene': 'pseudogene',
'pseudogene': 'pseudogene',
'processed_pseudogene': 'pseudogene',
'polymorphic_pseudogene': 'pseudogene',
'retrotransposed': 'pseudogene',
'transcribed_processed_pseudogene': 'pseudogene',
'transcribed_unprocessed_pseudogene': 'pseudogene',
'transcribed_unitary_pseudogene': 'pseudogene',
'translated_processed_pseudogene': 'pseudogene',
'translated_unprocessed_pseudogene': 'pseudogene',
'unitary_pseudogene': 'pseudogene',
'unprocessed_pseudogene': 'pseudogene',
'novel_lncRNA': 'lncRNA',
'TUCP': 'TUCP',
'lncRNA': 'lncRNA'
}
def simplify_gene_type(gene_type):
if gene_type in GENCODE_CATEGORY_MAP:
sim_type = GENCODE_CATEGORY_MAP.get(gene_type)
if sim_type == 'lncRNA':
sim_type = f'annotated_{sim_type}'
elif sim_type == 'ncRNA':
sim_type = f'other_{sim_type}'
else:
pass
return sim_type
else:
raise ValueError(gene_type)
def dfline2gtfline(dfline):
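    # Rebuild one tab-separated GTF line from a parsed gtfparse row: 8 fixed columns plus 'key "value";' attributes.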
basic_inf = dfline[:8]
basic_inf.fillna('.', inplace=True)
basic_inf.frame = '.'
basic_inf_list = [str(each) for each in basic_inf]
basic_inf_line = '\t'.join(basic_inf_list)
attr_inf = dfline[8:]
attr_inf_list = []
for key, val in attr_inf.items():
if val:
attr_inf_list.append(f'{key} "{val}";')
attr_inf_line = ' '.join(attr_inf_list)
return f'{basic_inf_line}\t{attr_inf_line}\n'
def split_gtf(gtf, outdir, novel=False):
gtf_df = gtfparse.read_gtf(gtf)
if 'gene_type' in gtf_df.columns:
gtf_df.loc[:, 'gene_biotype'] = gtf_df.gene_type
gtf_df.drop('gene_type', axis=1, inplace=True)
elif 'gene_biotype' in gtf_df.columns:
pass
else:
gtf_df.loc[:, 'gene_biotype'] = 'protein_coding'
type_label = 'gene_biotype'
if novel:
gtf_df.loc[
:, type_label] = gtf_df.loc[:, type_label].map(
GENCODE_CATEGORY_MAP)
else:
gtf_df.loc[
:, type_label] = gtf_df.loc[:, type_label].map(
simplify_gene_type)
outdir = Path(outdir)
outdir.mkdir(parents=True, exist_ok=True)
for gt, grp in gtf_df.groupby(type_label):
gt_file = outdir / f'{gt}.gtf'
with open(gt_file, 'w') as gt_inf:
for idx in grp.index:
outline = dfline2gtfline(grp.loc[idx])
gt_inf.write(outline)
if __name__ == '__main__':
fire.Fire(split_gtf)
| [((108, 13, 108, 35), 'gtfparse.read_gtf', 'gtfparse.read_gtf', ({(108, 31, 108, 34): 'gtf'}, {}), '(gtf)', False, 'import gtfparse\n'), ((128, 13, 128, 25), 'pathlib.Path', 'Path', ({(128, 18, 128, 24): 'outdir'}, {}), '(outdir)', False, 'from pathlib import Path\n'), ((140, 4, 140, 24), 'fire.Fire', 'fire.Fire', ({(140, 14, 140, 23): 'split_gtf'}, {}), '(split_gtf)', False, 'import fire\n')] |
navel0810/chibi | rational.py | d2e9a791492352c3c1b76c841a3ad30df2f444fd | import math
class Q(object):
def __init__(self,a,b=1):
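        # store the rational number a/b reduced to lowest terms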
gcd=math.gcd(a,b)
self.a=a//gcd
self.b=b//gcd
def __repr__(self):
if self.b==1:
return str(self.a)
return f'{self.a}/{self.b}'
def __add__(self,q):
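        # a/b + c/d = (a*d + b*c)/(b*d); the constructor reduces the result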
a=self.a
b=self.b
c=q.a
d=q.b
return Q(a*d+b*c,b*d)
def __sub__(self,q):
a=self.a
b=self.b
c=q.a
d=q.b
return Q(a*d-b*c,b*d)
def __mul__(self,q):
a=self.a
b=self.b
c=q.a
d=q.b
return Q(a*c,b*d)
def __truediv__(self,q):
a=self.a
b=self.b
c=q.a
d=q.b
return Q(a*d,b*c)
q1=Q(1,2)
q2=Q(1,3)
print(q1/q2) | [((5, 12, 5, 25), 'math.gcd', 'math.gcd', ({(5, 21, 5, 22): 'a', (5, 23, 5, 24): 'b'}, {}), '(a, b)', False, 'import math\n')] |
jsandovalc/django-cities-light | cities_light/tests/test_import.py | a1c6af08938b7b01d4e12555bd4cb5040905603d | from __future__ import unicode_literals
import glob
import os
from dbdiff.fixture import Fixture
from .base import TestImportBase, FixtureDir
from ..settings import DATA_DIR
class TestImport(TestImportBase):
"""Load test."""
def test_single_city(self):
"""Load single city."""
fixture_dir = FixtureDir('import')
self.import_data(
fixture_dir,
'angouleme_country',
'angouleme_region',
'angouleme_subregion',
'angouleme_city',
'angouleme_translations'
)
Fixture(fixture_dir.get_file_path('angouleme.json')).assertNoDiff()
def test_single_city_zip(self):
"""Load single city."""
filelist = glob.glob(os.path.join(DATA_DIR, "angouleme_*.txt"))
for f in filelist:
os.remove(f)
fixture_dir = FixtureDir('import_zip')
self.import_data(
fixture_dir,
'angouleme_country',
'angouleme_region',
'angouleme_subregion',
'angouleme_city',
'angouleme_translations',
file_type="zip"
)
Fixture(FixtureDir('import').get_file_path('angouleme.json')).assertNoDiff()
def test_city_wrong_timezone(self):
"""Load single city with wrong timezone."""
fixture_dir = FixtureDir('import')
self.import_data(
fixture_dir,
'angouleme_country',
'angouleme_region',
'angouleme_subregion',
'angouleme_city_wtz',
'angouleme_translations'
)
Fixture(fixture_dir.get_file_path('angouleme_wtz.json')).assertNoDiff()
from ..loading import get_cities_model
city_model = get_cities_model('City')
cities = city_model.objects.all()
for city in cities:
print(city.get_timezone_info().zone)
| [((29, 29, 29, 70), 'os.path.join', 'os.path.join', ({(29, 42, 29, 50): 'DATA_DIR', (29, 52, 29, 69): '"""angouleme_*.txt"""'}, {}), "(DATA_DIR, 'angouleme_*.txt')", False, 'import os\n'), ((31, 12, 31, 24), 'os.remove', 'os.remove', ({(31, 22, 31, 23): 'f'}, {}), '(f)', False, 'import os\n')] |
Cosik/HAHoymiles | custom_components/hoymiles/__init__.py | e956f8fafc4ae59d4c05755c6e8a5d5d7caa37f9 | import datetime
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
import homeassistant.helpers.config_validation as cv
from .const import (
CONF_PLANT_ID,
)
_LOGGER = logging.getLogger(__name__)
MIN_TIME_BETWEEN_UPDATES = datetime.timedelta(seconds=600)
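# throttle sensor updates to at most once every 10 minutes (600 s)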
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_PLANT_ID): cv.string,
}
) | [((14, 10, 14, 37), 'logging.getLogger', 'logging.getLogger', ({(14, 28, 14, 36): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((16, 27, 16, 58), 'datetime.timedelta', 'datetime.timedelta', (), '', False, 'import datetime\n'), ((20, 8, 20, 35), 'voluptuous.Required', 'vol.Required', ({(20, 21, 20, 34): 'CONF_USERNAME'}, {}), '(CONF_USERNAME)', True, 'import voluptuous as vol\n'), ((21, 8, 21, 35), 'voluptuous.Required', 'vol.Required', ({(21, 21, 21, 34): 'CONF_PASSWORD'}, {}), '(CONF_PASSWORD)', True, 'import voluptuous as vol\n'), ((22, 8, 22, 35), 'voluptuous.Required', 'vol.Required', ({(22, 21, 22, 34): 'CONF_PLANT_ID'}, {}), '(CONF_PLANT_ID)', True, 'import voluptuous as vol\n')] |
bluebibi/flask_rest | views/auth.py | 9b1ee876060bca5d97459bb894c73530f66c4c15 | from flask import Blueprint, redirect, render_template, request, flash, session
from database import base
from database.base import User
from forms import UserForm, LoginForm, MyPageUserForm
from flask_login import login_required, login_user, logout_user, current_user
import requests
auth_blueprint = Blueprint('auth', __name__)
kakao_oauth = {}
@auth_blueprint.route('/my_page', methods=['GET', 'POST'])
@login_required
def _user():
form = MyPageUserForm()
q = base.db_session.query(User).filter(User.email == current_user.email)
user = q.first()
if request.method == 'POST':
if form.validate_on_submit():
user.email = request.form['email']
user.name = request.form['name']
user.set_password(request.form['password'])
user.affiliation = request.form['affiliation']
base.db_session.commit()
flash('귀하의 회원정보가 수정 되었습니다.')
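            # flash message (Korean): "Your member information has been updated."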
return redirect('/auth/my_page')
return render_template("my_page.html", user=user, form=form, kakao_oauth=kakao_oauth)
def login_process(email, password):
q = base.db_session.query(User).filter(User.email == email)
user = q.first()
if user:
if user.authenticate(password):
login_result = login_user(user)
if login_result:
print("사용자(사용자 이메일:{0})의 로그인 성공!".format(current_user.email))
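                # logs (Korean): "Login succeeded for user (email: {0})!"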
return '/'
else:
flash('비밀번호를 다시 확인하여 입력해주세요.')
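            # flash message (Korean): "Please double-check your password and try again."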
return '/auth/login'
else:
flash('이메일 및 비밀번호를 다시 확인하여 입력해주세요.')
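        # flash message (Korean): "Please double-check the email and password you entered."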
return '/auth/login'
@auth_blueprint.route('/login', methods=['GET', 'POST'])
def login():
if current_user.is_authenticated:
return redirect('/')
form = LoginForm()
if request.method == 'POST':
if form.validate_on_submit():
redirect_url = login_process(form.data['email'], form.data['password'])
return redirect(redirect_url)
return render_template('login.html', form=form, current_user=current_user)
@auth_blueprint.route('/kakao_oauth_redirect')
def kakao_oauth_redirect():
code = str(request.args.get('code'))
url = "https://kauth.kakao.com/oauth/token"
data = "grant_type=authorization_code" \
"&client_id=0eb67d9cd0372c01d3915bbd934b4f6d" \
"&redirect_uri=http://localhost:8080/auth/kakao_oauth_redirect" \
"&code={0}".format(code)
headers = {
"Content-Type": "application/x-www-form-urlencoded;charset=utf-8",
"Cache-Control": "no-cache"
}
response = requests.post(
url=url,
data=data,
headers=headers
)
#print("kakao_oauth_redirect", response.json())
kakao_oauth["access_token"] = response.json()["access_token"]
kakao_oauth["expires_in"] = response.json()["expires_in"]
kakao_oauth["refresh_token"] = response.json()["refresh_token"]
kakao_oauth["refresh_token_expires_in"] = response.json()["refresh_token_expires_in"]
kakao_oauth["scope"] = response.json()["scope"]
kakao_oauth["token_type"] = response.json()["token_type"]
if "kaccount_email" not in kakao_oauth or kakao_oauth['kaccount_email'] is None:
kakao_me_and_signup()
redirect_url = login_process(kakao_oauth["kaccount_email"], "1234")
return redirect(redirect_url)
def kakao_me_and_signup():
url = "https://kapi.kakao.com/v1/user/me"
headers = {
"Authorization": "Bearer {0}".format(kakao_oauth["access_token"]),
"Content-Type": "application/x-www-form-urlencoded;charset=utf-8"
}
response = requests.post(
url=url,
headers=headers
)
#print("kakao_me_and_signup", response.json())
kakao_oauth["kaccount_email"] = response.json()["kaccount_email"]
kakao_oauth["id"] = response.json()["id"]
kakao_oauth["kakao_profile_image"] = response.json()["properties"]["profile_image"]
kakao_oauth["nickname"] = response.json()["properties"]["nickname"]
kakao_oauth["kakao_thumbnail_image"] = response.json()["properties"]["thumbnail_image"]
c = base.db_session.query(User).filter(User.email == kakao_oauth["kaccount_email"]).count()
if c == 0:
user = User(name=kakao_oauth["nickname"], email=kakao_oauth["kaccount_email"], affiliation=None)
user.set_password("1234")
base.db_session.add(user)
base.db_session.commit()
def kakao_logout():
url = "https://kapi.kakao.com/v1/user/logout"
headers = {
"Authorization": "Bearer {0}".format(kakao_oauth["access_token"])
}
response = requests.post(
url=url,
headers=headers
)
if response.status_code == 200:
kakao_oauth["kaccount_email"] = None
kakao_oauth["id"] = None
kakao_oauth["kakao_profile_image"] = None
kakao_oauth["nickname"] = None
kakao_oauth["kakao_thumbnail_image"] = None
@auth_blueprint.route("/logout")
@login_required
def logout():
logout_user()
if kakao_oauth and "kaccount_email" in kakao_oauth:
kakao_logout()
return redirect('/')
@auth_blueprint.route('/signup', methods=['GET', 'POST'])
def signup():
form = UserForm()
if request.method == 'POST':
if form.validate_on_submit():
new_user = User()
new_user.email = request.form['email']
new_user.name = request.form['name']
new_user.set_password(request.form['password'])
new_user.affiliation = request.form['affiliation']
base.db_session.add(new_user)
base.db_session.commit()
flash('귀하는 회원가입이 성공적으로 완료되었습니다. 가입하신 정보로 로그인을 다시 하시기 바랍니다.')
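            # flash message (Korean): "Sign-up completed successfully. Please log in with the details you registered."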
return redirect('/auth/login')
return render_template("signup.html", form=form) | [((9, 17, 9, 44), 'flask.Blueprint', 'Blueprint', ({(9, 27, 9, 33): '"""auth"""', (9, 35, 9, 43): '__name__'}, {}), "('auth', __name__)", False, 'from flask import Blueprint, redirect, render_template, request, flash, session\n'), ((16, 11, 16, 27), 'forms.MyPageUserForm', 'MyPageUserForm', ({}, {}), '()', False, 'from forms import UserForm, LoginForm, MyPageUserForm\n'), ((31, 11, 31, 89), 'flask.render_template', 'render_template', (), '', False, 'from flask import Blueprint, redirect, render_template, request, flash, session\n'), ((57, 11, 57, 22), 'forms.LoginForm', 'LoginForm', ({}, {}), '()', False, 'from forms import UserForm, LoginForm, MyPageUserForm\n'), ((64, 11, 64, 78), 'flask.render_template', 'render_template', (), '', False, 'from flask import Blueprint, redirect, render_template, request, flash, session\n'), ((79, 15, 83, 5), 'requests.post', 'requests.post', (), '', False, 'import requests\n'), ((98, 11, 98, 33), 'flask.redirect', 'redirect', ({(98, 20, 98, 32): 'redirect_url'}, {}), '(redirect_url)', False, 'from flask import Blueprint, redirect, render_template, request, flash, session\n'), ((108, 15, 111, 5), 'requests.post', 'requests.post', (), '', False, 'import requests\n'), ((134, 15, 137, 5), 'requests.post', 'requests.post', (), '', False, 'import requests\n'), ((150, 4, 150, 17), 'flask_login.logout_user', 'logout_user', ({}, {}), '()', False, 'from flask_login import login_required, login_user, logout_user, current_user\n'), ((154, 11, 154, 24), 'flask.redirect', 'redirect', ({(154, 20, 154, 23): '"""/"""'}, {}), "('/')", False, 'from flask import Blueprint, redirect, render_template, request, flash, session\n'), ((159, 11, 159, 21), 'forms.UserForm', 'UserForm', ({}, {}), '()', False, 'from forms import UserForm, LoginForm, MyPageUserForm\n'), ((174, 11, 174, 52), 'flask.render_template', 'render_template', (), '', False, 'from flask import Blueprint, redirect, render_template, request, flash, session\n'), ((48, 8, 48, 86), 'flask.flash', 'flash', ({(48, 14, 48, 85): '"""이메일 및 비밀번호를 다시 확인하여 입력해주세요."""'}, {}), "('이메일 및 비밀번호를 다시 확인하여 입력해주세요.')", False, 'from flask import Blueprint, redirect, render_template, request, flash, session\n'), ((55, 15, 55, 28), 'flask.redirect', 'redirect', ({(55, 24, 55, 27): '"""/"""'}, {}), "('/')", False, 'from flask import Blueprint, redirect, render_template, request, flash, session\n'), ((69, 15, 69, 39), 'flask.request.args.get', 'request.args.get', ({(69, 32, 69, 38): '"""code"""'}, {}), "('code')", False, 'from flask import Blueprint, redirect, render_template, request, flash, session\n'), ((123, 15, 123, 104), 'database.base.User', 'User', (), '', False, 'from database.base import User\n'), ((125, 8, 125, 33), 'database.base.db_session.add', 'base.db_session.add', ({(125, 28, 125, 32): 'user'}, {}), '(user)', False, 'from database import base\n'), ((126, 8, 126, 32), 'database.base.db_session.commit', 'base.db_session.commit', ({}, {}), '()', False, 'from database import base\n'), ((18, 8, 18, 35), 'database.base.db_session.query', 'base.db_session.query', ({(18, 30, 18, 34): 'User'}, {}), '(User)', False, 'from database import base\n'), ((27, 12, 27, 36), 'database.base.db_session.commit', 'base.db_session.commit', ({}, {}), '()', False, 'from database import base\n'), ((28, 12, 28, 70), 'flask.flash', 'flash', ({(28, 18, 28, 69): '"""귀하의 회원정보가 수정 되었습니다."""'}, {}), "('귀하의 회원정보가 수정 되었습니다.')", False, 'from flask import Blueprint, redirect, render_template, request, flash, 
session\n'), ((29, 19, 29, 44), 'flask.redirect', 'redirect', ({(29, 28, 29, 43): '"""/auth/my_page"""'}, {}), "('/auth/my_page')", False, 'from flask import Blueprint, redirect, render_template, request, flash, session\n'), ((35, 8, 35, 35), 'database.base.db_session.query', 'base.db_session.query', ({(35, 30, 35, 34): 'User'}, {}), '(User)', False, 'from database import base\n'), ((40, 27, 40, 43), 'flask_login.login_user', 'login_user', ({(40, 38, 40, 42): 'user'}, {}), '(user)', False, 'from flask_login import login_required, login_user, logout_user, current_user\n'), ((45, 12, 45, 76), 'flask.flash', 'flash', ({(45, 18, 45, 75): '"""비밀번호를 다시 확인하여 입력해주세요."""'}, {}), "('비밀번호를 다시 확인하여 입력해주세요.')", False, 'from flask import Blueprint, redirect, render_template, request, flash, session\n'), ((62, 19, 62, 41), 'flask.redirect', 'redirect', ({(62, 28, 62, 40): 'redirect_url'}, {}), '(redirect_url)', False, 'from flask import Blueprint, redirect, render_template, request, flash, session\n'), ((163, 23, 163, 29), 'database.base.User', 'User', ({}, {}), '()', False, 'from database.base import User\n'), ((169, 12, 169, 41), 'database.base.db_session.add', 'base.db_session.add', ({(169, 32, 169, 40): 'new_user'}, {}), '(new_user)', False, 'from database import base\n'), ((170, 12, 170, 36), 'database.base.db_session.commit', 'base.db_session.commit', ({}, {}), '()', False, 'from database import base\n'), ((171, 12, 171, 152), 'flask.flash', 'flash', ({(171, 18, 171, 151): '"""귀하는 회원가입이 성공적으로 완료되었습니다. 가입하신 정보로 로그인을 다시 하시기 바랍니다."""'}, {}), "('귀하는 회원가입이 성공적으로 완료되었습니다. 가입하신 정보로 로그인을 다시 하시기 바랍니다.')", False, 'from flask import Blueprint, redirect, render_template, request, flash, session\n'), ((172, 19, 172, 42), 'flask.redirect', 'redirect', ({(172, 28, 172, 41): '"""/auth/login"""'}, {}), "('/auth/login')", False, 'from flask import Blueprint, redirect, render_template, request, flash, session\n'), ((121, 8, 121, 35), 'database.base.db_session.query', 'base.db_session.query', ({(121, 30, 121, 34): 'User'}, {}), '(User)', False, 'from database import base\n')] |
hplgit/fem-book | doc/.src/book/exer/cable_sin.py | c23099715dc3cb72e7f4d37625e6f9614ee5fc4e | import matplotlib.pyplot as plt
def model():
"""Solve u'' = -1, u(0)=0, u'(1)=0."""
import sympy as sym
x, c_0, c_1, = sym.symbols('x c_0 c_1')
u_x = sym.integrate(1, (x, 0, x)) + c_0
u = sym.integrate(u_x, (x, 0, x)) + c_1
r = sym.solve([u.subs(x,0) - 0,
sym.diff(u,x).subs(x, 1) - 0],
[c_0, c_1])
u = u.subs(c_0, r[c_0]).subs(c_1, r[c_1])
u = sym.simplify(sym.expand(u))
return u
def midpoint_rule(f, M=100000):
"""Integrate f(x) over [0,1] using M intervals."""
from numpy import sum, linspace
dx = 1.0/M # interval length
x = linspace(dx/2, 1-dx/2, M) # integration points
return dx*sum(f(x))
def check_integral_b():
from numpy import pi, sin
for i in range(12):
exact = 2/(pi*(2*i+1))
numerical = midpoint_rule(
f=lambda x: sin((2*i+1)*pi*x/2))
print(i, abs(exact - numerical))
def sine_sum(x, N):
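    # Partial sums of the sine expansion u_N(x) = -sum_{i=0..N} 16/((2i+1)^3*pi^3) * sin((2i+1)*pi*x/2).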
s = 0
from numpy import pi, sin, zeros
u = [] # u[k] is the sum i=0,...,k
k = 0
for i in range(N+1):
s += - 16.0/((2*i+1)**3*pi**3)*sin((2*i+1)*pi*x/2)
u.append(s.copy()) # important with copy!
return u
def plot_sine_sum():
from numpy import linspace
x = linspace(0, 1, 501) # coordinates for plot
u = sine_sum(x, N=10)
u_e = 0.5*x*(x-2)
N_values = 0, 1, 10
for k in N_values:
plt.plot(x, u[k])
plt.plot(x, u_e)
plt.legend(['N=%d' % k for k in N_values] + ['exact'],
loc='upper right')
plt.xlabel('$x$'); plt.ylabel('$u$')
plt.savefig('tmpc.png'); plt.savefig('tmpc.pdf')
def check_integral_d():
from numpy import pi, sin
for i in range(24):
if i % 2 == 0:
exact = 2/(pi*(i+1))
elif (i-1) % 4 == 0:
exact = 2*2/(pi*(i+1))
else:
exact = 0
numerical = midpoint_rule(
f=lambda x: sin((i+1)*pi*x/2))
print(i, abs(exact - numerical))
def check_integral_d_sympy_answer():
from numpy import pi, sin
for i in range(12):
exact = 2/(pi*(i+1))
numerical = midpoint_rule(
f=lambda x: sin((i+1)*pi*x/2))
print(i, abs(exact - numerical))
def sine_sum_d(x, N):
s = 0
from numpy import pi, sin, zeros
u = [] # u[k] is the sum i=0,...,k
k = 0
for i in range(N+1):
if i % 2 == 0: # even i
s += - 16.0/((i+1)**3*pi**3)*sin((i+1)*pi*x/2)
elif (i-1) % 4 == 0: # 1, 5, 9, 13, 17
s += - 2*16.0/((i+1)**3*pi**3)*sin((i+1)*pi*x/2)
else:
s += 0
u.append(s.copy())
return u
def plot_sine_sum_d():
from numpy import linspace
x = linspace(0, 1, 501) # coordinates for plot
u = sine_sum_d(x, N=20)
u_e = 0.5*x*(x-2)
N_values = 0, 1, 2, 3, 20
for k in N_values:
plt.plot(x, u[k])
plt.plot(x, u_e)
plt.legend(['N=%d' % k for k in N_values] + ['exact'],
loc='upper right')
plt.xlabel('$x$'); plt.ylabel('$u$')
#plt.axis([0.9, 1, -0.52, -0.49])
plt.savefig('tmpd.png'); plt.savefig('tmpd.pdf')
if __name__ == '__main__':
import sys
print(model())
print('sine 2*i+1 integral:')
check_integral_b()
print('sine i+1 integral, sympy answer:')
check_integral_d_sympy_answer()
print('sine i+1 integral:')
check_integral_d()
#sys.exit(0)
plot_sine_sum()
plt.figure()
plot_sine_sum_d()
plt.show()
| [((6, 19, 6, 43), 'sympy.symbols', 'sym.symbols', ({(6, 31, 6, 42): '"""x c_0 c_1"""'}, {}), "('x c_0 c_1')", True, 'import sympy as sym\n'), ((20, 8, 20, 33), 'numpy.linspace', 'linspace', ({(20, 17, 20, 21): 'dx / 2', (20, 23, 20, 29): '1 - dx / 2', (20, 31, 20, 32): 'M'}, {}), '(dx / 2, 1 - dx / 2, M)', False, 'from numpy import linspace\n'), ((43, 8, 43, 27), 'numpy.linspace', 'linspace', ({(43, 17, 43, 18): '0', (43, 20, 43, 21): '1', (43, 23, 43, 26): '501'}, {}), '(0, 1, 501)', False, 'from numpy import linspace\n'), ((49, 4, 49, 20), 'matplotlib.pyplot.plot', 'plt.plot', ({(49, 13, 49, 14): 'x', (49, 16, 49, 19): 'u_e'}, {}), '(x, u_e)', True, 'import matplotlib.pyplot as plt\n'), ((50, 4, 51, 33), 'matplotlib.pyplot.legend', 'plt.legend', (), '', True, 'import matplotlib.pyplot as plt\n'), ((52, 4, 52, 21), 'matplotlib.pyplot.xlabel', 'plt.xlabel', ({(52, 15, 52, 20): '"""$x$"""'}, {}), "('$x$')", True, 'import matplotlib.pyplot as plt\n'), ((52, 24, 52, 41), 'matplotlib.pyplot.ylabel', 'plt.ylabel', ({(52, 35, 52, 40): '"""$u$"""'}, {}), "('$u$')", True, 'import matplotlib.pyplot as plt\n'), ((53, 4, 53, 27), 'matplotlib.pyplot.savefig', 'plt.savefig', ({(53, 16, 53, 26): '"""tmpc.png"""'}, {}), "('tmpc.png')", True, 'import matplotlib.pyplot as plt\n'), ((53, 29, 53, 52), 'matplotlib.pyplot.savefig', 'plt.savefig', ({(53, 41, 53, 51): '"""tmpc.pdf"""'}, {}), "('tmpc.pdf')", True, 'import matplotlib.pyplot as plt\n'), ((93, 8, 93, 27), 'numpy.linspace', 'linspace', ({(93, 17, 93, 18): '0', (93, 20, 93, 21): '1', (93, 23, 93, 26): '501'}, {}), '(0, 1, 501)', False, 'from numpy import linspace\n'), ((99, 4, 99, 20), 'matplotlib.pyplot.plot', 'plt.plot', ({(99, 13, 99, 14): 'x', (99, 16, 99, 19): 'u_e'}, {}), '(x, u_e)', True, 'import matplotlib.pyplot as plt\n'), ((100, 4, 101, 33), 'matplotlib.pyplot.legend', 'plt.legend', (), '', True, 'import matplotlib.pyplot as plt\n'), ((102, 4, 102, 21), 'matplotlib.pyplot.xlabel', 'plt.xlabel', ({(102, 15, 102, 20): '"""$x$"""'}, {}), "('$x$')", True, 'import matplotlib.pyplot as plt\n'), ((102, 24, 102, 41), 'matplotlib.pyplot.ylabel', 'plt.ylabel', ({(102, 35, 102, 40): '"""$u$"""'}, {}), "('$u$')", True, 'import matplotlib.pyplot as plt\n'), ((104, 4, 104, 27), 'matplotlib.pyplot.savefig', 'plt.savefig', ({(104, 16, 104, 26): '"""tmpd.png"""'}, {}), "('tmpd.png')", True, 'import matplotlib.pyplot as plt\n'), ((104, 29, 104, 52), 'matplotlib.pyplot.savefig', 'plt.savefig', ({(104, 41, 104, 51): '"""tmpd.pdf"""'}, {}), "('tmpd.pdf')", True, 'import matplotlib.pyplot as plt\n'), ((118, 4, 118, 16), 'matplotlib.pyplot.figure', 'plt.figure', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((120, 4, 120, 14), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((7, 10, 7, 37), 'sympy.integrate', 'sym.integrate', ({(7, 24, 7, 25): '(1)', (7, 27, 7, 36): '(x, 0, x)'}, {}), '(1, (x, 0, x))', True, 'import sympy as sym\n'), ((8, 8, 8, 37), 'sympy.integrate', 'sym.integrate', ({(8, 22, 8, 25): 'u_x', (8, 27, 8, 36): '(x, 0, x)'}, {}), '(u_x, (x, 0, x))', True, 'import sympy as sym\n'), ((13, 21, 13, 34), 'sympy.expand', 'sym.expand', ({(13, 32, 13, 33): 'u'}, {}), '(u)', True, 'import sympy as sym\n'), ((48, 8, 48, 25), 'matplotlib.pyplot.plot', 'plt.plot', ({(48, 17, 48, 18): 'x', (48, 20, 48, 24): 'u[k]'}, {}), '(x, u[k])', True, 'import matplotlib.pyplot as plt\n'), ((98, 8, 98, 25), 'matplotlib.pyplot.plot', 'plt.plot', ({(98, 17, 98, 18): 'x', (98, 20, 98, 24): 'u[k]'}, {}), '(x, 
u[k])', True, 'import matplotlib.pyplot as plt\n'), ((37, 39, 37, 58), 'numpy.sin', 'sin', ({(37, 43, 37, 57): '((2 * i + 1) * pi * x / 2)'}, {}), '((2 * i + 1) * pi * x / 2)', False, 'from numpy import pi, sin, zeros\n'), ((83, 43, 83, 60), 'numpy.sin', 'sin', ({(83, 47, 83, 59): '((i + 1) * pi * x / 2)'}, {}), '((i + 1) * pi * x / 2)', False, 'from numpy import pi, sin, zeros\n'), ((28, 24, 28, 43), 'numpy.sin', 'sin', ({(28, 28, 28, 42): '(2 * i + 1) * pi * x / 2'}, {}), '((2 * i + 1) * pi * x / 2)', False, 'from numpy import pi, sin, zeros\n'), ((65, 24, 65, 41), 'numpy.sin', 'sin', ({(65, 28, 65, 40): '(i + 1) * pi * x / 2'}, {}), '((i + 1) * pi * x / 2)', False, 'from numpy import pi, sin, zeros\n'), ((73, 24, 73, 41), 'numpy.sin', 'sin', ({(73, 28, 73, 40): '(i + 1) * pi * x / 2'}, {}), '((i + 1) * pi * x / 2)', False, 'from numpy import pi, sin, zeros\n'), ((85, 43, 85, 60), 'numpy.sin', 'sin', ({(85, 47, 85, 59): '((i + 1) * pi * x / 2)'}, {}), '((i + 1) * pi * x / 2)', False, 'from numpy import pi, sin, zeros\n'), ((10, 19, 10, 32), 'sympy.diff', 'sym.diff', ({(10, 28, 10, 29): 'u', (10, 30, 10, 31): 'x'}, {}), '(u, x)', True, 'import sympy as sym\n')] |
liuliu663/speaker-recognition-py3 | skgmm.py | 8fd0f77ac011e4a11c7cac751dc985b9cd1f2c4d | from sklearn.mixture import GaussianMixture
import operator
import numpy as np
import math
class GMMSet:
def __init__(self, gmm_order = 32):
self.gmms = []
self.gmm_order = gmm_order
self.y = []
def fit_new(self, x, label):
self.y.append(label)
gmm = GaussianMixture(self.gmm_order)
gmm.fit(x)
self.gmms.append(gmm)
def gmm_score(self, gmm, x):
return np.sum(gmm.score(x))
@staticmethod
def softmax(scores):
scores_sum = sum([math.exp(i) for i in scores])
score_max = math.exp(max(scores))
return round(score_max / scores_sum, 3)
def predict_one(self, x):
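        # score each enrolled speaker's GMM by its average per-frame log-likelihood on x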
scores = [self.gmm_score(gmm, x) / len(x) for gmm in self.gmms]
p = sorted(enumerate(scores), key=operator.itemgetter(1), reverse=True)
p = [(str(self.y[i]), y, p[0][1] - y) for i, y in p]
result = [(self.y[index], value) for (index, value) in enumerate(scores)]
p = max(result, key=operator.itemgetter(1))
softmax_score = self.softmax(scores)
return p[0], softmax_score
def before_pickle(self):
pass
def after_pickle(self):
pass
| [((16, 14, 16, 45), 'sklearn.mixture.GaussianMixture', 'GaussianMixture', ({(16, 30, 16, 44): 'self.gmm_order'}, {}), '(self.gmm_order)', False, 'from sklearn.mixture import GaussianMixture\n'), ((25, 26, 25, 37), 'math.exp', 'math.exp', ({(25, 35, 25, 36): 'i'}, {}), '(i)', False, 'import math\n'), ((31, 42, 31, 64), 'operator.itemgetter', 'operator.itemgetter', ({(31, 62, 31, 63): '1'}, {}), '(1)', False, 'import operator\n'), ((34, 28, 34, 50), 'operator.itemgetter', 'operator.itemgetter', ({(34, 48, 34, 49): '1'}, {}), '(1)', False, 'import operator\n')] |
apcarrik/kaggle | duke-cs671-fall21-coupon-recommendation/outputs/rules/RF/12_features/numtrees_20/rule_6.py | 6e2d4db58017323e7ba5510bcc2598e01a4ee7bf | def findDecision(obj): #obj[0]: Passanger, obj[1]: Time, obj[2]: Coupon, obj[3]: Gender, obj[4]: Age, obj[5]: Education, obj[6]: Occupation, obj[7]: Bar, obj[8]: Coffeehouse, obj[9]: Restaurant20to50, obj[10]: Direction_same, obj[11]: Distance
# {"feature": "Age", "instances": 51, "metric_value": 0.9662, "depth": 1}
if obj[4]>0:
# {"feature": "Occupation", "instances": 44, "metric_value": 0.9024, "depth": 2}
if obj[6]>1:
# {"feature": "Bar", "instances": 33, "metric_value": 0.9834, "depth": 3}
if obj[7]<=1.0:
# {"feature": "Education", "instances": 22, "metric_value": 0.994, "depth": 4}
if obj[5]>0:
# {"feature": "Passanger", "instances": 17, "metric_value": 0.9774, "depth": 5}
if obj[0]<=2:
# {"feature": "Time", "instances": 11, "metric_value": 0.994, "depth": 6}
if obj[1]<=2:
# {"feature": "Restaurant20to50", "instances": 8, "metric_value": 0.9544, "depth": 7}
if obj[9]>0.0:
# {"feature": "Coffeehouse", "instances": 6, "metric_value": 0.65, "depth": 8}
if obj[8]<=2.0:
return 'True'
elif obj[8]>2.0:
return 'False'
else: return 'False'
elif obj[9]<=0.0:
return 'False'
else: return 'False'
elif obj[1]>2:
return 'False'
else: return 'False'
elif obj[0]>2:
# {"feature": "Gender", "instances": 6, "metric_value": 0.65, "depth": 6}
if obj[3]>0:
return 'True'
elif obj[3]<=0:
# {"feature": "Time", "instances": 2, "metric_value": 1.0, "depth": 7}
if obj[1]<=2:
return 'True'
elif obj[1]>2:
return 'False'
else: return 'False'
else: return 'True'
else: return 'True'
elif obj[5]<=0:
return 'False'
else: return 'False'
elif obj[7]>1.0:
# {"feature": "Coupon", "instances": 11, "metric_value": 0.684, "depth": 4}
if obj[2]>2:
return 'True'
elif obj[2]<=2:
# {"feature": "Direction_same", "instances": 4, "metric_value": 1.0, "depth": 5}
if obj[10]>0:
return 'True'
elif obj[10]<=0:
return 'False'
else: return 'False'
else: return 'True'
else: return 'True'
elif obj[6]<=1:
return 'True'
else: return 'True'
elif obj[4]<=0:
# {"feature": "Passanger", "instances": 7, "metric_value": 0.5917, "depth": 2}
if obj[0]>0:
return 'False'
elif obj[0]<=0:
return 'True'
else: return 'True'
else: return 'False'
| [] |
kennethwdk/PINet | lib/loss/__init__.py | 3a0abbd653146c56e39612384891c94c3fb49b35 | from .heatmaploss import HeatmapLoss
from .offsetloss import OffsetLoss
from .refineloss import RefineLoss | [] |
AndrewBezold/trinity | eth2/beacon/types/historical_batch.py | bc656da4dece431a0c929a99349d45faf75decf8 | from typing import Sequence
from eth.constants import ZERO_HASH32
from eth_typing import Hash32
import ssz
from ssz.sedes import Vector, bytes32
from eth2.configs import Eth2Config
from .defaults import default_tuple, default_tuple_of_size
class HistoricalBatch(ssz.Serializable):
fields = [("block_roots", Vector(bytes32, 1)), ("state_roots", Vector(bytes32, 1))]
def __init__(
self,
*,
block_roots: Sequence[Hash32] = default_tuple,
state_roots: Sequence[Hash32] = default_tuple,
config: Eth2Config = None
) -> None:
if config:
# try to provide sane defaults
if block_roots == default_tuple:
block_roots = default_tuple_of_size(
config.SLOTS_PER_HISTORICAL_ROOT, ZERO_HASH32
)
if state_roots == default_tuple:
state_roots = default_tuple_of_size(
config.SLOTS_PER_HISTORICAL_ROOT, ZERO_HASH32
)
super().__init__(block_roots=block_roots, state_roots=state_roots)
| [((15, 30, 15, 48), 'ssz.sedes.Vector', 'Vector', ({(15, 37, 15, 44): 'bytes32', (15, 46, 15, 47): '(1)'}, {}), '(bytes32, 1)', False, 'from ssz.sedes import Vector, bytes32\n'), ((15, 67, 15, 85), 'ssz.sedes.Vector', 'Vector', ({(15, 74, 15, 81): 'bytes32', (15, 83, 15, 84): '(1)'}, {}), '(bytes32, 1)', False, 'from ssz.sedes import Vector, bytes32\n')] |
nikosk/fastAPI-microservice-example- | app/settings.py | a1a61ab4e521bc0c48eee5b3a755db134c098546 | import os
from pydantic import BaseSettings
class Settings(BaseSettings):
DEBUG: bool
DATABASE_URL: str
class Config:
env_file = os.getenv("CONFIG_FILE", ".env")
| [((11, 19, 11, 51), 'os.getenv', 'os.getenv', ({(11, 29, 11, 42): '"""CONFIG_FILE"""', (11, 44, 11, 50): '""".env"""'}, {}), "('CONFIG_FILE', '.env')", False, 'import os\n')] |
john-science/ADVECTOR | ADVECTOR/io_tools/create_bathymetry.py | 5c5ca7595c2c051f1a088b1f0e694936c3da3610 | import numpy as np
import xarray as xr
def create_bathymetry_from_land_mask(land_mask: xr.DataArray) -> xr.DataArray:
"""Method: identifies the lower depth bound of the shallowest
ocean cell (non-null) in each vertical grid column.
    :param land_mask: dimensions {time, depth, lat, lon}, boolean array, True where cell is land"""
assert np.all(land_mask.depth <= 0), "depth coordinate must be positive up"
assert np.all(
np.diff(land_mask.depth) > 0
), "depth coordinate must be sorted ascending"
# In the kernel, particles look up data based on the nearest cell-center.
# Thus cell bounds are the midpoints between each centers.
# Very top cell bound is surface, and bottom cell bounds are
# assumed to be symmetric about bottom cell center.
depth_diff = np.diff(land_mask.depth)
depth_bnds = np.concatenate(
[
land_mask.depth.values[:1] - depth_diff[0] / 2,
land_mask.depth.values[:-1] + depth_diff / 2,
[0],
]
)
bathy = (
(~land_mask)
.assign_coords({"depth": depth_bnds[:-1]})
.idxmax(dim="depth")
.where(~land_mask.isel(depth=-1), depth_bnds[-1])
)
bathy = bathy.drop(["time", "depth"])
bathy.name = "bathymetry"
bathy.attrs = {"units": "m", "positive": "up"}
return bathy
| [((9, 11, 9, 39), 'numpy.all', 'np.all', ({(9, 18, 9, 38): '(land_mask.depth <= 0)'}, {}), '(land_mask.depth <= 0)', True, 'import numpy as np\n'), ((18, 17, 18, 41), 'numpy.diff', 'np.diff', ({(18, 25, 18, 40): 'land_mask.depth'}, {}), '(land_mask.depth)', True, 'import numpy as np\n'), ((19, 17, 25, 5), 'numpy.concatenate', 'np.concatenate', ({(20, 8, 24, 9): '[land_mask.depth.values[:1] - depth_diff[0] / 2, land_mask.depth.values[:-1\n ] + depth_diff / 2, [0]]'}, {}), '([land_mask.depth.values[:1] - depth_diff[0] / 2, land_mask.\n depth.values[:-1] + depth_diff / 2, [0]])', True, 'import numpy as np\n'), ((11, 8, 11, 32), 'numpy.diff', 'np.diff', ({(11, 16, 11, 31): 'land_mask.depth'}, {}), '(land_mask.depth)', True, 'import numpy as np\n')] |
cr2630git/unitconvert | unitconvert/distance.py | 64a530f53b27a9412988877c7ae1b3b34f9ce8a6 | """
A simple python module for converting kilometers to miles or vice versa.
So simple that it doesn't even have any dependencies.
"""
def kilometers_to_miles(dist_in_km):
"""
Actually does the conversion of distance from km to mi.
PARAMETERS
    ----------
dist_in_km: float
A distance in kilometers.
RETURNS
-------
dist_in_mi: float
The same distance converted to miles.
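    EXAMPLES
    --------
    >>> kilometers_to_miles(1.609344)
    1.0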
"""
return (dist_in_km)/1.609344
def miles_to_kilometers(dist_in_mi):
"""
Actually does the conversion of distance from mi to km.
PARAMETERS
----------
dist_in_mi: float
        A distance in miles.
RETURNS
-------
dist_in_km: float
The same distance converted to kilometers.
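    EXAMPLES
    --------
    >>> miles_to_kilometers(1)
    1.609344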
"""
return (dist_in_mi)*1.609344
| [] |
I-TECH-UW/mwachx | contacts/migrations_old/0006_data_status.py | e191755c3369208d678fceec68dbb4f5f51c453a | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import itertools as it
from django.db import models, migrations
def convert_status(apps, schema_editor):
''' Migrate Visit.skipped and ScheduledPhoneCall.skipped -> status
(pending,missed,deleted,attended)
'''
Visit = apps.get_model("contacts","Visit")
ScheduledPhoneCall = apps.get_model("contacts","ScheduledPhoneCall")
for obj in it.chain(Visit.objects.all(), ScheduledPhoneCall.objects.all()):
if obj.skipped is None:
obj.status = 'pending'
elif obj.skipped == False:
obj.status = 'attended'
elif obj.skipped == True:
obj.status = 'missed'
obj.save()
def unconvert_status(apps, schema_editor):
''' Reverse function sets skipped based on status'''
Visit = apps.get_model("contacts","Visit")
ScheduledPhoneCall = apps.get_model("contacts","ScheduledPhoneCall")
for obj in it.chain(Visit.objects.all(), ScheduledPhoneCall.objects.all()):
if obj.status == 'pending':
obj.skipped = None
elif obj.status == 'attended':
obj.skipped = False
elif obj.status == 'missed':
obj.skipped = True
obj.save()
class Migration(migrations.Migration):
dependencies = [
('contacts', '0005_auto_add_visit_status'),
]
operations = [
migrations.RunPython(convert_status,unconvert_status),
]
| [((46, 8, 46, 61), 'django.db.migrations.RunPython', 'migrations.RunPython', ({(46, 29, 46, 43): 'convert_status', (46, 44, 46, 60): 'unconvert_status'}, {}), '(convert_status, unconvert_status)', False, 'from django.db import models, migrations\n')] |
One-Green/plant-keeper-master | core/tests/test_base_time_range_controller.py | 67101a4cc7070d26fd1685631a710ae9a60fc5e8 | import os
import sys
from datetime import time
import unittest
sys.path.append(
os.path.dirname(
        os.path.dirname(os.path.join("..", "..", "..", os.path.dirname(__file__)))
)
)
from core.controller import BaseTimeRangeController
class TestTimeRangeController(unittest.TestCase):
def test_time_range(self):
start_at = time(10, 0, 0)
end_at = time(12, 0, 0)
time_range_controller = BaseTimeRangeController(start_at, end_at)
time_now = time(11, 0, 0)
time_range_controller.set_current_time(time_now)
self.assertTrue(time_range_controller.action)
time_now = time(12, 15, 0)
time_range_controller.set_current_time(time_now)
self.assertFalse(time_range_controller.action)
if __name__ == "__main__":
unittest.main()
| [((30, 4, 30, 19), 'unittest.main', 'unittest.main', ({}, {}), '()', False, 'import unittest\n'), ((16, 19, 16, 33), 'datetime.time', 'time', ({(16, 24, 16, 26): '10', (16, 28, 16, 29): '0', (16, 31, 16, 32): '0'}, {}), '(10, 0, 0)', False, 'from datetime import time\n'), ((17, 17, 17, 31), 'datetime.time', 'time', ({(17, 22, 17, 24): '12', (17, 26, 17, 27): '0', (17, 29, 17, 30): '0'}, {}), '(12, 0, 0)', False, 'from datetime import time\n'), ((18, 32, 18, 73), 'core.controller.BaseTimeRangeController', 'BaseTimeRangeController', ({(18, 56, 18, 64): 'start_at', (18, 66, 18, 72): 'end_at'}, {}), '(start_at, end_at)', False, 'from core.controller import BaseTimeRangeController\n'), ((20, 19, 20, 33), 'datetime.time', 'time', ({(20, 24, 20, 26): '11', (20, 28, 20, 29): '0', (20, 31, 20, 32): '0'}, {}), '(11, 0, 0)', False, 'from datetime import time\n'), ((24, 19, 24, 34), 'datetime.time', 'time', ({(24, 24, 24, 26): '12', (24, 28, 24, 30): '15', (24, 32, 24, 33): '0'}, {}), '(12, 15, 0)', False, 'from datetime import time\n'), ((8, 55, 8, 82), 'os.path.dirname', 'os.path.dirname', ({(8, 71, 8, 81): '"""__file__"""'}, {}), "('__file__')", False, 'import os\n')] |
jurganson/spingen | generator_code/mp3_generator.py | f8421a26356d0cd1d94a0692846791eb45fce6f5 | from gtts import gTTS as ttos
from pydub import AudioSegment
import os
def generate_mp3 (segments, fade_ms, speech_gain, comment_fade_ms, language = "en", output_file_name = "generated_program_sound") :
def apply_comments (exercise_audio, segment) :
new_exercise_audio = exercise_audio
for comment in segment.comments :
comment_audio = comment["comment_audio"]
comment_time_ms = comment["second"]*1000 + comment["minute"]*60000
part_01 = new_exercise_audio[comment_time_ms:comment_time_ms+len(comment_audio)+comment_fade_ms*2]
part_02 = part_01.fade(to_gain=-speech_gain, start=0, end=comment_fade_ms)
part_02 = part_02.fade(to_gain= speech_gain, start=comment_fade_ms+len(comment_audio), end=len(part_02))
part_02 = part_02.overlay(comment_audio, position=comment_fade_ms)
new_exercise_audio = new_exercise_audio[:comment_time_ms] + part_02 + new_exercise_audio[comment_time_ms+len(part_02):]
return new_exercise_audio
def append_segment (current_audio, next_segment, future_segment) :
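		# Crossfade the next segment's announcement speech over the tail of the current audio,
		# fade the music back up for the exercise, overlay any timed comments, then fade down
		# again (or fade out and append a "Program finished" voice clip on the last segment).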
segment_audio = next_segment.song_audio
segment_audio_faded = segment_audio - speech_gain
segment_text_audio = next_segment.text_audio
part_01 = segment_audio_faded[:len(segment_text_audio)] # First part of next segment
part_01 = current_audio[-len(segment_text_audio):].append(part_01, crossfade=len(segment_text_audio)).overlay(segment_text_audio) #
part_02 = part_01 + segment_audio_faded[len(part_01):len(part_01)+fade_ms].fade(to_gain=speech_gain, start=0, end=fade_ms) # Faded up to exercise gain
part_03 = apply_comments(segment_audio[len(part_02):len(part_02)+next_segment.get_exercise_duration_ms()+fade_ms], next_segment) # Apply comments to exercise
part_03 = part_02 + part_03.fade(to_gain=-speech_gain, start=len(part_03)-fade_ms, end=len(part_03))
part_04 = current_audio[:-len(segment_text_audio)] + part_03
if not future_segment :
part_05 = part_04.fade_out(fade_ms)
ttos(text="Program finished", lang=language, slow=False).save("output.mp3")
finish_voice = AudioSegment.from_file("output.mp3")
print("Cleaning up output.mp3")
os.remove("output.mp3")
return part_05 + finish_voice
else :
part_05 = part_04 + segment_audio_faded[len(part_03):len(part_03)+len(future_segment.text_audio)]
return part_05
print("Generating MP3 for segment 1 of " + str(len(segments)))
intro_segment_audio = segments[0].song_audio
intro_segment_text_audio = segments[0].text_audio
intro_segment_audio_faded = intro_segment_audio - speech_gain
part_01 = intro_segment_audio_faded[:fade_ms].fade_in(fade_ms)
part_02 = part_01 + intro_segment_audio_faded[len(part_01):len(part_01)+len(intro_segment_text_audio)].overlay(intro_segment_text_audio)
part_03 = part_02 + intro_segment_audio_faded[len(part_02):len(part_02)+fade_ms].fade(to_gain=speech_gain, start=0, end=fade_ms)
part_04 = apply_comments(intro_segment_audio[len(part_03):len(part_03)+segments[0].get_exercise_duration_ms()+fade_ms], segments[0])
part_04 = part_03 + part_04.fade(to_gain=-speech_gain, start=len(part_04)-fade_ms, end=len(part_04))
part_05 = part_04 + intro_segment_audio_faded[len(part_04):len(part_04)+len(segments[1].text_audio)]
program_audio = part_05
for i in range(1, len(segments)) :
print("Generating MP3 for segment " + str(i+1) + " of " + str(len(segments)))
if i+1 >= len(segments) :
program_audio = append_segment(program_audio, segments[i], None)
else :
program_audio = append_segment(program_audio, segments[i], segments[i+1])
if not os.path.exists("./output") :
os.mkdir("./output")
print("Exporting final mp3 ...")
file_path = "./output/"+output_file_name+".mp3"
program_audio.export(file_path, format="mp3")
print("Done! Exported mp3 to "+ file_path)
| [((68, 11, 68, 37), 'os.path.exists', 'os.path.exists', ({(68, 26, 68, 36): '"""./output"""'}, {}), "('./output')", False, 'import os\n'), ((69, 8, 69, 28), 'os.mkdir', 'os.mkdir', ({(69, 17, 69, 27): '"""./output"""'}, {}), "('./output')", False, 'import os\n'), ((36, 27, 36, 63), 'pydub.AudioSegment.from_file', 'AudioSegment.from_file', ({(36, 50, 36, 62): '"""output.mp3"""'}, {}), "('output.mp3')", False, 'from pydub import AudioSegment\n'), ((38, 12, 38, 35), 'os.remove', 'os.remove', ({(38, 22, 38, 34): '"""output.mp3"""'}, {}), "('output.mp3')", False, 'import os\n'), ((35, 12, 35, 68), 'gtts.gTTS', 'ttos', (), '', True, 'from gtts import gTTS as ttos\n')] |
deeplearninc/relaax | relaax/algorithms/ddpg/parameter_server.py | a0cf280486dc74dca3857c85ec0e4c34e88d6b2b | from __future__ import absolute_import
from relaax.server.parameter_server import parameter_server_base
from relaax.server.common import session
from . import ddpg_model
class ParameterServer(parameter_server_base.ParameterServerBase):
def init_session(self):
self.session = session.Session(ddpg_model.SharedParameters())
self.session.op_initialize()
self.session.op_init_target_weights()
def n_step(self):
return self.session.op_n_step()
def score(self):
return self.session.op_score()
def get_session(self):
return self.session
| [] |
nvoron23/brython | scripts/make_VFS.py | b1ce5fa39b5d38c0dde138b4e75723fbb3e574ab | # -*- coding: utf-8 -*-
import json
import os
import pyminifier
try:
import io as StringIO
except ImportError:
import cStringIO as StringIO # lint:ok
# Check to see if slimit or some other minification library is installed and
# Set minify equal to slimit's minify function.
try:
import slimit
js_minify = slimit.minify
except ImportError as error:
print(error)
js_minify = slimit = None
###############################################################################
def process_unittest(filename):
"""Process a VFS filename for Brython."""
print("Generating {}".format(filename))
nb = 0
nb_err = 0
_main_root = os.path.dirname(filename)
_VFS = {}
for _mydir in ("Lib",):
for _root, _dir, _files in os.walk(os.path.join(_main_root, _mydir)):
if 'unittest' not in _root:
continue
if '__pycache__' in _root:
continue
for _file in _files:
_ext = os.path.splitext(_file)[1]
                if _ext not in ('.py',):
continue
nb += 1
file_name = os.path.join(_root, _file)
try: # python 3
with open(file_name, encoding="utf-8") as file_with_data:
_data = file_with_data.read()
except Exception as reason: # python 2
with open(file_name, "r") as file_with_data:
_data = str(file_with_data.read()).decode("utf-8")
if not len(_data):
print("No data for {} ({}).".format(_file, type(_data)))
if _ext.lower() == '.py' and _data:
try:
_data = pyminifier.remove_comments_and_docstrings(
_data)
_data = pyminifier.dedent(_data)
except Exception as error:
print(error)
nb_err += 1
_vfs_filename = os.path.join(
_root, _file).replace(_main_root, '')
_vfs_filename = _vfs_filename.replace("\\", "/")
mod_name = _vfs_filename[len(_mydir) + 2:].replace('/', '.')
mod_name, ext = os.path.splitext(mod_name)
is_package = mod_name.endswith('__init__')
if is_package:
mod_name = mod_name[:-9]
_VFS[mod_name] = [_data, 1]
else:
_VFS[mod_name] = [_data]
print(("Adding %s %s" % (mod_name, _vfs_filename)))
print('%s files, %s errors' % (nb, nb_err))
with open(filename, "w") as file_to_write_VFS:
file_to_write_VFS.write('__BRYTHON__.libs = __BRYTHON__.libs || {};\n')
file_to_write_VFS.write("__BRYTHON__.=libs['unittest']=%s;\n\n" % json.dumps(_VFS))
file_to_write_VFS.write("""
__BRYTHON__.import_from_unittest = function(mod_name){
var stored = __BRYTHON__.libs['unittest'][mod_name]
if(stored!==undefined){
var module_contents = stored[0]
var is_package = stored[1]
var path = 'py_unittest'
var module = {name:mod_name,__class__:$B.$ModuleDict,is_package:is_package}
if(is_package){var package=mod_name}
else{
var elts = mod_name.split('.')
elts.pop()
var package = elts.join('.')
}
$B.modules[mod_name].$package = is_package
$B.modules[mod_name].__package__ = package
run_py(module,path,module_contents)
return true
}
return null
}
// add this import function to brython by doing the following:
// <body onload="brython({custom_import_funcs:[__BRYTHON__.import_from_unittest]})">
// this will allow us to import unittest modules.
""")
def process(filename, exclude_dirs=['unittest',]):
"""Process a VFS filename for Brython."""
print("Generating {}".format(filename))
nb = 0
nb_err = 0
_main_root = os.path.dirname(filename)
_VFS = {}
for _mydir in ("libs", "Lib"):
for _root, _dir, _files in os.walk(os.path.join(_main_root, _mydir)):
#if _root.endswith('lib_migration'):
_flag=False
for _exclude in exclude_dirs:
if _exclude in _root: #_root.endswith(_exclude):
_flag=True
continue
if _flag:
continue # skip these modules
if '__pycache__' in _root:
continue
nb += 1
for _file in _files:
_ext = os.path.splitext(_file)[1]
if _ext not in ('.js', '.py'):
continue
nb += 1
with open(os.path.join(_root, _file), "r") as file_with_data:
_data = file_with_data.read()
if len(_data) == 0:
print('no data for %s' % _file)
_data = unicode('')
print(_data, type(_data))
else:
_data = _data.decode('utf-8')
if _ext in '.js':
if js_minify is not None:
try:
_data = js_minify(_data)
except Exception as error:
print(error)
elif _ext == '.py' and len(_data) > 0:
try:
_data = pyminifier.remove_comments_and_docstrings(_data)
_data = pyminifier.dedent(_data)
except Exception as error:
print(error)
nb_err += 1
_vfs_filename = os.path.join(_root, _file).replace(_main_root, '')
_vfs_filename = _vfs_filename.replace("\\", "/")
if _vfs_filename.startswith('/libs/crypto_js/rollups/'):
if _file not in ('md5.js', 'sha1.js', 'sha3.js',
'sha224.js', 'sha384.js', 'sha512.js'):
continue
mod_name = _vfs_filename[len(_mydir) + 2:].replace('/', '.')
mod_name, ext = os.path.splitext(mod_name)
is_package = mod_name.endswith('__init__')
if is_package:
mod_name = mod_name[:-9]
_VFS[mod_name] = [ext, _data, 1]
else:
_VFS[mod_name] = [ext, _data]
print(("adding %s %s" % (mod_name, _vfs_filename)))
print('%s files, %s errors' % (nb, nb_err))
with open(filename, "w") as file_to_write_VFS:
file_to_write_VFS.write('__BRYTHON__.use_VFS = true;\n')
file_to_write_VFS.write('__BRYTHON__.VFS=%s;\n\n' % json.dumps(_VFS))
###############################################################################
if __name__ == '__main__':
_main_root = os.path.join(os.getcwd(), '../src')
process(os.path.join(_main_root, "py_VFS.js"))
| [((30, 17, 30, 42), 'os.path.dirname', 'os.path.dirname', ({(30, 33, 30, 41): 'filename'}, {}), '(filename)', False, 'import os\n'), ((117, 17, 117, 42), 'os.path.dirname', 'os.path.dirname', ({(117, 33, 117, 41): 'filename'}, {}), '(filename)', False, 'import os\n'), ((190, 30, 190, 41), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os\n'), ((191, 12, 191, 49), 'os.path.join', 'os.path.join', ({(191, 25, 191, 35): '_main_root', (191, 37, 191, 48): '"""py_VFS.js"""'}, {}), "(_main_root, 'py_VFS.js')", False, 'import os\n'), ((33, 43, 33, 75), 'os.path.join', 'os.path.join', ({(33, 56, 33, 66): '_main_root', (33, 68, 33, 74): '_mydir'}, {}), '(_main_root, _mydir)', False, 'import os\n'), ((120, 43, 120, 75), 'os.path.join', 'os.path.join', ({(120, 56, 120, 66): '_main_root', (120, 68, 120, 74): '_mydir'}, {}), '(_main_root, _mydir)', False, 'import os\n'), ((45, 28, 45, 54), 'os.path.join', 'os.path.join', ({(45, 41, 45, 46): '_root', (45, 48, 45, 53): '_file'}, {}), '(_root, _file)', False, 'import os\n'), ((70, 32, 70, 58), 'os.path.splitext', 'os.path.splitext', ({(70, 49, 70, 57): 'mod_name'}, {}), '(mod_name)', False, 'import os\n'), ((82, 74, 82, 90), 'json.dumps', 'json.dumps', ({(82, 85, 82, 89): '_VFS'}, {}), '(_VFS)', False, 'import json\n'), ((172, 32, 172, 58), 'os.path.splitext', 'os.path.splitext', ({(172, 49, 172, 57): 'mod_name'}, {}), '(mod_name)', False, 'import os\n'), ((183, 58, 183, 74), 'json.dumps', 'json.dumps', ({(183, 69, 183, 73): '_VFS'}, {}), '(_VFS)', False, 'import json\n'), ((40, 23, 40, 46), 'os.path.splitext', 'os.path.splitext', ({(40, 40, 40, 45): '_file'}, {}), '(_file)', False, 'import os\n'), ((134, 23, 134, 46), 'os.path.splitext', 'os.path.splitext', ({(134, 40, 134, 45): '_file'}, {}), '(_file)', False, 'import os\n'), ((58, 32, 59, 34), 'pyminifier.remove_comments_and_docstrings', 'pyminifier.remove_comments_and_docstrings', ({(59, 28, 59, 33): '_data'}, {}), '(_data)', False, 'import pyminifier\n'), ((60, 32, 60, 56), 'pyminifier.dedent', 'pyminifier.dedent', ({(60, 50, 60, 55): '_data'}, {}), '(_data)', False, 'import pyminifier\n'), ((65, 32, 66, 33), 'os.path.join', 'os.path.join', ({(66, 20, 66, 25): '_root', (66, 27, 66, 32): '_file'}, {}), '(_root, _file)', False, 'import os\n'), ((139, 26, 139, 52), 'os.path.join', 'os.path.join', ({(139, 39, 139, 44): '_root', (139, 46, 139, 51): '_file'}, {}), '(_root, _file)', False, 'import os\n'), ((163, 32, 163, 58), 'os.path.join', 'os.path.join', ({(163, 45, 163, 50): '_root', (163, 52, 163, 57): '_file'}, {}), '(_root, _file)', False, 'import os\n'), ((157, 29, 157, 77), 'pyminifier.remove_comments_and_docstrings', 'pyminifier.remove_comments_and_docstrings', ({(157, 71, 157, 76): '_data'}, {}), '(_data)', False, 'import pyminifier\n'), ((158, 29, 158, 53), 'pyminifier.dedent', 'pyminifier.dedent', ({(158, 47, 158, 52): '_data'}, {}), '(_data)', False, 'import pyminifier\n')] |
rcox771/spectrum_scanner | main.py | 71559d62ca9dc9f66d66b7ada4491de42c6cdd52 | from rtlsdr import RtlSdr
from contextlib import closing
from matplotlib import pyplot as plt
import numpy as np
from scipy.signal import spectrogram, windows
from scipy import signal
from skimage.io import imsave, imread
from datetime import datetime
import json
import os
from tqdm import tqdm
import time
from queue import Queue
import asyncio
from pathlib import Path
import warnings
for cat in [RuntimeWarning, UserWarning, FutureWarning]:
warnings.filterwarnings("ignore", category=cat)
def split_images(dir="sdr_captures/specs_raw"):
jpgs = list(Path(dir).rglob('*.jpg'))
pngs = list(Path(dir).rglob('*.png'))
img_files = pngs + jpgs
img_files = list(filter(lambda x: 'chk' not in str(x), img_files))
for img_file in tqdm(img_files, desc="splitting images"):
im = imread(img_file)
shp = list(im.shape)
shp = list(filter(lambda x: x != 1, shp))
shp = np.array(shp)
dim_to_slide_over = shp.argmax()
win_size = shp[shp.argmin()]
im_size = shp[dim_to_slide_over]
for start in range(0, im_size, win_size):
stop = start + win_size
if stop >= im_size:
break
if dim_to_slide_over == 0:
chunk = im[start:stop, :]
elif dim_to_slide_over == 1:
chunk = im[:, start:stop]
file_out = str(
Path(img_file).with_suffix(f".chk_{start}_{stop}.png"))
imsave(file_out, chunk)
# y -- spectrogram, nf by nt array
# dbf -- Dynamic range of the spectrum
def adjust_dyn_range(x, mx=3, mn=10, rel_to=np.median):
r = rel_to(x)
zmax = r+mx
zmin = r-mn
x[x<zmin] = zmin
x[x>zmax] = zmax
return x
def to_spec(y, fs, fc, NFFT=1024, dbf=60, nperseg=128, normalize=True):
#w = windows.hamming(nperseg)
#window = signal.kaiser(nperseg, beta=14)
f, t, y = spectrogram(y, detrend=None, noverlap=int(nperseg/2), nfft=NFFT, fs=fs)
y = np.fft.fftshift(y, axes=0)
if normalize:
#y = norm_spectrum(y)
y = np.sqrt(np.power(y.real, 2) + np.power(y.imag, 2))
y = 20 * np.log10(np.abs(y)/ np.abs(y).max())
y = np.abs(y)
y = y / y.max()
return y
from sklearn.preprocessing import MinMaxScaler, StandardScaler
def spectrogram(x, fs, fc, m=None, dbf=60):
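    # NOTE: this definition shadows scipy.signal.spectrogram imported above.
    # It slices x into half-overlapping, Hamming-windowed columns, FFTs each column,
    # and rescales the log-magnitude so the returned image lies in [0, 1].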
if not m:
m = 1024
isreal_bool = np.isreal(x).all()
lx = len(x);
nt = (lx + m - 1) // m
x = np.append(x,np.zeros(-lx+nt*m))
x = x.reshape((int(m/2),nt*2), order='F')
x = np.concatenate((x,x),axis=0)
x = x.reshape((m*nt*2,1),order='F')
x = x[np.r_[m//2:len(x),np.ones(m//2)*(len(x)-1)].astype(int)].reshape((m,nt*2),order='F')
xmw = x * windows.hamming(m)[:,None]
t_range = [0.0, lx / fs]
if isreal_bool:
f_range = [ fc, fs / 2.0 + fc]
xmf = np.fft.fft(xmw,len(xmw),axis=0)
        xmf = xmf[0:m//2,:]
else:
f_range = [-fs / 2.0 + fc, fs / 2.0 + fc]
xmf = np.fft.fftshift( np.fft.fft( xmw ,len(xmw),axis=0), axes=0 )
f_range = np.linspace(*f_range, xmf.shape[0])
t_range = np.linspace(*t_range, xmf.shape[1])
h = xmf.shape[0]
each = int(h*.10)
xmf = xmf[each:-each, :]
xmf = np.sqrt(np.power(xmf.real, 2) + np.power(xmf.imag, 2))
xmf = np.abs(xmf)
xmf /= xmf.max()
#throw away sides
xmf = 20 * np.log10(xmf)
xmf = np.clip(xmf, -dbf, 0)
xmf = MinMaxScaler().fit_transform(StandardScaler(with_mean=True, with_std=True).fit_transform(xmf))
xmf = np.abs(xmf)
#xmf-=np.median(xmf)
xmf/=xmf.max()
print(xmf.min(), xmf.max())
return f_range, t_range, xmf
def append_json(data, path):
with open(path, 'a') as f:
f.write(json.dumps(data) + '\n')
async def stream(sdr, N):
samples_buffer = Queue()
total = 0
with tqdm(total=N, desc='sampling') as pbar:
#for i in range(10):
# time.sleep(0.1)
async for samples in sdr.stream():
# do something with samples
# ...
samples_buffer.put(samples)
#print(f'put {len(samples)} into buffer')
total += len(samples)
pbar.update(len(samples))
if total >= N:
break
# to stop streaming:
await sdr.stop()
# done
sdr.close()
return samples_buffer
def capture(fc=94.3e6,
fs=int(1e6),
gain='auto',
seconds_dwell=.4
#offset_dc=5e4
):
N = int(seconds_dwell * fs)
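    # Tune to fc, stream IQ samples asynchronously until N samples are buffered,
    # then return them together with the capture metadata.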
with closing(RtlSdr()) as sdr:
sdr.sample_rate = fs
sdr.center_freq = fc# + int(offset_dc)
sdr.gain = gain
t = datetime.now()
stamp = datetime.timestamp(t)
loop = asyncio.get_event_loop()
samples_buffer = loop.run_until_complete(stream(sdr, N))
iq_samples = np.hstack(np.array(list(samples_buffer.queue)))[:N].astype("complex64")
#iq_samples = shift_mix(iq_samples, -offset_dc, fs)
#path = os.path.join(out_dir, f'{stamp}.png')
meta = dict(
fs=fs,
fc=fc,
gain=gain,
seconds_dwell=seconds_dwell,
dt_start=stamp
)
return iq_samples, meta
def shift_mix(x, hz, fs):
return x*np.exp(1j*2*np.pi*hz/fs*np.arange(len(x)))
def save_capture(path, spec_img, meta, meta_path):
imsave(path, spec_img.T)
append_json(meta, meta_path)
def scan(
low=80e6,
high=1000e6,
repeats=10,
target_hpb=300,
):
out_dir="sdr_captures/specs_raw"
meta_path="sdr_captures/dataset.json"
os.makedirs(out_dir, exist_ok=True)
for repeat in tqdm(range(repeats), desc='repeats'):
for fs in [int(3.2e6)]:#list(map(int, (3.2e6, 2e6, 1e6))):
#for NFFT in [1024, 2048, 2048 * 2]:
fcs = []
fc = low
while fc < high:
fc += int((fs * (1/3.)))
fcs.append(fc)
fcs = np.array(fcs)
print(f'scanning {len(fcs)} total frequencies...')
for fc in tqdm(fcs, desc='fcs'):
try:
iq, meta = capture(fc=fc, fs=fs)
meta['NFFT'] = closest_power_of_two(fs / target_hpb)
meta['hpb'] = fs/meta['NFFT']
ff, tt, spec_img = spectrogram(iq, fs, fc, m=meta['NFFT'])
img_path = os.path.join(out_dir, f"{meta['dt_start']}.png")
save_capture(img_path, spec_img, meta, meta_path)
except Exception as e:
print(e)
time.sleep(1)
pass
def get_optimal_fs(max_fs=3e6):
fss = np.array([np.power(2,i) for i in range(30)])
fss = fss[fss<=max_fs][-1]
return fss
def optimal_scan(
min_freq=80e6,
max_freq=107e6,
fs=3e6,
hpb_target=4096
):
fs2 = get_optimal_fs(fs)
if fs2!=fs:
print(f'optimal fs found: {fs2}, original: {fs}')
fs = fs2
del fs2
n_bins = closest_power_of_two(fs / hpb_target)
print(f'given hz per bin target: {hpb_target} -> nfft bins per sweep: {n_bins}')
assert fs == hpb_target * n_bins
print(f'{fs} = {hpb_target} * {n_bins}')
diff_bw = max_freq-min_freq
sweeps = np.ceil(diff_bw/fs) + 1
sweep_bw = sweeps * fs
delta_bw = sweep_bw - diff_bw
adjusted_min_freq = min_freq - int(delta_bw//2)
adjusted_max_freq = max_freq + int(delta_bw//2)
assert (adjusted_max_freq-adjusted_min_freq) == sweep_bw
print(f'optimal min/max frequecies: {adjusted_min_freq}/{adjusted_max_freq}')
min_freq = adjusted_min_freq
max_freq = adjusted_max_freq
freq_bins = np.arange(n_bins*sweeps)
fz = np.arange(min_freq, max_freq, hpb_target).astype(int)
return freq_bins, fz
def closest_power_of_two(number):
# Returns next power of two following 'number'
n = np.ceil(np.log2(number))
a = np.array([np.power(2, n - 1), np.power(2, n), np.power(2, n + 1)])
return int(a[np.argmin(np.abs(a - number))])
def norm_spectrum(spec_img):
spec_img = 20 * np.log10(np.abs(spec_img) / np.max(np.abs(spec_img)))
mid = np.median(spec_img)
# high = mid + 30
# low = mid - 30
# spec_img[spec_img < low] = low
# spec_img[spec_img > high] = high
spec_img = np.abs(spec_img)
spec_img /= spec_img.max()
print('spec max:', spec_img.max(), 'spec min:', spec_img.min())
return spec_img
def parse_measure(s):
s = s.lower()
if s[-1].isalpha():
h, mod = float(s[:-1]), s[-1]
if mod == 'm':
h*=1e6
elif mod == 'k':
h*=1e3
else:
h = int(s)
return h
def string_to_linspace(s, delim=':'):
return np.arange(*list(map(parse_measure, s.split(delim))))
#string_to_linspace('24M:28M:3M')
def plot_one(fc=94.3 * 1e6, fs=3e6, target_hpb=300, seconds_dwell=.2):
NFFT = closest_power_of_two(fs / target_hpb)
iq_samples, meta = capture(fc=fc, fs=fs, seconds_dwell=seconds_dwell)
spec_img = to_spec(iq_samples, fs, fc, NFFT=NFFT)
#spec_img = norm_spectrum(spec_img)
#spec_img = np.abs(spec_img)
#spec_img /= spec_img.max()
#spec_img = 1 - spec_img
print('img shape:', spec_img.shape)
fig, ax = plt.subplots(1, 1, figsize=(14, 4))
ax.matshow(spec_img.T[:NFFT], cmap=plt.get_cmap('viridis'))
print(spec_img.T.shape)
#Wplt.plot(spec_img.T[0, :])
plt.show()
if __name__ == "__main__":
#split_images()
#plot_one()
scan(repeats=3, target_hpb=1500)
split_images()
#plot_one() | [((19, 4, 19, 51), 'warnings.filterwarnings', 'warnings.filterwarnings', (), '', False, 'import warnings\n'), ((28, 20, 28, 60), 'tqdm.tqdm', 'tqdm', (), '', False, 'from tqdm import tqdm\n'), ((68, 8, 68, 34), 'numpy.fft.fftshift', 'np.fft.fftshift', (), '', True, 'import numpy as np\n'), ((92, 8, 92, 36), 'numpy.concatenate', 'np.concatenate', (), '', True, 'import numpy as np\n'), ((104, 14, 104, 49), 'numpy.linspace', 'np.linspace', ({(104, 26, 104, 34): '*f_range', (104, 36, 104, 48): 'xmf.shape[0]'}, {}), '(*f_range, xmf.shape[0])', True, 'import numpy as np\n'), ((105, 14, 105, 49), 'numpy.linspace', 'np.linspace', ({(105, 26, 105, 34): '*t_range', (105, 36, 105, 48): 'xmf.shape[1]'}, {}), '(*t_range, xmf.shape[1])', True, 'import numpy as np\n'), ((112, 10, 112, 21), 'numpy.abs', 'np.abs', ({(112, 17, 112, 20): 'xmf'}, {}), '(xmf)', True, 'import numpy as np\n'), ((119, 10, 119, 31), 'numpy.clip', 'np.clip', ({(119, 18, 119, 21): 'xmf', (119, 23, 119, 27): '-dbf', (119, 29, 119, 30): '0'}, {}), '(xmf, -dbf, 0)', True, 'import numpy as np\n'), ((121, 10, 121, 21), 'numpy.abs', 'np.abs', ({(121, 17, 121, 20): 'xmf'}, {}), '(xmf)', True, 'import numpy as np\n'), ((136, 21, 136, 28), 'queue.Queue', 'Queue', ({}, {}), '()', False, 'from queue import Queue\n'), ((199, 4, 199, 28), 'skimage.io.imsave', 'imsave', ({(199, 11, 199, 15): 'path', (199, 17, 199, 27): 'spec_img.T'}, {}), '(path, spec_img.T)', False, 'from skimage.io import imsave, imread\n'), ((213, 4, 213, 39), 'os.makedirs', 'os.makedirs', (), '', False, 'import os\n'), ((279, 16, 279, 40), 'numpy.arange', 'np.arange', ({(279, 26, 279, 39): 'n_bins * sweeps'}, {}), '(n_bins * sweeps)', True, 'import numpy as np\n'), ((293, 10, 293, 29), 'numpy.median', 'np.median', ({(293, 20, 293, 28): 'spec_img'}, {}), '(spec_img)', True, 'import numpy as np\n'), ((299, 15, 299, 31), 'numpy.abs', 'np.abs', ({(299, 22, 299, 30): 'spec_img'}, {}), '(spec_img)', True, 'import numpy as np\n'), ((333, 14, 333, 49), 'matplotlib.pyplot.subplots', 'plt.subplots', (), '', True, 'from matplotlib import pyplot as plt\n'), ((337, 4, 337, 14), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'from matplotlib import pyplot as plt\n'), ((29, 13, 29, 29), 'skimage.io.imread', 'imread', ({(29, 20, 29, 28): 'img_file'}, {}), '(img_file)', False, 'from skimage.io import imsave, imread\n'), ((32, 14, 32, 27), 'numpy.array', 'np.array', ({(32, 23, 32, 26): 'shp'}, {}), '(shp)', True, 'import numpy as np\n'), ((73, 12, 73, 21), 'numpy.abs', 'np.abs', ({(73, 19, 73, 20): 'y'}, {}), '(y)', True, 'import numpy as np\n'), ((90, 20, 90, 38), 'numpy.zeros', 'np.zeros', ({(90, 29, 90, 37): '-lx + nt * m'}, {}), '(-lx + nt * m)', True, 'import numpy as np\n'), ((118, 15, 118, 28), 'numpy.log10', 'np.log10', ({(118, 24, 118, 27): 'xmf'}, {}), '(xmf)', True, 'import numpy as np\n'), ((138, 9, 138, 39), 'tqdm.tqdm', 'tqdm', (), '', False, 'from tqdm import tqdm\n'), ((173, 12, 173, 26), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime\n'), ((174, 16, 174, 37), 'datetime.datetime.timestamp', 'datetime.timestamp', ({(174, 35, 174, 36): 't'}, {}), '(t)', False, 'from datetime import datetime\n'), ((176, 15, 176, 39), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ({}, {}), '()', False, 'import asyncio\n'), ((269, 13, 269, 32), 'numpy.ceil', 'np.ceil', ({(269, 21, 269, 31): '(diff_bw / fs)'}, {}), '(diff_bw / fs)', True, 'import numpy as np\n'), ((285, 16, 285, 31), 'numpy.log2', 'np.log2', ({(285, 24, 
285, 30): 'number'}, {}), '(number)', True, 'import numpy as np\n'), ((46, 12, 46, 35), 'skimage.io.imsave', 'imsave', ({(46, 19, 46, 27): 'file_out', (46, 29, 46, 34): 'chunk'}, {}), '(file_out, chunk)', False, 'from skimage.io import imsave, imread\n'), ((86, 18, 86, 30), 'numpy.isreal', 'np.isreal', ({(86, 28, 86, 29): 'x'}, {}), '(x)', True, 'import numpy as np\n'), ((95, 14, 95, 32), 'scipy.signal.windows.hamming', 'windows.hamming', ({(95, 30, 95, 31): 'm'}, {}), '(m)', False, 'from scipy.signal import spectrogram, windows\n'), ((111, 18, 111, 39), 'numpy.power', 'np.power', ({(111, 27, 111, 35): 'xmf.real', (111, 37, 111, 38): '2'}, {}), '(xmf.real, 2)', True, 'import numpy as np\n'), ((111, 42, 111, 63), 'numpy.power', 'np.power', ({(111, 51, 111, 59): 'xmf.imag', (111, 61, 111, 62): '2'}, {}), '(xmf.imag, 2)', True, 'import numpy as np\n'), ((120, 10, 120, 24), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ({}, {}), '()', False, 'from sklearn.preprocessing import MinMaxScaler, StandardScaler\n'), ((169, 17, 169, 25), 'rtlsdr.RtlSdr', 'RtlSdr', ({}, {}), '()', False, 'from rtlsdr import RtlSdr\n'), ((223, 18, 223, 31), 'numpy.array', 'np.array', ({(223, 27, 223, 30): 'fcs'}, {}), '(fcs)', True, 'import numpy as np\n'), ((226, 22, 226, 43), 'tqdm.tqdm', 'tqdm', (), '', False, 'from tqdm import tqdm\n'), ((245, 20, 245, 33), 'numpy.power', 'np.power', ({(245, 29, 245, 30): '2', (245, 31, 245, 32): 'i'}, {}), '(2, i)', True, 'import numpy as np\n'), ((280, 9, 280, 50), 'numpy.arange', 'np.arange', ({(280, 19, 280, 27): 'min_freq', (280, 29, 280, 37): 'max_freq', (280, 39, 280, 49): 'hpb_target'}, {}), '(min_freq, max_freq, hpb_target)', True, 'import numpy as np\n'), ((286, 18, 286, 36), 'numpy.power', 'np.power', ({(286, 27, 286, 28): '2', (286, 30, 286, 35): 'n - 1'}, {}), '(2, n - 1)', True, 'import numpy as np\n'), ((286, 38, 286, 52), 'numpy.power', 'np.power', ({(286, 47, 286, 48): '2', (286, 50, 286, 51): 'n'}, {}), '(2, n)', True, 'import numpy as np\n'), ((286, 54, 286, 72), 'numpy.power', 'np.power', ({(286, 63, 286, 64): '2', (286, 66, 286, 71): 'n + 1'}, {}), '(2, n + 1)', True, 'import numpy as np\n'), ((334, 39, 334, 62), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', ({(334, 52, 334, 61): '"""viridis"""'}, {}), "('viridis')", True, 'from matplotlib import pyplot as plt\n'), ((23, 16, 23, 25), 'pathlib.Path', 'Path', ({(23, 21, 23, 24): 'dir'}, {}), '(dir)', False, 'from pathlib import Path\n'), ((24, 16, 24, 25), 'pathlib.Path', 'Path', ({(24, 21, 24, 24): 'dir'}, {}), '(dir)', False, 'from pathlib import Path\n'), ((71, 20, 71, 39), 'numpy.power', 'np.power', ({(71, 29, 71, 35): 'y.real', (71, 37, 71, 38): '2'}, {}), '(y.real, 2)', True, 'import numpy as np\n'), ((71, 42, 71, 61), 'numpy.power', 'np.power', ({(71, 51, 71, 57): 'y.imag', (71, 59, 71, 60): '2'}, {}), '(y.imag, 2)', True, 'import numpy as np\n'), ((120, 39, 120, 84), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', (), '', False, 'from sklearn.preprocessing import MinMaxScaler, StandardScaler\n'), ((132, 16, 132, 32), 'json.dumps', 'json.dumps', ({(132, 27, 132, 31): 'data'}, {}), '(data)', False, 'import json\n'), ((291, 29, 291, 45), 'numpy.abs', 'np.abs', ({(291, 36, 291, 44): 'spec_img'}, {}), '(spec_img)', True, 'import numpy as np\n'), ((72, 26, 72, 35), 'numpy.abs', 'np.abs', ({(72, 33, 72, 34): 'y'}, {}), '(y)', True, 'import numpy as np\n'), ((231, 39, 231, 78), 'scipy.signal.spectrogram', 'spectrogram', (), '', False, 'from scipy.signal import spectrogram, windows\n'), ((233, 31, 
233, 79), 'os.path.join', 'os.path.join', ({(233, 44, 233, 51): 'out_dir', (233, 53, 233, 78): 'f"""{meta[\'dt_start\']}.png"""'}, {}), '(out_dir, f"{meta[\'dt_start\']}.png")', False, 'import os\n'), ((287, 27, 287, 45), 'numpy.abs', 'np.abs', ({(287, 34, 287, 44): '(a - number)'}, {}), '(a - number)', True, 'import numpy as np\n'), ((291, 55, 291, 71), 'numpy.abs', 'np.abs', ({(291, 62, 291, 70): 'spec_img'}, {}), '(spec_img)', True, 'import numpy as np\n'), ((45, 16, 45, 30), 'pathlib.Path', 'Path', ({(45, 21, 45, 29): 'img_file'}, {}), '(img_file)', False, 'from pathlib import Path\n'), ((240, 20, 240, 33), 'time.sleep', 'time.sleep', ({(240, 31, 240, 32): '(1)'}, {}), '(1)', False, 'import time\n'), ((72, 37, 72, 46), 'numpy.abs', 'np.abs', ({(72, 44, 72, 45): 'y'}, {}), '(y)', True, 'import numpy as np\n'), ((94, 28, 94, 41), 'numpy.ones', 'np.ones', ({(94, 36, 94, 40): 'm // 2'}, {}), '(m // 2)', True, 'import numpy as np\n')] |
donbowman/rdflib | test/__init__.py | c1be731c8e6bbe997cc3f25890bbaf685499c517 | #
import os
TEST_DIR = os.path.abspath(os.path.dirname(__file__))
| [((4, 27, 4, 52), 'os.path.dirname', 'os.path.dirname', ({(4, 43, 4, 51): '__file__'}, {}), '(__file__)', False, 'import os\n')] |
braingineer/pyromancy | examples/mnist1.py | 7a7ab1a6835fd63b9153463dd08bb53630f15c62 | from __future__ import print_function
import argparse
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import datasets, transforms
from tqdm import tqdm
from pyromancy import pyromq
from pyromancy.losses import LossGroup, NegativeLogLikelihood
from pyromancy.metrics import MetricGroup, Accuracy
from pyromancy.subscribers import LogSubscriber
def parse_args():
# Training settings
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
help='learning rate (default: 0.01)')
parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
help='SGD momentum (default: 0.5)')
parser.add_argument('--weight-decay', default=1e-4, type=float)
parser.add_argument('--grad-clip-norm', default=10.0, type=float)
parser.add_argument('--disable-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
# Name the experiment
parser.add_argument('--experiment-name', required=True)
parser.add_argument("--experimentdb", default=None)
parser.add_argument('--log-to-console', default=False, action='store_true')
args = parser.parse_args()
if args.experimentdb is None:
args.experimentdb = args.experiment_name + '.db'
return args
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x)
# noinspection PyCallingNonCallable,PyCallingNonCallable
def run_once(args, train_loader, test_loader):
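    # Build the model, optimizer, loss group and metric group, wire them to a pyromq
    # broker, then run the train/validation loops while publishing metric events.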
broker = pyromq.Broker()
model = Net()
if args.cuda:
model.cuda()
training_events = pyromq.TrainingEventPublisher(broker=broker)
broker.add_subscriber(LogSubscriber(experiment_uid=args.experiment_name,
log_file=os.path.join('logs', args.experiment_name),
to_console=args.log_to_console))
opt = torch.optim.SGD(params=model.parameters(),
lr=args.lr,
weight_decay=args.weight_decay,
momentum=args.momentum)
losses = LossGroup(optimizer=opt,
grad_clip_norm=args.grad_clip_norm,
name='losses',
channel_name=pyromq.channels.METRIC_EVENTS,
broker=broker)
losses.add(NegativeLogLikelihood(name='nll',
target_name='y_target',
output_name='y_pred'),
data_target='train')
# Metrics
metrics = MetricGroup(name='metrics',
channel_name=pyromq.channels.METRIC_EVENTS,
broker=broker)
metrics.add(Accuracy(name='acc',
target_name='y_target',
output_name='y_pred'),
data_target='*')
metrics.add(NegativeLogLikelihood(name='nll',
target_name='y_target',
output_name='y_pred'),
data_target='val')
training_events.training_start()
for _ in tqdm(range(args.epochs), total=args.epochs):
training_events.epoch_start()
model.train(True)
for data, target in train_loader:
# From the original example
if args.cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data), Variable(target)
# put the incoming batch data into a dictionary
batch_dict = {'x_data': data, 'y_target': target}
# Training Event
training_events.batch_start()
# Get model outputs
predictions = {'y_pred': model(batch_dict['x_data'])}
# Compute Metrics
metrics.compute(in_dict=batch_dict, out_dict=predictions,
data_type='train')
# Compute Losses
losses.compute(in_dict=batch_dict, out_dict=predictions,
data_type='train')
losses.step()
# Training Event
training_events.batch_end()
model.train(False)
for data, target in test_loader:
if args.cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data, volatile=True), Variable(target)
batch_dict = {'x_data': data, 'y_target': target}
# Training Event
training_events.batch_start()
predictions = {'y_pred': model(batch_dict['x_data'])}
metrics.compute(in_dict=batch_dict,
out_dict=predictions,
data_type='val')
training_events.batch_end()
training_events.epoch_end()
def main():
args = parse_args()
args.cuda = not args.disable_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
dataload_kwargs = {}
if args.cuda:
dataload_kwargs = {'num_workers': 1, 'pin_memory': True}
train_dataset = datasets.MNIST('../data', train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
]))
# noinspection PyUnresolvedReferences
train_loader = torch.utils.data.DataLoader(train_dataset,
batch_size=args.batch_size,
shuffle=True, **dataload_kwargs)
test_dataset = datasets.MNIST('../data', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
]))
# noinspection PyUnresolvedReferences
test_loader = torch.utils.data.DataLoader(test_dataset,
batch_size=args.batch_size,
shuffle=True, **dataload_kwargs)
run_once(args, train_loader, test_loader)
if __name__ == "__main__":
main()
| [((21, 13, 21, 73), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((80, 13, 80, 28), 'pyromancy.pyromq.Broker', 'pyromq.Broker', ({}, {}), '()', False, 'from pyromancy import pyromq\n'), ((86, 22, 86, 66), 'pyromancy.pyromq.TrainingEventPublisher', 'pyromq.TrainingEventPublisher', (), '', False, 'from pyromancy import pyromq\n'), ((97, 13, 101, 37), 'pyromancy.losses.LossGroup', 'LossGroup', (), '', False, 'from pyromancy.losses import LossGroup, NegativeLogLikelihood\n'), ((110, 14, 112, 40), 'pyromancy.metrics.MetricGroup', 'MetricGroup', (), '', False, 'from pyromancy.metrics import MetricGroup, Accuracy\n'), ((184, 4, 184, 32), 'torch.manual_seed', 'torch.manual_seed', ({(184, 22, 184, 31): 'args.seed'}, {}), '(args.seed)', False, 'import torch\n'), ((198, 19, 200, 79), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (), '', False, 'import torch\n'), ((207, 18, 209, 78), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (), '', False, 'import torch\n'), ((62, 21, 62, 52), 'torch.nn.Conv2d', 'nn.Conv2d', (), '', True, 'import torch.nn as nn\n'), ((63, 21, 63, 53), 'torch.nn.Conv2d', 'nn.Conv2d', (), '', True, 'import torch.nn as nn\n'), ((64, 26, 64, 40), 'torch.nn.Dropout2d', 'nn.Dropout2d', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((65, 19, 65, 37), 'torch.nn.Linear', 'nn.Linear', ({(65, 29, 65, 32): '320', (65, 34, 65, 36): '50'}, {}), '(320, 50)', True, 'import torch.nn as nn\n'), ((66, 19, 66, 36), 'torch.nn.Linear', 'nn.Linear', ({(66, 29, 66, 31): '50', (66, 33, 66, 35): '10'}, {}), '(50, 10)', True, 'import torch.nn as nn\n'), ((73, 12, 73, 48), 'torch.nn.functional.dropout', 'F.dropout', (), '', True, 'import torch.nn.functional as F\n'), ((75, 15, 75, 31), 'torch.nn.functional.log_softmax', 'F.log_softmax', ({(75, 29, 75, 30): 'x'}, {}), '(x)', True, 'import torch.nn.functional as F\n'), ((103, 15, 105, 58), 'pyromancy.losses.NegativeLogLikelihood', 'NegativeLogLikelihood', (), '', False, 'from pyromancy.losses import LossGroup, NegativeLogLikelihood\n'), ((114, 16, 116, 46), 'pyromancy.metrics.Accuracy', 'Accuracy', (), '', False, 'from pyromancy.metrics import MetricGroup, Accuracy\n'), ((119, 16, 121, 59), 'pyromancy.losses.NegativeLogLikelihood', 'NegativeLogLikelihood', (), '', False, 'from pyromancy.losses import LossGroup, NegativeLogLikelihood\n'), ((182, 42, 182, 67), 'torch.cuda.is_available', 'torch.cuda.is_available', ({}, {}), '()', False, 'import torch\n'), ((186, 8, 186, 41), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', ({(186, 31, 186, 40): 'args.seed'}, {}), '(args.seed)', False, 'import torch\n'), ((89, 49, 89, 91), 'os.path.join', 'os.path.join', ({(89, 62, 89, 68): '"""logs"""', (89, 70, 89, 90): 'args.experiment_name'}, {}), "('logs', args.experiment_name)", False, 'import os\n'), ((135, 27, 135, 41), 'torch.autograd.Variable', 'Variable', ({(135, 36, 135, 40): 'data'}, {}), '(data)', False, 'from torch.autograd import Variable\n'), ((135, 43, 135, 59), 'torch.autograd.Variable', 'Variable', ({(135, 52, 135, 58): 'target'}, {}), '(target)', False, 'from torch.autograd import Variable\n'), ((163, 27, 163, 56), 'torch.autograd.Variable', 'Variable', (), '', False, 'from torch.autograd import Variable\n'), ((163, 58, 163, 74), 'torch.autograd.Variable', 'Variable', ({(163, 67, 163, 73): 'target'}, {}), '(target)', False, 'from torch.autograd import Variable\n'), ((194, 39, 194, 60), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ({}, {}), '()', False, 'from 
torchvision import datasets, transforms\n'), ((195, 39, 195, 81), 'torchvision.transforms.Normalize', 'transforms.Normalize', ({(195, 60, 195, 69): '(0.1307,)', (195, 71, 195, 80): '(0.3081,)'}, {}), '((0.1307,), (0.3081,))', False, 'from torchvision import datasets, transforms\n'), ((203, 8, 203, 29), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ({}, {}), '()', False, 'from torchvision import datasets, transforms\n'), ((204, 8, 204, 50), 'torchvision.transforms.Normalize', 'transforms.Normalize', ({(204, 29, 204, 38): '(0.1307,)', (204, 40, 204, 49): '(0.3081,)'}, {}), '((0.1307,), (0.3081,))', False, 'from torchvision import datasets, transforms\n')] |
matthewklinko/openpilot | selfdrive/locationd/calibrationd.py | b0563a59684d0901f99abbb58ac1fbd729ded1f9 | #!/usr/bin/env python
import os
import copy
import json
import numpy as np
import selfdrive.messaging as messaging
from selfdrive.locationd.calibration_helpers import Calibration
from selfdrive.swaglog import cloudlog
from common.params import Params
from common.transformations.model import model_height
from common.transformations.camera import view_frame_from_device_frame, get_view_frame_from_road_frame, \
eon_intrinsics, get_calib_from_vp, H, W
MPH_TO_MS = 0.44704
MIN_SPEED_FILTER = 15 * MPH_TO_MS
MAX_YAW_RATE_FILTER = np.radians(2) # per second
INPUTS_NEEDED = 300 # allow to update VP every so many frames
INPUTS_WANTED = 600 # We want a little bit more than we need for stability
WRITE_CYCLES = 400 # write every 400 cycles
VP_INIT = np.array([W/2., H/2.])
# These validity corners were chosen by looking at 1000
# and taking most extreme cases with some margin.
VP_VALIDITY_CORNERS = np.array([[W//2 - 150, 280], [W//2 + 150, 540]])
DEBUG = os.getenv("DEBUG") is not None
def is_calibration_valid(vp):
return vp[0] > VP_VALIDITY_CORNERS[0,0] and vp[0] < VP_VALIDITY_CORNERS[1,0] and \
vp[1] > VP_VALIDITY_CORNERS[0,1] and vp[1] < VP_VALIDITY_CORNERS[1,1]
class Calibrator(object):
def __init__(self, param_put=False):
self.param_put = param_put
self.vp = copy.copy(VP_INIT)
self.vps = []
self.cal_status = Calibration.UNCALIBRATED
self.write_counter = 0
self.just_calibrated = False
self.params = Params()
calibration_params = self.params.get("CalibrationParams")
if calibration_params:
try:
calibration_params = json.loads(calibration_params)
self.vp = np.array(calibration_params["vanishing_point"])
self.vps = np.tile(self.vp, (calibration_params['valid_points'], 1)).tolist()
self.update_status()
except Exception:
cloudlog.exception("CalibrationParams file found but error encountered")
def update_status(self):
start_status = self.cal_status
if len(self.vps) < INPUTS_NEEDED:
self.cal_status = Calibration.UNCALIBRATED
else:
self.cal_status = Calibration.CALIBRATED if is_calibration_valid(self.vp) else Calibration.INVALID
end_status = self.cal_status
self.just_calibrated = False
if start_status == Calibration.UNCALIBRATED and end_status == Calibration.CALIBRATED:
self.just_calibrated = True
def handle_cam_odom(self, log):
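    # Use odometry only when moving fast enough and yawing slowly; project the translation
    # direction into the image via the camera intrinsics to get a vanishing-point sample,
    # then average a rolling buffer of recent samples.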
trans, rot = log.trans, log.rot
if np.linalg.norm(trans) > MIN_SPEED_FILTER and abs(rot[2]) < MAX_YAW_RATE_FILTER:
new_vp = eon_intrinsics.dot(view_frame_from_device_frame.dot(trans))
new_vp = new_vp[:2]/new_vp[2]
self.vps.append(new_vp)
self.vps = self.vps[-INPUTS_WANTED:]
self.vp = np.mean(self.vps, axis=0)
self.update_status()
self.write_counter += 1
if self.param_put and (self.write_counter % WRITE_CYCLES == 0 or self.just_calibrated):
cal_params = {"vanishing_point": list(self.vp),
"valid_points": len(self.vps)}
self.params.put("CalibrationParams", json.dumps(cal_params))
return new_vp
else:
return None
def send_data(self, pm):
calib = get_calib_from_vp(self.vp)
extrinsic_matrix = get_view_frame_from_road_frame(0, calib[1], calib[2], model_height)
cal_send = messaging.new_message()
cal_send.init('liveCalibration')
cal_send.liveCalibration.calStatus = self.cal_status
cal_send.liveCalibration.calPerc = min(len(self.vps) * 100 // INPUTS_NEEDED, 100)
cal_send.liveCalibration.extrinsicMatrix = [float(x) for x in extrinsic_matrix.flatten()]
cal_send.liveCalibration.rpyCalib = [float(x) for x in calib]
pm.send('liveCalibration', cal_send)
def calibrationd_thread(sm=None, pm=None):
if sm is None:
sm = messaging.SubMaster(['cameraOdometry'])
if pm is None:
pm = messaging.PubMaster(['liveCalibration'])
calibrator = Calibrator(param_put=True)
# buffer with all the messages that still need to be input into the kalman
while 1:
sm.update()
new_vp = calibrator.handle_cam_odom(sm['cameraOdometry'])
if DEBUG and new_vp is not None:
      print('got new vp', new_vp)
calibrator.send_data(pm)
def main(sm=None, pm=None):
calibrationd_thread(sm, pm)
if __name__ == "__main__":
main()
| [] |
datastax-labs/hunter | hunter/main.py | 3631cc3fa529991297a8b631bbae15b138cce307 | import argparse
import copy
import logging
import sys
from dataclasses import dataclass
from datetime import datetime, timedelta
from slack_sdk import WebClient
from typing import Dict, Optional, List
import pytz
from hunter import config
from hunter.attributes import get_back_links
from hunter.config import ConfigError, Config
from hunter.data_selector import DataSelector
from hunter.grafana import GrafanaError, Grafana, Annotation
from hunter.graphite import GraphiteError
from hunter.importer import DataImportError, Importers
from hunter.report import Report
from hunter.series import (
AnalysisOptions,
ChangePointGroup,
SeriesComparison,
compare,
AnalyzedSeries,
)
from hunter.slack import SlackNotifier, NotificationError
from hunter.test_config import TestConfigError, TestConfig, GraphiteTestConfig
from hunter.util import parse_datetime, DateFormatError, interpolate
@dataclass
class HunterError(Exception):
message: str
class Hunter:
__conf: Config
__importers: Importers
__grafana: Optional[Grafana]
__slack: Optional[SlackNotifier]
def __init__(self, conf: Config):
self.__conf = conf
self.__importers = Importers(conf)
self.__grafana = None
self.__slack = self.__maybe_create_slack_notifier()
def list_tests(self, group_names: Optional[List[str]]):
if group_names is not None:
test_names = []
for group_name in group_names:
group = self.__conf.test_groups.get(group_name)
if group is None:
raise HunterError(f"Test group not found: {group_name}")
test_names += (t.name for t in group)
else:
test_names = self.__conf.tests
for test_name in sorted(test_names):
print(test_name)
def list_test_groups(self):
for group_name in sorted(self.__conf.test_groups):
print(group_name)
def get_test(self, test_name: str) -> TestConfig:
test = self.__conf.tests.get(test_name)
if test is None:
raise HunterError(f"Test not found {test_name}")
return test
def get_tests(self, *names: str) -> List[TestConfig]:
tests = []
for name in names:
group = self.__conf.test_groups.get(name)
if group is not None:
tests += group
else:
test = self.__conf.tests.get(name)
if test is not None:
tests.append(test)
else:
raise HunterError(f"Test or group not found: {name}")
return tests
def list_metrics(self, test: TestConfig):
importer = self.__importers.get(test)
for metric_name in importer.fetch_all_metric_names(test):
print(metric_name)
def analyze(
self, test: TestConfig, selector: DataSelector, options: AnalysisOptions
) -> AnalyzedSeries:
importer = self.__importers.get(test)
series = importer.fetch_data(test, selector)
analyzed_series = series.analyze(options)
change_points = analyzed_series.change_points_by_time
report = Report(series, change_points)
print(test.name + ":")
print(report.format_log_annotated())
return analyzed_series
def __get_grafana(self) -> Grafana:
if self.__grafana is None:
self.__grafana = Grafana(self.__conf.grafana)
return self.__grafana
def update_grafana_annotations(self, test: GraphiteTestConfig, series: AnalyzedSeries):
grafana = self.__get_grafana()
begin = datetime.fromtimestamp(series.time()[0], tz=pytz.UTC)
end = datetime.fromtimestamp(series.time()[len(series.time()) - 1], tz=pytz.UTC)
logging.info(f"Fetching Grafana annotations for test {test.name}...")
tags_to_query = ["hunter", "change-point", "test:" + test.name]
old_annotations_for_test = grafana.fetch_annotations(begin, end, list(tags_to_query))
logging.info(f"Found {len(old_annotations_for_test)} annotations")
created_count = 0
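        # For each metric with detected change points, build the desired set of annotations and reconcile it with what Grafana already has.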
for metric_name, change_points in series.change_points.items():
path = test.get_path(series.branch_name(), metric_name)
metric_tag = f"metric:{metric_name}"
tags_to_create = (
tags_to_query
+ [metric_tag]
+ test.tags
+ test.annotate
+ test.metrics[metric_name].annotate
)
substitutions = {
"TEST_NAME": test.name,
"METRIC_NAME": metric_name,
"GRAPHITE_PATH": [path],
"GRAPHITE_PATH_COMPONENTS": path.split("."),
"GRAPHITE_PREFIX": [test.prefix],
"GRAPHITE_PREFIX_COMPONENTS": test.prefix.split("."),
}
tmp_tags_to_create = []
for t in tags_to_create:
tmp_tags_to_create += interpolate(t, substitutions)
tags_to_create = tmp_tags_to_create
old_annotations = [a for a in old_annotations_for_test if metric_tag in a.tags]
old_annotation_times = set((a.time for a in old_annotations if a.tags))
target_annotations = []
for cp in change_points:
attributes = series.attributes_at(cp.index)
annotation_text = get_back_links(attributes)
target_annotations.append(
Annotation(
id=None,
time=datetime.fromtimestamp(cp.time, tz=pytz.UTC),
text=annotation_text,
tags=tags_to_create,
)
)
target_annotation_times = set((a.time for a in target_annotations))
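            # Delete annotations that no longer correspond to a change point and create the ones that are missing.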
to_delete = [a for a in old_annotations if a.time not in target_annotation_times]
if to_delete:
logging.info(
f"Removing {len(to_delete)} annotations "
f"for test {test.name} and metric {metric_name}..."
)
grafana.delete_annotations(*(a.id for a in to_delete))
to_create = [a for a in target_annotations if a.time not in old_annotation_times]
if to_create:
logging.info(
f"Creating {len(to_create)} annotations "
f"for test {test.name} and metric {metric_name}..."
)
grafana.create_annotations(*to_create)
created_count += len(to_create)
if created_count == 0:
logging.info("All annotations up-to-date. No new annotations needed.")
else:
logging.info(f"Created {created_count} annotations.")
def remove_grafana_annotations(self, test: Optional[TestConfig], force: bool):
"""Removes all Hunter annotations (optionally for a given test) in Grafana"""
grafana = self.__get_grafana()
if test:
logging.info(f"Fetching Grafana annotations for test {test.name}...")
else:
logging.info(f"Fetching Grafana annotations...")
tags_to_query = {"hunter", "change-point"}
if test:
tags_to_query.add("test:" + test.name)
annotations = grafana.fetch_annotations(None, None, list(tags_to_query))
if not annotations:
logging.info("No annotations found.")
return
if not force:
print(
f"Are you sure to remove {len(annotations)} annotations from {grafana.url}? [y/N]"
)
decision = input().strip()
if decision.lower() != "y" and decision.lower() != "yes":
return
logging.info(f"Removing {len(annotations)} annotations...")
grafana.delete_annotations(*(a.id for a in annotations))
def regressions(
self, test: TestConfig, selector: DataSelector, options: AnalysisOptions
) -> bool:
importer = self.__importers.get(test)
# Even if user is interested only in performance difference since some point X,
# we really need to fetch some earlier points than X.
# Otherwise, if performance went down very early after X, e.g. at X + 1, we'd have
# insufficient number of data points to compute the baseline performance.
# Instead of using `since-` selector, we're fetching everything from the
# beginning and then we find the baseline performance around the time pointed by
# the original selector.
since_version = selector.since_version
since_commit = selector.since_commit
since_time = selector.since_time
baseline_selector = copy.deepcopy(selector)
baseline_selector.last_n_points = sys.maxsize
baseline_selector.branch = None
baseline_selector.since_version = None
baseline_selector.since_commit = None
baseline_selector.since_time = since_time - timedelta(days=30)
baseline_series = importer.fetch_data(test, baseline_selector)
if since_version:
baseline_index = baseline_series.find_by_attribute("version", since_version)
if not baseline_index:
raise HunterError(f"No runs of test {test.name} with version {since_version}")
baseline_index = max(baseline_index)
elif since_commit:
baseline_index = baseline_series.find_by_attribute("commit", since_commit)
if not baseline_index:
raise HunterError(f"No runs of test {test.name} with commit {since_commit}")
baseline_index = max(baseline_index)
else:
baseline_index = baseline_series.find_first_not_earlier_than(since_time)
baseline_series = baseline_series.analyze()
if selector.branch:
target_series = importer.fetch_data(test, selector).analyze()
else:
target_series = baseline_series
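        # Compare the baseline window against the latest points of the (possibly branch-specific) target series.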
cmp = compare(baseline_series, baseline_index, target_series, target_series.len())
regressions = []
for metric_name, stats in cmp.stats.items():
direction = baseline_series.metric(metric_name).direction
m1 = stats.mean_1
m2 = stats.mean_2
change_percent = stats.forward_rel_change() * 100.0
if m2 * direction < m1 * direction and stats.pvalue < options.max_pvalue:
regressions.append(
" {:16}: {:#8.3g} --> {:#8.3g} ({:+6.1f}%)".format(
metric_name, m1, m2, change_percent
)
)
if regressions:
print(f"{test.name}:")
for r in regressions:
print(r)
else:
print(f"{test.name}: OK")
return len(regressions) > 0
def __maybe_create_slack_notifier(self):
if not self.__conf.slack:
return None
return SlackNotifier(WebClient(token=self.__conf.slack.bot_token))
def notify_slack(
self,
test_change_points: Dict[str, AnalyzedSeries],
selector: DataSelector,
channels: List[str],
since: datetime,
):
if not self.__slack:
logging.error(
"Slack definition is missing from the configuration, cannot send notification"
)
return
self.__slack.notify(test_change_points, selector=selector, channels=channels, since=since)
def validate(self):
valid = True
unique_metrics = set()
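        # Fully-qualified metric names must be unique across all test definitions.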
for name, test in self.__conf.tests.items():
logging.info("Checking {}".format(name))
test_metrics = test.fully_qualified_metric_names()
for test_metric in test_metrics:
if test_metric not in unique_metrics:
unique_metrics.add(test_metric)
else:
valid = False
logging.error(f"Found duplicated metric: {test_metric}")
try:
importer = self.__importers.get(test)
series = importer.fetch_data(test)
for metric, metric_data in series.data.items():
if not metric_data:
logging.warning(f"Test's metric does not have data: {name} {metric}")
except Exception as err:
logging.error(f"Invalid test definition: {name}\n{repr(err)}\n")
valid = False
logging.info(f"Validation finished: {'VALID' if valid else 'INVALID'}")
if not valid:
exit(1)
def setup_data_selector_parser(parser: argparse.ArgumentParser):
parser.add_argument(
"--branch", metavar="STRING", dest="branch", help="name of the branch", nargs="?"
)
parser.add_argument(
"--metrics",
metavar="LIST",
dest="metrics",
help="a comma-separated list of metrics to analyze",
)
parser.add_argument(
"--attrs",
metavar="LIST",
dest="attributes",
help="a comma-separated list of attribute names associated with the runs "
"(e.g. commit, branch, version); "
"if not specified, it will be automatically filled based on available information",
)
since_group = parser.add_mutually_exclusive_group()
since_group.add_argument(
"--since-commit",
metavar="STRING",
dest="since_commit",
help="the commit at the start of the time span to analyze",
)
since_group.add_argument(
"--since-version",
metavar="STRING",
dest="since_version",
help="the version at the start of the time span to analyze",
)
since_group.add_argument(
"--since",
metavar="DATE",
dest="since_time",
help="the start of the time span to analyze; "
"accepts ISO, and human-readable dates like '10 weeks ago'",
)
until_group = parser.add_mutually_exclusive_group()
until_group.add_argument(
"--until-commit",
metavar="STRING",
dest="until_commit",
help="the commit at the end of the time span to analyze",
)
until_group.add_argument(
"--until-version",
metavar="STRING",
dest="until_version",
help="the version at the end of the time span to analyze",
)
until_group.add_argument(
"--until",
metavar="DATE",
dest="until_time",
help="the end of the time span to analyze; same syntax as --since",
)
parser.add_argument(
"--last",
type=int,
metavar="COUNT",
dest="last_n_points",
help="the number of data points to take from the end of the series"
)
def data_selector_from_args(args: argparse.Namespace) -> DataSelector:
data_selector = DataSelector()
if args.branch:
data_selector.branch = args.branch
if args.metrics is not None:
data_selector.metrics = list(args.metrics.split(","))
if args.attributes is not None:
data_selector.attributes = list(args.attributes.split(","))
if args.since_commit is not None:
data_selector.since_commit = args.since_commit
if args.since_version is not None:
data_selector.since_version = args.since_version
if args.since_time is not None:
data_selector.since_time = parse_datetime(args.since_time)
if args.until_commit is not None:
data_selector.until_commit = args.until_commit
if args.until_version is not None:
data_selector.until_version = args.until_version
if args.until_time is not None:
data_selector.until_time = parse_datetime(args.until_time)
if args.last_n_points is not None:
data_selector.last_n_points = args.last_n_points
return data_selector
def setup_analysis_options_parser(parser: argparse.ArgumentParser):
parser.add_argument(
"-P, --p-value",
dest="pvalue",
type=float,
default=0.001,
help="maximum accepted P-value of a change-point; "
"P denotes the probability that the change-point has "
"been found by a random coincidence, rather than a real "
"difference between the data distributions",
)
parser.add_argument(
"-M",
"--magnitude",
dest="magnitude",
type=float,
default=0.0,
help="minimum accepted magnitude of a change-point "
"computed as abs(new_mean / old_mean - 1.0); use it "
"to filter out stupidly small changes like < 0.01",
)
parser.add_argument(
"--window",
default=50,
type=int,
dest="window",
help="the number of data points analyzed at once; "
"the window size affects the discriminative "
"power of the change point detection algorithm; "
"large windows are less susceptible to noise; "
"however, a very large window may cause dismissing short regressions "
"as noise so it is best to keep it short enough to include not more "
"than a few change points (optimally at most 1)",
)
def analysis_options_from_args(args: argparse.Namespace) -> AnalysisOptions:
conf = AnalysisOptions()
if args.pvalue is not None:
conf.max_pvalue = args.pvalue
if args.magnitude is not None:
conf.min_magnitude = args.magnitude
if args.window is not None:
conf.window_len = args.window
return conf
def main():
logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.INFO)
parser = argparse.ArgumentParser(description="Hunts performance regressions in Fallout results")
subparsers = parser.add_subparsers(dest="command")
list_tests_parser = subparsers.add_parser("list-tests", help="list available tests")
list_tests_parser.add_argument("group", help="name of the group of the tests", nargs="*")
list_metrics_parser = subparsers.add_parser(
"list-metrics", help="list available metrics for a test"
)
list_metrics_parser.add_argument("test", help="name of the test")
subparsers.add_parser("list-groups", help="list available groups of tests")
analyze_parser = subparsers.add_parser(
"analyze",
help="analyze performance test results",
formatter_class=argparse.RawTextHelpFormatter,
)
analyze_parser.add_argument("tests", help="name of the test or group of the tests", nargs="+")
analyze_parser.add_argument(
"--update-grafana",
help="Update Grafana dashboards with appropriate annotations of change points",
action="store_true",
)
analyze_parser.add_argument(
"--notify-slack",
help="Send notification containing a summary of change points to given Slack channels",
nargs="+",
)
analyze_parser.add_argument(
"--cph-report-since",
help="Sets a limit on the date range of the Change Point History reported to Slack. Same syntax as --since.",
metavar="DATE",
dest="cph_report_since",
)
setup_data_selector_parser(analyze_parser)
setup_analysis_options_parser(analyze_parser)
regressions_parser = subparsers.add_parser("regressions", help="find performance regressions")
regressions_parser.add_argument(
"tests", help="name of the test or group of the tests", nargs="+"
)
setup_data_selector_parser(regressions_parser)
setup_analysis_options_parser(regressions_parser)
remove_annotations_parser = subparsers.add_parser("remove-annotations")
remove_annotations_parser.add_argument(
"tests", help="name of the test or test group", nargs="*"
)
remove_annotations_parser.add_argument(
"--force", help="don't ask questions, just do it", dest="force", action="store_true"
)
    validate_parser = subparsers.add_parser(
        "validate", help="validates the tests and metrics defined in the configuration"
    )
try:
args = parser.parse_args()
conf = config.load_config()
hunter = Hunter(conf)
if args.command == "list-groups":
hunter.list_test_groups()
if args.command == "list-tests":
group_names = args.group if args.group else None
hunter.list_tests(group_names)
if args.command == "list-metrics":
test = hunter.get_test(args.test)
hunter.list_metrics(test)
if args.command == "analyze":
update_grafana_flag = args.update_grafana
slack_notification_channels = args.notify_slack
slack_cph_since = parse_datetime(args.cph_report_since)
data_selector = data_selector_from_args(args)
options = analysis_options_from_args(args)
tests = hunter.get_tests(*args.tests)
tests_analyzed_series = {test.name: None for test in tests}
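            # Remember each analyzed series so a single Slack notification can summarize all tests afterwards.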
for test in tests:
try:
analyzed_series = hunter.analyze(test, selector=data_selector, options=options)
if update_grafana_flag:
if not isinstance(test, GraphiteTestConfig):
raise GrafanaError(f"Not a Graphite test")
hunter.update_grafana_annotations(test, analyzed_series)
if slack_notification_channels:
tests_analyzed_series[test.name] = analyzed_series
except DataImportError as err:
logging.error(err.message)
except GrafanaError as err:
logging.error(
f"Failed to update grafana dashboards for {test.name}: {err.message}"
)
if slack_notification_channels:
hunter.notify_slack(
tests_analyzed_series,
selector=data_selector,
channels=slack_notification_channels,
since=slack_cph_since,
)
if args.command == "regressions":
data_selector = data_selector_from_args(args)
options = analysis_options_from_args(args)
tests = hunter.get_tests(*args.tests)
regressing_test_count = 0
errors = 0
for test in tests:
try:
regressions = hunter.regressions(
test, selector=data_selector, options=options
)
if regressions:
regressing_test_count += 1
except HunterError as err:
logging.error(err.message)
errors += 1
except DataImportError as err:
logging.error(err.message)
errors += 1
if regressing_test_count == 0:
print("No regressions found!")
elif regressing_test_count == 1:
print("Regressions in 1 test found")
else:
print(f"Regressions in {regressing_test_count} tests found")
if errors > 0:
print(f"Some tests were skipped due to import / analyze errors. Consult error log.")
if args.command == "remove-annotations":
if args.tests:
tests = hunter.get_tests(*args.tests)
for test in tests:
hunter.remove_grafana_annotations(test, args.force)
else:
hunter.remove_grafana_annotations(None, args.force)
if args.command == "validate":
hunter.validate()
if args.command is None:
parser.print_usage()
except ConfigError as err:
logging.error(err.message)
exit(1)
except TestConfigError as err:
logging.error(err.message)
exit(1)
except GraphiteError as err:
logging.error(err.message)
exit(1)
except GrafanaError as err:
logging.error(err.message)
exit(1)
except DataImportError as err:
logging.error(err.message)
exit(1)
except HunterError as err:
logging.error(err.message)
exit(1)
except DateFormatError as err:
logging.error(err.message)
exit(1)
except NotificationError as err:
logging.error(err.message)
exit(1)
if __name__ == "__main__":
main()
| [((386, 20, 386, 34), 'hunter.data_selector.DataSelector', 'DataSelector', ({}, {}), '()', False, 'from hunter.data_selector import DataSelector\n'), ((447, 11, 447, 28), 'hunter.series.AnalysisOptions', 'AnalysisOptions', ({}, {}), '()', False, 'from hunter.series import AnalysisOptions, ChangePointGroup, SeriesComparison, compare, AnalyzedSeries\n'), ((458, 4, 458, 80), 'logging.basicConfig', 'logging.basicConfig', (), '', False, 'import logging\n'), ((460, 13, 460, 100), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((46, 27, 46, 42), 'hunter.importer.Importers', 'Importers', ({(46, 37, 46, 41): 'conf'}, {}), '(conf)', False, 'from hunter.importer import DataImportError, Importers\n'), ((100, 17, 100, 46), 'hunter.report.Report', 'Report', ({(100, 24, 100, 30): 'series', (100, 32, 100, 45): 'change_points'}, {}), '(series, change_points)', False, 'from hunter.report import Report\n'), ((115, 8, 115, 77), 'logging.info', 'logging.info', ({(115, 21, 115, 76): 'f"""Fetching Grafana annotations for test {test.name}..."""'}, {}), "(f'Fetching Grafana annotations for test {test.name}...')", False, 'import logging\n'), ((224, 28, 224, 51), 'copy.deepcopy', 'copy.deepcopy', ({(224, 42, 224, 50): 'selector'}, {}), '(selector)', False, 'import copy\n'), ((314, 8, 314, 79), 'logging.info', 'logging.info', ({(314, 21, 314, 78): 'f"""Validation finished: {\'VALID\' if valid else \'INVALID\'}"""'}, {}), '(f"Validation finished: {\'VALID\' if valid else \'INVALID\'}")', False, 'import logging\n'), ((398, 35, 398, 66), 'hunter.util.parse_datetime', 'parse_datetime', ({(398, 50, 398, 65): 'args.since_time'}, {}), '(args.since_time)', False, 'from hunter.util import parse_datetime, DateFormatError, interpolate\n'), ((404, 35, 404, 66), 'hunter.util.parse_datetime', 'parse_datetime', ({(404, 50, 404, 65): 'args.until_time'}, {}), '(args.until_time)', False, 'from hunter.util import parse_datetime, DateFormatError, interpolate\n'), ((518, 15, 518, 35), 'hunter.config.load_config', 'config.load_config', ({}, {}), '()', False, 'from hunter import config\n'), ((107, 29, 107, 57), 'hunter.grafana.Grafana', 'Grafana', ({(107, 37, 107, 56): 'self.__conf.grafana'}, {}), '(self.__conf.grafana)', False, 'from hunter.grafana import GrafanaError, Grafana, Annotation\n'), ((181, 12, 181, 82), 'logging.info', 'logging.info', ({(181, 25, 181, 81): '"""All annotations up-to-date. No new annotations needed."""'}, {}), "('All annotations up-to-date. No new annotations needed.')", False, 'import logging\n'), ((183, 12, 183, 65), 'logging.info', 'logging.info', ({(183, 25, 183, 64): 'f"""Created {created_count} annotations."""'}, {}), "(f'Created {created_count} annotations.')", False, 'import logging\n'), ((189, 12, 189, 81), 'logging.info', 'logging.info', ({(189, 25, 189, 80): 'f"""Fetching Grafana annotations for test {test.name}..."""'}, {}), "(f'Fetching Grafana annotations for test {test.name}...')", False, 'import logging\n'), ((191, 12, 191, 60), 'logging.info', 'logging.info', ({(191, 25, 191, 59): 'f"""Fetching Grafana annotations..."""'}, {}), "(f'Fetching Grafana annotations...')", False, 'import logging\n'), ((197, 12, 197, 49), 'logging.info', 'logging.info', ({(197, 25, 197, 48): '"""No annotations found."""'}, {}), "('No annotations found.')", False, 'import logging\n'), ((229, 52, 229, 70), 'datetime.timedelta', 'timedelta', (), '', False, 'from datetime import datetime, timedelta\n'), ((277, 29, 277, 73), 'slack_sdk.WebClient', 'WebClient', (), '', False, 'from slack_sdk import WebClient\n'), ((287, 12, 289, 13), 'logging.error', 'logging.error', ({(288, 16, 288, 94): '"""Slack definition is missing from the configuration, cannot send notification"""'}, {}), "(\n    'Slack definition is missing from the configuration, cannot send notification'\n    )", False, 'import logging\n'), ((535, 30, 535, 67), 'hunter.util.parse_datetime', 'parse_datetime', ({(535, 45, 535, 66): 'args.cph_report_since'}, {}), '(args.cph_report_since)', False, 'from hunter.util import parse_datetime, DateFormatError, interpolate\n'), ((606, 8, 606, 34), 'logging.error', 'logging.error', ({(606, 22, 606, 33): 'err.message'}, {}), '(err.message)', False, 'import logging\n'), ((609, 8, 609, 34), 'logging.error', 'logging.error', ({(609, 22, 609, 33): 'err.message'}, {}), '(err.message)', False, 'import logging\n'), ((612, 8, 612, 34), 'logging.error', 'logging.error', ({(612, 22, 612, 33): 'err.message'}, {}), '(err.message)', False, 'import logging\n'), ((615, 8, 615, 34), 'logging.error', 'logging.error', ({(615, 22, 615, 33): 'err.message'}, {}), '(err.message)', False, 'import logging\n'), ((618, 8, 618, 34), 'logging.error', 'logging.error', ({(618, 22, 618, 33): 'err.message'}, {}), '(err.message)', False, 'import logging\n'), ((621, 8, 621, 34), 'logging.error', 'logging.error', ({(621, 22, 621, 33): 'err.message'}, {}), '(err.message)', False, 'import logging\n'), ((624, 8, 624, 34), 'logging.error', 'logging.error', ({(624, 22, 624, 33): 'err.message'}, {}), '(err.message)', False, 'import logging\n'), ((627, 8, 627, 34), 'logging.error', 'logging.error', ({(627, 22, 627, 33): 'err.message'}, {}), '(err.message)', False, 'import logging\n'), ((143, 38, 143, 67), 'hunter.util.interpolate', 'interpolate', ({(143, 50, 143, 51): 't', (143, 53, 143, 66): 'substitutions'}, {}), '(t, substitutions)', False, 'from hunter.util import parse_datetime, DateFormatError, interpolate\n'), ((152, 34, 152, 60), 'hunter.attributes.get_back_links', 'get_back_links', ({(152, 49, 152, 59): 'attributes'}, {}), '(attributes)', False, 'from hunter.attributes import get_back_links\n'), ((304, 20, 304, 76), 'logging.error', 'logging.error', ({(304, 34, 304, 75): 'f"""Found duplicated metric: {test_metric}"""'}, {}), "(f'Found duplicated metric: {test_metric}')", False, 'import logging\n'), ((310, 24, 310, 93), 'logging.warning', 'logging.warning', ({(310, 40, 310, 92): 'f"""Test\'s metric does not have data: {name} {metric}"""'}, {}), '(f"Test\'s metric does not have data: {name} {metric}")', False, 'import logging\n'), ((550, 20, 550, 46), 'logging.error', 'logging.error', ({(550, 34, 550, 45): 'err.message'}, {}), '(err.message)', False, 'import logging\n'), ((552, 20, 554, 21), 'logging.error', 'logging.error', ({(553, 24, 553, 93): 'f"""Failed to update grafana dashboards for {test.name}: {err.message}"""'}, {}), "(\n    f'Failed to update grafana dashboards for {test.name}: {err.message}')", False, 'import logging\n'), ((577, 20, 577, 46), 'logging.error', 'logging.error', ({(577, 34, 577, 45): 'err.message'}, {}), '(err.message)', False, 'import logging\n'), ((580, 20, 580, 46), 'logging.error', 'logging.error', ({(580, 34, 580, 45): 'err.message'}, {}), '(err.message)', False, 'import logging\n'), ((156, 29, 156, 73), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (), '', False, 'from datetime import datetime, timedelta\n'), ((545, 34, 545, 70), 'hunter.grafana.GrafanaError', 'GrafanaError', ({(545, 47, 545, 69): 'f"""Not a Graphite test"""'}, {}), "(f'Not a Graphite test')", False, 'from hunter.grafana import GrafanaError, Grafana, Annotation\n')]