from keras.preprocessing.image import img_to_array, load_img
import numpy as np
from ast import literal_eval
import traceback
def make_image_sample(photo_id_file,image_array_file):
images_root = "data/images_150X150/"
x_train = []
count = 0
means = None
stds = None
with open(photo_id_file,'r') as f:
train_tups = f.readlines()
for tup in train_tups:
id,owner = literal_eval(tup)
# print id,owner
file_path = images_root+owner+"/"+id+".jpg"
# print file_path
try:
img_arr = img_to_array(load_img(file_path,target_size=(227,227)))
# note: img_arr[ind] has shape (width, channels), so np.fliplr here reverses
# the channel order of each row (RGB -> BGR) rather than flipping the image horizontally
for ind in range(img_arr.shape[0]):
img_arr[ind] = np.fliplr(img_arr[ind])
if means is None:
means = [np.mean(img_arr,axis=(0,1))]
stds = [np.std(img_arr, axis=(0, 1))]
else:
means = np.append(means,[np.mean(img_arr,axis=(0,1))],axis=0)
stds = np.append(stds, [np.std(img_arr, axis=(0, 1))], axis=0)
x_train.append(img_arr)
count += 1
if count % 100 == 0:
print(count)
except Exception:
# skip images that are missing or fail to load
# traceback.print_exc()
pass
fin_mean = np.mean(means, axis=0)
fin_stds = np.mean(stds, axis=0)
fin_x_train = []
for x in x_train:
fin_x_train += [(x - fin_mean) / fin_stds]
fin_x_train = np.array(fin_x_train, dtype=float)
print(fin_x_train.shape)
# print x_train.shape
# x_train = x_train
np.save(image_array_file, fin_x_train)
# make_image_sample("new_photo_ids_train_100.txt","image_train_array")
# print "----------------------Validate---------------------------"
# make_image_sample("new_photo_ids_validate_100.txt","image_validate_array")
# print "----------------------Test---------------------------"
# make_image_sample("new_photo_ids_test_100.txt","image_test_array")
# make_image_sample("ids_train.txt","image_train_array")
# print "----------------------Validate---------------------------"
# make_image_sample("ids_validate.txt","image_validate_array")
# print "----------------------Test---------------------------"
# make_image_sample("ids_test.txt","image_test_array")
|
from .msvc import msvc_mangle, msvc_demangle
def mangle(name):
return msvc_mangle(name)
def demangle(obj):
return msvc_demangle(obj)
|
"""
This plugin adds a ``/server_part`` command to leave all rooms
on a server.
Command
-------
.. glossary::
/server_part
**Usage:** ``/server_part [<server> [message]]``
Leave all rooms on ``<server>``. If no server is provided and the
current tab is a chatroom tab, all rooms on the current server are left.
``[message]`` can specify an optional quit message.
"""
from slixmpp import JID, InvalidJID
from poezio.plugin import BasePlugin
from poezio.tabs import MucTab
from poezio.decorators import command_args_parser
from poezio.core.structs import Completion
class Plugin(BasePlugin):
def init(self):
self.api.add_command(
'server_part',
self.command_server_part,
usage='[<server> [message]]',
short='Leave all the rooms on a server',
help='Leave all the rooms on a server.',
completion=self.completion_server_part)
@command_args_parser.quoted(0, 2, defaults=[])
def command_server_part(self, args):
current_tab = self.api.current_tab()
if not args and not isinstance(current_tab, MucTab):
return self.core.command_help('server_part')
elif not args:
jid = current_tab.jid.bare
message = None
elif len(args) == 1:
try:
jid = JID(args[0]).domain
except InvalidJID:
return self.core.command_help('server_part')
message = None
else:
try:
jid = JID(args[0]).domain
except InvalidJID:
return self.core.command_help('server_part')
message = args[1]
for tab in self.core.get_tabs(MucTab):
if tab.name.endswith(jid):
tab.command_part(message)
def completion_server_part(self, the_input):
serv_list = set()
for tab in self.core.get_tabs(MucTab):
if tab.joined:
serv = tab.jid.server
serv_list.add(serv)
return Completion(the_input.new_completion, sorted(serv_list), 1, ' ')
|
# -*- coding: utf-8 -*-
"""
ngram.py
Class for NGram Analysis
@author: Douglas Daly
@date: 1/11/2017
"""
#
# Imports
#
import os.path
from collections import Counter
from .. import string_helpers
#
# Variables
#
_ngram_base_dir = "../resources/ngrams/"
#
# Class
#
class NGram(object):
"""
N-Gram class for Text Analysis
"""
def __init__(self, n, language='english'):
""" Default Constructor
"""
filename = language + "_" + str(n) + "grams.txt"
# - Throw Exception if no resource file for this
if not os.path.isfile(_ngram_base_dir + filename):
raise Exception("No N-Gram Resource File for N=" + str(n) + " and lang=" + language + " exists.")
self.__n = n
self.__gram_dict = self.__load_grams_from_file(_ngram_base_dir + filename)
@staticmethod
def __load_grams_from_file(filepath):
""" Loads the N-Gram Dict from File
:param str filepath: Filepath to load the N-Gram Dict from
"""
occurrences = dict()
run_tot = 0.0
# - Load from File
with open(filepath, 'r') as f:
for line in f:
splat = line.split(' ')
tnum = float(splat[1])
occurrences[splat[0].upper()] = tnum
run_tot += tnum
# - Convert to Probabilities/Rates
output = dict()
for (k, v) in occurrences.items():
output[k] = v / run_tot
return output
def get_single_word_occurrences(self, word):
""" Gets occurrences of this N-Gram from the given Word
:param str word: Word to get N-Gram occurrences for
:return: Dictionary of N-Gram to Count
:rtype: dict
"""
if len(word) < self.__n:
return dict()
output = dict()
for c in range(len(word)-self.__n+1):
tw = word[c:c+self.__n]
if tw in self.__gram_dict:
if tw not in output:
output[tw] = 1
else:
output[tw] += 1
return output
def get_text_occurrences(self, text, remove_spaces=False):
""" Gets occurrences of this N-Gram from the given Text
:param str text: Text to get N-Gram analysis for
:param bool remove_spaces: [Optional] Whether or not to remove spaces before the analysis
:return: Dictionary of N-Gram to occurrence count
:rtype: dict
"""
# - First strip punctuation
text = string_helpers.remove_punctuation(text).upper()
if remove_spaces:
text = text.replace(' ', '')
splat = text.split(' ')
rc = Counter()
for word in splat:
tc = Counter(self.get_single_word_occurrences(word))
rc.update(tc)
return dict(rc)
def convert_occurrences_to_sum_probabilities(self, occurrences):
""" Converts the given Occurrence dictionary to the sum of the NGram probabilities
:param dict occurrences: N-Gram Occurrences count dictionary
:returns: Dictionary of N-Gram to sum of probability of N-Gram occurring
:rtype: dict
"""
output = dict()
for (k, v) in occurrences.items():
output[k] = v * self.__gram_dict[k]
return output
def get_text_occurrence_rates(self, text, remove_spaces=False):
""" Gets the occurrence rates for the given text
:param str text: Text to get occurrence rates for
:param bool remove_spaces: [Optional] Whether or not to remove spaces during the N-Gram analysis
:return: Dictionary of N-Gram to percentage occurrence
:rtype: dict
"""
occurrences = self.get_text_occurrences(text, remove_spaces=remove_spaces)
run_tot = 0.0
for val in occurrences.values():
run_tot += val
output = dict()
for (k, v) in occurrences.items():
output[k] = v / run_tot
return output
def get_relative_occurrence_rates(self, occurrence_rates):
""" Gets the relative occurrence rates given a Dictionary of Occurrence Rates
:param dict occurrence_rates: Occurrence rates to get relative values for
:return: Dictionary of given occurrence rates normalized to 100 percent
:rtype: dict
"""
output = dict()
run_tot = 0.0
for k in occurrence_rates.keys():
t = self.__gram_dict[k]
run_tot += t
output[k] = t
for k in output.keys():
output[k] /= run_tot
return output
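# A minimal usage sketch, not part of the original module. It assumes a bigram
# resource file english_2grams.txt exists under _ngram_base_dir, formatted as
# "<GRAM> <count>" per line, which is what __load_grams_from_file expects.
# bigrams = NGram(2, language='english')
# counts = bigrams.get_text_occurrences("ATTACK AT DAWN")
# rates = bigrams.get_text_occurrence_rates("ATTACK AT DAWN")
# print(counts, rates)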
|
from __future__ import print_function
import os
import os.path as osp
import pickle
from scipy import io
import datetime
import time
from contextlib import contextmanager
import torch
from torch.autograd import Variable
def time_str(fmt=None):
if fmt is None:
fmt = '%Y-%m-%d_%H:%M:%S'
return datetime.datetime.today().strftime(fmt)
def load_pickle(path):
"""Check and load pickle object.
According to this post: https://stackoverflow.com/a/41733927, cPickle and
disabling garbage collector helps with loading speed."""
print(path)
assert osp.exists(path)
# gc.disable()
with open(path, 'rb') as f:
ret = pickle.load(f)
# gc.enable()
return ret
def save_pickle(obj, path):
"""Create dir and save file."""
may_make_dir(osp.dirname(osp.abspath(path)))
with open(path, 'wb') as f:
pickle.dump(obj, f, protocol=2)
def save_mat(ndarray, path):
"""Save a numpy ndarray as .mat file."""
io.savemat(path, dict(ndarray=ndarray))
def to_scalar(vt):
"""Transform a length-1 pytorch Variable or Tensor to scalar.
Suppose tx is a torch Tensor with shape tx.size() = torch.Size([1]),
then npx = tx.cpu().numpy() has shape (1,), not 1."""
if isinstance(vt, Variable):
return vt.data.cpu().numpy().flatten()[0]
if torch.is_tensor(vt):
return vt.cpu().numpy().flatten()[0]
raise TypeError('Input should be a variable or tensor')
def transfer_optim_state(state, device_id=-1):
"""Transfer an optimizer.state to cpu or specified gpu, which means
transferring tensors of the optimizer.state to specified device.
The modification is in place for the state.
Args:
state: An torch.optim.Optimizer.state
device_id: gpu id, or -1 which means transferring to cpu
"""
for key, val in state.items():
if isinstance(val, dict):
transfer_optim_state(val, device_id=device_id)
elif isinstance(val, Variable):
raise RuntimeError("Oops, state[{}] is a Variable!".format(key))
elif isinstance(val, torch.nn.Parameter):
raise RuntimeError("Oops, state[{}] is a Parameter!".format(key))
else:
try:
if device_id == -1:
state[key] = val.cpu()
else:
state[key] = val.cuda(device=device_id)
except:
pass
def may_transfer_optims(optims, device_id=-1):
"""Transfer optimizers to cpu or specified gpu, which means transferring
tensors of the optimizer to specified device. The modification is in place
for the optimizers.
Args:
optims: A list, which members are either torch.nn.optimizer or None.
device_id: gpu id, or -1 which means transferring to cpu
"""
for optim in optims:
if isinstance(optim, torch.optim.Optimizer):
transfer_optim_state(optim.state, device_id=device_id)
def may_transfer_modules_optims(modules_and_or_optims, device_id=-1):
"""Transfer optimizers/modules to cpu or specified gpu.
Args:
modules_and_or_optims: A list, which members are either torch.nn.optimizer
or torch.nn.Module or None.
device_id: gpu id, or -1 which means transferring to cpu
"""
for item in modules_and_or_optims:
if isinstance(item, torch.optim.Optimizer):
transfer_optim_state(item.state, device_id=device_id)
elif isinstance(item, torch.nn.Module):
if device_id == -1:
item.cpu()
else:
item.cuda(device=device_id)
elif item is not None:
print('[Warning] Invalid type {}'.format(item.__class__.__name__))
class TransferVarTensor(object):
"""Return a copy of the input Variable or Tensor on specified device."""
def __init__(self, device_id=-1):
self.device_id = device_id
def __call__(self, var_or_tensor):
return var_or_tensor.cpu() if self.device_id == -1 \
else var_or_tensor.cuda(self.device_id)
class TransferModulesOptims(object):
"""Transfer optimizers/modules to cpu or specified gpu."""
def __init__(self, device_id=-1):
self.device_id = device_id
def __call__(self, modules_and_or_optims):
may_transfer_modules_optims(modules_and_or_optims, self.device_id)
def set_devices(sys_device_ids):
"""
It sets some GPUs to be visible and returns wrappers for transferring
Variables/Tensors and Modules/Optimizers.
Args:
sys_device_ids: a tuple; which GPUs to use
e.g. sys_device_ids = (), only use cpu
sys_device_ids = (3,), use the 4th gpu
sys_device_ids = (0, 1, 2, 3,), use first 4 gpus
sys_device_ids = (0, 2, 4,), use the 1st, 3rd and 5th gpus
Returns:
TVT: a `TransferVarTensor` callable
TMO: a `TransferModulesOptims` callable
"""
# Set the CUDA_VISIBLE_DEVICES environment variable
import os
visible_devices = ''
for i in sys_device_ids:
visible_devices += '{}, '.format(i)
os.environ['CUDA_VISIBLE_DEVICES'] = visible_devices
# Return wrappers.
# Models and user defined Variables/Tensors would be transferred to the
# first device.
device_id = 0 if len(sys_device_ids) > 0 else -1
TVT = TransferVarTensor(device_id)
TMO = TransferModulesOptims(device_id)
return TVT, TMO
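# A minimal usage sketch (assumed, not from the original file): expose GPU 0,
# then move a model and a fresh tensor to the chosen device via the wrappers.
# TVT, TMO = set_devices((0,))
# model = torch.nn.Linear(10, 2)
# TMO([model])                  # moved to the first visible GPU (or kept on CPU for ())
# x = TVT(torch.zeros(1, 10))   # copy of the tensor on the same device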
def set_devices_for_ml(sys_device_ids):
"""This version is for mutual learning.
It sets some GPUs to be visible and returns wrappers for transferring
Variables/Tensors and Modules/Optimizers.
Args:
sys_device_ids: a tuple of tuples; which devices to use for each model,
len(sys_device_ids) should be equal to number of models. Examples:
sys_device_ids = ((-1,), (-1,))
the two models both on CPU
sys_device_ids = ((-1,), (2,))
the 1st model on CPU, the 2nd model on GPU 2
sys_device_ids = ((3,),)
the only one model on the 4th gpu
sys_device_ids = ((0, 1), (2, 3))
the 1st model on GPU 0 and 1, the 2nd model on GPU 2 and 3
sys_device_ids = ((0,), (0,))
the two models both on GPU 0
sys_device_ids = ((0,), (0,), (1,), (1,))
the 1st and 2nd model on GPU 0, the 3rd and 4th model on GPU 1
Returns:
TVTs: a list of `TransferVarTensor` callables, one for one model.
TMOs: a list of `TransferModulesOptims` callables, one for one model.
relative_device_ids: a list of lists; `sys_device_ids` transformed to
relative ids; to be used in `DataParallel`
"""
import os
all_ids = []
for ids in sys_device_ids:
all_ids += ids
unique_sys_device_ids = list(set(all_ids))
unique_sys_device_ids.sort()
if -1 in unique_sys_device_ids:
unique_sys_device_ids.remove(-1)
# Set the CUDA_VISIBLE_DEVICES environment variable
visible_devices = ''
for i in unique_sys_device_ids:
visible_devices += '{}, '.format(i)
os.environ['CUDA_VISIBLE_DEVICES'] = visible_devices
# Return wrappers
relative_device_ids = []
TVTs, TMOs = [], []
for ids in sys_device_ids:
relative_ids = []
for id in ids:
if id != -1:
id = find_index(unique_sys_device_ids, id)
relative_ids.append(id)
relative_device_ids.append(relative_ids)
# Models and user defined Variables/Tensors would be transferred to the
# first device.
TVTs.append(TransferVarTensor(relative_ids[0]))
TMOs.append(TransferModulesOptims(relative_ids[0]))
return TVTs, TMOs, relative_device_ids
def load_ckpt(modules_optims, ckpt_file, load_to_cpu=True, verbose=True):
"""Load state_dict's of modules/optimizers from file.
Args:
modules_optims: A list, which members are either torch.nn.optimizer
or torch.nn.Module.
ckpt_file: The file path.
load_to_cpu: Boolean. Whether to transform tensors in modules/optimizers
to cpu type.
"""
map_location = (lambda storage, loc: storage) if load_to_cpu else None
ckpt = torch.load(ckpt_file, map_location=map_location)
for m, sd in zip(modules_optims, ckpt['state_dicts']):
m.load_state_dict(sd)
if verbose:
print('Resume from ckpt {}, \nepoch {}, \nscores {}'.format(
ckpt_file, ckpt['ep'], ckpt['scores']))
return ckpt['ep'], ckpt['scores']
def save_ckpt(modules_optims, ep, scores, ckpt_file):
"""Save state_dict's of modules/optimizers to file.
Args:
modules_optims: A list, which members are either torch.nn.optimizer
or torch.nn.Module.
ep: the current epoch number
scores: the performance of current model
ckpt_file: The file path.
Note:
torch.save() reserves device type and id of tensors to save, so when
loading ckpt, you have to inform torch.load() to load these tensors to
cpu or your desired gpu, if you change devices.
"""
state_dicts = [m.state_dict() for m in modules_optims]
ckpt = dict(state_dicts=state_dicts,
ep=ep,
scores=scores)
may_make_dir(osp.dirname(osp.abspath(ckpt_file)))
torch.save(ckpt, ckpt_file)
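# A round-trip sketch (assumed usage, not from the original code) for the two
# helpers above, saving and restoring a model together with its optimizer:
# model = torch.nn.Linear(10, 2)
# optim = torch.optim.SGD(model.parameters(), lr=0.1)
# save_ckpt([model, optim], ep=1, scores=0.0, ckpt_file='ckpt/last.pth')
# ep, scores = load_ckpt([model, optim], 'ckpt/last.pth')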
def load_state_dict(model, src_state_dict):
"""Copy parameters and buffers from `src_state_dict` into `model` and its
descendants. The `src_state_dict.keys()` NEED NOT exactly match
`model.state_dict().keys()`. For dict key mismatch, just
skip it; for copying error, just output warnings and proceed.
Arguments:
model: A torch.nn.Module object.
src_state_dict (dict): A dict containing parameters and persistent buffers.
Note:
This is modified from torch.nn.modules.module.load_state_dict(), to make
the warnings and errors more detailed.
"""
from torch.nn import Parameter
dest_state_dict = model.state_dict()
for name, param in src_state_dict.items():
if name not in dest_state_dict:
continue
if isinstance(param, Parameter):
# backwards compatibility for serialized parameters
param = param.data
try:
dest_state_dict[name].copy_(param)
except Exception as msg:
print("Warning: Error occurs when copying '{}': {}"
.format(name, str(msg)))
src_missing = set(dest_state_dict.keys()) - set(src_state_dict.keys())
if len(src_missing) > 0:
print("Keys not found in source state_dict: ")
for n in src_missing:
print('\t', n)
dest_missing = set(src_state_dict.keys()) - set(dest_state_dict.keys())
if len(dest_missing) > 0:
print("Keys not found in destination state_dict: ")
for n in dest_missing:
print('\t', n)
def is_iterable(obj):
return hasattr(obj, '__len__')
def may_set_mode(maybe_modules, mode):
"""maybe_modules: an object or a list of objects."""
assert mode in ['train', 'eval']
if not is_iterable(maybe_modules):
maybe_modules = [maybe_modules]
for m in maybe_modules:
if isinstance(m, torch.nn.Module):
if mode == 'train':
m.train()
else:
m.eval()
def may_make_dir(path):
"""
Args:
path: a dir, or result of `osp.dirname(osp.abspath(file_path))`
Note:
`osp.exists('')` returns `False`, while `osp.exists('.')` returns `True`!
"""
# This clause has mistakes:
# if path is None or '':
if path in [None, '']:
return
if not osp.exists(path):
os.makedirs(path)
class AverageMeter(object):
"""Modified from Tong Xiao's open-reid.
Computes and stores the average and current value"""
def __init__(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = float(self.sum) / (self.count + 1e-20)
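# A small usage sketch (assumed, not from the original file): keep a weighted
# running average of per-batch losses across an epoch.
# loss_meter = AverageMeter()
# for batch_loss, batch_size in [(0.9, 32), (0.7, 32), (0.5, 16)]:
#     loss_meter.update(batch_loss, n=batch_size)
# print(loss_meter.avg)   # average weighted by the number of samples per batch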
class RunningAverageMeter(object):
"""Computes and stores the running average and current value"""
def __init__(self, hist=0.99):
self.val = None
self.avg = None
self.hist = hist
def reset(self):
self.val = None
self.avg = None
def update(self, val):
if self.avg is None:
self.avg = val
else:
self.avg = self.avg * self.hist + val * (1 - self.hist)
self.val = val
class RecentAverageMeter(object):
"""Stores and computes the average of recent values."""
def __init__(self, hist_size=100):
self.hist_size = hist_size
self.fifo = []
self.val = 0
def reset(self):
self.fifo = []
self.val = 0
def update(self, val):
self.val = val
self.fifo.append(val)
if len(self.fifo) > self.hist_size:
del self.fifo[0]
@property
def avg(self):
assert len(self.fifo) > 0
return float(sum(self.fifo)) / len(self.fifo)
def get_model_wrapper(model, multi_gpu):
from torch.nn.parallel import DataParallel
if multi_gpu:
return DataParallel(model)
else:
return model
class ReDirectSTD(object):
"""Modified from Tong Xiao's `Logger` in open-reid.
This class overwrites sys.stdout or sys.stderr, so that console logs can
also be written to file.
Args:
fpath: file path
console: one of ['stdout', 'stderr']
immediately_visible: If `False`, the file is opened only once and closed
after exiting. In this case, the message written to file may not be
immediately visible (Because the file handle is occupied by the
program?). If `True`, each writing operation of the console will
open, write to, and close the file. If your program has tons of writing
operations, the cost of opening and closing file may be obvious. (?)
Usage example:
`ReDirectSTD('stdout.txt', 'stdout', False)`
`ReDirectSTD('stderr.txt', 'stderr', False)`
NOTE: The file will be deleted if it already exists. The log dir and file are created
lazily -- if no message is written, the dir and file will not be created.
"""
def __init__(self, fpath=None, console='stdout', immediately_visible=False):
import sys
import os
import os.path as osp
assert console in ['stdout', 'stderr']
self.console = sys.stdout if console == 'stdout' else sys.stderr
self.file = fpath
self.f = None
self.immediately_visible = immediately_visible
if fpath is not None:
# Remove existing log file.
if osp.exists(fpath):
os.remove(fpath)
# Overwrite
if console == 'stdout':
sys.stdout = self
else:
sys.stderr = self
def __del__(self):
self.close()
def __enter__(self):
pass
def __exit__(self, *args):
self.close()
def write(self, msg):
self.console.write(msg)
if self.file is not None:
may_make_dir(os.path.dirname(osp.abspath(self.file)))
if self.immediately_visible:
with open(self.file, 'a') as f:
f.write(msg)
else:
if self.f is None:
self.f = open(self.file, 'w')
self.f.write(msg)
def flush(self):
self.console.flush()
if self.f is not None:
self.f.flush()
import os
os.fsync(self.f.fileno())
def close(self):
self.console.close()
if self.f is not None:
self.f.close()
def set_seed(seed):
import random
random.seed(seed)
print('setting random-seed to {}'.format(seed))
import numpy as np
np.random.seed(seed)
print('setting np-random-seed to {}'.format(seed))
import torch
torch.backends.cudnn.enabled = False
print('cudnn.enabled set to {}'.format(torch.backends.cudnn.enabled))
# set seed for CPU
torch.manual_seed(seed)
print('setting torch-seed to {}'.format(seed))
def print_array(array, fmt='{:.2f}', end=' '):
"""Print a 1-D tuple, list, or numpy array containing digits."""
s = ''
for x in array:
s += fmt.format(float(x)) + end
s += '\n'
print(s)
return s
# Great idea from https://github.com/amdegroot/ssd.pytorch
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
def tight_float_str(x, fmt='{:.4f}'):
return fmt.format(x).rstrip('0').rstrip('.')
def find_index(seq, item):
for i, x in enumerate(seq):
if item == x:
return i
return -1
def adjust_lr_exp(optimizer, base_lr, ep, total_ep, start_decay_at_ep):
"""Decay exponentially in the later phase of training. All parameters in the
optimizer share the same learning rate.
Args:
optimizer: a pytorch `Optimizer` object
base_lr: starting learning rate
ep: current epoch, ep >= 1
total_ep: total number of epochs to train
start_decay_at_ep: start decaying at the BEGINNING of this epoch
Example:
base_lr = 2e-4
total_ep = 300
start_decay_at_ep = 201
It means the learning rate starts at 2e-4 and begins decaying after 200
epochs. And training stops after 300 epochs.
NOTE:
It is meant to be called at the BEGINNING of an epoch.
"""
assert ep >= 1, "Current epoch number should be >= 1"
if ep < start_decay_at_ep:
return
for g in optimizer.param_groups:
g['lr'] = (base_lr * (0.001 ** (float(ep + 1 - start_decay_at_ep)
/ (total_ep + 1 - start_decay_at_ep))))
print('=====> lr adjusted to {:.10f}'.format(g['lr']).rstrip('0'))
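# A worked example of the schedule above (values taken from the docstring):
# with base_lr = 2e-4, total_ep = 300 and start_decay_at_ep = 201, epochs 1-200
# keep lr = 2e-4; from epoch 201 on,
#   lr(ep) = 2e-4 * 0.001 ** ((ep + 1 - 201) / (300 + 1 - 201))
# so lr(201) ~ 1.87e-4 and lr(300) = 2e-7, i.e. the rate shrinks smoothly to
# 1/1000 of the base value by the final epoch.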
def adjust_lr_staircase(optimizer, base_lr, ep, decay_at_epochs, factor):
"""Multiplied by a factor at the BEGINNING of specified epochs. All
parameters in the optimizer share the same learning rate.
Args:
optimizer: a pytorch `Optimizer` object
base_lr: starting learning rate
ep: current epoch, ep >= 1
decay_at_epochs: a list or tuple; learning rate is multiplied by a factor
at the BEGINNING of these epochs
factor: a number in range (0, 1)
Example:
base_lr = 1e-3
decay_at_epochs = [51, 101]
factor = 0.1
It means the learning rate starts at 1e-3 and is multiplied by 0.1 at the
BEGINNING of the 51'st epoch, and then further multiplied by 0.1 at the
BEGINNING of the 101'st epoch, then stays unchanged till the end of
training.
NOTE:
It is meant to be called at the BEGINNING of an epoch.
"""
assert ep >= 1, "Current epoch number should be >= 1"
if ep not in decay_at_epochs:
return
ind = find_index(decay_at_epochs, ep)
for g in optimizer.param_groups:
g['lr'] = base_lr * factor ** (ind + 1)
print('=====> lr adjusted to {:.10f}'.format(g['lr']).rstrip('0'))
@contextmanager
def measure_time(enter_msg, verbose=True):
if verbose:
st = time.time()
print(enter_msg)
yield
if verbose:
print('Done, {:.2f}s'.format(time.time() - st))
|
"""
/*****************************************************************************
* Copyright (c) 2016, Palo Alto Networks. All rights reserved. *
* *
* This Software is the property of Palo Alto Networks. The Software and all *
* accompanying documentation are copyrighted.
*****************************************************************************/
Copyright 2016 Palo Alto Networks
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import urllib2
import ssl
import xml.etree.ElementTree as et
import datetime
import time
import sys
import json
stackname=""
account=""
ScalingParameter=""
sqs_msg=None
sys.path.append('lib/')
import pan.asglib as lib
tag_key="PANW-NAT-STATUS"
remote=1
if remote > 0:
import boto3
cw_client = boto3.client('cloudwatch')
ec2 = boto3.resource('ec2')
ec2_client = ec2.meta.client
lambda_client = boto3.client('lambda')
gwMgmtIp=""
firewall_cmd = {'ActiveSessions': '<show><system><state><filter>sw.mprelay.s1.dp0.stats.session</filter></state></system></show>',
'DataPlaneBufferUtilization': '<show><system><state><filter>sw.mprelay.s1.dp0.packetbuffers</filter></state></system></show>',
'DataPlaneCPUUtilization': '<show><system><state><filter>sys.monitor.s1.dp0.exports</filter></state></system></show>',
'GPGatewayUtilization': '<show><system><state><filter>sw.rasmgr.resource.tunnel</filter></state></system></show>',
'SessionUtilization': '<show><system><state><filter>sw.mprelay.s1.dp0.stats.session</filter></state></system></show>'
}
def pan_print(s):
if remote > 0:
logger.info(s)
return
print(s)
return
def getChassisReady(response):
s1=response.replace('\n',"")
s1=s1.replace(" ","")
if s1.find("<![CDATA[no]]") > 0:
return False
if s1.find("<![CDATA[yes]]>") > 0:
return True
return False
def getJobStatus(response):
s1=response.replace("/","")
index=s1.find("<status>")
list=s1.split("<status>")
return list[1]
def getJobResult(response):
s1=response.replace("/","")
index=s1.find("<result>")
list=s1.split("<result>")
return list[2]
def getJobTfin(response):
s1=response.replace("/","")
index=s1.find("<tfin>")
list=s1.split("<tfin>")
return list[1]
def getJobProgress(response):
s1=response.replace("/","")
index=s1.find("<progress>")
list=s1.split("<progress>")
return list[1]
def getTag(instanceid):
logger.info('Getting all the tags for instance: ' + instanceid)
response=ec2_client.describe_tags(Filters=[{'Name': 'resource-id', 'Values': [instanceid]}])
logger.info(response)
for i in response['Tags']:
if i['Key'] == tag_key:
return i['Value']
return None
def setTag(instanceid, value):
ec2_client.create_tags(Resources=[instanceid], Tags=[{'Key': tag_key, 'Value': value}])
return
def runCommand(gcontext, cmd, gwMgmtIp, api_key):
try:
response = urllib2.urlopen(cmd, context=gcontext, timeout=5).read()
#pan_print("[RESPONSE] in send command: {}".format(response))
except Exception as e:
logger.error("[RunCommand Response Fail]: {}".format(e))
pan_print("[RunCommand Response Fail]: {}".format(e))
return None
resp_header = et.fromstring(response)
if resp_header.tag != 'response':
logger.error("[ERROR]: didn't get a valid response from Firewall command: " + cmd)
pan_print("[ERROR]: didn't get a valid response from Firewall")
return None
if resp_header.attrib['status'] == 'error':
logger.error("[ERROR]: Got an error for the command: " + cmd)
pan_print("[ERROR]: Got an error for the command: " + cmd)
return None
if resp_header.attrib['status'] == 'success':
return response
return None
def isChassisReady(gcontext, gwMgmtIp, api_key):
pan_print('Checking whether Chassis is ready or not')
cmd="<show><chassis-ready/></show>"
fw_cmd= "https://"+gwMgmtIp+"/api/?type=op&cmd=" + cmd + "&key="+api_key
try:
response = runCommand(gcontext, fw_cmd, gwMgmtIp, api_key)
if response is None:
pan_print('Failed to run command: ' + fw_cmd)
return False
status=getChassisReady(response)
if status == True:
pan_print('Chassis is in ready state')
return True
else:
pan_print('Chassis is not ready yet')
pan_print("[RESPONSE] in send command: {}".format(response))
except Exception as e:
logger.error("[AutoCommit RESPONSE]: {}".format(e))
return False
def isAutoCommit(gcontext, gwMgmtIp, api_key):
pan_print('Checking whether AutoCommit is done or not')
cmd="<show><jobs><id>1</id></jobs></show>"
fw_cmd= "https://"+gwMgmtIp+"/api/?type=op&cmd=" + cmd + "&key="+api_key
try:
response = runCommand(gcontext, fw_cmd, gwMgmtIp, api_key)
if response is None:
pan_print('Failed to run command: ' + fw_cmd)
return False
status=getJobStatus(response)
if status == "FIN":
pan_print('AutoCommit is Done')
pan_print('AutoCommit job status is : ' + getJobStatus(response))
pan_print('AutoCommit job result is : ' + getJobResult(response))
pan_print('AutoCommit job tfin is : ' + getJobTfin(response))
pan_print('AutoCommit job Progress is : ' + getJobProgress(response))
return True
else:
pan_print('AutoCommit is not done or over or failed')
pan_print('AutoCommit job status is : ' + getJobStatus(response))
pan_print('AutoCommit job result is : ' + getJobResult(response))
pan_print('AutoCommit job tfin is : ' + getJobTfin(response))
pan_print('AutoCommit job Progress is : ' + getJobProgress(response))
pan_print("[RESPONSE] in send command: {}".format(response))
except Exception as e:
logger.error("[AutoCommit RESPONSE]: {}".format(e))
return False
def isNatRule(gcontext, gwMgmtIp, api_key):
pan_print('Checking whether NAT Rules were pushed or not')
cmd="<show><jobs><id>2</id></jobs></show>"
fw_cmd= "https://"+gwMgmtIp+"/api/?type=op&cmd=" + cmd + "&key="+api_key
try:
response = runCommand(gcontext, fw_cmd, gwMgmtIp, api_key)
if response is None:
pan_print('Failed to run command: ' + fw_cmd)
return False
except Exception as e:
logger.error("[AutoCommit RESPONSE]: {}".format(e))
return False
if response.find("<status>FIN</status>") >= 1:
pan_print('Nat Rule commit was Done. Good job')
status=getJobStatus(response)
if status != "FIN":
pan_print('Job status is : ' + getJobStatus(response))
pan_print('Job result is : ' + getJobResult(response))
pan_print('Job tfin is : ' + getJobTfin(response))
pan_print('Job Progress is : ' + getJobProgress(response))
return False
return True
def pushNatRules(gcontext, gwMgmtIp, api_key, untrust, ilb_ip, hostname):
pan_print('Pushing NAT rule IP address')
fw_cmd="https://"+gwMgmtIp+"/api/?type=config&action=set&key="+api_key+"&xpath=/config/devices/entry/vsys/entry/address&element=<entry%20name='AWS-NAT-ILB'><description>ILB-IP-address</description><ip-netmask>"+ilb_ip+"</ip-netmask></entry>"
try:
response = runCommand(gcontext, fw_cmd, gwMgmtIp, api_key)
if response is None:
pan_print('AWS-NAT-ILB: Failed to run command: ' + fw_cmd)
return False
except Exception as e:
#logger.error("[NAT Address RESPONSE]: {}".format(e))
pan_print("[NAT Address RESPONSE]: {}".format(e))
return False
logger.info('Untrust: ' + str(untrust))
logger.info('gwMgmtIp: ' + str(gwMgmtIp))
fw_cmd="https://"+gwMgmtIp+"/api/?type=config&action=set&key="+api_key+"&xpath=/config/devices/entry/vsys/entry/address&element=<entry%20name='AWS-NAT-UNTRUST'><description>UNTRUST-IP-address</description><ip-netmask>"+untrust+"</ip-netmask></entry>"
try:
response = runCommand(gcontext, fw_cmd, gwMgmtIp, api_key)
if response is None:
pan_print('AWS-NAT-ILB: Failed to run command: ' + fw_cmd)
return False
except Exception as e:
#logger.error("[NAT Address RESPONSE]: {}".format(e))
pan_print("[NAT Address RESPONSE]: {}".format(e))
return False
if hostname == "":
hostname="PA-VM"
fw_cmd="https://"+gwMgmtIp+"/api/?type=config&action=set&key="+api_key+"&xpath=/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system&element=<hostname>"+hostname+"-"+gwMgmtIp+"</hostname>"
try:
response = runCommand(gcontext, fw_cmd, gwMgmtIp, api_key)
if response is None:
logger.error('AWS-NAT-ILB: Hostname Failed to run command: ' + fw_cmd)
except Exception as e:
logger.error("[HostName RESPONSE]: {}".format(e))
return False
fw_cmd="https://"+gwMgmtIp+"/api/?type=config&action=set&key="+api_key+"&xpath=/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system&element=<server-verification>yes</server-verification>"
try:
response = runCommand(gcontext, fw_cmd, gwMgmtIp, api_key)
if response is None:
logger.error('AWS-NAT-ILB: API server-verification failed: ' + fw_cmd)
except Exception as e:
logger.error("[server-verification RESPONSE]: {}".format(e))
return False
fw_cmd="https://"+gwMgmtIp+"/api/?type=commit&cmd=<commit></commit>&key="+api_key
try:
response = runCommand(gcontext, fw_cmd, gwMgmtIp, api_key)
if response is None:
pan_print('Commit: Failed to run command: ' + fw_cmd)
return False
except Exception as e:
#logger.error("[Commit RESPONSE]: {}".format(e))
pan_print("[Commit RESPONSE]: {}".format(e))
return False
return True
def getUntrustIP(instanceid, untrust):
logger.info('Getting IP address of Untrust Interface for instance: ' + instanceid)
ip=""
found=False
response=ec2_client.describe_instances(InstanceIds=[instanceid])
logger.info(response)
for r in response['Reservations']:
for i in r['Instances']:
for s in i['NetworkInterfaces']:
if s['SubnetId'] == untrust:
found=True
ip=s['PrivateIpAddress']
break
if found == True:
break
if found == True:
return ip
return None
def valueToDict(v, s):
d={}
try:
str= v.replace(s, "")
str=str.replace("'", "\"")
str=str.replace(", }", "}")
d = json.loads(str)
pan_print(json.dumps(d, indent=4))
except Exception as e:
logger.error("[valueToDict]: {}".format(e))
pan_print("[valueToDict]: {}".format(e))
return None
return d
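# Illustration with a hypothetical value string (not captured from a real
# device): valueToDict strips the given prefix and massages the remaining
# PAN-OS state blob into JSON.
#   v = "sys.monitor.s1.dp0.exports: { 'cpu': { '1minavg': 12, }, }"
#   valueToDict(v, "sys.monitor.s1.dp0.exports:")  ->  {'cpu': {'1minavg': 12}}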
def valueToString(v, s):
str=""
try:
str= v.replace(s, "")
str=str.replace("'", "\"")
str=str.replace(", }", "")
str=str.replace('\n', "")
str=str.replace(',', "")
pan_print(str)
except Exception as e:
logger.error("[valueToDict]: {}".format(e))
pan_print("[valueToDict]: {}".format(e))
return None
return str
def ActiveSessions(root, namespace, asg_name):
#Now to find number of active sessions
logger.info('ActiveSessions...');
logger.info('root[0][1].text: ' + str(root[0].text))
value=""
d=valueToDict(str(root[0].text), "sw.mprelay.s1.dp0.stats.session:")
if d is None:
pan_print('Error happened in ActiveSessions: ' + str(root[0].text))
return
value=float(d['session_active'])
pan_print('ActiveSessions in numbers: ' + str(value))
if remote == 0:
return
if sqs_msg is not None:
v=lib.getScalingValue(sqs_msg, ScalingParameter)
if v is not None:
print(sqs_msg)
logger.info('Pushing simulated data to CW: ' + str(v))
value=float(v)
else:
logger.info('Starting to Publish metrics in namespace: ' + namespace)
else:
logger.info('Starting to Publish metrics in namespace: ' + namespace)
timestamp = datetime.datetime.utcnow()
response = cw_client.put_metric_data(
Namespace=namespace,
MetricData=[{
'MetricName': 'ActiveSessions',
'Dimensions':[{
'Name': 'AutoScalingGroupName',
'Value': asg_name
}],
'Timestamp': timestamp,
'Value': value,
'Unit': 'Count'
}]
)
logger.info("[INFO]: Published GOOD metric for {}".format(gwMgmtIp))
return
def DataPlaneCPUUtilization(root, namespace, asg_name):
logger.info('DataPlaneCPUUtilization');
logger.info('root[0][1].text: ' + str(root[0].text))
cpu=""
d=valueToDict(str(root[0].text), "sys.monitor.s1.dp0.exports:")
if d is None:
pan_print('Error happened in DataPlaneCPUUtilization: ' + str(root[0].text))
return
cpu=float(d['cpu']['1minavg'])
pan_print('DataPlaneCPUUtilization in percentage: ' + str(cpu))
if remote == 0:
return
if sqs_msg is not None:
v=lib.getScalingValue(sqs_msg, ScalingParameter)
if v is not None:
print(sqs_msg)
logger.info('Pushing simulated data to CW: ' + str(v))
cpu=float(v)
else:
logger.info('Starting to Publish metrics in namespace: ' + namespace)
else:
logger.info('Starting to Publish metrics in namespace: ' + namespace)
timestamp = datetime.datetime.utcnow()
response = cw_client.put_metric_data(
Namespace=namespace,
MetricData=[{
'MetricName': 'DataPlaneCPUUtilization',
'Dimensions':[{
'Name': 'AutoScalingGroupName',
'Value': asg_name
}],
'Timestamp': timestamp,
'Value': cpu,
'Unit': 'Percent'
}]
)
logger.info("[INFO]: Published GOOD metric for {}".format(gwMgmtIp))
return
def DataPlaneBufferUtilization(root, namespace, asg_name):
logger.info('DataPlaneBufferUtilization...');
logger.info('root[0][1].text: ' + str(root[0].text))
hw_buf=str(root[0].text)
hw_buf=hw_buf.replace("hardware buffer", '"hardware buffer"')
hw_buf=hw_buf.replace("packet descriptor", '"packet descriptor"')
hw_buf=hw_buf.replace("software buffer", '"software buffer"')
d=valueToDict(hw_buf, "sw.mprelay.s1.dp0.packetbuffers:")
if d is None:
pan_print('Error happened in DataPlaneBufferUtilization: ' + str(root[0].text))
return
pan_print('Get is: ' + str(d.get('hw-buf')))
max=str(d['hw-buf']['max'])
used=str(d['hw-buf']['used'])
m=float(max)
u=float(used)
v=(u/m) * 100
value=float("{0:.2f}".format(v))
pan_print('DataPlaneBufferUtilization in percentage: Max: ' + max + ' Used: ' + used + ' Util: ' + str(value))
if remote == 0:
return
if sqs_msg is not None:
v=lib.getScalingValue(sqs_msg, ScalingParameter)
if v is not None:
print(sqs_msg)
logger.info('Pushing simulated data to CW: ' + str(v))
value=float(v)
else:
logger.info('Starting to Publish metrics in namespace: ' + namespace)
else:
logger.info('Starting to Publish metrics in namespace: ' + namespace)
timestamp = datetime.datetime.utcnow()
response = cw_client.put_metric_data(
Namespace=namespace,
MetricData=[{
'MetricName': 'DataPlaneBufferUtilization',
'Dimensions':[{
'Name': 'AutoScalingGroupName',
'Value': asg_name
}],
'Timestamp': timestamp,
'Value': value,
'Unit': 'Percent'
}]
)
logger.info("[INFO]: Published GOOD metric for {}".format(gwMgmtIp))
return
def GPActiveTunnels(root, namespace, asg_name):
pan_print('Not Supported')
return
def GPGatewayUtilization(root, namespace, asg_name):
logger.info('GPGatewayUtilization...');
logger.info('root[0][1].text: ' + str(root[0].text))
d=valueToString(str(root[0].text), "sw.rasmgr.resource.tunnel:")
if d is None:
pan_print('Error happened in DataPlaneBufferUtilization: ' + str(root[0].text))
return
list=d.split(" ")
cur=list[3]
max=list[5]
cur=str(int(cur, 16))
max=str(int(max, 16))
m=float(max)
u=float(cur)
v=(u/m) * 100
value=float("{0:.2f}".format(v))
pan_print('GPGatewayUtilization in percentage: Max: ' + max + ' Cur: ' + cur + ' Util: ' + str(value))
if remote == 0:
return
if sqs_msg is not None:
v=lib.getScalingValue(sqs_msg, ScalingParameter)
if v is not None:
print(sqs_msg)
logger.info('Pushing simulated data to CW: ' + str(v))
value=float(v)
else:
logger.info('Starting to Publish metrics in namespace: ' + namespace)
else:
logger.info('Starting to Publish metrics in namespace: ' + namespace)
timestamp = datetime.datetime.utcnow()
response = cw_client.put_metric_data(
Namespace=namespace,
MetricData=[{
'MetricName': 'GPGatewayUtilization',
'Dimensions':[{
'Name': 'AutoScalingGroupName',
'Value': asg_name
}],
'Timestamp': timestamp,
'Value': value,
'Unit': 'Percent'
}]
)
logger.info("[INFO]: Published GOOD metric for {}".format(gwMgmtIp))
return
def SessionUtilization(root, namespace, asg_name):
logger.info('SessionUtilization');
logger.info('root[0][1].text: ' + str(root[0].text))
sess=0.0
d=valueToDict(str(root[0].text), "sw.mprelay.s1.dp0.stats.session:")
if d is None:
pan_print('Error happened in SessionUtilization: ' + str(root[0].text))
return
sess=float(d['session_util'])
pan_print('SessionUtilization in percentage: ' + str(sess))
if remote == 0:
return
if sqs_msg is not None:
v=lib.getScalingValue(sqs_msg, ScalingParameter)
if v is not None:
print(sqs_msg)
logger.info('Pushing simulated data to CW: ' + str(v))
sess=float(v)
else:
logger.info('Starting to Publish metrics in namespace: ' + namespace)
else:
logger.info('Starting to Publish metrics in namespace: ' + namespace)
timestamp = datetime.datetime.utcnow()
response = cw_client.put_metric_data(
Namespace=namespace,
MetricData=[{
'MetricName': 'SessionUtilization',
'Dimensions':[{
'Name': 'AutoScalingGroupName',
'Value': asg_name
}],
'Timestamp': timestamp,
'Value': sess,
'Unit': 'Percent'
}]
)
logger.info("[INFO]: Published GOOD metric for {}".format(gwMgmtIp))
return
cw_func_metrics = { 'DataPlaneCPUUtilization': DataPlaneCPUUtilization,
'ActiveSessions': ActiveSessions,
'SessionUtilization': SessionUtilization,
'GPGatewayUtilization': GPGatewayUtilization,
'GPActiveTunnels': GPActiveTunnels,
'DataPlaneBufferUtilization': DataPlaneBufferUtilization}
def lambda_handler(event, context):
global gwMgmtIp
global logger
global stackname
global account
global sqs_msg
global ScalingParameter
#logger = logging.getLogger()
#print(event)
stackname=event['StackName']
Namespace=event['Namespace']
KeyPANWFirewall=event['KeyPANWFirewall']
KeyPANWPanorama=event['KeyPANWPanorama']
ScalingParameter=event['ScalingParameter']
api_key=event['KeyPANWFirewall']
instanceid=event['EC2InstanceId']
ilb_ip=event['ILBIPAddress']
untrust_subnet=event['UntrustSubnet']
hostname=event['Hostname']
logger = logging.getLogger()
#logger.setLevel(logging.DEBUG)
account=event['Arn']
sqs_msg=lib.getSqsMessages(stackname, account)
if sqs_msg is not None:
lvl=lib.getDebugLevelFromMsg(sqs_msg)
if lvl is not None:
lib.setDebugLevelFromMsg(logger, lvl)
try:
asg_name = event.get('ASGName')
lfunc=lib.get_lambda_cloud_watch_func_name(stackname, asg_name, instanceid)
lresponse=lambda_client.get_function(FunctionName=lfunc)
logger.info(json.dumps(lresponse))
except Exception as e:
logger.info("Error getting lambda function name")
logger.info('got event{}'.format(event))
logger.info('StackName: ' + event['StackName'] + ' FW IP: ' + event['FWIP'] + ' SP: ' + ScalingParameter + ' NameSpace: ' + Namespace)
# In case the instance is no longer there, we should remove ourselves
remove=False
try:
response=ec2_client.describe_instance_status(InstanceIds=[instanceid])
status=response['InstanceStatuses']
if len(status) == 0:
remove=True
except Exception as e:
logger.error("[InstanceNotFound]: {}".format(e))
remove=True
if remove == True:
logger.info('Instance ID: ' + instanceid + ' not FOUND')
PIP=event['PIP']
PDG=event['PDG']
PTPL=event['PTPL']
if PIP != "":
for i in range(1,250):
tr=context.get_remaining_time_in_millis()
if tr < 15000:
logger.error('Exiting CloudWatch Lambda without removing instance/firewall from Panorama. InstanceId: ' + str(instanceid))
break
try:
if lib.remove_fw_from_panorama(stackname, instanceid, KeyPANWPanorama, event.get('FWPIP'), PIP, PDG, PTPL) == False:
logger.error('Device can not be removed from Panorama at this time. We will retry after a minute')
else:
break
except Exception as e:
logger.error("[Remove FW From Panorama CloudWatch Lambda]: {}".format(e))
logger.error('Not removing this lambda because of failure. We will retry after a minute')
time.sleep(1)
asg_name = event.get('ASGName')
lib.delete_cw_metrics_lambda(stackname, asg_name, instanceid, None)
return
gwMgmtIp = event.get('FWIP')
if gwMgmtIp == None:
logger.error("[ERROR]: Didn't get GW MGMT IP in event")
return
asg_name = event.get('ASGName')
if asg_name == None:
logger.error("[ERROR]: Didn't get auto scaling group name in event")
return
# Need this to bypass the invalid certificate issue.
gcontext = lib.get_ssl_context()
value=getTag(instanceid)
if value is None:
if isChassisReady(gcontext, gwMgmtIp, api_key) == False:
logger.info('Chassis is not in ready state yet')
isAutoCommit(gcontext, gwMgmtIp, api_key)
return
untrust=getUntrustIP(instanceid, untrust_subnet)
if pushNatRules(gcontext, gwMgmtIp, api_key, untrust, ilb_ip, hostname) == False:
logger.error('Unable to push NAT IP address');
setTag(instanceid, "NatCommitFailure")
return
else:
setTag(instanceid, "NatCommitSuccess")
elif value != "NatCommitSuccess":
untrust=getUntrustIP(instanceid, untrust_subnet)
if pushNatRules(gcontext, gwMgmtIp, api_key, untrust, ilb_ip, hostname) == False:
logger.error('Unable to push NAT IP address');
setTag(instanceid, "NatCommitFailure")
return
else:
setTag(instanceid, "NatCommitSuccess")
logger.info('Instance Tag state is : ' + str(value))
cmd = firewall_cmd[ScalingParameter]
fw_cmd = "https://"+gwMgmtIp+"/api/?type=op&cmd=" + cmd + "&key="+api_key
logger.info('[INFO]: Sending API command : %s', fw_cmd)
try:
response = urllib2.urlopen(fw_cmd, context=gcontext, timeout=5).read()
logger.debug("[RESPONSE] in send command: {}".format(response))
except Exception as e:
logger.error("[ERROR]: Something bad happened when sending command")
logger.error("[RESPONSE]: {}".format(e))
return
else:
logger.info("[INFO]: Got a response from command urlopen")
resp_header = et.fromstring(response)
if resp_header.tag != 'response':
logger.error("[ERROR]: didn't get a valid response from GW")
return
if resp_header.attrib['status'] == 'error':
logger.error("[ERROR]: Got an error for the command")
return
if resp_header.attrib['status'] == 'success':
logger.info("[INFO]: Successfully executed command urlopen. Now publish metrics")
cw_func_metrics[ScalingParameter](resp_header, Namespace, asg_name)
def test():
pan_print('Local Test Start...........')
ScalingParameter="DataPlaneCPUUtilization"
ScalingParameter="ActiveSessions"
ScalingParameter="DataPlaneBufferUtilization"
ScalingParameter="SessionUtilization"
ScalingParameter="GPGatewayUtilization"
ScalingParameter="DataPlaneBufferUtilization"
Namespace="panw"
asg_name="test-asg"
gwMgmtIp="10.4.20.90"
untrust="1.1.1.1"
ilb_ip="2.2.2.2"
api_key = "LUFRPT14MW5xOEo1R09KVlBZNnpnemh0VHRBOWl6TGM9bXcwM3JHUGVhRlNiY0dCR0srNERUQT09"
# Need this to bypass the invalid certificate issue. Should try to fix this
gcontext = lib.get_ssl_context()
if isChassisReady(gcontext, gwMgmtIp, api_key) == False:
pan_print('Chassis is not ready yet')
return
cmd = firewall_cmd[ScalingParameter]
fw_cmd = "https://"+gwMgmtIp+"/api/?type=op&cmd=" + cmd + "&key="+api_key
logger.info('[INFO]: Sending API command : %s', fw_cmd)
try:
response = urllib2.urlopen(fw_cmd, context=gcontext, timeout=5).read()
logger.info("[RESPONSE] in send command: {}".format(response))
except Exception as e:
logger.error("[ERROR]: Something bad happened when sending command")
logger.error("[RESPONSE]: {}".format(e))
return
else:
logger.info("[INFO]: Got a response from command urlopen")
resp_header = et.fromstring(response)
if resp_header.tag != 'response':
logger.error("[ERROR]: didn't get a valid response from GW")
return
if resp_header.attrib['status'] == 'error':
logger.error("[ERROR]: Got an error for the command")
return
if resp_header.attrib['status'] == 'success':
#The fw responded with a successful command execution.
logger.info("[INFO]: Successfully executed command urlopen. Now publish metrics")
pan_print(response)
cw_func_metrics[ScalingParameter](resp_header, Namespace, asg_name)
if remote == 0:
test()
|
from stable_baselines3.ppo.policies import CnnPolicy, MlpPolicy
from stable_baselines3.ppo.ppo import PPO
from stable_baselines3.ppo.ppo_custom import PPO_CUSTOM
from stable_baselines3.ppo.ppo_with_reward import PPO_REWARD
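# A minimal usage sketch (assumed, not part of this package file): the standard
# stable-baselines3 PPO entry point; PPO_CUSTOM and PPO_REWARD are the
# project-specific variants re-exported above.
# from stable_baselines3 import PPO
# model = PPO("MlpPolicy", "CartPole-v1", verbose=1)
# model.learn(total_timesteps=10_000)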
|
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import yfinance as yf
import pandas as pd
import os
from tqdm import tqdm
import time
import numpy as np
import datetime
import json
import collections
# In[ ]:
# download stock data and calculate factors for training
def cal_mean(data,r,period,fact):
return data.iloc[r-period+1:r+1][fact].mean()
def cal_std(data,r,period,fact):
return data.iloc[r-period+1:r+1][fact].std()
def cal_RVI(data,r,std_length = 21):
lstd,hstd =[0]*std_length,[0]*std_length
for i in range(std_length-1,-1,-1):
if data.iloc[r-i]['day_change']<0:
lstd.append(data.iloc[r-i]['10_day_Close_std'])
elif data.iloc[r-i]['day_change']>0:
hstd.append(data.iloc[r-i]['10_day_Close_std'])
lsum = sum(lstd)/len(lstd)
hsum = sum(hstd)/len(hstd)
return 100 * hsum/(hsum+lsum)
def UpOrDown(data,r,period = 21,avg_line = 21):
if avg_line == 1:
try:
return data.iloc[r+period]['Close'] - data.iloc[r]['Close']
except:
return None
else:
try:
return data.iloc[r+period][str(avg_line)+'_day_Close_mean']-data.iloc[r][str(avg_line)+'_day_Close_mean']
except:
return None
def avg_value(data,r,period = 21, avg_line = 21):
if avg_line == 1:
try:
return data.iloc[r+period]['Close']
except:
return None
else:
try:
return data.iloc[r+period][str(avg_line)+'_day_Close_mean']
except:
return None
def avg_value_percentage(data,r,period = 21, avg_line = 21):
if avg_line == 1:
try:
return (data.iloc[r+period]['Close'] - data.iloc[r]['Close'])/data.iloc[r]['Close']
except:
return None
else:
try:
return (data.iloc[r+period][str(avg_line)+'_day_Close_mean']-data.iloc[r][str(avg_line)+'_day_Close_mean'])/data.iloc[r][str(avg_line)+'_day_Close_mean']
except:
return None
for stock in tqdm(["tal","aapl","amzn","baba","dis","fb","googl","nflx","nvda","se","tsla","uber"]):
data = yf.download(stock,period = 'max',interval ='1d')
data.to_csv('test_'+stock+'.csv',header = True)
data = pd.read_csv('test_'+stock+'.csv')
for i,row in data.iterrows():
data.at[i,'inday_change'] = (data.iloc[i]['Close']-data.iloc[i]['Open'])/data.iloc[i]['Open']
data.at[i,'day_change'] = (data.iloc[i]['Close']-data.iloc[i-1]['Close'])/data.iloc[i-1]['Close']
data.at[i,'data_amplitude'] = (data.iloc[i]['High']-data.iloc[i-1]['Low'])/data.iloc[i-1]['Close']
for f in ['High','Low','Close','Open','Adj Close','Volume']:
data.at[i,'5_day_{}_mean'.format(f)] = cal_mean(data,i,5,f)
data.at[i,'10_day_{}_mean'.format(f)] = cal_mean(data,i,10,f)
data.at[i,'21_day_{}_mean'.format(f)] = cal_mean(data,i,21,f)
data.at[i,'5_day_{}_std'.format(f)] = cal_std(data,i,5,f)
data.at[i,'10_day_{}_std'.format(f)] = cal_std(data,i,10,f)
data.at[i,'21_day_{}_std'.format(f)] = cal_std(data,i,21,f)
data.at[i,'RVI'] = cal_RVI(data,i)
for i,row in data.iterrows():
data.at[i,'score_g'] = 0
data.at[i,'score_n'] = 0
data.at[i,'score_p'] = 0
data.at[i,'score_ralative'] = 0
for avg_line in [1,5,10,21]:
for period in [1,5,10,21]:
data.at[i,'after_'+str(period)+'_day_'+str(avg_line)+'_avgline'] = avg_value(data,i,period=period,avg_line=avg_line)
data.at[i,str(period)+'_day_UpDown_'+str(avg_line)+'_avgline'] = UpOrDown(data,i,period=period,avg_line=avg_line)
data.at[i,str(period)+'_day_UpDown_percentage_'+str(avg_line)+'_avgline'] = avg_value_percentage(data,i,period=period,avg_line=avg_line)
data.to_csv('./stock_data_new/{}.csv'.format(stock),header = True)
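# An equivalent vectorized sketch (not from the original notebook): apart from
# the first period-1 rows (where rolling() yields NaN instead of a shorter
# window), the per-row cal_mean/cal_std calls above can be replaced with pandas
# rolling statistics using the same column-name convention.
# for f in ['High', 'Low', 'Close', 'Open', 'Adj Close', 'Volume']:
#     for period in (5, 10, 21):
#         data['{}_day_{}_mean'.format(period, f)] = data[f].rolling(period).mean()
#         data['{}_day_{}_std'.format(period, f)] = data[f].rolling(period).std()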
# In[ ]:
"""
# this is the code to download all the stock data locally using yfinance, which is not recommended
tickers = gt.get_tickers()
for i in range(len(tickers)):
tickers[i] = tickers[i].strip().replace('/','-')
tickers = list(set(tickers))
total_batches = len(tickers)//100+1
print(total_batches)
for i in range(129):
temp = tickers[i*100:min((i+1)*100,len(tickers))]
tickers_batch = ' '.join(temp)
data = yf.download(tickers_batch, period = "max", interval = "1d",group_by='tickers')
print(data)
input()
if not os.path.exists('./stock_data/'):
os.makedirs('./stock_data/')
print("saving a batch "+str(i) + "...")
for s in temp:
df = data[s].dropna()
df.to_csv('./stock_data/'+s+'.csv')
if (i+1)% 20 == 0:
print("System asleep...")
time.sleep(120)
"""
# In[ ]:
"""
# this is the code to upload the stock data to bigquery
import numpy
from google.cloud import bigquery
PROJECT_ID = ''
DATASET_ID = ''
CSV_DIR = './stock_data_new/'
os.environ["GOOGLE_APPLICATION_CREDENTIALS"]=""
# create a client instance for your project
client = bigquery.Client(project=PROJECT_ID, location="US")
client.create_dataset(DATASET_ID,exists_ok=True)
for file in os.listdir(CSV_DIR):
file_id = file.split('.')[0]
client.delete_table(f"{PROJECT_ID}.{DATASET_ID}.{file_id}",not_found_ok=True)
client.create_table(f"{PROJECT_ID}.{DATASET_ID}.{file_id}",exists_ok=True)
dataset_ref = client.dataset(DATASET_ID)
table_ref = dataset_ref.table(file_id)
job_config = bigquery.LoadJobConfig()
job_config.source_format = bigquery.SourceFormat.CSV
job_config.autodetect = True
with open(CSV_DIR + file, "rb") as source_file:
job = client.load_table_from_file(source_file, table_ref, job_config = job_config )
job.result()
print("Loaded {} rows into {}:{}.".format(job.output_rows, DATASET_ID, file_id))
break"""
|
import numpy as np
import pandas as pd
NEARBY_THRESHOLD = 5 / 6371  # 5 km expressed as a central angle (Earth radius ~= 6371 km)
def hav_dist(lat1, lon1, lat2, lon2):
lat1, lon1, lat2, lon2 = map(np.radians, [lat1, lon1, lat2, lon2])
dlat = lat2 - lat1
dlon = lon2 - lon1
hav = np.sin(dlat/2.0)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon/2.0)**2
angle = 2 * np.arcsin(np.sqrt(hav))
return angle
def get_nearby():
userdata = pd.read_csv('userdata.csv', usecols=['user_id', 'lat', 'long'])
ids = userdata['user_id'].to_numpy()
lats = userdata['lat'].to_numpy()
lons = userdata['long'].to_numpy()
dists = {}
for i, (lat, lon) in enumerate(zip(lats, lons)):
dists[ids[i]] = hav_dist(lat, lon, lats, lons)
nearby_ids = {}
for i in ids:
nearby_ids[i] = []
for j, d in zip(ids, dists[i]):
if d <= NEARBY_THRESHOLD:
nearby_ids[i].append(j)
return nearby_ids
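# A small self-contained check (assumed coordinates, not from the original
# script): two points ~1.56 km apart along the same meridian fall well inside
# the 5 km threshold.
# d = hav_dist(52.500, 13.400, 52.514, 13.400)   # central angle in radians
# print(d * 6371)                                # ~1.56 (km)
# print(d <= NEARBY_THRESHOLD)                   # True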
|
from django.db import connection
from django.core.exceptions import ImproperlyConfigured
def multitenant_key_func(key, key_prefix, version):
tenant_prefix = connection.get_threadlocal().get_cache_prefix()
if tenant_prefix is None:
raise ImproperlyConfigured('Multi-tenant cache prefix not available')
return '%s:%s:%s:%s' % (tenant_prefix, key_prefix, version, key)
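# A minimal settings sketch (assumed project layout, not part of this module):
# Django picks up the function above through the KEY_FUNCTION option of a cache
# backend, referenced by its dotted path.
# CACHES = {
#     'default': {
#         'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
#         'KEY_FUNCTION': 'myproject.cache.multitenant_key_func',
#     }
# }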
|
import json
import sys
from collections import OrderedDict
from functools import wraps
# Ignore PyImportSortBear, PyUnusedCodeBear
from typing import Any, Callable, Dict, List, Optional, Tuple, Union, Set
import attr
from dotenv import load_dotenv
from .exceptions import MissingError
from .vault.base import BaseVaultBackend
from .laziness import LazyVariable
from .log import core_logger
from . import exceptions, loaders
from ._compat import class_types
from .environ import EnvVariable
from .utils import iscoroutinefunction, rebuild_dict, NOT_SET
from .vault import VaultVariable
def get_config_option_names(module):
# type: (ConfigHolder) -> List[str]
"""Get all configuration option names defined in the given module."""
return [attr_name for attr_name in dir(module) if attr_name.isupper()]
@attr.s(slots=True)
class ConfigHolder(object):
konfig = attr.ib()
sources = attr.ib(type=List, factory=list)
def __iter__(self):
# newer config sources have higher precedence
for source in reversed(self.sources):
yield source.get(self.konfig)
def __dir__(self):
output = set() # type: Set[Any]
for source in self:
output |= set(dir(source))
return list(output)
def __getattr__(self, item):
for source in self:
try:
return getattr(source, item)
except AttributeError:
pass
raise AttributeError
def append(self, item):
self.sources.append(Lazy(item))
@attr.s(slots=True)
class Lazy(object):
closure = attr.ib()
value = attr.ib(init=False, default=NOT_SET)
def get(self, *args, **kwargs):
if self.value is NOT_SET:
self.value = self.closure(*args, **kwargs)
core_logger.info("Configuration loaded")
return self.value
@attr.s(slots=True)
class Konfig(object):
"""Configuration holder."""
vault_backend = attr.ib(
default=None, validator=attr.validators.optional(attr.validators.instance_of(BaseVaultBackend))
)
dotenv = attr.ib(default=None)
dotenv_override = attr.ib(default=False, type=bool)
# Forbids overriding with options that are not defined in the config module
strict_override = attr.ib(default=True, type=bool)
config_variable_name = attr.ib(default="KONFETTI_SETTINGS", type=str)
loader = attr.ib(factory=loaders.default_loader_factory)
_dotenv_loaded = attr.ib(type=bool, init=False, default=False)
_conf = attr.ib(init=False)
_vault = attr.ib(init=False, default=None)
_config_overrides = attr.ib(init=False, type="OrderedDict[str, Dict]", factory=OrderedDict)
def __attrs_post_init__(self):
self._conf = ConfigHolder(self)
self._conf.append(self.loader)
@classmethod
def from_object(cls, obj, **kwargs):
"""Create a config from the given object, mapping or an importable string."""
factory = loaders.get_loader_factory(obj)
return cls(loader=factory, **kwargs)
@classmethod
def from_json(cls, path, loads=json.loads, **kwargs):
"""Create a config from the given path to JSON file."""
return cls(loader=loaders.json_loader_factory(path, loads), **kwargs)
def extend_with_json(self, path, loads=json.loads):
"""Extend the config with data from the JSON file."""
self._conf.append(loaders.json_loader_factory(path, loads))
def extend_with_object(self, obj):
"""Extend the config with the given mapping."""
factory = loaders.get_loader_factory(obj)
self._conf.append(factory)
def _load_dotenv(self):
# type: () -> None
"""Load environment from `.env` file.
No-op if .env is already loaded.
"""
if self._dotenv_loaded:
return
load_dotenv(dotenv_path=self.dotenv, override=self.dotenv_override)
self._dotenv_loaded = True
core_logger.info(".env is loaded")
def _configure(self, override_id, **kwargs):
# type: (str, **Any) -> None
"""Custom settings for overriding."""
core_logger.debug("Start overriding with %s", kwargs)
if self.strict_override:
self._validate_override(kwargs)
# check for intersecting keys? maybe forbid merging if keys are intersecting
self._config_overrides[override_id] = kwargs
def _validate_override(self, kwargs):
# type: (Dict[str, int]) -> None
"""Prevent setting config options that are not defined in the config module / class.
This helps to keep all overrides up-to-date if some options are removed from the config.
"""
config_option_names = get_config_option_names(self._conf)
for key in kwargs:
if key not in config_option_names:
raise exceptions.ForbiddenOverrideError(
"Can't override `{}` config option, because it is not defined in the config module".format(key)
)
def _unconfigure(self, override_id):
# type: (str) -> None
"""Remove the given override."""
core_logger.debug("Stop overriding with %s", self._config_overrides[override_id])
del self._config_overrides[override_id]
def _unconfigure_all(self):
# type: () -> None
"""Remove all overrides."""
core_logger.debug("Stop overriding")
self._config_overrides = OrderedDict()
def override(self, **kwargs):
# type: (**Any) -> OverrideContextManager
"""Override the config with provided keyword arguments."""
return OverrideContextManager(self, **kwargs)
def require(self, *keys):
# type: (*str) -> None
"""Check if the given keys are present in the config."""
if not keys:
raise RuntimeError("You need to specify at least one key")
missing_keys = []
for key in keys:
try:
getattr(self, key)
except (exceptions.MissingError, exceptions.SecretKeyMissing):
missing_keys.append(key)
if missing_keys:
raise exceptions.MissingError("Options {keys} are required".format(keys=missing_keys))
def __getattr__(self, item):
# type: (str) -> Any
"""A core place for config options access. Provides `config.SECRET` API."""
if item.startswith("_"):
raise AttributeError
core_logger.debug('Accessing "%s" option', item)
value = self._get_from_override(item)
if value is not None:
return value
return self._get_from_config(item)
def _get_from_override(self, item):
# type: (str) -> Any
"""Look up in override levels from top to bottom."""
if self._config_overrides:
# are values ordered as well?
for key in reversed(self._config_overrides):
try:
return self._config_overrides[key][item]
except KeyError:
continue
return None
def _get_from_config(self, item):
# type: (str) -> Any
"""Get given option from actual config."""
try:
obj = getattr(self._conf, item)
except AttributeError:
raise exceptions.MissingError("Option `{}` is not present in `{}`".format(item, self._conf.__name__))
return self._evaluate(obj)
def _evaluate(self, obj):
"""Evaluate given config option."""
if isinstance(obj, EnvVariable):
self._load_dotenv()
return obj.evaluate()
if isinstance(obj, VaultVariable):
self._load_dotenv()
return self._get_secret(obj)
if isinstance(obj, LazyVariable):
self._load_dotenv()
return obj.evaluate(self)
if isinstance(obj, dict):
return self._evaluate_dict(obj)
return obj
def __contains__(self, item):
# type: (str) -> bool
"""If given config option name exists in the config."""
if not isinstance(item, str):
raise TypeError("Config options names are strings, got: `{}`".format(type(item).__name__))
value = self._get_from_override(item)
if value is not None:
return True
return bool(getattr(self._conf, item, False))
def get_secret(self, path):
# type: (str) -> Any
"""Access a value via secret backend."""
core_logger.info('Access secret "%s"', path)
variable = VaultVariable(path)
return self._get_secret(variable)
def _get_secret(self, obj):
# type: (VaultVariable) -> Any
if self.vault_backend is None:
raise exceptions.VaultBackendMissing(
"Vault backend is not configured. "
"Please specify `vault_backend` option in "
"your `Konfig` initialization"
)
# A closure is needed to avoid a need to evaluate VAULT_{ADDR,TOKEN} variables
# before environment overriding was checked
def closure():
# type: () -> Tuple[str, Optional[str], Optional[str], Optional[str]]
address = self.VAULT_ADDR
# Try to load token/credentials but raise exception only if both of them are missing
token = get_option("VAULT_TOKEN")
username = get_option("VAULT_USERNAME")
password = get_option("VAULT_PASSWORD")
if token is None and (username is None or password is None):
raise MissingError
return address, token, username, password
def get_option(name):
try:
return getattr(self, name)
except MissingError:
pass
return obj.evaluate(self.vault_backend, closure) # type: ignore
def asdict(self):
"""Convert the config to a dictionary.
Depending on the Vault backend could return an awaitable object.
"""
keys = get_config_option_names(self._conf)
result = {key: getattr(self, key) for key in keys}
return self._evaluate_dict(result)
def _evaluate_dict(self, obj):
# Configuration dict could be multilevel and contain config variables, that should be evaluated
# The basic strategy is to create a copy of the given dict with all options evaluated (+ coros awaited)
# Why copy?
# Because of call-by-reference for dicts - otherwise the original dictionary in the config module / class
# will be modified
def evaluate_option(value):
if isinstance(value, (VaultVariable, EnvVariable, LazyVariable)):
value = self._evaluate(value)
return value
if self.vault_backend and self.vault_backend.is_async:
from ._async import async_process_dict
return async_process_dict(obj, evaluate_option)
return rebuild_dict(obj, evaluate_option)
@property
def vault(self):
"""A namespace for Vault-related actions."""
if not self._vault:
self._vault = _Vault(self)
return self._vault
@attr.s(slots=True)
class _Vault(object):
"""A namespace holder to provide `config.vault.get_override_examples()` API."""
_config = attr.ib(type=Konfig)
_overrides = attr.ib(init=False, default=None, type=Dict[str, Dict[str, str]])
def get_override_examples(self):
# type: () -> Dict[str, Dict[str, Any]]
"""To simplify overriding process it could be helpful to look at examples."""
if not self._overrides:
vault_variables = {} # type: Dict[str, Dict[str, Any]]
# Traverse via config content
for attr_name in dir(self._config._conf):
value = getattr(self._config._conf, attr_name)
# Filter vault variables
if not attr_name.startswith("_") and isinstance(value, VaultVariable):
# Fetch override_variable_name
vault_variables[attr_name] = value.override_example
self._overrides = vault_variables
return self._overrides
class OverrideContextManager:
"""Apply temporal changes to certain config module."""
__slots__ = ("config", "kwargs")
def __init__(self, config, **kwargs):
# type: (Konfig, **Any) -> None
self.config = config
self.kwargs = kwargs
def __enter__(self):
# type: () -> None
self.enable()
def __exit__(self, exc_type, exc_val, exc_tb):
# type: (Optional[Any], Optional[Any], Optional[Any]) -> None
self.disable()
def enable(self):
# type: () -> None
# Changes should be atomic?
# Rollback on error
self.config._configure(str(id(self)), **self.kwargs)
def disable(self):
# type: () -> None
self.config._unconfigure(str(id(self)))
def __call__(self, decorated):
# type: (Union[Callable, type]) -> Union[Callable, type]
"""Wrap an object with config-overriding logic.
Supported object types:
- coroutines;
- callables;
- classes including unittest.TestCase subclasses;
"""
if isinstance(decorated, class_types):
return self.wrap_class(decorated)
if iscoroutinefunction(decorated):
return self.wrap_coro(decorated)
if callable(decorated):
return self.wrap_callable(decorated)
raise TypeError("Don't know how to use `override` for `{}`".format(type(decorated).__name__))
def wrap_class(self, cls):
# type: (type) -> type
"""Apply config overriding for given class."""
decorated_set_up = getattr(cls, "setup_class", lambda: None)
decorated_tear_down = getattr(cls, "teardown_class", lambda: None)
@classmethod # type: ignore
def setup_class(_):
# type: (type) -> None
self.enable()
try:
decorated_set_up()
except Exception:
self.disable()
raise
@classmethod # type: ignore
def teardown_class(_):
# type: (type) -> None
try:
decorated_tear_down()
finally:
self.disable()
cls.setup_class = setup_class # type: ignore
cls.teardown_class = teardown_class # type: ignore
return cls
def wrap_callable(self, func):
# type: (Callable) -> Callable
"""Apply config override to sync test functions."""
if sys.version_info[0] == 2:
return self._wrap_callable_py2(func)
return self._wrap_callable_py3(func)
def _wrap_callable_py2(self, func):
"""On Python 2.7 having functools.wraps is not enough for pytest fixture injecting.
        To avoid an extra dependency, `wrapt` is listed only among the Python 2
        dependencies and the wrapping is done differently here. Maybe it would
        be better to just use `wrapt` for both Python versions.
"""
import wrapt
@wrapt.decorator
def wrapper(func, instance, args, kwargs):
with self:
return func(*args, **kwargs)
return wrapper(func)
def _wrap_callable_py3(self, func):
@wraps(func)
def inner(*args, **kwargs):
# type: (*Any, **Any) -> Any
with self:
return func(*args, **kwargs)
return inner
def wrap_coro(self, coro):
# type: (Callable) -> Callable
"""Apply config override to async test functions."""
from ._async import wrap_coro
return wrap_coro(self, coro)
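# A minimal usage sketch (not part of the library itself): a SimpleNamespace
# stands in for a real settings module, which `from_object` accepts per its
# docstring ("object, mapping or an importable string"); the option names
# below are made up for illustration.
if __name__ == "__main__":
    from types import SimpleNamespace
    settings = SimpleNamespace(DEBUG=False, DATABASE_URI="sqlite://")
    config = Konfig.from_object(settings)
    assert config.DEBUG is False
    # `override` can be used as a context manager and as a decorator.
    with config.override(DEBUG=True):
        assert config.DEBUG is True
    assert config.DEBUG is False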
|
converters = ['Humdrum', 'ABC notation', 'Braille notation', 'Capella notation', 'LilyPond file', 'MEI notation', 'MuseData', 'Noteworthy file', 'TinyNotation', 'Vexflow easyscore'] |
from typing import Union
from probability.custom_types.external_custom_types import AnyFloatMap
from probability.custom_types.internal_custom_types import AnyBetaMap, \
AnyDirichletMap
from probability.distributions import Beta, Dirichlet
class BayesRuleMixin(object):
_prior: Union[float, Beta, AnyFloatMap, Dirichlet]
_likelihood: Union[float, AnyFloatMap, AnyBetaMap, AnyDirichletMap]
@property
def prior(self) -> Union[float, Beta, AnyFloatMap, Dirichlet]:
return self._prior
@property
def likelihood(self) -> Union[
float, AnyFloatMap, AnyBetaMap, AnyDirichletMap
]:
return self._likelihood
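# A small illustrative sketch (not from the library): a host class is expected
# to set `_prior` and `_likelihood` itself, and the mixin only exposes them as
# read-only properties. Plain floats are used so no Beta/Dirichlet objects
# need to be constructed; the class name below is made up.
class _ExampleBayesRule(BayesRuleMixin):
    def __init__(self, prior: float, likelihood: float):
        self._prior = prior
        self._likelihood = likelihood
if __name__ == '__main__':
    rule = _ExampleBayesRule(prior=0.3, likelihood=0.8)
    print(rule.prior, rule.likelihood)  # 0.3 0.8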
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': 'Mailing List Archive',
'category': 'Website',
'description': """
Odoo Mail Group : Mailing List Archives
==========================================
""",
'depends': ['website_mail'],
'data': [
'data/mail_template_data.xml',
'views/website_mail_channel_templates.xml',
'views/snippets.xml',
],
}
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from solum.objects import registry
from solum.objects.sqlalchemy import extension
from solum.tests import base
from solum.tests import utils
class TestExtension(base.BaseTestCase):
def setUp(self):
super(TestExtension, self).setUp()
self.db = self.useFixture(utils.Database())
self.ctx = utils.dummy_context()
self.data = [{'uuid': 'test-uuid-42',
'project_id': self.ctx.tenant,
'user_id': '55f41cf46df74320b9486a35f5d28a11',
'name': 'logstash',
'version': '2.13',
'description': 'This logstash extension provides a tool'
' for managing your application events'
' and logs.',
'documentation': 'http://example.com/docs/ext/logstash',
}]
utils.create_models_from_data(extension.Extension, self.data, self.ctx)
def test_objects_registered(self):
self.assertTrue(registry.Extension)
self.assertTrue(registry.ExtensionList)
def test_get_all(self):
lst = extension.ExtensionList()
self.assertIsNotNone(lst)
self.assertEqual(1, len(lst.get_all(self.ctx)))
def test_check_data_by_id(self):
e = extension.Extension().get_by_id(self.ctx, self.data[0]['id'])
self.assertIsNotNone(e)
for key, value in self.data[0].items():
self.assertEqual(value, getattr(e, key))
def test_check_data_by_uuid(self):
e = extension.Extension().get_by_uuid(self.ctx, self.data[0]['uuid'])
self.assertIsNotNone(e)
for key, value in self.data[0].items():
self.assertEqual(value, getattr(e, key))
|
#!/usr/bin/env python
# _*_coding:utf-8 _*_
# @Time :2020/5/26 15:46
# @Author : Cathy
# @FileName: lstm.py
import torch
import torch.nn as nn
import torchvision.datasets as dsets
import torchvision.transforms as transforms
from torch.autograd import Variable
# Hyper Parameters
sequence_length = 28  # sequence length: each column of the image is treated as one sequence step
input_size = 28  # dimensionality of the input at each time step
hidden_size = 128  # size of the LSTM hidden layer
num_layers = 2  # number of stacked LSTM layers
num_classes = 10
batch_size = 100
num_epochs = 20
learning_rate = 0.01
# MNIST Dataset
train_dataset = dsets.MNIST(root='../data/',
train=True,
transform=transforms.ToTensor(),
download=False)
test_dataset = dsets.MNIST(root='../data/',
train=False,
transform=transforms.ToTensor())
# Data Loader (Input Pipeline)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
batch_size=batch_size,
shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
batch_size=batch_size,
shuffle=False)
# RNN Model (Many-to-One)
class RNN(nn.Module):
def __init__(self, input_size, hidden_size, num_layers, num_classes):
super(RNN, self).__init__()
self.hidden_size = hidden_size
self.num_layers = num_layers
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)  # batch_first=True applies only to the input/output tensors, not the hidden states
self.fc = nn.Linear(hidden_size, num_classes)
def forward(self, x):
        # The initial states h_0 and c_0 are usually zeros of shape
        # (num_layers, x.size(0), hidden_size); the LSTM creates them itself when omitted.
        # h0 = Variable(torch.zeros(self.num_layers, x.size(0), self.hidden_size))
        # c0 = Variable(torch.zeros(self.num_layers, x.size(0), self.hidden_size))
        # Forward propagate RNN
        # out, (h_n, c_n) = self.lstm(x, (h0, c0))  # feed x together with the initial states (h0, c0)
        out, (h_n, c_n) = self.lstm(x)
        # Decode the hidden state of the last time step
        # (out is batch_first, while h_n and c_n are not batch_first)
        out = out[:, -1, :]
        out = self.fc(out)
        return out
rnn = RNN(input_size, hidden_size, num_layers, num_classes)
# Loss and Optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(rnn.parameters(), lr=learning_rate)
# Train the Model
for epoch in range(num_epochs):
for i, (images, labels) in enumerate(train_loader):
# a = images.numpy()
images = Variable(images.view(-1, sequence_length, input_size)) # 100*1*28*28 -> 100*28*28
# b = images.data.cpu().numpy()
labels = Variable(labels)
# Forward + Backward + Optimize
optimizer.zero_grad()
outputs = rnn(images)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
if (i+1) % 2 == 0:
print ('Epoch [%d/%d], Step [%d/%d], Loss: %.4f'
%(epoch+1, num_epochs, i+1, len(train_dataset)//batch_size, loss.item()))
# Test the Model
correct = 0
total = 0
for images, labels in test_loader:
images = Variable(images.view(-1, sequence_length, input_size))
outputs = rnn(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted.cpu() == labels).sum()
print('Test Accuracy of the model on the 10000 test images: %d %%' % (100 * correct / total))
# Save the Model
torch.save(rnn.state_dict(), 'rnn.pkl')
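# Optional sketch: restore the weights saved above into a fresh model and run
# a single prediction. This only reuses names already defined in this script.
restored_rnn = RNN(input_size, hidden_size, num_layers, num_classes)
restored_rnn.load_state_dict(torch.load('rnn.pkl'))
restored_rnn.eval()
with torch.no_grad():
    sample_images, sample_labels = next(iter(test_loader))
    sample_images = sample_images.view(-1, sequence_length, input_size)
    _, sample_predicted = torch.max(restored_rnn(sample_images), 1)
    print('First 10 predictions:', sample_predicted[:10].tolist())
    print('First 10 labels:     ', sample_labels[:10].tolist())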
|
import logging
from datetime import datetime
from datetime import timezone
from aiogram.types import Chat as TelegramChat
from dasbot.models.chat import Chat, ChatSchema
log = logging.getLogger(__name__)
class ChatsRepo(object):
def __init__(self, chats_col, scores_col):
self._chats = chats_col
self._scores = scores_col
self.__status()
def __status(self):
log.info("%s chat(s) in DB" % self._chats.count_documents({}))
log.info("%s scores(s) in DB" % self._scores.count_documents({}))
def load_chat(self, tg_chat: TelegramChat):
"""
        :param tg_chat: Telegram chat
:return: Chat instance, loaded from DB, or new if not found
"""
chat_data = self._chats.find_one({"chat_id": tg_chat.id}, {"_id": 0})
log.debug("requested chat %s, result: %s", tg_chat.id, chat_data)
if chat_data:
chat = ChatSchema().load(chat_data)
else:
user = {'username': tg_chat.username, 'first_name': tg_chat.first_name, 'last_name': tg_chat.last_name}
chat = Chat(tg_chat.id, user)
return chat
def save_chat(self, chat: Chat):
""" Returns pymongo UpdateResult instance """
query = {"chat_id": chat.id}
data = ChatSchema().dump(chat)
update = {"$set": data}
result = self._chats.update_one(query, update, upsert=True)
log.debug("saved chat %s, result: %s", chat.id, result.raw_result)
return result
def get_pending_chats(self, now=None):
"""
:param now: timestamp when the function is called
:return: list of chats that have pending quizzes
"""
if now is None:
now = datetime.now(tz=timezone.utc)
query = {"subscribed": True, "quiz_scheduled_time": {"$lte": now}}
results_cursor = self._chats.find(query, {"_id": 0})
chats = [ChatSchema().load(chat_data) for chat_data in results_cursor]
return chats
# TODO: make Score a separate model?
def load_scores(self, chat_id):
"""
:param chat_id: chat id
:return: dict of scores {word: (score, due_date)}
"""
query = {"chat_id": chat_id}
results_cursor = self._scores.find(query, {"_id": 0})
scores = {item["word"]: (item["score"], item["revisit"])
for item in results_cursor}
# log.debug("loaded scores for chat %s, result: %s", chat.id, scores)
return scores
# TODO: check if saved successfully?
def save_score(self, chat: Chat, word, score):
"""
:param chat: chat instance
:param word: word to save the score for
:param score: a tuple (score, due_date)
:return: pymongo UpdateResult instance
"""
query = {"chat_id": chat.id, "word": word}
update = {"$set": {"score": score[0], "revisit": score[1]}}
result = self._scores.update_one(query, update, upsert=True)
# log.debug("saved score for chat %s, result: %s", chat.id, result.raw_result)
return result
if __name__ == "__main__":
pass
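    # A hedged local-run sketch (it assumes the optional `mongomock` package is
    # installed): the chat stand-in below only provides the `id` attribute that
    # save_score() actually reads.
    import mongomock
    from types import SimpleNamespace
    db = mongomock.MongoClient().dasbot
    repo = ChatsRepo(db.chats, db.scores)
    fake_chat = SimpleNamespace(id=1)
    repo.save_score(fake_chat, "Haus", (3, datetime.now(tz=timezone.utc)))
    print(repo.load_scores(1))  # {'Haus': (3, <revisit datetime>)}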
|
import os
# the path of "examples/autonomous"
SELF_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
ALGORITHMS = [
"orchestra_sb", # 1
"orchestra_rb_s", # 2
"orchestra_rb_ns", # 3
#"orchestra_rb_ns_sr", # 4
# "link", # 5
# "msf", # 6
# "emsf", # 7
"alice", # 8
"alice_rx", # 9
]
BEST_ALGORITHMS = [
"orchestra_sb",
"orchestra_rb_ns",
"link",
"alice",
]
COMPARATIVE_ALGORITHMS = [
"dataset1",
"dataset2",
]
ALGONAMES = {
"orchestra_sb" : "Orchestra SB",
"orchestra_rb_s" : "Orchestra RB / Storing",
"orchestra_rb_ns" : "Orchestra RB / Non-Storing",
"orchestra_rb_ns_sr" : "Orchestra RB / Non-Storing (SR)", # with RPL storing rule
"link" : "Link-based", # ALICE-like - do not use the name ALICE because it implies other features!
"msf" : "MSF",
"emsf" : "Extended MSF",
"alice" : "ALICE",
"alice_rx" : "ALICE, node based\nchannel offsets",
# generic names
"dataset1" : "Dataset 1",
"dataset2" : "Dataset 2",
}
COLORS = {
"orchestra_sb" : "green",
"orchestra_rb_s" : "slateblue",
"orchestra_rb_ns" : "orange",
"orchestra_rb_ns_sr" : "grey",
"link" : "lightblue",
"msf" : "red",
"emsf" : "brown",
"alice" : "blue",
"alice_rx" : "#ff4444",
"dataset1" : "red",
"dataset2" : "green",
}
FIRMWARE_TYPES = {
"orchestra_sb" : 1,
"orchestra_rb_s" : 2,
"orchestra_rb_ns" : 3,
"orchestra_rb_ns_sr" : 4,
"link" : 5,
"msf" : 6,
"emsf" : 7,
"alice" : 8,
"alice_rx" : 9,
}
EXPERIMENTS = [
"exp-collection",
"exp-query",
# "exp-local"
]
SLOTFRAME_SIZES_A =[
7, # initial
11, # =7+4
19, # =11+8
35, # =19+16
67, # =35+32
101 # =67+34
]
SLOTFRAME_SIZES_B =[
7, # initial
15, # =7+8
31, # =15+16
63, # =31+32
101 # =63+39
]
SLOTFRAME_SIZES_C =[
7, # initial
19, # =7+12
43, # =19+24
101 # =43+58
]
SLOTFRAME_SIZES = SLOTFRAME_SIZES_C
SEND_INTERVALS = [
6, # 10 packets per minute (6x5^0)
# 30, # 2 packets per minute (6x5^1)
# 150 # packet per 2.5 minutes (6x5^2)
]
NUM_NEIGHBORS = [
4, # sparse
10 # dense
]
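# Illustrative only (not part of the original configuration): pair each
# algorithm with its display name, plot color and firmware type, roughly the
# way downstream plotting scripts consume these tables.
if __name__ == "__main__":
    for algorithm in ALGORITHMS:
        print("{:<18} name={!r:<40} color={:<10} firmware={}".format(
            algorithm, ALGONAMES[algorithm], COLORS[algorithm], FIRMWARE_TYPES[algorithm]))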
|
"""
File: linked_list.py
Name:
--------------------------
This file shows how to construct a linked list
from scratch and use it to implement a priority queue.
"""
class ListNode:
def __init__(self, data, pointer):
# value & next 為慣用語
self.value = data
self.next = pointer
def main():
# Way 1
node1 = ListNode(('A', 3), None)
node2 = ListNode(('B', 5), None)
node1.next = node2
node3 = ListNode(('C', 7), None)
node2.next = node3
# Way 2
node3 = ListNode(('C', 7), None)
node2 = ListNode(('B', 5), node3)
node1 = ListNode(('A', 3), node2)
linked_list = node1
traversal(linked_list)
def traversal(linked_list):
cur = linked_list
while cur is not None:
print(cur.value)
cur = cur.next
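# The file docstring mentions using the linked list for a priority queue; the
# helper below is a small sketch of that idea (it is not called by main()).
def insert_by_priority(head, data):
    """Insert a (name, priority) tuple so the list stays sorted by priority.
    Assumes every node's value is such a tuple, as in main() above, and that a
    smaller number means higher priority. Returns the (possibly new) head.
    """
    new_node = ListNode(data, None)
    if head is None or data[1] < head.value[1]:
        new_node.next = head
        return new_node
    cur = head
    while cur.next is not None and cur.next.value[1] <= data[1]:
        cur = cur.next
    new_node.next = cur.next
    cur.next = new_node
    return head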
if __name__ == '__main__':
main()
|
"""
Robot Framework Assistant offers IDE features for editing Robot Framework test
data in Sublime Text 3.
"""
import sys
import os
from string import Template
from .commands import *
if sys.version_info < (3, 3):
raise RuntimeError('Plugin only works with Sublime Text 3')
def plugin_loaded():
package_folder = os.path.dirname(__file__)
if not os.path.exists(os.path.join(package_folder, 'Main.sublime-menu')):
template_file = os.path.join(
package_folder, 'templates', 'Main.sublime-menu.tpl'
)
with open(template_file, 'r', encoding='utf8') as tplfile:
template = Template(tplfile.read())
menu_file = os.path.join(package_folder, 'Main.sublime-menu')
with open(menu_file, 'w', encoding='utf8') as menu:
menu.write(template.safe_substitute({
'package_folder': os.path.basename(package_folder)
}))
|
from re import T
import re
import gstgva
import numpy as np
import json
from configuration import env
import os, sys, traceback
import logging
import datetime
import time
from notification import notification
from centroidtracker import CentroidTracker
from shapely.geometry import Point, Polygon
from db_query import DBQuery
from db_common import DBCommon
from db_ingest import DBIngest
office = list(map(float, env["OFFICE"].split(",")))
print("officeoffice",office)
dbhost = env["DBHOST"]
class WaitTime:
def __init__(self):
self.tracker = CentroidTracker(maxDisappeared=30, maxDistance=90)
self._db_analytics = DBIngest(host = dbhost, index="analytics", office=office)
def fetch_data(self, BOTNAME):
_path1=os.getcwd()
_path2=os.path.join(_path1,"video/")
os.makedirs(os.path.dirname(_path2), exist_ok=True)
botconfig_ids = []
dbb = DBQuery(host = dbhost, index = "botconfigs", office=office)
for botconfig in dbb.search_without_office("algorithm:'"+str(BOTNAME)+"'"):
botconfig_id = botconfig["_id"]
botconfig_ids.append(botconfig_id)
print("botconfig_ids",botconfig_ids,flush=True)
json_data = []
# cnt = 0
for botconfig_id in botconfig_ids:
sensor_ids = []
_dbb = DBCommon(host=dbhost, index="botconfigs", office=office)
botconfig_index = _dbb.get_without_office(botconfig_id)
sensor_ids.append(botconfig_index["_source"]["sensorId"])
_site_id = botconfig_index["_source"]["siteId"]
print("sensor_ids",sensor_ids,flush=True)
rtspurls = []
_dbp = DBCommon(host=dbhost, index="provisions", office=office)
provisions_index = _dbp.get(sensor_ids[0])
rtspurls.append(provisions_index["_source"]["rtspurl"])
sensor_name = provisions_index["_source"]["name"]
start_time = provisions_index["_source"]["startTime"]
_dbs = DBCommon(host=dbhost, index="sites", office=office)
sites_index = _dbs.get(_site_id)
site_name = sites_index["_source"]["name"]
print("sensor_ids",sensor_ids,flush=True)
print("rtspurls",rtspurls, flush=True)
print("site_name", site_name)
coordinates = []
zone_names = []
dbz = DBQuery(host=dbhost, index="zones", office=office)
for zones in dbz.search_without_office("botName:'"+str(BOTNAME)+"' and sensorId:'"+str(sensor_ids[0])+"'"):
coordinate = zones["_source"]["coordinates"][0]
coordinates.append(coordinate)
zone_name = zones["_source"]["name"]
zone_names.append(zone_name)
print("coordinates",coordinates, flush=True)
print("zone_names",zone_names, flush=True)
dbn = DBQuery(host = dbhost, index = "nodes", office=office)
for nodes in dbn.search("coordinates:'"+str(office)+"'"):
node_name = nodes["_source"]["name"]
print("node_name",node_name, flush=True)
_sensors = []
for sensors_num in range(len(sensor_ids)):
_url = rtspurls[sensors_num]
rois = []
for rois_num in range(len(coordinates)):
rois.append({
"roi_box":coordinates[rois_num],
"roi_name":zone_names[rois_num],
})
_sensors.append({
"url":_url,
"camera_id":sensor_ids[sensors_num],
"roi":rois,
"sensor_name":sensor_name,
"start_time":start_time
})
json_data.append({
"sensors":_sensors,
"botconfig_id":botconfig_id,
"site_id":_site_id,
"site_name":site_name,
"node_name":node_name
})
# cnt +=1
botconfig_data = {
"botconfig_data":json_data
}
print("botconfig_databotconfig_databotconfig_data",botconfig_data,flush=True)
return botconfig_data
def object_monitor(self, zonename, objectid, curr_time):
try:
# print("__________try_______________________")
with open("/home/custom_transforms/"+"object_monitor"+str(zonename)+".json","r") as object_id_list:
data = json.load(object_id_list)
except:
# print("__________except_______________________")
_object_ids = []
_dtime = dict()
_average_time = dict()
data = {
"object_id_list":_object_ids,
"dtime":_dtime,
"average_time":_average_time
}
with open("/home/custom_transforms/"+"object_monitor"+str(zonename)+".json","w") as object_id_list:
json.dump(data,object_id_list)
if objectid not in data["object_id_list"]:
# print("if_________",flush=True)
data["object_id_list"].append(objectid)
data["dtime"][str(objectid)] = curr_time
data["average_time"][str(objectid)] = 0
else:
# print("else_______",flush=True)
old_time = data["dtime"][str(objectid)]
time_diff = curr_time - old_time
data["average_time"][str(objectid)] += time_diff
data["dtime"][str(objectid)] = curr_time
with open("/home/custom_transforms/"+"object_monitor"+str(zonename)+".json","w") as object_id_list:
json.dump(data,object_id_list)
# print("datadatadata",data, flush=True)
return data["average_time"][str(objectid)]
def last_time(self):
json_time = {"last_time":time.time()}
try:
with open("/home/jsontime.json", "r") as data:
json_time = json.load(data)
last_time = json_time["last_time"]
with open("/home/jsontime.json", "w") as data:
json.dump(json_time, data)
except:
with open("/home/jsontime.json", "w") as data:
json.dump(json_time, data)
last_time = json_time["last_time"]
return last_time
def data_storage(self, zonename, wait_objects, curr_time, last_time, timelimit):
send_data_storage = False
time_length = (curr_time - float(last_time))
try:
with open("/home/custom_transforms/"+"data_storage"+str(zonename)+".json","r") as object_id_list:
data = json.load(object_id_list)
except:
data = {
"wait_objects":[],
"start_time":time.time()
}
with open("/home/custom_transforms/"+"data_storage"+str(zonename)+".json","w") as object_id_list:
json.dump(data,object_id_list)
# data["start_time"] += time_length
if len(wait_objects) > 0:
for _object in wait_objects:
data["wait_objects"].append(_object)
with open("/home/custom_transforms/"+"data_storage"+str(zonename)+".json","w") as object_id_list:
json.dump(data,object_id_list)
wait_objects_storage = data["wait_objects"]
start_time = data["start_time"]
time_diff = curr_time - start_time
if time_diff > timelimit:
send_data_storage = True
data["start_time"] = time.time()
data["wait_objects"] = []
with open("/home/custom_transforms/"+"data_storage"+str(zonename)+".json","w") as object_id_list:
json.dump(data,object_id_list)
return [send_data_storage, wait_objects_storage, time_diff]
# def dynamic
def process_frame(self, frame):
try:
messages = list(frame.messages())
if len(messages) > 0: #checking if frame message is not empty, if it is empty it will move to next frame
try:
BOTNAME = "heatmap"
new_data = self.fetch_data(BOTNAME)
except Exception as error:
print("wrong in fetch_data", error, flush=True)
for botconfig_data in new_data["botconfig_data"]:
botConfigId = botconfig_data["botconfig_id"]
siteId = botconfig_data["site_id"]
site_name = botconfig_data["site_name"]
print("site_namesite_name",site_name)
node_name = botconfig_data["node_name"]
botName = BOTNAME
checklist = "None"
if len(messages)>0:
try:
rects = []
output_centroid = dict()
messages=list(frame.messages())
# if len(messages)>0: # checking if the frame message is not empty and loading in json_msg
json_msg = json.loads(messages[0])
for i in json_msg: # getting all detcted object's coordinates
if i=='objects':
dic=json_msg[i]
for j in range(len(dic)):
if dic[j]['detection']['label']=='person': # checking if detected object is person and then storing it's coordinates in rects list
x_max=dic[j]['x']+dic[j]['w']
y_max=dic[j]['y']+dic[j]['h']
x=dic[j]['x']
y=dic[j]['y']
rects.append([x,y,x_max,y_max])
break
# else:
# json_msg={}
except Exception as error:
print("wrong in starting:{}".format(error),flush=True)
data = botconfig_data
for i in data['sensors']:
sensorId = i["camera_id"]
sensor_name = i["sensor_name"]
start_time = i["start_time"]
zone_cnt = 0
for zone_ in i["roi"]:
new_rects = []
roi_box = zone_["roi_box"]
roiName = zone_["roi_name"]
print("roiname__________1",roiName)
try:
for rect in rects: # iterating through rects and checking person is in roi or not
startX, startY, endX, endY = rect
cX = int((startX + endX) / 2.0)
cY = int((startY + endY) / 2.0)
info = self.inroi(cX, cY, roi_box)
if info==True: # if person is in roi then storing rect in new_rects
new_rects.append(rect)
except Exception as error:
print("wrong in inroi:{}".format(error),flush=True)
try:
objects = self.tracker.update(new_rects) # getting objectId for all person in roi
# print("objectsobjects",objects, len(new_rects))
wait_objects = []
for (objectId, centroid) in objects.items(): # getting individual objectId of a person
output_centroid = dict()
cX, cY = centroid
curr_time = time.time()
object_monitor = self.object_monitor(roiName, objectId, curr_time)
if object_monitor > 10:
output_centroid['X'] = int(cX)
output_centroid['Y'] = int(cY)
output_centroid['Id'] = int(objectId)
output_centroid["unique"] = str(cX) +" + "+str(cY)
# print("ouput_centroid ", output_centroid, flush=True)
wait_objects.append(output_centroid)
# print("wait_objects", wait_objects, roiName, flush=True)
except Exception as error:
print("wrong in tracker:{}".format(error),flush=True)
# if len(wait_objects)>0:
dump_storage_data = self.data_storage(roiName, wait_objects, curr_time, self.last_time(), 300)
print("__________________do not dump_______________________",dump_storage_data[0], dump_storage_data[2])
if dump_storage_data[0] == True:
print("________x____x______dump_______x________x________",dump_storage_data[0], dump_storage_data[1], dump_storage_data[2])
if start_time == None:
new_time = curr_time * 1000 # for live streaming
else:
new_time = start_time + (curr_time-float(self.last_time())) # for simulation
self._db_analytics.ingest(
{
"new_time":new_time,
"count":len(dump_storage_data[1]),
"coordinates":dump_storage_data[1],
"siteId":siteId,
"roiName":roiName,
"botName":botName,
"checklist":checklist,
"botConfigId":botConfigId,
"sensorId":sensorId
}
)["_id"]
# if start_time == None:
# json_msg["new_time"] = curr_time * 1000 # for live streaming
# else:
# json_msg["new_time"] = start_time + (curr_time-float(self.last_time())) # for simulation
# json_msg["count"] = len(dump_storage_data[1])
# json_msg["coordinates"] = dump_storage_data[1]
# json_msg["siteId"] = siteId
# json_msg["roiName"] = roiName
# json_msg["botName"] = botName
# json_msg["checklist"] = checklist
# json_msg["botConfigId"] = botConfigId
# json_msg["sensorId"] = sensorId
# print("messages___1",messages[0])
try:
frame.remove_message(messages[0])
except:
ignore = None
print("dont remove for ", roiName)
print("roiname__________2",roiName)
# print("messages___2",messages[0])
# frame.add_message(json.dumps(json_msg))
# try:
# frame.remove_message(messages[0])
# except:
# ignore = None
else:
try:
frame.remove_message(messages[0])
except:
ignore = None
except Exception as error:
print("wrong in processing_frame:{}".format(error), flush=True)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(exc_type, fname, exc_tb.tb_lineno, flush=True)
print(traceback.format_exc(), flush=True)
# or
print(sys.exc_info()[2], flush=True)
return True
def inroi(self,x,y,line): # function checks whether the x and y point of a person detected is in roi, return True or False
k=line
p1 = Point(x,y)
coords = [(k[0],k[1]), (k[2],k[3]), (k[4],k[5]), (k[6],k[7])]
poly = Polygon(coords)
info=p1.within(poly)
return info
def convert(self, seconds): # function converts the waitTime from seconds to h:m:s format
seconds = seconds % (24 * 3600)
hour = seconds // 3600
seconds %= 3600
minutes = seconds // 60
seconds %= 60
return "%d:%02d:%02d" % (hour, minutes, seconds)
|
class UserStatistics:
def __init__(self):
self.files = 0
self.insertions = 0
self.deletions = 0
@property
def is_zero(self):
return self.files == self.insertions == self.deletions == 0
def __repr__(self):
return "files changed: {}, insertions: {}, deletions: {}".format(self.files, self.insertions, self.deletions)
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
dp_cluster_reg - a tool to onboard clusters managed by Ambari and Cloudera Manager on Dataplane
"""
__version__ = '1.0.0'
# Check minimum required Python version
import sys
if sys.version_info < (2, 7):
print("dp-cluster-reg %s requires Python 2.7" % __version__)
sys.exit(1)
# imports
from .helpers import BColors, ScriptPrerequisites
from .registrationflow import FlowManager
from .userinput import User
|
# BE VERY CAREFUL WHEN USING THIS SCRIPT IT RENAMES THINGS THAT ARE POTENTIALLY IMPORTANT
# this script makes target.txt files that correspond to the directory each RUN is in
# this script also renumbers runs from being 8 times 0-~39 to be one consecutive list of 0 to 316
from glob import glob
import re
import os
run_paths = glob('./packaged_models/fah-projects/*/*')
run_number = 0
for run_path in run_paths:
# Define our target by the directory name
target = run_path.split('/')[3]
if run_path.split('.')[-1] == 'xml':
#there are some xml files we don't want to include that come in the glob selection
pass
else:
print run_path
print target
# write the target.txt file in the RUN* directory
with open("%s/target.txt"%run_path, "w") as text_file:
text_file.write("{0}".format(target))
# Define our new run number
print run_number
# Rename our directory to be labeled by that new run number
base_path = run_path.rsplit('/',1)[0]
new_path = '%s/RUN%s'%(base_path,run_number)
os.rename(run_path,new_path)
# Iterate the run_number before it moves onto the next loop
run_number += 1
|
a="hello world"
print(a)
|
from Instrucciones.TablaSimbolos.Instruccion import Instruccion
from Instrucciones.TablaSimbolos.Tipo import Tipo_Dato, Tipo
from Instrucciones.Excepcion import Excepcion
from Instrucciones.Sql_select.Select import Select
from Instrucciones.Tablas.Tablas import Tablas
class SelectLista(Instruccion):
def __init__(self, lista, strGram, linea, columna):
Instruccion.__init__(self,Tipo(Tipo_Dato.QUERY),linea,columna,strGram)
self.lista = lista
def ejecutar(self, tabla, arbol):
super().ejecutar(tabla,arbol)
columnas = []
valores = []
selectEncontrado = 0
for ins in self.lista:
if isinstance(ins, Alias):
resultado = ins.expresion.ejecutar(tabla, arbol)
if isinstance(resultado, Excepcion):
return resultado
valores.append(str(resultado))
columnas.append(ins.id)
elif isinstance(ins, Select):
resultado = ins.ejecutar(tabla, arbol)
if isinstance(resultado, Excepcion):
return resultado
valores = resultado
columnas = ins.devolverColumnas(tabla,arbol)
if isinstance(columnas, Excepcion):
return columnas
selectEncontrado = 1
else:
resultado = ins.ejecutar(tabla, arbol)
if isinstance(resultado, Excepcion):
return resultado
valores.append(str(resultado))
columnas.append('col')
#print("COLUMNAS-------------------------->",columnas)
#print("VALORES-------------------------->",valores)
if(selectEncontrado == 0):
valores = [valores]
if(arbol.getRelaciones() == False):
arbol.getMensajeTabla(columnas,valores)
else:
n = Tablas("tabla",None)
n.data = valores
n.lista_de_campos = columnas
return n
else:
if(arbol.getRelaciones() == False):
arbol.getMensajeTabla(columnas,valores)
else:
n = Tablas("tabla",None)
n.lista_de_campos = columnas
n.data = valores
return n
class Alias():
def __init__(self, id, expresion):
self.id = id
self.expresion = expresion |
from django.apps import AppConfig
class CheeseappConfig(AppConfig):
name = 'cheeseapp'
|
import matplotlib.pyplot as plt
import numpy as np
import torch
def show_failures(
model,
data_loader,
unnormalizer=None,
class_dict=None,
nrows=3,
ncols=5,
figsize=None,
):
failure_features = []
failure_pred_labels = []
failure_true_labels = []
for batch_idx, (features, targets) in enumerate(data_loader):
with torch.no_grad():
features = features
targets = targets
logits = model(features)
predictions = torch.argmax(logits, dim=1)
for i in range(features.shape[0]):
if targets[i] != predictions[i]:
failure_features.append(features[i])
failure_pred_labels.append(predictions[i])
failure_true_labels.append(targets[i])
if len(failure_true_labels) >= nrows * ncols:
break
features = torch.stack(failure_features, dim=0)
targets = torch.tensor(failure_true_labels)
predictions = torch.tensor(failure_pred_labels)
fig, axes = plt.subplots(
nrows=nrows, ncols=ncols, sharex=True, sharey=True, figsize=figsize
)
if unnormalizer is not None:
for idx in range(features.shape[0]):
features[idx] = unnormalizer(features[idx])
nhwc_img = np.transpose(features, axes=(0, 2, 3, 1))
if nhwc_img.shape[-1] == 1:
nhw_img = np.squeeze(nhwc_img.numpy(), axis=3)
for idx, ax in enumerate(axes.ravel()):
ax.imshow(nhw_img[idx], cmap="binary")
if class_dict is not None:
ax.title.set_text(
f"P: {class_dict[predictions[idx].item()]}"
f"\nT: {class_dict[targets[idx].item()]}"
)
else:
ax.title.set_text(f"P: {predictions[idx]} | T: {targets[idx]}")
ax.axison = False
else:
for idx, ax in enumerate(axes.ravel()):
ax.imshow(nhwc_img[idx])
if class_dict is not None:
ax.title.set_text(
f"P: {class_dict[predictions[idx].item()]}"
f"\nT: {class_dict[targets[idx].item()]}"
)
else:
ax.title.set_text(f"P: {predictions[idx]} | T: {targets[idx]}")
ax.axison = False
return fig, axes
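# A hedged usage sketch, assuming torchvision is available. FakeData provides
# random 3-channel images so nothing has to be downloaded, and the untrained
# linear model exists only so the failure grid has something to show.
if __name__ == "__main__":
    import torch.nn as nn
    from torch.utils.data import DataLoader
    from torchvision import datasets, transforms
    fake_loader = DataLoader(
        datasets.FakeData(size=256, image_size=(3, 32, 32), num_classes=10,
                          transform=transforms.ToTensor()),
        batch_size=64, shuffle=False)
    model = nn.Sequential(nn.Flatten(), nn.Linear(3 * 32 * 32, 10))
    model.eval()
    fig, axes = show_failures(model, fake_loader, nrows=3, ncols=5,
                              class_dict={i: str(i) for i in range(10)})
    plt.show()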
|
RAW_QUEUES = {
"example_timed_set": {
"job_factory": lambda rawparam: {
"path": "example.Print",
"params": {
"test": rawparam
}
}
}
} |
from setuptools import setup
from setuptools import find_namespace_packages
from setuptools import find_packages
#README file :
with open("README.md","r") as readme_handle :
long_descriptions=readme_handle.read()
setup(
name="auto_translation",
author="Mohammed BADI",
author_email="[email protected]",
version="0.1.5",
description="A simple python library used to translate and speak a given text from the user with Google Translation using user's Chrome browser. ",
long_description=long_descriptions,
long_description_content_type="text/markdown",
url="https://github.com/mouh2020/auto-translation",
install_requires=['selenium==3.141.0',
'chromedriver-binary-auto==0.1'],
keywords=['translation','traduction','translate','speech','voice','google translation','languages'],
packages=find_namespace_packages(include=['auto_browser','auto_browser.*']),
classifiers=[
'Intended Audience :: Education',
'Natural Language :: English',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python',
'Topic :: Multimedia :: Sound/Audio',
'Topic :: Multimedia :: Sound/Audio :: Speech',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Communications :: Chat'
],
)
|
from datetime import date, timedelta
import csv
f = open('html.txt', 'w')
#life_expect = []
#with open('behavior.csv') as csvfile:
# reader = csv.DictReader(csvfile)
# for row in reader:
# life_expect.append(row)
#f.write("INSERT INTO genetic VALUES")
code = 2018
while code >= 1900:
stCode = str(code)
f.write("<option value=\"" + stCode + "\">" + stCode + "</option>\n")
code -= 1
f.close()
|
#!/usr/bin/env python
"""This is the GRR client installer module.
GRR allows several installers to be registered as plugins. The
installers are executed when the client is deployed to a target system
in their specified order (according to the registry plugin system).
Installers are usually used to upgrade existing clients and setup
clients in unusual situations.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import logging
import os
import sys
from future.utils import with_metaclass
from grr_response_core import config
from grr_response_core.config import contexts
from grr_response_core.lib import flags
from grr_response_core.lib import registry
class Installer(
with_metaclass(registry.MetaclassRegistry, registry.HookRegistry)):
"""A GRR installer plugin.
Modules can register special actions which only run on installation
by extending this base class. Execution order is controlled using
the same mechanism provided by HookRegistry - i.e. by declaring
"pre" and "order" attributes.
"""
def RunInstaller():
"""Run all registered installers.
Run all the current installers and then exit the process.
"""
try:
os.makedirs(os.path.dirname(config.CONFIG["Installer.logfile"]))
except OSError:
pass
# Always log to the installer logfile at debug level. This way if our
# installer fails we can send detailed diagnostics.
handler = logging.FileHandler(config.CONFIG["Installer.logfile"], mode="wb")
handler.setLevel(logging.DEBUG)
# Add this to the root logger.
logging.getLogger().addHandler(handler)
# Ordinarily when the client starts up, the local volatile
  # configuration is read. However, when running the installer, we
  # need to ensure that only the installer configuration is used so
  # nothing gets overridden by local settings. We therefore must reload
# the configuration from the flag and ignore the Config.writeback
# location.
config.CONFIG.Initialize(filename=flags.FLAGS.config, reset=True)
config.CONFIG.AddContext(contexts.INSTALLER_CONTEXT,
"Context applied when we run the client installer.")
logging.warn("Starting installation procedure for GRR client.")
try:
Installer().Init()
except Exception as e: # pylint: disable=broad-except
# Ouch! we failed to install... Not a lot we can do
# here - just log the error and give up.
logging.exception("Installation failed: %s", e)
# Error return status.
sys.exit(-1)
# Exit successfully.
sys.exit(0)
|
from typing import Optional, List
from stx.compiling.reading.location import Location
from stx.components import Component, Composite, CodeBlock, Table, Image, \
FunctionCall, CustomText
from stx.components import ListBlock, Paragraph, PlainText, StyledText
from stx.components import LinkText, Literal, Figure, Section, Separator
from stx.components import ContentBox, TableOfContents, ElementReference
from stx.components import CapturedText
from stx.document import Document
from stx.utils.stx_error import StxError
def location_to_json(location: Optional[Location]) -> Optional[dict]:
if location is None:
return None
return {
'file_path': location.file_path,
'line': location.line,
'column': location.column,
}
def extend_base(component: Component, type_id: str, inherited: dict):
return {
'type': type_id,
'refs': component.get_refs(),
'location': location_to_json(component.location),
**inherited,
}
def composite_to_json(composite: Composite) -> dict:
return extend_base(composite, 'composite', {
'components': components_to_json(composite.components),
})
def code_block_to_json(code_block: CodeBlock) -> dict:
return extend_base(code_block, 'code-block', {
'lang': code_block.lang,
'content': components_to_json(code_block.contents),
})
def table_to_json(table: Table) -> dict:
return extend_base(table, 'table', {
'caption': component_to_json(table.caption),
'number': table.number,
'rows': [
{
'header': row.header,
'cells': components_to_json(row.cells),
}
for row in table.rows
]
})
def list_to_json(list_block: ListBlock) -> dict:
return extend_base(list_block, 'list', {
'ordered': list_block.ordered,
'items': components_to_json(list_block.items),
})
def paragraph_to_json(paragraph: Paragraph) -> dict:
return extend_base(paragraph, 'paragraph', {
'contents': components_to_json(paragraph.contents),
})
def plain_text_to_json(plain_text: PlainText) -> dict:
return extend_base(plain_text, 'plain-text', {
'content': plain_text.content,
})
def styled_text_to_json(styled_text: StyledText) -> dict:
return extend_base(styled_text, 'styled-text', {
'style': styled_text.style,
'contents': components_to_json(styled_text.contents),
})
def custom_text_to_json(custom: CustomText) -> dict:
    return extend_base(custom, 'custom-text', {
'custom_style': custom.custom_style,
'contents': components_to_json(custom.contents),
})
def link_text_to_json(link_text: LinkText) -> dict:
return extend_base(link_text, 'link-text', {
'reference': link_text.reference,
'contents': components_to_json(link_text.contents),
})
def literal_block_to_json(literal: Literal) -> dict:
return extend_base(literal, 'literal', {
# TODO add origin?
'content': literal.text,
})
def figure_to_json(figure: Figure) -> dict:
return extend_base(figure, 'figure', {
'number': figure.number,
'content': component_to_json(figure.content),
'caption': component_to_json(figure.caption),
})
def section_to_json(section: Section) -> dict:
return extend_base(section, 'section', {
'level': section.level,
'number': section.number,
'heading': component_to_json(section.heading),
'content': component_to_json(section.content),
})
def toc_elements_to_json(elements: List[ElementReference]) -> List[dict]:
return [
{
'title': element.title,
'reference': element.reference,
'number': element.number,
'elements': toc_elements_to_json(element.elements),
}
for element in elements
]
def toc_to_json(toc: TableOfContents) -> dict:
return extend_base(toc, 'toc', {
'title': toc.title,
'elements': toc_elements_to_json(toc.elements),
})
def separator_to_json(separator: Separator) -> dict:
return extend_base(separator, 'separator', {
'name': separator.level,
})
def box_to_json(box: ContentBox) -> dict:
return extend_base(box, 'box', {
'style': box.style,
'content': component_to_json(box.content),
})
def components_to_json(components: List[Component]) -> List[dict]:
return [
component_to_json(component)
for component in components
]
def captured_text_to_json(captured: CapturedText) -> dict:
return extend_base(captured, 'captured-text', {
'class': captured.class_,
'contents': components_to_json(captured.contents),
})
def image_to_json(image: Image):
return extend_base(image, 'image', {
'src': image.src,
'alt': image.alt,
})
def function_call_to_json(call: FunctionCall):
if call.result is None:
raise StxError(f'Not resolved function: {call.key}', call.location)
return component_to_json(call.result)
def component_to_json(content: Optional[Component]) -> Optional[dict]:
if content is None:
return None
elif isinstance(content, Composite):
return composite_to_json(content)
elif isinstance(content, CodeBlock):
return code_block_to_json(content)
elif isinstance(content, Table):
return table_to_json(content)
elif isinstance(content, ListBlock):
return list_to_json(content)
elif isinstance(content, Paragraph):
return paragraph_to_json(content)
elif isinstance(content, PlainText):
return plain_text_to_json(content)
elif isinstance(content, StyledText):
return styled_text_to_json(content)
elif isinstance(content, CustomText):
return custom_text_to_json(content)
elif isinstance(content, LinkText):
return link_text_to_json(content)
elif isinstance(content, Literal):
return literal_block_to_json(content)
elif isinstance(content, Figure):
return figure_to_json(content)
elif isinstance(content, Section):
return section_to_json(content)
elif isinstance(content, TableOfContents):
return toc_to_json(content)
elif isinstance(content, Separator):
return separator_to_json(content)
elif isinstance(content, ContentBox):
return box_to_json(content)
elif isinstance(content, CapturedText):
return captured_text_to_json(content)
elif isinstance(content, Image):
return image_to_json(content)
elif isinstance(content, FunctionCall):
return function_call_to_json(content)
else:
raise NotImplementedError(f'Not implemented type: {type(content)}')
def document_to_json(doc: Document) -> dict:
return {
'title': doc.title,
'author': doc.author,
'content': component_to_json(doc.content)
}
|
"""
---
title: Diffusion models
summary: >
A set of PyTorch implementations/tutorials of diffusion models.
---
# Diffusion models
* [Denoising Diffusion Probabilistic Models (DDPM)](ddpm/index.html)
"""
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django.db.models.deletion
from django.core.validators import MinValueValidator
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('cabotapp', '0006_auto_20170821_1000'),
]
operations = [
migrations.CreateModel(
name='PingStatusCheck',
fields=[
('statuscheck_ptr',
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to='cabotapp.StatusCheck')),
('host',
models.TextField(
help_text=b'Host to check.')),
('packet_size',
models.PositiveIntegerField(
verbose_name=b'Packet size',
help_text=b'Packet size in data bytes.',
default=56)),
('count',
models.PositiveIntegerField(
help_text=b'Ping count.',
default=3)),
('max_rtt',
models.FloatField(
verbose_name=b'Max RTT',
help_text=b'Maximum RTT.',
validators=[MinValueValidator(0.0)],
default=70)),
],
options={
'abstract': False,
},
bases=('cabotapp.statuscheck',),
),
]
|
import pytest
from django.test import TestCase
from oauth2_provider.generators import BaseHashGenerator, generate_client_id, generate_client_secret
class MockHashGenerator(BaseHashGenerator):
def hash(self):
return 42
@pytest.mark.usefixtures("oauth2_settings")
class TestGenerators(TestCase):
def test_generate_client_id(self):
g = self.oauth2_settings.CLIENT_ID_GENERATOR_CLASS()
self.assertEqual(len(g.hash()), 40)
self.oauth2_settings.CLIENT_ID_GENERATOR_CLASS = MockHashGenerator
self.assertEqual(generate_client_id(), 42)
def test_generate_secret_id(self):
g = self.oauth2_settings.CLIENT_SECRET_GENERATOR_CLASS()
self.assertEqual(len(g.hash()), 128)
self.oauth2_settings.CLIENT_SECRET_GENERATOR_CLASS = MockHashGenerator
self.assertEqual(generate_client_secret(), 42)
def test_basegen_misuse(self):
g = BaseHashGenerator()
self.assertRaises(NotImplementedError, g.hash)
|
#! usr/bin/python3.6
"""
Module initially auto generated using V5Automation files from CATIA V5 R28 on 2020-07-06 14:02:20.222384
.. warning::
The notes denoted "CAA V5 Visual Basic Help" are to be used as reference only.
They are there as a guide as to how the visual basic / catscript functions work
and thus help debugging in pycatia.
"""
from pycatia.in_interfaces.reference import Reference
from pycatia.knowledge_interfaces.length import Length
from pycatia.mec_mod_interfaces.hybrid_shape import HybridShape
class HybridShapePolyline(HybridShape):
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| System.IUnknown
| System.IDispatch
| System.CATBaseUnknown
| System.CATBaseDispatch
| System.AnyObject
| MecModInterfaces.HybridShape
| HybridShapePolyline
|
| Represents the hybrid shape polyline curve object.
| Role: To access or set the data of the hybrid shape polyline object. This data
| includes:
|
| Elements
| Radius
| Closure
|
| Use the HybridShapeFactory object to create a HybridShapePolyline
| object.
"""
def __init__(self, com_object):
super().__init__(com_object)
self.hybrid_shape_polyline = com_object
@property
def closure(self) -> bool:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property Closure() As boolean
|
| Returns or sets the flag to decide closure of the
| polyline.
|
| Parameters:
|
| Closure
| (For get_Closure) Returns or sets the closure
| property
|
| Example:
| This example retrieves the closure property of the polyline of
| the HybShpPolyline hybrid shape polyline.
|
| Dim HybShpPolClosure As boolean
| HybShpPolClosure = HybShpPolyline.Closure
:return: bool
:rtype: bool
"""
return self.hybrid_shape_polyline.Closure
@closure.setter
def closure(self, value: bool):
"""
:param bool value:
"""
self.hybrid_shape_polyline.Closure = value
@property
def number_of_elements(self) -> int:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property NumberOfElements() As long (Read Only)
|
| Returns the number of elements of the polyline.
|
| Parameters:
|
| NumberOfElements
| Number of elements in the polyline.
|
| Example:
| This example retrieves the number of elements in the polyline
| of the HybShpPolyline hybrid shape polyline.
|
| Dim HybShpPolNoOfEle As long
| HybShpPolNoOfEle = HybShpPolyline.NumberOfElements
:return: int
:rtype: int
"""
return self.hybrid_shape_polyline.NumberOfElements
def get_element(self, i_position: int, o_element: Reference, o_radius: Length) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub GetElement(long iPosition,
| Reference oElement,
| Length oRadius)
|
| Returns the element of the polyline.
|
| Parameters:
|
| iPosition
| Position at which the element is to be retrieved.
| oElement
| Reference to the element.
| ioRadius
| Length to the radius.
|
| Example:
| This example retrieves the element and radius of the polyline
| at specified position of the HybShpPolyline hybrid shape
| polyline.
|
| Dim HybShpPolylineElement As Reference
| Dim HybShpPolylineRadius As Reference
| HybShpPolyline.GetElement 1,
| HybShpPolylineElement,HybShpPolylineRadius
:param int i_position:
:param Reference o_element:
:param Length o_radius:
:return: None
:rtype: None
"""
return self.hybrid_shape_polyline.GetElement(i_position, o_element.com_object, o_radius.com_object)
# # # # Autogenerated comment:
# # some methods require a system service call as the methods expects a vb array object
# # passed to it and there is no way to do this directly with python. In those cases the following code
# # should be uncommented and edited accordingly. Otherwise completely remove all this.
# # vba_function_name = 'get_element'
# # vba_code = """
# # Public Function get_element(hybrid_shape_polyline)
# # Dim iPosition (2)
# # hybrid_shape_polyline.GetElement iPosition
# # get_element = iPosition
# # End Function
# # """
# # system_service = self.application.system_service
# # return system_service.evaluate(vba_code, 0, vba_function_name, [self.com_object])
def insert_element(self, i_point: Reference, i_position: int) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub InsertElement(Reference iPoint,
| long iPosition)
|
| Inserts the element at a specified position in the
| polyline.
|
| Parameters:
|
| iPoint
| Reference of the point object to be inserted.
| iPosition
| Position at which the element should be inserted.
|
| Example:
| This example inserts the element in the polyline of the
| HybShpPolyline hybrid shape polyline.
|
| HybShpPolyline.InsertElement PointReference,1
:param Reference i_point:
:param int i_position:
:return: None
:rtype: None
"""
return self.hybrid_shape_polyline.InsertElement(i_point.com_object, i_position)
# # # # Autogenerated comment:
# # some methods require a system service call as the methods expects a vb array object
# # passed to it and there is no way to do this directly with python. In those cases the following code
# # should be uncommented and edited accordingly. Otherwise completely remove all this.
# # vba_function_name = 'insert_element'
# # vba_code = """
# # Public Function insert_element(hybrid_shape_polyline)
# # Dim iPoint (2)
# # hybrid_shape_polyline.InsertElement iPoint
# # insert_element = iPoint
# # End Function
# # """
# # system_service = self.application.system_service
# # return system_service.evaluate(vba_code, 0, vba_function_name, [self.com_object])
def remove_element(self, i_position: int) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub RemoveElement(long iPosition)
|
| Removes the element at a specified position in the
| polyline.
|
| Parameters:
|
| iPosition
| Position from which the element should be should be
| removed.
|
| Example:
| This example removes the element in the polyline of the
| HybShpPolyline hybrid shape polyline.
|
| HybShpPolyline.RemoveElement 1
:param int i_position:
:return: None
:rtype: None
"""
return self.hybrid_shape_polyline.RemoveElement(i_position)
def replace_element(self, i_point: Reference, i_position: int) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub ReplaceElement(Reference iPoint,
| long iPosition)
|
| Replaces the element at a specified position in the
| polyline.
|
| Parameters:
|
| iPoint
| Reference of the point object that will replace the old element.
|
| iPosition
                |             Position at which the element should be replaced.
|
| Example:
| This example replaces the element in the polyline of the
| HybShpPolyline hybrid shape polyline.
|
| HybShpPolyline.ReplaceElement PointReference, 1
:param Reference i_point:
:param int i_position:
:return: None
:rtype: None
"""
return self.hybrid_shape_polyline.ReplaceElement(i_point.com_object, i_position)
# # # # Autogenerated comment:
# # some methods require a system service call as the methods expects a vb array object
# # passed to it and there is no way to do this directly with python. In those cases the following code
# # should be uncommented and edited accordingly. Otherwise completely remove all this.
# # vba_function_name = 'replace_element'
# # vba_code = """
# # Public Function replace_element(hybrid_shape_polyline)
# # Dim iPoint (2)
# # hybrid_shape_polyline.ReplaceElement iPoint
# # replace_element = iPoint
# # End Function
# # """
# # system_service = self.application.system_service
# # return system_service.evaluate(vba_code, 0, vba_function_name, [self.com_object])
def set_radius(self, i_position: int, i_radius: float) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub SetRadius(long iPosition,
| double iRadius)
|
| Sets the radius at specified position of the polyline.
|
| Parameters:
|
| iPosition
| Position at which radius should be set
| iRadius
| Value of the radius to be set.
|
| Example:
| This example sets the radius at the specific position of the
| polyline of the HybShpPolyline hybrid shape
| polyline.
|
| HybShpPolyline.SetRadius 1, 10
:param int i_position:
:param float i_radius:
:return: None
:rtype: None
"""
return self.hybrid_shape_polyline.SetRadius(i_position, i_radius)
def __repr__(self):
return f'HybridShapePolyline(name="{self.name}")'
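# Illustrative use of the editing methods above; the CATIA session, the
# `polyline` wrapper instance and the `point_ref` Reference are assumed to
# exist already and are not created by this module:
#     polyline.insert_element(point_ref, 1)   # add a vertex at position 1
#     polyline.set_radius(1, 10.0)            # round that corner (value as in the VB example)
#     polyline.replace_element(point_ref, 2)  # swap the point at position 2
#     polyline.remove_element(3)              # drop the point at position 3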
|
#!/usr/bin/env python3
###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import os
import argparse
import datetime
import numpy as np
import h5py
def getListOfFiles(dirName):
listOfFiles = os.listdir(dirName)
allFiles = list()
for entry in listOfFiles:
fullPath = os.path.join(dirName, entry)
if os.path.isdir(fullPath):
allFiles = allFiles + getListOfFiles(fullPath)
else:
allFiles.append(fullPath)
return allFiles
def load_hdf5(filename):
"""
load training samples from *.hdf5 file
"""
if not(os.path.exists(filename)):
print ("file:", filename, "does not exist")
os._exit(1)
if os.path.splitext(filename)[1] != '.h5':
print ("file:", filename, "is not an hdf5 file")
os._exit(1)
h5_file = h5py.File(filename, 'r')
values = list(h5_file.values())[0]
print ("load data size:", values.shape[0])
return values
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='generate training samples\
from a specified directory')
parser.add_argument('directory', type=str,
help='directory contains feature files in .h5')
parser.add_argument('-n', '--npy', action='store_true',
help='if is .npy rather than .h5, use this.')
args = parser.parse_args()
path = args.directory
if not args.npy:
print ("load h5 from directory: {}".format(path))
if os.path.isdir(path):
features = None
labels = None
h5_files = getListOfFiles(path)
print ("Total number of files:", len(h5_files))
for i, h5_file in enumerate(h5_files):
print ("Process File", i, ":", h5_file)
feature = load_hdf5(h5_file)
if np.any(np.isinf(feature)):
print ("inf data found")
features = np.concatenate((features, feature), axis=0) if features is not None \
else feature
else:
print ("Fail to find", path)
os._exit(-1)
date = datetime.datetime.now().strftime('%Y-%m-%d')
sample_file = path + '/merged' + date + '.h5'
print ("Save samples file to:", sample_file)
h5_file = h5py.File(sample_file, 'w')
h5_file.create_dataset('data', data=features)
h5_file.close()
else:
print ("load npy from directory: {}".format(path))
if os.path.isdir(path):
features_go = None
features_cutin = None
npy_files = getListOfFiles(path)
print ("Total number of files:", len(npy_files))
for i, npy_file in enumerate(npy_files):
print ("Process File", i, ":", npy_file)
temp_features = np.load(npy_file)
feature_go = np.zeros((temp_features.shape[0], 157))
feature_cutin = np.zeros((temp_features.shape[0], 157))
count_go = 0
count_cutin = 0
for j in range(temp_features.shape[0]):
fea = np.asarray(temp_features[j])
if fea.shape[0] != 157:
continue
if fea[-1] < -1 or fea[-1] > 4:
continue
fea = fea.reshape((1, 157))
if fea[0, -1]%2 == 0:
feature_go[count_go] = fea
count_go += 1
else:
feature_cutin[count_cutin] = fea
count_cutin += 1
feature_go = feature_go[:count_go]
feature_cutin = feature_cutin[:count_cutin]
features_go = np.concatenate((features_go, feature_go), axis=0) if features_go is not None \
else feature_go
features_cutin = np.concatenate((features_cutin, feature_cutin), axis=0) if features_cutin is not None \
else feature_cutin
else:
print ("Fail to find", path)
os._exit(-1)
print (features_go.shape)
print (features_cutin.shape)
date = datetime.datetime.now().strftime('%Y-%m-%d')
sample_file_go = path + '/merged_go_' + date + '.h5'
sample_file_cutin = path + '/merged_cutin_' + date + '.h5'
h5_file_go = h5py.File(sample_file_go, 'w')
h5_file_go.create_dataset('data', data=features_go)
h5_file_go.close()
h5_file_cutin = h5py.File(sample_file_cutin, 'w')
h5_file_cutin.create_dataset('data', data=features_cutin)
h5_file_cutin.close()
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the gcp module - monitoring.py"""
import typing
import unittest
import mock
from tests.providers.gcp import gcp_mocks
class GoogleCloudMonitoringTest(unittest.TestCase):
"""Test Google Cloud Monitoring class."""
# pylint: disable=line-too-long
@typing.no_type_check
@mock.patch('libcloudforensics.providers.gcp.internal.monitoring.GoogleCloudMonitoring.GcmApi')
def testActiveServices(self, mock_gcm_api):
"""Validates the parsing of Monitoring API TimeSeries data."""
services = mock_gcm_api.return_value.projects.return_value.timeSeries.return_value.list
services.return_value.execute.return_value = gcp_mocks.MOCK_GCM_METRICS_COUNT
active_services = gcp_mocks.FAKE_MONITORING.ActiveServices()
self.assertIn('compute.googleapis.com', active_services)
self.assertEqual(active_services['compute.googleapis.com'],
gcp_mocks.MOCK_COMPUTE_METRIC)
self.assertIn('stackdriver.googleapis.com', active_services)
self.assertEqual(active_services['stackdriver.googleapis.com'],
gcp_mocks.MOCK_STACKDRIVER_METRIC)
self.assertIn('logging.googleapis.com', active_services)
self.assertEqual(active_services['logging.googleapis.com'],
gcp_mocks.MOCK_LOGGING_METRIC)
@typing.no_type_check
def testBuildCpuUsageFilter(self):
"""Validates the query filter builder functionality"""
# pylint: disable=protected-access
instances_filter = gcp_mocks.FAKE_MONITORING._BuildCpuUsageFilter(
['0000000000000000001', '0000000000000000002'])
self.assertEqual(
instances_filter, ('metric.type = "compute.googleapis.com/instance/'
'cpu/utilization" AND (resource.label.instance_id = '
'"0000000000000000001" OR resource.label.instance_id = '
'"0000000000000000002")'))
@typing.no_type_check
@mock.patch('libcloudforensics.providers.gcp.internal.monitoring.GoogleCloudMonitoring.GcmApi')
def testGetCpuUsage(self, mock_gcm_api):
"""Validates the parsing of CPU usage metrics."""
services = mock_gcm_api.return_value.projects.return_value.timeSeries.return_value.list
services.return_value.execute.return_value = gcp_mocks.MOCK_GCM_METRICS_CPU
cpu_usage = gcp_mocks.FAKE_MONITORING.GetCpuUsage()
self.assertEqual(2, len(cpu_usage))
self.assertListEqual(cpu_usage,
[
{
'instance_name': 'instance-a',
'instance_id': '0000000000000000001',
'cpu_usage':
[
{
'timestamp': '2021-01-01T00:00:00.000000Z',
'cpu_usage': 0.1
}
] * 24 * 7
},
{
'instance_name': 'instance-b',
'instance_id': '0000000000000000002',
'cpu_usage':
[
{
'timestamp': '2021-01-01T00:00:00.000000Z',
'cpu_usage': 0.1
}
] * 24 * 7
}
])
|
# -*- coding: utf-8 -*-
"""Project metadata
Information describing the project.
"""
# The package name, which is also the "UNIX name" for the project.
package = 'soundly'
project = "python package for audio analysis"
project_no_spaces = project.replace(' ', '')
version = '0.2'
description = 'Python package for audio analysis'
authors = ['Fahad Ali SARWAR']
authors_string = ', '.join(authors)
emails = ['[email protected]']
license = 'MIT'
copyright = '2019 ' + authors_string
url = 'http://example.com/'
|
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import json
import logging
import os
import pathlib
import shutil
import tempfile
import uuid
from typing import Dict
from azureml.core import Run
from responsibleai import RAIInsights, __version__ as responsibleai_version
from constants import DashboardInfo, PropertyKeyValues, RAIToolType
_logger = logging.getLogger(__file__)
logging.basicConfig(level=logging.INFO)
# Directory names saved by RAIInsights might not match tool names
_tool_directory_mapping: Dict[str, str] = {
RAIToolType.CAUSAL: "causal",
RAIToolType.COUNTERFACTUAL: "counterfactual",
RAIToolType.ERROR_ANALYSIS: "error_analysis",
RAIToolType.EXPLANATION: "explainer",
}
def print_dir_tree(base_dir):
for current_dir, subdirs, files in os.walk(base_dir):
# Current Iteration Directory
print(current_dir)
# Directories
for dirname in subdirs:
print("\t" + dirname)
# Files
for filename in files:
print("\t" + filename)
def load_dashboard_info_file(input_port_path: str) -> Dict[str, str]:
# Load the rai_insights_dashboard file info
rai_insights_dashboard_file = os.path.join(
input_port_path, DashboardInfo.RAI_INSIGHTS_PARENT_FILENAME
)
with open(rai_insights_dashboard_file, "r") as si:
dashboard_info = json.load(si)
_logger.info("rai_insights_parent info: {0}".format(dashboard_info))
return dashboard_info
def copy_dashboard_info_file(src_port_path: str, dst_port_path: str):
src = pathlib.Path(src_port_path) / DashboardInfo.RAI_INSIGHTS_PARENT_FILENAME
dst = pathlib.Path(dst_port_path) / DashboardInfo.RAI_INSIGHTS_PARENT_FILENAME
shutil.copyfile(src, dst)
def create_rai_tool_directories(rai_insights_dir: pathlib.Path) -> None:
# Have to create empty subdirectories for the managers
    # The RAI Insights object expects these to be present, but
# since directories don't actually exist in Azure Blob store
# they may not be present (some of the tools always have
# a file present, even if no tool instances have been added)
for v in _tool_directory_mapping.values():
os.makedirs(rai_insights_dir / v, exist_ok=True)
_logger.info("Added empty directories")
def load_rai_insights_from_input_port(input_port_path: str) -> RAIInsights:
with tempfile.TemporaryDirectory() as incoming_temp_dir:
incoming_dir = pathlib.Path(incoming_temp_dir)
shutil.copytree(input_port_path, incoming_dir, dirs_exist_ok=True)
_logger.info("Copied RAI Insights input to temporary directory")
create_rai_tool_directories(incoming_dir)
result = RAIInsights.load(incoming_dir)
_logger.info("Loaded RAIInsights object")
return result
def copy_insight_to_raiinsights(
rai_insights_dir: pathlib.Path, insight_dir: pathlib.Path
) -> str:
print("Starting copy")
# Recall that we copy the JSON containing metadata from the
# constructor component into each directory
# This means we have that file and the results directory
# present in the insight_dir
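    # Illustrative layout (tool and GUID names are hypothetical; the JSON file
    # is whatever DashboardInfo.RAI_INSIGHTS_PARENT_FILENAME resolves to):
    #     insight_dir/
    #         <RAI_INSIGHTS_PARENT_FILENAME>
    #         causal/
    #             <guid>/...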
dir_items = list(insight_dir.iterdir())
assert len(dir_items) == 2
# We want the directory, not the JSON file
if dir_items[0].name == DashboardInfo.RAI_INSIGHTS_PARENT_FILENAME:
tool_dir_name = dir_items[1].name
else:
tool_dir_name = dir_items[0].name
_logger.info("Detected tool: {0}".format(tool_dir_name))
assert tool_dir_name in _tool_directory_mapping.values()
for k, v in _tool_directory_mapping.items():
if tool_dir_name == v:
tool_type = k
_logger.info("Mapped to tool: {0}".format(tool_type))
tool_dir = insight_dir / tool_dir_name
tool_dir_items = list(tool_dir.iterdir())
assert len(tool_dir_items) == 1
src_dir = insight_dir / tool_dir_name / tool_dir_items[0].parts[-1]
dst_dir = rai_insights_dir / tool_dir_name / tool_dir_items[0].parts[-1]
print("Copy source:", str(src_dir))
print("Copy dest :", str(dst_dir))
shutil.copytree(
src=src_dir,
dst=dst_dir,
)
_logger.info("Copy complete")
return tool_type
def save_to_output_port(rai_i: RAIInsights, output_port_path: str, tool_type: str):
with tempfile.TemporaryDirectory() as tmpdirname:
rai_i.save(tmpdirname)
_logger.info(f"Saved to {tmpdirname}")
tool_dir_name = _tool_directory_mapping[tool_type]
insight_dirs = os.listdir(pathlib.Path(tmpdirname) / tool_dir_name)
assert len(insight_dirs) == 1, "Checking for exactly one tool output"
_logger.info("Checking dirname is GUID")
uuid.UUID(insight_dirs[0])
target_path = pathlib.Path(output_port_path) / tool_dir_name
target_path.mkdir()
_logger.info("Created output directory")
_logger.info("Starting copy")
shutil.copytree(
pathlib.Path(tmpdirname) / tool_dir_name,
target_path,
dirs_exist_ok=True,
)
_logger.info("Copied to output")
def add_properties_to_gather_run(
dashboard_info: Dict[str, str], tool_present_dict: Dict[str, str]
):
_logger.info("Adding properties to the gather run")
gather_run = Run.get_context()
run_properties = {
PropertyKeyValues.RAI_INSIGHTS_TYPE_KEY: PropertyKeyValues.RAI_INSIGHTS_TYPE_GATHER,
PropertyKeyValues.RAI_INSIGHTS_RESPONSIBLEAI_VERSION_KEY: responsibleai_version,
PropertyKeyValues.RAI_INSIGHTS_MODEL_ID_KEY: dashboard_info[
DashboardInfo.RAI_INSIGHTS_MODEL_ID_KEY
],
}
_logger.info("Appending tool present information")
for k, v in tool_present_dict.items():
key = PropertyKeyValues.RAI_INSIGHTS_TOOL_KEY_FORMAT.format(k)
run_properties[key] = str(v)
_logger.info("Making service call")
gather_run.add_properties(run_properties)
_logger.info("Properties added to gather run")
|
'''
Item display
'''
import configparser
import os.path
from ..config import CONFIG, CONFIGDIRECTORY
from ..version import __version__ as version
__all__ = 'NoConfig', 'load_items', 'load_dungeons', 'new_item', 'new_dungeon'
class NoConfig(Exception):
'''
Raised when item layout config is not available.
'''
pass
def load_items() -> dict:
'''
Load item layout.
Returns:
dict: layout in format {identifier: (column, row)}
'''
return _load('Items')
def load_dungeons() -> dict:
'''
Load dungeon layout.
Returns:
dict: layout in format {identifier: (column, row)}
'''
return _load('Dungeons')
def _load(ltype: str) -> dict:
'''
Load item/dungeon layout.
Args:
ltype: 'Items' or 'Dungeons'
Returns:
dict: layout in format {identifier: (column, row)}
Raise:
NoConfig, configparser.Error: if no item layout is available
'''
inp = configparser.ConfigParser(allow_no_value=True)
try:
fid = open(os.path.join(CONFIGDIRECTORY, CONFIG['button_layout']), 'r')
except FileNotFoundError as err:
raise NoConfig() from err
try:
inp.read_file(fid)
finally:
fid.close()
if ltype not in inp:
raise NoConfig()
try:
if inp['version']['version'] != version:
raise NoConfig()
except (KeyError, configparser.NoSectionError,
configparser.NoOptionError) as err:
raise NoConfig() from err
layout = {}
for item in inp[ltype]:
if not inp[ltype][item]:
continue
try:
sep = tuple(int(c) for c in inp[ltype][item].split(','))
except ValueError as err:
raise NoConfig() from err
if len(sep) != 2:
raise NoConfig()
layout[item] = sep
return layout
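# Illustrative contents of the layout file read above (identifiers and
# coordinates are made up; the real file name comes from CONFIG['button_layout']):
#     [Items]
#     bow = 0, 1
#     hammer = 2, 0
#     [Dungeons]
#     eastern = 0, 0
#     [version]
#     version = <tracker version string>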
def new_item(layout: dict) -> None:
'''
Store given item icon layout.
Args:
layout: layout in format {identifier: (column, row)}
'''
_new(layout, 'Items')
def new_dungeon(layout: dict) -> None:
'''
Store given dungeon icon layout.
Args:
layout: layout in format {identifier: (column, row)}
'''
_new(layout, 'Dungeons')
def _new(layout: dict, ltype: str) -> None:
'''
Store given icon layout.
Args:
layout: layout in format {identifier: (column, row)}
ltype: 'Items' or 'Dungeons'
'''
assert ltype in ('Items', 'Dungeons')
if ltype == 'Items':
other = 'Dungeons'
else:
other = 'Items'
try:
existing = _load(other)
except (NoConfig, configparser.Error):
existing = {}
out = configparser.ConfigParser(allow_no_value=True)
out.add_section(ltype)
for item in layout:
out[ltype][item] = ', '.join(str(c) for c in layout[item])
out.add_section(other)
for item in existing:
out[other][item] = ', '.join(str(c) for c in existing[item])
out.add_section('version')
out['version']['version'] = version
with open(os.path.join(
CONFIGDIRECTORY, CONFIG['button_layout']), 'w') as fid:
out.write(fid)
|
# SCAR - Serverless Container-aware ARchitectures
# Copyright (C) 2011 - GRyCAP - Universitat Politecnica de Valencia
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import base64
import json
import os
import re
import subprocess
import tarfile
import tempfile
import uuid
import sys
import platform
import shutil
from .exceptions import InvalidPlatformError
import logging
def resource_path(relative_path, bin_path=None):
""" Get absolute path to resource, works for dev and for PyInstaller """
try:
# PyInstaller creates a temp folder and stores path in _MEIPASS
base_path = sys._MEIPASS
except Exception:
if bin_path:
return bin_path
else:
base_path = os.path.abspath(".")
return os.path.join(base_path, relative_path)
def is_binary_execution():
try:
binary_env = sys._MEIPASS
if platform.system().lower() != 'linux':
raise InvalidPlatformError()
return True
except Exception:
return False
def get_logger():
logger = logging.getLogger()
if is_variable_in_environment('LOG_LEVEL'):
logger.setLevel(get_environment_variable('LOG_LEVEL'))
else:
logger.setLevel('DEBUG')
return logger
def join_paths(*paths):
return os.path.join(*paths)
def get_temp_dir():
return tempfile.gettempdir()
def lazy_property(func):
''' A decorator that makes a property lazy-evaluated.'''
attr_name = '_lazy_' + func.__name__
@property
def _lazy_property(self):
if not hasattr(self, attr_name):
setattr(self, attr_name, func(self))
return getattr(self, attr_name)
return _lazy_property
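# Illustrative use of lazy_property (class and attribute names are made up):
#     class Example:
#         @lazy_property
#         def answer(self):
#             return compute_answer()   # hypothetical helper, runs only once
#     Example().answer  # first access computes the value and caches it as '_lazy_answer'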
def find_expression(string_to_search, rgx_pattern):
'''Returns the first group that matches the rgx_pattern in the string_to_search'''
if string_to_search:
pattern = re.compile(rgx_pattern)
match = pattern.search(string_to_search)
        if match:
return match.group()
def base64_to_utf8_string(value):
return base64.b64decode(value).decode('utf-8')
def utf8_to_base64_string(value):
return base64.b64encode(value).decode('utf-8')
def dict_to_base64_string(value):
    return base64.b64encode(json.dumps(value).encode('utf-8')).decode('utf-8')
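# Round-trip sketch for the two helpers above:
#     dict_to_base64_string({'a': 1})        -> 'eyJhIjogMX0='
#     base64_to_utf8_string('eyJhIjogMX0=')  -> '{"a": 1}'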
def divide_list_in_chunks(elements, chunk_size):
"""Yield successive n-sized chunks from th elements list."""
if len(elements) == 0:
yield []
for i in range(0, len(elements), chunk_size):
yield elements[i:i + chunk_size]
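# Example: list(divide_list_in_chunks([1, 2, 3, 4, 5], 2)) -> [[1, 2], [3, 4], [5]]
# An empty input yields a single empty chunk: list(divide_list_in_chunks([], 2)) -> [[]]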
def get_random_uuid4_str():
return str(uuid.uuid4())
def merge_dicts(d1, d2):
'''
Merge 'd1' and 'd2' dicts into 'd1'.
'd2' has precedence over 'd1'
'''
for k,v in d2.items():
if v:
if k not in d1:
d1[k] = v
elif type(v) is dict:
d1[k] = merge_dicts(d1[k], v)
elif type(v) is list:
d1[k] += v
return d1
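# Example (illustrative values): nested dicts are merged recursively, lists are
# concatenated, and falsy values in d2 are skipped:
#     merge_dicts({'a': 1, 'c': {'x': 1}}, {'b': 2, 'c': {'y': 2}, 'd': 0})
#     -> {'a': 1, 'b': 2, 'c': {'x': 1, 'y': 2}}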
def is_value_in_dict(dictionary, value):
return value in dictionary and dictionary[value]
def get_tree_size(path):
"""Return total size of files in given path and subdirs."""
total = 0
for entry in os.scandir(path):
if entry.is_dir(follow_symlinks=False):
total += get_tree_size(entry.path)
else:
total += entry.stat(follow_symlinks=False).st_size
return total
def get_all_files_in_directory(dir_path):
files = []
for dirname, _, filenames in os.walk(dir_path):
for filename in filenames:
files.append(os.path.join(dirname, filename))
return files
def get_file_size(file_path):
'''Return file size in bytes'''
return os.stat(file_path).st_size
def create_folder(folder_name):
if not os.path.isdir(folder_name):
os.makedirs(folder_name, exist_ok=True)
def create_file_with_content(path, content):
with open(path, "w") as f:
f.write(content)
def read_file(file_path, mode="r"):
with open(file_path, mode) as content_file:
return content_file.read()
def delete_file(path):
if os.path.isfile(path):
os.remove(path)
def delete_folder(path):
shutil.rmtree(path)
def create_tar_gz(files_to_archive, destination_tar_path):
with tarfile.open(destination_tar_path, "w:gz") as tar:
for file_path in files_to_archive:
tar.add(file_path, arcname=os.path.basename(file_path))
return destination_tar_path
def extract_tar_gz(tar_path, destination_path):
with tarfile.open(tar_path, "r:gz") as tar:
tar.extractall(path=destination_path)
def kill_process(process):
# Using SIGKILL instead of SIGTERM to ensure the process finalization
os.killpg(os.getpgid(process.pid), subprocess.signal.SIGKILL)
def execute_command(command):
subprocess.call(command)
def execute_command_and_return_output(command):
return subprocess.check_output(command).decode("utf-8")
def is_variable_in_environment(variable):
return is_value_in_dict(os.environ, variable)
def set_environment_variable(key, variable):
if key and variable:
os.environ[key] = variable
def get_environment_variable(variable):
if is_variable_in_environment(variable):
return os.environ[variable]
def parse_arg_list(arg_keys, cmd_args):
result = {}
for key in arg_keys:
if type(key) is tuple:
if key[0] in cmd_args and cmd_args[key[0]]:
result[key[1]] = cmd_args[key[0]]
else:
if key in cmd_args and cmd_args[key]:
result[key] = cmd_args[key]
return result
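# Example (illustrative keys): plain keys are copied through, tuple keys are
# renamed from cmd_args[key[0]] to result[key[1]], and falsy or missing values
# are skipped:
#     parse_arg_list(['name', ('img', 'image')], {'name': 'f', 'img': 'ubuntu', 'memory': 0})
#     -> {'name': 'f', 'image': 'ubuntu'}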
def get_user_defined_variables():
user_vars = {}
for key in os.environ.keys():
# Find global variables with the specified prefix
if re.match("CONT_VAR_.*", key):
user_vars[key.replace("CONT_VAR_", "")] = get_environment_variable(key)
return user_vars
|
#!/usr/bin/python
# Copyright 2008 Deutsches Forschungszentrum fuer Kuenstliche Intelligenz
# or its licensors, as applicable.
#
# You may not use this file except under the terms of the accompanying license.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You may
# obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Project:
# File: check-am.py
# Purpose: identify files which are not handled in OCRopus automake
# Responsible: kofler
# Reviewer:
# Primary Repository:
# Web Sites: www.iupr.org, www.dfki.de
import os, sys, glob
# verify we are in the right folder, i.e. OCRopus top-level
if not os.path.exists('ocr-utils') or not os.path.exists('ocroscript'):
print >> sys.stderr
print >> sys.stderr, "This script must be run from the OCRopus top-level folder!"
print >> sys.stderr
exit(1)
if not os.path.exists('Makefile.am'):
print >> sys.stderr
print >> sys.stderr, "Makefile.am not found!"
print >> sys.stderr
if not os.path.exists('ocroscript/Makefile.am'):
print >> sys.stderr
print >> sys.stderr, "ocroscript Makefile.am not found!"
print >> sys.stderr
def output(files, kind=""):
"""
Produce some helpful output for maintaining automake
"""
if len(files) > 0:
print
print "These", kind, "files are not handled:"
for src in files:
print src
print "---"
else:
print
print "OK, all", kind, "files are handled."
# get all ocr-* and additional folders with sources
dirs = [ d for d in glob.glob('ocr-*') if os.path.isdir(d) ]
dirs.append('ext/voronoi')
# switch to this later
#pkgs = [ p for p in glob.glob('*/*.pkg') ]
# get all cc and h files
ccs = []
for d in dirs:
ccs += glob.glob(d+"/*.cc")
hs = []
for d in dirs:
hs += glob.glob(d+"/*.h")
# get all pkg files in ocroscript
pkgs = [ p for p in os.listdir('ocroscript') if p.endswith('.pkg') ]
# get all ocroscript sources
ocroccs = [ c for c in os.listdir('ocroscript') if c.endswith('.cc') ]
# read automake file
amfile = open('Makefile.am')
am = amfile.read()
amfile.close()
# read ocroscript automake file
osamfile = open('ocroscript/Makefile.am')
osam = osamfile.read()
osamfile.close()
# identify missing cc files, also mains and tests
missingccs = []
missingmains = []
missingtests = []
for src in ccs:
if src not in am:
if "main-" in src:
missingmains.append(src)
elif "test-" in src:
missingtests.append(src)
else:
missingccs.append(src)
# identify missing h files
missinghs = []
for h in hs:
if h not in am:
missinghs.append(h)
# identify missing pkg files
missingpkgs = []
for p in pkgs:
if p not in osam:
missingpkgs.append(p)
# identify missing cc files for ocroscript
missingocroccs = []
for src in ocroccs:
if src not in osam:
missingocroccs.append(src)
print
print "Please remember: This script only checks if files are handled at all."
print "It does NOT check whether they are handled correctly!"
# output maintenance information for cc, h, main- and test- files
output(missingccs, "cc")
output(missinghs, "h")
output(missingpkgs, "pkg")
output(missingocroccs, "ocroscript cc")
output(missingmains, "main")
#output(missingtests, "test")
#print "dirs", dirs
#print "ccs", ccs
#print "hs", hs
#print pkgs
#print am |
test = { 'name': 'q1e',
'points': 3,
'suites': [ { 'cases': [ {'code': ">>> top_selling_segments.labels == ('Segment_Category', 'Average Sales') and top_selling_segments.num_rows == 7\nTrue", 'hidden': False, 'locked': False},
{ 'code': ">>> np.all(top_selling_segments.column('Segment_Category') == np.array(['Quick Service & Coffee Cafe', 'Quick Service & Burger',\n"
"... 'Quick Service & Mexican', 'Fast Casual & Bakery Cafe',\n"
"... 'Quick Service & Chicken', 'Quick Service & Sandwich',\n"
"... 'Quick Service & Pizza']))\n"
'True',
'hidden': False,
'locked': False},
{ 'code': ">>> np.all(np.isclose(top_selling_segments.column('Average Sales'), np.array([7972.25 , 6106.46153846, 6071.5 , 5890. ,\n"
'... 3769.83333333, 3741.25 , 2664.5 ])))\n'
'True',
'hidden': False,
'locked': False}],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'}]}
|
###########################################################################
# Created by: CASIA IVA
# Email: [email protected]
# Copyright (c) 2018
###########################################################################
from __future__ import division
import os
import numpy as np
import torch
import torch.nn as nn
from torch.nn.functional import upsample, normalize
from torch.nn import Module, Sequential, Conv2d, Parameter, Linear, Sigmoid, Softmax, Dropout, Embedding
#from ...nn import PAM_Module
from ...nn import CAM_Module
from ...nn import PyramidPooling
from .base import BaseNet
__all__ = ['PPANet', 'get_ppanet']
class PPANet(BaseNet):
r"""Fully Convolutional Networks for Semantic Segmentation
Parameters
----------
nclass : int
Number of categories for the training dataset.
backbone : string
Pre-trained dilated backbone network type (default:'resnet50'; 'resnet50',
'resnet101' or 'resnet152').
norm_layer : object
        Normalization layer used in backbone network (default: :class:`mxnet.gluon.nn.BatchNorm`).
Reference:
Long, Jonathan, Evan Shelhamer, and Trevor Darrell. "Fully convolutional networks
for semantic segmentation." *CVPR*, 2015
"""
def __init__(self, nclass, backbone, aux=False, se_loss=False, norm_layer=nn.BatchNorm2d, **kwargs):
super(PPANet, self).__init__(nclass, backbone, aux, se_loss, norm_layer=norm_layer, **kwargs)
self.head = PPANetHead(2048, nclass, norm_layer, self._up_kwargs)
def forward(self, x, dep):
imsize = x.size()[2:]
_, _, c3, c4 = self.base_forward(x)
x = self.head(c4)
x = list(x)
x[0] = upsample(x[0], imsize, **self._up_kwargs)
x[1] = upsample(x[1], imsize, **self._up_kwargs)
x[2] = upsample(x[2], imsize, **self._up_kwargs)
outputs = [x[0], x[1], x[2]]
return tuple(outputs)
class PPANetHead(nn.Module):
def __init__(self, in_channels, out_channels, norm_layer, up_kwargs):
super(PPANetHead, self).__init__()
inter_channels = in_channels // 4 # 512
# pyramid pooling
self.pyramid_pool = PyramidPooling(in_channels, norm_layer, up_kwargs)
self.conv_pp = nn.Sequential(nn.Conv2d(in_channels * 2, inter_channels, 3, padding=1, bias=False),
norm_layer(inter_channels),
nn.ReLU(True))
# spatial attention
self.conv_s0 = nn.Sequential(nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False),
norm_layer(inter_channels),
nn.ReLU())
self.sa = PPA_Module(inter_channels)
self.conv_s1 = nn.Sequential(nn.Conv2d(inter_channels, inter_channels, 3, padding=1, bias=False),
norm_layer(inter_channels),
nn.ReLU())
# channel attention
self.conv_c0 = nn.Sequential(nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False),
norm_layer(inter_channels),
nn.ReLU())
self.sc = CAM_Module(inter_channels)
self.conv_c1 = nn.Sequential(nn.Conv2d(inter_channels, inter_channels, 3, padding=1, bias=False),
norm_layer(inter_channels),
nn.ReLU())
self.conv_s2 = nn.Sequential(nn.Dropout2d(0.1, False), nn.Conv2d(inter_channels, out_channels, 1))
self.conv_c2 = nn.Sequential(nn.Dropout2d(0.1, False), nn.Conv2d(inter_channels, out_channels, 1))
self.conv_2 = nn.Sequential(nn.Dropout2d(0.1, False), nn.Conv2d(inter_channels, out_channels, 1))
def forward(self, x):
pp_feat = self.pyramid_pool(x) # [4096, 60, 60]
pp_feat = self.conv_pp(pp_feat) # [512, 60, 60]
feat1 = self.conv_s0(x)
sa_feat = self.sa(feat1, pp_feat)
sa_conv = self.conv_s1(sa_feat)
sa_output = self.conv_s2(sa_conv)
feat2 = self.conv_c0(x)
sc_feat = self.sc(feat2)
sc_conv = self.conv_c1(sc_feat)
sc_output = self.conv_c2(sc_conv)
feat_sum = sa_conv + sc_conv
sasc_output = self.conv_2(feat_sum)
output = [sasc_output, sa_output, sc_output]
return tuple(output)
class PPA_Module(Module):
""" Position attention module"""
#Ref from SAGAN
def __init__(self, in_dim):
super(PPA_Module, self).__init__()
self.chanel_in = in_dim
self.query_conv = Conv2d(in_channels=in_dim, out_channels=in_dim//8, kernel_size=1)
self.key_conv = Conv2d(in_channels=in_dim, out_channels=in_dim//8, kernel_size=1)
# self.value_conv = Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1)
self.softmax = Softmax(dim=-1)
self.gamma = Parameter(torch.zeros(1))
def forward(self, x, pp_feat):
"""
inputs :
x : input feature maps( B X C X H X W)
returns :
out : attention value + input feature
attention: B X (HxW) X (HxW)
"""
m_batchsize, C, height, width = x.size()
proj_query = self.query_conv(x).view(m_batchsize, -1, width*height).permute(0, 2, 1)
proj_key = self.key_conv(x).view(m_batchsize, -1, width*height)
energy = torch.bmm(proj_query, proj_key)
attention = self.softmax(energy)
        proj_value = pp_feat.view(m_batchsize, -1, width*height) # [B, C of pp_feat, wh]
out = torch.bmm(proj_value, attention.permute(0, 2, 1))
out = out.view(m_batchsize, C, height, width)
out = self.gamma*out + pp_feat
return out
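# Shape walk-through of PPA_Module.forward above (B = batch, C = in_dim, HW = height * width):
#     proj_query : (B, HW, C // 8)
#     proj_key   : (B, C // 8, HW)
#     energy     : (B, HW, HW), softmax-normalised over the last axis
#     proj_value : (B, C_pp, HW), taken from the pyramid-pooled features
#     out        : reshaped to (B, C, H, W), which assumes pp_feat has the same
#                  channel count as x (true in PPANetHead, where both are inter_channels)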
def get_ppanet(dataset='pascal_voc', backbone='resnet50', pretrained=False,
root='../../encoding/models/pretrain', **kwargs):
acronyms = {
'pascal_voc': 'voc',
'pascal_aug': 'voc',
'pcontext': 'pcontext',
'ade20k': 'ade',
'cityscapes': 'cityscapes',
}
# infer number of classes
from ...datasets import datasets, VOCSegmentation, VOCAugSegmentation, ADE20KSegmentation
model = PPANet(datasets[dataset.lower()].NUM_CLASS, backbone=backbone, root=root, **kwargs)
if pretrained:
from ..model_store import get_model_file
model.load_state_dict(torch.load(
get_model_file('fcn_%s_%s' % (backbone, acronyms[dataset]), root=root)),
strict=False)
return model
|
from mp4box.box import TrackFragmentDecodingTime
def parse_tfdt(reader, my_size):
version = reader.read32()
box = TrackFragmentDecodingTime(my_size, version, 0)
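    # In ISO BMFF, a version 1 'tfdt' box stores baseMediaDecodeTime in 64 bits,
    # a version 0 box in 32 bits, hence the branch below.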
if version == 1:
box.base_media_decode_time = reader.read64()
else:
        box.base_media_decode_time = reader.read32()
    return box
|
import numpy as np
from intp_integrated import (AccumulatedInterpNN,
AccumulatedInterpDrizzle,
AccumulatedInterpLinear)
import matplotlib.pyplot as plt
def test_plot_accumulate_interp_nn():
x = [0, 1, 2, 3, 4]
y = [1, 3, 2, 4, 1.5]
xx = np.arange(-1., 5., 0.05)
intp = AccumulatedInterpNN(x, y)
yy = intp.rebin(xx)
plt.clf()
plt.plot(x, y, "o")
plt.plot(xx[:-1], yy/0.05)
intp = AccumulatedInterpDrizzle(x, y, drizzle=0.6)
yy = intp.rebin(xx)
plt.plot(xx[:-1], yy/0.05)
def test_plot_accumulate_interp_linear():
x = [0, 1, 2, 3, 4]
y = [1, 3, 2, 4, 1.5]
dx = 0.2
xx = np.arange(0.01, 4., dx)
intp = AccumulatedInterpLinear(x, y)
yy = intp.rebin(xx) / dx
xc = 0.5 * (xx[:-1] + xx[1:])
plt.clf()
plt.plot(x, y, "o")
plt.plot(xc, yy)
|
import os
import glob
import hdf5_getters
def get_all_titles(basedir, ext='.h5'):
titles = []
for root, dirs, files in os.walk(basedir):
files = glob.glob(os.path.join(root,'*'+ext))
for f in files:
h5 = hdf5_getters.open_h5_file_read(f)
titles.append( hdf5_getters.get_title(h5) )
h5.close()
return titles
|
NumOfRows = int(input("Enter number of rows: "))
for i in range(NumOfRows, 0, -1):
for j in range(0, i):
print("* ", end=" ") #for numbers--> print(j, end=" ")
print("\n") |
# -*- coding: utf-8 -*-
import sys
from PySide2 import QtWidgets
from PySide2.QtTest import QTest
from pyleecan.Classes.LamHole import LamHole
from pyleecan.Classes.HoleM52 import HoleM52
from pyleecan.GUI.Dialog.DMatLib.MatLib import MatLib
from pyleecan.GUI.Dialog.DMachineSetup.SMHoleMag.PHoleM52.PHoleM52 import PHoleM52
from pyleecan.Classes.Material import Material
import pytest
@pytest.mark.GUI
class TestPHoleM52(object):
"""Test that the widget PHoleM52 behave like it should"""
@pytest.fixture
def setup(self):
"""Run at the begining of every test to setup the gui"""
if not QtWidgets.QApplication.instance():
self.app = QtWidgets.QApplication(sys.argv)
else:
self.app = QtWidgets.QApplication.instance()
test_obj = LamHole(Rint=0.1, Rext=0.2)
test_obj.hole = list()
test_obj.hole.append(HoleM52(H0=0.10, H1=0.11, H2=0.12, W0=0.13, W3=0.17))
test_obj.hole[0].magnet_0.mat_type.name = "Magnet2"
matlib = MatLib()
matlib.dict_mat["RefMatLib"] = [
Material(name="Magnet1"),
Material(name="Magnet2"),
Material(name="Magnet3"),
]
widget = PHoleM52(test_obj.hole[0], matlib)
yield {"widget": widget, "test_obj": test_obj, "matlib": matlib}
self.app.quit()
def test_init(self, setup):
"""Check that the Widget spinbox initialise to the lamination value"""
assert setup["widget"].lf_H0.value() == 0.10
assert setup["widget"].lf_H1.value() == 0.11
assert setup["widget"].lf_H2.value() == 0.12
assert setup["widget"].lf_W0.value() == 0.13
assert setup["widget"].lf_W3.value() == 0.17
# Check material
assert not setup["widget"].w_mat_1.isHidden()
assert setup["widget"].w_mat_1.c_mat_type.currentText() == "Magnet2"
assert setup["widget"].w_mat_1.c_mat_type.currentIndex() == 1
setup["test_obj"] = LamHole(Rint=0.1, Rext=0.2)
setup["test_obj"].hole = list()
setup["test_obj"].hole.append(
HoleM52(H0=0.10, H1=0.11, H2=0.12, W0=0.13, W3=0.17)
)
setup["test_obj"].hole[0].magnet_0 == None
setup["matlib"] = MatLib()
setup["matlib"].dict_mat["RefMatLib"] = [
Material(name="Magnet1"),
Material(name="Magnet2"),
Material(name="Magnet3"),
]
setup["widget"] = PHoleM52(setup["test_obj"].hole[0], setup["matlib"])
assert not setup["widget"].w_mat_1.isHidden()
def test_set_W0(self, setup):
"""Check that the Widget allow to update W0"""
# Clear the field before writing the new value
setup["widget"].lf_W0.clear()
QTest.keyClicks(setup["widget"].lf_W0, "0.31")
setup["widget"].lf_W0.editingFinished.emit() # To trigger the slot
assert setup["widget"].hole.W0 == 0.31
assert setup["test_obj"].hole[0].W0 == 0.31
def test_set_W3(self, setup):
"""Check that the Widget allow to update W3"""
# Clear the field before writing the new value
setup["widget"].lf_W3.clear()
QTest.keyClicks(setup["widget"].lf_W3, "0.323")
setup["widget"].lf_W3.editingFinished.emit() # To trigger the slot
assert setup["widget"].hole.W3 == 0.323
assert setup["test_obj"].hole[0].W3 == 0.323
def test_set_H0(self, setup):
"""Check that the Widget allow to update H0"""
# Clear the field before writing the new value
setup["widget"].lf_H0.clear()
QTest.keyClicks(setup["widget"].lf_H0, "0.34")
setup["widget"].lf_H0.editingFinished.emit() # To trigger the slot
assert setup["widget"].hole.H0 == 0.34
assert setup["test_obj"].hole[0].H0 == 0.34
def test_set_H1(self, setup):
"""Check that the Widget allow to update H1"""
# Clear the field before writing the new value
setup["widget"].lf_H1.clear()
QTest.keyClicks(setup["widget"].lf_H1, "0.35")
setup["widget"].lf_H1.editingFinished.emit() # To trigger the slot
assert setup["widget"].hole.H1 == 0.35
assert setup["test_obj"].hole[0].H1 == 0.35
def test_set_H2(self, setup):
"""Check that the Widget allow to update H2"""
# Clear the field before writing the new value
setup["widget"].lf_H2.clear()
QTest.keyClicks(setup["widget"].lf_H2, "0.36")
setup["widget"].lf_H2.editingFinished.emit() # To trigger the slot
assert setup["widget"].hole.H2 == 0.36
assert setup["test_obj"].hole[0].H2 == 0.36
def test_set_material_0(self, setup):
"""Check that you can change the material of mat_void"""
setup["widget"].w_mat_0.c_mat_type.setCurrentIndex(0)
assert setup["widget"].w_mat_0.c_mat_type.currentText() == "Magnet1"
assert setup["test_obj"].hole[0].mat_void.name == "Magnet1"
def test_set_material_1(self, setup):
"""Check that you can change the material of magnet_0"""
setup["widget"].w_mat_1.c_mat_type.setCurrentIndex(0)
assert setup["widget"].w_mat_1.c_mat_type.currentText() == "Magnet1"
assert setup["test_obj"].hole[0].magnet_0.mat_type.name == "Magnet1"
def test_comp_output(self, setup):
"""Check that you can compute the output only if the hole is correctly set """
setup["test_obj"] = LamHole(Rint=0.1, Rext=0.2)
setup["test_obj"].hole = list()
setup["test_obj"].hole.append(
HoleM52(H0=0.0010, H1=0.11, H2=0.00012, W0=0.0013, W3=0.0017)
)
setup["widget"].hole = setup["test_obj"].hole[0]
setup["widget"].comp_output()
        # NaN values appear because these inputs are not valid for the sin, cos and tan methods; with realistic values the computation works.
assert setup["widget"].out_slot_surface.text() == "Slot suface: 0.002569 m²"
assert (
setup["widget"].out_magnet_surface.text() == "Magnet surface: 0.000143 m²"
)
assert setup["widget"].out_alpha.text() == "alpha: 0.166 rad (9.511°)"
assert setup["widget"].out_W1.text() == "W1: 0.006234 m"
|
from django.urls import path
from .views import home, delete_todo
urlpatterns = [
path('', home, name='home'),
path('delete_todo/<int:todo_id>', delete_todo, name='delete'),
]
|
import os
import json
import sys
import pytest
import subprocess
import re
import shlex
from prettytable import PrettyTable
from collections import OrderedDict
from yattag import Doc
from pathlib import Path
from tests.conftest import TEST_ROOT, PROJECT_ROOT
BG_COLOR_GREEN_HEX = 'ccffcc'
BG_COLOR_YELLOW_HEX = 'ffffcc'
BG_COLOR_RED_HEX = 'ffcccc'
DIFF_TARGET_MIN_GLOBAL = -0.1
DIFF_TARGET_MAX_GLOBAL = 0.1
DIFF_FP32_MIN_GLOBAL = -1.0
DIFF_FP32_MAX_GLOBAL = 0.1
results_path = str(PROJECT_ROOT)
class TestSotaCheckpoints:
param_list = []
train_param_list = []
ids_list = []
train_ids_list = []
row_dict = OrderedDict()
color_dict = OrderedDict()
test = None
cmd = "{} examples/{sample_type}/main.py -m {} --config {conf} \
--data {dataset}/{data_name}/ --log-dir={res}/logs/ --metrics-dump \
{res}/{mod_name}.json"
@staticmethod
def run_cmd(comm):
print()
print(comm)
print()
com_line = shlex.split(comm)
result = subprocess.Popen(com_line, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
err_string = None
while result.poll() is None:
stdout_line = result.stdout.readline().decode('utf-8').strip()
if re.search("Error:", stdout_line):
err_string = stdout_line
if stdout_line != "":
print(stdout_line)
return err_string
@staticmethod
def make_table_row(test, expected_, metrics_type_, key, error_message, metric, diff_target, fp32_metric_=None,
diff_fp32=None):
TestSotaCheckpoints.test = test
if metric != 0:
if fp32_metric_ is None:
fp32_metric_ = "-"
diff_fp32 = "-"
if test == 'eval':
row = [str(key), str(expected_), str(metric), str(fp32_metric_), str(metrics_type_),
str(diff_fp32), str(diff_target), str("-")]
else:
row = [str(key), str(expected_), str(metric), str(metrics_type_), str(diff_target), str("-")]
else:
if fp32_metric_ is None:
fp32_metric_ = "-"
if test == 'eval':
row = [str(key), str(expected_), str("Not executed"), str(fp32_metric_), str(metrics_type_),
str("-"), str("-"), str(error_message)]
else:
row = [str(key), str(expected_), str("Not executed"), str(metrics_type_), str("-"), str(error_message)]
return row
def write_results_table(self, init_table_string):
result_table = PrettyTable()
result_table.field_names = init_table_string
for key in self.row_dict:
result_table.add_row(self.row_dict[key])
print()
print(result_table)
doc, tag, text = Doc().tagtext()
doc.asis('<!DOCTYPE html>')
with tag('p'):
text('legend: ')
with tag('p'):
with tag('span', style="Background-color: #{}".format(BG_COLOR_GREEN_HEX)):
text('Thresholds for FP32 and Expected are passed')
with tag('p'):
with tag('span', style="Background-color: #{}".format(BG_COLOR_YELLOW_HEX)):
text('Thresholds for Expected is failed, but for FP32 passed')
with tag('p'):
with tag('span', style="Background-color: #{}".format(BG_COLOR_RED_HEX)):
text('Thresholds for FP32 and Expected are failed')
with tag('table', border="1", cellpadding="5", style="border-collapse: collapse; border: 1px solid black;"):
with tag('tr'):
for i in init_table_string:
with tag('td'):
text(i)
for key in self.row_dict:
with tag('tr', bgcolor='{}'.format(self.color_dict[key])):
for i in self.row_dict[key]:
if i is None:
i = '-'
with tag('td'):
text(i)
f = open('results.html', 'w')
f.write(doc.getvalue())
f.close()
@staticmethod
def threshold_check(err, diff_target, diff_fp32_min_=None, diff_fp32_max_=None, fp32_metric=None,
diff_fp32=None, diff_target_min=None, diff_target_max=None):
color = BG_COLOR_RED_HEX
within_thresholds = False
if not diff_target_min:
diff_target_min = DIFF_TARGET_MIN_GLOBAL
if not diff_target_max:
diff_target_max = DIFF_TARGET_MAX_GLOBAL
if not diff_fp32_min_:
diff_fp32_min_ = DIFF_FP32_MIN_GLOBAL
if not diff_fp32_max_:
diff_fp32_max_ = DIFF_FP32_MAX_GLOBAL
if err is None:
if fp32_metric is not None:
if diff_fp32_min_ < diff_fp32 < diff_fp32_max_ and diff_target_min < diff_target < diff_target_max:
color = BG_COLOR_GREEN_HEX
within_thresholds = True
elif diff_fp32_min_ < diff_fp32 < diff_fp32_max_:
color = BG_COLOR_YELLOW_HEX
elif diff_target_min < diff_target < diff_target_max:
color = BG_COLOR_GREEN_HEX
within_thresholds = True
return color, within_thresholds
@staticmethod
def write_common_metrics_file():
metric_value = OrderedDict()
for i in TestSotaCheckpoints.ids_list:
with open('{}.json'.format(i)) as metric_file:
metrics = json.load(metric_file)
metric_value[i] = metrics['Accuracy']
if os.path.isfile('metrics.json'):
path = Path('metrics.json')
data = json.loads(path.read_text(encoding='utf-8'))
data.update(metric_value)
path.write_text(json.dumps(data, indent=4), encoding='utf-8')
else:
with open('metrics.json', 'w') as outfile:
json.dump(metric_value, outfile)
@staticmethod
def read_metric(model_name_):
with open('{}.json'.format(model_name_)) as metric_file:
metrics = json.load(metric_file)
return metrics['Accuracy']
sota_eval_config = json.load(open('{}/sota_checkpoints_eval.json'.format(TEST_ROOT)),
object_pairs_hook=OrderedDict)
for sample_type_ in sota_eval_config:
datasets = sota_eval_config[sample_type_]
for dataset_name in datasets:
model_dict = datasets[dataset_name]
for model_name in model_dict:
config_name = model_dict[model_name].get('config', {})
reference = None
if model_dict[model_name].get('reference', {}):
reference = model_dict[model_name].get('reference', {})
expected = model_dict[model_name].get('target', {})
metric_type = model_dict[model_name].get('metric_type', {})
if model_dict[model_name].get('resume', {}):
resume_file = model_dict[model_name].get('resume', {})
else:
resume_file = None
if model_dict[model_name].get('batch', {}):
batch = model_dict[model_name].get('batch', {})
else:
batch = None
                    diff_fp32_min = model_dict[model_name].get('diff_fp32_min')
                    diff_fp32_max = model_dict[model_name].get('diff_fp32_max')
                    diff_target_min = model_dict[model_name].get('diff_target_min')
                    diff_target_max = model_dict[model_name].get('diff_target_max')
param_list.append((config_name, reference, expected, metric_type, dataset_name, sample_type_,
resume_file, batch, diff_fp32_min, diff_fp32_max, model_name, diff_target_min,
diff_target_max))
ids_list.append(model_name)
if model_dict[model_name].get('compression_description', {}):
train_param_list.append((config_name, expected, metric_type, dataset_name, sample_type_,
model_name))
train_ids_list.append(model_name)
@pytest.mark.parametrize("config_name_, reference_, expected_, metric_type_, dataset_name_, _sample_type_,"
" resume_file_, batch_, diff_fp32_min_, diff_fp32_max_, model_name_, diff_target_min_, "
"diff_target_max_", param_list,
ids=ids_list)
def test_eval(self, sota_checkpoints_dir, sota_data_dir, config_name_, reference_, expected_, metric_type_,
dataset_name_, _sample_type_, resume_file_, batch_, diff_fp32_min_, diff_fp32_max_, model_name_,
diff_target_min_, diff_target_max_):
test = "eval"
os.chdir(results_path)
cmd = self.cmd.format(sys.executable, 'test', conf=config_name_, dataset=sota_data_dir, data_name=dataset_name_,
sample_type=_sample_type_, res=results_path, mod_name=model_name_)
if resume_file_:
resume = resume_file_
cmd += " --resume {}/{}".format(sota_checkpoints_dir, resume)
else:
cmd += " --pretrained"
if batch_:
cmd += " -b {}".format(batch_)
err = self.run_cmd(cmd)
metric_value = self.read_metric(model_name_)
fp32_metric = None
if reference_ is not None:
with open('{}.json'.format(reference_)) as ref_metric:
metrics = json.load(ref_metric)
fp32_metric = metrics['Accuracy']
diff_target = round((metric_value - expected_), 2)
diff_fp32 = round((metric_value - fp32_metric), 2) if fp32_metric is not None else None
self.row_dict[model_name_] = self.make_table_row(test, expected_, metric_type_, model_name_, err,
metric_value, diff_target, fp32_metric, diff_fp32)
self.color_dict[model_name_], is_accuracy_within_thresholds = self.threshold_check(err, diff_target,
diff_fp32_min_,
diff_fp32_max_,
fp32_metric,
diff_fp32,
diff_target_min_,
diff_target_max_)
assert is_accuracy_within_thresholds
@pytest.mark.parametrize("config_name_, expected_, metric_type_, dataset_name_, _sample_type_, model_name_",
train_param_list, ids=train_ids_list)
def test_train(self, sota_data_dir, config_name_, expected_, metric_type_, dataset_name_, _sample_type_,
model_name_):
os.chdir(results_path)
test = 'train'
cmd = self.cmd.format(sys.executable, 'train', conf=config_name_, dataset=sota_data_dir,
data_name=dataset_name_, sample_type=_sample_type_, res=results_path,
mod_name=model_name_)
err = self.run_cmd(cmd)
metric_value = self.read_metric(model_name_)
diff_target = round((metric_value - expected_), 2)
self.row_dict[model_name_] = self.make_table_row(test, expected_, metric_type_, model_name_, err, metric_value,
diff_target)
self.color_dict[model_name_], is_accuracy_within_thresholds = self.threshold_check(err, diff_target)
assert is_accuracy_within_thresholds
Tsc = TestSotaCheckpoints
@pytest.fixture(autouse=True, scope="module")
def skip_params(sota_data_dir):
if sota_data_dir is None:
pytest.skip('Path to datasets is not set')
@pytest.fixture(autouse=True, scope="class")
def results():
yield
Tsc.write_common_metrics_file()
if Tsc.test == "eval":
header = ["Model", "Expected", "Measured", "Reference FP32", "Metrics type", "Diff FP32", "Diff Expected",
"Error"]
else:
header = ["Model", "Expected", "Measured", "Metrics type", "Diff Expected", "Error"]
Tsc().write_results_table(header)
|
"""
Handles special relationships
"""
##########################################
# TODO - fix import mess
# Cannot import _general_copy here since it
# will result in a circular import
##########################################
from qcexport_extra_collection import _add_collection
from qcfractal.storage_sockets.models import (
CollectionORM,
QueueManagerORM,
OptimizationProcedureORM,
GridOptimizationProcedureORM,
TorsionDriveProcedureORM,
)
def _add_procedure_mixin(procedure_table, orm_obj, src_info, session_dest, session_src, new_pk_map, options, indent):
"""Handling of common parts of procedures"""
from qcexport import _general_copy
# Fix keywords in the qc_spec column
keyword_id = orm_obj.qc_spec["keywords"]
if keyword_id is not None:
# Is it a hash? That is incorrect
if not keyword_id.isdecimal():
print(indent + f"!!! Keyword {keyword_id} is not an integer!")
else:
new_kw = _general_copy(
"keywords",
session_dest,
session_src,
new_pk_map,
options,
filter_by={"id": src_info["qc_spec"]["keywords"]},
single=True,
indent=indent + " ",
)
orm_obj.qc_spec["keywords"] = new_kw["id"]
def _add_optimization_procedure(orm_obj, src_info, session_dest, session_src, new_pk_map, options, indent):
from qcexport import _general_copy
print(indent + f'$ Adding extra children for optimization procedure {src_info["id"]}')
_general_copy(
"opt_result_association",
session_dest,
session_src,
new_pk_map,
options,
filter_by={"opt_id": src_info["id"]},
indent=indent + " ",
)
_add_procedure_mixin(
"optimization_procedure", orm_obj, src_info, session_dest, session_src, new_pk_map, options, indent
)
def _add_gridoptimization_procedure(orm_obj, src_info, session_dest, session_src, new_pk_map, options, indent):
from qcexport import _general_copy
print(indent + f'$ Adding extra children for grid optimization procedure {src_info["id"]}')
_general_copy(
"grid_optimization_association",
session_dest,
session_src,
new_pk_map,
options,
filter_by={"grid_opt_id": src_info["id"]},
indent=indent + " ",
)
_add_procedure_mixin(
"grid_optimization_procedure", orm_obj, src_info, session_dest, session_src, new_pk_map, options, indent
)
def _add_torsiondrive_procedure(orm_obj, src_info, session_dest, session_src, new_pk_map, options, indent):
from qcexport import _general_copy
print(indent + f'$ Adding extra children for torsiondrive procedure {src_info["id"]}')
_general_copy(
"torsion_init_mol_association",
session_dest,
session_src,
new_pk_map,
options,
filter_by={"torsion_id": src_info["id"]},
indent=indent + " ",
)
_add_procedure_mixin(
"torsiondrive_procedure", orm_obj, src_info, session_dest, session_src, new_pk_map, options, indent
)
def _add_queuemanager(orm_obj, src_info, session_dest, session_src, new_pk_map, options, indent):
"""Adds extra info for queue managers (ie, logs)"""
from qcexport import _general_copy
print(indent + f'$ Adding extra children for queue manager {src_info["id"]}:{src_info["name"]}')
max_limit = options.get("queue_manager_log_max", None)
# Add the logs for the queue manager
_general_copy(
table_name="queue_manager_logs",
session_dest=session_dest,
session_src=session_src,
new_pk_map=new_pk_map,
options=options,
filter_by={"manager_id": src_info["id"]},
order_by={"id": "desc"},
limit=max_limit,
indent=indent + " ",
)
extra_children_map = {
CollectionORM: _add_collection,
QueueManagerORM: _add_queuemanager,
OptimizationProcedureORM: _add_optimization_procedure,
GridOptimizationProcedureORM: _add_gridoptimization_procedure,
TorsionDriveProcedureORM: _add_torsiondrive_procedure,
}
|
from random import randint
def sorteio(lst):
    for x in range(0, 5):
        lst.append(randint(0, 10))
    print(f'Drawing a list of values: {lst} Done!')
def somapares(vl):
    s = 0
    for x in vl:
        if x % 2 == 0:
            s += x
    print(f'For the list {vl}, the sum of its even values is {s}')
lis=[]
sorteio(lis)
somapares(lis)
|
"""
Module for reaction based on Ion class
TODO,
"""
import copy
import re
import io
import os
import time
from collections.abc import Iterable
import numpy as np
from numpy.linalg import matrix_power
from scipy.linalg import solve_banded
import isotope
from logged import Logged
from utils import stuple, project, CachedAttribute
from version2human import version2human
from utils import cachedmethod
import physconst
class DuplicateIons(Exception):
"""
    Exception raised when ion lists or sets illegally contain duplicate ions
"""
def __init__(self, ions = None):
if ions is None:
super().__init__('Duplicate ions.')
else:
super().__init__('Duplicate ions: {}'.format(ions))
class IonList(object):
"""
Unsorted list of ions - fixed positions.
TODO - Maybe not allow KepIon and Ion mixed - all things with extra flags.
    TODO - add some of AbuSet functionality, e.g., return all
    Elements, maybe mapping matrices (move functionality here?)
"""
def __init__(self, ions = None, duplicates = None):
"""
duplicates:
True
False - drop
None - raise exception
"""
self._ions = np.array((), dtype = np.object)
self.duplicates = duplicates
self.add(ions)
def add(self, ions = None):
"""
Add ion(s) to list.
"""
if ions is None:
return
if isinstance(ions, self.__class__):
self._ions = np.append(self._ions, ions)
return
ions = np.atleast_1d(ions)
ions = np.array([i if isinstance(i, isotope.Ion) else isotope.ion(i) for i in ions])
if self.duplicates is True:
self._ions = np.append(self._ions, ions)
elif self.duplicates is False:
_, ii = np.unique(ions, return_index = True)
ions = ions[sorted(ii)]
ii = np.in1d(ions, self._ions)
self._ions = np.append(self._ions, ions[~ii])
elif self.duplicates is None:
ions = np.append(self._ions, ions)
ix, nx = np.unique(ions, return_counts = True)
ix = ix[nx > 1]
if len(ix) > 0:
raise DuplicateIons(ix)
self._ions = ions
else:
raise Exception('Invalid mode for "duplicates": {}'.format(self.duplicates))
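    # Illustrative behaviour of the duplicates flag (ion names are examples):
    #     IonList(['h1', 'he4', 'h1'], duplicates=True)   keeps both 'h1' entries
    #     IonList(['h1', 'he4', 'h1'], duplicates=False)  silently drops the repeat
    #     IonList(['h1', 'he4', 'h1'])                    raises DuplicateIons (duplicates=None)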
def index(self, ion):
if not isinstance(ion, isotope.Ion):
ion = isotope.ion(ion)
i = np.argwhere(self._ions == ion)
if len(i) == 0:
return -1
else:
return i.flatten()[0]
def __eq__(self, other):
# assert isinstance(other, IonList)
if len(self) != len(other):
return False
for i,j in zip(self, other):
if not i == j:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def ions(self):
return copy.deepcopy(self._ions)
def copy(self):
new = self.__class__()
new._ions = self._ions.copy()
return new
# copy from IonList:
def __str__(self):
return "[{}]".format(','.join((str(i) for i in self._ions)))
def __repr__(self):
return "{}({})".format(self.__class__.__name__,str(self))
def __getitem__(self, index):
return self._ions[index]
def __len__(self):
"""
Return number of isotopes.
"""
return len(self._ions)
def __iter__(self):
for ion in self._ions:
yield ion
def __add__(self, other):
new = self.copy()
new.add(other)
return new
@property
def A(self):
return isotope.ufunc_A(self._ions)
@property
def Z(self):
return isotope.ufunc_Z(self._ions)
@property
def N(self):
return isotope.ufunc_N(self._ions)
class IonSet(object):
"""
Provide sorted list of isotopes.
TODO: efficient additions of arrays of ions.
I suppose in principle we do want :set: properties but it also is
supposed to be sorted by index at all times if _sort is set.
Maybe there need be 2 kinds to allow interface with ionmap:
ionmap also allows to have elements, etc., mixed.
TODO - Maybe not KepIon and Ion mixed - things with extra flags.
TODO - Do not allow duplicates
NOTE - I think there should be only one, IonSet or IonList
"""
def __init__(self, *args, **kwargs):
self._ions = np.array((), dtype = np.object)
self._sort = kwargs.get('sort', True)
self._type = kwargs.get('type', None)
assert self._type is None or issubclass(self._type, isotope.Ion), 'type needs to be subclass of Ion'
self.add(args)
def copy(self):
"""
return a copy
"""
return copy.copy(self)
def add(self, ions = None):
"""
Add ion(s) to list.
Ions need to be same "type"
"""
if isinstance(ions, str):
ions = tuple(isotope.ion(i) for i in re.split('[\'",;\s]+', ions))
if len(ions) == 1:
try:
ions = ions[0]
except:
pass
if (ions is not None):
if np.isscalar(ions):
self._add_one(ions)
elif len(ions) == 1:
while isinstance(ions, Iterable) and not isinstance(ions, str):
ions = ions[0]
self._add_one(ions)
else:
for ion in ions:
self._add_one(ion, update = False)
self._update()
def _add_one(self, ix, update = True):
"""
add one ion and check compatibility
"""
if self._type is None:
self._type = type(ix)
if not issubclass(self._type, isotope.Ion):
self._type = type(isotope.ion(ix))
if isinstance(ix, isotope.Ion) and not isinstance(ix, self._type):
raise TypeError("All ions need compatible type: {:s}, {:s}".format(
str(self._type),str(type(ix))))
if not isinstance(ix, self._type):
ix = self._type(ix)
self._ions = np.append(self._ions, ix)
if update:
self._update()
def _update(self):
if self._sort:
self._ions.sort()
self._ions = np.unique(self._ions)
def __str__(self):
return "[{}]".format(','.join((str(i) for i in self._ions)))
def __repr__(self):
return "{}({})".format(self.__class__.__name__,str(self))
def __getitem__(self, index):
return self._ions[index]
def __len__(self):
"""
Return number of isotopes.
"""
return len(self._ions)
def __iter__(self):
i=0
while i < self.__len__():
yield self._ions[i]
i+=1
def __add__(self, other):
new = self.copy()
new.add(other)
return new
class Abu(object):
"""
Ion plus single abundance. Provide some general functionality.
TODO - add most of AbuSet functionality
TODO Contain mass information?
"""
def __init__(self, ion, abu = 0):
self.ion = ion
self.abu = np.float64(abu)
def __str__(self):
return "{!s}:{:>12.5f}".format(self.ion,self.abu)
def X(self):
return self.abu
def Y(self):
return self.abu / self.ion.A
class AbuDist(object):
"""
Ion and distribution information, e.g., structure.
TODO - implement
TODO - add Abu(Set) and AbuData functionalities
"""
# should be initialized with Ion and abundance vector,
# mass and radius would be good.
pass
class AbuData(object):
"""
n-D abu data plus extra info.
ions are in shape[0]
Probably base class (if not identical) to starfit data.
"""
def __init__(self,
data = None,
ions = None,
molfrac = None,
#fields = None,
#formats = None,
#values = None,
#info = None,
):
self.ions = IonList(ions)
self.data = data
self.molfrac = molfrac
#self.fields = fields
#self.formats = formats
#self.values = values
def __len__(self):
return np.prod(self.data.shape[:-1])
def abu(self, molfrac = None):
"""
abundance
"""
if molfrac is None:
molfrac = self.molfrac
if molfrac == self.molfrac:
yfac = np.ones(self.data.shape[1])
else:
yfac = isotope.ufunc_A(self.ions)
if molfrac and not self.molfrac:
yfac = 1 / yfac
value = np.ndarray(self.data.shape, dtype = np.float64)
value[...,:] = self.data[...,:] * yfac
return value
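# Sketch of the mass/mol-fraction conversion performed above (assumed convention):
#   stored mass fractions X, requested mol fractions Y:  Y = X / A
#   stored mol fractions Y, requested mass fractions X:  X = Y * A
# e.g. for pure he4 stored as X = 1.0, abu(molfrac=True) would give Y = 0.25.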
def updated(self,
data = None,
ions = None,
molfrac = None,
):
"""
return updated copy of self
"""
new = copy.deepcopy(self)
new.data = data.copy()
new.ions = IonList(ions.copy())
if molfrac is not None:
new.molfrac = molfrac
return new
def decayed(self, decay = None, **decpar):
"""
Return copy of self with yields replaced by decayed yields.
"""
from ionmap import Decay
return Decay.Map(self, **decpar)
def cleaned(self, threshold = 1.e-25, debug = False):
maxima = np.max(self.data, axis = 0)
ii, = np.where(maxima > threshold)
save_data = self.data
save_ions = self.ions
self.data = self.data[:, ii]
self.ions = self.ions.__class__(self.ions[ii])
if debug:
print('[{}] removing {}.'.format(
self.__class__.__name__,
', '.join(str(i) for i in (set(save_ions) - set(self.ions))
)
))
new = copy.deepcopy(self)
self.data = save_data
self.ions = save_ions
return new
@property
def A(self):
return self.ions.A
@property
def Z(self):
return self.ions.Z
@property
def N(self):
return self.ions.N
class AbuDump(AbuData):
"""
KEPLER BURN abundance set. To hold KEPLER ppnb data (plus wind)
"""
def __init__(self,
data = None,
ions = None,
windb = None,
molfrac = None,
bottom = None,
xm = None,
zm = None,
):
"""
xm is mass of zone in g
zm is mass coordinate of the outer zone boundary
if there are n star data zones, they should be in zones 1..n; zone 0 is
the inner boundary condition (could end up having non-zero data
in accretion problems); zone n+1 holds the wind data
abundance data should be mol fractions or mass fractions
data on import has format [isotopes, zones+2], but is stored
internally as [zones+2, isotopes]
molfrac determines whether the input is in mol fractions;
wise to set if known; otherwise the code tries automatic determination,
which may fail.
"""
if isinstance(data, AbuData):
self.data = data.data.copy()
self.ions = data.ions.copy()
self.molfrac = data.molfrac
assert ions is None
assert molfrac is None
if bottom is None:
bottom = 1
self.i0 = bottom
self.i1 = data.data.shape[1]
#check for wind - if abu in wind is 0, reduce i1 by 1
if np.sum(self.data[-1,:]) == 0:
self.has_wind = False
self.i1 -= 1
else:
self.has_wind = True
else:
self.data = data.copy().transpose()
self.ions = IonList(ions)
if molfrac is None:
molfrac = (1 - np.sum(self.data[1,:])) > 0.1
self.molfrac = molfrac
self.i0 = 1
self.i1 = self.data.shape[0] - 1
if windb is not None:
self.data[-1,:] = windb
self.i1 += 1
self.has_wind = True
else:
self.has_wind = False
self.xm = xm
self.zm = zm
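# Zone layout assumed by the indices set above (a sketch, not authoritative):
#   index 0        -> inner boundary zone (excluded, i0 = 1 by default)
#   indices 1..n   -> stellar zones
#   index n+1      -> wind zone (included only if wind data are present)
# self.i0:self.i1 therefore selects the zones that carry valid abundance data.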
def __iter__(self, **kw):
for ion in self.ions:
yield (ion, self.ion_abu(ion, **kw))
def ion_abu(self,
ion,
molfrac = False,
missing = np.nan):
"""
Return isotope abundance
"""
if not isinstance(ion, isotope.Ion):
ion = isotope.ion(ion)
if ion == isotope.VOID:
value = np.ndarray(self.data.shape[0], dtype = np.float64)
value.fill(missing)
return value
# return self.ion_abu(ion, molfrac = molfrac, missing = missing)
yfac = max(1, ion.A)
if molfrac == self.molfrac:
yfac = 1
if molfrac and not self.molfrac:
yfac = 1 / yfac
value = np.ndarray(self.data.shape[0], dtype = np.float64)
i = self.ions.index(ion)
assert i != -1, 'ion {} not in data set'.format(ion)
value[self.i0:self.i1] = self.data[self.i0:self.i1,i] * yfac
value[:self.i0] = missing
value[self.i1:] = missing
return value
@cachedmethod
def __call__(self, *args, **kw):
return self.ion_abu(*args, **kw)
@cachedmethod
def __getitem__(self, ion):
return self.ion_abu(ion)
def __getattr__(self, attr):
if attr in self.ions:
return self.ion_abu(attr)
return super().__getattribute__(attr)
def abu(self,
molfrac = None,
missing = np.nan,
wind = True,
bottom = True):
"""
abundance
"""
i1 = self.data.shape[0] - ( 0 if wind else 1)
i0 = 0 if bottom else 1
if molfrac is None:
molfrac = self.molfrac
if molfrac == self.molfrac:
yfac = np.ones(self.data.shape[1])
else:
yfac = isotope.ufunc_A(self.ions)
if molfrac and not self.molfrac:
yfac = 1 / yfac
value = np.ndarray(self.data.shape, dtype = np.float64)
value[self.i0:self.i1,:] = self.data[self.i0:self.i1,:] * yfac[np.newaxis,:]
value[:self.i0,:] = missing
value[self.i1:,:] = missing
return value[i0:i1]
def project(self,
output = 'massfrac',
xm = None,
zones = None,
):
"""
Return projected yields as AbuSet object.
Output = massfrac | molfrac | g | mol | Msun
"""
if xm is None:
try:
xm = self.xm
except AttributeError:
raise Exception('Need to have zone mass defined.')
assert xm.shape[0] == self.data.shape[0]
if zones is None:
# exclude "phony" zones but leave in wind
zones = slice(self.i0, self.i1)
norm = 1
if output == 'massfrac':
molfrac = False
norm = 1 / np.sum(xm[zones])
elif output == 'molfrac':
molfrac = True
norm = 1 / np.sum(xm[zones])
elif output == 'mol':
molfrac = True
elif output == 'g':
molfrac = False
elif output == 'Msun':
molfrac = False
norm = 1 / physconst.XMSUN
if molfrac is None:
molfrac = self.molfrac
if molfrac == self.molfrac:
yfac = np.ones(self.data.shape[1])
else:
yfac = isotope.ufunc_A(self.ions)
if molfrac and not self.molfrac:
yfac = 1 / yfac
value = np.tensordot(self.data[zones, :], xm[zones], axes=(0, 0)) * yfac * norm
return AbuSet(
iso = self.ions,
abu = value,
normalize = False,
unit = '?')
def mix(self,
mbox,
iterations = 4,
xm = None,
zones = None,
):
"""
Mix abundance dump and return new mixed object.
"""
if xm is None:
try:
xm = self.xm
except AttributeError:
raise Exception('Need to have zone mass defined.')
assert xm.shape[0] == self.data.shape[0], 'require same number of zones in xm and data set'
if zones is None:
# exclude "phony" and surface zones
if self.has_wind:
zones = slice(self.i0, self.i1 - 1)
else:
zones = slice(self.i0, self.i1)
# convert to list of zones
if isinstance(zones, slice):
zones = np.array(range(*zones.indices(len(xm))))
if not isinstance(zones, np.ndarray):
zones = np.array(zones)
nzones = len(zones)
mat = np.zeros((nzones, nzones), dtype=np.float64)
xm = xm[zones]
# compute contributions
# mat of [i,j] contains contributions of zone j to zone i
xmt = 0.
j = -1
i = 0
for xmi in xm:
j += 1
xmt += xmi
if xmt >= mbox:
break
if xmt < mbox:
# complete mixing
mat[:, :] = xm[np.newaxis, :] / xmt
else:
mat[i, i:j+1] = xm[i:j+1] / xmt
for k in range(1, nzones):
j0 = j
xmt -= xm[k - 1]
xmt0 = xmt
while xmt <= mbox:
j += 1
xmt += xm[j]
if j == nzones - 1:
break
if k <= j0:
mat[k, :j0+1] = mat[k-1, :j0+1] * (xmt0 / xmt)
if j > j0:
mat[k, j0+1:j+1] = xm[j0+1:j+1] / xmt
i = k
if j == nzones - 1:
break
# add flat tail
mat[i+1:, :] = mat[i, :]
mat = matrix_power(np.transpose(mat), iterations)
result = copy.deepcopy(self)
result.data[zones, ...] = np.tensordot(mat, result.data[zones, ...], axes = (0, 0))
return result
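# Sketch of the boxcar mixing above: each row of 'mat' redistributes mass over a
# running box of width ~mbox, and matrix_power(mat.T, iterations) applies that
# smoothing 'iterations' times, e.g. (hypothetical call, dump must carry xm):
#   mixed = dump.mix(mbox=0.1 * physconst.XMSUN, iterations=4)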
def mix_diff(self,
diff,
xm = None,
zones = None,
):
"""
Mix abundance dump and return new mixed object.
'diff' is the mix width using diffusion-like approximation
(Gaussian-like mixing with widths mbox). In a diffusion
approximation diff would correspond to sqrt(D * dt) where D is
the diffusion coefficient and dt is the time [step]. Hence
the unit of diff here is 'mass' (g) for convenience.
'diff' may also be an array of same dimension as xm. The
mixing magnitudes are defined on the zone interfaces similar
to the mass coordinates zm.
It appears similar results in the near field are obtained with
a 'diff' value of about 2x the 'mix' 'mbox' value; in the far
field, a value of sqrt(2) gives more similar results (on log
scale).
"""
if xm is None:
try:
xm = self.xm
except AttributeError:
raise Exception('Need to have zone mass defined.')
assert xm.shape[0] == self.data.shape[0], 'require same number of zones in xm and data set'
if zones is None:
# exclude "phony" and surface zones
if self.has_wind:
zones = slice(self.i0, self.i1 - 1)
else:
zones = slice(self.i0, self.i1)
# convert to list of zones
if isinstance(zones, slice):
zones = np.array(range(*zones.indices(len(xm))))
if not isinstance(zones, np.ndarray):
zones = np.array(zones)
nzones = len(zones)
xm = xm[zones]
# compute forward matrix
# mat of [i,j] contains contributions of zone j to zone i
if np.isscalar(diff):
diff = np.tile(diff, len(xm))
elif len(diff) == len(self.xm):
diff = diff[zones]
assert len(diff) == len(xm)
xmim = np.zeros_like(xm)
xmip = np.zeros_like(xm)
ip = np.arange(1,nzones)
im = np.arange(0,nzones-1)
ia = np.arange(0,nzones)
xmi = -2 / xm
xmip[im] = diff[im]**2 / (xm[im] + xm[im+1])
xmim[ip] = xmip[im]
mat = np.ndarray((3,nzones), dtype=np.float64)
mat[0,ip] = xmi[im] * xmip[im]
mat[1,ia] = -xmi[ia] * (xmip[ia] + xmim[ia]) + 1
mat[2,im] = xmi[ip] * xmim[ip]
result = copy.deepcopy(self)
result.data[zones, ...] = solve_banded((1,1), mat, result.data[zones, ...])
return result
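# Diffusion-like mixing sketch: with diff ~ sqrt(D * dt) the banded system solved
# above corresponds to one implicit diffusion step. Per the docstring, a rough
# (assumed, not exact) equivalence to mix() would be:
#   dump.mix_diff(2.0 * mbox)         # similar to dump.mix(mbox) in the near field
#   dump.mix_diff(np.sqrt(2) * mbox)  # closer in the far field (on log scale)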
@CachedAttribute
def abar(self):
"""
Compute Abar as a function of coordinate
"""
if not self.molfrac:
A = 1 / isotope.ufunc_A(self.ions)
else:
A = np.ones(self.data.shape[1])
xA = np.tensordot(self.data, A, axes = (1, 0))
return 1 / xA
# @CachedAttribute
# def muI(self):
# """
# mean molecular weight per ion
# """
# if not self.molfrac:
# A = isotope.ufunc_A(self.ions)
# else:
# A = np.ones(self.data.shape[1])
# xA = np.tensordot(self.data, A, axes = (1, 0))
# return 1 / xA
muI = abar
@CachedAttribute
def zbar(self):
"""
mean charge number
"""
Z = isotope.ufunc_Z(self.ions)
if not self.molfrac:
A = isotope.ufunc_A(self.ions)
Z /= A
else:
A = np.ones(self.data.shape[1])
xZ = np.tensordot(self.data, Z, axes = (1, 0))
xA = np.tensordot(self.data, A, axes = (1, 0))
return xZ / xA
@CachedAttribute
def Ye(self):
"""
Compute Ye as a function of coordinate
"""
Z = isotope.ufunc_Z(self.ions)
if not self.molfrac:
Z /= isotope.ufunc_A(self.ions)
Ye = np.tensordot(self.data, Z, axes = (1, 0))
return Ye
@CachedAttribute
def eta(self):
"""
neutron excess eta = 1-2*Ye
"""
return 1 - 2 * self.Ye
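# Worked example for the two quantities above (pure 56Fe, Z = 26, A = 56):
#   Ye  = 26 / 56 ~ 0.464
#   eta = 1 - 2 * Ye ~ 0.071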
@CachedAttribute
def mu(self):
"""
mean molecular weight
"""
Z1 = 1 + isotope.ufunc_Z(self.ions)
if not self.molfrac:
Z1 /= isotope.ufunc_A(self.ions)
xmu = np.tensordot(self.data, Z1, axes = (1, 0))
return 1 / xmu
@CachedAttribute
def mue(self):
"""
mean molecular weight per electron
"""
return 1 / self.Ye
@CachedAttribute
def zm_sun(self):
"""
mass shell interfaces in solar masses
"""
return self.zm / physconst.XMSUN
########################################################################
class AbuSet(Logged):
"""
Prototype Class for single abundance set like solar abundance
Should there be a subclass or a field for normalized/partial abundances?
- Normalization is difficult if addition occurs piecewise ...
How about support for different formats, like [] ... ?
*** TODO ***
- derive using common functions with IonSet
- replace internal variables with "_"-names,
most prominently: iso --> _ions
abu --> _abu
*** Add a way to hold non-normalized values?
"""
def __init__(self,
iso = None,
abu = None,
comment = None,
mixture = None,
dat_file = None,
bg_file = None,
silent = False,
# todo
normalize = False, # only useful for fractions
normalized = False, # only useful for fractions
molfrac = False, #?
unit = 'massfrac', # set up proper unit system
# - g, mol, massfrac, molfrac, M_sun
sorted = False,
sentinel = None,
comp = None,
**kwargs):
"""
Initialize abundance set.
Currently allow call with
* list of isotopes only (abu set to 0)
* list of isotopes and list of abundances
* keyword iso=abu
* dictionary {'iso':abu, ...}
Initialize from data file:
* dat_file (KEPLER abundance data file, like solabu.dat)
* bg_file (KEPLER BURN generator data file)
TODO:
* sorting implementation
* implement molfrac
* proper deal with normalization on/off
* make A, Z, etc., properties rather than functions
* can initialise from another composition for derived classes?
"""
self.comment = stuple(comment)
self.mixture = mixture
self.sentinel = sentinel
self.comp = comp
# TODO - implement
assert molfrac == False
self.molfrac = molfrac
self.unit = unit
self.is_sorted = sorted
if dat_file is not None:
self._from_dat(dat_file, silent = silent)
return
if bg_file is not None:
self._from_bg(bg_file, silent = silent)
return
if isinstance(iso, dict):
self.iso, self.abu = self._ion_abu_from_dict(**iso)
elif iso is None:
assert abu is None, "Need isotope name"
self.iso = np.array([], dtype=np.object)
self.abu = np.array([], dtype=np.float64)
else:
self.iso = np.array([isotope.ion(i) for i in np.atleast_1d(iso)], dtype=np.object)
if abu is not None:
self.abu = np.array(np.atleast_1d(abu), dtype=np.float64)
assert len(self.abu) == len(self.iso), "Need equal number of elements."
else:
self.abu = np.zeros_like(self.iso, dtype=np.float64)
if len(kwargs) > 0:
self._append(*self._ion_abu_from_dict(**kwargs))
if self.is_sorted:
self.sort()
# need to update for general use, e.g., mapped elemental abundances
if molfrac:
self.abu *= self.A()
@staticmethod
def _ion_abu_from_dict(**kwargs):
#todo: add sorting?
#todo: check uniqueness
return (
np.array([isotope.ion(i) for i in kwargs.keys()],
dtype=np.object),
np.array(list(kwargs.values()),
dtype=np.float64))
def _append(self, iso, abu):
#todo: add sorting? - YES
#todo: check uniqueness
self.iso = np.append(self.iso, iso)
self.abu = np.append(self.abu, abu)
def __str__(self):
return ("abu([" + self.unit + "] " +
", ".join(['{:s}: {:8G}'\
.format(iso.Name(),abu)
for iso, abu in zip(self.iso, self.abu)]) +
")")
__repr__ = __str__
def _delete(self, iso):
"""
remove isotope
"""
ii = np.where(self.iso == iso)[0]
assert len(ii) == 1
self.iso = np.delete(self.iso, ii)
self.abu = np.delete(self.abu, ii)
def __delitem__(self, iso):
"""
remove isotope
"""
self._delete(iso)
def normalize(self, total = None):
"""
Normalize abundances to one.
If sum == 0 just return.
"""
abusum = self.abu.sum()
if abusum == 0.:
return
self.abu /= abusum
if total is not None:
self.abu *= total
def normalized(self, total = None):
"""
Return normalized copy of abu.
"""
x = copy.copy(self)
x.normalize(total = total)
return x
def _from_bg(self,
file_name,
silent = False):
"""
Generate abundance set from BURN gen file.
TODO - return tuple with multiple mixtures if file has several
Actually, this should be a module function, not part of
the constructor.
TODO - add option to show comment
"""
self.setup_logger(silent = silent)
self.comment = stuple(self.comment)
xre = re.compile('[-+a-zA-Z0-9.]+')
self.iso = np.array([],dtype=np.object)
self.abu = np.array([],dtype=np.float64)
with open(file_name,'r') as f:
self.logger_file_info(f)
for line in f:
if line.startswith('c '):
self.comment += (line[2:].rstrip(),)
elif line.startswith('m '):
xdata = xre.findall(line)
xnum = len(xdata)
if xnum < 2:
continue
mixture = xdata[1]
if self.mixture is None:
self.mixture = mixture
self.logger.info('Loading mixture "{:s}".'.format(self.mixture))
if self.mixture == mixture:
if xnum < 3:
continue
assert xnum % 2 == 0
xion = xdata[3::2]
xabu = xdata[2::2]
for i,a in zip(xion,xabu):
self._append(isotope.ion(i),np.double(a.replace('D','E').replace('d','e')))
self.close_logger(timing='Data loaded in')
def _from_dat(self,
filename,
silent = False):
"""
Load abundance set from "dat" file.
TODO - add option to show comment
"""
self.setup_logger(silent = silent)
self.comment = stuple(self.comment)
xre = re.compile('[-+a-zA-Z0-9.]+')
self.iso = np.array([],dtype=np.object)
self.abu = np.array([],dtype=np.float64)
with open(filename,'r') as f:
self.logger_file_info(f)
self.comment += ('',
'Generated from file "{:s}".'.format(filename),
'Original file comments follow:',
'')
for line in f:
if not line.startswith((';','#')):
xdata = xre.findall(line)
xnum = len(xdata)
if xnum == 0:
continue
if xnum == 2:
xion,xabu = tuple(xdata)
else:
print(line)
raise IOError('bad format')
self._append(isotope.ion(xion), np.double(xabu))
else:
self.comment += (line[2:].rstrip(),)
self.filename = filename
message = "{:3d} isotopes loaded in".format(len(self.iso))
self.close_logger(timing = message)
def _new_order(self):
# reset things that depend on order
# this should be called every time things are added or removed.
# try:
# del self.approx_map
# except:
# pass
pass
def ionset(self):
return IonSet(self.ions)
def sort(self):
"""
Sort ions.
"""
if self.iso.size == 0:
return
sort = self.iso.argsort()
self.iso = self.iso[sort]
self.abu = self.abu[sort]
self._new_order()
def write_bg(self,
outfile,
net = 1,
mixture = None,
zmax = 83,
overwrite = False,
silent = False,
write_net = True):
"""
Write out BURN generator.
If outfile is file use this.
If outfile is a filename open outfile for writing.
We need to assert gaps around be8, b9
(though it is not clear why KEPLER needs these gaps).
We do need to assert this, however, because ADAPNET preserves gaps
for this reason and does not introduce gaps in a new network, which
also makes the work of ADAPNET easier.
Eventually KEPLER should check for these gaps and issue errors
if present. Maybe it already does?
"""
self.setup_logger(silent = silent)
# minnet from adapnet general purpose network
# we could load this, but for now, we just hard-code.
# note the gaps for Be8 and B9 that appear to be required
# by KEPLER - though it is not clear why
# maybe because these isotopes are not in bdat
# TODO - ask RDH to add to future versions of bdat.
minnet = [[[ 1, 1]],
[[ 1, 3]],
[[ 3, 4]],
[[ 6, 7]],
[[ 7, 7], [ 9, 9]],
[[ 8, 8], [10, 11]],
[[11,13]],
[[13,15]],
[[14,18]],
[[17,19]],
[[19,22]],
[[21,23]],
[[23,26]],
[[25,27]],
[[27,30]],
[[30,31]],
[[31,36]]]
version = 10000
default_mixture = 'x'
card_cmt = 'c'
card_mix = 'm'
card_grd = 'gg'
card_ntw = 'netw'
zmax = min(zmax+1, len(isotope.elements))
netw = np.ndarray((zmax,2,2), dtype=np.int64)
netw_count = np.zeros(zmax, dtype=np.int64)
for iz,z in enumerate(minnet):
for ia,a in enumerate(z):
netw[iz,ia,:] = a
netw_count[iz] = len(z)
niso = 0
iso = np.zeros_like(self.iso)
abu = np.zeros_like(self.abu)
for i,a in zip(self.iso, self.abu):
Z = i.Z
if Z < zmax:
A = i.A
n = netw_count[Z]
if n == 0:
netw_count[Z] = 1
netw[Z,0,:] = A
else:
netw[Z,0,0] = min(A, netw[Z,0,0])
netw[Z,n-1,1] = max(A, netw[Z,n-1,1])
add = True
if n == 2:
if (A > netw[Z,0,1]) and (A < netw[Z,1,0]):
self.logger.error('{:s} inside required gap.'.format(i.name()))
add = False
if add:
iso[niso] = i
abu[niso] = a
niso += 1
iso = iso[:niso]
abu = abu[:niso]
if mixture is None:
try:
mixture = self.mixture
except:
pass
if mixture is None:
mixture = default_mixture
if not isinstance(outfile, io.IOBase):
filename = os.path.expanduser(os.path.expandvars(outfile))
assert overwrite or not os.path.exists(filename), 'file exists'
f = open(filename,'w')
else:
f = outfile
if write_net:
f.write(card_cmt + ' COMPUTER-GENERATED BURN GENERATOR FILE\n')
f.write(card_cmt + ' VERSION {:s}'.format(version2human(version))+'\n')
f.write(card_cmt + ' ' + time.asctime(time.gmtime())+' UTC\n')
f.write(card_cmt + '\n')
for c in self.comment:
f.write("{:s} {:s}\n".format(card_cmt,c))
f.write(card_cmt + '\n')
f.write(card_cmt + ' define network (Z_max = {:d})\n'.format(zmax-1))
for i in range(zmax):
nc = netw_count[i]
if nc > 0:
c = " ".join(["{:3d} {:3d}".format(*netw[i,j,:]) for j in range(nc)])
f.write("{:s} {:d} {:2s} {:s}\n".format(
card_ntw,
net,
isotope.elements[i],
c,
))
f.write(card_cmt + '\n')
f.write(card_cmt + ' define composition '+mixture+'\n')
for i,a in zip(iso, abu):
f.write("{:s} {:s} {:14.8E} {:s}\n".format(card_mix,mixture,a,i.name()))
if write_net:
f.write(card_cmt + '\n')
f.write(card_cmt + ' specify grid composition (homogeneous star)\n')
f.write(card_cmt + ' NOTE: call only after all "g" cards in main generator\n')
f.write("{:s} {:d} {:s}\n".format(card_grd,net,mixture))
if not isinstance(outfile, io.IOBase):
f.close()
if write_net:
self.close_logger(timing='BURN generator written to "{:s}" in'.format(f.name))
else:
self.close_logger(timing='BURN mixture {:s} written in'.format(mixture))
def write_dat(self,
outfile,
overwrite = False,
silent = False):
"""
Write out dat file.
If file is file use this.
If file is file name open file for write.
"""
self.setup_logger(silent = silent)
version = 10000
card_cmt = ';'
if not isinstance(outfile, io.IOBase):
filename = os.path.expanduser(os.path.expandvars(outfile))
assert overwrite or not os.path.exists(filename), ' file exists: ' + filename
f = open(filename,'w')
else:
f = outfile
f.write(card_cmt + ' COMPUTER-GENERATED ABUNDANCE DATA FILE\n')
f.write(card_cmt + ' VERSION {:s}'.format(version2human(version))+'\n')
f.write(card_cmt + ' ' + time.asctime(time.gmtime())+' UTC\n')
f.write(card_cmt + '\n')
for c in self.comment:
f.write("{:s} {:s}\n".format(card_cmt,c))
f.write(card_cmt + '\n')
f.write(card_cmt + '----------------------------------------------------------------------\n')
for i,a in zip(self.iso, self._X()):
f.write("{:6s} {:13.7E}\n".format(i.name(),a))
if not isinstance(outfile, io.IOBase):
f.close()
self.close_logger(timing = '"{:s}" written in'.format(f.name))
def write_compsurb(self,
outfile,
overwrite = False,
silent = False):
"""
Write out data file for setting compsurf BURN values.
If file is file use this.
If file is file name open file for write.
"""
self.setup_logger(silent = silent)
version = 10000
card_cmt = 'c'
if not isinstance(outfile, io.IOBase):
filename = os.path.expanduser(os.path.expandvars(outfile))
assert overwrite or not os.path.exists(filename), ' file exists: ' + filename
f = open(filename,'w')
else:
f = outfile
f.write(card_cmt + ' COMPUTER-GENERATED BURN COMPSURF LINK FILE\n')
f.write(card_cmt + ' VERSION {:s}'.format(version2human(version))+'\n')
f.write(card_cmt + ' ' + time.asctime(time.gmtime())+' UTC\n')
f.write(card_cmt + '\n')
for c in self.comment:
f.write("{:s} {:s}\n".format(card_cmt,c))
f.write(card_cmt + '\n')
f.write(card_cmt + '----------------------------------------------------------------------\n')
f.write("compsurb clear\n")
for i,a in zip(self.iso, self._X()):
f.write("compsurb {:13.7E} {:s}\n".format(a, i.name()))
f.write("mapsurfb\n")
if not isinstance(outfile, io.IOBase):
f.close()
self.close_logger(timing = '"{:s}" written in'.format(f.name))
# some property routines
# ** TODO replace with isotope.ufunc_* routines
def A(self):
return isotope.ufunc_A(self.iso)
def Z(self):
return isotope.ufunc_Z(self.iso)
def N(self):
return isotope.ufunc_N(self.iso)
def E(self):
return isotope.ufunc_E(self.iso)
def isomer(self):
return isotope.ufunc_isomer(self.iso)
def isotope(self):
return isotope.ufunc_isotope(self.iso)
def element(self):
return isotope.ufunc_element(self.iso)
def isobar(self):
return isotope.ufunc_isobar(self.iso)
def isotone(self):
return isotope.ufunc_isotone(self.iso)
def element_name(self):
return np.array([x.element_name() for x in self.iso])
def element_symbol(self, upcase = True):
return np.array([x.element_symbol(upcase = upcase) for x in self.iso])
def idx(self):
return isotope.ufunc_idx(self.iso)
def isotone_idx(self):
return isotope.ufunc_isotone_idx(self.iso)
def isobar_idx(self):
return isotope.ufunc_isobar_idx(self.iso)
def element_idx(self):
return isotope.ufunc_element_idx(self.iso)
def isotope_idx(self):
return isotope.ufunc_isotope_idx(self.iso)
def isomer_idx(self):
return isotope.ufunc_isomer_idx(self.iso)
def isotones_idx(self):
return np.unique(self.isotone_idx())
def isobars_idx(self):
return np.unique(self.isobar_idx())
def elements_idx(self):
return np.unique(self.element_idx())
def isotopes_idx(self):
return np.unique(self.isotope_idx())
def isomers_idx(self):
return np.unique(self.isomer_idx())
def get_isotopes(self, ion):
ion = isotope.ion(ion)
ii = np.where(isotope.ufunc_Z(self.iso) == ion.Z)[0]
return self.iso[ii]
def get_isotones(self, ion):
ion = isotope.ion(ion)
ii = np.where(isotope.ufunc_N(self.iso) == ion.N)[0]
return self.iso[ii]
def get_isobars(self, ion):
ion = isotope.ion(ion)
ii = np.where(isotope.ufunc_A(self.iso) == ion.A)[0]
return self.iso[ii]
def get_isomers(self, ion):
ion = isotope.ion(ion)
ii = np.where(np.logical_and(
isotope.ufunc_Z(self.iso) == ion.Z,
isotope.ufunc_N(self.iso) == ion.N)
)[0]
return self.iso[ii]
def XYZ(self):
"""
Return 'astronomical' X, Y, Z of composition by mass fraction
"""
x = sum(self.X()[self.Z() <= 1])
y = sum(self.X()[self.Z() == 2])
z = sum(self.X()[self.Z() >= 3])
return np.array([x, y, z])
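# Example of the convention above: for a roughly solar mixture one would expect
# X ~ 0.71 (H), Y ~ 0.27 (He), Z ~ 0.02 (metals), i.e. sol.XYZ() ~ [0.71, 0.27, 0.02]
# (illustrative numbers only, assuming a normalized solar AbuSet 'sol').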
def xyz(self):
"""
Return 'astronomical' X, Y, Z of composition by mol fraction
"""
x = sum(self.Y()[self.Z() <= 1])
y = sum(self.Y()[self.Z() == 2])
z = sum(self.Y()[self.Z() >= 3])
return np.array([x, y, z])
def metallicity(self):
"""
Return 'metallicity' Z of composition.
"""
z = sum(self.X()[self.Z() >= 3])
return z
def Ye(self):
"""
Return electron to baryon ratio.
"""
Ye = np.sum(self.Z() * self.Y())
return Ye
def mue(self):
"""
Return mean molecular weight per electron of composition.
"""
return 1 / self.Ye()
def eta(self):
"""
Return neutron excess of mixture.
"""
return 1 - 2 * self.Ye()
def mu(self):
"""
Return mean molecular weight of composition.
"""
xmu = np.sum((self.Z() + 1) * self.Y())
return 1 / xmu
# here to add some general routines that return data for any Ion type
# (isotope, isotone, isobar, element)
def _get(self, selection = None, data = 'X', invalid = np.nan, exception = False):
"""
Return data for selected (isotope, isotone, isobar, element).
If nothing is specified, all isotopes will be returned.
Default is mass fraction 'X'.
"""
def output(x):
nonlocal shape, exception, selection
if exception:
index, = np.where(np.isnan(x))
print(index)
if len(index) > 0:
raise AttributeError('Isotope(s) not found: ' + ', '.join([str(ion) for ion in selection[index]]))
if len(shape) == 0:
x = x[0]
elif len(shape) != 1:
x = x.reshape(shape)
return x
# the following should go into class definition or init routine
# setup
uis_func = [
isotope.ufunc_is_isomer,
isotope.ufunc_is_isotope,
isotope.ufunc_is_element,
isotope.ufunc_is_isobar,
isotope.ufunc_is_isotone,
]
# we would only have to switch out this one definition to get them all...
if data == 'X':
val_func = [
self.isomers_X,
self.isotopes_X,
self.elements_X,
self.isobars_X,
self.isotones_X,
]
elif data == 'Y':
val_func = [
self.isomers_Y,
self.isotopes_Y,
self.elements_Y,
self.isobars_Y,
self.isotones_Y,
]
elif data == 'log_eps':
val_func = [
self.isomers_log_eps,
self.isotopes_log_eps,
self.elements_log_eps,
self.isobars_log_eps,
self.isotones_log_eps,
]
elif data == 'Abu':
val_func = [
self.isomers_Abu,
self.isotopes_Abu,
self.elements_Abu,
self.isobars_Abu,
self.isotones_Abu,
]
else:
raise ValueError("Invalid Data request: '{}'.".format(data))
# selection setup
if selection is None:
selection = self.iso
shape = np.shape(selection)
if shape == ():
selection = np.array([selection])
if not isinstance(selection, np.ndarray):
selection = np.array(selection)
if len(shape) > 1:
selection = selection.reshape(-1)
# use factory function instead of Ion
selection = np.array([isotope.ion(ix) for ix in selection])
selections = np.ndarray([len(uis_func), len(selection)], dtype = np.bool)
# pure cases
# here we test the most likely cases first ...
for i,(uis,val) in enumerate(zip(uis_func, val_func)):
selections[i, :] = uis(selection)
if np.all(selections[i, :]):
return output(val(selection))
# now mixed cases
if exception:
invalid = np.nan
x = np.tile(np.array(invalid, dtype=np.float64), len(selection))
for i,val in enumerate(val_func):
if np.any(selections[i,:]):
index, = np.nonzero(selections[i,:])
x[index] = val(selection[index])
return output(x)
def _X(self):
return self.abu
def _Y(self):
return self.abu / self.A()
def ppm(self, selection = None):
"""
Return ppm (10,000 ppm = 1 mass%) for selected (isomer, isotope, isotone, isobar, element).
If nothing is specified, all isotopes will be returned.
"""
return self.X(selection) * 1e6
def X(self, selection = None):
"""
Return mass fraction for selected (isomer, isotope, isotone, isobar, element).
If nothing is specified, all isotopes will be returned.
"""
return self._get(selection, data = 'X')
def Y(self, selection = None):
"""
Return mole/g for selected (isomer, isotope, isotone, isobar, element).
If nothing is specified, all isotopes will be returned.
"""
return self._get(selection, data = 'Y')
def log_eps(self, selection = None):
"""
Return log(eps) for selected (isomer, isotope, isotone, isobar, element).
If nothing is specified, all isotopes will be returned.
"""
return self._get(selection, data = 'log_eps')
def Abu(self, selection = None):
"""
Return A with A(Si) = 10**6 for selected (isomer, isotope, isotone, isobar, element).
If nothing is specified, all isotopes will be returned.
"""
return self._get(selection, data = 'Abu')
# add functions to compute [] , \delta - those need to be in SolAbu
# generic selection routine
def _selection(self,
x,
idx,
selection = None,
missing = np.nan,
check = None,
convert = None,
):
if selection is None:
return x
# we should really use IonList objects
if np.shape(selection) == ():
selection = np.array([selection])
selection = isotope.ufunc_ion(selection)
if not np.all(check(selection)):
raise KeyError("Selection contained invalid entries.")
selection = convert(selection)
y = np.ndarray(selection.shape, dtype = x.dtype)
ii = np.argsort(idx)
sel = np.minimum(np.searchsorted(idx[ii], selection), len(ii) - 1)
jj = idx[ii][sel] == selection
y[jj] = x[ii][sel][jj]
y[~jj] = missing
return y
def _generic_selection(self,
data = None,
selection = None,
mode = None,
missing = np.nan,
return_selection = False,
**kwargs):
if data is None:
data = self.abu
elif data == 'X':
data = self._X()
elif data == 'Y':
data = self._Y()
if mode is None:
convert = isotope.ufunc_idx
check = isotope.ufunc_is_ion
keys = self.idx()
else:
keys = self.__getattribute__(mode + '_idx')()
check = isotope.__dict__['ufunc_is_' + mode]
convert = isotope.__dict__['ufunc_' + mode + '_idx']
x, idx = project(
data,
keys,
return_values = True,
)
if selection is None:
result = x
else:
result = self._selection(
x,
idx,
selection = selection,
check = check,
convert = convert,
missing = missing,
**kwargs)
if return_selection:
if selection is None:
selection = isotope.ufunc_ion_from_idx(idx)
return result, selection
return result
# ------------------------------
# isomer routines
# ------------------------------
def _isomers_selection(self, **kwargs):
return self._generic_selection(
mode = 'isomer',
**kwargs)
def isomers_X(self, selection = None):
"""
Return isomer mass fractions.
"""
return self._isomers_selection(
data = 'X',
selection = selection)
def isomers_Y(self, selection = None):
"""
Return isomer number fractions (mol per gram).
"""
return self._isomers_selection(
data = 'Y',
selection = selection)
def isomers_log_eps(self, selection = None):
"""
Return isomer log(eps).
log10(# of atoms relative to H) + 12
"""
h = self.Z() == 1
A = self.A()
x = sum(self._X()[h] / A[h])
y = self.isomers_Y(selection)
return np.log10(y / x) + 12
def isomers_set(self, selection = None):
"""
Return set of projected isomers.
"""
abu, iso = self._isomers_selection(
data = 'X',
selection = selection,
return_selection = True)
return AbuSet(abu = abu,
iso = iso,
comment = self.comment)
# we may want to add something signifying isomers ...
def isomers_Abu(self, selection = None):
"""
Return isomer Abundance A(Si) = 10**6.
number of atoms relative to Si
"""
si = self.Z() == 14
A = self.A()
x = sum(self._X()[si] / A[si])
y = self.isomers_Y(selection)
return y / x * 1e6
def isomers(self):
"""
Return list of all isomers in abundance set.
"""
idx = np.unique(self.isomer_idx())
ii = np.where(idx != isotope.Ion.VOID_IDX)
return isotope.ufunc_ion_from_idx(idx[ii])
# ------------------------------
# isotope routines
# ------------------------------
def _isotopes_selection(self, **kwargs):
return self._generic_selection(mode = 'isotope', **kwargs)
def isotopes_X(self, selection = None):
"""
Return isotope mass fractions.
"""
return self._isotopes_selection(
data = 'X',
selection = selection)
def isotopes_Y(self, selection = None):
"""
Return isotope number fractions (mol per gram).
"""
return self._isotopes_selection(
data = 'Y',
selection = selection)
def isotopes_log_eps(self, selection = None):
"""
Return isotope log(eps).
log10(# of atoms relative to H) + 12
"""
h = self.Z() == 1
A = self.A()
x = sum(self._X()[h] / A[h])
y = self.isotopes_Y(selection)
return np.log10(y / x) + 12
def isotopes_Abu(self, selection = None):
"""
Return isotope Abundance A(Si) = 10**6.
number of atoms relative to Si
"""
si = self.Z() == 14
A = self.A()
x = sum(self._X()[si] / A[si])
y = self.isotopes_Y(selection)
return y / x * 1e6
def isotopes(self):
"""
Return list of all isotopes in abundance set.
"""
idx = np.unique(self.isotope_idx())
ii = np.where(idx != isotope.Ion.VOID_IDX)
return isotope.ufunc_ion_from_idx(idx[ii])
def isotopes_set(self, selection = None):
"""
Return set of projected isotopes.
"""
abu, iso = self._isotopes_selection(
data = 'X',
selection = selection,
return_selection = True)
return AbuSet(abu = abu,
iso = iso,
comment = self.comment)
# we may want to add something signifying isotopes ...
# ------------------------------
# element routines
# ------------------------------
def _elements_selection(self, **kwargs):
return self._generic_selection(mode = 'element', **kwargs)
def elements(self):
"""
Return list of all elements in abundance set.
"""
idx = np.unique(self.element_idx())
ii = np.where(idx != isotope.Ion.VOID_IDX)
return isotope.ufunc_ion_from_idx(idx[ii])
def elements_Z(self):
"""
Return charge number of all elements.
"""
return np.unique(self.Z())
def elements_X(self, selection = None):
"""
Return elemental mass fractions.
"""
return self._elements_selection(
data = 'X',
selection = selection)
def elements_Y(self, selection = None):
"""
Return elemental number fractions (mol per gram).
"""
return self._elements_selection(
data = 'Y',
selection = selection)
def elements_log_eps(self, selection = None):
"""
Return elemental log(eps).
log10(# of atoms relative to H) + 12
"""
h = self.Z() == 1
A = self.A()
x = sum(self._X()[h] / A[h])
y = self.elements_Y(selection)
return np.log10(y / x) + 12
def elements_Abu(self, selection = None):
"""
Return element abundance A(Si) = 10**6.
number of atoms relative to Si
"""
si = self.Z() == 14
A = self.A()
x = sum(self._X()[si] / A[si])
y = self.elements_Y(selection)
return y / x * 1e6
def elements_name(self):
"""
Return the name of each element present.
"""
# 'Elements' was undefined here; assume isotope.elements maps Z to the element name
return np.array([isotope.elements[x] for x in self.elements_Z()])
def elements_set(self, selection = None):
"""
Return set of projected elements.
"""
abu, iso = self._elements_selection(
data = 'X',
selection = selection,
return_selection = True)
return AbuSet(abu = abu,
iso = iso,
comment = self.comment)
# we may want to add something signifying elements ...
# ------------------------------
# isobar routines
# ------------------------------
def _isobars_selection(self, **kwargs):
return self._generic_selection(mode = 'isobar', **kwargs)
def isobars(self):
"""
Return list of all isobars in abundance set.
"""
idx = np.unique(self.isobar_idx())
ii = np.where(idx != isotope.Ion.VOID_IDX)
return isotope.ufunc_ion_from_idx(idx[ii])
def isobars_A(self):
"""
Return mass number of all isobars.
"""
return np.unique(self.A())
def isobars_X(self, selection=None):
"""
Return isobar mass fractions.
"""
return self._isobars_selection(
data = 'X',
selection = selection)
def isobars_Y(self, selection=None):
"""
Return isobar number fractions (mol per gram).
"""
return self._isobars_selection(
data = 'Y',
selection = selection)
def isobars_log_eps(self, selection = None):
"""
Return isobar log(eps).
log10(# of atoms relative to H) + 12
"""
h = self.Z() == 1
A = self.A()
x = sum(self._X()[h] / A[h])
y = self.isobars_Y(selection)
return np.log10(y / x) + 12
def isobars_Abu(self, selection = None):
"""
Return isobar abundance A(Si) = 10**6.
number of atoms relative to Si
"""
si = self.Z() == 14
A = self.A()
x = sum(self._X()[si] / A[si])
y = self.isobars_Y(selection)
return y / x * 1e6
def isobars_set(self, selection = None):
"""
Return set of projected isobars.
"""
abu, iso = self._isobars_selection(
data = 'X',
selection = selection,
return_selection = True)
return AbuSet(abu = abu,
iso = iso,
comment = self.comment)
# we may want to add something signifying isobars ...
# ------------------------------
# isotone routines
# ------------------------------
def _isotones_selection(self, **kwargs):
return self._generic_selection(mode = 'isotone', **kwargs)
def isotones(self):
"""
Return list of all isotones in abundance set.
"""
idx = np.unique(self.isotone_idx())
ii = np.where(idx != isotope.Ion.VOID_IDX)
return isotope.ufunc_ion_from_idx(idx[ii])
def isotones_N(self):
"""
Return neutron number of all isotones.
"""
return np.unique(self.N())
def isotones_X(self, selection=None):
"""
Return isotone mass fractions.
"""
return self._isotones_selection(
data = 'X',
selection = selection)
def isotones_Y(self, selection=None):
"""
Return isotone number fractions (mol per gram).
"""
return self._isotones_selection(
data = 'Y',
selection = selection)
def isotones_log_eps(self, selection = None):
"""
Return isotone log(eps).
log10(# of atoms relative to H) + 12
"""
h = self.Z() == 1
A = self.A()
x = sum(self._X()[h] / A[h])
y = self.isotones_Y(selection)
return np.log10(y / x) + 12
def isotones_Abu(self, selection = None):
"""
Return isotone abundance A(Si) = 10**6.
number of atoms relative to Si
"""
si = self.Z() == 14
A = self.A()
x = sum(self._X()[si] / A[si])
y = self.isotones_Y(selection)
return y / x * 1e6
def isotones_set(self, selection = None):
"""
Return set of projected isotones.
"""
abu, iso = self._isotones_selection(
data = 'X',
selection = selection,
return_selection = True)
return AbuSet(abu = abu,
iso = iso,
comment = self.comment)
# we may want to add something signifying isotones ...
# general access interfaces
def __getitem__(self, index):
try:
return self._get(index)
except:
raise AttributeError('Isotope not found.')
def __setitem__(self, index, item):
# TODO add isotope if not in list?
# maybe add parameter allow_new
try:
# this does not work for numpy
if isinstance(index, str):
index = isotope.ion(index)
if isinstance(index, isotope.Ion):
index = np.where(self.iso == index)
# 'is_numlike' was undefined; assume a numeric scalar is what is required here
if not isinstance(item, (int, float, np.number)):
    raise AttributeError('Abundance needs to be numlike.')
self.abu[index] = item
except:
raise AttributeError('Isotope not found.')
def __len__(self):
"""
Return number of isotopes.
"""
return len(self.iso)
def __getattr__(self, attr):
try:
x = self._get(attr, invalid = np.nan)
except AttributeError:
pass
else:
if not np.isnan(x):
return x
return super().__getattribute__(attr)
def __setattr__(self, attr, value):
if attr not in self.__dict__:
if 'iso' in self.__dict__:
x = np.where(self.iso == attr)[0]
if len(x) == 1:
i = x[0]
fac = 1 - value
self.abu[i] = 0.
self._normalize(fac) # not implemented
self.abu[i] = value
return
super().__setattr__(attr,value)
def __iter__(self):
i=0
while i < self.__len__():
yield (self.iso[i], self.abu[i])
i+=1
def __call__(self, *args, **kwargs):
"""
TODO:
1) add new isotopes to abundance pattern
2) renormalize
3) check value is in range 0 < v < 1
"""
for k in args:
x = np.where(self.iso == k)[0]
if len(x) == 1:
return self.abu[x[0]]
else:
raise AttributeError('Isotope not in list')
for k,v in kwargs.items():
if not 0 <= v <= 1:
raise ValueError('Abundance for '+k+' is out of range.')
x=np.where(self.iso == k)[0]
if len(x) == 1:
self.abu[x[0]] = v
else:
# TODO - add isotope instead (if valid)
raise AttributeError('Isotope not in list')
def __contains__(self, other):
"""
Determine whether AbuSet contains other (iso) or AbuSet (vector).
"""
if isinstance(other, AbuSet):
return self.contains(other).all()
x, = np.where(self.iso == other)
if len(x) == 1:
return True
return False
def contains(self, iso):
"""
Determine whether AbuSet contains iso (scalar) or which isotopes in iso are present (vector).
If scalar argument: return True/False.
If vector argument return array with result for each element.
"""
if np.isscalar(iso):
return iso in self
else:
if isinstance(iso, AbuSet):
iso = iso.iso
return self._in1d(iso, self.iso)
@staticmethod
def _in1d(ar1, ar2):
"""
replace non-working in1d that uses mergesort
we convert to index and then call the original in1d
... which is sort of what would have happened anyway ...
"""
ar1 = np.array([i.index() for i in ar1])
ar2 = np.array([i.index() for i in ar2])
return np.in1d(ar1, ar2)
# ar = np.concatenate( (ar1, ar2) )
# order = ar.argsort()
# sar = ar[order]
# equal_adj = (sar[1:] == sar[:-1])
# flag = np.concatenate( (equal_adj, [False] ) )
# indx = order.argsort()[:len( ar1 )]
# return flag[indx]
def ions(self):
"""
return IonList
"""
return IonList(self.iso)
# ------------------------------
# arithmetic operations
# ------------------------------
_zero_missing = None
@staticmethod
def _return_matching(iso1, abu1, iso2, abu2,
missing = None,
missing1 = None,
missing2 = None):
"""
if missing is None: return intersection of isotopes, otherwise union
if only one missing value is provided, use other set as basis
"""
if missing1 is None:
missing1 = missing
if missing2 is None:
missing2 = missing
idx1 = isotope.ufunc_idx(iso1)
idx2 = isotope.ufunc_idx(iso2)
if missing1 is None and missing2 is None:
idx = np.sort(np.intersect1d(idx1, idx2))
ii1 = np.argsort(idx1)
ii2 = np.argsort(idx2)
abu1 = abu1[ii1]
abu2 = abu2[ii2]
idx1 = idx1[ii1]
idx2 = idx2[ii2]
ii1 = np.searchsorted(idx1, idx)
ii2 = np.searchsorted(idx2, idx)
abu_1 = abu1[ii1]
abu_2 = abu2[ii2]
elif missing1 is None and missing2 is not None:
ii1 = np.argsort(idx1)
abu_1 = abu1[ii1]
idx = idx1[ii1]
abu_2 = np.ndarray(len(idx), dtype = np.float64)
abu_2.fill(missing2)
sel = np.minimum(np.searchsorted(idx, idx2), len(idx) - 1)
jj = idx[sel] == idx2
abu_2[sel[jj]] = abu2[jj]
elif missing1 is not None and missing2 is None:
ii2 = np.argsort(idx2)
abu_2 = abu2[ii2]
idx = idx2[ii2]
abu_1 = np.ndarray(len(idx), dtype = np.float64)
abu_1.fill(missing1)
sel = np.minimum(np.searchsorted(idx, idx1), len(idx) - 1)
jj = idx[sel] == idx1
abu_1[sel[jj]] = abu1[jj]
else:
idx = np.sort(np.union1d(idx1, idx2))
abu_1 = np.ndarray(len(idx), dtype = np.float64)
abu_2 = np.ndarray(len(idx), dtype = np.float64)
abu_1.fill(missing1)
abu_2.fill(missing2)
ii1 = np.searchsorted(idx, idx1)
ii2 = np.searchsorted(idx, idx2)
abu_1[ii1] = abu1
abu_2[ii2] = abu2
iso = isotope.ufunc_ion_from_idx(idx)
return iso, abu_1, abu_2
def __truediv__(self, other):
"""
return abundance ratio
TODO - check for X or Y
"""
if isinstance(other, AbuSet):
iso, abu1, abu2 = self._return_matching(
self.iso,
self.X(),
other.iso,
other.X(),
missing1 = self._zero_missing)
new = abu1 / abu2
comment = other.comment
else:
try:
new = self.abu / other
except:
return NotImplemented
iso = self.iso
comment = str(other)
return AbuSet(
iso,
new,
comment = ' / '.join(stuple(self.comment, comment))
)
def __rtruediv__(self, other):
"""
return abundance ratio
TODO - check for X or Y
"""
if isinstance(other, AbuSet):
iso, abu1, abu2 = self._return_matching(
self.iso,
self.X(),
other.iso,
other.X(),
missing2 = self._zero_missing)
new = abu2 / abu1
comment = other.comment
else:
try:
new = other / self.abu
except:
return NotImplemented
comment = str(other)
iso = self.iso
return AbuSet(
iso,
new,
comment = ' / '.join(stuple(comment, self.comment))
)
def __floordiv__(self, other):
"""
abundance ratio, try working subset match
TODO - check for X or Y
"""
if not isinstance(other, AbuSet):
return NotImplemented
iso = []
new = []
mis = []
for i in other.iso:
a = self[i]
b = other[i]
if not np.isnan(a) and b > 0:
iso.append(i)
new.append(a / b)
else:
mis.append(i)
break
if len(mis) > 0:
iso = []
new = []
mis = []
for i in self.iso:
a = self[i]
b = other[i]
if not np.isnan(a) and b > 0:
iso.append(i)
new.append(a / b)
else:
mis.append(i)
break
if len(mis) > 0:
return NotImplemented
return AbuSet(
iso,
new,
comment = ' // '.join(stuple(self.comment, other.comment))
)
def __add__(self, other):
"""
return sum of abundances
TODO - check for X or Y
"""
if not isinstance(other, AbuSet):
return NotImplemented
iso, abu1, abu2 = self._return_matching(
self.iso,
self.X(),
other.iso,
other.X(),
missing = 0)
new = abu1 + abu2
comment = other.comment
return AbuSet(
iso,
new,
comment = ' + '.join(stuple(self.comment, comment))
)
def __sub__(self, other):
"""
return difference of abundances
TODO - check for X or Y
"""
if not isinstance(other, AbuSet):
return NotImplemented
iso, abu1, abu2 = self._return_matching(
self.iso,
self.X(),
other.iso,
other.X(),
missing = 0)
new = abu1 - abu2
comment = other.comment
return AbuSet(
iso,
new,
comment = ' - '.join(stuple(self.comment, comment))
)
def __mul__(self, other):
"""
return product of abundances
TODO - check for X or Y
"""
if isinstance(other, AbuSet):
iso, abu1, abu2 = self._return_matching(
self.iso,
self.X(),
other.iso,
other.X(),
missing = self._zero_missing)
new = abu1 * abu2
comment = other.comment
else:
try:
new = self.abu * other
except:
return NotImplemented
comment = str(other)
iso = self.iso
return AbuSet(
iso,
new,
comment = ' * '.join(stuple(self.comment, comment))
)
__rmul__ = __mul__
def __pow__(self, other):
"""
return power of abundances
TODO - check for X or Y
"""
if isinstance(other, AbuSet):
iso, abu1, abu2 = self._return_matching(
self.iso,
self.X(),
other.iso,
other.X(),
missing1 = self._zero_missing)
new = abu2 ** abu1
comment = other.comment
else:
try:
new = self.abu ** other
except:
return NotImplemented
comment = str(other)
iso = self.iso
return AbuSet(
iso,
new,
comment = ' ** '.join(stuple(self.comment, comment))
)
def __neg__(self):
"""
return negative of abundance
"""
new = - self.abu
return AbuSet(
self.iso,
new,
comment = ' - '.join(stuple('', self.comment))
)
def index(self, iso):
"""
Return isotope index in iso/abu array
accepts scalar, np.ndarray, list, tuple, set
"""
if isinstance(iso, str) or not isinstance(iso, (Iterable, np.ndarray)):
return np.where(self.iso == iso)[0][0]
elif isinstance(iso, np.ndarray):
ii = np.ndarray(iso.shape, dtype=np.int64)
for i,k in enumerate(iso.flat):
ii[i] = np.where(self.iso == k)[0][0]
return ii
elif isinstance(iso, Iterable):
ii = []
for k in iso:
if isinstance(k, str):
i = np.where(self.iso == k)[0][0]
else:
i = self.index(k)
ii += [i]
if not isinstance(iso, list):
ii = type(iso)(ii)
return ii
else:
raise AttributeError('Argument type not supported.')
def kepler_write_compsurb(self, filename):
"""
Write link file to set KEPLER surface BURN abundances.
"""
with open(filename, 'wt') as f:
f.write('compsurb clear\n')
for i in self.ions():
x = self.X(i)
if x > 0:
f.write('compsurb {abu:12.5e} {ion:<5s}\n'.format(ion=i.name(), abu=x))
f.write('compsurb show\n')
|
import jwt
import pytest
from django.test.client import Client as TestClient
from oidc_apis.models import ApiScope
from tunnistamo.tests.conftest import (
DummyFixedOidcBackend, create_oidc_clients_and_api, get_api_tokens, get_tokens, get_userinfo, refresh_token,
social_login
)
def _get_access_and_id_tokens(settings, oidc_client, response_type, trust_loa=True):
test_client = TestClient()
api_scope = ApiScope.objects.filter(allowed_apps=oidc_client).first()
social_login(settings, test_client, trust_loa=trust_loa)
return get_tokens(
test_client,
oidc_client,
response_type,
scopes=['openid', 'profile', api_scope.identifier]
)
@pytest.mark.django_db
@pytest.mark.parametrize('response_type', [
'code',
'id_token',
'id_token token',
'code token',
'code id_token',
'code id_token token',
])
@pytest.mark.parametrize('trust_loa', (True, False))
def test_loa_in_id_token_trust(settings, response_type, trust_loa):
oidc_client = create_oidc_clients_and_api()
tokens = _get_access_and_id_tokens(
settings,
oidc_client,
response_type,
trust_loa=trust_loa
)
assert 'id_token' in tokens
assert 'id_token_decoded' in tokens
assert 'loa' in tokens['id_token_decoded']
expected_loa = 'substantial' if trust_loa else 'low'
assert tokens['id_token_decoded']['loa'] == expected_loa
def _check_claims(decoded_token, oidc_client_id, tunnistamo_session_id):
CLAIMS_TO_CHECK = {
'loa': 'substantial',
'azp': oidc_client_id,
'amr': DummyFixedOidcBackend.name,
'sid': tunnistamo_session_id,
}
for claim, expected_value in CLAIMS_TO_CHECK.items():
assert claim in decoded_token
assert decoded_token[claim] == expected_value
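# Illustrative shape of the decoded token checked above (values hypothetical;
# 'amr' would equal DummyFixedOidcBackend.name and 'sid' the Tunnistamo session id):
#   {'loa': 'substantial', 'azp': '<oidc client id>', 'amr': '<backend name>', 'sid': '<session id>', ...}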
@pytest.mark.django_db
@pytest.mark.parametrize('response_type', [
'code',
'id_token',
'id_token token',
'code token',
'code id_token',
'code id_token token',
])
def test_claims_in_id_token(settings, response_type):
oidc_client = create_oidc_clients_and_api()
tokens = _get_access_and_id_tokens(
settings,
oidc_client,
response_type,
)
assert 'id_token' in tokens
assert 'id_token_decoded' in tokens
_check_claims(tokens['id_token_decoded'], oidc_client.client_id, tokens['tunnistamo_session_id'])
@pytest.mark.django_db
@pytest.mark.parametrize('response_type', [
'code',
# 'id_token', # Cannot fetch API tokens without an access token
'id_token token',
'code token',
'code id_token',
'code id_token token',
])
def test_claims_in_api_token(settings, response_type):
oidc_client = create_oidc_clients_and_api()
tokens = _get_access_and_id_tokens(settings, oidc_client, response_type)
api_scope = ApiScope.objects.filter(allowed_apps=oidc_client).first()
api_tokens = get_api_tokens(tokens['access_token'], only_return_content=True)
test_api_token = api_tokens[api_scope.identifier]
decoded_token = jwt.decode(
test_api_token,
algorithms=["RS256"],
options={"verify_signature": False},
)
_check_claims(decoded_token, oidc_client.client_id, tokens['tunnistamo_session_id'])
@pytest.mark.django_db
@pytest.mark.parametrize('response_type', [
'code',
# 'id_token', # No refresh token
# 'id_token token', # No refresh token
'code token',
'code id_token',
'code id_token token',
])
def test_claims_in_id_token_after_refresh(settings, response_type):
oidc_client = create_oidc_clients_and_api()
tokens = _get_access_and_id_tokens(settings, oidc_client, response_type)
new_tokens = refresh_token(oidc_client, tokens, only_return_content=True)
assert 'id_token' in new_tokens
assert 'id_token_decoded' in new_tokens
_check_claims(new_tokens['id_token_decoded'], oidc_client.client_id, tokens['tunnistamo_session_id'])
@pytest.mark.django_db
@pytest.mark.parametrize('response_type', [
'code',
# 'id_token', # Cannot fetch user info without an access token
'id_token token',
'code token',
'code id_token',
'code id_token token',
])
def test_claims_not_in_userinfo(settings, response_type):
oidc_client = create_oidc_clients_and_api()
tokens = _get_access_and_id_tokens(
settings,
oidc_client,
response_type,
)
assert 'access_token' in tokens
userinfo = get_userinfo(tokens['access_token'], only_return_content=True)
assert 'sub' in userinfo
assert 'loa' not in userinfo
assert 'azp' not in userinfo
assert 'amr' not in userinfo
assert 'sid' not in userinfo
|
"""
Utilities for streaming data from various sources.
"""
import csv
import datetime as dt
import functools
import gzip
import itertools
import random
import types
import os
import numpy as np
try:
import pandas as pd
PANDAS_INSTALLED = True
except ImportError:
PANDAS_INSTALLED = False
from sklearn import utils
__all__ = [
'iter_csv',
'iter_array',
'iter_pandas',
'iter_sklearn_dataset',
'simulate_qa',
'shuffle'
]
def iter_array(X, y=None, feature_names=None, target_names=None, shuffle=False, random_state=None):
"""Yields rows from an array of features and an array of targets.
This method is compatible with ``numpy`` arrays as well as Python lists.
Parameters:
X (array-like of shape (n_samples, n_features))
y (array-like of shape (n_samples,))
feature_names (list of length n_features)
target_names (list of length n_outputs)
shuffle (bool): Whether to shuffle the inputs or not.
random_state (int, RandomState instance or None, default=None): If int, ``random_state`` is
the seed used by the random number generator; if ``RandomState`` instance,
``random_state`` is the random number generator; if ``None``, the random number
generator is the ``RandomState`` instance used by ``np.random``.
Yields:
tuple: A pair (``x``, ``y``) where ``x`` is a dict of features and ``y`` is the target.
"""
feature_names = list(range(len(X[0]))) if feature_names is None else feature_names
multioutput = y is not None and not np.isscalar(y[0])
if multioutput and target_names is None:
target_names = list(range(len(y[0])))
# Shuffle the data
rng = utils.check_random_state(random_state)
if shuffle:
order = rng.permutation(len(X))
X, y = X[order], y if y is None else y[order]
if multioutput:
for x, yi in itertools.zip_longest(X, y if hasattr(y, '__iter__') else []):
yield dict(zip(feature_names, x)), dict(zip(target_names, yi))
else:
for x, yi in itertools.zip_longest(X, y if hasattr(y, '__iter__') else []):
yield dict(zip(feature_names, x)), yi
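# A short usage sketch (assumed inputs):
#   for x, y in iter_array(X=[[1, 2], [3, 4]], y=[0, 1], feature_names=['a', 'b']):
#       ...  # yields ({'a': 1, 'b': 2}, 0) then ({'a': 3, 'b': 4}, 1)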
def iter_sklearn_dataset(dataset, **kwargs):
"""Yields rows from one of the datasets provided by scikit-learn.
Parameters:
dataset (sklearn.utils.Bunch): A scikit-learn dataset.
Yields:
tuple: A pair (``x``, ``y``) where ``x`` is a dict of features and ``y`` is the target.
"""
kwargs['X'] = dataset.data
kwargs['y'] = dataset.target
try:
kwargs['feature_names'] = dataset.feature_names
except AttributeError:
pass
for x, yi in iter_array(**kwargs):
yield x, yi
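# Usage sketch with a scikit-learn Bunch (assumes scikit-learn is installed):
#   from sklearn import datasets
#   for x, y in iter_sklearn_dataset(datasets.load_iris()):
#       ...  # x is a dict keyed by the iris feature names, y is the class label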
def iter_pandas(X, y=None, **kwargs):
"""Yields rows from a ``pandas.DataFrame``.
Parameters:
X (pandas.DataFrame)
y (array-like of shape (n_samples,))
Yields:
tuple: A pair (``x``, ``y``) where ``x`` is a dict of features and ``y`` is the target.
"""
kwargs['feature_names'] = X.columns
if isinstance(y, pd.DataFrame):
kwargs['target_names'] = y.columns
for x, yi in iter_array(X.to_numpy(), y, **kwargs):
yield x, yi
class DictReader(csv.DictReader):
"""Overlay on top of `csv.DictReader` which allows sampling."""
def __init__(self, fraction, rng, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fraction = fraction
self.rng = rng
def __next__(self):
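        # Accessing ``self.fieldnames`` on the first call reads the header row;
        # afterwards candidate rows are skipped at random so that, on average,
        # only ``fraction`` of the rows in the file are returned.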
if self.line_num == 0:
self.fieldnames
row = next(self.reader)
if self.fraction < 1:
while self.rng.random() > self.fraction:
row = next(self.reader)
self.line_num = self.reader.line_num
while row == []:
row = next(self.reader)
d = dict(zip(self.fieldnames, row))
lf = len(self.fieldnames)
lr = len(row)
if lf < lr:
d[self.restkey] = row[lf:]
elif lf > lr:
for key in self.fieldnames[lr:]:
d[key] = self.restval
return d
def iter_csv(filepath_or_buffer, target_name, names=None, converters=None, parse_dates=None, fraction=1.,
compression='infer', seed=None):
"""Yields rows from a CSV file.
Parameters:
filepath_or_buffer: Either a string indicating the location of a CSV file, or a buffer
object that has a ``read`` method.
target_name (str): The name of the target.
names (list of str): A list of names to associate with each element in a row. If ``None``,
then the first row will be assumed to contain the names.
converters (dict): A `dict` mapping feature names to callables used to parse their
associated values.
parse_dates (dict): A `dict` mapping feature names to a format passed to the
`datetime.datetime.strptime` method.
fraction (float): Sampling fraction.
compression (str): For on-the-fly decompression of on-disk data. If 'infer' and
``filepath_or_buffer`` is path-like, then the decompression method is inferred for the
following extensions: '.gz'.
seed (int): If specified, the sampling will be deterministic.
Yields:
tuple: A pair (``x``, ``y``) where ``x`` is a dict of features and ``y`` is the target.
Example:
::
>>> import io
>>> from creme import stream
>>> data = io.StringIO('''name,day,viewers
... Breaking Bad,2018-03-14,1337
... The Sopranos,2018-03-14,42
... Breaking Bad,2018-03-15,7331
... ''')
>>> params = dict(
... target_name='viewers',
... converters={'viewers': int},
... parse_dates={'day': '%Y-%m-%d'}
... )
>>> for x, y in stream.iter_csv(data, **params):
... print(x, y)
{'name': 'Breaking Bad', 'day': datetime.datetime(2018, 3, 14, 0, 0)} 1337
{'name': 'The Sopranos', 'day': datetime.datetime(2018, 3, 14, 0, 0)} 42
{'name': 'Breaking Bad', 'day': datetime.datetime(2018, 3, 15, 0, 0)} 7331
"""
# If a file is not opened, then we open it
if not hasattr(filepath_or_buffer, 'read'):
# Determine the compression from the file extension if "infer" has been specified
if compression == 'infer':
_, ext = os.path.splitext(filepath_or_buffer)
compression = {
'.csv': 'csv',
'.gz': 'gzip'
}[ext]
# Determine the file opening method from the compression
open_func = {
'csv': open,
'gzip': functools.partial(gzip.open, mode='rt')
}[compression]
# Open the file using the opening method
filepath_or_buffer = open_func(filepath_or_buffer)
for x in DictReader(
fraction=fraction,
rng=random.Random(seed),
f=filepath_or_buffer,
fieldnames=names
):
# Cast the values to the given types
if converters is not None:
for i, t in converters.items():
x[i] = t(x[i])
# Parse the dates
if parse_dates is not None:
for i, fmt in parse_dates.items():
x[i] = dt.datetime.strptime(x[i], fmt)
# Separate the target from the features
y = x.pop(target_name)
yield x, y
# Close the file
filepath_or_buffer.close()
def simulate_qa(X_y, on, lag):
"""Simulate a real case learning scenario using a temporal attribute and a lag.
Each observation will first be shown without revealing the ``y`` value. Once the observation is
old enough it will be shown once again, this time with the ``y`` value being revealed. Each
observation will thus be shown twice, once without the target equal to ``None`` and once with
the actual value. The duration between "questions" and "answers" depends on the ``lag``
parameter. For this to work the data is assumed to be sorted with respect to the temporal
attribute.
Parameters:
X_y (generator): A stream of (``x``, ``y``) pairs.
on (str): The attribute used for measuring time.
lag (datetime.timedelta or int or float): Amount to wait before revealing the target
associated with each observation. This value is expected to be able to sum with the
``on`` attribute.
Yields:
(``q_a``, ``x``, ``y``) pairs where ``q_a`` is a `bool` that indicates if the current
iteration is question or not.
Example:
>>> from creme import stream
>>> X_y = [({'moment': i, 'x': i}, bool(i % 2)) for i in range(5)]
>>> for is_question, x, y in simulate_qa(X_y, on='moment', lag=3):
... print('Q' if is_question else 'A', x, y)
Q {'moment': 0, 'x': 0} False
Q {'moment': 1, 'x': 1} True
Q {'moment': 2, 'x': 2} False
A {'moment': 0, 'x': 0} False
Q {'moment': 3, 'x': 3} True
A {'moment': 1, 'x': 1} True
Q {'moment': 4, 'x': 4} False
A {'moment': 2, 'x': 2} False
A {'moment': 3, 'x': 3} True
A {'moment': 4, 'x': 4} False
"""
answers = []
for x, y in X_y:
while answers:
# Get the oldest example
x_ans, y_ans = answers[0]
# If the oldest answer isn't old enough then stop
if x_ans[on] + lag > x[on]:
break
# Else yield the oldest answer and release it from memory
yield False, x_ans, y_ans
del answers[0]
# Show the observation and label it as a question
yield True, x, y
# Store the answer for the future
answers.append((x, y))
# Yield the final answers that remain
for x, y in answers:
yield False, x, y
def shuffle(stream, buffer_size, seed=None):
"""Shuffles a stream of data.
This works by maintaining a buffer of elements. The first buffer_size elements are stored in
    memory. Once the buffer is full, a random element inside the buffer is yielded. Every time an
element is yielded, the next element in the stream replaces it and the buffer is sampled again.
Increasing buffer_size will improve the quality of the shuffling.
If you really want to stream over your dataset in a "good" random order, the best way is to
split your dataset into smaller datasets and loop over them in a round-robin fashion. You may
do this by using the ``roundrobin`` recipe from the `itertools` module.
References:
1. `Visualizing TensorFlow's streaming shufflers <http://www.moderndescartes.com/essays/shuffle_viz/>`_
"""
    # Use a dedicated random number generator so that the ``seed`` parameter takes effect
    rng = random.Random(seed)
    # If stream is not a generator, then we coerce it to one
    if not isinstance(stream, types.GeneratorType):
        stream = iter(stream)
    # Initialize the buffer
    buff = list(itertools.islice(stream, buffer_size))
    # Deplete the stream until it is empty
    for element in stream:
        # Pick a random element from the buffer and yield it
        i = rng.randint(0, len(buff) - 1)
        yield buff[i]
        # Replace the yielded element from the buffer with the new element from the stream
        buff[i] = element
    # Shuffle the remaining buffer elements and yield them one by one
    rng.shuffle(buff)
for element in buff:
yield element
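# A minimal usage sketch for ``shuffle`` (illustrative only): buffer-shuffle a
# stream with a fixed seed for reproducibility.
#
#     for element in shuffle(range(100), buffer_size=10, seed=42):
#         ...  # elements arrive in a locally shuffled order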
|
point_dict = {'Two': 2, 'Three': 3, 'Four': 4, 'Five': 5, 'Six': 6, 'Seven': 7, 'Eight': 8, 'Nine': 9, 'Ten': 10,
'Jack': 10, 'Queen': 10, 'King': 10, 'Ace': 11}
suits = ['Hearts', 'Diamonds', 'Spades', 'Clubs']
class Card:
def __init__(self, value, suit):
self.value = value
self.suit = suit
self.point_value = point_dict[self.value]
def __repr__(self):
return self.value + ' of ' + self.suit
|
# flake8: noqa
name = "abp_blocklist_parser"
from abp_blocklist_parser.BlockListParser import BlockListParser
|
# Sod shock tube
import numpy
from models import sr_swe
from bcs import outflow
from simulation import simulation
from methods import fvs_method
from rk import rk3
from grid import grid
from matplotlib import pyplot
Ngz = 4
Npoints = 400
L = 0.5
interval = grid([-L, L], Npoints, Ngz)
# alternative initial states: phiL, phiR = 0.41, 0.01
phiL = 0.9
phiR = 0.05
qL = numpy.array([phiL, 0])
qR = numpy.array([phiR, 0])
model = sr_swe.sr_swe(initial_data = sr_swe.initial_riemann(qL, qR))
sim = simulation(model, interval, fvs_method(3), rk3, outflow, cfl=0.5)
sim.evolve(0.4)
sim.plot_system()
pyplot.show()
#
#exact = numpy.genfromtxt('../sr_swe.txt')
#fig, ax = pyplot.subplots(2, 1)
#ax[0].plot(exact[0, :]*0.4, exact[1, :], 'k-', label='Exact')
#ax[0].plot(sim.coordinates, sim.prim[0, :], 'ro', label='Sim')
#ax[0].set_ylabel(r"$\Phi$")
#ax[1].plot(exact[0, :]*0.4, exact[2, :], 'k-', label='Exact')
#ax[1].plot(sim.coordinates, sim.prim[1, :], 'ro', label='Sim')
#ax[1].set_ylabel(r"$v$")
#fig.tight_layout()
#pyplot.show()
|
# Copyright (c) 2011 Mattias Nissler <[email protected]>
#
# Redistribution and use in source and binary forms, with or without modification, are permitted
# provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of
# conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of
# conditions and the following disclaimer in the documentation and/or other materials provided
# with the distribution.
# 3. The name of the author may not be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import functools
import socket
import struct
from unittest import TestCase
from tuntap.packet import IPv4Packet, IPv6Packet, UDPPacket
class TestIO(TestCase):
def __init__(self, name, af, listenAddress, codec):
super(TestIO, self).__init__(name)
        self._codec = codec(af, listenAddress)
def __str__(self):
return '%s [%s]' % (super(TestIO, self).__str__(), str(self._codec))
def setUp(self):
super(TestIO, self).setUp()
self._codec.start()
def tearDown(self):
self._codec.stop()
super(TestIO, self).tearDown()
class TestIp(TestIO):
def __init__(self, name, codec):
super(TestIp, self).__init__(name, socket.AF_INET, None, codec)
def test_Send(self):
payload = 'knock, knock!'
port = 12345
self._codec.sendUDP(payload, (self._codec.addr.remote, port))
self._codec.expectPacket(
{ 'version': 4,
'src': socket.inet_pton(self._codec.af, self._codec.addr.local),
'dst': socket.inet_pton(self._codec.af, self._codec.addr.remote),
'proto': IPv4Packet.PROTO_UDP,
'payload': { 'dst': port,
'payload': payload } })
self.assertTrue(self._codec.runPacket())
def test_Recv(self):
srcport = 23456
payload = 'who\'s there?'
packet = IPv4Packet(proto = IPv4Packet.PROTO_UDP,
src = socket.inet_pton(self._codec.af, self._codec.addr.remote),
dst = socket.inet_pton(self._codec.af, self._codec.addr.local),
payload = UDPPacket(src = srcport,
dst = self._codec.UDPPort,
payload = payload))
self._codec.sendPacket(packet.encode())
self._codec.expectUDP(payload)
self.assertTrue(self._codec.runUDP())
class TestIp6(TestIO):
def __init__(self, name, codec):
super(TestIp6, self).__init__(name, socket.AF_INET6, None, codec)
def test_Send(self):
payload = 'knock, knock!'
port = 12345
self._codec.sendUDP(payload, (self._codec.addr.remote, port))
self._codec.expectPacket(
{ 'version': 6,
'src': socket.inet_pton(self._codec.af, self._codec.addr.local),
'dst': socket.inet_pton(self._codec.af, self._codec.addr.remote),
'proto': IPv6Packet.PROTO_UDP,
'payload': { 'dst': port,
'payload': payload } })
self.assertTrue(self._codec.runPacket())
def test_Recv(self):
srcport = 23456
payload = 'who\'s there?'
packet = IPv6Packet(proto = IPv6Packet.PROTO_UDP,
src = socket.inet_pton(self._codec.af, self._codec.addr.remote),
dst = socket.inet_pton(self._codec.af, self._codec.addr.local),
payload = UDPPacket(src = srcport,
dst = self._codec.UDPPort,
payload = payload))
self._codec.sendPacket(packet.encode())
self._codec.expectUDP(payload)
self.assertTrue(self._codec.runUDP())
class TestMulticast(TestIO):
MULTICAST_GROUP = '224.1.2.3'
def __init__(self, name, codec):
super(TestMulticast, self).__init__(name, socket.AF_INET, TestMulticast.MULTICAST_GROUP,
codec)
def setUp(self):
super(TestMulticast, self).setUp()
mreq = struct.pack('4s4s',
socket.inet_pton(self._codec.af, TestMulticast.MULTICAST_GROUP),
socket.inet_pton(self._codec.af, self._codec.addr.local))
self._codec._recvSock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
self._codec._sendSock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 1)
self._codec._sendSock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_IF,
socket.inet_pton(self._codec.af, self._codec.addr.local))
def test_Send(self):
payload = 'knock, knock!'
port = 12345
self._codec.sendUDP(payload, (TestMulticast.MULTICAST_GROUP, port))
self._codec.expectPacket(
{ 'version': 4,
'src': socket.inet_pton(self._codec.af, self._codec.addr.local),
'dst': socket.inet_pton(self._codec.af, TestMulticast.MULTICAST_GROUP),
'proto': IPv4Packet.PROTO_UDP,
'payload': { 'dst': port,
'payload': payload } })
self.assertTrue(self._codec.runPacket())
def test_Recv(self):
srcport = 23456
payload = 'who\'s there?'
packet = IPv4Packet(proto = IPv4Packet.PROTO_UDP,
src = socket.inet_pton(self._codec.af, self._codec.addr.remote),
dst = socket.inet_pton(self._codec.af, TestMulticast.MULTICAST_GROUP),
payload = UDPPacket(src = srcport,
dst = self._codec.UDPPort,
payload = payload))
self._codec.sendPacket(packet.encode())
self._codec.expectUDP(payload)
self.assertTrue(self._codec.runUDP())
class TestMulticast6(TestIO):
MULTICAST_GROUP = 'ff05::114'
def __init__(self, name, codec):
super(TestMulticast6, self).__init__(name, socket.AF_INET6, TestMulticast6.MULTICAST_GROUP,
codec)
def setUp(self):
super(TestMulticast6, self).setUp()
mreq = struct.pack('16sI',
socket.inet_pton(self._codec.af, TestMulticast6.MULTICAST_GROUP),
self._codec._harness.interface.index)
self._codec._recvSock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_JOIN_GROUP, mreq)
self._codec._sendSock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_HOPS, 1)
self._codec._sendSock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_IF,
self._codec._harness.interface.index)
def test_Send(self):
payload = 'knock, knock!'
port = 12345
self._codec.sendUDP(payload, (TestMulticast6.MULTICAST_GROUP, port))
self._codec.expectPacket(
{ 'version': 6,
'dst': socket.inet_pton(self._codec.af, TestMulticast6.MULTICAST_GROUP),
'proto': IPv6Packet.PROTO_UDP,
'payload': { 'dst': port,
'payload': payload } })
self.assertTrue(self._codec.runPacket())
def test_Recv(self):
srcport = 23456
payload = 'who\'s there?'
packet = IPv6Packet(proto = IPv6Packet.PROTO_UDP,
src = socket.inet_pton(self._codec.af, self._codec.addr.remote),
dst = socket.inet_pton(self._codec.af, TestMulticast6.MULTICAST_GROUP),
payload = UDPPacket(src = srcport,
dst = self._codec.UDPPort,
payload = payload))
self._codec.sendPacket(packet.encode())
self._codec.expectUDP(payload)
self.assertTrue(self._codec.runUDP())
|
import UcsSdk
import time
def EventHandler(mce):
print 'Received a New Event with ClassId: ' + str(mce.mo.classId)
print "ChangeList: ", mce.changeList
print "EventId: ", mce.eventId
def main():
    ucs = UcsSdk.UcsHandle()
    # Login, AddEventHandler and Logout are methods on the handle instance itself
    ucs.Login(username='', password='')
    ucs.AddEventHandler(classId='', callBack=EventHandler)
    try:
        while True:
            print '.',
            time.sleep(5)
    finally:
        # make sure we log out even if the polling loop is interrupted
        ucs.Logout()
if __name__ == '__main__':
main()
|
"""
Script for generating simulated time series using the method of Timmer and Konig (1995).
"""
from numpy import log, zeros, sqrt, arange, exp, real, pi, interp, conj
from numpy.random import normal
from numpy.fft import fft
def ts_gen(n, dt=1., freq=[], pow=[], seed=None, time=0, spline=0, double=1, phase=[0], loginterp=True):
"""
; ----------------------------------------------------------
;+
; NAME:
; TS_GEN
;
; PURPOSE:
; Generate a random time series from a power spectrum model
;
; AUTHOR:
; Simon Vaughan (U.Leicester)
;
; CALLING SEQUENCE:
; x = TS_GEN(65536,dt=0.1)
;
; INPUTS:
; n - (scalar) length of time series (default = 65536)
;
; OPTIONAL INPUTS:
; dt - (scalar) Sampling period (default = 1.0)
; freq - (vector) frequencies at which spectrum is known
; pow - (vector) spectral density at frequencies FREQ
; phase - (vector) phase shift at frequencies FREQ (default=0)
; seed - (long integer) seed for random number generator
; spline - (logical) use cubic spline interpolation
; log - (logical) interpolate linearly in log-log space
; double - (logical) perform FFT in double prec.
;
; OUTPUTS:
; x - (vector) output time series
;
; OPTIONAL OUTPUTS:
; time - sampling times [0,n-1]*dt
;
; DETAILS:
; Generate an evenly-sampled time series for a noise (random)
; process with a power spectrum specified by POW and FREQ.
;
; The method comes from:
; Davies R. B., Harte D. S., 1987, Biometrika, v74, pp95-101
; and was introduced to astronomy by
; Timmer J., Konig M., 1995, A&A, v300, pp707-710
;
; The time series is generated using the following algorithm:
;
; 1. The "true" power spectrum is specified using POW(FREQ)
;
; 2. Define the Fourier frequencies for the output time series
; as f_j = j/(N*dT) with j=1,2,...,N/2. Use interpolation
; to find the power spectrum at f_j from input POW(FREQ)
;
; 3. The "true" power spectrum is converted from power
; (non-negative) to a "true" DFT for the process, using the
; fact that POW = |DFT|^2, so we have a complex-valued
; DFT = complex(sqrt(POW),sqrt(POW)) at each frequency f_j
;
; 4. Draw two sets of N/2 normal deviates (random numbers from
; "normal" Gaussian distribution.
;
; 5. Multiply the real and imaginary parts of the DFT by the
; deviates. This randomised the "true" DFT and gives it the
; distribution expected for an observed or "sampled" DFT from a
; single time series of a random process.
; X(f_j) = DFT(f_j) * eps_j where eps_j is a normal deviate
;
; 6. Use the inverse FT to convert from the frequency domain to
; the time domain, i.e. from x(t_i) = FFT[X(f_j)]
;
; 7. Fill-in the array of times t_i = i*dT for i=0,...,N-1
;
; The randomisation step (5) is equivalent to drawing the square
; amplitude of the DFT from a chi-squared distribution (with two
; degrees of freedom), and the phase of the DFT from a uniform
; distribution over the range [0,2*pi]. These are the expected
; sampling distributions from a random time series.
;
; Note that in reality the DFT is also defined for negative
; Fourier frequencies j=-N/2,...,-1. In order for the resulting
; time series to be real we require that the X(f_j) = X'(-f_j),
; so the negative frequencies carry the complex conjugate of the
; positive frequencies. Each side of the DFT is normalised by
; 1/2 so that the sum over all (-ve and +ve) frequencies is
; equal to the total variace (the integral of the power
; spectrum).
; Also, the DFT at the Nyquist frequency j=N/2 is always real
; when N is even, so the imaginary part is set to zero.
; The DFT at zero frequency (j=0) determines the mean (DC
; component) of the resulting time series. Here we generate
; zero-mean data, so this is set to zero, i.e. X(f_j = 0) = 0.
;
; The spectrum is specified by the vectors FREQ and POW, which
; are interpolated as needed to populate the periodogram needed
; for the generation (step 2). Interpolation is linear unless SPLINE
; keyword is set (in which case it is cubic spline). If FREQ and
; POW are not specified, the spectrum is assumed to be flat
; (i.e. POW = constant).
;
; WARNING: The routine needs to know the power density at
; frequencies f_j = j/(N*dT) with j=1,2,...,N/2. You need
; to make sure your input spectrum spans this full range,
; i.e. that MIN(FREQ) <= 1/(N*dT) and MAX(FREQ) >= 1/2dT.
; This may involve simply adding more extra point to the
; input power spectrum at a very low or high frequency.
; If this is the case the program will return a WARNING
; but carry on by extrapolating the data outside the
; range of the input data.
;
; If the input power spectrum is a power law it may be best
; to use the LOG keyword. This forces the interpolation to
; be done using the log of the power spectrum. I.e. it
; interpolates log(pow) - log(freq) data, and then converts
; the result back to linear-space. In effect, it interpolates
; between points using a power law model.
;
; As the routine uses the FFT function, it works fastest if N is
; a power of 2 (default = 2^16)
;
; There is an addition optional input PHASE. This allows a phase
; shift to be added to the data. Since the phase is randomly and
; uniformly distibuted over the range [0,2*pi] this is of no
; value for a single time series. But it is possible to generate
; two time series by calling the routine twice using the same
; random number seed but different POW or PHASE values. The will
; result in two time series that differ only in their power
; spectrum (modulus square of DFT) or phase (argument of DFT).
; If X = A*exp(i*theta) then applying a phase shift phi we get
; X' = A*exp(i*[theta + phi]) = X * exp(i*phi).
;
; EXAMPLE USAGE:
;
; Generate time series with 1/f spectrum
;
; IDL> freq = (INDGEN(512)+1)/1024.0
; IDL> pow = freq^(-1)
; IDL> x = TS_GEN(1024, dt=1.0, freq=freq, pow=pow,time=time, $
; seed=seed)
; IDL> plot,time,x
;
; Generate time series with 1/f spectrum making use of LOG keyword
;
; IDL> freq = [1e-6,100]
; IDL> pow = 0.01*freq^(-1)
; IDL> x = TS_GEN(1024, dt=1.0, freq=freq, pow=pow,time=time, $
; seed=seed,/log)
; IDL> plot,time,x
;
; Because the spectrum is a power law, we only need define two
; end points at let the interpolation (in log-log space) do the rest.
; (NB: try this without the LOG keyword to see how it goes wrong!)
;
; Generate two time series with constant phase delay of pi/2 using a
; 1/f^2 spectrum
;
; IDL> freq = (INDGEN(512)+1)/1024.0
; IDL> pow = freq^(-2)
; IDL> s = 123L
; IDL> x = TS_GEN(1024, dt=1.0, freq=freq, pow=pow,time=time, $
; seed=s,phase=0)
; IDL> plot,time,x
; IDL> phase = !pi/2
; IDL> s = 123L
; IDL> x = TS_GEN(1024, dt=1.0, freq=freq, pow=pow,time=time,$
; seed=s,phase=phase)
; IDL> oplot,time,x,color=170
;
; NB: A constant time delay of tau can be produced using
; phase(j) = 2.0*!pi*tau*freq(j)
;
; HISTORY:
; 14/05/07 - v1.0 - first working version
; 15/05/07 - v1.1 - bug fix: INDGEN now uses L64 keyword
; this is needed for N > 2^15
; 20/12/07 - v1.2 - added PHASE keyword
; 15/01/09 - v1.3 - added LOG keyword
; 19/01/09 - v.14 - added check that range of FREQ
; spans [f_min = 1/NdT, f_max = 1/2dT]
; 22/09/10 - v1.5 - added clauses to allow for integer DT values
;
; NOTES:
; + uses built in random number generator
;
;-
; ---------------------------------------------------------
"""
# ; options for compilation (recommended by RSI)
# COMPILE_OPT idl2, HIDDEN
# ; watch out for errors
# on_error, 2
# ; ----------------------------------------------------------
# ; Check the arguments
# ; if N not defined, set default
# if len(n) == 0 : n = 65536
# ; make sure N is even
if (n % 2) != 0:
print('** Please make N even in TS_GEN')
return 0
else:
n = int(n)
    # ; if FREQ is not defined, set-up default (flat) spectrum
    # ; (this must happen before the size checks below, otherwise the default case is rejected)
    if len(freq) == 0:
        freq = [0.0, 0.5 / dt]
        pow = [1.0, 1.0]
    # ; check the shape of the input array
    nf = len(freq)
    np = len(pow)
    if (nf != np):
        print('** FREQ and POW of differing sizes in TS_GEN.')
        return 0
    if nf < 2:
        print('** FREQ too small in TS_GEN.')
        return 0
# ; if PHASE is not defined, set-up default (zero) phase shift
np = len(phase)
if (np != nf and np != 1):
print('** FREQ and PHASE of differing sizes in TS_GEN.')
return (0)
if (np == 0): phi = zeros(nf)
if (np == 1): phi = zeros(nf) + phase[0]
if (np == nf): phi = phase
# ; check that PHI is within range [0,2*pi]
phi = phi % (2. * pi)
# ; ----------------------------------------------------------
# ; check the range of input frequencies spans the range
# ; needed to generate the simulation
f_min = 1.0 / (n * dt)
f_max = 1.0 / (2.0 * dt)
if min(freq) > f_min:
print("-- WARNING. MIN(FREQ) > f_min in TS_GEN.")
print("-- MIN(FREQ) = ", min(freq), " f_min = ", f_min)
print("-- Data will be extrapolated. You may prefer to EXPand the range of FREQ.")
if max(freq) < f_max:
print("** MAX(FREQ) < f_max in TS_GEN. EXPand range of FREQ")
print("-- MAX(FREQ) = ", max(freq), " f_max = ", f_max)
print("-- Data will be extrapolated. You may prefer to EXPand the range of FREQ.")
# ; ----------------------------------------------------------
# ; Main part of procedure
# ; number of positive frequencies in Fourier Transform
nf = n // 2
# ; make array for Fourier Transform
# ; (need room for positive and negative frequencies)
x = zeros((2 * nf), dtype=complex)
# ; make array for frequencies
f = arange(nf + 1, dtype=float) / (n * dt)
# ; interpolation of input power spectrum, to fill in any gaps
# ; interpolate given spectrum POW(FREQ) onto required frequencies F
if loginterp:
# ; convert to log-log space, interpolate there, and convert back
lpow = log(pow)
lfreq = log(freq)
lf = log(f[1:nf + 1])
lspec = interp(lf, lfreq, lpow)
spec = zeros(nf + 1)
spec[1:nf + 1] = exp(lspec)
# spec = [0.0, spec]
lpow = 0
lfreq = 0
lf = 0
else:
# ; or just interpolate in lin-lin space as default
spec = interp(f, freq, pow)
# ; set DC value ( psd(f=0) ) to zero
spec[0] = 0.0
# ; interpolate phase shift spectrum (PHI)
phi = interp(f, freq, phi)
# ; normalise spectrum
spec = spec * nf / (2.0 * dt)
# ; ----------------------------------------------------------
# ; now for the Davies-Harte algorithm
# ; take square root of spectrum
spec = sqrt(spec)
# ; populate Fourier Transform of time series with SQRT(spectrum)
# ; multiplied by normal deviate
# ; (independent for real and complex components)
# ; first positive frequencies
x[1:nf] = spec[1:nf] * normal(size=nf - 1) + spec[1:nf] * normal(size=nf - 1) * 1j
# ; apply phase shift X'(f) = X(f) * EXP(i*phi)
x[1:nf] = x[1:nf] * exp(phi[1:nf] * 1j)
# ; FT must be real at Nyquist frequency
x[nf] = spec[nf] * normal()
# ; make sure FT at negative frequencies is conjugate: X(-f) = X*(f)
x[nf + 1:2 * nf] = conj(x[nf - 1:0:-1])
# ; then inverse Fourier Transform into time domain: X(f) -> x(t)
x = fft(x)
# ; drop imaginary part (which is zero)
x = real(x)
# ; calculate TIME if needed
time = arange(n) * dt
# ; ----------------------------------------------------------
# ; Return the data array to the user
return x
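# A rough Python equivalent of the 1/f example from the IDL header above
# (illustrative sketch only; values are taken from that example):
#
#     freq = (arange(512) + 1) / 1024.0
#     pow = freq ** -1
#     x = ts_gen(1024, dt=1.0, freq=freq, pow=pow)
#     # x is a zero-mean series of length 1024 sampled at times arange(1024) * 1.0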
|
import ccxtpro
from asyncio import get_event_loop
print('CCXT Pro version', ccxtpro.__version__)
async def main(loop):
exchange = ccxtpro.bitvavo({
'enableRateLimit': True,
'asyncio_loop': loop,
})
await exchange.load_markets()
exchange.verbose = True
symbol = 'BTC/EUR'
while True:
try:
orderbook = await exchange.watch_order_book(symbol)
print(orderbook['nonce'], symbol, orderbook['asks'][0], orderbook['bids'][0])
except Exception as e:
print(type(e).__name__, str(e))
break
await exchange.close()
loop = get_event_loop()
loop.run_until_complete(main(loop)) |
from typing import List, Union
from torch import nn as nn
import os
from path import Path
import inspect
import subprocess
import types
import importlib
def init_from(
class_name: str, modules: List[Union[str, types.ModuleType]], *args, **kwargs
):
"""Initializes class from its name and list of module names it might be part of.
Args:
class_name: the name of the class that is to be initialized.
modules: the list of modules or module names that are to be searched for
the class.
*args: the non-keyword arguments for the constructor of the given class.
**kwargs: the keyword arguments for the constructor of the given class.
Returns:
An instantiation of the class that first matched the given name during the
search through the given modules.
Raises:
ValueError: If the given class cannot be found in any of the given modules.
"""
modules = [
m if isinstance(m, types.ModuleType) else importlib.import_module(m)
for m in modules
]
    for module in modules:
        if hasattr(module, class_name):
            return getattr(module, class_name)(*args, **kwargs)
    raise ValueError(
        f"Can't find class {class_name} in modules {[m.__name__ for m in modules]}"
    )
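# A minimal usage sketch for ``init_from`` (illustrative only; ``torch.nn`` is
# already imported above as ``nn``):
#
#     linear = init_from("Linear", [nn], 4, 2)  # equivalent to nn.Linear(4, 2)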
def is_number(s, number_type):
""" Returns True is string is a number. """
try:
number_type(s)
return True
except ValueError:
return False
# from https://stackoverflow.com/questions/14989858/get-the-current-git-hash-in-a-python-script
def get_git_revision_hash():
try:
if which("git") is not None:
with Path(kge_base_dir()):
return (
subprocess.check_output(["git", "rev-parse", "HEAD"])
.strip()
.decode()
)
else:
return "No git binary found"
except:
return "No working git repository found."
# from https://stackoverflow.com/questions/14989858/get-the-current-git-hash-in-a-python-script
def get_git_revision_short_hash():
try:
if which("git") is not None:
with Path(kge_base_dir()):
return (
subprocess.check_output(["git", "rev-parse", "--short", "HEAD"])
.strip()
.decode()
)
else:
return "No git binary found"
except:
return "No working git repository found."
# from https://stackoverflow.com/questions/377017/test-if-executable-exists-in-python
def which(program):
import os
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
def module_base_dir(module_name):
module = importlib.import_module(module_name)
return os.path.abspath(filename_in_module(module, ".."))
def kge_base_dir():
return module_base_dir("kge")
def filename_in_module(module_or_module_list, filename):
if not isinstance(module_or_module_list, list):
module_or_module_list = [module_or_module_list]
for module in module_or_module_list:
f = os.path.dirname(inspect.getfile(module)) + "/" + filename
if os.path.exists(f):
return f
raise FileNotFoundError(
"{} not found in one of modules {}".format(filename, module_or_module_list)
)
def get_activation_function(s: str):
if s == "tanh":
return nn.Tanh()
elif s == "relu":
return nn.ReLU()
else:
raise ValueError("activation function {} unknown".format(s))
def round_to_points(round_points_to: List[int], to_be_rounded: int):
"""
Rounds to_be_rounded to the points in round_points_to. Assumes
that the first element in round_points_to is the lower bound and that
the last is the upper bound.
:param round_points_to: List[int]
:param to_be_rounded: int
:return: int
"""
if len(round_points_to) > 0:
assert (
round_points_to[0] <= round_points_to[-1]
), "First element in round_points_to should be the lower bound and the last the upper bound"
last = -1
for i, round_point in enumerate(round_points_to):
if to_be_rounded < (round_point - last) / 2 + last:
# Assumes that the first element in round_points_to is
# the lower bound.
if i == 0:
return round_point
else:
return last
last = round_point
return last
else:
raise Exception(
"{} was called with an empty list to be rounded to.".format(
round_to_points.__name__
)
)
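# A minimal usage sketch for ``round_to_points`` (illustrative only): values are
# snapped to the nearest listed point, and values outside the range are clamped
# to the first/last entries.
#
#     round_to_points([0, 10, 100], 4)    # -> 0
#     round_to_points([0, 10, 100], 7)    # -> 10
#     round_to_points([0, 10, 100], 500)  # -> 100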
|
from iml_common.blockdevices.blockdevice_lvm_volume import BlockDeviceLvmVolume
from iml_common.test.command_capture_testcase import CommandCaptureTestCase
class TestBlockDeviceLvmVolume(CommandCaptureTestCase):
def setUp(self):
super(TestBlockDeviceLvmVolume, self).setUp()
self.blockdevice = BlockDeviceLvmVolume("lvm_volume", "/dev/mappper/lvg-test-lvm-test")
def test_uuid(self):
self.add_command(
("lvs", "--noheadings", "-o", "lv_uuid", "/dev/mappper/lvg-test-lvm-test"),
stdout=" CtSfyh-ThdO-Bg3i-EiKU-6knJ-Ix4D-ru49Py\n",
)
self.assertEqual("CtSfyh-ThdO-Bg3i-EiKU-6knJ-Ix4D-ru49Py", self.blockdevice.uuid)
|
#!/usr/bin/env python
# Visual Modulator Constellation
# Josh Sanz
# 2019-01-24
import sys
import six
from os import environ, listdir
from os.path import isfile, join
import argparse
from matplotlib import pyplot as plt
import numpy as np
def hex_to_rgb(value):
value = value.lstrip('#')
lv = len(value)
return tuple(int(value[i:i + lv // 3], 16) for i in range(0, lv, lv // 3))
def rgb_to_hex(rgb):
return '#%02x%02x%02x' % rgb
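# Quick sanity check for the two helpers above (illustrative comment only):
# hex_to_rgb('#1f77b4') -> (31, 119, 180) and rgb_to_hex((31, 119, 180)) -> '#1f77b4'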
def plot_mod_timelapse(config, data):
nlabels = len(data[config['mod_files'][0]])
colors = plt.rcParams['axes.prop_cycle'][:nlabels]
colors = [c['color'] for c in colors]
while len(colors) < nlabels:
colors *= 2
colors = colors[:nlabels]
# Collate data
x = np.zeros((len(data), nlabels))
y = np.zeros((len(data), nlabels))
for i, f in enumerate(config['mod_files']):
x[i, :] = np.real(data[f])
y[i, :] = np.imag(data[f])
xlim = [min(np.amin(x), -1), max(np.amax(x), 1)]
ylim = [min(np.amin(y), -1), max(np.amax(y), 1)]
alpha = np.linspace(0.25, 1, x.shape[0])
for i in range(nlabels):
# Set up color vector
rgb = hex_to_rgb(colors[i])
rgba = np.zeros((x.shape[0], 4))
rgba[:, 0] = rgb[0] / 255.
rgba[:, 1] = rgb[1] / 255.
rgba[:, 2] = rgb[2] / 255.
rgba[:, 3] = alpha
# Plot data
plt.scatter(x[:, i], y[:, i], c=rgba)
plt.title("Timelapse of Constellation")
plt.xlabel("Re")
plt.ylabel("Im")
plt.xlim(xlim)
plt.ylim(ylim)
if config['save']:
plt.savefig(join(config['dir'], config['prefix_mod'] + "_timelapse.png"))
plt.close()
else:
plt.show()
def plot_mod_individuals(config, data):
# Get axis limits
xlim = [-1, 1]
ylim = [-1, 1]
for d in six.itervalues(data):
x = np.real(d)
y = np.imag(d)
if min(x) < xlim[0]:
xlim[0] = min(x)
if max(x) > xlim[1]:
xlim[1] = max(x)
if min(y) < ylim[0]:
ylim[0] = min(y)
if max(y) > ylim[1]:
ylim[1] = max(y)
# Plot individual constellations
nlabels = len(data[config['mod_files'][0]])
colors = plt.rcParams['axes.prop_cycle'][:nlabels]
colors = [c['color'] for c in colors]
while len(colors) < nlabels:
colors *= 2
colors = colors[:nlabels]
for fname in config['mod_files']:
plt.scatter(np.real(data[fname]), np.imag(data[fname]), c=colors)
plt.title(fname[:-4])
plt.xlabel("Re")
plt.ylabel("Im")
plt.xlim(xlim)
plt.ylim(ylim)
if config['save']:
plt.savefig(join(config['dir'], fname[:-4] + ".png"))
plt.close()
else:
plt.show()
def plot_demod_individuals(config, data):
# Get axis limits
xlim = [-1, 1]
ylim = [-1, 1]
nlabels = 1
for d in six.itervalues(data):
iq = d['iq']
x = np.real(iq)
y = np.imag(iq)
if min(x) < xlim[0]:
xlim[0] = min(x)
if max(x) > xlim[1]:
xlim[1] = max(x)
if min(y) < ylim[0]:
ylim[0] = min(y)
if max(y) > ylim[1]:
ylim[1] = max(y)
l = d['labels']
nlabels = max(nlabels, np.unique(l).size)
# Plot individual constellations
colors = plt.rcParams['axes.prop_cycle'][:nlabels]
colors = [c['color'] for c in colors]
while len(colors) < nlabels:
colors *= 2
colors = np.array(colors)
for fname in config['demod_files']:
iq = data[fname]['iq']
labels = data[fname]['labels']
plt.scatter(iq.real, iq.imag, c=colors[labels])
for c in np.unique(labels):
c_iq = iq[labels == c]
center = (np.mean(c_iq.real), np.mean(c_iq.imag))
plt.annotate(c, center, fontsize=12, fontweight='bold')
plt.title(fname[:-4])
plt.xlabel("Re")
plt.ylabel("Im")
plt.xlim(xlim)
plt.ylim(ylim)
if config['save']:
plt.savefig(join(config['dir'], fname[:-4] + ".png"))
plt.close()
else:
plt.show()
def plot_ber_curves(config, data):
ylim = [0, 1]
for fname in config['ber_files']:
it = data[fname][:, 0]
ber = data[fname][:, 1]
plt.stem(it, ber)
plt.ylabel("BER")
plt.xlabel("Training Iterations")
plt.title(fname[:-4])
# plt.ylim(ylim)
if config['save']:
plt.savefig(join(config['dir'], fname[:-4] + ".png"))
plt.close()
else:
plt.show()
def parse_args():
"""Parse input arguments."""
save = False
parser = argparse.ArgumentParser(description="Display the constellations over time")
parser.add_argument("directory", help="The directory to read and plot constellations from",
nargs='?', default=environ["HOME"])
parser.add_argument("-d", "--prefix-demod", help="Filename prefix for filtering demod files",
default="neural_demod_constellation")
parser.add_argument("-m", "--prefix-mod", help="Filename prefix for filtering mod files",
default="neural_mod_constellation")
parser.add_argument("-s", "--save", help="Save the plots to disk", action="store_true")
ber_group = parser.add_mutually_exclusive_group()
ber_group.add_argument("-b", "--ber", help="Plot BER curves", action="store_true")
ber_group.add_argument("-B", "--BER", help="Plot BER curves only", action="store_true")
args = parser.parse_args()
if args.save:
save = True
dirname = args.directory
demod_files = sorted(filter(lambda x: (x.startswith(args.prefix_demod) and
x.endswith('npz') and
isfile(join(dirname, x))),
listdir(dirname)), key=lambda f: f[-19:-4])
mod_files = sorted(filter(lambda x: (x.startswith(args.prefix_mod) and
x.endswith('npy') and
isfile(join(dirname, x))),
listdir(dirname)), key=lambda f: f[-19:-4])
ber_files = sorted(filter(lambda x: (x.startswith('ber') and
x.endswith('.csv') and
isfile(join(dirname, x))),
listdir(dirname)))
return {'dir': dirname, 'demod_files': demod_files, 'mod_files': mod_files, 'save': save,
'prefix_demod': args.prefix_demod, 'prefix_mod': args.prefix_mod,
'ber_files': ber_files, 'ber': args.ber, 'ber_only': args.BER}
def main():
"""Parse inputs, parse file, plot results."""
config = parse_args()
if not config['ber_only']:
        # Plot demod constellations
data = {}
# Load in data
for fname in config['demod_files']:
data[fname] = np.load(join(config['dir'], fname))
# Plot individual constellations
plot_demod_individuals(data=data, config=config)
# Plot mod constellations
data = {}
# Load in data
for fname in config['mod_files']:
data[fname] = np.load(join(config['dir'], fname))
# Plot individual constellations
plot_mod_individuals(data=data, config=config)
# Plot a timelapse of constellation points
plot_mod_timelapse(data=data, config=config)
if config['ber'] or config['ber_only']:
# Plot ber curves
data = {}
for fname in config['ber_files']:
            data[fname] = np.loadtxt(join(config['dir'], fname), delimiter=',', skiprows=1)
plot_ber_curves(config=config, data=data)
# Exit
return 0
if __name__ == "__main__":
main()
sys.exit(0)
|
class NoPendingBlockAvailableError(Exception):
pass
|
# Generated by Django 2.1.5 on 2019-02-03 11:19
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('calls', '0005_auto_20190130_2011'),
]
operations = [
migrations.AddField(
model_name='call',
name='cost',
field=models.FloatField(default=0),
),
migrations.AlterField(
model_name='call',
name='duration',
field=models.PositiveIntegerField(default=1, validators=[django.core.validators.MinValueValidator(1)], verbose_name='Call duration in seconds'),
),
]
|
import os
import re
import gzip as gz
import json
def from_json(fname):
"""
Routine to extract the contents of a json file. no longer pretends to support PY2.
:param fname: json file, optionally gzipped
:return: a json-derived dict
"""
print('Loading JSON data from %s:' % fname)
    if bool(re.search(r'\.gz$', fname)):
with gz.open(fname, 'rt') as fp:
j = json.load(fp)
else:
with open(fname, 'r') as fp:
j = json.load(fp)
return j
def to_json(obj, fname, gzip=False):
    dirname = os.path.dirname(fname)
    if dirname and not os.path.exists(dirname):
        os.makedirs(dirname, exist_ok=True)
    if gzip is True:
        if not bool(re.search(r'\.gz$', fname)):
fname += '.gz'
try: # python3
with gz.open(fname, 'wt') as fp:
json.dump(obj, fp, indent=2, sort_keys=True)
except ValueError: # python2
with gz.open(fname, 'w') as fp:
json.dump(obj, fp, indent=2, sort_keys=True)
else:
with open(fname, 'w') as fp:
json.dump(obj, fp, indent=2, sort_keys=True)
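# A minimal usage sketch (hypothetical path): round-trip a dict through a gzipped
# JSON file. Note that to_json appends '.gz' to the filename when gzip=True.
#
#     to_json({'a': 1}, '/tmp/example/data.json', gzip=True)  # writes /tmp/example/data.json.gz
#     d = from_json('/tmp/example/data.json.gz')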
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from _v8 import ffi, lib
from . import exceptions
from . import context
__all__ = ['VM']
class VM(object):
"""
Holds the VM state (V8 isolate).\
Running scripts within a VM is thread-safe,\
but only a single thread will execute code\
at a given time (there is a Global Lock).\
It's feasible to run one VM per thread\
or to have a pre-initialized pool.
There may be many VMs per platform
:param platform: Initialized platform
:type platform: :py:class:`._Platform`
"""
def __init__(self, platform):
self._platform = platform
self._c_vm = None
def __enter__(self):
"""
See :py:func:`set_up` method for docs
"""
assert not self.is_alive()
assert self._platform.is_alive()
self._c_vm = ffi.new('v8cffi_vm_t **')
self._c_vm[0] = ffi.NULL
code = lib.v8cffi_vm_new(self._c_vm)
if code != lib.E_V8_OK:
raise exceptions.get_exception(code)
return self
def __exit__(self, *_, **__):
"""
See :py:func:`tear_down` method for docs
"""
assert self.is_alive()
assert self._platform.is_alive()
lib.v8cffi_vm_free(self._c_vm[0])
self._c_vm = None
def is_alive(self):
"""
Check the vm is initialized and was not exited
:return: Whether the vm is alive or not
:rtype: bool
"""
return self._c_vm is not None
def create_context(self):
"""
Create a :py:class:`.Context` for running\
JS scripts
:return: Instance of :py:class:`.Context`
:rtype: :py:class:`.Context`
"""
return context.Context(self)
def get_c_vm(self):
"""
@Private
Return the underlying C VM
:return: struct cdata
:rtype: :py:class:`ffi.CData<struct **>`
"""
return self._c_vm
def set_up(self):
"""
Initialize the VM.\
Remember to call :py:func:`tear_down`\
before exiting the application.\
It's recommended to use a ``with``\
statement instead of this method\
to ensure clean up
:raise V8MemoryError: if there\
is no memory for allocating it,\
the process should die afterwards anyway,\
there is little point in catching this
"""
return self.__enter__()
def tear_down(self):
"""
Destructs the VM
"""
return self.__exit__()
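# A minimal usage sketch (illustrative only; assumes an already-initialized
# platform object, and that the Context returned by ``create_context`` exposes a
# ``run_script`` method as in the public v8cffi API):
#
#     with VM(platform) as vm:
#         with vm.create_context() as ctx:
#             ctx.run_script('var x = 1 + 1;')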
|
##############################################################################
# Copyright (c) 2015 Ericsson AB and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
from __future__ import absolute_import
from __future__ import print_function
import logging
import pkg_resources
from oslo_serialization import jsonutils
import yardstick.ssh as ssh
from yardstick.benchmark.scenarios import base
LOG = logging.getLogger(__name__)
class Pktgen(base.Scenario):
"""Execute pktgen between two hosts
Parameters
packetsize - packet size in bytes without the CRC
type: int
unit: bytes
default: 60
number_of_ports - number of UDP ports to test
type: int
unit: na
default: 10
duration - duration of the test
type: int
unit: seconds
default: 20
"""
__scenario_type__ = "Pktgen"
TARGET_SCRIPT = 'pktgen_benchmark.bash'
def __init__(self, scenario_cfg, context_cfg):
self.scenario_cfg = scenario_cfg
self.context_cfg = context_cfg
self.setup_done = False
def setup(self):
"""scenario setup"""
self.target_script = pkg_resources.resource_filename(
'yardstick.benchmark.scenarios.networking',
Pktgen.TARGET_SCRIPT)
host = self.context_cfg['host']
target = self.context_cfg['target']
LOG.info("user:%s, target:%s", target['user'], target['ip'])
self.server = ssh.SSH.from_node(target, defaults={"user": "ubuntu"})
self.server.wait(timeout=600)
LOG.info("user:%s, host:%s", host['user'], host['ip'])
self.client = ssh.SSH.from_node(host, defaults={"user": "ubuntu"})
self.client.wait(timeout=600)
# copy script to host
self.client._put_file_shell(self.target_script, '~/pktgen.sh')
self.setup_done = True
def _iptables_setup(self):
"""Setup iptables on server to monitor for received packets"""
cmd = "sudo iptables -F; " \
"sudo iptables -A INPUT -p udp --dport 1000:%s -j DROP" \
% (1000 + self.number_of_ports)
LOG.debug("Executing command: %s", cmd)
status, _, stderr = self.server.execute(cmd)
if status:
raise RuntimeError(stderr)
def _iptables_get_result(self):
"""Get packet statistics from server"""
cmd = "sudo iptables -L INPUT -vnx |" \
"awk '/dpts:1000:%s/ {{printf \"%%s\", $1}}'" \
% (1000 + self.number_of_ports)
LOG.debug("Executing command: %s", cmd)
status, stdout, stderr = self.server.execute(cmd)
if status:
raise RuntimeError(stderr)
return int(stdout)
def run(self, result):
"""execute the benchmark"""
if not self.setup_done:
self.setup()
ipaddr = self.context_cfg["target"].get("ipaddr", '127.0.0.1')
options = self.scenario_cfg['options']
packetsize = options.get("packetsize", 60)
self.number_of_ports = options.get("number_of_ports", 10)
# if run by a duration runner
duration_time = self.scenario_cfg["runner"].get("duration", None) \
if "runner" in self.scenario_cfg else None
# if run by an arithmetic runner
arithmetic_time = options.get("duration", None)
if duration_time:
duration = duration_time
elif arithmetic_time:
duration = arithmetic_time
else:
duration = 20
self._iptables_setup()
cmd = "sudo bash pktgen.sh %s %s %s %s" \
% (ipaddr, self.number_of_ports, packetsize, duration)
LOG.debug("Executing command: %s", cmd)
status, stdout, stderr = self.client.execute(cmd)
if status:
raise RuntimeError(stderr)
result.update(jsonutils.loads(stdout))
result['packets_received'] = self._iptables_get_result()
result['packetsize'] = packetsize
if "sla" in self.scenario_cfg:
sent = result['packets_sent']
received = result['packets_received']
ppm = 1000000 * (sent - received) / sent
sla_max_ppm = int(self.scenario_cfg["sla"]["max_ppm"])
assert ppm <= sla_max_ppm, "ppm %d > sla_max_ppm %d; " \
% (ppm, sla_max_ppm)
def _test():
"""internal test function"""
key_filename = pkg_resources.resource_filename('yardstick.resources',
'files/yardstick_key')
ctx = {
'host': {
'ip': '10.229.47.137',
'user': 'root',
'key_filename': key_filename
},
'target': {
'ip': '10.229.47.137',
'user': 'root',
'key_filename': key_filename,
'ipaddr': '10.229.47.137',
}
}
logger = logging.getLogger('yardstick')
logger.setLevel(logging.DEBUG)
options = {'packetsize': 120}
args = {'options': options}
result = {}
p = Pktgen(args, ctx)
p.run(result)
print(result)
if __name__ == '__main__':
_test()
|
from sys import path
path.append("../QIK_Web/util/")
import constants
from sys import path
path.append("../ML_Models/DeepImageRetrieval")
from dirtorch import extract_features, test_dir, datasets
import numpy as np
is_initialized = False
net = None
image_data = []
def init():
print("dir_search :: init :: Start")
global net, is_initialized, image_data
if not is_initialized:
is_initialized = True
net = extract_features.load_model(constants.DIR_MODEL_PATH, 0, None)
# Reading the file containing the images.
for image in open(constants.DIR_CANDIDATE_IMAGE_DATA):
image_data.append(constants.TOMCAT_IP_ADDR + constants.IMAGE_DATA_DIR + image.split("/")[-1])
def dir_search(image_file, fetch_limit):
print("dir_search :: dir_search :: Start")
global net, image_data
    # Writing the image to a dataset file (closed before reading so the contents reach disk).
    with open(constants.DIR_QUERY_FILE_PATH, "w") as query_file:
        query_file.write(image_file + "\n")
    # Extracting features for the query image.
    dataset = datasets.ImageList(constants.DIR_QUERY_FILE_PATH)
extract_features.extract_features(dataset, net, '', pooling="gem", gemp=3, output=constants.DIR_QUERY_FEATURES_FILE)
bdescs = np.load(constants.DIR_CANDIDATES_FEATURES_FILE + '.npy')
qdescs = np.load(constants.DIR_QUERY_FEATURES_FILE + '.npy')
# Computing the distance matrix.
scores = test_dir.matmul(qdescs, bdescs)
# Get the top 20 indices
indices = sorted(range(len(scores[0])), key=lambda i: scores[0][i])[-(fetch_limit):]
# Preparing the return list.
ret_lst = []
for index in reversed(indices):
ret_lst.append(image_data[index])
print("dir_search :: dir_search :: ", ret_lst)
return ret_lst |
import gc
from queue import Queue, Empty
from threading import Thread
from time import sleep
import cv2
from torpido.config.constants import VIDEO_WIDTH
from torpido.util import resize
class Stream:
"""
Class that continuously gets frames from a VideoCapture object
with a dedicated thread, which actually speeds up the video
reading 2 times faster than working in the main UI Thread.
Attributes
----------
__Q : queue
python queue for storing the frames that are to be processed
stream : video capture
open cv stream object that read the video in frames
stopped : bool
stream is ended or the video is not ended yet
Examples
--------
Reads the video using the Thread and saving the frames in the queue
to process them. As the video is read by the Thread the speed increases
and locking functions of open cv can be skipped.
"""
def __init__(self, src):
cv2.setUseOptimized(True)
self.__Q, self.stream = Queue(maxsize=1200), cv2.VideoCapture(src)
self.stopped, self._thread = False, None
def start(self):
self._thread = Thread(target=self.__get, name="torpido.video.Stream", args=())
self._thread.daemon = True
self._thread.start()
return self
def __get(self):
while True:
if self.stopped:
break
if not self.__Q.full():
(grabbed, frame) = self.stream.read()
if not grabbed:
self.stream.release()
self.stopped = True
return
                # resize the frame to reduce memory usage, since the full resolution is not needed
frame = resize(frame, width=VIDEO_WIDTH)
self.__Q.put(frame)
else:
sleep(0.1)
self.stream.release()
def read(self):
try:
if self.stopped:
return None
data = self.__Q.get(timeout=3)
except Empty:
data = None
return data
def get_capture(self):
return self.stream
def get_queue_size(self):
return self.__Q.qsize()
def more(self):
return not self.stopped
def stop(self):
self.stopped = True
if self._thread is not None:
self._thread.join()
self._thread = None
self.stream.release()
gc.collect()
def __del__(self):
del self.__Q
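# A minimal usage sketch (illustrative only; "video.mp4" is a placeholder path):
#
#     stream = Stream("video.mp4").start()
#     while stream.more():
#         frame = stream.read()
#         if frame is None:
#             break
#         # ... process the frame ...
#     stream.stop()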
|
import matplotlib
import datetime
import numpy as np
from dateutil.tz import tzutc
def polyfit(dates, levels, p):
"""Task 2F: Returns a tuple of the polynomial object and any shift of the time axis"""
# Convert dates to floats
dates_float = matplotlib.dates.date2num(dates)
# Find the time shift d0
d0 = dates_float[0]
dates_shifted = []
for i in range(len(dates_float)):
dates_shifted.append(dates_float[i] - dates_float[0])
# Find coefficients of best-fit polynomial f(x) of degree p
p_coeff = np.polyfit(dates_shifted, levels, p)
# Convert coefficient into a polynomial that can be evaluated,
# e.g. poly(0.3)
poly = np.poly1d(p_coeff)
poly_tuples = (poly, d0)
return poly_tuples
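# A minimal usage sketch for ``polyfit`` (illustrative only): evaluate the fitted
# curve at the original dates by applying the same time-axis shift.
#
#     poly, d0 = polyfit(dates, levels, p=4)
#     fitted = poly(matplotlib.dates.date2num(dates) - d0)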
def warnings(stations):
"""Task 2G: Returns lists of risk-assessed towns"""
# Create list of unique towns
town_list0 = []
for station in stations:
town_list0.append(station.town)
town_list = list(set(town_list0))
town_list.sort()
# Create town lists
severe_risk = []
high_risk = []
moderate_risk = []
low_risk = []
# Assess flood risks
    for station in stations:
        level = station.relative_water_level()
        if level is not None:
            level = float(level)
            if level >= 2:
                # Town is at severe risk
                severe_risk.append(station.town)
            elif 1.5 <= level < 2:
                # Town is at high risk
                high_risk.append(station.town)
            elif 1.0 <= level < 1.5:
                # Town is at moderate risk
                moderate_risk.append(station.town)
            elif level < 1.0:
                # Town is at low risk
                low_risk.append(station.town)
# Append lists
warnings = [severe_risk, high_risk, moderate_risk, low_risk]
return warnings |
# Written in python2.7, meant to be run in the saliency conda env.
import os
from cfg import *
import numpy as np
import argparse
cfg = flickr_cfg()
with open(cfg.val_file, "r") as f:
lines = f.readlines()
val_ex = []
for line in lines:
val_ex.append(int(line.rstrip().split(".jpg")[0]))
with open(cfg.test_file, "r") as f:
lines = f.readlines()
test_ex = []
for line in lines:
test_ex.append(int(line.rstrip().split(".jpg")[0]))
with open(cfg.annotations_path, "r") as f:
lines = f.readlines()
file_id_to_annotation_map = {} # int: str
for example in lines:
filename, annotation = example.split("\t")
file_id = int(filename.split(".jpg")[0]) # removes the .jpg
if file_id in test_ex:
file_id_to_annotation_map[file_id] = annotation.rstrip()
parser = argparse.ArgumentParser()
parser.add_argument("--gpu", type=str, required=False)
parser.add_argument("--checkpoint", type=int, required=True)
parser.add_argument("--num-evals", type=int, required=True)
args = parser.parse_args()
np.random.seed(0)
file_ids_to_eval = np.random.choice(list(file_id_to_annotation_map.keys()), args.num_evals)
print("file_ids_to_eval", file_ids_to_eval)
for eval_id in file_ids_to_eval:
caption = file_id_to_annotation_map[eval_id]
command = 'python visualization.py --dataset Flickr30k --media_id {} --checkpoint {} --sentence "{}" --gpu {}'.format(eval_id, args.checkpoint, caption, args.gpu)
print(command)
os.system(command)
# produce local copy commands
commands = []
for eval_id in file_ids_to_eval:
output_path = "/scratch/cluster/albertyu/dev/caption-guided-saliency/output_samples"
command = "scp titan1:{}/{}/*.png .".format(output_path, eval_id)
commands.append(command)
print("Copy commands")
print(commands)
|
from dataclasses import dataclass, field
from typing import Any
import pygame as pg
from config.constants import CELLSIZE
from .screen import Screen
@dataclass(order=True)
class PrioritizedItem:
priority: int
item: Any = field(compare=False)
class GameObject:
priority = 100
def __init__(self):
self.prioritized_item = PrioritizedItem(self.priority, self)
Screen.game_objects.put(self.prioritized_item)
self.screen = Screen.instance
def render(self):
pass
class Sprite(pg.sprite.Sprite):
def __init__(self, group, color, x=None, y=None):
super().__init__(group)
self.image = pg.Surface((CELLSIZE, CELLSIZE))
self.image.fill(color)
self.rect = self.image.get_rect()
self.x = x
self.y = y
self.rect.x = self.x * CELLSIZE
self.rect.y = self.y * CELLSIZE
def render(self):
pass
|
from __future__ import with_statement
from __future__ import absolute_import
from udsoncan.client import Client
from udsoncan import services
from udsoncan.exceptions import *
from test.ClientServerTest import ClientServerTest
class TestECUReset(ClientServerTest):
def __init__(self, *args, **kwargs):
ClientServerTest.__init__(self, *args, **kwargs)
def test_ecu_reset_success(self):
request = self.conn.touserqueue.get(timeout=0.2)
self.assertEqual(request, "\x11\x55")
self.conn.fromuserqueue.put("\x51\x55") # Positive response
def _test_ecu_reset_success(self):
response = self.udsclient.ecu_reset(0x55)
self.assertTrue(response.positive)
self.assertEqual(response.service_data.reset_type_echo, 0x55)
def test_ecu_reset_success_spr(self):
request = self.conn.touserqueue.get(timeout=0.2)
self.assertEqual(request, "\x11\xD5")
self.conn.fromuserqueue.put(u"wait") # Synchronize
def _test_ecu_reset_success_spr(self):
with self.udsclient.suppress_positive_response:
response = self.udsclient.ecu_reset(0x55)
self.assertEqual(response, None)
self.conn.fromuserqueue.get(timeout=0.2) #Avoid closing connection prematurely
def test_ecu_reset_success_pdt(self):
request = self.conn.touserqueue.get(timeout=0.2)
self.assertEqual(request, "\x11\x04")
self.conn.fromuserqueue.put("\x51\x04\x23") # Positive response
def _test_ecu_reset_success_pdt(self):
response = self.udsclient.ecu_reset(services.ECUReset.ResetType.enableRapidPowerShutDown)
self.assertTrue(response.positive)
self.assertEqual(response.service_data.reset_type_echo, services.ECUReset.ResetType.enableRapidPowerShutDown)
self.assertEqual(response.service_data.powerdown_time, 0x23)
def test_ecu_reset_denied_exception(self):
request = self.conn.touserqueue.get(timeout=0.2)
self.assertEqual(request, "\x11\x55")
self.conn.fromuserqueue.put("\x7F\x11\x33") #Security Access Denied
def _test_ecu_reset_denied_exception(self):
with self.assertRaises(NegativeResponseException) as handle:
self.udsclient.ecu_reset(0x55)
response = handle.exception.response
self.assertTrue(response.valid)
self.assertTrue(issubclass(response.service, services.ECUReset))
self.assertEqual(response.code, 0x33)
def test_ecu_reset_denied_no_exception(self):
self.wait_request_and_respond("\x7F\x11\x33") #Security Access Denied
def _test_ecu_reset_denied_no_exception(self):
self.udsclient.config[u'exception_on_negative_response'] = False
response = self.udsclient.ecu_reset(0x55)
self.assertTrue(response.valid)
self.assertFalse(response.positive)
self.assertTrue(issubclass(response.service, services.ECUReset))
self.assertEqual(response.code, 0x33)
def test_ecu_reset_invalidservice_exception(self):
self.wait_request_and_respond("\x00\x55") #Inexistent Service
def _test_ecu_reset_invalidservice_exception(self):
with self.assertRaises(InvalidResponseException) as handle:
self.udsclient.ecu_reset(0x55)
def test_ecu_reset_invalidservice_no_exception(self):
self.wait_request_and_respond("\x00\x55") #Inexistent Service
def _test_ecu_reset_invalidservice_no_exception(self):
self.udsclient.config[u'exception_on_invalid_response'] = False
response = self.udsclient.ecu_reset(0x55)
def test_ecu_reset_wrongservice_exception(self):
self.wait_request_and_respond("\x7E\x00") # Valid but wrong service (Tester Present)
def _test_ecu_reset_wrongservice_exception(self):
with self.assertRaises(UnexpectedResponseException) as handle:
self.udsclient.ecu_reset(0x55)
def test_ecu_reset_wrongservice_no_exception(self):
self.wait_request_and_respond("\x7E\x00") # Valid but wrong service (Tester Present)
def _test_ecu_reset_wrongservice_no_exception(self):
self.udsclient.config[u'exception_on_unexpected_response'] = False
response = self.udsclient.ecu_reset(0x55)
self.assertTrue(response.valid)
self.assertTrue(response.unexpected)
def test_ecu_reset_missing_pdt_exception(self):
        self.wait_request_and_respond("\x51\x04") # Positive response with missing power down time
def _test_ecu_reset_missing_pdt_exception(self):
with self.assertRaises(InvalidResponseException) as handle:
self.udsclient.ecu_reset(services.ECUReset.ResetType.enableRapidPowerShutDown)
def test_ecu_reset_missing_pdt_no_exception(self):
        self.wait_request_and_respond("\x51\x04") # Positive response with missing power down time
def _test_ecu_reset_missing_pdt_no_exception(self):
self.udsclient.config[u'exception_on_invalid_response'] = False
response = self.udsclient.ecu_reset(services.ECUReset.ResetType.enableRapidPowerShutDown)
self.assertFalse(response.valid)
def test_bad_param(self):
pass
def _test_bad_param(self):
with self.assertRaises(ValueError):
response = self.udsclient.ecu_reset(0x100)
with self.assertRaises(ValueError):
response = self.udsclient.ecu_reset(-1)
|
class PlotOptions:
def __init__(self, plot_type="3d_plot", dims_plot=[], slices=[]):
        if plot_type not in ["2d_plot", "3d_plot"]:
            raise Exception("Illegal plot type!")
        if plot_type == "2d_plot":
            if len(dims_plot) != 2:
                raise Exception("Make sure that dims_plot size is 2!")
        if plot_type == "3d_plot":
            if len(dims_plot) != 3:
                raise Exception("Make sure that dims_plot size is 3!")
self.dims_plot = dims_plot
self.plot_type = plot_type
self.slices = slices
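# Example usage (hypothetical values): plot grid dimensions 0, 1 and 3 in 3D while
# slicing the remaining dimensions at the given indices:
#   opts = PlotOptions(plot_type="3d_plot", dims_plot=[0, 1, 3], slices=[0])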
|
import os
import torch
import elf
import numpy as np
import wandb
from elf.segmentation.features import compute_rag
from torch.nn import functional as F
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import DataLoader
from collections import namedtuple
import matplotlib.pyplot as plt
from matplotlib import cm
from multiprocessing import Process, Lock
import threading
import shutil
from skimage.morphology import dilation
from environments.multicut import MulticutEmbeddingsEnv, State
from data.spg_dset import SpgDset
from models.agent_model import Agent
from models.feature_extractor import FeExtractor
from utils.exploration_functions import RunningAverage
from utils.general import soft_update_params, set_seed_everywhere, get_colored_edges_in_sseg, pca_project, random_label_cmap
from utils.replay_memory import TransitionData_ts
from utils.graphs import get_joint_sg_logprobs_edges
from utils.distances import CosineDistance, L2Distance
from utils.matching import matching
from utils.yaml_conv_parser import dict_to_attrdict
from utils.training_helpers import update_env_data, supervised_policy_pretraining, state_to_cpu, Forwarder
from utils.metrics import AveragePrecision, ClusterMetrics
# from timeit import default_timer as timer
class AgentA2CTrainer(object):
def __init__(self, cfg, global_count):
super(AgentA2CTrainer, self).__init__()
assert torch.cuda.device_count() == 1
self.device = torch.device("cuda:0")
torch.cuda.set_device(self.device)
torch.set_default_tensor_type(torch.FloatTensor)
self.cfg = cfg
self.global_count = global_count
self.memory = TransitionData_ts(capacity=self.cfg.mem_size)
self.best_val_reward = -np.inf
if self.cfg.distance == 'cosine':
self.distance = CosineDistance()
else:
self.distance = L2Distance()
self.fe_ext = FeExtractor(dict_to_attrdict(self.cfg.backbone), self.distance, cfg.fe_delta_dist, self.device)
self.fe_ext.embed_model.load_state_dict(torch.load(self.cfg.fe_model_name))
self.fe_ext.cuda(self.device)
self.model = Agent(self.cfg, State, self.distance, self.device, with_temp=False)
wandb.watch(self.model)
self.model.cuda(self.device)
self.model_mtx = Lock()
MovSumLosses = namedtuple('mov_avg_losses', ('actor', 'critic'))
Scalers = namedtuple('Scalers', ('critic', 'actor'))
OptimizerContainer = namedtuple('OptimizerContainer',
('actor', 'critic', 'actor_shed', 'critic_shed'))
actor_optimizer = torch.optim.Adam(self.model.actor.parameters(), lr=self.cfg.actor_lr)
critic_optimizer = torch.optim.Adam(self.model.critic.parameters(), lr=self.cfg.critic_lr)
lr_sched_cfg = dict_to_attrdict(self.cfg.lr_sched)
bw = lr_sched_cfg.mov_avg_bandwidth
off = lr_sched_cfg.mov_avg_offset
weights = np.linspace(lr_sched_cfg.weight_range[0], lr_sched_cfg.weight_range[1], bw)
weights = weights / weights.sum() # make them sum up to one
shed = lr_sched_cfg.torch_sched
self.mov_sum_losses = MovSumLosses(RunningAverage(weights, band_width=bw, offset=off),
RunningAverage(weights, band_width=bw, offset=off))
self.optimizers = OptimizerContainer(actor_optimizer, critic_optimizer,
*[ReduceLROnPlateau(opt, patience=shed.patience,
threshold=shed.threshold, min_lr=shed.min_lr,
factor=shed.factor) for opt in
(actor_optimizer, critic_optimizer)])
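        # One GradScaler per optimizer so the mixed-precision loss scaling for critic and
        # actor can be stepped and updated independently.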
self.scalers = Scalers(torch.cuda.amp.GradScaler(), torch.cuda.amp.GradScaler())
self.forwarder = Forwarder()
if self.cfg.agent_model_name != "":
self.model.load_state_dict(torch.load(self.cfg.agent_model_name))
# finished with prepping
for param in self.fe_ext.parameters():
param.requires_grad = False
self.train_dset = SpgDset(self.cfg.data_dir, dict_to_attrdict(self.cfg.patch_manager), dict_to_attrdict(self.cfg.data_keys), max(self.cfg.s_subgraph))
self.val_dset = SpgDset(self.cfg.val_data_dir, dict_to_attrdict(self.cfg.patch_manager), dict_to_attrdict(self.cfg.data_keys), max(self.cfg.s_subgraph))
self.segm_metric = AveragePrecision()
self.clst_metric = ClusterMetrics()
self.global_counter = 0
def validate(self):
"""validates the prediction against the method of clustering the embedding space"""
env = MulticutEmbeddingsEnv(self.fe_ext, self.cfg, self.device)
if self.cfg.verbose:
print("\n\n###### start validate ######", end='')
self.model.eval()
n_examples = len(self.val_dset)
#taus = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
#rl_scores, keys = [], None
self.clst_metric.reset()
map_scores = []
ex_raws, ex_sps, ex_gts, ex_mc_gts, ex_embeds, ex_rl, edge_ids, rewards, actions = [], [], [], [], [], [], [], [], []
dloader = iter(DataLoader(self.val_dset, batch_size=1, shuffle=False, pin_memory=True, num_workers=0))
acc_reward = 0
for it in range(n_examples):
update_env_data(env, dloader, self.val_dset, self.device, with_gt_edges="sub_graph_dice" in self.cfg.reward_function)
env.reset()
state = env.get_state()
self.model_mtx.acquire()
try:
distr, _, _, _, _ = self.forwarder.forward(self.model, state, State, self.device, grad=False, post_data=False)
finally:
self.model_mtx.release()
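            # Deterministic action for validation: squash the mean (loc) of the predicted
            # distribution into (0, 1) instead of sampling.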
action = torch.sigmoid(distr.loc)
reward = env.execute_action(action, tau=0.0, train=False)
rew = reward[-1].item() if self.cfg.reward_function == "sub_graph_dice" else reward[-2].item()
acc_reward += rew
if self.cfg.verbose:
print(f"\nstep: {it}; mean_loc: {round(distr.loc.mean().item(), 5)}; mean reward: {round(rew, 5)}", end='')
embeddings = env.embeddings[0].cpu().numpy()
gt_seg = env.gt_seg[0].cpu().numpy()
gt_mc = cm.prism(env.gt_soln[0].cpu()/env.gt_soln[0].max().item()) if env.gt_edge_weights is not None else torch.zeros(env.raw.shape[-2:])
rl_labels = env.current_soln.cpu().numpy()[0]
ex_embeds.append(pca_project(embeddings, n_comps=3))
ex_raws.append(env.raw[0].cpu().permute(1, 2, 0).squeeze())
ex_sps.append(env.init_sp_seg[0].cpu())
ex_mc_gts.append(gt_mc)
ex_gts.append(gt_seg)
ex_rl.append(rl_labels)
edge_ids.append(env.edge_ids)
rewards.append(reward[-1])
actions.append(action)
map_scores.append(self.segm_metric(rl_labels, gt_seg))
self.clst_metric(rl_labels, gt_seg)
'''
_rl_scores = matching(gt_seg, rl_labels, thresh=taus, criterion='iou', report_matches=False)
if it == 0:
for tau_it in range(len(_rl_scores)):
rl_scores.append(np.array(list(map(float, list(_rl_scores[tau_it]._asdict().values())[1:]))))
keys = list(_rl_scores[0]._asdict().keys())[1:]
else:
for tau_it in range(len(_rl_scores)):
                    rl_scores[tau_it] += np.array(list(map(float, list(_rl_scores[tau_it]._asdict().values())[1:])))
'''
'''
div = np.ones_like(rl_scores[0])
for i, key in enumerate(keys):
if key not in ('fp', 'tp', 'fn'):
div[i] = 10
for tau_it in range(len(rl_scores)):
rl_scores[tau_it] = dict(zip(keys, rl_scores[tau_it] / div))
fig, axs = plt.subplots(1, 2, figsize=(10, 10))
plt.subplots_adjust(hspace=.5)
for m in ('precision', 'recall', 'accuracy', 'f1'):
y = [s[m] for s in rl_scores]
data = [[x, y] for (x, y) in zip(taus, y)]
table = wandb.Table(data=data, columns=["IoU_threshold", m])
wandb.log({"validation/" + m: wandb.plot.line(table, "IoU_threshold", m, stroke=None, title=m)})
axs[0].plot(taus, [s[m] for s in rl_scores], '.-', lw=2, label=m)
axs[0].set_ylabel('Metric value')
axs[0].grid()
axs[0].legend(bbox_to_anchor=(.8, 1.65), loc='upper left', fontsize='xx-small')
axs[0].set_title('RL method')
axs[0].set_xlabel(r'IoU threshold $\tau$')
for m in ('fp', 'tp', 'fn'):
y = [s[m] for s in rl_scores]
data = [[x, y] for (x, y) in zip(taus, y)]
table = wandb.Table(data=data, columns=["IoU_threshold", m])
wandb.log({"validation/" + m: wandb.plot.line(table, "IoU_threshold", m, stroke=None, title=m)})
axs[1].plot(taus, [s[m] for s in rl_scores], '.-', lw=2, label=m)
axs[1].set_ylabel('Number #')
axs[1].grid()
axs[1].legend(bbox_to_anchor=(.87, 1.6), loc='upper left', fontsize='xx-small');
axs[1].set_title('RL method')
axs[1].set_xlabel(r'IoU threshold $\tau$')
#wandb.log({"validation/metrics": [wandb.Image(fig, caption="metrics")]})
plt.close('all')
'''
splits, merges, are, arp, arr = self.clst_metric.dump()
wandb.log({"validation/acc_reward": acc_reward})
wandb.log({"validation/mAP": np.mean(map_scores)}, step=self.global_counter)
wandb.log({"validation/UnderSegmVI": splits}, step=self.global_counter)
wandb.log({"validation/OverSegmVI": merges}, step=self.global_counter)
wandb.log({"validation/ARE": are}, step=self.global_counter)
wandb.log({"validation/ARP": arp}, step=self.global_counter)
wandb.log({"validation/ARR": arr}, step=self.global_counter)
        # do the lr scheduling
self.optimizers.critic_shed.step(acc_reward)
self.optimizers.actor_shed.step(acc_reward)
if acc_reward > self.best_val_reward:
self.best_val_reward = acc_reward
wandb.run.summary["validation_reward"] = acc_reward
torch.save(self.model.state_dict(), os.path.join(wandb.run.dir, "best_checkpoint_agent.pth"))
if self.cfg.verbose:
print("\n###### finish validate ######\n", end='')
label_cm = random_label_cmap(zeroth=1.0)
label_cm.set_bad(alpha=0)
for i in self.cfg.store_indices:
fig, axs = plt.subplots(2, 3 if self.cfg.reward_function == "sub_graph_dice" else 4 , sharex='col',
figsize=(9, 5), sharey='row', gridspec_kw={'hspace': 0, 'wspace': 0})
axs[0, 0].imshow(ex_gts[i], cmap=random_label_cmap(), interpolation="none")
axs[0, 0].set_title('gt', y=1.05, size=10)
axs[0, 0].axis('off')
if ex_raws[i].ndim == 3:
if ex_raws[i].shape[-1] > 2:
axs[0, 1].imshow(ex_raws[i][..., :3], cmap="gray")
else:
axs[0, 1].imshow(ex_raws[i][..., 0], cmap="gray")
else:
axs[1, 1].imshow(ex_raws[i], cmap="gray")
axs[0, 1].set_title('raw image', y=1.05, size=10)
axs[0, 1].axis('off')
axs[0, 2].imshow(ex_sps[i], cmap=random_label_cmap(), interpolation="none")
axs[0, 2].set_title('superpixels', y=1.05, size=10)
axs[0, 2].axis('off')
axs[1, 0].imshow(ex_embeds[i])
axs[1, 0].set_title('pc proj 1-3', y=-0.15, size=10)
axs[1, 0].axis('off')
if ex_raws[i].ndim == 3:
if ex_raws[i].shape[-1] > 1:
axs[1, 1].imshow(ex_raws[i][..., -1], cmap="gray")
else:
axs[1, 1].imshow(ex_raws[i][..., 0], cmap="gray")
else:
axs[1, 1].imshow(ex_raws[i], cmap="gray")
axs[1, 1].set_title('sp edge', y=-0.15, size=10)
axs[1, 1].axis('off')
axs[1, 2].imshow(ex_rl[i], cmap=random_label_cmap(), interpolation="none")
axs[1, 2].set_title('prediction', y=-0.15, size=10)
axs[1, 2].axis('off')
if self.cfg.reward_function != "sub_graph_dice":
frame_rew, scores_rew, bnd_mask = get_colored_edges_in_sseg(ex_sps[i][None].float(), edge_ids[i].cpu(), rewards[i].cpu())
frame_act, scores_act, _ = get_colored_edges_in_sseg(ex_sps[i][None].float(), edge_ids[i].cpu(), 1 - actions[i].cpu().squeeze())
bnd_mask = torch.from_numpy(dilation(bnd_mask.cpu().numpy()))
frame_rew = np.stack([dilation(frame_rew.cpu().numpy()[..., i]) for i in range(3)], -1)
frame_act = np.stack([dilation(frame_act.cpu().numpy()[..., i]) for i in range(3)], -1)
                ex_rl[i] = ex_rl[i].squeeze().astype(float)
ex_rl[i][bnd_mask] = np.nan
axs[1, 3].imshow(frame_rew, interpolation="none")
axs[1, 3].imshow(ex_rl[i], cmap=label_cm, alpha=0.8, interpolation="none")
axs[1, 3].set_title("rewards 1:g, 0:r", y=-0.2)
axs[1, 3].axis('off')
axs[0, 3].imshow(frame_act, interpolation="none")
axs[0, 3].imshow(ex_rl[i], cmap=label_cm, alpha=0.8, interpolation="none")
axs[0, 3].set_title("actions 0:g, 1:r", y=1.05, size=10)
axs[0, 3].axis('off')
wandb.log({"validation/sample_" + str(i): [wandb.Image(fig, caption="sample images")]},
step=self.global_counter)
plt.close('all')
def update_critic(self, obs, action, reward):
self.optimizers.critic.zero_grad()
with torch.cuda.amp.autocast(enabled=True):
current_Q, side_loss = self.forwarder.forward(self.model, obs, State, self.device, actions=action)
critic_loss = torch.tensor([0.0], device=current_Q[0].device)
mean_reward = 0
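            # Critic targets are the observed (detached) per-subgraph rewards; the loss is the
            # MSE per subgraph size, averaged below and combined with the weighted side loss.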
for i, sz in enumerate(self.cfg.s_subgraph):
target_Q = reward[i]
target_Q = target_Q.detach()
critic_loss = critic_loss + F.mse_loss(current_Q[i], target_Q)
mean_reward += reward[i].mean()
critic_loss = critic_loss / len(self.cfg.s_subgraph) + self.cfg.side_loss_weight * side_loss
self.scalers.critic.scale(critic_loss).backward()
self.scalers.critic.step(self.optimizers.critic)
self.scalers.critic.update()
return critic_loss.item(), mean_reward / len(self.cfg.s_subgraph)
def update_actor(self, obs, reward, expl_action):
self.optimizers.actor.zero_grad()
with torch.cuda.amp.autocast(enabled=True):
expl_action = None
distribution, actor_Q, action, side_loss = self.forwarder.forward(self.model, obs, State, self.device,
expl_action=expl_action, policy_opt=True)
log_prob = distribution.log_prob(action)
actor_loss = torch.tensor([0.0], device=actor_Q[0].device)
entropy_loss = torch.tensor([0.0], device=actor_Q[0].device)
_log_prob, sg_entropy = [], []
for i, sz in enumerate(self.cfg.s_subgraph):
ret = get_joint_sg_logprobs_edges(log_prob, distribution.scale, obs, i, sz)
_log_prob.append(ret[0])
sg_entropy.append(ret[1])
actor_loss = actor_loss + (-(_log_prob[i] * actor_Q[i]).mean())
# actor_loss = actor_loss - actor_Q[i].mean()
actor_loss = actor_loss / len(self.cfg.s_subgraph) + self.cfg.side_loss_weight * side_loss
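            # Entropy target interpolated within entropy_range: a low reward pushes it towards
            # entropy_range[1] (more exploration), a high reward towards entropy_range[0].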
min_entropy = (self.cfg.entropy_range[1] - self.cfg.entropy_range[0]) * (1 - reward[-1]) + self.cfg.entropy_range[0]
for i, sz in enumerate(self.cfg.s_subgraph):
min_entropy = min_entropy.to(_log_prob[i].device).squeeze()
entropy = sg_entropy[i] if self.cfg.use_closed_form_entropy else -_log_prob[i]
entropy_loss = entropy_loss + ((entropy - (self.cfg.s_subgraph[i] * min_entropy))**2).mean()
entropy_loss = entropy_loss / len(self.cfg.s_subgraph)
actor_loss = actor_loss + 0.001 * entropy_loss
self.scalers.actor.scale(actor_loss).backward()
self.scalers.actor.step(self.optimizers.actor)
self.scalers.actor.update()
return actor_loss.item(), min_entropy, distribution.loc.mean().item()
def _step(self, step):
actor_loss, min_entropy, loc_mean = None, None, None
(obs, action, reward), sample_idx = self.memory.sample()
critic_loss, mean_reward = self.update_critic(obs, action, reward)
self.memory.report_sample_loss(critic_loss + mean_reward, sample_idx)
self.mov_sum_losses.critic.apply(critic_loss)
wandb.log({"loss/critic": critic_loss}, step=self.global_counter)
if self.cfg.actor_update_after < step and step % self.cfg.actor_update_frequency == 0:
actor_loss, min_entropy, loc_mean = self.update_actor(obs, reward, action)
self.mov_sum_losses.actor.apply(actor_loss)
wandb.log({"loss/actor": actor_loss}, step=self.global_counter)
if step % self.cfg.post_stats_frequency == 0:
if min_entropy != "nl":
wandb.log({"min_entropy": min_entropy}, step=self.global_counter)
wandb.log({"mov_avg/critic": self.mov_sum_losses.critic.avg}, step=self.global_counter)
wandb.log({"mov_avg/actor": self.mov_sum_losses.actor.avg}, step=self.global_counter)
wandb.log({"lr/critic": self.optimizers.critic_shed.optimizer.param_groups[0]['lr']}, step=self.global_counter)
wandb.log({"lr/actor": self.optimizers.actor_shed.optimizer.param_groups[0]['lr']}, step=self.global_counter)
self.global_counter = self.global_counter + 1
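        # Soft (Polyak-style) update of the target critic towards the online critic,
        # blended by critic_tau (see soft_update_params).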
if step % self.cfg.critic_target_update_frequency == 0:
soft_update_params(self.model.critic, self.model.critic_tgt, self.cfg.critic_tau)
return critic_loss, actor_loss, loc_mean
def train_until_finished(self):
while self.global_count.value() <= self.cfg.T_max + self.cfg.mem_size:
self.model_mtx.acquire()
try:
stats = [[], [], []]
for i in range(self.cfg.n_updates_per_step):
_stats = self._step(self.global_count.value())
[s.append(_s) for s, _s in zip(stats, _stats)]
for j in range(len(stats)):
if any([_s is None for _s in stats[j]]):
stats[j] = "nl"
else:
stats[j] = round(sum(stats[j])/self.cfg.n_updates_per_step, 5)
if self.cfg.verbose:
print(f"step: {self.global_count.value()}; mean_loc: {stats[-1]}; n_explorer_steps {self.memory.push_count}", end="")
print(f"; cl: {stats[0]}; acl: {stats[1]}")
finally:
self.model_mtx.release()
self.global_count.increment()
self.memory.reset_push_count()
if self.global_count.value() % self.cfg.validatoin_freq == 0:
self.validate()
# Acts and trains model
def train_and_explore(self, rn):
self.global_count.reset()
set_seed_everywhere(rn)
wandb.config.random_seed = rn
if self.cfg.verbose:
print('###### start training ######')
print('Running on device: ', self.device)
print('found ', self.train_dset.length, " training data patches")
print('found ', self.val_dset.length, "validation data patches")
print('training with seed: ' + str(rn))
explorers = []
for i in range(self.cfg.n_explorers):
explorers.append(threading.Thread(target=self.explore))
[explorer.start() for explorer in explorers]
self.memory.is_full_event.wait()
trainer = threading.Thread(target=self.train_until_finished)
trainer.start()
trainer.join()
self.global_count.set(self.cfg.T_max + self.cfg.mem_size + 4)
[explorer.join() for explorer in explorers]
self.memory.clear()
del self.memory
# torch.save(self.model.state_dict(), os.path.join(wandb.run.dir, "last_checkpoint_agent.pth"))
if self.cfg.verbose:
print('\n\n###### training finished ######')
return
def explore(self):
env = MulticutEmbeddingsEnv(self.fe_ext, self.cfg, self.device)
tau = 1
while self.global_count.value() <= self.cfg.T_max + self.cfg.mem_size:
dloader = iter(DataLoader(self.train_dset, batch_size=self.cfg.batch_size, shuffle=True, pin_memory=True, num_workers=0))
for iteration in range((len(self.train_dset) // self.cfg.batch_size) * self.cfg.data_update_frequency):
if iteration % self.cfg.data_update_frequency == 0:
update_env_data(env, dloader, self.train_dset, self.device, with_gt_edges="sub_graph_dice" in self.cfg.reward_function)
env.reset()
state = env.get_state()
if not self.memory.is_full():
action = torch.rand((env.edge_ids.shape[-1], 1), device=self.device)
else:
self.model_mtx.acquire()
try:
distr, _, action, _, _ = self.forwarder.forward(self.model, state, State, self.device, grad=False)
finally:
self.model_mtx.release()
reward = env.execute_action(action, tau=max(0, tau))
self.memory.push(state_to_cpu(state, State), action, reward)
if self.global_count.value() > self.cfg.T_max + self.cfg.mem_size:
break
return
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from oslo_config import cfg
from keystoneclient import _discover
from keystoneclient.auth.identity.generic import base
from keystoneclient.auth.identity import v2
from keystoneclient.auth.identity import v3
from keystoneclient import utils
LOG = logging.getLogger(__name__)
def get_options():
return [
cfg.StrOpt('user-id', help='User id'),
cfg.StrOpt('user-name', dest='username', help='Username',
deprecated_name='username'),
cfg.StrOpt('user-domain-id', help="User's domain id"),
cfg.StrOpt('user-domain-name', help="User's domain name"),
cfg.StrOpt('password', help="User's password"),
]
class Password(base.BaseGenericPlugin):
"""A common user/password authentication plugin.
:param string username: Username for authentication.
:param string user_id: User ID for authentication.
:param string password: Password for authentication.
:param string user_domain_id: User's domain ID for authentication.
:param string user_domain_name: User's domain name for authentication.
"""
@utils.positional()
def __init__(self, auth_url, username=None, user_id=None, password=None,
user_domain_id=None, user_domain_name=None, **kwargs):
super(Password, self).__init__(auth_url=auth_url, **kwargs)
self._username = username
self._user_id = user_id
self._password = password
self._user_domain_id = user_domain_id
self._user_domain_name = user_domain_name
def create_plugin(self, session, version, url, raw_status=None):
if _discover.version_match((2,), version):
if self._user_domain_id or self._user_domain_name:
# If you specify any domain parameters it won't work so quit.
return None
return v2.Password(auth_url=url,
user_id=self._user_id,
username=self._username,
password=self._password,
**self._v2_params)
elif _discover.version_match((3,), version):
return v3.Password(auth_url=url,
user_id=self._user_id,
username=self._username,
user_domain_id=self._user_domain_id,
user_domain_name=self._user_domain_name,
password=self._password,
**self._v3_params)
@classmethod
def get_options(cls):
options = super(Password, cls).get_options()
options.extend(get_options())
return options
|
import json
import logging
import os
import re
from collections import namedtuple
from typing import Union
from auth.authorization import Authorizer
from config.exceptions import InvalidConfigException
from model import script_config
from model.model_helper import InvalidFileException
from model.script_config import get_sorted_config
from utils import os_utils, file_utils, process_utils
from utils.file_utils import to_filename
from utils.string_utils import is_blank, strip
SCRIPT_EDIT_CODE_MODE = 'new_code'
SCRIPT_EDIT_UPLOAD_MODE = 'upload_script'
SCRIPT_EDIT_PATH_MODE = 'new_path'
SCRIPT_PATH_FIELD = 'script_path'
WORKING_DIR_FIELD = 'working_directory'
LOGGER = logging.getLogger('config_service')
ConfigSearchResult = namedtuple('ConfigSearchResult', ['short_config', 'path', 'config_object'])
def _script_name_to_file_name(script_name):
filename = _escape_characters_in_filename(script_name)
return filename + '.json'
def _escape_characters_in_filename(script_name):
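    # Collapse whitespace and slashes into single underscores (e.g. 'my script/v2' ->
    # 'my_script_v2') and let to_filename do the final sanitizing.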
escaped = re.sub('[\\s/]+', '_', script_name).strip("_")
return to_filename(escaped)
def _preprocess_incoming_config(config):
name = config.get('name')
if is_blank(name):
raise InvalidConfigException('Script name is required')
config['name'] = name.strip()
class ConfigService:
def __init__(self, authorizer, conf_folder) -> None:
self._authorizer = authorizer # type: Authorizer
self._script_configs_folder = os.path.join(conf_folder, 'runners')
self._scripts_folder = os.path.join(conf_folder, 'scripts')
file_utils.prepare_folder(self._script_configs_folder)
def load_config(self, name, user):
self._check_admin_access(user)
search_result = self._find_config(name)
if search_result is None:
return None
(short_config, path, config_object) = search_result
if config_object.get('name') is None:
config_object['name'] = short_config.name
if not self._can_edit_script(user, short_config):
raise ConfigNotAllowedException(str(user) + ' has no admin access to ' + short_config.name)
return {'config': config_object, 'filename': os.path.basename(path)}
def create_config(self, user, config, uploaded_script):
self._check_admin_access(user)
_preprocess_incoming_config(config)
name = config['name']
search_result = self._find_config(name)
if search_result is not None:
raise InvalidConfigException('Another config with the same name already exists')
self._preprocess_script_fields(config, None, uploaded_script, user)
path = os.path.join(self._script_configs_folder, _script_name_to_file_name(name))
unique_path = file_utils.create_unique_filename(path, 100)
LOGGER.info('Creating new script config "' + name + '" in ' + unique_path)
self._save_config(config, unique_path)
def update_config(self, user, config, filename, uploaded_script):
self._check_admin_access(user)
_preprocess_incoming_config(config)
if is_blank(filename):
raise InvalidConfigException('Script filename should be specified')
original_file_path = os.path.join(self._script_configs_folder, filename)
if not os.path.exists(original_file_path):
raise InvalidFileException(original_file_path, 'Failed to find script path: ' + original_file_path)
with open(original_file_path, 'r') as f:
original_config_json = json.load(f)
short_original_config = script_config.read_short(original_file_path, original_config_json)
name = config['name']
search_result = self._find_config(name)
if (search_result is not None) and (os.path.basename(search_result.path) != filename):
raise InvalidConfigException('Another script found with the same name: ' + name)
if not self._can_edit_script(user, short_original_config):
raise ConfigNotAllowedException(str(user) + ' is not allowed to modify ' + short_original_config.name)
self._preprocess_script_fields(config, original_config_json, uploaded_script, user)
LOGGER.info('Updating script config "' + name + '" in ' + original_file_path)
self._save_config(config, original_file_path)
def load_script_code(self, script_name, user):
if not self._authorizer.can_edit_code(user.user_id):
logging.warning('User ' + str(user) + ' is not allowed to edit code')
raise InvalidAccessException('Code edit is not allowed for this user')
config_wrapper = self.load_config(script_name, user)
if config_wrapper is None:
return None
config = config_wrapper.get('config')
return self._load_script_code_by_config(config)
def _load_script_code_by_config(self, plain_config):
script_path = plain_config.get(SCRIPT_PATH_FIELD)
if is_blank(script_path):
raise InvalidFileException('', 'Script path is not specified')
command = process_utils.split_command(script_path, plain_config.get(WORKING_DIR_FIELD))
binary_files = []
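        # Walk the command tokens: the first existing, non-binary file is treated as the
        # editable script; binary files are only collected for error reporting below.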
for argument in command:
if file_utils.exists(argument):
if file_utils.is_binary(argument):
binary_files.append(argument)
continue
return {'code': file_utils.read_file(argument), 'file_path': argument}
if binary_files:
if len(binary_files) == 1:
return {'code': None, 'file_path': binary_files[0], 'code_edit_error': 'Cannot edit binary file'}
raise InvalidFileException('command', 'Cannot choose which binary file to edit: ' + str(binary_files))
if len(command) == 1:
return {'code': None, 'file_path': command[0], 'code_edit_error': 'Script path does not exist'}
raise InvalidFileException('command', 'Failed to find script path in command "' + script_path + '"')
def _save_config(self, config, path):
sorted_config = get_sorted_config(config)
config_json = json.dumps(sorted_config, indent=2)
file_utils.write_file(path, config_json)
def list_configs(self, user, mode=None):
edit_mode = mode == 'edit'
if edit_mode:
self._check_admin_access(user)
conf_service = self
def load_script(path, content):
try:
json_object = json.loads(content)
short_config = script_config.read_short(path, json_object)
if short_config is None:
return None
if edit_mode and (not conf_service._can_edit_script(user, short_config)):
return None
if (not edit_mode) and (not conf_service._can_access_script(user, short_config)):
return None
return short_config
except:
LOGGER.exception('Could not load script: ' + path)
return self._visit_script_configs(load_script)
def load_config_model(self, name, user, parameter_values=None, skip_invalid_parameters=False):
search_result = self._find_config(name)
if search_result is None:
return None
(short_config, path, config_object) = search_result
if not self._can_access_script(user, short_config):
raise ConfigNotAllowedException()
return self._load_script_config(path, config_object, user, parameter_values, skip_invalid_parameters)
def _visit_script_configs(self, visitor):
configs_dir = self._script_configs_folder
files = os.listdir(configs_dir)
configs = [file for file in files if file.lower().endswith(".json")]
result = []
for config_path in configs:
path = os.path.join(configs_dir, config_path)
try:
content = file_utils.read_file(path)
visit_result = visitor(path, content)
if visit_result is not None:
result.append(visit_result)
except StopIteration as e:
if e.value is not None:
result.append(e.value)
except:
LOGGER.exception("Couldn't read the file: " + config_path)
return result
def _find_config(self, name) -> Union[ConfigSearchResult, None]:
def find_and_load(path, content):
try:
json_object = json.loads(content)
short_config = script_config.read_short(path, json_object)
if short_config is None:
return None
except:
LOGGER.exception('Could not load script config: ' + path)
return None
if short_config.name != name.strip():
return None
raise StopIteration(ConfigSearchResult(short_config, path, json_object))
configs = self._visit_script_configs(find_and_load)
if not configs:
return None
return configs[0]
@staticmethod
def _load_script_config(path, content_or_json_dict, user, parameter_values, skip_invalid_parameters):
if isinstance(content_or_json_dict, str):
json_object = json.loads(content_or_json_dict)
else:
json_object = content_or_json_dict
config = script_config.ConfigModel(
json_object,
path,
user.get_username(),
user.get_audit_name(),
pty_enabled_default=os_utils.is_pty_supported())
if parameter_values is not None:
config.set_all_param_values(parameter_values, skip_invalid_parameters)
return config
def _can_access_script(self, user, short_config):
return self._authorizer.is_allowed(user.user_id, short_config.allowed_users)
def _can_edit_script(self, user, short_config):
return self._authorizer.is_allowed(user.user_id, short_config.admin_users)
def _check_admin_access(self, user):
if not self._authorizer.is_admin(user.user_id):
raise AdminAccessRequiredException('Admin access to scripts is prohibited for ' + str(user))
def _preprocess_script_fields(self, config, original_config_json, uploaded_script, user):
script_config = config.get('script')
if not script_config:
raise InvalidConfigException('script option is required')
if SCRIPT_PATH_FIELD in config:
del config[SCRIPT_PATH_FIELD]
del config['script']
new_path = strip(script_config.get('path'))
if is_blank(new_path):
raise InvalidConfigException('script.path option is required')
config[SCRIPT_PATH_FIELD] = new_path
mode = script_config.get('mode')
if is_blank(mode) or mode == SCRIPT_EDIT_PATH_MODE:
pass
elif mode in (SCRIPT_EDIT_UPLOAD_MODE, SCRIPT_EDIT_CODE_MODE):
if not self._authorizer.can_edit_code(user.user_id):
raise InvalidAccessException('User ' + str(user) + ' is not allowed to edit code')
if mode == SCRIPT_EDIT_UPLOAD_MODE:
if uploaded_script is None:
raise InvalidConfigException('Uploaded script should be specified')
if original_config_json is None: # new config
if mode == SCRIPT_EDIT_UPLOAD_MODE:
                    # an escaped name is needed when the uploaded file and the server have
                    # different OSes, and thus different special characters
escaped_name = to_filename(uploaded_script.filename)
target_path = os.path.join(self._scripts_folder, escaped_name)
else:
filename = os.path.basename(new_path)
target_path = os.path.join(self._scripts_folder, _escape_characters_in_filename(filename))
script_path = file_utils.create_unique_filename(target_path, 100)
config[SCRIPT_PATH_FIELD] = script_path
else:
existing_code = self._load_script_code_by_config(original_config_json)
script_path = existing_code['file_path']
if (mode == SCRIPT_EDIT_CODE_MODE) and existing_code.get('code_edit_error') is not None:
raise InvalidConfigException('Failed to edit code: ' + existing_code.get('code_edit_error'))
if new_path != original_config_json.get(SCRIPT_PATH_FIELD):
raise InvalidConfigException('script.path override is not allowed for ' + mode + ' mode')
if mode == SCRIPT_EDIT_UPLOAD_MODE:
file_utils.write_file(script_path, uploaded_script.body, byte_content=True)
else:
code = script_config.get('code')
if code is None:
raise InvalidConfigException('script.code should be specified')
file_utils.write_file(script_path, code)
file_utils.make_executable(script_path)
else:
raise InvalidConfigException('Unsupported mode: ' + mode)
class ConfigNotAllowedException(Exception):
def __init__(self, message=None):
super().__init__(message)
class AdminAccessRequiredException(Exception):
def __init__(self, message):
super().__init__(message)
class InvalidAccessException(Exception):
def __init__(self, message=None):
super().__init__(message)
|
import re
from datetime import datetime, time, date
from typing import Optional, List, Dict, Union
from scrapy.http import HtmlResponse
from lxml.html import fromstring, HtmlElement
from city_scrapers_core.constants import (
BOARD,
FORUM,
ADVISORY_COMMITTEE,
COMMITTEE,
CANCELLED,
CONFIRMED,
PASSED,
)
from city_scrapers_core.items import Meeting
from city_scrapers_core.spiders import CityScrapersSpider
import logging
from logging import Logger
log: Logger = logging.getLogger("alle_library_assoc")
class MeetingTimes:
board: time
general: time
advisory: time
lac: time
def __init__(self, board: time, general: time, advisory: time, lac: time):
self.board = board
self.general = general
self.advisory = advisory
self.lac = lac
class MeetingDate:
the_date: date
the_place: Dict[str, str]
notes: Optional[str]
def __init__(self, the_date: date, the_place: Dict[str, str], notes: str = None):
self.the_date = the_date
self.the_place = the_place
self.notes = notes
class AllGenericInfo:
the_date: datetime
location: Dict[str, str]
status: Union[CANCELLED, CONFIRMED, PASSED]
notes: Optional[str]
source: str
def __init__(
self,
the_date: datetime,
location: Dict[str, str],
status: Union[CANCELLED, CONFIRMED, PASSED],
source: str,
notes: str = None,
):
self.the_date = the_date
self.location = location
self.status = status
self.notes = notes
self.source = source
class PageChangedException(RuntimeError):
pass
class AlleLibraryAssocSpider(CityScrapersSpider):
name = "alle_library_assoc"
agency = "Allegheny Library Association"
timezone = "America/New_York"
allowed_domains = ["https://aclalibraries.org"]
start_urls = ["https://aclalibraries.org/who-we-are/"]
# The regex below is meant to split stuff like the following
# - Monday, January 27
# - Monday, January 27 - some note
# - Monday, January 27 some note
#
# This regex is a little complicated BUT FOR GOOD REASON
# - \s+ is used a lot instead of a simple space because the page has random
# weird whitespace characters instead of normal spaces
# - This clause '(\s+[–-]\s+|\s+)' is here because notes show up either like
# 'Mon, Jan 27 - note here' or 'Mon, Jan 27 note here'.
    # - You'll notice I have '[–-]'. Inside the brackets are a hyphen-minus and an
    #   en dash (I think). The website was using en dashes and my tests used
    #   hyphens, so I figured I'd include both.
date_reg = re.compile(r"(\w+,\s+\w+\s+\d+)((\s+[–-]\s+|\s+)(.*))?")
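    # For example, 'Monday, January 27 – canceled' gives group(1) = 'Monday, January 27'
    # and group(4) = 'canceled'; a bare 'Monday, January 27' leaves group(4) as None.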
def parse(self, response: HtmlResponse):
tree = fromstring(response.text)
# I'm assuming the heading 'Board' doesn't show up in any other tab
board_div = tree.xpath("//div/h2[text()='Board']/..")
if len(board_div) != 1:
raise PageChangedException()
board_div = board_div[0]
meeting_times = self._ensure_times_are_as_expected(board_div)
self._ensure_meetings_are_remote(board_div)
dates = self._get_dates(board_div)
board_dates, general_dates, advisory_dates, lac_dates = dates
meetings = self._make_meeting(
board_dates,
meeting_times.board,
"alle_library_assoc_board_",
"Board Meeting",
BOARD,
)
meetings += self._make_meeting(
general_dates,
meeting_times.general,
"alle_library_assoc_general_",
"General Meeting",
FORUM,
)
meetings += self._make_meeting(
advisory_dates,
meeting_times.advisory,
"alle_library_assoc_advisory_",
"Advisory Council Meeting",
ADVISORY_COMMITTEE,
)
meetings += self._make_meeting(
lac_dates,
meeting_times.lac,
"alle_library_assoc_lac_",
"Lac Executive Comitee Meeting",
COMMITTEE,
)
return meetings
def _make_meeting(
self,
dates: List[MeetingDate],
meeting_time: time,
id_prefix: str,
title: str,
classification,
) -> List[Meeting]:
meetings = []
for the_date in dates:
start = datetime.combine(the_date.the_date, meeting_time)
the_id = f"{id_prefix}_{start.strftime(r'%Y_%m_%d')}"
if the_date.notes and "canceled" in the_date.notes.casefold():
status = CANCELLED
elif start < datetime.now():
status = PASSED
else:
status = CONFIRMED
meeting = Meeting(
id=the_id,
title=title,
classification=classification,
status=status,
all_day=False,
time_notes=the_date.notes,
location=the_date.the_place,
source=self.start_urls[0],
start=start,
)
meetings.append(meeting)
return meetings
def _date_from_lis(self, lis: List[str]) -> List[MeetingDate]:
location = {"name": "Remote", "address": "Remote"}
dates = []
for li in lis:
log.info(repr(li))
match = self.date_reg.match(li)
if not match:
log.warning("Failed to capture a meeting date.")
continue
# Take 'Monday, January 27' and turn it into a date
date_str = match.group(1).strip()
tmp_date = datetime.strptime(date_str, r"%A, %B %d")
the_date = date(date.today().year, tmp_date.month, tmp_date.day)
notes = match.group(4).strip() if match.group(4) else None
dates.append(MeetingDate(the_date, location, notes))
return dates
def _lis_from_ul(self, some_ul: HtmlElement) -> List[str]:
return [li.text_content().strip() for li in some_ul.xpath("li")]
def _get_dates(self, board_div: HtmlElement):
"""Get the lists of dates, locations, and notes from the website."""
        # I expect there to be three uls: the board dates, the general membership
        # dates, and the advisory council dates
uls = board_div.xpath("ul")
if len(uls) != 3:
raise PageChangedException()
board_ul, general_ul, advisory_ul = uls
lac_uls = board_div.xpath("./div/div/ul")
if len(lac_uls) != 1:
raise PageChangedException()
lac_ul = lac_uls[0]
board_lis = self._lis_from_ul(board_ul)
general_lis = self._lis_from_ul(general_ul)
advisory_lis = self._lis_from_ul(advisory_ul)
lac_lis = self._lis_from_ul(lac_ul)
board_dates = self._date_from_lis(board_lis)
general_dates = self._date_from_lis(general_lis)
advisory_dates = self._date_from_lis(advisory_lis)
lac_dates = self._date_from_lis(lac_lis)
return (board_dates, general_dates, advisory_dates, lac_dates)
def _ensure_times_are_as_expected(self, board_div: HtmlElement) -> MeetingTimes:
"""I expect the board and general meetings to be at a certain time."""
# They put their meeting times in plain english inside paragraph tags
# (p). Instead of trying to parse plain english I'm just going to
# assume they don't change much.
#
        # I expect the first p in the div to be the board times, the second the
        # general membership times, and the third the advisory council times; the
        # LAC times sit in a nested div. If anything differs from what's expected
        # an error is thrown.
        #
        # This returns the times so that all the time handling is kept in this
        # function.
expected_board_p = "ACLA Board meetings (6:30 pm unless otherwise noted)"
expected_general_p = "General Membership meetings (7:00 pm)"
expected_advisory_p = "(10:00 am)"
expected_lac_p = "(10:00 am)"
ps = board_div.xpath("p")
if len(ps) != 3:
raise PageChangedException()
board_p, general_p, advisory_p = ps
lac_ps = board_div.xpath("./div/p")
if len(lac_ps) < 2:
raise PageChangedException()
lac_p = lac_ps[0]
if board_p.text_content().strip() != expected_board_p:
raise PageChangedException()
if general_p.text_content().strip() != expected_general_p:
raise PageChangedException()
if advisory_p.text_content().strip() != expected_advisory_p:
raise PageChangedException()
if lac_p.text_content().strip() != expected_lac_p:
raise PageChangedException()
return MeetingTimes(
board=time(18, 30),
general=time(19, 0),
advisory=time(10, 0),
lac=time(10, 0),
)
def _ensure_meetings_are_remote(self, board_div: HtmlElement):
remote_statement_p = board_div.xpath("./div/div/p")
if len(remote_statement_p) != 1:
raise PageChangedException()
remote_statement_p = remote_statement_p[0]
statement = "All meetings will be held remotely until further notice"
if statement not in remote_statement_p.text_content():
raise PageChangedException()
|
# -*- coding: utf-8 -*-
import scrapy
from scrapy import Selector
from light_novel.items import LightNovelItem
# http://m.webqxs.com/0/42/
# http://m.webqxs.com/0/42/5678.html
# Set the start and end chapters to crawl
global start_index,end_index
start_index = 1
end_index = 100
# 26
global end_url,current_index
end_url = ''
current_index = 0
class StorySpider(scrapy.Spider):
if start_index > end_index:
raise ValueError('start_index > end_index!')
if start_index < 1:
raise ValueError('start_index at least is 1')
global current_index
current_index = start_index
name = 'story'
def __init__(self):
        # Server domain name
self.server_link = 'http://m.webqxs.com'
self.allowed_domains = ['m.webqxs.com']
# http://m.webqxs.com/0/42_2/
self.start_urls = ['http://m.webqxs.com/0/42/']
    # Send the initial request from start_requests
def start_requests(self):
yield scrapy.Request(url = self.start_urls[0], callback = self.parse1)
    # Parse the response and get the chapter link addresses
def parse1(self, response):
        # Get the number of li elements on the first page of the table of contents
lis = response.xpath("//ul[@class='chapters']/li").extract()
global start_index,end_index
if len(lis) < start_index:
            # follow the "下一页" (next page) link of the table of contents
            next_urls = response.xpath("//div/a[text() ='下一页']/@href").extract()
            if not next_urls:
                raise ValueError('no next_url, check start_index')
            next_url = self.server_link + next_urls[0]
            # start_index should now be adjusted to the li position within the next contents page; end_index is adjusted accordingly
start_index = start_index - len(lis)
end_index = end_index - len(lis)
yield scrapy.Request(url=next_url, callback=self.parse1)
return
        # extract() returns a list, so take element [0]
first_index = str(start_index + 1)
first_url = response.xpath('//ul/li['+ first_index + ']/a/@href').extract()[0]
        # Save the first chapter's link into link_url, defined in items.py
item = LightNovelItem()
item['link_url'] = first_url
urls = response.xpath('//ul/li/a/@href').extract()
global end_url
if end_index < len(urls):
end_url = urls[end_index]
else:
            # Open http://m.webqxs.com/0/42/ in a browser and look at the last chapter: its
            # "next chapter" link points back to the table of contents, so when the
            # next-chapter link is the contents page, that chapter is the last one
end_url = "http://m.webqxs.com/0/42/"
        # Send a Request for the chapter link, passing item along in meta
yield scrapy.Request(url = item['link_url'], meta = {'item':item,}, callback = self.parse_article)
def parse_article(self, response):
item = response.meta['item']
        # Chapter title: take the element
title = response.xpath("//h1[@id='chaptertitle']/text()").extract()[0]
item['title'] = title
print("title is ", title)
        # Article body: iterate over the elements and concatenate them into a string
details = response.xpath('//p/text()').extract()
s = ''
for detail in details:
s = s + detail + "\n\n"
s = s.replace('切换语言[采用cookies记录/时效1天]:','')
item['text'] = s
        # Next-chapter link: take the element and prepend the host
next_url = response.xpath("//ul/li/p[@class='p1 p3']/a/@href").extract()[0]
next_url = self.server_link + next_url
item['link_url'] = next_url
global current_index
current_index = current_index + 1
print('current_index: ',current_index)
        # When the next-chapter link equals end_url, stop requesting
        # When the next-chapter link is empty, stop requesting
global end_url
if next_url == end_url:
yield item
elif next_url == "":
yield item
else:
yield item
yield scrapy.Request(url=item['link_url'], meta={'item': item}, callback=self.parse_article) |
# Authors: Ellen Junker, Kessy Roberta Staub and Mateus da Silva Francelino
# Date: 12/10/2021
import cv2
from FireDetector import FireDetector
from glob import glob
import matplotlib.pyplot as plt
import os
if __name__ == '__main__':
    img_names = glob(os.path.join('fire_images','*.png')) #<<< to use the images with fire
    #img_names = glob(os.path.join('non_fire_images','*.png')) #<<< to use the images without fire
for fn in img_names:
image=cv2.imread(fn)
fire_detector = FireDetector(image)
fire, non_fire = fire_detector.fire_extraction()
mask = fire_detector.build_mask(fire)
bit_mask = fire_detector.convert_mask(mask)
canny = fire_detector.apply_canny(bit_mask)
contour = fire_detector.apply_fire_contour(mask)
contoured_image = fire_detector.add_contour2image(contour)
cv2.imshow('Original', image)
cv2.imshow('Fire', fire)
cv2.imshow('Non Fire', non_fire)
cv2.imshow('Mask', mask)
cv2.imshow('Canny', canny)
cv2.imshow('Contour', contour)
cv2.imshow('Contoured Image', contoured_image)
cv2.waitKey()
cv2.destroyAllWindows()
|
# Copyright 2018-2019 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ._pipeline_param import PipelineParam, match_serialized_pipelineparam
from ._pipeline import Pipeline, PipelineExecutionMode, pipeline, get_pipeline_conf, PipelineConf
from ._container_op import BaseOp, ContainerOp, InputArgumentPath, UserContainer, Sidecar
from ._resource_op import ResourceOp
from ._volume_op import VolumeOp, VOLUME_MODE_RWO, VOLUME_MODE_RWM, VOLUME_MODE_ROM
from ._pipeline_volume import PipelineVolume
from ._volume_snapshot_op import VolumeSnapshotOp
from ._ops_group import OpsGroup, ExitHandler, Condition, ParallelFor, SubGraph
from ._component import graph_component, component
def importer(*args, **kwargs):
import warnings
from kfp.dsl import importer as v2importer
warnings.warn(
'`kfp.dsl.importer` is a deprecated alias and will be removed'
' in KFP v2.0. Please import from `kfp.dsl` instead.',
category=FutureWarning)
return v2importer(*args, **kwargs)
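# Placeholders resolved by the backend at runtime: {{workflow.uid}} and {{pod.name}} are
# Argo Workflows template variables substituted with the concrete workflow UID and pod name.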
EXECUTION_ID_PLACEHOLDER = '{{workflow.uid}}-{{pod.name}}'
RUN_ID_PLACEHOLDER = '{{workflow.uid}}'
ROOT_PARAMETER_NAME = 'pipeline-root'
|
import discord
from discord.ext import commands
from .player import Player, Skill
from mysqldb import the_database
from extra.prompt.menu import Confirm, ConfirmButton
from extra import utils
import os
from datetime import datetime
import random
from typing import List, Optional, Union
from PIL import Image, ImageDraw, ImageFont, ImageOps
from extra.view import UserBabyView
bots_and_commands_channel_id = int(os.getenv('BOTS_AND_COMMANDS_CHANNEL_ID', 123))
class Seraph(Player):
emoji = '<:Seraph:839498018998976563>'
@commands.command(aliases=['dp', 'divine', 'protection'])
@Player.poisoned()
@Player.skill_on_cooldown()
@Player.skills_locked()
@Player.user_is_class('seraph')
@Player.skill_mark()
async def divine_protection(self, ctx, target: discord.Member = None) -> None:
""" Gives a Divine Protection shield to a member, so they are protected against
attacks for 24 hours.
:param target: The target member. (Optional)
PS: If target not provided, you are the target. """
perpetrator = ctx.author
if ctx.channel.id != bots_and_commands_channel_id:
return await ctx.send(f"**{perpetrator.mention}, you can only use this command in {self.bots_txt.mention}!**")
perpetrator_fx = await self.get_user_effects(perpetrator)
if 'knocked_out' in perpetrator_fx:
return await ctx.send(f"**{perpetrator.mention}, you can't use your skill, because you are knocked-out!**")
if not target:
target = perpetrator
if target.bot:
return await ctx.send(f"**You cannot protect a bot, {perpetrator.mention}!**")
target_sloth_profile = await self.get_sloth_profile(target.id)
if not target_sloth_profile:
return await ctx.send(f"**You cannot protect someone who doesn't have an account, {perpetrator.mention}!**")
if target_sloth_profile[1] == 'default':
return await ctx.send(f"**You cannot protect someone who has a `default` Sloth class, {perpetrator.mention}!**")
target_fx = await self.get_user_effects(target)
if 'protected' in target_fx:
return await ctx.send(f"**{target.mention} is already protected, {perpetrator.mention}!**")
confirmed = await Confirm(f"**{perpetrator.mention}, are you sure you want to use your skill, to protect {target.mention}?**").prompt(ctx)
if not confirmed:
return await ctx.send("**Not protecting anyone, then!**")
if ctx.invoked_with == 'mirror':
mirrored_skill = await self.get_skill_action_by_user_id_and_skill_type(user_id=perpetrator.id, skill_type='mirror')
if not mirrored_skill:
return await ctx.send(f"**Something went wrong with this, {perpetrator.mention}!**")
else:
_, exists = await Player.skill_on_cooldown(skill=Skill.ONE).predicate(ctx)
current_timestamp = await utils.get_timestamp()
await self.insert_skill_action(
user_id=perpetrator.id, skill_type="divine_protection", skill_timestamp=current_timestamp,
target_id=target.id, channel_id=ctx.channel.id
)
if ctx.invoked_with != 'mirror':
if exists:
await self.update_user_skill_ts(perpetrator.id, Skill.ONE, current_timestamp)
else:
await self.insert_user_skill_cooldown(perpetrator.id, Skill.ONE, current_timestamp)
# Updates user's skills used counter
await self.update_user_skills_used(user_id=perpetrator.id)
divine_protection_embed = await self.get_divine_protection_embed(
channel=ctx.channel, perpetrator_id=perpetrator.id, target_id=target.id)
await ctx.send(embed=divine_protection_embed)
@commands.command()
@Player.poisoned()
@Player.skills_used(requirement=5)
@Player.skill_on_cooldown(skill=Skill.TWO)
@Player.skills_locked()
@Player.user_is_class('seraph')
@Player.skill_mark()
async def reinforce(self, ctx) -> None:
""" Gets a 50% chance of reinforcing all of their protected people's Divine Protection shield,
by making it last for one more day and a 45% chance of getting a protection for themselves too
(in case they don't have one already). """
perpetrator = ctx.author
if ctx.channel.id != bots_and_commands_channel_id:
return await ctx.send(f"**{perpetrator.mention}, you can only use this command in {self.bots_txt.mention}!**")
perpetrator_fx = await self.get_user_effects(perpetrator)
if 'knocked_out' in perpetrator_fx:
return await ctx.send(f"**{perpetrator.mention}, you can't use your skill, because you are knocked-out!**")
# Gets all active Divine Protection shields from the user
shields = await self.get_skill_action_by_user_id_and_skill_type(user_id=perpetrator.id, skill_type='divine_protection', multiple=True)
if not shields:
return await ctx.send(f"**You don't have any active shield, {perpetrator.mention}!**")
user = await self.get_user_currency(perpetrator.id)
        if user[1] < 50:
return await ctx.send(f"**You don't have `50łł` to use this skill, {perpetrator.mention}!**")
# Confirms the use of the skill
confirm = await Confirm(
f"**Are you sure you want to reinforce `{len(shields)}` active Divine Protection shields for `50łł`, {perpetrator.mention}?**").prompt(ctx)
# User confirmed the use the skill
if not confirm:
return await ctx.send(f"**Not reinforcing them, then, {perpetrator.mention}!**")
_, exists = await Player.skill_on_cooldown(skill=Skill.TWO).predicate(ctx)
current_timestamp = await utils.get_timestamp()
        # Update user's money
await self.client.get_cog('SlothCurrency').update_user_money(perpetrator.id, -50)
# Update perpetrator's second skill timestamp
if exists:
await self.update_user_skill_ts(user_id=perpetrator.id, skill=Skill.TWO, new_skill_ts=current_timestamp)
else:
await self.insert_user_skill_cooldown(ctx.author.id, Skill.TWO, current_timestamp)
# Updates user's skills used counter
await self.update_user_skills_used(user_id=perpetrator.id)
# Calculates chance (50%) of reinforcing the shields of the targets
n1 = random.random()
if n1 <= 0.5:
# Tries to execute it and update the database
try:
# Update active Divine Protection shields' time (+1 day)
await self.reinforce_shields(perpetrator_id=perpetrator.id)
except Exception as e:
print(e)
await ctx.send(f"**For some reason I couldn't reinforce the shield(s), {perpetrator.mention}!**")
else:
reinforce_shields_embed = await self.get_reinforce_shields_embed(
channel=ctx.channel, perpetrator_id=perpetrator.id, shields_len=len(shields))
await ctx.send(embed=reinforce_shields_embed)
else:
await ctx.send(f"**You had a `50%` chance of reinforcing all active Divine Protection shields, but you missed it, {perpetrator.mention}!**")
        # Checks whether the perpetrator already has a Divine Protection shield
if 'protected' not in perpetrator_fx:
n2 = random.random()
# Calculates the chance (45%) of getting a shield for the perpetrator
if n2 <= 0.45:
# Tries to execute it and update the database
try:
# Give user a shield
await self.insert_skill_action(
user_id=perpetrator.id, skill_type="divine_protection", skill_timestamp=current_timestamp,
target_id=perpetrator.id, channel_id=ctx.channel.id
)
except Exception as e:
print(e)
await ctx.send(f"**For some reason I couldn't give you a shield, {perpetrator.mention}!**")
else:
self_shield_embed = await self.get_self_shield_embed(
channel=ctx.channel, perpetrator_id=perpetrator.id)
await ctx.send(embed=self_shield_embed)
else:
await ctx.send(f"**You had a `45%` chance of getting a Divine Protection shield for yourself, but you missed it, {perpetrator.mention}!**")
async def check_protections(self) -> None:
""" Check on-going protections and their expiration time. """
divine_protections = await self.get_expired_protections()
for dp in divine_protections:
await self.delete_skill_action_by_target_id_and_skill_type(dp[3], 'divine_protection')
channel = self.bots_txt
await channel.send(
content=f"<@{dp[0]}>, <@{dp[3]}>",
embed=discord.Embed(
description=f"**<@{dp[3]}>'s `Divine Protection` from <@{dp[0]}> just expired!**",
color=discord.Color.red()))
async def reinforce_shields(self, perpetrator_id: int, increment: Optional[int] = 86400) -> None:
""" Reinforces all active Divine Protection shields.
:param perpetrator_id: The ID of the perpetrator of those shields.
:param increment: The amount to increment. Default = 1 day """
mycursor, db = await the_database()
await mycursor.execute("""
UPDATE SlothSkills SET skill_timestamp = skill_timestamp + %s WHERE user_id = %s
AND skill_type = 'divine_protection'""", (increment, perpetrator_id))
await db.commit()
await mycursor.close()
async def reinforce_shield(self, user_id: int, increment: Optional[int] = 86400) -> None:
""" Reinforces a specific active Divine Protection shield.
:param user_id: The ID of the user.
:param increment: The amount to increment. Default = 1 day """
mycursor, db = await the_database()
await mycursor.execute("""
UPDATE SlothSkills SET skill_timestamp = skill_timestamp + %s WHERE target_id = %s
AND skill_type = 'divine_protection'""", (increment, user_id))
await db.commit()
await mycursor.close()
async def get_expired_protections(self) -> None:
""" Gets expired divine protection skill actions. """
the_time = await utils.get_timestamp()
mycursor, db = await the_database()
await mycursor.execute("""
SELECT * FROM SlothSkills
WHERE skill_type = 'divine_protection' AND (%s - skill_timestamp) >= 86400
""", (the_time,))
divine_protections = await mycursor.fetchall()
await mycursor.close()
return divine_protections
async def get_divine_protection_embed(self, channel, perpetrator_id: int, target_id: int) -> discord.Embed:
""" Makes an embedded message for a divine protection action.
:param channel: The context channel.
:param perpetrator_id: The ID of the perpetrator of the divine protection.
:param target_id: The ID of the target member that is gonna be protected. """
timestamp = await utils.get_timestamp()
divine_embed = discord.Embed(
title="A Divine Protection has been executed!",
timestamp=datetime.fromtimestamp(timestamp)
)
divine_embed.description = f"🛡️ <@{perpetrator_id}> protected <@{target_id}> from attacks for 24 hours! 🛡️"
divine_embed.color = discord.Color.green()
divine_embed.set_thumbnail(url="https://thelanguagesloth.com/media/sloth_classes/Seraph.png")
divine_embed.set_footer(text=channel.guild, icon_url=channel.guild.icon.url)
return divine_embed
async def get_reinforce_shields_embed(self, channel, perpetrator_id: int, shields_len: int) -> discord.Embed:
""" Makes an embedded message for a shield reinforcement action.
:param channel: The context channel.
:param perpetrator_id: The ID of the perpetrator of the shield reinforcement.
        :param shields_len: The number of active shields that the perpetrator has. """
timestamp = await utils.get_timestamp()
reinforce_shields_embed = discord.Embed(
title="A Shield Reinforcement has been executed!",
timestamp=datetime.fromtimestamp(timestamp)
)
        reinforce_shields_embed.description = f"🛡️ <@{perpetrator_id}> reinforced `{shields_len}` active shields; they now have 24 more hours of duration! 🛡️💪"
reinforce_shields_embed.color = discord.Color.green()
reinforce_shields_embed.set_author(name='50% chance', url=self.client.user.display_avatar)
reinforce_shields_embed.set_thumbnail(url="https://thelanguagesloth.com/media/sloth_classes/Seraph.png")
reinforce_shields_embed.set_footer(text=channel.guild, icon_url=channel.guild.icon.url)
return reinforce_shields_embed
async def get_self_shield_embed(self, channel, perpetrator_id: int) -> discord.Embed:
""" Makes an embedded message for a shield reinforcement action.
:param channel: The context channel.
:param perpetrator_id: The ID of the perpetrator of the shield reinforcement.
:param shields_len: The amount of active shields that the perpetrator have. """
timestamp = await utils.get_timestamp()
self_shield_embed = discord.Embed(
title="A Divine Protection shield has been Given!",
timestamp=datetime.fromtimestamp(timestamp)
)
self_shield_embed.description = f"🛡️ <@{perpetrator_id}> got a shield for themselves for reinforcing other shields! 🛡️💪"
self_shield_embed.color = discord.Color.green()
self_shield_embed.set_author(name='45% chance', url=self.client.user.display_avatar)
self_shield_embed.set_thumbnail(url="https://thelanguagesloth.com/media/sloth_classes/Seraph.png")
self_shield_embed.set_footer(text=channel.guild, icon_url=channel.guild.icon.url)
return self_shield_embed
@commands.command()
@Player.poisoned()
@Player.skills_used(requirement=20)
@Player.skill_on_cooldown(skill=Skill.THREE)
@Player.skills_locked()
@Player.user_is_class('seraph')
@Player.skill_mark()
async def heal(self, ctx, target: discord.Member = None) -> None:
""" Heals a member from all debuffs.
:param target: The member from whom to remove the debuffs.
PS: If target not provided, the target is you.
* Skill cost: 100łł.
* Cooldown: 1 day. """
perpetrator = ctx.author
if ctx.channel.id != self.bots_txt.id:
return await ctx.send(f"**{perpetrator.mention}, you can only use this command in {self.bots_txt.mention}!**")
perpetrator_fx = await self.get_user_effects(perpetrator)
if 'knocked_out' in perpetrator_fx:
return await ctx.send(f"**{perpetrator.mention}, you can't use your skill, because you are knocked-out!**")
if not target:
target = perpetrator
if target.bot:
return await ctx.send(f"**You cannot heal a bot, {perpetrator.mention}!**")
target_sloth_profile = await self.get_sloth_profile(target.id)
if not target_sloth_profile:
return await ctx.send(f"**You cannot protect someone who doesn't have an account, {perpetrator.mention}!**")
if target_sloth_profile[1] == 'default':
return await ctx.send(f"**You cannot protect someone who has a `default` Sloth class, {perpetrator.mention}!**")
effects = await self.get_user_effects(target)
debuffs = [fx for fx, values in effects.items() if values['debuff'] and fx not in ['locked', 'poisoned', 'kidnapped']]
if not debuffs:
return await ctx.send(f"**{target.mention} doesn't have any active debuff, {perpetrator.mention}!**")
user = await self.get_user_currency(perpetrator.id)
if user[1] < 100:
return await ctx.send(f"**You don't have `100łł` to use this skill, {perpetrator.mention}!**")
confirm = await Confirm(f"**Do you really want to heal {target.mention} from all debuffs, {perpetrator.mention}?**").prompt(ctx)
if not confirm:
return await ctx.send(f"**Not doing it then, {perpetrator.mention}!**")
_, exists = await Player.skill_on_cooldown(Skill.THREE).predicate(ctx)
await self.client.get_cog('SlothCurrency').update_user_money(perpetrator.id, -100)
current_ts = await utils.get_timestamp()
if exists:
await self.update_user_skill_ts(perpetrator.id, Skill.THREE, current_ts)
else:
await self.insert_user_skill_cooldown(perpetrator.id, Skill.THREE, current_ts)
debuffs_removed = await self.remove_debuffs(member=target, debuffs=debuffs)
heal_embed = await self.make_heal_embed(target=target, perpetrator=perpetrator, debuffs_removed=debuffs_removed)
await ctx.send(embed=heal_embed)
async def remove_debuffs(self, member: discord.Member, debuffs: List[str]) -> int:
""" Removes all debuffs from a member.
:param member: The member from whom to remove the debuffs.
:param debuffs: A list of debuffs to remove. """
debuffs_removed = 0
for debuff in debuffs:
try:
if debuff == 'hacked':
debuffs_removed += 1
if debuff == 'knocked_out':
debuffs_removed += 1
if debuff == 'wired':
debuffs_removed += 1
if debuff == 'frogged':
debuffs_removed += 1
if debuff == 'munk':
await member.edit(nick=member.display_name.replace('Munk', '').strip())
debuffs_removed += 1
if debuff == 'sabotaged':
debuffs_removed += 1
except Exception as e:
print(e)
continue
await self.delete_debuff_skill_action_by_target_id(member.id)
return debuffs_removed
async def make_heal_embed(self, target: discord.Member, perpetrator: discord.Member, debuffs_removed: int) -> discord.Embed:
""" Makes an embedded message for a heal skill action.
:param target: The member that was healed.
:param perpetrator: The person who healed them.
:param debuffs_removed: The amount of debuffs removed """
parsed_time = await utils.parse_time()
heal_embed = discord.Embed(
title="__Someone just got healed__!",
description=f"{target.mention} just got healed from `{debuffs_removed}` bad effect(s)!",
color=target.color,
timestamp=parsed_time
)
heal_embed.set_thumbnail(url=target.display_avatar)
heal_embed.set_image(url="https://cdn3.iconfinder.com/data/icons/role-playing-game-5/340/magic_game_rpg_human_healing_heal-512.png")
heal_embed.set_author(name=perpetrator, url=perpetrator.display_avatar, icon_url=perpetrator.display_avatar)
heal_embed.set_footer(text=perpetrator.guild.name, icon_url=perpetrator.guild.icon.url)
return heal_embed
@commands.command(aliases=['conceive_grace', 'give_grace', 'provide_grace'])
@Player.poisoned()
@Player.skills_used(requirement=50)
@Player.skill_on_cooldown(skill=Skill.FOUR)
@Player.skills_locked()
@Player.user_is_class('seraph')
@Player.skill_mark()
async def attain_grace(self, ctx, target: Optional[discord.Member] = None) -> None:
""" Tries with a 10% chance of success to attain the grace from the deity
so the person, who must be honeymoon'd receives a baby to take care of,
together with their spouse.
:param target: The target member to attain the grace to. [Optional][Default=You]
PS: Don't forget to feed your baby, that's crucial and vital.
• Delay = 1 day
• Cost = 500łł """
if ctx.channel.id != bots_and_commands_channel_id:
return await ctx.send(f"**{ctx.author.mention}, you can only use this command in {self.bots_txt.mention}!**")
perpetrator = ctx.author
perpetrator_fx = await self.get_user_effects(perpetrator)
if 'knocked_out' in perpetrator_fx:
return await ctx.send(f"**{perpetrator.mention}, you can't use your skill, because you are knocked-out!**")
if not target:
target = perpetrator
if target.bot:
return await ctx.send(f"**You cannot use it on a bot, {perpetrator.mention}!**")
target_sloth_profile = await self.get_sloth_profile(target.id)
if not target_sloth_profile:
return await ctx.send(f"**You cannot attain the grace for someone who doesn't have an account, {perpetrator.mention}!**")
if target_sloth_profile[1] == 'default':
return await ctx.send(f"**You cannot attain the grace for someone who has a `default` Sloth class, {perpetrator.mention}!**")
marriage = await self.client.get_cog('SlothClass').get_user_marriage(target.id)
if not marriage['partner']:
return await ctx.send(f"**You cannot attain the grace for someone who is not married, {perpetrator.mention}!**")
if not marriage['honeymoon']:
return await ctx.send(f"**You cannot attain the grace for someone who is not honeymoon'd, {perpetrator.mention}!**")
confirm = await Confirm(f"**Are you sure you want to spend `500` to try to attain the grace for {target.mention}, {perpetrator.mention}?**").prompt(ctx)
if not confirm:
return await ctx.send(f"**Not doing it, then, {target.mention}!**")
_, exists = await Player.skill_on_cooldown(skill=Skill.FOUR).predicate(ctx)
user_currency = await self.get_user_currency(perpetrator.id)
if user_currency[1] < 500:
return await ctx.send(f"**{perpetrator.mention}, you don't have `500łł`!**")
await self.client.get_cog('SlothCurrency').update_user_money(perpetrator.id, -500)
emoji = '👶'
# Calculates the chance (10%) of attaining the grace for a member
rn = random.random()
attained_grace: bool = rn <= 0.10
try:
current_timestamp = await utils.get_timestamp()
if attained_grace:
target_partner_sloth_profile = await self.get_sloth_profile(marriage['partner'])
baby_class = random.choice([target_sloth_profile[1], target_partner_sloth_profile[1]])
await self.insert_user_baby(target.id, marriage['partner'], f"Baby {baby_class}", baby_class)
if exists:
await self.update_user_skill_ts(perpetrator.id, Skill.FOUR, current_timestamp)
else:
await self.insert_user_skill_cooldown(perpetrator.id, Skill.FOUR, current_timestamp)
# Updates user's skills used counter
await self.update_user_skills_used(user_id=perpetrator.id)
except Exception as e:
print(e)
return await ctx.send(f"**{perpetrator.mention}, something went wrong with it, try again later!**")
else:
if attained_grace:
attained_grace_embed = await self.attained_grace_embed(perpetrator=perpetrator, target=target, emoji=emoji)
await ctx.send(embed=attained_grace_embed)
else:
await ctx.send(f"**You had a `10%` chance of attaining the grace for {target.mention}, but you missed it, {perpetrator.mention}!**")
async def attained_grace_embed(self, perpetrator: discord.Member, target: discord.Member, emoji: str) -> discord.Embed:
""" Makes an embedded message for an attain grace action.
:param perpetrator: The member who conceived the grace.
:param target: The member who received the grace.
:param emoji: The emoji representing the skill action. """
parsed_time = await utils.parse_time()
attained_grace_embed = discord.Embed(
title="__A Grace has been Attained__!",
description=f"{target.mention} attained the grace thanks to {perpetrator.mention} and has been given a beautiful baby!",
color=discord.Color.green(),
timestamp=parsed_time
)
attained_grace_embed.set_thumbnail(url=target.display_avatar)
attained_grace_embed.set_image(url="https://c.tenor.com/ISO7aKhQvX4AAAAC/lion-king-simba.gif")
attained_grace_embed.set_author(name=perpetrator, url=perpetrator.display_avatar, icon_url=perpetrator.display_avatar)
attained_grace_embed.set_footer(text=perpetrator.guild.name, icon_url=perpetrator.guild.icon.url)
return attained_grace_embed
@commands.group(aliases=['bb'])
@Player.poisoned()
@Player.kidnapped()
async def baby(self, ctx) -> None:
""" Command for managing and interacting with a baby.
(Use this without a subcommand to see all subcommands available) """
if ctx.invoked_subcommand:
return
prefix = self.client.command_prefix
subcommands = [f"{prefix}{c.qualified_name}" for c in ctx.command.commands
]
subcommands = '\n'.join(subcommands)
items_embed = discord.Embed(
title="__Subcommads__:",
description=f"```apache\n{subcommands}```",
color=ctx.author.color,
timestamp=ctx.message.created_at
)
await ctx.send(embed=items_embed)
@baby.command(name="choose_class", aliases=["update_class", "change_class", "cc"])
@commands.cooldown(1, 5, commands.BucketType.user)
async def _baby_choose_class(self, ctx) -> None:
""" Chooses a class for your baby. """
member: discord.Member = ctx.author
user_baby = await self.get_user_baby(member.id)
if not user_baby:
return await ctx.send(f"**You don't even have a baby, {member.mention}!**")
if user_baby[3].lower() != 'embryo':
return await ctx.send(f"**You already chose a class for your baby, {member.mention}!**")
embed: discord.Embed = discord.Embed(
title="__Baby Class Selection__",
color=member.color,
timestamp=ctx.message.created_at
)
embed.set_author(name=member, icon_url=member.display_avatar)
embed.set_thumbnail(url=member.display_avatar)
embed.set_footer(text="3 minutes to select", icon_url=ctx.guild.icon.url)
view: discord.ui.View = UserBabyView(member)
msg = await ctx.send(embed=embed, view=view)
await view.wait()
await utils.disable_buttons(view)
await msg.edit(view=view)
if view.selected_baby is None:
return
if not view.selected_baby:
return
await self.update_user_baby_class(member.id, view.selected_baby.lower())
await ctx.send(f"**Your `Embryo` is born as a `{view.selected_baby}`, {member.mention}!**")
@baby.command(name="change_name", aliases=["name", "c_name", "update_name", "cn"])
@commands.cooldown(1, 5, commands.BucketType.user)
async def _baby_change_name(self, ctx, *, baby_name: str = None) -> None:
""" Changes the baby's name.
:param baby_name: The new baby name to change to.
* Price: 250.
* Character limit: 25. """
member: discord.Member = ctx.author
if not (user_baby := await self.get_user_baby(member.id)):
return await ctx.send(f"**You don't even have a baby, you cannot change it's name, {member.mention}!**")
if user_baby[3].lower() == 'embryo':
return await ctx.send(f"**You cannot change the name of an unhatched embryo, {member.mention}!**")
if not baby_name:
return await ctx.send(f"**Please, inform a name for your baby, {member.mention}!**")
if baby_name.lower() == 'embryo':
return await ctx.send(f"**You cannot put that name, {member.mention}!**")
if len(baby_name) > 25:
return await ctx.send(f"**The limit of characters for the name is 25, {member.mention}!**")
SlothCurrency = self.client.get_cog('SlothCurrency')
user_money = await SlothCurrency.get_user_currency(member.id)
if user_money[0][1] < 250:
return await ctx.send(f"**You don't have 250łł to change your baby's nickname, {member.mention}!**")
confirm = await Confirm(f"**Are you sure you want to spend `250łł` to change your baby's name to `{baby_name}`?**").prompt(ctx)
if not confirm:
return await ctx.send(f"**Not doing it then, {member.mention}!**")
await SlothCurrency.update_user_money(member.id, -250)
await self.update_user_baby_name(member.id, baby_name)
await ctx.send(f"**Successfully updated your baby {user_baby[2]}'s nickname from `{user_baby[2]}` to `{baby_name}`, {member.mention}!**")
@baby.command(name="see", aliases=["show", "display", "render"])
@commands.cooldown(1, 5, commands.BucketType.user)
async def _baby_see(self, ctx, member: Optional[Union[discord.Member, discord.User]] = None) -> None:
""" Sees someone's baby.
:param member: The member from whom to show the baby. [Optional][Default = You] """
author: discord.Member = ctx.author
if not member:
member = author
user_baby = await self.get_user_baby(member.id)
if not user_baby:
if author == member:
return await ctx.send(f"**You don't have a baby, {member.mention}!**")
else:
return await ctx.send(f"**{member} doesn't have a baby, {author.mention}!**")
# Gets the baby's parents
parent_one: discord.Member = ctx.guild.get_member(user_baby[0])
parent_two: discord.Member = ctx.guild.get_member(user_baby[1])
# Gets parents' profile pictures
p1pfp = await utils.get_user_pfp(parent_one)
p2pfp = await utils.get_user_pfp(parent_two)
# Makes the Baby's Image
small = ImageFont.truetype("built titling sb.ttf", 45)
background = Image.open(f"./sloth_custom_images/background/base_baby_background.png")
hud = Image.open(f"./sloth_custom_images/hud/base_baby_hud.png")
baby_class = Image.open(f"./sloth_custom_images/sloth/{user_baby[3].title()}.png").resize((470, 350))
background.paste(hud, (0, 0), hud)
background.paste(p1pfp, (5, 5), p1pfp)
background.paste(p2pfp, (730, 5), p2pfp)
background.paste(baby_class, (160, 280), baby_class)
draw = ImageDraw.Draw(background)
draw.text((320, 5), str(user_baby[2]), fill="white", font=small)
draw.text((5, 70), f"LP: {user_baby[4]}", fill="red", font=small)
draw.text((5, 120), f"Food: {user_baby[5]}", fill="brown", font=small)
file_path = f"media/temporary/user_baby-{member.id}.png"
background.save(file_path)
# Sends the Baby's Image
await ctx.send(file=discord.File(file_path))
return os.remove(file_path)
async def check_baby_food(self) -> None:
""" Checks baby food statuses. """
current_ts = await utils.get_timestamp()
babies = await self.get_hungry_babies(current_ts)
for baby in babies:
if baby[3].lower() == 'embryo':
continue
try:
# Checks whether baby has food
if baby[5] >= 5:
# Increments LP if needed
if baby[4] < 100:
await self.update_user_baby_lp(baby[0], 5, current_ts)
# Subtracts food
await self.update_user_baby_food(baby[0], -5, current_ts)
else:
# Checks whether baby has lp
if baby[4] - 5 > 0:
await self.update_user_baby_lp(baby[0], -5, current_ts)
await self.update_user_baby_food(baby[0], 0, current_ts)
else:
# Baby died
channel = self.bots_txt
await self.delete_user_baby(baby[0])
embed: discord.Embed = discord.Embed(
description=f"**Sadly, your baby `{baby[3]}` named `{baby[2]}` starved to death because you didn't feed it for a while. My deepest feelings...**",
color=discord.Color.red())
file_path = await self.make_baby_death_image(baby)
embed.set_image(url="attachment://user_baby_death.png")
# Sends the Baby's Image
await channel.send(content=f"<@{baby[0]}>, <@{baby[1]}>", embed=embed, file=discord.File(file_path, filename="user_baby_death.png"))
os.remove(file_path)
except Exception as e:
print('Baby death error', e)
pass
async def make_baby_death_image(self, baby: List[Union[int, str]]) -> str:
""" Makes an embed for the baby's death.
:param baby: The data from the dead baby. """
medium = ImageFont.truetype("built titling sb.ttf", 60)
background = Image.open(f"./sloth_custom_images/background/base_baby_background.png")
baby_class = Image.open(f"./sloth_custom_images/sloth/{baby[3].lower()}.png").resize((470, 350))
background.paste(baby_class, (160, 280), baby_class)
draw = ImageDraw.Draw(background)
draw.text((320, 180), "R.I.P.", fill="black", font=medium)
draw.text((320, 230), str(baby[2]), fill="black", font=medium)
file_path = f"media/temporary/user_baby_death-{baby[0]}.png"
# Makes the image gray
background = ImageOps.grayscale(background)
# Saves image
background.save(file_path)
return file_path
@baby.command(name="feed", aliases=["give_food", "f"])
@commands.cooldown(1, 5, commands.BucketType.user)
async def _baby_feed(self, ctx, leaves: str = None) -> None:
""" Feeds your baby.
:param leaves: The amount of leaves you want to give to your baby.
PS: You feed your baby with leaves.
Each leaf gives your baby 5 food points. """
member: discord.Member = ctx.author
if not leaves:
return await ctx.send(f"**Please, inform an amount of leaves to feed your baby, {member.mention}!**")
user_baby = await self.get_user_baby(member.id)
if not user_baby:
return await ctx.send(f"**You don't have a baby, {member.mention}!**")
if user_baby[3].lower() == 'embryo':
return await ctx.send(f"**You cannot feed an embryo, {member.mention}!**")
try:
leaves = int(leaves)
except ValueError:
return await ctx.send(f"**Please, inform a valid amount of leaves, {member.mention}!**")
if leaves <= 0:
return await ctx.send(f"**Please, inform an amount of leaves that is greater than 0, {member.mention}!**")
current_ts = await utils.get_timestamp()
food_points: int = user_baby[5]
temp_points = temp_leaves = 0
food_rate: int = 5  # The food points each leaf gives to the baby
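# Work out how many of the requested leaves can actually be used before the baby's food hits the 100-point cap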
for _ in range(leaves):
if food_points + food_rate <= 100:
temp_points += food_rate
temp_leaves += 1
food_points += food_rate
else:
break
confirm_view = ConfirmButton(member, timeout=60)
msg = await ctx.send(
content=f"**Are you sure you want to spend `{temp_leaves}` out of `{leaves}łł` to recover `{temp_points}lp` to your baby, {member.mention}!**",
view=confirm_view)
await confirm_view.wait()
SlothCurrency = self.client.get_cog("SlothCurrency")
user_currency = await self.get_user_currency(member.id)
if user_currency[1] < temp_leaves:
return await ctx.send(f"**You don't have `{temp_leaves}` leaves to feed your baby, {member.mention}!**")
if confirm_view.value:
await SlothCurrency.update_user_money(member.id, -temp_leaves)
await self.update_user_baby_food(member.id, temp_points, current_ts)
embed = discord.Embed(
title="__Pet has been Fed__",
description=f"**You just fed `{user_baby[2]}` with `{temp_leaves}łł`, now it has `{food_points}` food points, {member.mention}!**",
color=discord.Color.green(),
timestamp=ctx.message.created_at
)
embed.set_thumbnail(url=f"https://thelanguagesloth.com/static/assets/images/sloth_classes/{user_baby[3]}.png")
await ctx.send(embed=embed)
await utils.disable_buttons(confirm_view)
await msg.edit(view=confirm_view)
|
"""This class implements the univariate kernel density estimator (KDE).
References
----------
Li, Q. and J. S. Racine (2007) "Nonparametric econometrics", Princeton
University Press, Princeton.
Silverman, B. W. (1986): Density estimation for statistics and data analysis,
CRC press.
"""
import numpy as np
import math
import rpy2.robjects as robjects
class UnivariateKernelDensity:
"""Univariate Kernel Density Estimator.
Args:
data : array-like
The data points used for the estimation.
gridsize : int
If gridsize is None, max(len(data), 50) is used.
kernel : str
The kernel function to be used in the estimation. Implemented
kernels:
- "gaussian" for Gaussian
- "log_gaussian" for log-transformed Gaussian
"""
def __init__(self, data, gridsize=None, kernel='gaussian'):
"""Initialize the estimator with the data, the gridsize and kernel
function.
"""
self.data = np.asarray(data)
self.gridsize = gridsize
self.no_observations = len(self.data)
self.kernel = kernel
def _density_value(self, x, bandwidth):
"""Calculate value of either the Gaussian or log-transformed Gaussian
kernel density estimate (specified when creating an instance of the
estimator class) at point x for a given bandwidth.
Args:
bandwidth : float, str
The value of the bandwidth or a string of an implemented
bandwidth selection method.
x : int, float
The point at which the density should be estimated.
"""
if self.kernel == 'gaussian':
density_value = (
(self.no_observations * self._calc_bw(bandwidth)) **
(-1)) * (sum(
((2 * math.pi)**(-0.5)) * np.exp(((-0.5) * ((
(self.data - x) / self._calc_bw(bandwidth)) ** 2)))
)
)
elif self.kernel == 'log_gaussian':
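# Log-transformed Gaussian kernel for positive data: the 1/x factor comes from the change of variables back from the log scale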
density_value = (
(self.no_observations) ** (-1)) * (sum(
(
x * self._calc_bw(bandwidth) * math.sqrt(2 * math.pi)
) ** (-1) * np.exp(((-0.5) * ((
(np.log(x) - np.log(self.data)) / self._calc_bw(bandwidth)
) ** 2)))
)
)
else:
raise ValueError("Kernel not implemented.")
return density_value
def _calc_opt_bw_silverman(self):
"""Calculate the optimal bandwidth according to Silverman's rule of
thumb for a Gaussian Kernel [Silverman (1986), p.48].
"""
if self.kernel == 'gaussian':
bw_silverman = (
0.9 * np.std(self.data) / (self.no_observations ** (1 / 5))
)
elif self.kernel == 'log_gaussian':
bw_silverman = (
0.9 * np.std(np.log(self.data)) / (self.no_observations ** (1 / 5))
)
return bw_silverman
def _calc_opt_bw_lscv(self):
"""Use the built-in unbiased cross validation from R package stats to
calculate the optimal bandwidth based on the least-squares cross
validation procedure.
"""
rucv = robjects.r('bw.ucv')
if self.kernel == 'gaussian':
bw_lscv = float(
np.asarray(rucv(robjects.FloatVector(self.data[:])))
)
elif self.kernel == 'log_gaussian':
bw_lscv = float(
np.asarray(rucv(robjects.FloatVector(np.log(self.data[:]))))
)
return bw_lscv
def _calc_bw(self, bandwidth):
"""Calculate the bandwidth according to the method specified by the
bandwidth argument.
Args:
bandwidth : float, str
The value of the bandwidth or a string of an implemented
bandwidth selection method.
"""
if type(bandwidth) in [float, int]:
if bandwidth <= 0:
raise ValueError("Bandwidth must be greater than 0.")
else:
self.bw = bandwidth
elif bandwidth == "silverman":
self.bw = self._calc_opt_bw_silverman()
elif bandwidth == "lscv":
self.bw = self._calc_opt_bw_lscv()
return self.bw
def estimate(self, bandwidth, stretch=4):
"""Estimate the density using a Gaussian kernel and the bandwidth as
specified.
Args:
bandwidth : float, str
The value of the bandwidth or a string of an implemented
bandwidth selection method.
stretch : float
Adjusts the grid to ensure that the estimated density reaches
zero past the max/min value of the data.
"""
if self.gridsize is None:
gridsize = max(self.no_observations, 50)
else:
gridsize = self.gridsize
lower_bound = (
np.min(self.data, axis=0) - stretch * self._calc_bw(bandwidth)
)
upper_bound = (
np.max(self.data, axis=0) + stretch * self._calc_bw(bandwidth)
)
gridpoints = np.linspace(lower_bound, upper_bound, gridsize)
estimated_density = [0] * gridsize
for i in range(gridsize):
estimated_density[i] = self._density_value(
gridpoints[i], bandwidth
)
self.estimated_density = estimated_density
self.support = gridpoints
def __call__(self, bandwidth, stretch=4):
return self.estimate(bandwidth, stretch)
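# A minimal usage sketch (not part of the estimator itself), assuming the module's
# imports are available; the "silverman" rule is used here so no R/rpy2 call is needed.
# The sample data and printed checks below are illustrative only.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    sample = rng.normal(loc=0.0, scale=1.0, size=200)
    kde = UnivariateKernelDensity(sample, kernel='gaussian')
    kde.estimate(bandwidth='silverman')  # a positive float, e.g. 0.3, also works
    print(kde.bw)  # bandwidth chosen by Silverman's rule of thumb
    print(len(kde.support), len(kde.estimated_density))  # grid points and density values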
|
from .base import EntityRef
from .exchange_ref import ExchangeRef
class MultipleReferences(Exception):
pass
class NoReference(Exception):
pass
class ProcessRef(EntityRef):
"""
Processes can lookup:
"""
_etype = 'process'
_ref_field = 'referenceExchange'
@property
def _addl(self):
return self.__getitem__('SpatialScope')
def __init__(self, external_ref, query, **kwargs):
self._default_rx = None
self._lci = dict()
super(ProcessRef, self).__init__(external_ref, query, **kwargs)
@property
def reference_entity(self):
if self._reference_entity is None:
self._reference_entity = self._query.get_reference(self.external_ref)
if len(self._reference_entity) == 1:
self._default_rx = self._reference_entity[0].flow.external_ref
return self._reference_entity
def _show_ref(self):
for i in self.references():
print('reference: %s' % i)
@property
def name(self):
return self._name
@property
def default_rx(self):
"""
The 'primary' reference exchange of a process CatalogRef. This is an external_ref for a flow.
This can be set by a user for convenience for multi-reference processes
(the chosen flow is required to be unique among the references).
:return:
"""
return self._default_rx
@default_rx.setter
def default_rx(self, value):
if not isinstance(value, str) and not isinstance(value, int):
if hasattr(value, 'external_ref'):
value = value.external_ref
elif hasattr(value, 'entity_type'):
if value.entity_type == 'exchange':
value = value.flow.external_ref
if value in [rx.flow.external_ref for rx in self.references()]:
self._default_rx = value
else:
print('Not a valid reference exchange specification')
def reference(self, flow=None):
"""
This used to fallback to regular exchanges; no longer.
:param flow:
:return:
"""
if len(self.reference_entity) == 0:
raise NoReference
if flow is None:
if len(self.reference_entity) > 1:
raise MultipleReferences('%s: You must specify a reference flow' % self.link)
return self.reference_entity[0]
if hasattr(flow, 'entity_type'):
if flow.entity_type == 'exchange':
flow = flow.flow
try:
return next(x for x in self.reference_entity if x.flow == flow or x.flow.external_ref == flow)
except StopIteration:
try:
return next(x for x in self.reference_entity if x.flow.match(flow))
except StopIteration:
print('%s: references:' % self.link)
self._show_ref()
raise KeyError(flow)
def references(self):
for x in self.reference_entity:
yield x
'''
def is_allocated(self, rx):
"""
For process refs, assume
:param rx:
:return:
"""
for _rx in self.reference_entity:
if _rx.key == rx.key:
return _rx.is_alloc
return False
'''
def _use_ref_exch(self, ref_flow):
"""
returns a string which is the external_ref of a flow; default_rx if none was specified and the process has one.
:param ref_flow:
:return:
"""
if ref_flow is None:
if self._default_rx is not None:
ref_flow = self._default_rx
elif hasattr(ref_flow, 'entity_type'):
if ref_flow.entity_type == 'exchange':
return ref_flow.flow.external_ref
elif ref_flow.entity_type == 'flow':
return ref_flow.external_ref
raise TypeError('Invalid reference exchange: %s' % ref_flow)
return ref_flow
'''
Inventory queries
'''
def exchanges(self, **kwargs):
for x in self._query.exchanges(self.external_ref, **kwargs):
yield ExchangeRef(self, self._query.make_ref(x.flow), x.direction, value=None, termination=x.termination,
comment=x.comment)
def exchange_values(self, flow, direction=None, termination=None, reference=None, **kwargs):
"""
This should get replaced by ev()
:param flow:
:param direction:
:param termination:
:param reference:
:param kwargs:
:return:
"""
if hasattr(flow, 'entity_type'):
if flow.entity_type == 'exchange':
flow = flow.flow.external_ref
elif flow.entity_type == 'flow':
flow = flow.external_ref
for x in self._query.exchange_values(self.external_ref, flow, direction,
termination=termination, reference=reference, **kwargs):
yield ExchangeRef(self, self._query.make_ref(x.flow), x.direction, value=x.value, termination=x.termination,
comment=x.comment)
def inventory(self, ref_flow=None, **kwargs):
# ref_flow = self._use_ref_exch(ref_flow) # ref_flow=None returns unallocated inventory
for x in sorted(self._query.inventory(self.external_ref, ref_flow=ref_flow, **kwargs),
key=lambda t: (not t.is_reference, t.type == 'elementary', t.type == 'context', t.type == 'cutoff', t.direction)):
yield ExchangeRef(self, self._query.make_ref(x.flow), x.direction, value=x.value, termination=x.termination,
comment=x.comment, is_reference=x.is_reference)
def exchange_relation(self, ref_flow, exch_flow, direction, termination=None, **kwargs):
ref_flow = self._use_ref_exch(ref_flow)
if hasattr(exch_flow, 'external_ref'):
exch_flow = exch_flow.external_ref
return self._query.exchange_relation(self.external_ref, ref_flow,
exch_flow, direction,
termination=termination, **kwargs)
def fg_lcia(self, lcia_qty, ref_flow=None, **kwargs):
ref_flow = self._use_ref_exch(ref_flow)
return self._query.lcia(self.external_ref, ref_flow, lcia_qty, **kwargs)
'''
support process
'''
def reference_value(self, flow=None):
if flow is None:
flow = self.reference().flow
return sum(x.value for x in self.exchange_values(flow, reference=True))
def get_exchange(self, key):
try:
return next(x for x in self.reference_entity if x.key == key)
except StopIteration:
raise KeyError
@property
def alloc_qty(self):
"""
This is hugely kludgy. What should be the expected behavior of a process ref asked to perform allocation?
:return:
"""
return None
'''
Background queries
'''
def foreground(self, ref_flow=None, **kwargs):
ref_flow = self._use_ref_exch(ref_flow)
return self._query.foreground(self.external_ref, ref_flow=ref_flow, **kwargs)
def consumers(self, ref_flow=None, **kwargs):
ref_flow = self._use_ref_exch(ref_flow)
return self._query.consumers(self.external_ref, ref_flow=ref_flow, **kwargs)
def dependencies(self, ref_flow=None, **kwargs):
ref_flow = self._use_ref_exch(ref_flow)
return self._query.dependencies(self.external_ref, ref_flow=ref_flow, **kwargs)
def emissions(self, ref_flow=None, **kwargs):
ref_flow = self._use_ref_exch(ref_flow)
return self._query.emissions(self.external_ref, ref_flow=ref_flow, **kwargs)
def cutoffs(self, ref_flow=None, **kwargs):
ref_flow = self._use_ref_exch(ref_flow)
return self._query.cutoffs(self.external_ref, ref_flow=ref_flow, **kwargs)
def is_in_background(self, termination=None, ref_flow=None, **kwargs):
if termination is None:
termination = self.external_ref
ref_flow = self._use_ref_exch(ref_flow)
return self._query.is_in_background(termination, ref_flow=ref_flow, **kwargs)
def ad(self, ref_flow=None, **kwargs):
ref_flow = self._use_ref_exch(ref_flow)
return self._query.ad(self.external_ref, ref_flow, **kwargs)
def bf(self, ref_flow=None, **kwargs):
ref_flow = self._use_ref_exch(ref_flow)
return self._query.bf(self.external_ref, ref_flow, **kwargs)
def lci(self, ref_flow=None, refresh=False, **kwargs):
"""
Caches LCI results
:param ref_flow:
:param refresh:
:param kwargs:
:return:
"""
ref_flow = self._use_ref_exch(ref_flow)
if refresh:
self._lci.pop(ref_flow, None)
if ref_flow not in self._lci:
self._lci[ref_flow] = list(self._query.lci(self.external_ref, ref_flow, **kwargs))
for i in self._lci[ref_flow]:
yield i
def unobserved_lci(self, observed, ref_flow=None, **kwargs):
"""
Performs a sys_lci of the process's unobserved exchanges, derived by excluding observed exchanges from the
process's inventory and passing the result to sys_lci. Note that terminations are ignored: if a process
has an observed Electricity flow, all of the process's electricity exchanges are assumed to be accounted for
by the observation. (flow.external_ref, direction) is the filter.
:param observed: iterable of exchanges or child flows, having a flow (with external_ref) and direction
:param ref_flow:
:param kwargs:
:return:
"""
excl = set((k.flow.external_ref, k.direction) for k in observed)
ref_flow = self._use_ref_exch(ref_flow)
incl = (k for k in self.inventory(ref_flow) if (k.flow.external_ref, k.direction) not in excl)
return self._query.sys_lci(self, incl, **kwargs)
def bg_lcia(self, lcia_qty, ref_flow=None, **kwargs):
"""
:param lcia_qty: should be a quantity ref (or qty), not an external ID
:param ref_flow:
:param kwargs:
:return:
"""
ref_flow = self._use_ref_exch(ref_flow)
return self._query.bg_lcia(self.external_ref, lcia_qty, ref_flow=ref_flow, **kwargs)
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_operation import AzureOperationPoller
import uuid
from .. import models
class WorkspaceCollectionsOperations(object):
"""WorkspaceCollectionsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
def get_by_name(
self, resource_group_name, workspace_collection_name, custom_headers=None, raw=False, **operation_config):
"""Retrieves an existing Power BI Workspace Collection.
:param resource_group_name: Azure resource group
:type resource_group_name: str
:param workspace_collection_name: Power BI Embedded Workspace
Collection name
:type workspace_collection_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`WorkspaceCollection
<azure.mgmt.powerbiembedded.models.WorkspaceCollection>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<azure.mgmt.powerbiembedded.models.ErrorException>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.PowerBI/workspaceCollections/{workspaceCollectionName}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'workspaceCollectionName': self._serialize.url("workspace_collection_name", workspace_collection_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('WorkspaceCollection', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create(
self, resource_group_name, workspace_collection_name, location=None, tags=None, custom_headers=None, raw=False, **operation_config):
"""Creates a new Power BI Workspace Collection with the specified
properties. A Power BI Workspace Collection contains one or more
workspaces, and can be used to provision keys that provide API access
to those workspaces.
:param resource_group_name: Azure resource group
:type resource_group_name: str
:param workspace_collection_name: Power BI Embedded Workspace
Collection name
:type workspace_collection_name: str
:param location: Azure location
:type location: str
:param tags:
:type tags: dict
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`WorkspaceCollection
<azure.mgmt.powerbiembedded.models.WorkspaceCollection>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<azure.mgmt.powerbiembedded.models.ErrorException>`
"""
body = models.CreateWorkspaceCollectionRequest(location=location, tags=tags)
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.PowerBI/workspaceCollections/{workspaceCollectionName}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'workspaceCollectionName': self._serialize.url("workspace_collection_name", workspace_collection_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(body, 'CreateWorkspaceCollectionRequest')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('WorkspaceCollection', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def update(
self, resource_group_name, workspace_collection_name, tags=None, custom_headers=None, raw=False, **operation_config):
"""Update an existing Power BI Workspace Collection with the specified
properties.
:param resource_group_name: Azure resource group
:type resource_group_name: str
:param workspace_collection_name: Power BI Embedded Workspace
Collection name
:type workspace_collection_name: str
:param tags:
:type tags: dict
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`WorkspaceCollection
<azure.mgmt.powerbiembedded.models.WorkspaceCollection>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<azure.mgmt.powerbiembedded.models.ErrorException>`
"""
body = models.UpdateWorkspaceCollectionRequest(tags=tags)
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.PowerBI/workspaceCollections/{workspaceCollectionName}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'workspaceCollectionName': self._serialize.url("workspace_collection_name", workspace_collection_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(body, 'UpdateWorkspaceCollectionRequest')
# Construct and send request
request = self._client.patch(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('WorkspaceCollection', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def delete(
self, resource_group_name, workspace_collection_name, custom_headers=None, raw=False, **operation_config):
"""Delete a Power BI Workspace Collection.
:param resource_group_name: Azure resource group
:type resource_group_name: str
:param workspace_collection_name: Power BI Embedded Workspace
Collection name
:type workspace_collection_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<azure.mgmt.powerbiembedded.models.ErrorException>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.PowerBI/workspaceCollections/{workspaceCollectionName}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'workspaceCollectionName': self._serialize.url("workspace_collection_name", workspace_collection_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
def long_running_send():
request = self._client.delete(url, query_parameters)
return self._client.send(request, header_parameters, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [202]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def check_name_availability(
self, location, name=None, type="Microsoft.PowerBI/workspaceCollections", custom_headers=None, raw=False, **operation_config):
"""Verify the specified Power BI Workspace Collection name is valid and
not already in use.
:param location: Azure location
:type location: str
:param name: Workspace collection name
:type name: str
:param type: Resource type
:type type: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`CheckNameResponse
<azure.mgmt.powerbiembedded.models.CheckNameResponse>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<azure.mgmt.powerbiembedded.models.ErrorException>`
"""
body = models.CheckNameRequest(name=name, type=type)
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.PowerBI/locations/{location}/checkNameAvailability'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'location': self._serialize.url("location", location, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(body, 'CheckNameRequest')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('CheckNameResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def list_by_resource_group(
self, resource_group_name, custom_headers=None, raw=False, **operation_config):
"""Retrieves all existing Power BI workspace collections in the specified
resource group.
:param resource_group_name: Azure resource group
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`WorkspaceCollectionPaged
<azure.mgmt.powerbiembedded.models.WorkspaceCollectionPaged>`
:raises:
:class:`ErrorException<azure.mgmt.powerbiembedded.models.ErrorException>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.PowerBI/workspaceCollections'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
return response
# Deserialize response
deserialized = models.WorkspaceCollectionPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.WorkspaceCollectionPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def list_by_subscription(
self, custom_headers=None, raw=False, **operation_config):
"""Retrieves all existing Power BI workspace collections in the specified
subscription.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`WorkspaceCollectionPaged
<azure.mgmt.powerbiembedded.models.WorkspaceCollectionPaged>`
:raises:
:class:`ErrorException<azure.mgmt.powerbiembedded.models.ErrorException>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.PowerBI/workspaceCollections'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
return response
# Deserialize response
deserialized = models.WorkspaceCollectionPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.WorkspaceCollectionPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def get_access_keys(
self, resource_group_name, workspace_collection_name, custom_headers=None, raw=False, **operation_config):
"""Retrieves the primary and secondary access keys for the specified
Power BI Workspace Collection.
:param resource_group_name: Azure resource group
:type resource_group_name: str
:param workspace_collection_name: Power BI Embedded Workspace
Collection name
:type workspace_collection_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`WorkspaceCollectionAccessKeys
<azure.mgmt.powerbiembedded.models.WorkspaceCollectionAccessKeys>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<azure.mgmt.powerbiembedded.models.ErrorException>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.PowerBI/workspaceCollections/{workspaceCollectionName}/listKeys'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'workspaceCollectionName': self._serialize.url("workspace_collection_name", workspace_collection_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('WorkspaceCollectionAccessKeys', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def regenerate_key(
self, resource_group_name, workspace_collection_name, key_name=None, custom_headers=None, raw=False, **operation_config):
"""Regenerates the primary or secondary access key for the specified
Power BI Workspace Collection.
:param resource_group_name: Azure resource group
:type resource_group_name: str
:param workspace_collection_name: Power BI Embedded Workspace
Collection name
:type workspace_collection_name: str
:param key_name: Key name. Possible values include: 'key1', 'key2'
:type key_name: str or :class:`AccessKeyName
<azure.mgmt.powerbiembedded.models.AccessKeyName>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`WorkspaceCollectionAccessKeys
<azure.mgmt.powerbiembedded.models.WorkspaceCollectionAccessKeys>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<azure.mgmt.powerbiembedded.models.ErrorException>`
"""
body = models.WorkspaceCollectionAccessKey(key_name=key_name)
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.PowerBI/workspaceCollections/{workspaceCollectionName}/regenerateKey'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'workspaceCollectionName': self._serialize.url("workspace_collection_name", workspace_collection_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(body, 'WorkspaceCollectionAccessKey')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('WorkspaceCollectionAccessKeys', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def migrate(
self, resource_group_name, target_resource_group=None, resources=None, custom_headers=None, raw=False, **operation_config):
"""Migrates an existing Power BI Workspace Collection to a different
resource group and/or subscription.
:param resource_group_name: Azure resource group
:type resource_group_name: str
:param target_resource_group: Name of the resource group the Power BI
workspace collections will be migrated to.
:type target_resource_group: str
:param resources:
:type resources: list of str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<azure.mgmt.powerbiembedded.models.ErrorException>`
"""
body = models.MigrateWorkspaceCollectionRequest(target_resource_group=target_resource_group, resources=resources)
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/moveResources'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(body, 'MigrateWorkspaceCollectionRequest')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
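# ---------------------------------------------------------------------------
# Usage sketch (illustration only, not part of the generated SDK): how the
# operations above are normally reached through the management client. The
# client class, credential type, and attribute names below are assumptions
# based on the usual autorest layout and may differ in this package.
#
#   from azure.common.credentials import ServicePrincipalCredentials
#   from azure.mgmt.powerbiembedded import PowerBIEmbeddedManagementClient
#
#   credentials = ServicePrincipalCredentials(client_id, secret, tenant=tenant_id)
#   client = PowerBIEmbeddedManagementClient(credentials, subscription_id)
#
#   # Roll the primary access key for a workspace collection.
#   new_keys = client.workspace_collections.regenerate_key(
#       'my-resource-group', 'my-collection', key_name='key1')
#   print(new_keys.key1, new_keys.key2)
# ---------------------------------------------------------------------------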
|
import shlex
from pathlib import Path
import pytextnow
from nephele.command import Command
from nephele.events import Events
from nephele.telegram import Telegram
_EVENT_NAME = "textnow-message"
def send_sms(event):
telegram = Telegram(event)
username = event["textnow_username"]
sid_cookie = event["textnow_sid_cookie"]
csrf_cookie = event["textnow_csrf_cookie"]
to = event["textnow_to"]
text = event["textnow_text"]
user_cookies_file = Path(f"/tmp/{event['namespace']}/user_cookies.json")
try:
client = pytextnow.Client(username, sid_cookie, csrf_cookie, user_cookies_file)
except Exception:
telegram.send_message(
"An unexpected error occurred while logging into TextNow."
)
raise
try:
client.send_sms(to, text)
except Exception:
telegram.send_message(
"An unexpected error occurred while sending SMS from TextNow."
)
raise
def check_message_event_status(event):
events = Events(event)
telegram = Telegram(event)
try:
rule_info = events.describe_rule(_EVENT_NAME)
telegram.send_message(
f"""Username: {rule_info["event"]["textnow_username"]}
Cookie (connect.sid): {rule_info["event"]["textnow_sid_cookie"]}
Cookie (_csrf): {rule_info["event"]["textnow_csrf_cookie"]}
To: {rule_info["event"]["textnow_to"]}
Text: {rule_info["event"]["textnow_text"]}
Cron Expression: {rule_info["cron_expression"]}"""
)
except events.ResourceNotFoundException:
telegram.send_message(
f"You don't have a TextNow message event setup yet. Use {Command.SET_TEXTNOW_MESSAGE_EVENT.value} to create an event first." # noqa: E501
)
except Exception:
telegram.send_message(
"An unexpected error occurred while checking TextNow message event status."
)
raise
def set_message_event(event):
events = Events(event)
telegram = Telegram(event)
args = shlex.split(event["text"])[1:]
    if len(args) < 6:
telegram.send_message(
f"usage: {Command.SET_TEXTNOW_MESSAGE_EVENT.value} <username> <connect.sid cookie> <_csrf cookie> <recipient> <text> <cron expression>" # noqa: E501
)
return
event["text"] = Command.SCHEDULE_TEXTNOW_SEND_SMS.value
event["is_scheduled"] = True
event["textnow_username"] = args[0]
event["textnow_sid_cookie"] = args[1]
event["textnow_csrf_cookie"] = args[2]
event["textnow_to"] = args[3]
event["textnow_text"] = args[4]
cron_expression = args[5]
telegram.send_message("Testing TextNow message event...")
send_sms(event)
try:
events.put_rule(_EVENT_NAME, event, cron_expression)
telegram.send_message(
"Done! The SMS message will be sent periodically to your recipient."
)
except Exception:
telegram.send_message(
"An unexpected error occurred while creating TextNow message event."
)
raise
def delete_message_event(event):
events = Events(event)
telegram = Telegram(event)
try:
events.delete_rule(_EVENT_NAME)
telegram.send_message("Done! You have deleted the TextNow message event.")
except events.ResourceNotFoundException:
telegram.send_message("You don't have a TextNow message event setup yet.")
except Exception:
telegram.send_message(
"An unexpected error occurred while deleting TextNow message event."
)
raise
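# Example (illustration only) of the chat command this module expects. The
# command literal and cron syntax are assumptions here, since they come from
# Command.SET_TEXTNOW_MESSAGE_EVENT and the Events backend respectively:
#
#   /set_textnow_message_event myuser <connect.sid> <_csrf> +15551234567 "Good morning!" "0 13 * * *"
#
# shlex.split() keeps the quoted message text and the cron expression together,
# so exactly six positional arguments reach set_message_event.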
|
"""Utility functions for the haproxy module."""
def cmd_succeeded(output, ignored_outputs=None):
"""Check if the given output contains an error.
Most commonly a succeeded command will return an empty string. However, some
commands even though have succeeded return an informative message to the
caller. This helper function checks the status of a given output and if it's
empty or the message is included in the ignored_outputs list, it marks the
command status as successful.
@param output (list): the output of the command
@param ignored_outputs (list): list of known output strings to be ignored
@return (bool): True if the command succeeded, False otherwise
"""
if ignored_outputs is None:
ignored_outputs = []
if output and output[0] not in ignored_outputs:
return False
return True
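# Minimal self-check (illustration only) of the contract described in the
# docstring: an empty reply or a whitelisted informational reply counts as
# success, any other non-empty reply is a failure.
if __name__ == "__main__":
    assert cmd_succeeded([]) is True
    assert cmd_succeeded(["Done."], ignored_outputs=["Done."]) is True
    assert cmd_succeeded(["Permission denied"]) is False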
|
from django.shortcuts import render
from django.http import HttpResponse
from django.conf.urls.static import static
import requests
HOST = 'http://127.0.0.1:8000/'
# Create your views here.
def index(request):
return render(request, 'contracts/index.html', {'num':3})
def contract_card(request):
if dict(request.GET):
num = str(dict(request.GET)['num'][0])
#year = str(dict(request.GET)['year'][0])
r = requests.get(f"http://127.0.0.1:8000/items?num={num}")
data_dict = {
'table1':[r.json()],
'pay':36,
}
print(data_dict)
return render(request, 'contracts/contract_card.html', data_dict)
else:
return contracts(request)
def contracts(request):
r = requests.get("http://127.0.0.1:8000/all")
data_dict = {'table1':r.json(),}
return render(request, 'contracts/contracts.html', data_dict)
def notifications(request):
req_dict = {
'table': requests.get(f'{HOST}notifications').json()
}
print(req_dict)
return render(request, 'contracts/notifications.html', req_dict)
def budget_commitment(request):
req_dict = requests.get(f'{HOST}budget_commitment').json()
return render(request, 'contracts/budget_commitment.html', req_dict)
def commitment_treasury(request):
req_dict = requests.get(f'{HOST}commitment_treasury').json()
return render(request, 'contracts/commitment_treasury.html', req_dict)
def deals(request):
req_dict = requests.get(f'{HOST}deals').json()
req_dict['pusy'] = 'ty'
print(req_dict)
return render(request, 'contracts/deals.html', req_dict)
def limits(request):
req_dict = requests.get(f'{HOST}limits').json()
return render(request, 'contracts/limits.html', req_dict)
def payment_schedule(request):
req_dict = requests.get(f'{HOST}payment_schedule').json()
return render(request, 'contracts/payment_schedule.html', req_dict)
def payments_full(request):
req_dict = requests.get(f'{HOST}payments_full').json()
return render(request, 'contracts/payments_full.html', req_dict)
def payments_short(request):
req_dict = requests.get(f'{HOST}payments_short').json()
return render(request, 'contracts/payments_short.html', req_dict)
def payments(request):
req_dict = requests.get(f'{HOST}payments').json()
return render(request, 'contracts/payments.html', req_dict)
def plan(request):
req_dict = requests.get(f'{HOST}plan').json()
return render(request, 'contracts/plan.html', req_dict)
def purchase_plan(request):
req_dict = requests.get(f'{HOST}purchase_plan').json()
return render(request, 'contracts/purchase_plan.html', req_dict)
def spending(request):
req_dict = requests.get(f'{HOST}spending').json()
return render(request, 'contracts/spending.html', req_dict)
|
"""reservedkeywords
(C) 2004-2008 HAS
"""
kReservedKeywords = ["ID", "beginning", "end", "before", "after", "previous", "next", "first", "middle", "last", "any", "beginswith", "endswith", "contains", "isin", "doesnotbeginwith", "doesnotendwith", "doesnotcontain", "isnotin", "AND", "NOT", "OR", "begintransaction", "aborttransaction", "endtransaction", "isrunning", "resulttype", "ignore", "timeout", "waitreply", "help", "as", "with", "relaunchmode"] + keyword.kwlist |
import pytest
from geoconvert.convert import (
address_to_zipcode,
dept_name_to_zipcode,
fr_address_to_dept_code,
fr_dept_name_to_dept_code,
fr_postcode_to_dept_code,
)
class TestFrance:
@pytest.mark.parametrize(
"input_data, expected",
[
(u"Chemin du Solarium\\n Le Haut Vigneau\\n 33175 GRADIGNAN CEDEX", "33"),
(
"Chemin du Solarium 061256784589 Le Haut Vigneau 33175 GRADIGNAN CEDEX ",
"33",
),
(
"Chemin du Solarium Le Haut Vigneau 33175 GRADIGNAN CEDEX 061256784589",
"33",
),
("Chemin du Solarium Le Haut Vigneau 33175 GRADIGNAN CEDEX", "33"),
("7 cours Grandval\\nBP 414 - 20183 AJACCIO - CEDEX", "20A"),
("20212 Erbajolo", "20B"),
("20223 Solenzara Air", "20A"),
("BP 55342 20223 Solenzara Air", "20A"),
("Chemin du Solarium Le Haut Vigneau 33 175 GRADIGNAN CEDEX", "33"),
("20 223 Solenzara Air", "20A"),
("97821 Le Port Cedex", "974"),
("27006 Évreux Cedex", "27"),
(" 27006 Évreux Cedex", "27"),
("27006", "27"),
("Roissy-en-France95700", "95"),
(" 44200 BP 10720 Nantes cedex", "44"),
(
"a l attention de M. Bon Jean, Avenue des clients BP 72152, F - 31020 Toulouse",
"31",
),
(
"a l attention de M. Bon Jean, Avenue des clients BP72152, F - 31020 Toulouse",
"31",
),
(
"a l attention de M. Bon Jean, Avenue des clients bp72152, F - 31020 Toulouse",
"31",
),
("Avenue des clients CS 72152, F - 31020 Toulouse", "31"),
("BP 1330, 6503 TARBES Cedex 9, tel. 05.62.54.58.63", None),
(
"Ville de Blanquefort, 12 rue Dupaty B.P. 20117, à l attention de fernanda Edant-33294 Blanquefort.",
"33",
),
(
"conseil général du Haut-Rhin, 100 avenue d alsace B.P.20351, conseil général du Haut-Rhin-68006 Colmar Cedex",
"68",
),
("Avenue des clients CS 72152, F - 31020 Toulouse", "31"),
("Avenue des clients CS72152, F - 31020 Toulouse", "31"),
("6503 TARBES Cedex 9, tel. 05.62.54.58.63", None),
("97701 Saint-Barthelemy", "977"),
("97098 Saint-Barthelemy", "977"),
("a l attention de M. Bon Jean, Avenue des client", None),
("13 avenue de la porte d'Italie TSA 61371, F - 75621 Paris", "75"),
("avenue René Cassin — BP 67190 97801 Saint-Denis Cedex 9", "974"),
("M. le maire, hôtel de Ville 97717 Saint-Denis", "974"),
("Rue de la Réunion, 75000 Paris", "75"),
("Rue de l'Orne, 44800 Saint-Herblain", "44"),
("Martinique", "972"),
("cotes d'armr", None),
("cotes d'armor", "22"),
("lot", "46"),
("lot-et-garonne", "47"),
("Ici, c'est Angers dans le-Maine et-Loire", "49"),
("Loire", "42"),
("Loiret", "45"),
("Haute Vienne", "87"),
("La Vienne-Dynamique", "86"),
("Haute\tLoire", "43"),
("Haute-Loire", "43"),
("-Marne-", "51"),
("Haute-Marne", "52"),
("Eure", "27"),
("Eure-et Loir", "28"),
("Indre", "36"),
("Indre-et Loire", "37"),
("Tarn", "81"),
("Bonjour du Tarn et Garonne", "82"),
(u"Hauts-de-Seine ", "92"),
(u"H\xe9rault", "34"),
(u"Seine-Saint-Denis ", "93"),
(u"Loire", "42"),
(u"Corse-du-Sud", "20A"),
(u"", None),
(u"Vendé?e", "85"),
(u"Loire Atlanti)que", "44"),
(u"Yonne", "89"),
(u"Saint Pierre et Miquelon", "975"),
("Tout savoir sur Saint Barthélemy", "977"),
("Tout savoir sur saint-barthelemy", "977"),
("Tout savoir sur saint Barthélémy", "977"),
# There can be some mistakes, that we may want to fix one day.
# In this case, we could look for 2 or 3 digit
("Rue de l'Orne, Saint-Herblain (44)", "61"),
],
)
def test_fr_address_to_dept_code(self, input_data, expected):
assert fr_address_to_dept_code(input_data) == expected
@pytest.mark.parametrize(
"input_data, expected",
[
(u"Chemin du Solarium\\n Le Haut Vigneau\\n 33175 GRADIGNAN CEDEX", "33"),
(
"Chemin du Solarium 061256784589 Le Haut Vigneau 33175 GRADIGNAN CEDEX ",
"33",
),
(
"Chemin du Solarium Le Haut Vigneau 33175 GRADIGNAN CEDEX 061256784589",
"33",
),
("Chemin du Solarium Le Haut Vigneau 33175 GRADIGNAN CEDEX", "33"),
("7 cours Grandval\\nBP 414 - 20183 AJACCIO - CEDEX", "20A"),
("20212 Erbajolo", "20B"),
("20223 Solenzara Air", "20A"),
("BP 55342 20223 Solenzara Air", "20A"),
("Chemin du Solarium Le Haut Vigneau 33 175 GRADIGNAN CEDEX", "33"),
("20 223 Solenzara Air", "20A"),
("97821 Le Port Cedex", "974"),
("27006 Évreux Cedex", "27"),
(" 27006 Évreux Cedex", "27"),
("27006", "27"),
("Roissy-en-France95700", "95"),
(" 44200 BP 10720 Nantes cedex", "44"),
(
"a l attention de M. Bon Jean, Avenue des clients BP 72152, F - 31020 Toulouse",
"31",
),
(
"a l attention de M. Bon Jean, Avenue des clients BP72152, F - 31020 Toulouse",
"31",
),
(
"a l attention de M. Bon Jean, Avenue des clients bp72152, F - 31020 Toulouse",
"31",
),
("Avenue des clients CS 72152, F - 31020 Toulouse", "31"),
("BP 1330, 6503 TARBES Cedex 9, tel. 05.62.54.58.63", None),
(
"Ville de Blanquefort, 12 rue Dupaty B.P. 20117, à l attention de fernanda Edant-33294 Blanquefort.",
"33",
),
(
"conseil général du Haut-Rhin, 100 avenue d alsace B.P.20351, conseil général du Haut-Rhin-68006 Colmar Cedex",
"68",
),
("Avenue des clients CS 72152, F - 31020 Toulouse", "31"),
("Avenue des clients CS72152, F - 31020 Toulouse", "31"),
("6503 TARBES Cedex 9, tel. 05.62.54.58.63", None),
("97701 Saint-Barthelemy", "977"),
("97098 Saint-Barthelemy", "977"),
("a l attention de M. Bon Jean, Avenue des client", None),
("13 avenue de la porte d'Italie TSA 61371, F - 75621 Paris", "75"),
("avenue René Cassin — BP 67190 97801 Saint-Denis Cedex 9", "974"),
("M. le maire, hôtel de Ville 97717 Saint-Denis", "974"),
("Rue de la Réunion, 75000 Paris", "75"),
("Rue de l'Orne, 44800 Saint-Herblain", "44"),
("D7, Sainte-Luce 97228, Martinique", "972"),
("99999", None),
],
)
def test_fr_postcode_to_dept_code(self, input_data, expected):
assert fr_postcode_to_dept_code(input_data) == expected
assert address_to_zipcode(input_data) == expected
@pytest.mark.parametrize(
"input_data, expected",
[
("Martinique", "972"),
("cotes d'armr", None),
("cotes d'armor", "22"),
("lot", "46"),
("lot-et-garonne", "47"),
("Ici, c'est Angers dans le-Maine et-Loire", "49"),
("Loire", "42"),
("Loiret", "45"),
("Haute Vienne", "87"),
("La Vienne-Dynamique", "86"),
("Haute\tLoire", "43"),
("Haute-Loire", "43"),
("-Marne-", "51"),
("Haute-Marne", "52"),
("Eure", "27"),
("Eure-et Loir", "28"),
("Indre", "36"),
("Indre-et Loire", "37"),
("Tarn", "81"),
("Bonjour du Tarn et Garonne", "82"),
(u"Hauts-de-Seine ", "92"),
(u"H\xe9rault", "34"),
(u"Seine-Saint-Denis ", "93"),
(u"Loire", "42"),
(u"Corse-du-Sud", "20A"),
(u"", None),
(u"Vendé?e", "85"),
(u"Loire Atlanti)que", "44"),
(u"Yonne", "89"),
(u"Saint Pierre et Miquelon", "975"),
("Tout savoir sur Saint Barthélemy", "977"),
("Tout savoir sur saint-barthelemy", "977"),
("Tout savoir sur saint Barthélémy", "977"),
# There may be some mistakes, so be careful what is passed
("Rue de la Réunion, 75000 Paris", "974"),
("Rue de l'Orne, 44800 Saint-Herblain", "61"),
],
)
def test_fr_dept_name_dept_code(self, input_data, expected):
assert fr_dept_name_to_dept_code(input_data) == expected
assert dept_name_to_zipcode(input_data) == expected
|
import tqdm
import random
import argparse
from src.db.sqlalchemy import db_session
from src.model.user import User
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('max_value', type=float)
return parser.parse_args()
def add():
user_list = db_session().query(User).all()
for user in tqdm.tqdm(user_list, total=len(user_list)):
balance_user = round(random.uniform(0, args.max_value), 2)
user.balance = balance_user
db_session().commit()
if __name__ == '__main__':
args = parse_args()
add()
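# Usage sketch (illustration only; the script's file name is an assumption):
#   python set_random_balances.py 100.0
# assigns every user a random balance between 0.00 and max_value (100.00 here).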
|
# -*- coding: UTF-8 -*-
from flask.helpers import url_for
from leancloud.errors import LeanCloudError
from leancloud.query import Query
from leancloud.user import User
from forms.auth import LoginForm, RegisterForm
from admin_views import admin_view
from flask import redirect, render_template, request
from flask_login import (current_user, login_user, login_required,
logout_user, confirm_login, login_fresh)
from models.user import Admin
@admin_view.route("/login", methods=["GET", "POST"])
def login():
"""
    Admin login
"""
# if current_user is not None and current_user.is_authenticated:
# return redirect(url_for('index'))
form = LoginForm()
if form.validate_on_submit():
try:
User().login(form.login.data, form.password.data)
        except LeanCloudError as e:
            if e.code == 210:
                print('Incorrect username or password')
            elif e.code == 211:
                print('User does not exist')
            return 'Incorrect username or password'
else:
user = Query(User).equal_to("username", form.login.data).first()
admin = Query(Admin).equal_to("user", user).first()
login_user(admin)
next = request.args.get('next')
return redirect(next or "/admin/medical")
return render_template('auth/login.html', form=form)
@admin_view.route("/register", methods=["GET", "POST"])
@login_required
def regist_admin():
"""
Register a new user
"""
form = RegisterForm()
if form.validate_on_submit():
user = form.save()
return redirect(url_for('index'))
return render_template("auth/register.html", form=form)
@admin_view.route("/logout")
@login_required
def logout():
logout_user()
return redirect(url_for('.login'))
|
from django.core.management.base import BaseCommand
from jdleden.ledenlijst import create_department_excels_from_file
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('members_file', nargs=1, type=str)
def handle(self, *args, **options):
create_department_excels_from_file(options['members_file'][0])
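# Usage sketch (illustration only; the command name comes from this module's
# file name, which is assumed here to be make_department_excels.py):
#   python manage.py make_department_excels members.xls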
|
# Generated by Django 3.0.4 on 2020-04-12 01:48
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('user', '0003_auto_20200401_1056'),
('restapi', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Url',
fields=[
('id', models.BigAutoField(primary_key=True, serialize=False)),
('path', models.CharField(max_length=254)),
('method', models.CharField(max_length=10)),
('describe', models.DateTimeField(auto_now_add=True)),
('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='restapi.Project')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='user.User')),
],
options={
'db_table': 'restapi_url',
},
),
]
|