max_stars_repo_path (stringlengths 3–269) | max_stars_repo_name (stringlengths 4–119) | max_stars_count (int64 0–191k) | id (stringlengths 1–7) | content (stringlengths 6–1.05M) | score (float64 0.23–5.13) | int_score (int64 0–5) |
---|---|---|---|---|---|---|
src/ml_final_project/utils/evaluators/default.py | yuvalot/ml_final_project | 0 | 1900 | <gh_stars>0
def default_evaluator(model, X_test, y_test):
"""A simple evaluator that takes in a model,
and a test set, and returns the loss.
Args:
model: The model to evaluate.
X_test: The features matrix of the test set.
y_test: The one-hot labels matrix of the test set.
Returns:
The loss on the test set.
"""
return model.evaluate(X_test, y_test, verbose=0)[0]
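# Added usage sketch (not part of the original file): assuming `model` is an
# already-compiled tf.keras model and X_test / y_test are NumPy arrays with
# one-hot labels, the evaluator is called directly:
#
#   test_loss = default_evaluator(model, X_test, y_test)
#   print("test loss:", test_loss)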
| 2.671875 | 3 |
test.py | wangjm12138/Yolov3_wang | 0 | 1901 | import random
class Yolov3(object):
def __init__(self):
self.num=0
self.input_size=[8,16,32]
def __iter__(self):
return self
def __next__(self):
a = random.choice(self.input_size)
self.num=self.num+1
if self.num<3:
return a
else:
raise StopIteration
yolo=Yolov3()
for data in yolo:
print(data)
| 3.609375 | 4 |
utils/dsp.py | huchenxucs/WaveRNN | 0 | 1902 | <reponame>huchenxucs/WaveRNN
import math
import numpy as np
import librosa
from utils import hparams as hp
from scipy.signal import lfilter
import soundfile as sf
def label_2_float(x, bits):
return 2 * x / (2**bits - 1.) - 1.
def float_2_label(x, bits):
assert abs(x).max() <= 1.0
x = (x + 1.) * (2**bits - 1) / 2
return x.clip(0, 2**bits - 1)
def load_wav(path):
return librosa.load(path, sr=hp.sample_rate)[0]
def save_wav(x, path):
# librosa.output.write_wav(path, x.astype(np.float32), sr=hp.sample_rate)
sf.write(path, x.astype(np.float32), samplerate=hp.sample_rate)
def split_signal(x):
unsigned = x + 2**15
coarse = unsigned // 256
fine = unsigned % 256
return coarse, fine
def combine_signal(coarse, fine):
return coarse * 256 + fine - 2**15
def encode_16bits(x):
return np.clip(x * 2**15, -2**15, 2**15 - 1).astype(np.int16)
def linear_to_mel(spectrogram):
return librosa.feature.melspectrogram(
S=spectrogram, sr=hp.sample_rate, n_fft=hp.n_fft, n_mels=hp.num_mels, fmin=hp.fmin)
'''
def build_mel_basis():
return librosa.filters.mel(hp.sample_rate, hp.n_fft, n_mels=hp.num_mels, fmin=hp.fmin)
'''
def normalize(S):
return np.clip((S - hp.min_level_db) / -hp.min_level_db, 0, 1)
def denormalize(S):
return (np.clip(S, 0, 1) * -hp.min_level_db) + hp.min_level_db
def amp_to_db(x):
return 20 * np.log10(np.maximum(1e-5, x))
def db_to_amp(x):
return np.power(10.0, x * 0.05)
def spectrogram(y):
D = stft(y)
S = amp_to_db(np.abs(D)) - hp.ref_level_db
return normalize(S)
def melspectrogram(y):
D = stft(y)
S = amp_to_db(linear_to_mel(np.abs(D)))
return normalize(S)
def stft(y):
return librosa.stft(
y=y,
n_fft=hp.n_fft, hop_length=hp.hop_length, win_length=hp.win_length)
def pre_emphasis(x):
return lfilter([1, -hp.preemphasis], [1], x)
def de_emphasis(x):
return lfilter([1], [1, -hp.preemphasis], x)
def encode_mu_law(x, mu):
mu = mu - 1
fx = np.sign(x) * np.log(1 + mu * np.abs(x)) / np.log(1 + mu)
return np.floor((fx + 1) / 2 * mu + 0.5)
def decode_mu_law(y, mu, from_labels=True):
# TODO: get rid of log2 - makes no sense
if from_labels: y = label_2_float(y, math.log2(mu))
mu = mu - 1
x = np.sign(y) / mu * ((1 + mu) ** np.abs(y) - 1)
return x
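# Added illustrative sketch (not part of the original module): mu-law
# companding maps a float waveform in [-1, 1] onto `mu` discrete labels and
# approximately back, e.g. with mu=256:
#
#   x = np.linspace(-1, 1, 5)
#   labels = encode_mu_law(x, mu=256)      # values in [0, 255]
#   x_hat = decode_mu_law(labels, mu=256)  # close to the original x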
def reconstruct_waveform(mel, n_iter=32):
"""Uses Griffin-Lim phase reconstruction to convert from a normalized
mel spectrogram back into a waveform."""
denormalized = denormalize(mel)
amp_mel = db_to_amp(denormalized)
S = librosa.feature.inverse.mel_to_stft(
amp_mel, power=1, sr=hp.sample_rate,
n_fft=hp.n_fft, fmin=hp.fmin)
wav = librosa.core.griffinlim(
S, n_iter=n_iter,
hop_length=hp.hop_length, win_length=hp.win_length)
return wav
| 2.40625 | 2 |
loldib/getratings/models/NA/na_talon/na_talon_jng.py | koliupy/loldib | 0 | 1903 | <filename>loldib/getratings/models/NA/na_talon/na_talon_jng.py
from getratings.models.ratings import Ratings
class NA_Talon_Jng_Aatrox(Ratings):
pass
class NA_Talon_Jng_Ahri(Ratings):
pass
class NA_Talon_Jng_Akali(Ratings):
pass
class NA_Talon_Jng_Alistar(Ratings):
pass
class NA_Talon_Jng_Amumu(Ratings):
pass
class NA_Talon_Jng_Anivia(Ratings):
pass
class NA_Talon_Jng_Annie(Ratings):
pass
class NA_Talon_Jng_Ashe(Ratings):
pass
class NA_Talon_Jng_AurelionSol(Ratings):
pass
class NA_Talon_Jng_Azir(Ratings):
pass
class NA_Talon_Jng_Bard(Ratings):
pass
class NA_Talon_Jng_Blitzcrank(Ratings):
pass
class NA_Talon_Jng_Brand(Ratings):
pass
class NA_Talon_Jng_Braum(Ratings):
pass
class NA_Talon_Jng_Caitlyn(Ratings):
pass
class NA_Talon_Jng_Camille(Ratings):
pass
class NA_Talon_Jng_Cassiopeia(Ratings):
pass
class NA_Talon_Jng_Chogath(Ratings):
pass
class NA_Talon_Jng_Corki(Ratings):
pass
class NA_Talon_Jng_Darius(Ratings):
pass
class NA_Talon_Jng_Diana(Ratings):
pass
class NA_Talon_Jng_Draven(Ratings):
pass
class NA_Talon_Jng_DrMundo(Ratings):
pass
class NA_Talon_Jng_Ekko(Ratings):
pass
class NA_Talon_Jng_Elise(Ratings):
pass
class NA_Talon_Jng_Evelynn(Ratings):
pass
class NA_Talon_Jng_Ezreal(Ratings):
pass
class NA_Talon_Jng_Fiddlesticks(Ratings):
pass
class NA_Talon_Jng_Fiora(Ratings):
pass
class NA_Talon_Jng_Fizz(Ratings):
pass
class NA_Talon_Jng_Galio(Ratings):
pass
class NA_Talon_Jng_Gangplank(Ratings):
pass
class NA_Talon_Jng_Garen(Ratings):
pass
class NA_Talon_Jng_Gnar(Ratings):
pass
class NA_Talon_Jng_Gragas(Ratings):
pass
class NA_Talon_Jng_Graves(Ratings):
pass
class NA_Talon_Jng_Hecarim(Ratings):
pass
class NA_Talon_Jng_Heimerdinger(Ratings):
pass
class NA_Talon_Jng_Illaoi(Ratings):
pass
class NA_Talon_Jng_Irelia(Ratings):
pass
class NA_Talon_Jng_Ivern(Ratings):
pass
class NA_Talon_Jng_Janna(Ratings):
pass
class NA_Talon_Jng_JarvanIV(Ratings):
pass
class NA_Talon_Jng_Jax(Ratings):
pass
class NA_Talon_Jng_Jayce(Ratings):
pass
class NA_Talon_Jng_Jhin(Ratings):
pass
class NA_Talon_Jng_Jinx(Ratings):
pass
class NA_Talon_Jng_Kalista(Ratings):
pass
class NA_Talon_Jng_Karma(Ratings):
pass
class NA_Talon_Jng_Karthus(Ratings):
pass
class NA_Talon_Jng_Kassadin(Ratings):
pass
class NA_Talon_Jng_Katarina(Ratings):
pass
class NA_Talon_Jng_Kayle(Ratings):
pass
class NA_Talon_Jng_Kayn(Ratings):
pass
class NA_Talon_Jng_Kennen(Ratings):
pass
class NA_Talon_Jng_Khazix(Ratings):
pass
class NA_Talon_Jng_Kindred(Ratings):
pass
class NA_Talon_Jng_Kled(Ratings):
pass
class NA_Talon_Jng_KogMaw(Ratings):
pass
class NA_Talon_Jng_Leblanc(Ratings):
pass
class NA_Talon_Jng_LeeSin(Ratings):
pass
class NA_Talon_Jng_Leona(Ratings):
pass
class NA_Talon_Jng_Lissandra(Ratings):
pass
class NA_Talon_Jng_Lucian(Ratings):
pass
class NA_Talon_Jng_Lulu(Ratings):
pass
class NA_Talon_Jng_Lux(Ratings):
pass
class NA_Talon_Jng_Malphite(Ratings):
pass
class NA_Talon_Jng_Malzahar(Ratings):
pass
class NA_Talon_Jng_Maokai(Ratings):
pass
class NA_Talon_Jng_MasterYi(Ratings):
pass
class NA_Talon_Jng_MissFortune(Ratings):
pass
class NA_Talon_Jng_MonkeyKing(Ratings):
pass
class NA_Talon_Jng_Mordekaiser(Ratings):
pass
class NA_Talon_Jng_Morgana(Ratings):
pass
class NA_Talon_Jng_Nami(Ratings):
pass
class NA_Talon_Jng_Nasus(Ratings):
pass
class NA_Talon_Jng_Nautilus(Ratings):
pass
class NA_Talon_Jng_Nidalee(Ratings):
pass
class NA_Talon_Jng_Nocturne(Ratings):
pass
class NA_Talon_Jng_Nunu(Ratings):
pass
class NA_Talon_Jng_Olaf(Ratings):
pass
class NA_Talon_Jng_Orianna(Ratings):
pass
class NA_Talon_Jng_Ornn(Ratings):
pass
class NA_Talon_Jng_Pantheon(Ratings):
pass
class NA_Talon_Jng_Poppy(Ratings):
pass
class NA_Talon_Jng_Quinn(Ratings):
pass
class NA_Talon_Jng_Rakan(Ratings):
pass
class NA_Talon_Jng_Rammus(Ratings):
pass
class NA_Talon_Jng_RekSai(Ratings):
pass
class NA_Talon_Jng_Renekton(Ratings):
pass
class NA_Talon_Jng_Rengar(Ratings):
pass
class NA_Talon_Jng_Riven(Ratings):
pass
class NA_Talon_Jng_Rumble(Ratings):
pass
class NA_Talon_Jng_Ryze(Ratings):
pass
class NA_Talon_Jng_Sejuani(Ratings):
pass
class NA_Talon_Jng_Shaco(Ratings):
pass
class NA_Talon_Jng_Shen(Ratings):
pass
class NA_Talon_Jng_Shyvana(Ratings):
pass
class NA_Talon_Jng_Singed(Ratings):
pass
class NA_Talon_Jng_Sion(Ratings):
pass
class NA_Talon_Jng_Sivir(Ratings):
pass
class NA_Talon_Jng_Skarner(Ratings):
pass
class NA_Talon_Jng_Sona(Ratings):
pass
class NA_Talon_Jng_Soraka(Ratings):
pass
class NA_Talon_Jng_Swain(Ratings):
pass
class NA_Talon_Jng_Syndra(Ratings):
pass
class NA_Talon_Jng_TahmKench(Ratings):
pass
class NA_Talon_Jng_Taliyah(Ratings):
pass
class NA_Talon_Jng_Talon(Ratings):
pass
class NA_Talon_Jng_Taric(Ratings):
pass
class NA_Talon_Jng_Teemo(Ratings):
pass
class NA_Talon_Jng_Thresh(Ratings):
pass
class NA_Talon_Jng_Tristana(Ratings):
pass
class NA_Talon_Jng_Trundle(Ratings):
pass
class NA_Talon_Jng_Tryndamere(Ratings):
pass
class NA_Talon_Jng_TwistedFate(Ratings):
pass
class NA_Talon_Jng_Twitch(Ratings):
pass
class NA_Talon_Jng_Udyr(Ratings):
pass
class NA_Talon_Jng_Urgot(Ratings):
pass
class NA_Talon_Jng_Varus(Ratings):
pass
class NA_Talon_Jng_Vayne(Ratings):
pass
class NA_Talon_Jng_Veigar(Ratings):
pass
class NA_Talon_Jng_Velkoz(Ratings):
pass
class NA_Talon_Jng_Vi(Ratings):
pass
class NA_Talon_Jng_Viktor(Ratings):
pass
class NA_Talon_Jng_Vladimir(Ratings):
pass
class NA_Talon_Jng_Volibear(Ratings):
pass
class NA_Talon_Jng_Warwick(Ratings):
pass
class NA_Talon_Jng_Xayah(Ratings):
pass
class NA_Talon_Jng_Xerath(Ratings):
pass
class NA_Talon_Jng_XinZhao(Ratings):
pass
class NA_Talon_Jng_Yasuo(Ratings):
pass
class NA_Talon_Jng_Yorick(Ratings):
pass
class NA_Talon_Jng_Zac(Ratings):
pass
class NA_Talon_Jng_Zed(Ratings):
pass
class NA_Talon_Jng_Ziggs(Ratings):
pass
class NA_Talon_Jng_Zilean(Ratings):
pass
class NA_Talon_Jng_Zyra(Ratings):
pass
| 1.46875 | 1 |
utils/turkish.py | derenyilmaz/personality-analysis-framework | 1 | 1904 | class TurkishText():
"""Class for handling lowercase/uppercase conversions of Turkish characters..
Attributes:
text -- Turkish text to be handled
"""
text = ""
l = ['ı', 'ğ', 'ü', 'ş', 'i', 'ö', 'ç']
u = ['I', 'Ğ', 'Ü', 'Ş', 'İ', 'Ö', 'Ç']
def __init__(self, text):
self.text = text
def upper(self):
"""Converts the text into uppercase letters.
Returns string.
"""
res = ""
for i in self.text:
if i in self.l:
res += self.u[self.l.index(i)]
else :
res += i.upper()
return res
def lower(self):
"""Converts the text into lowercase letters.
Returns string.
"""
res = ""
for i in self.text:
if i in self.u:
res += self.l[self.u.index(i)]
else :
res += i.lower()
return res
def capitalize(self):
"""Converts each first letter to uppercase, and the rest to lowercase letters.
Returns string.
"""
m = self.text.split()
res = ""
for i in m:
res += TurkishText(i[0]).upper() + TurkishText(i[1:]).lower() + " "
return res[:-1:]
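# Added usage sketch: Python's built-in str.upper()/str.lower() mishandle the
# Turkish dotted/dotless i pair, which is what this class works around.
#
#   TurkishText("istanbul ığdır").upper()        # -> 'İSTANBUL IĞDIR'
#   TurkishText("İZMİR ÇİÇEK").lower()           # -> 'izmir çiçek'
#   TurkishText("ığdır çiçek").capitalize()      # -> 'Iğdır Çiçek'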
| 3.796875 | 4 |
app/helpers/geocode.py | Soumya117/finnazureflaskapp | 0 | 1905 | import googlemaps
gmaps = googlemaps.Client(key='google_key')
def get_markers(address):
geocode_result = gmaps.geocode(address)
return geocode_result[0]['geometry']['location']
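# Added usage sketch (assumes the 'google_key' placeholder above has been
# replaced with a valid Google Maps API key):
#
#   marker = get_markers("Helsinki Cathedral, Helsinki, Finland")
#   # -> {'lat': ..., 'lng': ...}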
| 2.6875 | 3 |
tf/estimators/keras_estimator.py | aspratyush/dl_utils | 0 | 1906 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Imports
import os
import numpy as np
import tensorflow as tf
def run(model, X, Y, optimizer=None, nb_epochs=30, nb_batches=128):
"""
Run the estimator
"""
if optimizer is None:
optimizer = tf.keras.estimators.SGD(
lr=0.0009, decay=1e-5, momentum=0.9, nesterov=True)
# 1. Compile the model
model.compile(
optimizer=optimizer, loss='categorical_crossentropy',
metrics=['accuracy'])
# 2. Create an estimator
model_est = tf.keras.estimator.model_to_estimator(
keras_model=model, model_dir='./lenet')
# Training
# 3a. Create the training function
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={model.input_names[0]: X['train'].astype(np.float32)},
y=Y['train'].astype(np.float32),
batch_size=nb_batches,
num_epochs=nb_epochs,
shuffle=True
)
# 3b. Train the model
model_est.train(input_fn=train_input_fn, steps=nb_epochs*nb_batches)
# Evaluate
# 4a. Evaluate the model
eval_input_fn = tf.estimator.inputs.numpy_input_fn(
x={model.input_names[0]: X['test'].astype(np.float32)},
y=Y['test'].astype(np.float32),
batch_size=nb_batches,
num_epochs=nb_epochs,
shuffle=True
)
# 4b. Evaluate the model
model_eval = model_est.evaluate(input_fn=eval_input_fn)
print(model_eval)
return model_est, model_eval
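# Added usage sketch: `run` expects X and Y to be dicts keyed by 'train' and
# 'test' that hold NumPy arrays (variable names below are illustrative only):
#
#   X = {'train': x_train, 'test': x_test}
#   Y = {'train': y_train_onehot, 'test': y_test_onehot}
#   model_est, model_eval = run(model, X, Y, nb_epochs=10, nb_batches=64)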
def run_from_generator(
model, input_func=None, input_func_dict=None,
eval_func_dict=None, nb_epochs=10, optimizer=None, model_dir=None):
"""
Overloaded function to create an estimator using tf.data.Dataset
:param model : uncompiled keras model
:param input_fn : input function providing tf.data.Dataset to the estimator
:param input_fn_dict : dictionary containing input params for input_fn
:param eval_fn_dict : dictionary containing params for eval input_fn
:param model_dir : directory to store the trained model
"""
# 1. Create optimizer and compile model if optimizer is None
if (optimizer is None):
optimizer = tf.keras.optimizers.SGD(
lr=1e-3, decay=1e-5, momentum=0.9, nesterov=True)
# 2. compile the model
model.compile(
optimizer=optimizer, loss='categorical_crossentropy',
metrics=['accuracy'])
# 3. create estimator
dir_path = os.path.join(os.getcwd(), model_dir)
print("Model path chosen : ", dir_path)
if (not os.path.exists(dir_path)):
os.mkdir(dir_path)
print("Creating estimator...")
est = tf.keras.estimator.model_to_estimator(
keras_model=model, model_dir=dir_path)
# 4. Train and Evaluate the model
print("Training...")
# training spec
train_spec = tf.estimator.TrainSpec(input_fn=lambda: input_func(input_func_dict),
max_steps=500)
# evaluation spec
eval_spec = tf.estimator.EvalSpec(input_fn=lambda: input_func(eval_func_dict))
# Run the training
model_est = tf.estimator.train_and_evaluate(est, train_spec, eval_spec)
#est.train(input_fn=lambda: input_func(input_func_dict),
# steps=None)
#
#est.evalute(input_fn=lambda: input_func(eval_func_dict))
return est
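# Added sketch of an input function usable with `run_from_generator`. The
# keys inside the params dict are assumptions for illustration, not part of
# the original API:
#
#   def dataset_input_fn(params):
#       features = {params['input_name']: params['features']}
#       ds = tf.data.Dataset.from_tensor_slices((features, params['labels']))
#       return ds.shuffle(1024).batch(params.get('batch_size', 128)).repeat()
#
#   est = run_from_generator(model, input_func=dataset_input_fn,
#                            input_func_dict=train_params,
#                            eval_func_dict=eval_params, model_dir='model_out')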
| 2.734375 | 3 |
isign/archive.py | l0ui3/isign | 1 | 1907 | """ Represents an app archive. This is an app at rest, whether it's a naked
app bundle in a directory, or a zipped app bundle, or an IPA. We have a
common interface to extract these apps to a temp file, then resign them,
and create an archive of the same type """
import abc
import biplist
from bundle import App, Bundle, is_info_plist_native
from exceptions import MissingHelpers, NotSignable, NotMatched
from distutils import spawn
import logging
import os
from os.path import abspath, dirname, exists, isdir, isfile, join, normpath
import tempfile
import re
from subprocess import call
from signer import Signer
import shutil
import zipfile
REMOVE_WATCHKIT = True
helper_paths = {}
log = logging.getLogger(__name__)
def get_helper(helper_name):
""" find paths to executables. Cached in helper_paths """
if helper_name not in helper_paths or helper_paths[helper_name] is None:
# note, find_executable returns None is not found
# in other words, we keep retrying until found
helper_paths[helper_name] = spawn.find_executable(helper_name)
log.debug("got executable {} for {}".format(helper_paths[helper_name],
helper_name))
return helper_paths[helper_name]
def make_temp_dir():
return tempfile.mkdtemp(prefix="isign-")
def get_watchkit_paths(root_bundle_path):
""" collect sub-bundles of this bundle that have watchkit """
# typical structure:
#
# app_bundle
# ...
# some_directory
# watchkit_extension <-- this is the watchkit bundle
# Info.plist
# watchkit_bundle <-- this is the part that runs on the Watch
# Info.plist <-- WKWatchKitApp=True
#
watchkit_paths = []
for path, _, _ in os.walk(root_bundle_path):
if path == root_bundle_path:
continue
try:
bundle = Bundle(path)
except NotMatched:
# this directory is not a bundle
continue
if bundle.info.get('WKWatchKitApp') is True:
# get the *containing* bundle
watchkit_paths.append(dirname(path))
return watchkit_paths
def process_watchkit(root_bundle_path, should_remove=False):
""" Unfortunately, we currently can't sign WatchKit. If you don't
care about watchkit functionality, it is
generally harmless to remove it, so that's the default.
Remove when https://github.com/saucelabs/isign/issues/20 is fixed """
watchkit_paths = get_watchkit_paths(root_bundle_path)
if len(watchkit_paths) > 0:
if should_remove:
for path in watchkit_paths:
log.warning("Removing WatchKit bundle {}".format(path))
shutil.rmtree(path)
else:
raise NotSignable("Cannot yet sign WatchKit bundles")
class Archive(object):
__metaclass__ = abc.ABCMeta
# we use abc.abstractmethod throughout because there are certain class
# methods we want to ensure are implemented.
@abc.abstractmethod
def unarchive_to_temp(self):
""" Unarchive and copy to a temp directory """
pass
@abc.abstractmethod
def archive(cls, path, output_path):
""" Archive a directory to an output path """
pass
@abc.abstractmethod
def get_info(cls, path):
""" Obtain app metadata from Info.plist without unarchiving """
pass
@abc.abstractmethod
def precheck(cls, path):
""" Check if this is, in fact, an archive of this type """
pass
@abc.abstractmethod
def find_bundle_dir(cls, path):
""" Locate the directory of the main app (aka bundle) """
pass
class AppArchive(Archive):
""" The simplest form of archive -- a naked App Bundle, with no extra directory structure,
compression, etc """
@classmethod
def find_bundle_dir(cls, path):
""" Included for similarity with the zipped archive classes. In this case, the bundle dir
*is* the directory """
return path
@classmethod
def _get_plist_path(cls, path):
return join(cls.find_bundle_dir(path), "Info.plist")
@classmethod
def get_info(cls, path):
return biplist.readPlist(cls._get_plist_path(path))
@classmethod
def precheck(cls, path):
if not isdir(path):
return False
if not os.path.exists(cls._get_plist_path(path)):
return False
plist = cls.get_info(path)
is_native = is_info_plist_native(plist)
log.debug("is_native: {}".format(is_native))
return is_native
@classmethod
def archive(cls, path, output_path):
if exists(output_path):
shutil.rmtree(output_path)
shutil.move(path, output_path)
log.info("archived %s to %s" % (cls.__name__, output_path))
def __init__(self, path):
self.path = path
self.relative_bundle_dir = '.'
self.bundle_info = self.get_info(self.path)
def unarchive_to_temp(self):
containing_dir = make_temp_dir()
log.debug("unarchiving to temp... %s -> %s", self.path, containing_dir)
shutil.rmtree(containing_dir) # quirk of copytree, top dir can't exist already
shutil.copytree(self.path, containing_dir)
process_watchkit(containing_dir, REMOVE_WATCHKIT)
return UncompressedArchive(containing_dir, '.', self.__class__)
class AppZipArchive(Archive):
""" Just like an app, except it's zipped up, and when repackaged,
should be re-zipped. """
app_dir_pattern = r'^([^/]+\.app/).*$'
extensions = ['.zip']
helpers = ['zip', 'unzip']
@classmethod
def is_helpers_present(cls):
""" returns False if any of our helper apps wasn't found in class init """
is_present = True
for helper_name in cls.helpers:
if get_helper(helper_name) is None:
log.error("missing helper for class {}: {}".format(cls.__name__, helper_name))
is_present = False
break
return is_present
@classmethod
def is_archive_extension_match(cls, path):
""" does this path have the right extension """
log.debug('extension match')
for extension in cls.extensions:
log.debug('extension match: %s', extension)
if path.endswith(extension):
return True
return False
@classmethod
def find_bundle_dir(cls, zipfile_obj):
relative_bundle_dir = None
apps = set()
file_list = zipfile_obj.namelist()
for file_name in file_list:
matched = re.match(cls.app_dir_pattern, file_name)
if matched:
apps.add(matched.group(1))
if len(apps) == 1:
log.debug("found one app")
relative_bundle_dir = apps.pop()
elif len(apps) > 1:
log.warning('more than one app found in archive')
else:
log.warning('no apps found in archive')
return relative_bundle_dir
@classmethod
def _get_plist_path(cls, relative_bundle_dir):
return join(relative_bundle_dir, "Info.plist")
@classmethod
def precheck(cls, path):
""" Checks if an archive looks like this kind of app. Have to examine
within the zipfile, b/c we don't want to make temp dirs just yet. This
recapitulates a very similar precheck in the Bundle class """
if not isfile(path):
return False
if not cls.is_helpers_present():
raise MissingHelpers("helpers not present")
is_native = False
log.debug('precheck')
log.debug('path: %s', path)
if (cls.is_archive_extension_match(path) and
zipfile.is_zipfile(path)):
log.debug("this is an archive, and a zipfile")
zipfile_obj = zipfile.ZipFile(path)
relative_bundle_dir = cls.find_bundle_dir(zipfile_obj)
if relative_bundle_dir is not None:
plist_path = cls._get_plist_path(relative_bundle_dir)
if plist_path not in zipfile_obj.namelist():
return False
plist = cls.get_info(relative_bundle_dir, zipfile_obj)
is_native = is_info_plist_native(plist)
log.debug("is_native: {}".format(is_native))
return is_native
@classmethod
def get_info(cls, relative_bundle_dir, zipfile_obj):
plist_path = cls._get_plist_path(relative_bundle_dir)
plist_bytes = zipfile_obj.read(plist_path)
return biplist.readPlistFromString(plist_bytes)
def __init__(self, path):
self.path = path
zipfile_obj = zipfile.ZipFile(path)
self.relative_bundle_dir = self.find_bundle_dir(zipfile_obj)
self.bundle_info = self.get_info(self.relative_bundle_dir,
zipfile_obj)
def unarchive_to_temp(self):
containing_dir = make_temp_dir()
call([get_helper('unzip'), "-qu", self.path, "-d", containing_dir])
app_dir = abspath(join(containing_dir, self.relative_bundle_dir))
process_watchkit(app_dir, REMOVE_WATCHKIT)
return UncompressedArchive(containing_dir, self.relative_bundle_dir, self.__class__)
@classmethod
def archive(cls, containing_dir, output_path):
""" archive this up into a zipfile. Note this is a classmethod, because
the caller will use us on a temp directory somewhere """
# the temp file is necessary because zip always adds ".zip" if it
# does not have an extension. But we want to respect the desired
# output_path's extension, which could be ".ipa" or who knows.
# So we move it to the output_path later.
#
# We also do a little dance with making another temp directory just
# to construct the zip file. This is the best way to ensure the an unused
# filename. Also, `zip` won't overwrite existing files, so this is safer.
temp_zip_dir = None
try:
# need to chdir and use relative paths, because zip is stupid
temp_zip_dir = tempfile.mkdtemp(prefix="isign-zip-")
temp_zip_file = join(temp_zip_dir, 'temp.zip')
call([get_helper('zip'), "-qr", temp_zip_file, "."], cwd=containing_dir)
shutil.move(temp_zip_file, output_path)
log.info("archived %s to %s" % (cls.__name__, output_path))
finally:
if temp_zip_dir is not None and isdir(temp_zip_dir):
shutil.rmtree(temp_zip_dir)
class IpaArchive(AppZipArchive):
""" IPA is Apple's standard for distributing apps. Much like an AppZip,
but slightly different paths """
extensions = ['.ipa']
app_dir_pattern = r'^(Payload/[^/]+\.app/).*$'
class UncompressedArchive(object):
""" This just keeps track of some state with an unzipped app archive and
how to re-zip it back up once re-signed. The bundle is located somewhere
inside the containing directory, but might be a few directories down, like in
a ContainingDir/Payload/something.app
This class is also useful if you have an app that's already unzipped and
you want to sign it. """
def __init__(self, path, relative_bundle_dir, archive_class):
""" Path is the "Containing dir", the dir at the root level of the unzipped archive
(or the dir itself, in the case of an AppArchive archive)
relative bundle dir is the dir containing the bundle, e.g. Payload/Foo.app
archive class is the kind of archive this was (Ipa, etc.) """
self.path = path
self.relative_bundle_dir = relative_bundle_dir
self.archive_class = archive_class
bundle_path = normpath(join(path, relative_bundle_dir))
self.bundle = App(bundle_path)
def archive(self, output_path):
""" Re-zip this back up, or simply copy it out, depending on what the
original archive class did """
self.archive_class.archive(self.path, output_path)
def clone(self, target_path):
""" Copy the uncompressed archive somewhere else, return initialized
UncompressedArchive """
shutil.copytree(self.path, target_path)
return self.__class__(target_path,
self.relative_bundle_dir,
self.archive_class)
def remove(self):
# the containing dir might be gone already b/c AppArchive simply moves
# it to the desired target when done
if exists(self.path) and isdir(self.path):
log.debug('removing ua: %s', self.path)
shutil.rmtree(self.path)
def archive_factory(path):
""" Guess what kind of archive we are dealing with, return an
archive object. Returns None if path did not match any archive type """
archive = None
for cls in [IpaArchive, AppZipArchive, AppArchive]:
if cls.precheck(path):
archive = cls(path)
log.debug("File %s matched as %s", path, cls.__name__)
break
return archive
def view(input_path):
if not exists(input_path):
raise IOError("{0} not found".format(input_path))
ua = None
bundle_info = None
try:
archive = archive_factory(input_path)
if archive is None:
raise NotMatched('No matching archive type found')
ua = archive.unarchive_to_temp()
bundle_info = ua.bundle.info
finally:
if ua is not None:
ua.remove()
return bundle_info
def resign(input_path,
certificate,
key,
apple_cert,
provisioning_profile,
output_path,
info_props=None,
alternate_entitlements_path=None):
""" Unified interface to extract any kind of archive from
a temporary file, resign it with these credentials,
and create a similar archive for that resigned app """
if not exists(input_path):
raise IOError("{0} not found".format(input_path))
log.debug('Signing with apple_cert: {}'.format(apple_cert))
log.debug('Signing with key: {}'.format(key))
log.debug('Signing with certificate: {}'.format(certificate))
log.debug('Signing with provisioning_profile: {}'.format(provisioning_profile))
signer = Signer(signer_cert_file=certificate,
signer_key_file=key,
apple_cert_file=apple_cert)
ua = None
bundle_info = None
try:
archive = archive_factory(input_path)
if archive is None:
raise NotSignable('No matching archive type found')
ua = archive.unarchive_to_temp()
if info_props:
# Override info.plist props of the parent bundle
ua.bundle.update_info_props(info_props)
ua.bundle.resign(signer, provisioning_profile, alternate_entitlements_path)
bundle_info = ua.bundle.info
ua.archive(output_path)
except NotSignable as e:
msg = "Not signable: <{0}>: {1}\n".format(input_path, e)
log.info(msg)
raise
finally:
if ua is not None:
ua.remove()
return bundle_info
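# Added usage sketch (file names below are placeholders, not part of the
# original module):
#
#   bundle_info = resign('MyApp.ipa',
#                        certificate='cert.pem',
#                        key='key.pem',
#                        apple_cert='apple.pem',
#                        provisioning_profile='profile.mobileprovision',
#                        output_path='MyApp-resigned.ipa')
#   print(bundle_info.get('CFBundleIdentifier'))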
| 2.0625 | 2 |
conan/tools/env/virtualrunenv.py | dscole/conan | 0 | 1908 | from conan.tools.env import Environment
def runenv_from_cpp_info(conanfile, cpp_info):
""" return an Environment deducing the runtime information from a cpp_info
"""
dyn_runenv = Environment(conanfile)
if cpp_info is None: # This happens when the dependency is a private one = BINARY_SKIP
return dyn_runenv
if cpp_info.bin_paths: # cpp_info.exes is not defined yet
dyn_runenv.prepend_path("PATH", cpp_info.bin_paths)
# If it is a build_require this will be the build-os, otherwise it will be the host-os
if cpp_info.lib_paths:
dyn_runenv.prepend_path("LD_LIBRARY_PATH", cpp_info.lib_paths)
dyn_runenv.prepend_path("DYLD_LIBRARY_PATH", cpp_info.lib_paths)
if cpp_info.framework_paths:
dyn_runenv.prepend_path("DYLD_FRAMEWORK_PATH", cpp_info.framework_paths)
return dyn_runenv
class VirtualRunEnv:
""" captures the conanfile environment that is defined from its
dependencies, and also from profiles
"""
def __init__(self, conanfile):
self._conanfile = conanfile
def environment(self):
""" collects the runtime information from dependencies. For normal libraries should be
very occasional
"""
runenv = Environment(self._conanfile)
# FIXME: Missing profile info
# FIXME: Cache value?
host_req = self._conanfile.dependencies.host
test_req = self._conanfile.dependencies.test
for _, dep in list(host_req.items()) + list(test_req.items()):
if dep.runenv_info:
runenv.compose_env(dep.runenv_info)
runenv.compose_env(runenv_from_cpp_info(self._conanfile, dep.cpp_info))
return runenv
def generate(self, auto_activate=False):
run_env = self.environment()
if run_env:
run_env.save_script("conanrunenv", auto_activate=auto_activate)
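# Added usage sketch: inside a conanfile's generate() method the generator is
# typically instantiated with the conanfile itself and asked to write the
# activation script (the surrounding conanfile class is assumed):
#
#   def generate(self):
#       VirtualRunEnv(self).generate(auto_activate=True)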
| 2.3125 | 2 |
src/api/api_lists/models/list.py | rrickgauer/lists | 0 | 1909 | <gh_stars>0
"""
**********************************************************************************
List model
**********************************************************************************
"""
from enum import Enum
from dataclasses import dataclass
from uuid import UUID
from datetime import datetime
class ListType(str, Enum):
LIST : str = 'list'
TEMPLATE: str = 'template'
@classmethod
def _missing_(cls, value):
return ListType.LIST
@dataclass
class List:
id : UUID = None
user_id : UUID = None
name : str = None
created_on: datetime = None
type : ListType = ListType.LIST
| 2.5 | 2 |
config/appdaemon/apps/power_alarm.py | azogue/hassio_config | 18 | 1910 | <filename>config/appdaemon/apps/power_alarm.py
# -*- coding: utf-8 -*-
"""
Automation task as a AppDaemon App for Home Assistant -
current meter PEAK POWER notifications
"""
import datetime as dt
from enum import IntEnum
import appdaemon.plugins.hass.hassapi as hass
LOG_LEVEL = "INFO"
LOG_LEVEL_ALERT = "WARNING"
LOGGER = "special_event_log"
COEF_CRITICAL_LIMIT = 1.1 # 10% over limit
MIN_TIME_TURN_OFF_AC = 60 # secs
# Big power consumers
BIG_CONSUMER_1_CLIMATE = "switch.ac_dry_contact"
BIG_CONSUMER_1_LABEL = "aire acondicionado"
BIG_CONSUMER_2 = "switch.calentador"
BIG_CONSUMER_2_LABEL = "calentador"
_IOS_SOUND_POWER_PEAK = "US-EN-Morgan-Freeman-Vacate-The-Premises.wav"
class TypeNotif(IntEnum):
"""
Handler for different kinds of power notifications.
Used to centralize push message construction.
"""
ALERT_OFF = 0
ALERT_ON = 1
ALERT_CRITICAL = 2
def make_ios_push_data(self, data_msg: dict) -> dict:
if self.value == self.ALERT_CRITICAL:
push_data = {
"category": "powerpeak",
"badge": 10,
"sound": _IOS_SOUND_POWER_PEAK,
"critical": 1,
"volume": 1.0,
"thread-id": "power-peak-group",
}
elif self.value == self.ALERT_ON:
push_data = {
"category": "powerpeak",
"thread-id": "power-peak-group",
"badge": 1,
"critical": 1,
"sound": _IOS_SOUND_POWER_PEAK,
}
else:
push_data = {
"category": "confirm",
"thread-id": "power-peak-group",
"sound": _IOS_SOUND_POWER_PEAK,
"badge": 0,
}
data_msg["data"] = {"push": push_data}
return data_msg
def make_telegram_push_data(self, data_msg: dict, target: int) -> dict:
data_msg["target"] = target
data_msg["disable_notification"] = self.value == self.ALERT_OFF
data_msg["inline_keyboard"] = [
[("Luces ON", "/luceson"), ("Luces OFF", "/lucesoff")],
[
("Potencia eléctrica", "/enerpi"),
("Grafs. enerPI", "/enerpitiles"),
],
[
(
"Calentador OFF",
"/service_call switch/turn_off switch.calentador",
),
(
"AC OFF",
"/service_call switch/turn_off switch.ac_dry_contact",
),
],
]
return data_msg
def make_notification_message(
self,
current_peak,
last_trigger,
alarm_start,
devices_off="",
pow_instant=0.0,
pow_sustained=0.0,
) -> dict:
if self.value == self.ALERT_CRITICAL:
return {
"title": "¡El automático está a punto de saltar!",
"message": (
f"Apagando {devices_off} para intentar evitar "
"la sobrecarga eléctrica."
),
}
time_now = (
"{:%H:%M:%S}".format(last_trigger)
if last_trigger is not None
else "???"
)
if self.value == self.ALERT_ON:
data_msg = {
"title": "Alto consumo eléctrico!",
"message": (
f"Peak: {current_peak} W en {time_now}. "
f"Ahora {pow_instant} W ({pow_sustained} sostenidos)"
),
}
data_msg["message"] = data_msg["message"].format(
current_peak, time_now, pow_instant, pow_sustained
)
else:
duration_min = (
dt.datetime.now() - alarm_start
).total_seconds() / 60.0
data_msg = {
"title": "Consumo eléctrico: Normal",
"message": (
f"Potencia normal desde {time_now}, "
f"Pico de potencia: {current_peak} W. "
f"Alerta iniciada hace {duration_min:.1f} min."
),
}
return data_msg
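# Added illustrative sketch (values are made up): the push payload for an
# active alert combines the message body with platform-specific data.
#
#   notif = TypeNotif.ALERT_ON
#   msg = notif.make_notification_message(
#       5200, dt.datetime.now(), dt.datetime.now(),
#       pow_instant=4800, pow_sustained=4500)
#   ios_payload = notif.make_ios_push_data(msg.copy())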
# noinspection PyClassHasNoInit
class PeakNotifier(hass.Hass):
"""
App to notify power peaks (when they are greater than a certain limit),
and after that, notify when back to normal (< lower limit).
"""
# Limit Values
_max_peak: float
_upper_limit: float
_lower_limit: float
_min_time_high: int
_min_time_low: int
# App user inputs
_main_power: str
_main_power_peak: str
_notifier: str
_target_sensor: str
_alarm_state: bool = False
_critical_alarm_state: bool = False
_last_trigger = None
_alarm_start = None
_turn_off_measure_taken = False
_current_peak = 0
def initialize(self):
"""AppDaemon required method for app init."""
self._main_power = self.args.get("sustained_power")
self._main_power_peak = self.args.get("instant_power")
self._notifier = self.config.get("notifier").replace(".", "/")
self._target_sensor = self.config.get("chatid_sensor")
# Power limits
self._upper_limit = float(self.args.get("max_power_kw")) * 1000.0
self._lower_limit = float(self.args.get("max_power_kw_reset")) * 1000.0
self._min_time_high = int(self.args.get("min_time_high"))
self._min_time_low = int(self.args.get("min_time_low"))
# TODO implement _max_peak over _instant_power
self._max_peak = float(self.args.get("max_power_peak_kw")) * 1000.0
# Listen for Main Power changes:
self.listen_state(self.main_power_change, self._main_power)
self.log(
f"PeakNotifier Initialized. P={self._main_power}, "
f"with P>{self._upper_limit} W for {self._min_time_high} secs, "
f"(low={self._lower_limit} W for {self._min_time_low} secs). "
f"Notify: {self._notifier}.",
level=LOG_LEVEL,
log=LOGGER,
)
def notify_alert(self, type_notif: TypeNotif, data: dict):
ios_alarm_msg = type_notif.make_ios_push_data(data.copy())
tlg_alarm_msg = type_notif.make_telegram_push_data(
data.copy(), target=int(self.get_state(self._target_sensor)),
)
self.call_service(self._notifier, **ios_alarm_msg)
self.call_service("telegram_bot/send_message", **tlg_alarm_msg)
# noinspection PyUnusedLocal
def peak_power_change(self, entity, attribute, old, new, kwargs):
"""Power Peak ALARM logic control."""
try:
new = int(float(new))
except ValueError:
return
# Update peak
if new > self._upper_limit and new > self._current_peak:
self._current_peak = new
# noinspection PyUnusedLocal
def main_power_change(self, entity, attribute, old, new, kwargs):
"""Sustained Power ALARM logic control."""
try:
new = int(float(new))
except ValueError:
return
now = dt.datetime.now()
if not self._alarm_state and (new > self._upper_limit):
if new > self._current_peak:
self._current_peak = new
# Pre-Alarm state, before trigger
if self._last_trigger is None:
# Start power peak event
self.log(
"New power peak event at {} with P={} W".format(now, new),
level=LOG_LEVEL,
log=LOGGER,
)
self._last_trigger = now
elif (
now - self._last_trigger
).total_seconds() > self._min_time_high:
# TRIGGER ALARM
self._alarm_start = now
self._turn_off_measure_taken = False
type_notif = TypeNotif.ALERT_ON
data = type_notif.make_notification_message(
self._current_peak,
self._last_trigger,
self._alarm_start,
pow_instant=self.get_state(self._main_power_peak),
pow_sustained=new,
)
self.log(
f"TRIGGER ALARM with msg={data}",
level=LOG_LEVEL_ALERT,
log=LOGGER,
)
self.notify_alert(type_notif, data)
self._alarm_state = True
self._critical_alarm_state = False
self._last_trigger = now
# else: # wait some more time
# (this is the same power peak event,
# waiting min time to trigger alarm)
# pass
elif self._alarm_state: # Alarm state, waiting for reset
if new > self._current_peak:
self._current_peak = new
if (
not self._turn_off_measure_taken
and new > self._upper_limit * COEF_CRITICAL_LIMIT
):
self.log(
"ENABLE CRITICAL ALARM with {} W".format(new),
level=LOG_LEVEL_ALERT,
log=LOGGER,
)
self._critical_alarm_state = True
elif new < self._lower_limit:
if (
now - self._last_trigger
).total_seconds() > self._min_time_low:
# RESET ALARM
type_notif = TypeNotif.ALERT_OFF
data = type_notif.make_notification_message(
self._current_peak,
self._last_trigger,
self._alarm_start,
)
self.log(
"RESET ALARM MODE at {}".format(now),
level=LOG_LEVEL,
log=LOGGER,
)
self.notify_alert(type_notif, data)
self._alarm_state = False
self._critical_alarm_state = False
self._last_trigger = None
self._alarm_start = None
self._turn_off_measure_taken = False
self._current_peak = 0
elif (
not self._turn_off_measure_taken
and self._critical_alarm_state
and new < self._upper_limit
):
self.log(
"DISABLE CRITICAL ALARM (now {} W)".format(new),
level=LOG_LEVEL_ALERT,
log=LOGGER,
)
self._critical_alarm_state = False
elif (
not self._turn_off_measure_taken
and self._critical_alarm_state
and (
(now - self._alarm_start).total_seconds()
> MIN_TIME_TURN_OFF_AC
)
):
# Turn off AC if AC + heater are ON
self._turn_off_measure_taken = True
self._critical_alarm_state = False
devices_turning_off = ""
if self.get_state(BIG_CONSUMER_1_CLIMATE) == "on":
devices_turning_off = BIG_CONSUMER_1_LABEL
self.call_service("climate/turn_off", entity_id="all")
elif self.get_state(BIG_CONSUMER_2) == "on":
devices_turning_off = BIG_CONSUMER_2_LABEL
self.call_service(
"switch/turn_off", entity_id=BIG_CONSUMER_2
)
if devices_turning_off:
# Notification of devices turned off
self.log(
f"CRITICAL ACTION: Turn off '{devices_turning_off}'",
level="ERROR",
log=LOGGER,
)
type_notif = TypeNotif.ALERT_CRITICAL
data = type_notif.make_notification_message(
self._current_peak,
self._last_trigger,
self._alarm_start,
devices_off=devices_turning_off,
pow_instant=self.get_state(self._main_power_peak),
pow_sustained=new,
)
self.notify_alert(type_notif, data)
self._last_trigger = now
else:
self._last_trigger = now
elif (self._last_trigger is not None) and (
(now - self._last_trigger).total_seconds() > self._min_time_low
):
# Normal operation, reset last trigger if no more in min_time_lower
self.log(
"RESET LAST TRIGGER (was in {})".format(self._last_trigger),
level=LOG_LEVEL,
log=LOGGER,
)
self._last_trigger = None
self._current_peak = 0
| 2.28125 | 2 |
mailmynet/Maildir/proxy_postfix/Twisted-11.0.0/build/lib.linux-x86_64-2.6/twisted/internet/gtk2reactor.py | SPIN-UMass/SWEET | 3 | 1911 | # -*- test-case-name: twisted.internet.test -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This module provides support for Twisted to interact with the glib/gtk2
mainloop.
In order to use this support, simply do the following::
| from twisted.internet import gtk2reactor
| gtk2reactor.install()
Then use twisted.internet APIs as usual. The other methods here are not
intended to be called directly.
When installing the reactor, you can choose whether to use the glib
event loop or the GTK+ event loop which is based on it but adds GUI
integration.
"""
# System Imports
import sys, signal
from zope.interface import implements
try:
if not hasattr(sys, 'frozen'):
# Don't want to check this for py2exe
import pygtk
pygtk.require('2.0')
except (ImportError, AttributeError):
pass # maybe we're using pygtk before this hack existed.
import gobject
if hasattr(gobject, "threads_init"):
# recent versions of python-gtk expose this. python-gtk=2.4.1
# (wrapping glib-2.4.7) does. python-gtk=2.0.0 (wrapping
# glib-2.2.3) does not.
gobject.threads_init()
# Twisted Imports
from twisted.python import log, runtime, failure
from twisted.python.compat import set
from twisted.internet.interfaces import IReactorFDSet
from twisted.internet import main, base, posixbase, error, selectreactor
POLL_DISCONNECTED = gobject.IO_HUP | gobject.IO_ERR | gobject.IO_NVAL
# glib's iochannel sources won't tell us about any events that we haven't
# asked for, even if those events aren't sensible inputs to the poll()
# call.
INFLAGS = gobject.IO_IN | POLL_DISCONNECTED
OUTFLAGS = gobject.IO_OUT | POLL_DISCONNECTED
def _our_mainquit():
# XXX: gtk.main_quit() (which is used for crash()) raises an exception if
# gtk.main_level() == 0; however, all the tests freeze if we use this
# function to stop the reactor. what gives? (I believe this may have been
# a stupid mistake where I forgot to import gtk here... I will remove this
# comment if the tests pass)
import gtk
if gtk.main_level():
gtk.main_quit()
class Gtk2Reactor(posixbase.PosixReactorBase):
"""
GTK+-2 event loop reactor.
@ivar _sources: A dictionary mapping L{FileDescriptor} instances to gtk
watch handles.
@ivar _reads: A set of L{FileDescriptor} instances currently monitored for
reading.
@ivar _writes: A set of L{FileDescriptor} instances currently monitored for
writing.
@ivar _simtag: A gtk timeout handle for the next L{simulate} call.
"""
implements(IReactorFDSet)
def __init__(self, useGtk=True):
self._simtag = None
self._reads = set()
self._writes = set()
self._sources = {}
posixbase.PosixReactorBase.__init__(self)
# pre 2.3.91 the glib iteration and mainloop functions didn't release
# global interpreter lock, thus breaking thread and signal support.
if getattr(gobject, "pygtk_version", ()) >= (2, 3, 91) and not useGtk:
self.context = gobject.main_context_default()
self.__pending = self.context.pending
self.__iteration = self.context.iteration
self.loop = gobject.MainLoop()
self.__crash = self.loop.quit
self.__run = self.loop.run
else:
import gtk
self.__pending = gtk.events_pending
self.__iteration = gtk.main_iteration
self.__crash = _our_mainquit
self.__run = gtk.main
if runtime.platformType == 'posix':
def _handleSignals(self):
# Let the base class do its thing, but pygtk is probably
# going to stomp on us so go beyond that and set up some
# signal handling which pygtk won't mess with. This would
# be better done by letting this reactor select a
# different implementation of installHandler for
# _SIGCHLDWaker to use. Then, at least, we could fall
# back to our extension module. See #4286.
from twisted.internet.process import reapAllProcesses as _reapAllProcesses
base._SignalReactorMixin._handleSignals(self)
signal.signal(signal.SIGCHLD, lambda *a: self.callFromThread(_reapAllProcesses))
if getattr(signal, "siginterrupt", None) is not None:
signal.siginterrupt(signal.SIGCHLD, False)
# Like the base, reap processes now in case a process
# exited before the handlers above were installed.
_reapAllProcesses()
# The input_add function in pygtk1 checks for objects with a
# 'fileno' method and, if present, uses the result of that method
# as the input source. The pygtk2 input_add does not do this. The
# function below replicates the pygtk1 functionality.
# In addition, pygtk maps gtk.input_add to _gobject.io_add_watch, and
# g_io_add_watch() takes different condition bitfields than
# gtk_input_add(). We use g_io_add_watch() here in case pygtk fixes this
# bug.
def input_add(self, source, condition, callback):
if hasattr(source, 'fileno'):
# handle python objects
def wrapper(source, condition, real_s=source, real_cb=callback):
return real_cb(real_s, condition)
return gobject.io_add_watch(source.fileno(), condition, wrapper)
else:
return gobject.io_add_watch(source, condition, callback)
def _add(self, source, primary, other, primaryFlag, otherFlag):
"""
Add the given L{FileDescriptor} for monitoring either for reading or
writing. If the file is already monitored for the other operation, we
delete the previous registration and re-register it for both reading
and writing.
"""
if source in primary:
return
flags = primaryFlag
if source in other:
gobject.source_remove(self._sources[source])
flags |= otherFlag
self._sources[source] = self.input_add(source, flags, self.callback)
primary.add(source)
def addReader(self, reader):
"""
Add a L{FileDescriptor} for monitoring of data available to read.
"""
self._add(reader, self._reads, self._writes, INFLAGS, OUTFLAGS)
def addWriter(self, writer):
"""
Add a L{FileDescriptor} for monitoring ability to write data.
"""
self._add(writer, self._writes, self._reads, OUTFLAGS, INFLAGS)
def getReaders(self):
"""
Retrieve the list of current L{FileDescriptor} monitored for reading.
"""
return list(self._reads)
def getWriters(self):
"""
Retrieve the list of current L{FileDescriptor} monitored for writing.
"""
return list(self._writes)
def removeAll(self):
"""
Remove monitoring for all registered L{FileDescriptor}s.
"""
return self._removeAll(self._reads, self._writes)
def _remove(self, source, primary, other, flags):
"""
Remove monitoring the given L{FileDescriptor} for either reading or
writing. If it's still monitored for the other operation, we
re-register the L{FileDescriptor} for only that operation.
"""
if source not in primary:
return
gobject.source_remove(self._sources[source])
primary.remove(source)
if source in other:
self._sources[source] = self.input_add(
source, flags, self.callback)
else:
self._sources.pop(source)
def removeReader(self, reader):
"""
Stop monitoring the given L{FileDescriptor} for reading.
"""
self._remove(reader, self._reads, self._writes, OUTFLAGS)
def removeWriter(self, writer):
"""
Stop monitoring the given L{FileDescriptor} for writing.
"""
self._remove(writer, self._writes, self._reads, INFLAGS)
doIterationTimer = None
def doIterationTimeout(self, *args):
self.doIterationTimer = None
return 0 # auto-remove
def doIteration(self, delay):
# flush some pending events, return if there was something to do
# don't use the usual "while self.context.pending(): self.context.iteration()"
# idiom because lots of IO (in particular test_tcp's
# ProperlyCloseFilesTestCase) can keep us from ever exiting.
log.msg(channel='system', event='iteration', reactor=self)
if self.__pending():
self.__iteration(0)
return
# nothing to do, must delay
if delay == 0:
return # shouldn't delay, so just return
self.doIterationTimer = gobject.timeout_add(int(delay * 1000),
self.doIterationTimeout)
# This will either wake up from IO or from a timeout.
self.__iteration(1) # block
# note: with the .simulate timer below, delays > 0.1 will always be
# woken up by the .simulate timer
if self.doIterationTimer:
# if woken by IO, need to cancel the timer
gobject.source_remove(self.doIterationTimer)
self.doIterationTimer = None
def crash(self):
posixbase.PosixReactorBase.crash(self)
self.__crash()
def run(self, installSignalHandlers=1):
self.startRunning(installSignalHandlers=installSignalHandlers)
gobject.timeout_add(0, self.simulate)
if self._started:
self.__run()
def _doReadOrWrite(self, source, condition, faildict={
error.ConnectionDone: failure.Failure(error.ConnectionDone()),
error.ConnectionLost: failure.Failure(error.ConnectionLost()),
}):
why = None
inRead = False
if condition & POLL_DISCONNECTED and not (condition & gobject.IO_IN):
if source in self._reads:
why = main.CONNECTION_DONE
inRead = True
else:
why = main.CONNECTION_LOST
else:
try:
if condition & gobject.IO_IN:
why = source.doRead()
inRead = True
if not why and condition & gobject.IO_OUT:
# if doRead caused connectionLost, don't call doWrite
# if doRead is doWrite, don't call it again.
if not source.disconnected:
why = source.doWrite()
except:
why = sys.exc_info()[1]
log.msg('Error In %s' % source)
log.deferr()
if why:
self._disconnectSelectable(source, why, inRead)
def callback(self, source, condition):
log.callWithLogger(source, self._doReadOrWrite, source, condition)
self.simulate() # fire Twisted timers
return 1 # 1=don't auto-remove the source
def simulate(self):
"""
Run simulation loops and reschedule callbacks.
"""
if self._simtag is not None:
gobject.source_remove(self._simtag)
self.runUntilCurrent()
timeout = min(self.timeout(), 0.1)
if timeout is None:
timeout = 0.1
# grumble
self._simtag = gobject.timeout_add(int(timeout * 1010), self.simulate)
class PortableGtkReactor(selectreactor.SelectReactor):
"""
Reactor that works on Windows.
Sockets aren't supported by GTK+'s input_add on Win32.
"""
_simtag = None
def crash(self):
selectreactor.SelectReactor.crash(self)
import gtk
# mainquit is deprecated in newer versions
if gtk.main_level():
if hasattr(gtk, 'main_quit'):
gtk.main_quit()
else:
gtk.mainquit()
def run(self, installSignalHandlers=1):
import gtk
self.startRunning(installSignalHandlers=installSignalHandlers)
gobject.timeout_add(0, self.simulate)
# mainloop is deprecated in newer versions
if hasattr(gtk, 'main'):
gtk.main()
else:
gtk.mainloop()
def simulate(self):
"""
Run simulation loops and reschedule callbacks.
"""
if self._simtag is not None:
gobject.source_remove(self._simtag)
self.iterate()
timeout = min(self.timeout(), 0.1)
if timeout is None:
timeout = 0.1
# grumble
self._simtag = gobject.timeout_add(int(timeout * 1010), self.simulate)
def install(useGtk=True):
"""
Configure the twisted mainloop to be run inside the gtk mainloop.
@param useGtk: should glib rather than GTK+ event loop be
used (this will be slightly faster but does not support GUI).
"""
reactor = Gtk2Reactor(useGtk)
from twisted.internet.main import installReactor
installReactor(reactor)
return reactor
def portableInstall(useGtk=True):
"""
Configure the twisted mainloop to be run inside the gtk mainloop.
"""
reactor = PortableGtkReactor()
from twisted.internet.main import installReactor
installReactor(reactor)
return reactor
if runtime.platform.getType() != 'posix':
install = portableInstall
__all__ = ['install']
| 2.109375 | 2 |
run_mod.py | fpl-analytics/gr_crypto | 0 | 1912 | <reponame>fpl-analytics/gr_crypto
"""
Setup:
- Import Libraries
- Setup tf on multiple cores
- Import Data
"""
import pandas as pd
import numpy as np
import tensorflow as tf
import seaborn as sns
from time import time
import multiprocessing
import random
import os
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM, ConvLSTM2D, Flatten
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
from joblib import dump, load
from mod.prep import log_return, log_return_np, preprocess
from mod.model import return_pred
from mod.eval import evaluate_regression, evaluate_up_down
cores = multiprocessing.cpu_count()
tf.config.threading.set_inter_op_parallelism_threads(cores-1)
root_folder = "data"
wide_close = pd.read_csv(root_folder + "/working/wide_close.csv")
wide_target = pd.read_csv(root_folder + "/working/wide_target.csv")
asset_details = pd.read_csv(root_folder + "/asset_details.csv")
assets = [str(i) for i in asset_details["Asset_ID"]]
"""
Preprocess
"""
close_returns = wide_close[assets].apply(log_return)
close_returns["time"] = wide_close["time"]
close_returns[assets] = close_returns[assets].replace([np.inf,-np.inf],np.nan)
"""
Linear Regression
"""
x_steps, y_steps = 60, [1, 15]
col_in, col_out = "1", "1"
train_x, test_x, train_y, test_y, time_d = preprocess(wide_close, col_in, col_out,
"time", x_steps, y_steps)
# 1 step
lr_1 = LinearRegression()
lr_1.fit(train_x.reshape(-1, x_steps), train_y[:,0,:].reshape(-1, 1))
true, pred = return_pred(test_x, test_y[:,0,:], lr_1)
evaluate_regression(true, pred)
evaluate_up_down(true, pred)
# 15 step
lr_15 = LinearRegression()
lr_15.fit(train_x.reshape(-1, x_steps), train_y[:,1,:].reshape(-1, 1))
true, pred = return_pred(test_x, test_y[:,1,:], lr_15)
evaluate_regression(true, pred)
evaluate_up_down(true, pred)
"""
calculate and store components separately
process:
- first, get rolling values for each timestamp
- then, predict 1 and 15 gaps and store in array
"""
# Production
"""
Steps:
- Get train, val and test indices. Importantly, this
needs to cover all assets (even though not all assets exist)
for the whole time period.
- Build models
"""
assets = list(asset_details["Asset_ID"].astype(str))
# Get indexes
i = np.select(
[
(wide_close.index >= 0) & (wide_close.index <= (len(wide_close)*0.7)),
(wide_close.index > (len(wide_close)*0.7)) & (wide_close.index <= (len(wide_close)*0.8))
],
["train", "val"],
default = "test")
indexes = pd.DataFrame({"time":wide_close["time"],
"set":i})
for a in assets:
print("asset", a)
filt = indexes["set"][~pd.isna(wide_close[a])]
counts = filt.value_counts()
df = pd.DataFrame({"counts":counts,
"pct":counts/np.sum(counts)})
print(df, "\n\n")
indexes_d = {}
for s in indexes["set"].unique():
indexes_d[s] = indexes["time"][indexes["set"] == s]
os.makedirs("model_files/linear_regression", exist_ok=True)
for a in assets:
print("Asset", a)
x_steps, y_steps = 60, [1, 16]
cols_in, cols_out = a, a
train_x, test_x, train_y, test_y, time_d = preprocess(wide_close, cols_in,
cols_out, "time", x_steps, y_steps)
# 1 step
lr_1 = LinearRegression()
lr_1.fit(train_x.reshape(-1, x_steps), train_y[:,0,:].reshape(-1, 1))
true, pred = return_pred(test_x, test_y[:,0,:], lr_1)
print("Model 1 Metrics")
evaluate_regression(true, pred)
evaluate_up_down(true, pred)
# 16 step
lr_16 = LinearRegression()
lr_16.fit(train_x.reshape(-1, x_steps), train_y[:,1,:].reshape(-1, 1))
true, pred = return_pred(test_x, test_y[:,1,:], lr_16)
print("Model 16 Metrics")
evaluate_regression(true, pred)
evaluate_up_down(true, pred)
dump(lr_1, f"model_files/linear_regression/lr_{a}_1")
dump(lr_16, f"model_files/linear_regression/lr_{a}_16")
dump(time_d, "model_files/linear_regression/lr_times")
"""
Random Forest
"""
rf = RandomForestRegressor(n_jobs=-1)
# start = time.time()
rf.fit(train_x.reshape(-1, x_steps), train_y.reshape(-1))
# print("Took:", round(start-time.time()))
| 2.09375 | 2 |
bot.py | menlen/one | 0 | 1913 | <reponame>menlen/one
# This example show how to use inline keyboards and process button presses
import telebot
import time
from telebot.types import InlineKeyboardMarkup, InlineKeyboardButton
import os, sys
from PIL import Image, ImageDraw, ImageFont
import random
TELEGRAM_TOKEN = '<KEY>'
bot = telebot.TeleBot(TELEGRAM_TOKEN)
channelId = -1001390673326
user_dict = {}
def TextToImg(ext):
IMAGES = [
'AROQ.jpg',
'AK47.jpg',
'BAXT.jpg',
'BASKETBOL.jpg',
'BAXTLI.jpg',
'DOST.jpg',
'ER.jpg',
'ETIK.jpg',
'FUTBOL.jpg',
'GAZ.jpg',
'HOTIN.jpg',
'BAXT.jpg',
'IPHONE.jpg',
'KOLBASA.jpg',
'KONFET.jpg',
'KOZGU.jpg',
'KUCHUK.jpg',
'MOSHINA.jpg',
'NEWISHTON.jpg',
'NOTEBOOK.jpg',
'OMAD.jpg',
'OYINCHOQ.jpg',
'PAYPQO.jpg',
'BAXT.jpg',
'PUL.jpg',
'PULTUG.jpg',
'QORQIZ.jpg',
'SOSISKA.jpg',
'TELEFON.jpg',
'TELEFONZ.jpg',
'TOK.jpg',
'TORSHIM.jpg',
'TUYA.jpg',
'UY.jpg',
'ZAMBARAK.jpg'
]
try:
img = random.choice(IMAGES)
except:
time.sleep(2)
img = random.choice(IMAGES)
# get an image
base = Image.open(img).convert("RGBA")
ext = ext.upper()
text = ext
# make a blank image for the text, initialized to transparent text color
txt = Image.new("RGBA", base.size, (255,255,255,0))
# get a font
fnt = ImageFont.truetype("OpenSans-Italic.ttf", 40)
# get a drawing context
d = ImageDraw.Draw(txt)
# draw text, half opacity
d.text(((800)/2,(1136)/2), text, font=fnt, fill=(255,0,0,255), anchor='mb')
out = Image.alpha_composite(base, txt)
filename = random.randint(1,35)
g = out.save(f'{filename}.png')
return filename
def gen_markup():
markup = InlineKeyboardMarkup()
markup.row_width = 1
markup.add(InlineKeyboardButton("Azo bo'ling", callback_data="cb_yes", url='t.me/onideal'),
InlineKeyboardButton("Tasdiqlash", callback_data="cb_no"))
return markup
def getUserFromChannel(userId):
u = bot.get_chat_member(channelId, userId)
return u.status
@bot.callback_query_handler(func=lambda call: True)
def callback_query(call):
if call.data == "cb_yes":
bot.answer_callback_query(call.id, "Answer is Yes")
elif call.data == "cb_no":
u = getUserFromChannel(call.from_user.id)
if u == 'member':
msg = bot.send_message(call.from_user.id, """\
Juda soz!!!, Ismingizni yozing
""")
bot.register_next_step_handler(msg, process_name_step)
else:
bot.send_message(call.from_user.id, f"Salom {call.from_user.first_name}, kanallarga a'zo bo'ling va A'zolikni tekshirish buyrug'ini tanlang", reply_markup=gen_markup())
def process_name_step(message):
try:
name = message.text
myfile = TextToImg(name)
photoSend = open(f'{myfile}.png', 'rb')
caption = f'{name} : ismiga sovga @onideal \n@giftmerobot \n@mygiftrobot'
bot.send_photo(message.chat.id, photoSend, caption=caption)
except Exception as e:
bot.reply_to(message, 'oooops')
@bot.message_handler(func=lambda message: True)
def message_handler(message):
us = getUserFromChannel(message.chat.id)
if us == 'member':
msg = bot.send_message(message.chat.id, """\
Juda soz!!!, Ismingizni yozing
""")
bot.register_next_step_handler(msg, process_name_step)
else:
bot.send_message(message.chat.id, f"Salom {message.from_user.first_name}, kanallarga a'zo bo'ling va A'zolikni tekshirish buyrug'ini tanlang", reply_markup=gen_markup())
bot.polling(none_stop=True)
| 2.8125 | 3 |
novice/python-unit-testing/answers/test_rectangle2.py | Southampton-RSG/2019-03-13-southampton-swc | 1 | 1914 | <reponame>Southampton-RSG/2019-03-13-southampton-swc
from rectangle2 import rectangle_area
def test_unit_square():
assert rectangle_area([0, 0, 1, 1]) == 1.0
def test_large_square():
assert rectangle_area([1, 1, 4, 4]) == 9.0
def test_actual_rectangle():
assert rectangle_area([0, 1, 4, 7]) == 24.0
| 2.6875 | 3 |
tests/requestreply.py | unclechu/py-radio-class | 0 | 1915 | # -*- coding: utf-8 -*-
from unittest import TestCase, TestLoader
from radio import (Radio, ListenerNotFound, ReplyHandlerAlreadyBound,
HandlerAlreadyBound)
def init_radio(f):
def wrap(self, *args):
self.radio = Radio()
return f(self, *args)
return wrap
class TestRadioRequestReplyMethods(TestCase):
@init_radio
def test_request_reply_stop_replying(self):
'''
"request", "reply" and "stopReplying" methods work correctly.
'''
def foo_handler(): return 'foo'
def bar_handler(my_arg=222): return my_arg
self.radio.reply('foo', foo_handler)
self.radio.reply('bar', bar_handler)
self.assertEqual(self.radio.request('foo'), 'foo')
self.assertEqual(self.radio.request('bar'), 222)
self.assertEqual(self.radio.request('bar', 333), 333)
self.assertEqual(self.radio.request('bar', my_arg=444), 444)
self.radio.stopReplying('foo')
self.radio.stopReplying('bar')
with self.assertRaises(ListenerNotFound):
self.radio.request('foo')
with self.assertRaises(ListenerNotFound):
self.radio.request('bar')
@init_radio
def test_kwargs(self):
'''
        Keyword arguments work correctly.
'''
foo_list = []
def foo_handler(foo, bar): return (foo, bar)
self.radio.reply('foo', foo_handler)
self.assertEqual(self.radio.request('foo', bar=5, foo=10), (10, 5))
@init_radio
def test_on_already_bound(self):
'''
"reply" fails when trying to bound handler that is already bounded.
'''
def foo_handler(): pass
self.radio.reply('foo', foo_handler)
self.radio.reply('bar', foo_handler)
# General exception
with self.assertRaises(HandlerAlreadyBound):
self.radio.reply('foo', foo_handler)
# Child exception
with self.assertRaises(ReplyHandlerAlreadyBound):
self.radio.reply('foo', foo_handler)
@init_radio
def test_off_handler_that_was_not_bounded(self):
'''
"stopReplying" fails when trying to unbound handler that was not
bounded.
'''
def foo_handler(): pass
with self.assertRaises(ListenerNotFound):
self.radio.stopReplying('foo', foo_handler)
@init_radio
def test_off_soft_mode(self):
'''
"stopReplying" will not fail if safe-argument is set to True.
'''
def foo_handler(): pass
self.radio.stopReplying('foo', foo_handler, soft=True)
self.radio.stopReplying('foo', foo_handler, soft=True)
@init_radio
def test_trigger_fail_on_incorrect_arguments(self):
'''
"request" fails when arguments for handler is incorrect.
'''
def foo_handler(required_arg): pass
self.radio.reply('foo', foo_handler)
with self.assertRaises(TypeError):
self.radio.request('foo')
suite = TestLoader().loadTestsFromTestCase(TestRadioRequestReplyMethods)
| 2.8125 | 3 |
mayan/apps/rest_api/exceptions.py | sophiawa/Mayan-EDMS | 1 | 1916 | <gh_stars>1-10
class APIError(Exception):
"""
Base exception for the API app
"""
pass
class APIResourcePatternError(APIError):
"""
Raised when an app tries to override an existing URL regular expression
pattern
"""
pass
| 2.03125 | 2 |
tests/unit/types/message/test_message.py | Immich/jina | 1 | 1917 | <reponame>Immich/jina
import sys
from typing import Sequence
import pytest
from jina import Request, QueryLang, Document
from jina.clients.request import request_generator
from jina.proto import jina_pb2
from jina.proto.jina_pb2 import EnvelopeProto
from jina.types.message import Message
from jina.types.request import _trigger_fields
from tests import random_docs
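# The tests below verify the lazy-deserialization behaviour of Request: `is_used`
# only flips to True once a field of the underlying protobuf is actually read or
# written.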
@pytest.mark.parametrize('field', _trigger_fields.difference({'command', 'args', 'flush'}))
def test_lazy_access(field):
reqs = (Request(r.SerializeToString(), EnvelopeProto()) for r in request_generator(random_docs(10)))
for r in reqs:
assert not r.is_used
        # first (lazy) read access to the requested field
print(getattr(r, field))
# now it is read
assert r.is_used
def test_multiple_access():
reqs = [Request(r.SerializeToString(), EnvelopeProto()) for r in request_generator(random_docs(10))]
for r in reqs:
assert not r.is_used
assert r
assert not r.is_used
for r in reqs:
assert not r.is_used
assert r.index
assert r.is_used
def test_lazy_nest_access():
reqs = (Request(r.SerializeToString(), EnvelopeProto()) for r in request_generator(random_docs(10)))
for r in reqs:
assert not r.is_used
        # write access to a nested document id
r.docs[0].id = '1' * 16
# now it is read
assert r.is_used
assert r.index.docs[0].id == '1' * 16
def test_lazy_change_message_type():
reqs = (Request(r.SerializeToString(), EnvelopeProto()) for r in request_generator(random_docs(10)))
for r in reqs:
assert not r.is_used
        # write access by setting a control command (changes the message type)
r.control.command = jina_pb2.RequestProto.ControlRequestProto.IDLE
# now it is read
assert r.is_used
assert len(r.index.docs) == 0
def test_lazy_append_access():
reqs = (Request(r.SerializeToString(), EnvelopeProto()) for r in request_generator(random_docs(10)))
for r in reqs:
assert not r.is_used
r.request_type = 'index'
        # write access by appending a new Document
r.docs.append(Document())
# now it is read
assert r.is_used
def test_lazy_clear_access():
reqs = (Request(r.SerializeToString(), EnvelopeProto()) for r in request_generator(random_docs(10)))
for r in reqs:
assert not r.is_used
        # write access by clearing the index field
r.ClearField('index')
# now it is read
assert r.is_used
def test_lazy_nested_clear_access():
reqs = (Request(r.SerializeToString(), EnvelopeProto()) for r in request_generator(random_docs(10)))
for r in reqs:
assert not r.is_used
        # write access by clearing the nested docs field
r.index.ClearField('docs')
# now it is read
assert r.is_used
def test_lazy_msg_access():
reqs = [Message(None, r.SerializeToString(), 'test', '123',
request_id='123', request_type='IndexRequest') for r in request_generator(random_docs(10))]
for r in reqs:
assert not r.request.is_used
assert r.envelope
assert len(r.dump()) == 3
assert not r.request.is_used
for r in reqs:
assert not r.request.is_used
assert r.request
assert len(r.dump()) == 3
assert not r.request.is_used
for r in reqs:
assert not r.request.is_used
assert r.request.index.docs
assert len(r.dump()) == 3
assert r.request.is_used
def test_message_size():
reqs = [Message(None, r, 'test', '123') for r in request_generator(random_docs(10))]
for r in reqs:
assert r.size == 0
assert sys.getsizeof(r.envelope.SerializeToString())
assert sys.getsizeof(r.request.SerializeToString())
assert len(r.dump()) == 3
assert r.size > sys.getsizeof(r.envelope.SerializeToString()) \
+ sys.getsizeof(r.request.SerializeToString())
def test_lazy_request_fields():
reqs = (Request(r.SerializeToString(), EnvelopeProto()) for r in request_generator(random_docs(10)))
for r in reqs:
assert list(r.DESCRIPTOR.fields_by_name.keys())
def test_request_extend_queryset():
q1 = {'name': 'SliceQL', 'parameters': {'start': 3, 'end': 4}}
q2 = QueryLang({'name': 'SliceQL', 'parameters': {'start': 3, 'end': 4}, 'priority': 1})
q3 = jina_pb2.QueryLangProto()
q3.name = 'SliceQL'
q3.parameters['start'] = 3
q3.parameters['end'] = 4
q3.priority = 2
r = Request()
r.queryset.extend([q1, q2, q3])
assert isinstance(r.queryset, Sequence)
assert len(r.queryset) == 3
for idx, q in enumerate(r.queryset):
assert q.priority == idx
assert q.parameters['start'] == 3
assert q.parameters['end'] == 4
# q1 and q2 refer to the same
assert len({id(q) for q in r.queryset}) == 2
r2 = Request()
r2.queryset.extend(r.queryset)
assert len({id(q) for q in r2.queryset}) == 2
r = Request()
r.queryset.append(q1)
r.queryset.append(q2)
r.queryset.append(q3)
for idx, q in enumerate(r.queryset):
assert q.priority == idx
assert q.parameters['start'] == 3
assert q.parameters['end'] == 4
with pytest.raises(TypeError):
r.queryset.extend(1)
@pytest.mark.parametrize('typ,pb_typ', [('train', jina_pb2.RequestProto.TrainRequestProto),
('index', jina_pb2.RequestProto.IndexRequestProto),
('search', jina_pb2.RequestProto.SearchRequestProto),
('control', jina_pb2.RequestProto.ControlRequestProto)])
def test_empty_request_type(typ, pb_typ):
r = Request()
assert r.request_type is None
with pytest.raises(ValueError):
print(r.body)
r.request_type = typ
assert r._request_type == typ
assert isinstance(r.body, pb_typ)
@pytest.mark.parametrize('typ,pb_typ', [('index', jina_pb2.RequestProto.IndexRequestProto),
('search', jina_pb2.RequestProto.SearchRequestProto)])
def test_add_doc_to_type(typ, pb_typ):
r = Request()
r.request_type = typ
for _ in range(10):
r.docs.append(Document())
r.groundtruths.append(Document())
assert len(r.docs) == 10
assert len(r.groundtruths) == 10
| 1.859375 | 2 |
tabnine-vim/third_party/ycmd/ycmd/tests/python/testdata/project/settings_extra_conf.py | MrMonk3y/vimrc | 10 | 1918 | import os
import sys
DIR_OF_THIS_SCRIPT = os.path.abspath( os.path.dirname( __file__ ) )
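# This file is ycmd test data: it is loaded as a project extra conf and its Settings()
# result is presumably used to point the Python completer at the current interpreter
# and to add the bundled third_party directory to sys.path.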
def Settings( **kwargs ):
return {
'interpreter_path': sys.executable,
'sys_path': [ os.path.join( DIR_OF_THIS_SCRIPT, 'third_party' ) ]
}
| 1.960938 | 2 |
SmartCache/sim/Utilities/setup.py | Cloud-PG/smart-cache | 1 | 1919 | from distutils.core import setup
setup(
name='utils',
version='1.0.0',
author='<NAME>',
author_email='<EMAIL>',
packages=[
'utils',
],
scripts=[],
url='https://github.com/Cloud-PG/smart-cache',
license='Apache 2.0 License',
description='Utils for the SmartCache project',
long_description="To do...",
install_requires=open("requirements.txt").read(),
    classifiers=[
"Operating System :: POSIX :: Linux",
"License :: OSI Approved :: Apache 2.0 License",
"Programming Language :: Python :: 3 :: Only"
]
)
| 1.195313 | 1 |
homeassistant/components/eight_sleep/binary_sensor.py | liangleslie/core | 2 | 1920 | <gh_stars>1-10
"""Support for Eight Sleep binary sensors."""
from __future__ import annotations
import logging
from pyeight.eight import EightSleep
from homeassistant.components.binary_sensor import (
BinarySensorDeviceClass,
BinarySensorEntity,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from . import EightSleepBaseEntity
from .const import DATA_API, DATA_HEAT, DOMAIN
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(
hass: HomeAssistant,
config: ConfigType,
async_add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up the eight sleep binary sensor."""
if discovery_info is None:
return
eight: EightSleep = hass.data[DOMAIN][DATA_API]
heat_coordinator: DataUpdateCoordinator = hass.data[DOMAIN][DATA_HEAT]
entities = []
for user in eight.users.values():
entities.append(
EightHeatSensor(heat_coordinator, eight, user.userid, "bed_presence")
)
async_add_entities(entities)
class EightHeatSensor(EightSleepBaseEntity, BinarySensorEntity):
"""Representation of a Eight Sleep heat-based sensor."""
_attr_device_class = BinarySensorDeviceClass.OCCUPANCY
def __init__(
self,
coordinator: DataUpdateCoordinator,
eight: EightSleep,
user_id: str | None,
sensor: str,
) -> None:
"""Initialize the sensor."""
super().__init__(coordinator, eight, user_id, sensor)
assert self._user_obj
_LOGGER.debug(
"Presence Sensor: %s, Side: %s, User: %s",
sensor,
self._user_obj.side,
user_id,
)
@property
def is_on(self) -> bool:
"""Return true if the binary sensor is on."""
assert self._user_obj
return bool(self._user_obj.bed_presence)
| 2.171875 | 2 |
ravem/tests/util_test.py | bpedersen2/indico-plugins-cern | 0 | 1921 | # This file is part of the CERN Indico plugins.
# Copyright (C) 2014 - 2022 CERN
#
# The CERN Indico plugins are free software; you can redistribute
# them and/or modify them under the terms of the MIT License; see
# the LICENSE file for more details.
from unittest.mock import MagicMock
import pytest
from requests.exceptions import HTTPError, Timeout
from indico.testing.util import extract_logs
from indico_ravem.plugin import RavemPlugin
from indico_ravem.util import has_access, ravem_api_call
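# The tests below stub requests.request with mockito to check that ravem_api_call
# issues the expected HTTP method, URL, Authorization header and params, that
# timeouts and HTTP errors are logged and surfaced, and that has_access enforces
# the room and event permission rules.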
@pytest.mark.usefixtures('db')
@pytest.mark.parametrize('method', ('get', 'post'))
def test_correct_http_method(mocker, method):
request = mocker.patch('indico_ravem.util.requests.request')
response = MagicMock()
response.json.return_value = {'result': 'test'}
response.raise_for_status.return_value = False
request.return_value = response
ravem_api_call('test_endpoint', method=method, param1='test1', param2='test2')
assert request.call_count == 1
assert request.call_args[0][0] == method
@pytest.mark.usefixtures('db')
def test_correct_auth_method(mocker):
request = mocker.patch('indico_ravem.util.requests.request')
response = MagicMock()
response.json.return_value = {'result': 'test'}
response.raise_for_status.return_value = False
request.return_value = response
token = 'foo'
RavemPlugin.settings.set('access_token', token)
ravem_api_call('test_endpoint', param1='test1', param2='test2')
assert request.call_count == 1
assert 'Authorization' in request.call_args[1]['headers']
assert request.call_args[1]['headers']['Authorization'] == 'Bearer %s' % token
@pytest.mark.usefixtures('db')
def test_accepts_json(mocker):
request = mocker.patch('indico_ravem.util.requests.request')
response = MagicMock()
response.json.return_value = {'result': 'test'}
response.raise_for_status.return_value = False
request.return_value = response
ravem_api_call('test_endpoint', param1='test1', param2='test2')
assert request.call_count == 1
assert request.call_args[1]['headers']['Accept'] == 'application/json'
@pytest.mark.usefixtures('db')
@pytest.mark.parametrize(('root_endpoint', 'endpoint', 'expected_url'), (
('https://ravem.test/', 'final_endpoint', 'https://ravem.test/final_endpoint'),
('https://ravem.test/api/', 'final_endpoint', 'https://ravem.test/api/final_endpoint'),
('https://ravem.test/api/v2/', 'final_endpoint', 'https://ravem.test/api/v2/final_endpoint'),
('https://ravem.test', './final_endpoint', 'https://ravem.test/final_endpoint'),
('https://ravem.test/api/', './final_endpoint', 'https://ravem.test/api/final_endpoint'),
('https://ravem.test/api/v2/', './final_endpoint', 'https://ravem.test/api/v2/final_endpoint'),
('https://ravem.test', 'sub/final_endpoint', 'https://ravem.test/sub/final_endpoint'),
('https://ravem.test/api/', 'sub/final_endpoint', 'https://ravem.test/api/sub/final_endpoint'),
('https://ravem.test/api/v2/', 'sub/final_endpoint', 'https://ravem.test/api/v2/sub/final_endpoint'),
('https://ravem.test', './sub/final_endpoint', 'https://ravem.test/sub/final_endpoint'),
('https://ravem.test/api/', './sub/final_endpoint', 'https://ravem.test/api/sub/final_endpoint'),
('https://ravem.test/api/v2/', './sub/final_endpoint', 'https://ravem.test/api/v2/sub/final_endpoint'),
('https://ravem.test/', '', 'https://ravem.test/'),
('https://ravem.test/api/', '', 'https://ravem.test/api/'),
('https://ravem.test/api/v2/', '', 'https://ravem.test/api/v2/'),
))
def test_correct_api_endpoint(mocker, root_endpoint, endpoint, expected_url):
request = mocker.patch('indico_ravem.util.requests.request')
response = MagicMock()
response.json.return_value = {'result': 'test'}
response.raise_for_status.return_value = False
request.return_value = response
RavemPlugin.settings.set('api_endpoint', root_endpoint)
ravem_api_call(endpoint, param1='test1', param2='test2')
assert request.call_count == 1
assert request.call_args[0][1] == expected_url
@pytest.mark.usefixtures('db')
@pytest.mark.parametrize('params', (
{},
{'p1': '1stparam'},
{'p1': '1stparam', 'p2': '2ndparam'}
))
def test_params_generated(mocker, params):
request = mocker.patch('indico_ravem.util.requests.request')
response = MagicMock()
response.json.return_value = {'result': 'test'}
response.raise_for_status.return_value = False
request.return_value = response
ravem_api_call('test_endpoint', params=params)
assert request.call_count == 1
assert request.call_args[1]['params'] == params
@pytest.mark.usefixtures('db')
def test_raises_timeout(mocker):
request = mocker.patch('indico_ravem.util.requests.request')
request.side_effect = Timeout('Timeout test error message', request=request)
with pytest.raises(Timeout) as excinfo:
ravem_api_call('test_endpoint')
assert str(excinfo.value) == "Timeout while contacting the room."
assert request.call_count == 1
@pytest.mark.usefixtures('db')
@pytest.mark.parametrize(('method', 'params'), (
('get', {}),
('post', {}),
('get', {'p1': '1stparam'}),
('post', {'p1': '1stparam'}),
('get', {'p1': '1stparam', 'p2': '2ndparam'}),
('post', {'p1': '1stparam', 'p2': '2ndparam'})
))
def test_unexpected_exception_is_logged(mocker, caplog, method, params):
request = mocker.patch('indico_ravem.util.requests.request')
request.side_effect = IndexError('this is unexpected')
with pytest.raises(IndexError) as excinfo:
ravem_api_call('test_endpoint', method=method, **params)
assert str(excinfo.value) == 'this is unexpected'
log = extract_logs(caplog, one=True, name='indico.plugin.ravem')
assert log.message == "failed call: {} {} with {}: {}".format(method.upper(), 'test_endpoint', params,
'this is unexpected')
assert request.call_count == 1
@pytest.mark.usefixtures('db')
@pytest.mark.parametrize(('method', 'params'), (
('get', {}),
('post', {}),
('get', {'p1': '1stparam'}),
('post', {'p1': '1stparam'}),
('get', {'p1': '1stparam', 'p2': '2ndparam'}),
('post', {'p1': '1stparam', 'p2': '2ndparam'})
))
def test_http_error_is_logged(mocker, caplog, method, params):
request = mocker.patch('indico_ravem.util.requests.request')
request.method = method.upper()
request.url = RavemPlugin.settings.get('api_endpoint') + 'test_endpoint'
response = MagicMock()
response.raise_for_status.side_effect = HTTPError('Well this is embarrassing')
response.request = request
response.url = response.request.url
request.return_value = response
with pytest.raises(HTTPError) as excinfo:
ravem_api_call('test_endpoint', method=method, **params)
assert str(excinfo.value) == 'Well this is embarrassing'
log = extract_logs(caplog, one=True, name='indico.plugin.ravem')
assert log.message == '{} {} failed with {}'.format(
method.upper(), RavemPlugin.settings.get('api_endpoint') + 'test_endpoint', 'Well this is embarrassing')
assert request.call_count == 1
@pytest.mark.usefixtures('db')
def test_unlinked_event_vc_room_has_no_access():
event_vc_room = MagicMock()
event_vc_room.link_object = None
assert not has_access(event_vc_room)
@pytest.mark.usefixtures('db', 'request_context')
def test_unlinked_room_has_no_access(mocker):
session = mocker.patch('indico_ravem.util.session')
session.user = '<NAME>'
event_vc_room = MagicMock()
event_vc_room.link_object.room = None
assert not has_access(event_vc_room)
@pytest.mark.usefixtures('db', 'request_context')
def test_check_if_current_user_is_room_owner(mocker):
session = mocker.patch('indico_ravem.util.session')
session.user = '<NAME>'
request = mocker.patch('indico_ravem.util.request')
request.remote_addr = '192.168.127.12'
retrieve_principal = mocker.patch('indico_ravem.util._retrieve_principal')
retrieve_principal.side_effect = lambda x: session.user
event_vc_room = MagicMock()
event_vc_room.link_object.room.has_equipment = MagicMock(return_value=True)
event_vc_room.link_object.room.get_attribute_value.return_value = request.remote_addr
event_vc_room.vc_room.data.get.return_value = 'User:123'
event_vc_room.event.can_manage.return_value = False
assert has_access(event_vc_room)
@pytest.mark.usefixtures('db', 'request_context')
def test_check_if_current_user_can_modify(mocker):
request = mocker.patch('indico_ravem.util.request')
request.remote_addr = '192.168.127.12'
session = mocker.patch('indico_ravem.util.session')
session.user = '<NAME>'
mocker.patch('indico_ravem.util._retrieve_principal')
event_vc_room = MagicMock()
event_vc_room.link_object.room.has_equipment = MagicMock(return_value=True)
event_vc_room.link_object.room.get_attribute_value.return_value = request.remote_addr
event_vc_room.event.can_manage.return_value = True
assert has_access(event_vc_room)
event_vc_room.event.can_manage.assert_called_once_with(session.user)
| 2.21875 | 2 |
apps/organization/urls.py | stormsha/StormOnline | 18 | 1922 | # _*_ coding: utf-8 _*_
# ---------------------------
__author__ = 'StormSha'
__date__ = '2018/3/28 18:01'
# ---------------------------
# -------------------------django----------------------
from django.conf.urls import url
from .views import OrgView, AddUserAskView, OrgHomeView, OrgCourseView, OrgDescView, OrgTeacherView, AddFavView
from .views import TeacherListView, TeacherDetailView
urlpatterns = [
url(r'^list/$', OrgView.as_view(), name="org_list"),
url(r'^add_ask/$', AddUserAskView.as_view(), name="add_ask"),
url(r'^home/(?P<org_id>\d+)/$', OrgHomeView.as_view(), name="org_home"),
url(r'^course/(?P<org_id>\d+)/$', OrgCourseView.as_view(), name="org_course"),
url(r'^desc/(?P<org_id>\d+)/$', OrgDescView.as_view(), name="org_desc"),
url(r'^org_teacher/(?P<org_id>\d+)/$', OrgTeacherView.as_view(), name="org_teacher"),
# --------------机构收藏-------------------------
url(r'^add_fav/$', AddFavView.as_view(), name="add_fav"),
# -----------------------teacher------------------------------
url(r'^teacher/list/$', TeacherListView.as_view(), name="teacher_list"),
url(r'^teacher/detail/(?P<teacher_id>\d+)/$', TeacherDetailView.as_view(), name="teacher_detail")
] | 1.875 | 2 |
tech_project/lib/python2.7/site-packages/filer/migrations/0010_auto_20180414_2058.py | priyamshah112/Project-Descripton-Blog | 0 | 1923 | <filename>tech_project/lib/python2.7/site-packages/filer/migrations/0010_auto_20180414_2058.py<gh_stars>0
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('filer', '0009_auto_20171220_1635'),
]
operations = [
migrations.AlterField(
model_name='image',
name='file_ptr',
field=models.OneToOneField(primary_key=True, serialize=False, related_name='filer_image_file', parent_link=True, to='filer.File', on_delete=django.db.models.deletion.CASCADE),
),
]
| 1.484375 | 1 |
SLHCUpgradeSimulations/Configuration/python/aging.py | ckamtsikis/cmssw | 852 | 1924 | <gh_stars>100-1000
import FWCore.ParameterSet.Config as cms
# handle normal mixing or premixing
def getHcalDigitizer(process):
if hasattr(process,'mixData'):
return process.mixData
if hasattr(process,'mix') and hasattr(process.mix,'digitizers') and hasattr(process.mix.digitizers,'hcal'):
return process.mix.digitizers.hcal
return None
def getHGCalDigitizer(process,section):
if hasattr(process,'mix') and hasattr(process.mix,'digitizers'):
if section == 'EE' and hasattr(process.mix.digitizers,'hgceeDigitizer'):
return process.mix.digitizers.hgceeDigitizer
elif section == 'FH' and hasattr(process.mix.digitizers,'hgchefrontDigitizer'):
return process.mix.digitizers.hgchefrontDigitizer
elif section == 'BH' and hasattr(process.mix.digitizers,'hgchebackDigitizer'):
return process.mix.digitizers.hgchebackDigitizer
elif section == 'HFNose' and hasattr(process.mix.digitizers,'hfnoseDigitizer'):
return process.mix.digitizers.hfnoseDigitizer
return None
# change assumptions about lumi rate
def setScenarioHLLHC(module,scenarioHLLHC):
if scenarioHLLHC=="nominal":
from CalibCalorimetry.HcalPlugins.HBHEDarkening_cff import _years_LHC, _years_HLLHC_nominal
module.years = _years_LHC + _years_HLLHC_nominal
elif scenarioHLLHC=="ultimate":
from CalibCalorimetry.HcalPlugins.HBHEDarkening_cff import _years_LHC, _years_HLLHC_ultimate
module.years = _years_LHC + _years_HLLHC_ultimate
return module
# turnon = True enables default, False disables
# recalibration and darkening always together
def ageHB(process,turnon,scenarioHLLHC):
if turnon:
from CalibCalorimetry.HcalPlugins.HBHEDarkening_cff import HBDarkeningEP
process.HBDarkeningEP = HBDarkeningEP
process.HBDarkeningEP = setScenarioHLLHC(process.HBDarkeningEP,scenarioHLLHC)
hcaldigi = getHcalDigitizer(process)
if hcaldigi is not None: hcaldigi.HBDarkening = cms.bool(turnon)
if hasattr(process,'es_hardcode'):
process.es_hardcode.HBRecalibration = cms.bool(turnon)
return process
def ageHE(process,turnon,scenarioHLLHC):
if turnon:
from CalibCalorimetry.HcalPlugins.HBHEDarkening_cff import HEDarkeningEP
process.HEDarkeningEP = HEDarkeningEP
process.HEDarkeningEP = setScenarioHLLHC(process.HEDarkeningEP,scenarioHLLHC)
hcaldigi = getHcalDigitizer(process)
if hcaldigi is not None: hcaldigi.HEDarkening = cms.bool(turnon)
if hasattr(process,'es_hardcode'):
process.es_hardcode.HERecalibration = cms.bool(turnon)
return process
def ageHF(process,turnon):
hcaldigi = getHcalDigitizer(process)
if hcaldigi is not None: hcaldigi.HFDarkening = cms.bool(turnon)
if hasattr(process,'es_hardcode'):
process.es_hardcode.HFRecalibration = cms.bool(turnon)
return process
def agedHFNose(process,algo=0):
from SimCalorimetry.HGCalSimProducers.hgcalDigitizer_cfi import HFNose_setEndOfLifeNoise
process = HFNose_setEndOfLifeNoise(process,byDose=True,byDoseAlgo=algo)
return process
def agedHGCal(process,algo=0):
from SimCalorimetry.HGCalSimProducers.hgcalDigitizer_cfi import HGCal_setEndOfLifeNoise
process = HGCal_setEndOfLifeNoise(process,byDose=True,byDoseAlgo=algo)
return process
def realisticHGCalStartup(process):
from SimCalorimetry.HGCalSimProducers.hgcalDigitizer_cfi import HGCal_setRealisticStartupNoise
process = HGCal_setRealisticStartupNoise(process)
return process
# needs lumi to set proper ZS thresholds (tbd)
def ageSiPM(process,turnon,lumi):
process.es_hardcode.hbUpgrade.doRadiationDamage = turnon
process.es_hardcode.heUpgrade.doRadiationDamage = turnon
# todo: determine ZS threshold adjustments
# adjust PF thresholds for increased noise
# based on: https://baylor.box.com/s/w32ja75krcbxcycyifexu28dwlgrj7wg
hcal_lumis = [300, 1000, 3000, 4500, 1e10]
hcal_thresholds = {
300: {
"seed": [0.5, 0.625, 0.75, 0.75],
"rec": [0.4, 0.5, 0.6, 0.6],
},
1000: {
"seed": [1.0, 1.5, 1.5, 1.5],
"rec": [0.8, 1.2, 1.2, 1.2],
},
3000: {
"seed": [1.25, 2.5, 2.5, 2.5],
"rec": [1.0, 2.0, 2.0, 2.0],
},
4500: {
"seed": [1.5, 3.0, 3.0, 3.0],
"rec": [1.25, 2.5, 2.5, 2.5],
},
}
ctmodules = ['calotowermaker','caloTowerForTrk','caloTowerForTrkPreSplitting','towerMaker','towerMakerWithHO']
for ilumi, hcal_lumi in enumerate(hcal_lumis[:-1]):
if lumi >= hcal_lumi and lumi < hcal_lumis[ilumi+1]:
if hasattr(process,'particleFlowClusterHBHE'):
process.particleFlowClusterHBHE.seedFinder.thresholdsByDetector[0].seedingThreshold = hcal_thresholds[hcal_lumi]["seed"]
process.particleFlowClusterHBHE.initialClusteringStep.thresholdsByDetector[0].gatheringThreshold = hcal_thresholds[hcal_lumi]["rec"]
process.particleFlowClusterHBHE.pfClusterBuilder.recHitEnergyNorms[0].recHitEnergyNorm = hcal_thresholds[hcal_lumi]["rec"]
process.particleFlowClusterHBHE.pfClusterBuilder.positionCalc.logWeightDenominatorByDetector[0].logWeightDenominator = hcal_thresholds[hcal_lumi]["rec"]
process.particleFlowClusterHBHE.pfClusterBuilder.allCellsPositionCalc.logWeightDenominatorByDetector[0].logWeightDenominator = hcal_thresholds[hcal_lumi]["rec"]
if hasattr(process,'particleFlowClusterHCAL'):
process.particleFlowClusterHCAL.pfClusterBuilder.allCellsPositionCalc.logWeightDenominatorByDetector[0].logWeightDenominator = hcal_thresholds[hcal_lumi]["rec"]
if hasattr(process,'particleFlowRecHitHBHE'):
process.particleFlowRecHitHBHE.producers[0].qualityTests[0].cuts[0].threshold = hcal_thresholds[hcal_lumi]["rec"]
for ctmod in ctmodules:
if hasattr(process,ctmod):
getattr(process,ctmod).HBThreshold1 = hcal_thresholds[hcal_lumi]["rec"][0]
getattr(process,ctmod).HBThreshold2 = hcal_thresholds[hcal_lumi]["rec"][1]
getattr(process,ctmod).HBThreshold = hcal_thresholds[hcal_lumi]["rec"][-1]
break
return process
def ageHcal(process,lumi,instLumi,scenarioHLLHC):
hcaldigi = getHcalDigitizer(process)
if hcaldigi is not None: hcaldigi.DelivLuminosity = cms.double(float(lumi)) # integrated lumi in fb-1
# these lines need to be further activated by turning on 'complete' aging for HF
if hasattr(process,'g4SimHits'):
process.g4SimHits.HCalSD.InstLuminosity = cms.double(float(instLumi))
process.g4SimHits.HCalSD.DelivLuminosity = cms.double(float(lumi))
# recalibration and darkening always together
if hasattr(process,'es_hardcode'):
process.es_hardcode.iLumi = cms.double(float(lumi))
# functions to enable individual subdet aging
process = ageHB(process,True,scenarioHLLHC)
process = ageHE(process,True,scenarioHLLHC)
process = ageHF(process,True)
process = ageSiPM(process,True,lumi)
return process
def turn_on_HB_aging(process):
process = ageHB(process,True,"")
return process
def turn_off_HB_aging(process):
process = ageHB(process,False,"")
return process
def turn_on_HE_aging(process):
process = ageHE(process,True,"")
return process
def turn_off_HE_aging(process):
process = ageHE(process,False,"")
return process
def turn_on_HF_aging(process):
process = ageHF(process,True)
return process
def turn_off_HF_aging(process):
process = ageHF(process,False)
return process
def turn_off_SiPM_aging(process):
process = ageSiPM(process,False,0.0)
return process
def hf_complete_aging(process):
if hasattr(process,'g4SimHits'):
process.g4SimHits.HCalSD.HFDarkening = cms.untracked.bool(True)
hcaldigi = getHcalDigitizer(process)
if hcaldigi is not None: hcaldigi.HFDarkening = cms.untracked.bool(False)
return process
def ageEcal(process,lumi,instLumi):
if hasattr(process,'g4SimHits'):
        # these lines need to be further activated by turning on 'complete' aging for ecal
process.g4SimHits.ECalSD.InstLuminosity = cms.double(instLumi)
process.g4SimHits.ECalSD.DelivLuminosity = cms.double(float(lumi))
# available conditions
ecal_lumis = [300,1000,3000,4500]
ecal_conditions = [
['EcalIntercalibConstantsRcd','EcalIntercalibConstants_TL{:d}_upgrade_8deg_v2_mc'],
['EcalIntercalibConstantsMCRcd','EcalIntercalibConstantsMC_TL{:d}_upgrade_8deg_v2_mc'],
['EcalLaserAPDPNRatiosRcd','EcalLaserAPDPNRatios_TL{:d}_upgrade_8deg_mc'],
['EcalPedestalsRcd','EcalPedestals_TL{:d}_upgradeTIA_8deg_mc'],
['EcalTPGLinearizationConstRcd','EcalTPGLinearizationConst_TL{:d}_upgrade_8deg_mc'],
]
# update PF thresholds, based on https://indico.cern.ch/event/653123/contributions/2659235/attachments/1491385/2318364/170711_upsg_ledovskoy.pdf
ecal_thresholds = {
300 : 0.103,
1000 : 0.175,
3000 : 0.435,
4500 : 0.707,
}
ecal_seed_multiplier = 2.5
# try to get conditions
if int(lumi) in ecal_lumis:
if not hasattr(process.GlobalTag,'toGet'):
process.GlobalTag.toGet=cms.VPSet()
for ecal_condition in ecal_conditions:
process.GlobalTag.toGet.append(cms.PSet(
record = cms.string(ecal_condition[0]),
tag = cms.string(ecal_condition[1].format(int(lumi))),
connect = cms.string("frontier://FrontierProd/CMS_CONDITIONS")
)
)
if hasattr(process,"particleFlowClusterECALUncorrected"):
_seeds = process.particleFlowClusterECALUncorrected.seedFinder.thresholdsByDetector
for iseed in range(0,len(_seeds)):
if _seeds[iseed].detector.value()=="ECAL_BARREL":
_seeds[iseed].seedingThreshold = cms.double(ecal_thresholds[int(lumi)]*ecal_seed_multiplier)
_clusters = process.particleFlowClusterECALUncorrected.initialClusteringStep.thresholdsByDetector
for icluster in range(0,len(_clusters)):
if _clusters[icluster].detector.value()=="ECAL_BARREL":
_clusters[icluster].gatheringThreshold = cms.double(ecal_thresholds[int(lumi)])
return process
def ecal_complete_aging(process):
if hasattr(process,'g4SimHits'):
process.g4SimHits.ECalSD.AgeingWithSlopeLY = cms.untracked.bool(True)
if hasattr(process,'ecal_digi_parameters'):
process.ecal_digi_parameters.UseLCcorrection = cms.untracked.bool(False)
return process
def customise_aging_300(process):
process=ageHcal(process,300,5.0e34,"nominal")
process=ageEcal(process,300,5.0e34)
return process
def customise_aging_1000(process):
process=ageHcal(process,1000,5.0e34,"nominal")
process=turn_off_HE_aging(process) #avoid conflict between HGCal and Hcal in phase2 geom configuration
process=ageEcal(process,1000,5.0e34)
return process
def customise_aging_3000(process):
process=ageHcal(process,3000,5.0e34,"nominal")
process=turn_off_HE_aging(process) #avoid conflict between HGCal and Hcal in phase2 geom configuration
process=ageEcal(process,3000,5.0e34)
process=agedHGCal(process)
process=agedHFNose(process)
return process
def customise_aging_3000_ultimate(process):
process=ageHcal(process,3000,7.5e34,"ultimate")
process=turn_off_HE_aging(process) #avoid conflict between HGCal and Hcal in phase2 geom configuration
process=ageEcal(process,3000,7.5e34)
process=agedHGCal(process)
process=agedHFNose(process)
return process
def customise_aging_4500_ultimate(process):
process=ageHcal(process,4500,7.5e34,"ultimate")
process=turn_off_HE_aging(process) #avoid conflict between HGCal and Hcal in phase2 geom configuration
process=ageEcal(process,4500,7.5e34)
process=agedHGCal(process)
process=agedHFNose(process)
return process
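# A minimal usage sketch (an assumption about the typical workflow, not part of this
# module): these customise functions are normally handed to cmsDriver.py, e.g.
#
#   cmsDriver.py ... --customise SLHCUpgradeSimulations/Configuration/aging.customise_aging_3000
#
# which is equivalent to doing, inside a process configuration:
#
#   from SLHCUpgradeSimulations.Configuration.aging import customise_aging_3000
#   process = customise_aging_3000(process)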
| 1.820313 | 2 |
xml_parser.py | cbschaff/nlimb | 12 | 1925 | import numpy as np
import xml.etree.ElementTree as ET
class Geom(object):
def __init__(self, geom):
self.xml = geom
self.params = []
def get_params(self):
return self.params.copy()
def set_params(self, new_params):
self.params = new_params
def update_point(self, p, new_params):
pass
def update_xml(self):
pass
def update(self, new_params):
self.set_params(new_params)
self.update_xml()
def get_smallest_z(self):
pass
def get_param_limits(self):
pass
def get_param_names(self):
pass
def get_volume(self):
pass
class Sphere(Geom):
min_radius = .05
max_radius = .4
def __init__(self, geom):
self.xml = geom
self.params = [float(self.xml.get('size'))] # radius
self.center = np.array([float(x) for x in self.xml.get('pos').split()])
def update_point(self, p, new_params):
return ((p - self.center) * new_params[0] / self.params[0]) + self.center
def update_xml(self):
self.xml.set('size', str(self.params[0]))
def get_smallest_z(self):
return self.center[2] - self.params[0]
def get_param_limits(self):
return [[self.min_radius], [self.max_radius]]
def get_param_names(self):
return ['radius']
def get_volume(self):
return 4./3. * np.pi * self.params[0] ** 3
class Capsule(Geom):
min_length = 0.175
max_length = 0.8
min_radius = 0.035
max_radius = 0.085
def __init__(self, geom):
self.xml = geom
fromto = [float(x) for x in self.xml.get('fromto').split()]
self.p1 = np.array(fromto[:3])
self.p2 = np.array(fromto[3:])
length = np.sqrt(np.sum((self.p2 - self.p1) ** 2))
radius = float(self.xml.get('size'))
self.params = [length, radius]
self.axis = (self.p2 - self.p1) / length
def update_point(self, p, new_params):
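        # Split p into its component along the capsule axis (lfac) and the
        # perpendicular remainder (rfac); only the axial part is rescaled by the
        # new length, since radius-based rescaling is commented out below.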
lfac = p.dot(self.axis) * self.axis
rfac = p - lfac
return p + lfac * (-1.0 + new_params[0] / self.params[0])# + rfac * (new_params[1] / self.params[1])
def update_xml(self):
self.xml.set('fromto', ' '.join([str(x) for x in np.concatenate([self.p1, self.p2])]))
self.xml.set('size', str(self.params[1])) # radius
def set_params(self, new_params):
p1 = self.update_point(self.p1, new_params)
p2 = self.update_point(self.p2, new_params)
# update only after computing p1, p2
self.p1 = p1
self.p2 = p2
super().set_params(new_params)
def get_smallest_z(self):
return min(self.p1[2], self.p2[2]) - self.params[1]
def get_param_limits(self):
return [[self.min_length, self.min_radius], [self.max_length, self.max_radius]]
def get_param_names(self):
return ['length','radius']
def get_volume(self):
return 4./3. * np.pi * self.params[1]**3 + self.params[0] * np.pi * self.params[1]**2
class Body:
geoms = {'sphere': Sphere, 'capsule': Capsule} # dictionary of legal geometry types
def __init__(self, body, worldbody=False):
self.xml = body
self.worldbody = worldbody
geom_xml = body.find('geom') # assume only one geometry per body
self.geom = self.geoms[geom_xml.get('type')](geom_xml)
self.joints = [j for j in body.findall('joint') if 'ignore' not in j.get('name')]
self.parts = [Body(b) for b in body.findall('body')]
pos = [b.get('pos') for b in body.findall('body')]
self.part_positions = [np.array([float(x) for x in p.split()]) for p in pos]
pos = [j.get('pos') for j in self.joints]
self.joint_positions = [np.array([float(x) for x in p.split()]) for p in pos]
self.n = len(self.geom.get_params())
self.n_all_params = len(self.get_params())
self.zmin = float(self.xml.get("pos").split()[2]) - self.get_height()
def get_height(self):
max_height = -self.geom.get_smallest_z()
for body, pos in zip(self.parts, self.part_positions):
max_height = max(max_height, body.get_height() - pos[2])
return max_height
def update_initial_position(self):
pos = self.xml.get("pos").split()
pos[2] = str(self.get_height() + self.zmin)
self.xml.set("pos", ' '.join(pos))
def update_xml(self):
for body, pos in zip(self.parts, self.part_positions):
body.xml.set('pos', ' '.join([str(x) for x in pos]))
for joint, pos in zip(self.joints, self.joint_positions):
joint.set('pos', ' '.join([str(x) for x in pos]))
def set_body_positions(self, new_params):
for i, pos in enumerate(self.part_positions):
self.part_positions[i] = self.geom.update_point(pos, new_params)
for i, pos in enumerate(self.joint_positions):
self.joint_positions[i] = self.geom.update_point(pos, new_params)
def update(self, new_params):
self.set_body_positions(new_params)
self.geom.update(new_params)
self.update_xml()
def get_params(self):
params = self.geom.get_params()
for body in self.parts:
params += body.get_params()
return params
def get_param_limits(self):
limits = self.geom.get_param_limits()
for body in self.parts:
body_limits = body.get_param_limits()
limits[0] += body_limits[0]
limits[1] += body_limits[1]
return limits
def get_param_names(self):
name = self.xml.get('name')
param_names = [name + '-' + p for p in self.geom.get_param_names()]
for body in self.parts:
param_names += body.get_param_names()
return param_names
def update_params(self, new_params):
if self.worldbody: assert len(new_params) == self.n_all_params, "Wrong number of parameters"
self.update(new_params[:self.n])
remaining_params = new_params[self.n:]
for body in self.parts:
remaining_params = body.update_params(remaining_params)
if self.worldbody:
self.update_initial_position()
else:
return remaining_params
def get_body_names(self):
names = [self.xml.get('name')]
for body in self.parts:
            names += body.get_body_names()
return names
def get_joints(self):
joints = {}
for body,pos in zip(self.parts, self.part_positions):
for j in body.joints:
joints[j.get('name')] = (self.xml.get('name'), body.xml.get('name'), self.geom, body.geom, pos)
joints.update(body.get_joints())
return joints
def get_volumes(self):
volumes = {}
if len(self.joints) > 0:
for j in self.joints:
v1 = self.geom.get_volume()
v2 = sum([b.geom.get_volume() for b in self.parts])
volumes[j.get('name')] = np.array((v1, v2))
for body in self.parts:
volumes.update(body.get_volumes())
return volumes
class MuJoCoXmlRobot:
def __init__(self, model_xml):
self.model_xml = model_xml
self.tree = ET.parse(self.model_xml)
worldbody = self.tree.getroot().find('worldbody')
self.body = Body(worldbody.find('body'), worldbody=True)
def get_params(self):
return self.body.get_params()
def get_param_limits(self):
return self.body.get_param_limits()
def get_param_names(self):
return self.body.get_param_names()
def get_height(self):
return self.body.get_height()
def get_joints(self):
return self.body.get_joints()
def get_volumes(self):
return self.body.get_volumes()
def update(self, params, xml_file=None):
if xml_file is None:
xml_file = self.model_xml
self.body.update_params(list(params))
self.tree.write(xml_file)
if __name__ == '__main__':
robot = MuJoCoXmlRobot('mujoco_assets/hopper.xml')
params = list(1.0 * np.array(robot.get_params()))
robot.update(params, 'mujoco_assets/hopper_test.xml')
assert robot.get_params() == params
#assert robot.get_height() == 1.31
print(robot.get_param_limits())
print(robot.get_param_names())
robot = MuJoCoXmlRobot('mujoco_assets/walker2d.xml')
params = [.4,.04,.5,.05,.55,.055,.6,.06,.5,.05,.55,.055,.6,.06]
robot.update(params, 'mujoco_assets/walker2d_test.xml')
assert robot.get_params() == params
assert robot.get_height() == 1.31
print(robot.get_param_limits())
print(robot.get_param_names())
robot = MuJoCoXmlRobot('mujoco_assets/ant.xml')
params = [.2, .2,.06,.2,.06,.4,.06, .2,.06,.2,.06,.4,.06, .2,.06,.2,.06,.4,.06, .2,.06,.2,.06,.4,.06]
robot.update(params, 'mujoco_assets/ant_test.xml')
assert robot.get_params() == params
assert robot.get_height() == .2
print(robot.get_param_limits())
print(robot.get_param_names())
robot = MuJoCoXmlRobot('mujoco_assets/humanoid.xml')
params = list(.8 * np.array(robot.get_params()))
robot.update(params, 'mujoco_assets/humanoid_test.xml')
assert robot.get_params() == params
print(robot.get_height())
#assert robot.get_height() == .6085
print(robot.get_param_limits())
print(robot.get_param_names())
import gym, roboschool
env = gym.make("RoboschoolHopper-v1")
env.unwrapped.model_xml = 'mujoco_assets/hopper_test.xml'
env.reset()
#env.render()
import os
from scipy.misc import imsave
import subprocess as sp
outdir = 'xml_vid'
os.makedirs(outdir, exist_ok=True)
i = 0
for _ in range(10):
env.reset()
for _ in range(100):
env.step(env.action_space.sample())
rgb = env.render('rgb_array')
imsave(os.path.join(outdir, '{:05d}.png'.format(i)), rgb)
i+=1
sp.call(['ffmpeg', '-r', '60', '-f', 'image2', '-i', os.path.join(outdir, '%05d.png'), '-vcodec', 'libx264', '-pix_fmt', 'yuv420p', os.path.join(outdir, 'out.mp4')])
env.close()
| 2.796875 | 3 |
app/http/middleware/LoadUserMiddleware.py | josephmancuso/masonite-forum | 11 | 1926 | <filename>app/http/middleware/LoadUserMiddleware.py
''' Load User Middleware'''
from masonite.facades.Auth import Auth
class LoadUserMiddleware:
''' Middleware class which loads the current user into the request '''
def __init__(self, Request):
''' Inject Any Dependencies From The Service Container '''
self.request = Request
def before(self):
''' Run This Middleware Before The Route Executes '''
self.load_user(self.request)
return self.request
def after(self):
''' Run This Middleware After The Route Executes '''
pass
def load_user(self, request):
''' Load user into the request '''
request.set_user(Auth(request).user())
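# Usage note (assumed typical Masonite wiring, not shown here): this middleware is
# expected to be listed in the HTTP_MIDDLEWARE list (config/middleware.py) so that
# the Request carries the authenticated user before controllers handle the route.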
| 3.046875 | 3 |
src/unittest/python/merciful_elo_limit_tests.py | mgaertne/minqlx-plugin-tests | 4 | 1927 | <reponame>mgaertne/minqlx-plugin-tests
from minqlx_plugin_test import *
import logging
import unittest
from mockito import *
from mockito.matchers import *
from hamcrest import *
from redis import Redis
from merciful_elo_limit import *
class MercifulEloLimitTests(unittest.TestCase):
def setUp(self):
setup_plugin()
setup_cvars({
"qlx_mercifulelo_minelo": "800",
"qlx_mercifulelo_applicationgames": "10",
"qlx_mercifulelo_abovegames": "10",
"qlx_mercifulelo_daysbanned": "30",
"qlx_owner": "42"
})
setup_game_in_progress()
self.plugin = merciful_elo_limit()
self.reply_channel = mocked_channel()
self.plugin.database = Redis
self.db = mock(Redis)
self.plugin._db_instance = self.db
when(self.db).__getitem__(any).thenReturn("42")
def tearDown(self):
unstub()
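    # Test helper: installs a mock of the "balance" plugin whose ratings cache maps
    # each player's steam_id to {gametype: {'elo': <elo>}} for the current game type.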
def setup_balance_ratings(self, player_elos):
gametype = None
if len(player_elos) > 0:
gametype = self.plugin.game.type_short
ratings = {}
for player, elo in player_elos:
ratings[player.steam_id] = {gametype: {'elo': elo}}
self.plugin._loaded_plugins["balance"] = mock({'ratings': ratings})
def setup_no_balance_plugin(self):
if "balance" in self.plugin._loaded_plugins:
del self.plugin._loaded_plugins["balance"]
def setup_exception_list(self, players):
mybalance_plugin = mock(Plugin)
mybalance_plugin.exceptions = [player.steam_id for player in players]
self.plugin._loaded_plugins["mybalance"] = mybalance_plugin
def test_handle_map_change_resets_tracked_player_ids(self):
connected_players()
self.setup_balance_ratings([])
self.plugin.tracked_player_sids = [123, 455]
self.plugin.handle_map_change("campgrounds", "ca")
assert_that(self.plugin.tracked_player_sids, is_([]))
def test_handle_map_change_resets_announced_player_ids(self):
connected_players()
self.setup_balance_ratings([])
self.plugin.announced_player_elos = [123, 455]
self.plugin.handle_map_change("campgrounds", "ca")
assert_that(self.plugin.announced_player_elos, is_([]))
def test_handle_map_change_fetches_elos_of_connected_players(self):
player1 = fake_player(123, "Fake Player1", team="red")
player2 = fake_player(456, "Fake Player2", team="blue")
connected_players(player1, player2)
self.setup_balance_ratings({(player1, 900), (player2, 1200)})
self.plugin.handle_map_change("thunderstruck", "ca")
verify(self.plugin._loaded_plugins["balance"]).add_request(
{player1.steam_id: 'ca', player2.steam_id: 'ca'},
self.plugin.callback_ratings, CHAT_CHANNEL
)
def test_handle_player_connect_fetches_elo_of_connecting_player(self):
player1 = fake_player(123, "Fake Player1", team="red")
player2 = fake_player(456, "Fake Player2", team="blue")
connecting_player = fake_player(789, "Connecting Player")
connected_players(player1, player2, connecting_player)
self.setup_balance_ratings({(player1, 900), (player2, 1200), (connecting_player, 1542)})
self.plugin.handle_player_connect(connecting_player)
verify(self.plugin._loaded_plugins["balance"]).add_request(
{connecting_player.steam_id: 'ca'},
self.plugin.callback_ratings, CHAT_CHANNEL
)
def test_fetch_elos_of_players_with_no_game_setup(self):
setup_no_game()
self.setup_balance_ratings({})
self.plugin.fetch_elos_of_players([])
verify(self.plugin._loaded_plugins["balance"], times=0).add_request(any, any, any)
def test_fetch_elos_of_players_with_unsupported_gametype(self):
setup_game_in_progress("unsupported")
self.setup_balance_ratings({})
self.plugin.fetch_elos_of_players([])
verify(self.plugin._loaded_plugins["balance"], times=0).add_request(any, any, any)
def test_fetch_elos_of_player_with_no_balance_plugin(self):
mocked_logger = mock(spec=logging.Logger)
spy2(minqlx.get_logger)
when(minqlx).get_logger(self.plugin).thenReturn(mocked_logger)
self.setup_no_balance_plugin()
self.plugin.fetch_elos_of_players([])
verify(mocked_logger).warning(matches("Balance plugin not found.*"))
def test_handle_round_countdown_with_no_game(self):
setup_no_game()
player1 = fake_player(123, "Fake Player1", team="red")
player2 = fake_player(456, "Fake Player2", team="blue")
player3 = fake_player(789, "Speccing Player", team="spectator")
connected_players(player1, player2, player3)
self.setup_balance_ratings({})
self.plugin.handle_round_countdown(1)
verify(self.plugin._loaded_plugins["balance"], times=0).add_request(any, any, any)
def test_handle_round_countdown_fetches_elos_of_players_in_teams(self):
player1 = fake_player(123, "Fake Player1", team="red")
player2 = fake_player(456, "Fake Player2", team="blue")
player3 = fake_player(789, "Speccing Player", team="spectator")
connected_players(player1, player2, player3)
self.setup_balance_ratings({(player1, 900), (player2, 1200), (player3, 1600)})
self.plugin.handle_round_countdown(4)
verify(self.plugin._loaded_plugins["balance"]).add_request(
{player1.steam_id: 'ca', player2.steam_id: 'ca'},
self.plugin.callback_ratings, CHAT_CHANNEL
)
def test_callback_ratings_with_no_game_running(self):
setup_no_game()
player1 = fake_player(123, "Fake Player1", team="red")
player2 = fake_player(456, "Fake Player2", team="blue")
player3 = fake_player(789, "Speccing Player", team="spectator")
connected_players(player1, player2, player3)
self.setup_balance_ratings({})
self.plugin.callback_ratings([], minqlx.CHAT_CHANNEL)
verify(self.db, times=0).get(any)
def test_callback_ratings_with_unsupported_game_type(self):
setup_game_in_progress("unsupported")
player1 = fake_player(123, "Fake Player1", team="red")
player2 = fake_player(456, "Fake Player2", team="blue")
player3 = fake_player(789, "Speccing Player", team="spectator")
connected_players(player1, player2, player3)
self.setup_balance_ratings({})
self.plugin.callback_ratings([], minqlx.CHAT_CHANNEL)
verify(self.db, times=0).get(any)
def test_callback_ratings_warns_low_elo_player(self):
player1 = fake_player(123, "Fake Player1", team="red")
player2 = fake_player(456, "Fake Player2", team="blue")
connected_players(player1, player2)
self.setup_balance_ratings({(player1, 900), (player2, 799)})
patch(minqlx.next_frame, lambda func: func)
patch(minqlx.thread, lambda func: func)
patch(time.sleep, lambda int: None)
when(self.db).get(any).thenReturn("2")
self.plugin.callback_ratings([player1, player2], minqlx.CHAT_CHANNEL)
verify(player2, times=12).center_print(matches(".*Skill warning.*8.*matches left.*"))
verify(player2).tell(matches(".*Skill Warning.*qlstats.*below.*800.*8.*of 10 application matches.*"))
def test_callback_ratings_announces_information_to_other_players(self):
player1 = fake_player(123, "Fake Player1", team="red")
player2 = fake_player(456, "Fake Player2", team="blue")
connected_players(player1, player2)
self.setup_balance_ratings({(player1, 900), (player2, 799)})
patch(minqlx.next_frame, lambda func: func)
patch(minqlx.thread, lambda func: func)
patch(time.sleep, lambda int: None)
when(self.db).get(any).thenReturn("2")
self.plugin.callback_ratings([player1, player2], minqlx.CHAT_CHANNEL)
assert_plugin_sent_to_console(matches("Fake Player2.*is below.*, but has.*8.*application matches left.*"))
def test_callback_ratings_announces_information_to_other_players_just_once_per_connect(self):
player1 = fake_player(123, "Fake Player1", team="red")
player2 = fake_player(456, "Fake Player2", team="blue")
connected_players(player1, player2)
self.setup_balance_ratings({(player1, 900), (player2, 799)})
self.plugin.announced_player_elos = [456]
patch(minqlx.next_frame, lambda func: func)
patch(minqlx.thread, lambda func: func)
patch(time.sleep, lambda int: None)
when(self.db).get(any).thenReturn("2")
self.plugin.callback_ratings([player1, player2], minqlx.CHAT_CHANNEL)
assert_plugin_sent_to_console(matches("Player.*is below.*, but has 8 application matches left.*"), times=0)
def test_callback_ratings_makes_exception_for_player_in_exception_list(self):
player1 = fake_player(123, "Fake Player1", team="red")
player2 = fake_player(456, "Fake Player2", team="blue")
player3 = fake_player(789, "Fake Player3", team="red")
connected_players(player1, player2, player3)
self.setup_balance_ratings({(player1, 900), (player2, 799), (player3, 600)})
self.setup_exception_list([player3])
patch(minqlx.next_frame, lambda func: func)
patch(minqlx.thread, lambda func: func)
patch(time.sleep, lambda int: None)
when(self.db).get(any).thenReturn("2")
self.plugin.callback_ratings([player1, player2, player3], minqlx.CHAT_CHANNEL)
verify(player2, times=12).center_print(matches(".*Skill warning.*8.*matches left.*"))
verify(player2).tell(matches(".*Skill Warning.*qlstats.*below.*800.*8.*of 10 application matches.*"))
verify(player3, times=0).center_print(any)
verify(player3, times=0).tell(any)
def test_callback_ratings_warns_low_elo_player_when_application_games_not_set(self):
player1 = fake_player(123, "Fake Player1", team="red")
player2 = fake_player(456, "Fake Player2", team="blue")
connected_players(player1, player2)
self.setup_balance_ratings({(player1, 900), (player2, 799)})
patch(minqlx.next_frame, lambda func: func)
patch(minqlx.thread, lambda func: func)
patch(time.sleep, lambda int: None)
when(self.db).get(any).thenReturn(None)
self.plugin.callback_ratings([player1, player2], minqlx.CHAT_CHANNEL)
verify(player2, times=12).center_print(matches(".*Skill warning.*10.*matches left.*"))
verify(player2).tell(matches(".*Skill Warning.*qlstats.*below.*800.*10.*of 10 application matches.*"))
def test_callback_ratings_bans_low_elo_players_that_used_up_their_application_games(self):
player1 = fake_player(123, "Fake Player1", team="red")
player2 = fake_player(456, "Fake Player2", team="blue")
connected_players(player1, player2)
self.setup_balance_ratings({(player1, 900), (player2, 799)})
when(self.db).get(any).thenReturn("11")
spy2(minqlx.COMMANDS.handle_input)
when2(minqlx.COMMANDS.handle_input, any, any, any).thenReturn(None)
patch(minqlx.PlayerInfo, lambda *args: mock(spec=minqlx.PlayerInfo))
patch(minqlx.next_frame, lambda func: func)
when(self.db).delete(any).thenReturn(None)
self.plugin.callback_ratings([player1, player2], minqlx.CHAT_CHANNEL)
verify(minqlx.COMMANDS).handle_input(any, any, any)
verify(self.db).delete("minqlx:players:{}:minelo:abovegames".format(player2.steam_id))
verify(self.db).delete("minqlx:players:{}:minelo:freegames".format(player2.steam_id))
def test_handle_round_start_increases_application_games_for_untracked_player(self):
player1 = fake_player(123, "Fake Player1", team="red")
player2 = fake_player(456, "Fake Player2", team="blue")
connected_players(player1, player2)
self.setup_balance_ratings({(player1, 900), (player2, 799)})
when(self.db).get(any).thenReturn("3")
when(self.db).delete(any).thenReturn(None)
when(self.db).exists(any).thenReturn(False)
when(self.db).incr(any).thenReturn(None)
self.plugin.handle_round_start(1)
verify(self.db).incr("minqlx:players:{}:minelo:freegames".format(player2.steam_id))
def test_handle_round_start_makes_exception_for_player_in_exception_list(self):
player1 = fake_player(123, "Fake Player1", team="red")
player2 = fake_player(456, "Fake Player2", team="blue")
player3 = fake_player(789, "Fake Player3", team="red")
connected_players(player1, player2, player3)
self.setup_balance_ratings({(player1, 900), (player2, 799), (player3, 600)})
self.setup_exception_list([player3])
when(self.db).get(any).thenReturn("3")
when(self.db).delete(any).thenReturn(None)
when(self.db).exists(any).thenReturn(False)
when(self.db).incr(any).thenReturn(None)
self.plugin.handle_round_start(1)
verify(self.db).incr("minqlx:players:{}:minelo:freegames".format(player2.steam_id))
verify(self.db, times=0).incr("minqlx:players:{}:minelo:freegames".format(player3.steam_id))
def test_handle_round_start_starts_tracking_for_low_elo_player(self):
player1 = fake_player(123, "Fake Player1", team="red")
player2 = fake_player(456, "Fake Player2", team="blue")
connected_players(player1, player2)
self.setup_balance_ratings({(player1, 900), (player2, 799)})
when(self.db).get(any).thenReturn("3")
when(self.db).delete(any).thenReturn(None)
when(self.db).exists(any).thenReturn(False)
when(self.db).incr(any).thenReturn(None)
self.plugin.handle_round_start(1)
assert_that(self.plugin.tracked_player_sids, has_item(player2.steam_id))
def test_handle_round_start_resets_above_games_for_low_elo_player(self):
player1 = fake_player(123, "Fake Player1", team="red")
player2 = fake_player(456, "Fake Player2", team="blue")
connected_players(player1, player2)
self.setup_balance_ratings({(player1, 900), (player2, 799)})
when(self.db).get(any).thenReturn("3")
when(self.db).delete(any).thenReturn(None)
when(self.db).exists(any).thenReturn(True)
when(self.db).incr(any).thenReturn(None)
self.plugin.handle_round_start(1)
verify(self.db).delete("minqlx:players:{}:minelo:abovegames".format(player2.steam_id))
def test_handle_round_start_increases_above_games_for_application_games_player(self):
player1 = fake_player(123, "Fake Player1", team="red")
player2 = fake_player(456, "Fake Player2", team="blue")
connected_players(player1, player2)
self.setup_balance_ratings({(player1, 900), (player2, 801)})
when(self.db).get(any).thenReturn("3")
when(self.db).delete(any).thenReturn(None)
when(self.db).exists(any).thenReturn(True)
when(self.db).incr(any).thenReturn(None)
self.plugin.handle_round_start(1)
verify(self.db).incr("minqlx:players:{}:minelo:abovegames".format(player2.steam_id))
    def test_handle_round_start_increases_above_games_for_application_games_player_with_no_above_games_set(self):
player1 = fake_player(123, "Fake Player1", team="red")
player2 = fake_player(456, "Fake Player2", team="blue")
connected_players(player1, player2)
self.setup_balance_ratings({(player1, 900), (player2, 801)})
when(self.db).get(any).thenReturn("1")
when(self.db).delete(any).thenReturn(None)
when(self.db).exists(any).thenReturn(True)
when(self.db).incr(any).thenReturn(None)
self.plugin.handle_round_start(1)
verify(self.db).incr("minqlx:players:{}:minelo:abovegames".format(player2.steam_id))
def test_handle_round_start_starts_tracking_of_above_elo_players_for_application_games_player(self):
player1 = fake_player(123, "Fake Player1", team="red")
player2 = fake_player(456, "Fake Player2", team="blue")
connected_players(player1, player2)
self.setup_balance_ratings({(player1, 900), (player2, 801)})
when(self.db).get(any).thenReturn("3")
when(self.db).delete(any).thenReturn(None)
when(self.db).exists(any).thenReturn(True)
when(self.db).incr(any).thenReturn(None)
self.plugin.handle_round_start(1)
assert_that(self.plugin.tracked_player_sids, has_item(player2.steam_id))
def test_handle_round_start_removes_minelo_db_entries_for_above_elo_player(self):
player1 = fake_player(123, "Fake Player1", team="red")
player2 = fake_player(456, "Fake Player2", team="blue")
connected_players(player1, player2)
self.setup_balance_ratings({(player1, 900), (player2, 801)})
when(self.db).get(any).thenReturn("11")
when(self.db).delete(any).thenReturn(None)
when(self.db).exists(any).thenReturn(True)
when(self.db).incr(any).thenReturn(None)
self.plugin.handle_round_start(1)
verify(self.db).delete("minqlx:players:{}:minelo:freegames".format(player2.steam_id))
verify(self.db).delete("minqlx:players:{}:minelo:abovegames".format(player2.steam_id))
def test_handle_round_start_skips_already_tracked_player(self):
player1 = fake_player(123, "Fake Player1", team="red")
player2 = fake_player(456, "Fake Player2", team="blue")
connected_players(player1, player2)
self.plugin.tracked_player_sids.append(player2.steam_id)
self.setup_balance_ratings({(player1, 900), (player2, 799)})
when(self.db).get(any).thenReturn(3)
when(self.db).delete(any).thenReturn(None)
when(self.db).exists(any).thenReturn(False)
when(self.db).incr(any).thenReturn(None)
self.plugin.handle_round_start(1)
verify(self.db, times=0).delete(any)
        verify(self.db, times=0).incr(any)
def test_handle_round_start_with_unsupported_gametype(self):
setup_game_in_progress("unsupported")
player1 = fake_player(123, "Fake Player1", team="red")
player2 = fake_player(456, "Fake Player2", team="blue")
connected_players(player1, player2)
self.setup_balance_ratings({})
self.plugin.handle_round_start(2)
verify(self.plugin._loaded_plugins["balance"], times=0).add_request(any, any, any)
def test_handle_round_start_with_no_balance_plugin(self):
player1 = fake_player(123, "Fake Player1", team="red")
player2 = fake_player(456, "Fake Player2", team="blue")
connected_players(player1, player2)
mocked_logger = mock(spec=logging.Logger)
spy2(minqlx.get_logger)
when(minqlx).get_logger(self.plugin).thenReturn(mocked_logger)
self.setup_no_balance_plugin()
self.plugin.handle_round_start(5)
verify(mocked_logger, atleast=1).warning(matches("Balance plugin not found.*"))
def test_cmd_mercis_shows_currently_connected_merciful_players(self):
player = fake_player(666, "Cmd using Player")
player1 = fake_player(123, "Fake Player1", team="red")
player2 = fake_player(456, "Fake Player2", team="blue")
player3 = fake_player(789, "Fake Player3", team="blue")
connected_players(player, player1, player2, player3)
self.setup_balance_ratings({(player, 1400), (player1, 801), (player2, 799), (player3, 900)})
when(self.db).get("minqlx:players:{}:minelo:freegames".format(player1.steam_id)).thenReturn("2")
when(self.db).get("minqlx:players:{}:minelo:freegames".format(player2.steam_id)).thenReturn("3")
when(self.db).get("minqlx:players:{}:minelo:abovegames".format(player1.steam_id)).thenReturn("6")
when(self.db).get("minqlx:players:{}:minelo:freegames".format(player.steam_id)).thenReturn(None)
when(self.db).get("minqlx:players:{}:minelo:freegames".format(player3.steam_id)).thenReturn(None)
self.plugin.cmd_mercis(player, ["!mercis"], self.reply_channel)
        assert_channel_was_replied(self.reply_channel, matches(r"Fake Player1 \(elo: 801\):.*8.*application matches "
                                                                r"left,.*6.*matches above.*"))
        assert_channel_was_replied(self.reply_channel, matches(r"Fake Player2 \(elo: 799\):.*7.*application matches "
                                                                r"left"))
    def test_cmd_mercis_replies_to_main_channel_instead_of_team_chat(self):
self.addCleanup(self.reset_chat_channel, minqlx.CHAT_CHANNEL)
minqlx.CHAT_CHANNEL = mocked_channel()
player = fake_player(666, "Cmd using Player")
player1 = fake_player(123, "Fake Player1", team="red")
player2 = fake_player(456, "Fake Player2", team="blue")
player3 = fake_player(789, "Fake Player3", team="blue")
connected_players(player, player1, player2, player3)
self.setup_balance_ratings({(player, 1400), (player1, 801), (player2, 799), (player3, 900)})
when(self.db).get("minqlx:players:{}:minelo:freegames".format(player1.steam_id)).thenReturn("2")
when(self.db).get("minqlx:players:{}:minelo:freegames".format(player2.steam_id)).thenReturn("3")
when(self.db).get("minqlx:players:{}:minelo:abovegames".format(player1.steam_id)).thenReturn("6")
when(self.db).get("minqlx:players:{}:minelo:freegames".format(player.steam_id)).thenReturn(None)
when(self.db).get("minqlx:players:{}:minelo:freegames".format(player3.steam_id)).thenReturn(None)
self.plugin.cmd_mercis(player, ["!mercis"], minqlx.BLUE_TEAM_CHAT_CHANNEL)
        assert_channel_was_replied(minqlx.CHAT_CHANNEL, matches(r"Fake Player1 \(elo: 801\):.*8.*application matches "
                                                                 r"left,.*6.*matches above.*"))
        assert_channel_was_replied(minqlx.CHAT_CHANNEL, matches(r"Fake Player2 \(elo: 799\):.*7.*application matches "
                                                                 r"left"))
def reset_chat_channel(self, original_chat_channel):
minqlx.CHAT_CHANNEL = original_chat_channel
def test_cmd_mercis_shows_no_mercis_if_no_player_using_their_application_matches(self):
player = fake_player(666, "Cmd using Player")
connected_players(player)
self.setup_balance_ratings({(player, 1400)})
when(self.db).get(any).thenReturn(None)
self.plugin.cmd_mercis(player, ["!mercis"], minqlx.CHAT_CHANNEL)
assert_plugin_sent_to_console(any, times=0)
| 2.328125 | 2 |
meiduo_mall/celery_tasks/sms/tasks.py | Vent-Any/meiduo_mall_cangku | 0 | 1928 | from ronglian_sms_sdk import SmsSDK
from celery_tasks.main import app
# Define our tasks (plain functions) here.
# A task must be decorated with the Celery app instance's @app.task decorator.
# Tasks placed in a task package are registered through Celery's autodiscovery (configured in main).
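# (Illustrative note, not in the original file.) Once a worker is running,
# callers normally queue this task asynchronously instead of invoking it
# directly, e.g. from the Django view that generated the code:
#   celery_send_sms_code.delay(mobile, sms_code)
# `.delay()` is Celery's shortcut for `apply_async()`; the worker then runs
# the function body below.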
@app.task
def celery_send_sms_code(mobile, sms_code):
accId = '<KEY>'
accToken = '514a8783b8c2481ebbeb6a814434796f'
appId = '<KEY>'
    # 9.1. Create the Ronglian Cloud SMS SDK instance
sdk = SmsSDK(accId, accToken, appId)
    tid = '1'  # SMS template id; it can only be 1 because we are a trial account
    mobile = '%s' % mobile  # 'phone1,phone2' - the numbers that receive the code; only whitelisted test numbers are allowed
    datas = (sms_code, 10)  # ('var1', 'var2') - the variables substituted into the template
    # Template: "Your verification code is {1}, please enter it within {2} minutes"
    # e.g. "Your verification code is 666999, please enter it within 5 minutes"
    # 9.2. Send the SMS
    sdk.sendMessage(tid, mobile, datas)
| 1.890625 | 2 |
delphiIDE.py | JeisonJHA/Plugins-Development | 0 | 1929 | import sublime_plugin
class MethodDeclaration(object):
    """Value object describing a Delphi method declaration parsed from the view."""
def __init__(self):
self._methodclass = None
self.has_implementation = False
self.has_interface = False
@property
def has_implementation(self):
return self._has_implementation
@has_implementation.setter
def has_implementation(self, value):
self._has_implementation = value
@property
def has_interface(self):
return self._has_interface
@has_interface.setter
def has_interface(self, value):
self._has_interface = value
@property
def methodname(self):
return self._methodname
@methodname.setter
def methodname(self, value):
self._methodname = value
@property
def methodregion(self):
return self._methodregion
@methodregion.setter
def methodregion(self, value):
self._methodregion = value
@property
def visibility(self):
return self._visibility
@visibility.setter
def visibility(self, value):
self._visibility = value
@property
def params(self):
return self._params
@params.setter
def params(self, value):
self._params = value
@property
def methodclass(self):
return self._methodclass
@methodclass.setter
def methodclass(self, value):
self._methodclass = value
class ClassDeclaration(object):
    """Value object describing a Delphi class declaration and its visibility regions."""
@property
def classname(self):
return self._classname
@classname.setter
def classname(self, value):
self._classname = value
@property
def classregion(self):
return self._classregion
@classregion.setter
def classregion(self, value):
self._classregion = value
@property
def privateregion(self):
return self._privateregion
@privateregion.setter
def privateregion(self, value):
self._privateregion = value
@property
def protectedregion(self):
return self._protectedregion
@protectedregion.setter
def protectedregion(self, value):
self._protectedregion = value
@property
def publicregion(self):
return self._publicregion
@publicregion.setter
def publicregion(self, value):
self._publicregion = value
@property
def publishedregion(self):
return self._publishedregion
@publishedregion.setter
def publishedregion(self, value):
self._publishedregion = value
class DelphiIdeCommand(sublime_plugin.TextCommand):
# // { "keys": ["ctrl+shift+x"], "command": "delphi_ide", "args": {"teste": "delphimethodnav"}}
# view.window().run_command('show_panel',
# args={"panel": 'output.find_results', "toggle": True})
def run(self, edit, teste):
print('teste[0]:%s' % teste)
method = None
try:
method = getattr(self, teste)
except AttributeError:
raise NotImplementedError("Class `{}` does not implement `{}`".
format(self.__class__.__name__,
teste))
method()
def delphimethodnav(self):
print('vai doido')
def getMethodInformation(self):
view = self.view
cursor_region = view.sel()[0]
cursor_pt = view.sel()[0].begin()
if not view.match_selector(cursor_pt,
'function.implementation.delphi'):
# exit because it is not in a method
return None
def params(region):
params_region = view.find_by_selector(
'meta.function.parameters.delphi')
param_name_region = view.find_by_selector(
'variable.parameter.function.delphi')
params_region_filt = [
s for s in params_region if region.contains(s)]
params_region_filt = [
s for s in param_name_region if
params_region_filt[0].contains(s)]
return params_region_filt
def paramsFromRegion(region):
try:
params_region_filt = params(region)
x = [view.substr(x) for x in params_region_filt]
return x
except:
return []
def getFunctionName():
functionname = view.find_by_selector('entity.name.function')
functionnamefiltered = [
n for n in functionname if method.methodregion[0].contains(n)]
return view.substr(functionnamefiltered[0])
# has_implementation
# has_interface
# methodname
# methodregion
# visibility
# params
# methodclass
method = MethodDeclaration()
selector = view.find_by_selector
method.methodregion = [r for r in selector('meta.function.delphi')
if cursor_region.intersects(r)]
method.methodname = getFunctionName()
        method.params = paramsFromRegion(method.methodregion[0])
return method
def getClassInformation(self):
pass
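# --- Illustrative sketch (not part of the original plugin) ---
# `run` dispatches on the `teste` argument, so a key binding such as
#   { "keys": ["ctrl+shift+x"], "command": "delphi_ide", "args": {"teste": "delphimethodnav"} }
# ends up calling DelphiIdeCommand.delphimethodnav() via getattr(). A navigation
# implementation would presumably build on getMethodInformation(), for example:
#   method = self.getMethodInformation()
#   if method is not None:
#       self.view.show_at_center(method.methodregion[0])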
| 2.515625 | 3 |
python/test_pip_package.py | syt123450/tfjs-converter | 0 | 1930 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test the Python API and shell binary of the tensorflowjs pip package."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import json
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import variables
from tensorflow.python.training.tracking import tracking
from tensorflow.python.saved_model.save import save
import tensorflow_hub as hub
import tensorflowjs as tfjs
def _createKerasModel(layer_name_prefix, h5_path=None):
"""Create a Keras model for testing.
Args:
layer_name_prefix: A prefix string for layer names. This helps avoid
clashes in layer names between different test methods.
h5_path: Optional string path for a HDF5 (.h5) file to save the model
in.
Returns:
An instance of keras.Model.
"""
input_tensor = keras.layers.Input((3, ))
dense1 = keras.layers.Dense(
4,
use_bias=True,
kernel_initializer='ones',
bias_initializer='zeros',
name=layer_name_prefix + '1')(input_tensor)
output = keras.layers.Dense(
2,
use_bias=False,
kernel_initializer='ones',
name=layer_name_prefix + '2')(dense1)
model = keras.models.Model(inputs=[input_tensor], outputs=[output])
if h5_path:
model.save(h5_path)
return model
def _createTensorFlowSavedModelV1(name_scope, save_path):
"""Create a TensorFlow SavedModel for testing.
Args:
name_scope: Name scope to create the model under. This helps avoid
op and variable name clashes between different test methods.
save_path: The directory path in which to save the model.
"""
graph = tf.Graph()
with graph.as_default():
with tf.compat.v1.name_scope(name_scope):
x = tf.compat.v1.constant([[37.0, -23.0], [1.0, 4.0]])
w = tf.compat.v1.get_variable('w', shape=[2, 2])
y = tf.compat.v1.matmul(x, w)
output = tf.compat.v1.nn.softmax(y)
init_op = w.initializer
# Create a builder.
builder = tf.compat.v1.saved_model.builder.SavedModelBuilder(save_path)
with tf.compat.v1.Session() as sess:
# Run the initializer on `w`.
sess.run(init_op)
builder.add_meta_graph_and_variables(
sess, [tf.compat.v1.saved_model.tag_constants.SERVING],
signature_def_map={
"serving_default":
tf.compat.v1.saved_model.signature_def_utils.predict_signature_def(
inputs={"x": x},
outputs={"output": output})
},
assets_collection=None)
builder.save()
def _createTensorFlowSavedModel(name_scope, save_path):
"""Create a TensorFlow SavedModel for testing.
Args:
name_scope: Name scope to create the model under. This helps avoid
op and variable name clashes between different test methods.
save_path: The directory path in which to save the model.
"""
input_data = constant_op.constant(1., shape=[1])
root = tracking.AutoTrackable()
root.v1 = variables.Variable(3.)
root.v2 = variables.Variable(2.)
root.f = def_function.function(lambda x: root.v1 * root.v2 * x)
to_save = root.f.get_concrete_function(input_data)
save(root, save_path, to_save)
def _create_hub_module(save_path):
"""Create a TensorFlow Hub module for testing.
Args:
save_path: The directory path in which to save the model.
"""
# Module function that doubles its input.
def double_module_fn():
w = tf.Variable([2.0, 4.0])
x = tf.compat.v1.placeholder(dtype=tf.float32)
hub.add_signature(inputs=x, outputs=x*w)
graph = tf.Graph()
with graph.as_default():
spec = hub.create_module_spec(double_module_fn)
m = hub.Module(spec)
# Export the module.
with tf.compat.v1.Session(graph=graph) as sess:
sess.run(tf.compat.v1.global_variables_initializer())
m.export(save_path, sess)
class APIAndShellTest(tf.test.TestCase):
"""Tests for the Python API of the pip package."""
@classmethod
def setUpClass(cls):
cls.class_tmp_dir = tempfile.mkdtemp()
cls.tf_saved_model_dir = os.path.join(cls.class_tmp_dir, 'tf_saved_model')
cls.tf_saved_model_v1_dir = os.path.join(
cls.class_tmp_dir, 'tf_saved_model_v1')
_createTensorFlowSavedModel('a', cls.tf_saved_model_dir)
_createTensorFlowSavedModelV1('b', cls.tf_saved_model_v1_dir)
cls.tf_hub_module_dir = os.path.join(cls.class_tmp_dir, 'tf_hub_module')
_create_hub_module(cls.tf_hub_module_dir)
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.class_tmp_dir)
def setUp(self):
# Make sure this file is not being run from the source directory, to
# avoid picking up source files.
if os.path.isdir(
os.path.join(os.path.dirname(__file__), 'tensorflowjs')):
self.fail('Do not run this test from the Python source directory. '
'This file is intended to be run on pip install.')
self._tmp_dir = tempfile.mkdtemp()
super(APIAndShellTest, self).setUp()
def tearDown(self):
if os.path.isdir(self._tmp_dir):
shutil.rmtree(self._tmp_dir)
super(APIAndShellTest, self).tearDown()
def testVersionString(self):
self.assertEqual(2, tfjs.__version__.count('.'))
def testSaveKerasModel(self):
with self.test_session():
# First create a toy keras model.
model = _createKerasModel('MergedDense')
tfjs.converters.save_keras_model(model, self._tmp_dir)
# Briefly check the model topology.
with open(os.path.join(self._tmp_dir, 'model.json')) as f:
json_content = json.load(f)
model_json = json_content['modelTopology']
self.assertIsInstance(model_json['model_config'], dict)
self.assertIsInstance(model_json['model_config']['config'], dict)
self.assertIn('layers', model_json['model_config']['config'])
weights_manifest = json_content['weightsManifest']
self.assertIsInstance(weights_manifest, list)
# Briefly check the weights manifest.
weight_shapes = dict()
weight_dtypes = dict()
for manifest_item in weights_manifest:
for weight in manifest_item['weights']:
weight_name = weight['name']
weight_shapes[weight_name] = weight['shape']
weight_dtypes[weight_name] = weight['dtype']
self.assertEqual(
sorted(list(weight_shapes.keys())),
sorted([
'MergedDense1/kernel', 'MergedDense1/bias',
'MergedDense2/kernel'
]))
self.assertEqual(weight_shapes['MergedDense1/kernel'], [3, 4])
self.assertEqual(weight_shapes['MergedDense1/bias'], [4])
self.assertEqual(weight_shapes['MergedDense2/kernel'], [4, 2])
self.assertEqual(weight_dtypes['MergedDense1/kernel'], 'float32')
self.assertEqual(weight_dtypes['MergedDense1/bias'], 'float32')
self.assertEqual(weight_dtypes['MergedDense2/kernel'], 'float32')
def testLoadKerasModel(self):
# Use separate tf.Graph and tf.compat.v1.Session contexts to prevent name collision.
with tf.Graph().as_default(), tf.compat.v1.Session():
# First create a toy keras model.
model1 = _createKerasModel('MergedDense')
tfjs.converters.save_keras_model(model1, self._tmp_dir)
model1_weight_values = model1.get_weights()
with tf.Graph().as_default(), tf.compat.v1.Session():
# Load the model from saved artifacts.
model2 = tfjs.converters.load_keras_model(
os.path.join(self._tmp_dir, 'model.json'))
# Compare the loaded model with the original one.
model2_weight_values = model2.get_weights()
self.assertEqual(len(model1_weight_values), len(model2_weight_values))
for model1_weight_value, model2_weight_value in zip(
model1_weight_values, model2_weight_values):
self.assertAllClose(model1_weight_value, model2_weight_value)
# Check the content of the output directory.
self.assertTrue(glob.glob(os.path.join(self._tmp_dir, 'group*-*')))
def testInvalidInputFormatRaisesError(self):
process = subprocess.Popen(
[
'tensorflowjs_converter', '--input_format',
'nonsensical_format', self._tmp_dir, self._tmp_dir
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
_, stderr = process.communicate()
self.assertGreater(process.returncode, 0)
self.assertIn(b'--input_format', tf.compat.as_bytes(stderr))
def testMissingInputPathRaisesError(self):
process = subprocess.Popen(
[
'tensorflowjs_converter'
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
_, stderr = process.communicate()
self.assertGreater(process.returncode, 0)
self.assertIn(b'input_path', tf.compat.as_bytes(stderr))
def testKerasH5ConversionWorksFromCLI(self):
with tf.Graph().as_default(), tf.compat.v1.Session():
# First create a toy keras model.
os.makedirs(os.path.join(self._tmp_dir, 'keras_h5'))
h5_path = os.path.join(self._tmp_dir, 'keras_h5', 'model.h5')
_createKerasModel('MergedDenseForCLI', h5_path)
process = subprocess.Popen([
'tensorflowjs_converter', '--input_format', 'keras', h5_path,
self._tmp_dir
])
process.communicate()
self.assertEqual(0, process.returncode)
# Briefly check the model topology.
with open(os.path.join(self._tmp_dir, 'model.json'), 'rt') as f:
json_content = json.load(f)
model_json = json_content['modelTopology']
self.assertIsInstance(model_json['model_config'], dict)
self.assertIsInstance(model_json['model_config']['config'], dict)
self.assertIn('layers', model_json['model_config']['config'])
weights_manifest = json_content['weightsManifest']
self.assertIsInstance(weights_manifest, list)
# Briefly check the weights manifest.
weight_shapes = dict()
weight_dtypes = dict()
for manifest_item in weights_manifest:
for weight in manifest_item['weights']:
weight_name = weight['name']
weight_shapes[weight_name] = weight['shape']
weight_dtypes[weight_name] = weight['dtype']
self.assertEqual(
sorted(list(weight_shapes.keys())),
sorted([
'MergedDenseForCLI1/kernel', 'MergedDenseForCLI1/bias',
'MergedDenseForCLI2/kernel'
]))
self.assertEqual(weight_shapes['MergedDenseForCLI1/kernel'], [3, 4])
self.assertEqual(weight_shapes['MergedDenseForCLI1/bias'], [4])
self.assertEqual(weight_shapes['MergedDenseForCLI2/kernel'], [4, 2])
self.assertEqual(weight_dtypes['MergedDenseForCLI1/kernel'], 'float32')
self.assertEqual(weight_dtypes['MergedDenseForCLI1/bias'], 'float32')
self.assertEqual(weight_dtypes['MergedDenseForCLI2/kernel'], 'float32')
# Verify that there is only one weight group due to the default
# non-split_weights_by_layer behavior. The model is a small one, which
# does not exceed the 4-MB shard size limit. Therefore, there should
# be only one weight file.
self.assertEqual(
1, len(glob.glob(os.path.join(self._tmp_dir, 'group*'))))
def testKerasH5ConversionSplitWeightsByLayerWorksFromCLI(self):
with tf.Graph().as_default(), tf.compat.v1.Session():
# First create a toy keras model.
os.makedirs(os.path.join(self._tmp_dir, 'keras_h5'))
h5_path = os.path.join(self._tmp_dir, 'keras_h5', 'model.h5')
_createKerasModel('MergedDenseForCLI', h5_path)
process = subprocess.Popen([
'tensorflowjs_converter', '--input_format', 'keras',
'--split_weights_by_layer', h5_path, self._tmp_dir
])
process.communicate()
self.assertEqual(0, process.returncode)
# Briefly check the model topology.
with open(os.path.join(self._tmp_dir, 'model.json'), 'rt') as f:
json_content = json.load(f)
model_json = json_content['modelTopology']
self.assertIsInstance(model_json['model_config'], dict)
self.assertIsInstance(model_json['model_config']['config'], dict)
self.assertIn('layers', model_json['model_config']['config'])
weights_manifest = json_content['weightsManifest']
self.assertIsInstance(weights_manifest, list)
# Briefly check the weights manifest.
weight_shapes = dict()
weight_dtypes = dict()
for manifest_item in weights_manifest:
for weight in manifest_item['weights']:
weight_name = weight['name']
weight_shapes[weight_name] = weight['shape']
weight_dtypes[weight_name] = weight['dtype']
self.assertEqual(
sorted(list(weight_shapes.keys())),
sorted([
'MergedDenseForCLI1/kernel', 'MergedDenseForCLI1/bias',
'MergedDenseForCLI2/kernel'
]))
self.assertEqual(weight_shapes['MergedDenseForCLI1/kernel'], [3, 4])
self.assertEqual(weight_shapes['MergedDenseForCLI1/bias'], [4])
self.assertEqual(weight_shapes['MergedDenseForCLI2/kernel'], [4, 2])
self.assertEqual(weight_dtypes['MergedDenseForCLI1/kernel'], 'float32')
self.assertEqual(weight_dtypes['MergedDenseForCLI1/bias'], 'float32')
self.assertEqual(weight_dtypes['MergedDenseForCLI2/kernel'], 'float32')
# Verify that there are two weight groups due to the optional flag
# --split_weights_by_layer behavior. The model is a small one. None of
# the layers should have weight sizes exceeding the 4-MB shard size
# limit.
self.assertEqual(
2, len(glob.glob(os.path.join(self._tmp_dir, 'group*'))))
def testKerasH5ConversionWithSignatureNameErrors(self):
process = subprocess.Popen(
[
'tensorflowjs_converter', '--input_format', 'keras',
'--signature_name', 'bar',
os.path.join(self._tmp_dir, 'foo.h5'),
os.path.join(self._tmp_dir, 'output')
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
_, stderr = process.communicate()
self.assertGreater(process.returncode, 0)
self.assertIn(
b'The --signature_name flag is applicable only to',
tf.compat.as_bytes(stderr))
def testConvertTFSavedModelV1WithCommandLineWorks(self):
output_dir = os.path.join(self._tmp_dir)
process = subprocess.Popen([
'tensorflowjs_converter', '--input_format', 'tf_saved_model',
'--output_format', 'tfjs_graph_model',
self.tf_saved_model_v1_dir, output_dir
])
process.communicate()
self.assertEqual(0, process.returncode)
weights = [{
'paths': ['group1-shard1of1.bin'],
'weights': [{'dtype': 'float32', 'name': 'w', 'shape': [2, 2]}]}]
# Load the saved weights as a JSON string.
output_json = json.load(
open(os.path.join(output_dir, 'model.json'), 'rt'))
self.assertEqual(output_json['weightsManifest'], weights)
# Check the content of the output directory.
self.assertTrue(glob.glob(os.path.join(output_dir, 'group*-*')))
def testConvertTFHubModuleWithCommandLineWorks(self):
output_dir = os.path.join(self._tmp_dir)
process = subprocess.Popen([
'tensorflowjs_converter', '--input_format', 'tf_hub',
self.tf_hub_module_dir, output_dir
])
process.communicate()
self.assertEqual(0, process.returncode)
weights = [{
'paths': ['group1-shard1of1.bin'],
'weights': [{
'shape': [2],
'name': 'module/Variable',
'dtype': 'float32'
}]
}]
# Load the saved weights as a JSON string.
output_json = json.load(
open(os.path.join(output_dir, 'model.json'), 'rt'))
self.assertEqual(output_json['weightsManifest'], weights)
# Check the content of the output directory.
self.assertTrue(glob.glob(os.path.join(output_dir, 'group*-*')))
def testConvertTFSavedModelWithCommandLineWorks(self):
output_dir = os.path.join(self._tmp_dir)
process = subprocess.Popen([
'tensorflowjs_converter', '--input_format', 'tf_saved_model',
'--output_format', 'tfjs_graph_model',
self.tf_saved_model_dir, output_dir
])
process.communicate()
self.assertEqual(0, process.returncode)
weights = [{
'paths': ['group1-shard1of1.bin'],
'weights': [{
'dtype': 'float32',
'shape': [],
'name': 'StatefulPartitionedCall/mul'
}]
}]
# Load the saved weights as a JSON string.
output_json = json.load(
open(os.path.join(output_dir, 'model.json'), 'rt'))
weights_manifest = output_json['weightsManifest']
self.assertEqual(len(weights_manifest), len(weights))
if sys.version_info[0] < 3:
self.assertItemsEqual(weights_manifest[0]['paths'],
weights[0]['paths'])
self.assertItemsEqual(weights_manifest[0]['weights'],
weights[0]['weights'])
else:
self.assertCountEqual(weights_manifest[0]['paths'],
weights[0]['paths'])
self.assertCountEqual(weights_manifest[0]['weights'],
weights[0]['weights'])
# Check the content of the output directory.
self.assertTrue(glob.glob(os.path.join(output_dir, 'group*-*')))
def testConvertTensorflowjsArtifactsToKerasH5(self):
# 1. Create a toy keras model and save it as an HDF5 file.
os.makedirs(os.path.join(self._tmp_dir, 'keras_h5'))
h5_path = os.path.join(self._tmp_dir, 'keras_h5', 'model.h5')
with tf.Graph().as_default(), tf.compat.v1.Session():
model = _createKerasModel('MergedDenseForCLI', h5_path)
model_json = model.to_json()
# 2. Convert the HDF5 file to tensorflowjs format.
process = subprocess.Popen([
'tensorflowjs_converter', '--input_format', 'keras', h5_path,
self._tmp_dir
])
process.communicate()
self.assertEqual(0, process.returncode)
# 3. Convert the tensorflowjs artifacts back to HDF5.
new_h5_path = os.path.join(self._tmp_dir, 'model_2.h5')
process = subprocess.Popen([
'tensorflowjs_converter', '--input_format', 'tfjs_layers_model',
'--output_format', 'keras',
os.path.join(self._tmp_dir, 'model.json'), new_h5_path])
process.communicate()
self.assertEqual(0, process.returncode)
# 4. Load the model back from the new HDF5 file and compare with the
# original model.
with tf.Graph().as_default(), tf.compat.v1.Session():
model_2 = keras.models.load_model(new_h5_path)
model_2_json = model_2.to_json()
self.assertEqual(model_json, model_2_json)
def testLoadTensorflowjsArtifactsAsKerasModel(self):
# 1. Create a toy keras model and save it as an HDF5 file.
os.makedirs(os.path.join(self._tmp_dir, 'keras_h5'))
h5_path = os.path.join(self._tmp_dir, 'keras_h5', 'model.h5')
with tf.Graph().as_default(), tf.compat.v1.Session():
model = _createKerasModel('MergedDenseForCLI', h5_path)
model_json = model.to_json()
# 2. Convert the HDF5 file to tensorflowjs format.
process = subprocess.Popen([
'tensorflowjs_converter', '--input_format', 'keras', h5_path,
self._tmp_dir
])
process.communicate()
self.assertEqual(0, process.returncode)
# 3. Load the tensorflowjs artifacts as a keras.Model instance.
with tf.Graph().as_default(), tf.compat.v1.Session():
model_2 = tfjs.converters.load_keras_model(
os.path.join(self._tmp_dir, 'model.json'))
model_2_json = model_2.to_json()
self.assertEqual(model_json, model_2_json)
def testVersion(self):
process = subprocess.Popen(
['tensorflowjs_converter', '--version'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, _ = process.communicate()
self.assertEqual(0, process.returncode)
self.assertIn(
tf.compat.as_bytes('tensorflowjs %s' % tfjs.__version__),
tf.compat.as_bytes(stdout))
process = subprocess.Popen(
['tensorflowjs_converter', '-v'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, _ = process.communicate()
self.assertEqual(0, process.returncode)
self.assertIn(
tf.compat.as_bytes('tensorflowjs %s' % tfjs.__version__),
tf.compat.as_bytes(stdout))
class ConvertTfKerasSavedModelTest(tf.test.TestCase):
def setUp(self):
super(ConvertTfKerasSavedModelTest, self).setUp()
self._tmp_dir = tempfile.mkdtemp()
def tearDown(self):
if os.path.isdir(self._tmp_dir):
shutil.rmtree(self._tmp_dir)
super(ConvertTfKerasSavedModelTest, self).tearDown()
def _createSimpleSequentialModel(self):
model = keras.Sequential()
model.add(keras.layers.Reshape([2, 3], input_shape=[6]))
model.add(keras.layers.LSTM(10))
model.add(keras.layers.Dense(1, activation='sigmoid'))
return model
def _createNestedSequentialModel(self):
model = keras.Sequential()
model.add(keras.layers.Dense(6, input_shape=[10], activation='relu'))
model.add(self._createSimpleSequentialModel())
return model
def _createFunctionalModelWithWeights(self):
input1 = keras.Input(shape=[8])
input2 = keras.Input(shape=[10])
y = keras.layers.Concatenate()([input1, input2])
y = keras.layers.Dense(4, activation='softmax')(y)
model = keras.Model([input1, input2], y)
return model
def testConvertTfKerasNestedSequentialSavedModelIntoTfjsFormat(self):
with tf.Graph().as_default(), tf.compat.v1.Session():
x = np.random.randn(8, 10)
# 1. Run the model.predict(), store the result. Then saved the model
# as a SavedModel.
model = self._createNestedSequentialModel()
y = model.predict(x)
keras.experimental.export_saved_model(model, self._tmp_dir)
# 2. Convert the keras saved model to tfjs format.
tfjs_output_dir = os.path.join(self._tmp_dir, 'tfjs')
# Implicit value of --output_format: tfjs_layers_model
process = subprocess.Popen([
'tensorflowjs_converter', '--input_format', 'keras_saved_model',
self._tmp_dir, tfjs_output_dir
])
process.communicate()
self.assertEqual(0, process.returncode)
model_json_path = os.path.join(tfjs_output_dir, 'model.json')
self.assertTrue(os.path.isfile(model_json_path))
# 3. Convert the tfjs model to keras h5 format.
new_h5_path = os.path.join(self._tmp_dir, 'new_h5.h5')
process = subprocess.Popen([
'tensorflowjs_converter', '--input_format', 'tfjs_layers_model',
'--output_format', 'keras', model_json_path, new_h5_path])
process.communicate()
self.assertEqual(0, process.returncode)
self.assertTrue(os.path.isfile(new_h5_path))
# 4. Load the model back and assert on the equality of the predict
# results.
model_prime = keras.models.load_model(new_h5_path)
new_y = model_prime.predict(x)
self.assertAllClose(y, new_y)
def testConvertTfKerasFunctionalSavedModelIntoTfjsFormat(self):
with tf.Graph().as_default(), tf.compat.v1.Session():
x1 = np.random.randn(4, 8)
x2 = np.random.randn(4, 10)
# 1. Run the model.predict(), store the result. Then saved the model
# as a SavedModel.
model = self._createFunctionalModelWithWeights()
y = model.predict([x1, x2])
keras.experimental.export_saved_model(model, self._tmp_dir)
# 2. Convert the keras saved model to tfjs format.
tfjs_output_dir = os.path.join(self._tmp_dir, 'tfjs')
# Use explicit --output_format value: tfjs_layers_model
process = subprocess.Popen([
'tensorflowjs_converter', '--input_format', 'keras_saved_model',
'--output_format', 'tfjs_layers_model',
self._tmp_dir, tfjs_output_dir
])
process.communicate()
self.assertEqual(0, process.returncode)
model_json_path = os.path.join(tfjs_output_dir, 'model.json')
self.assertTrue(os.path.isfile(model_json_path))
# 3. Convert the tfjs model to keras h5 format.
new_h5_path = os.path.join(self._tmp_dir, 'new_h5.h5')
process = subprocess.Popen([
'tensorflowjs_converter', '--input_format', 'tfjs_layers_model',
'--output_format', 'keras', model_json_path, new_h5_path])
process.communicate()
self.assertEqual(0, process.returncode)
self.assertTrue(os.path.isfile(new_h5_path))
# 4. Load the model back and assert on the equality of the predict
# results.
model_prime = keras.models.load_model(new_h5_path)
new_y = model_prime.predict([x1, x2])
self.assertAllClose(y, new_y)
def testUsingIncorrectKerasSavedModelRaisesError(self):
with tf.Graph().as_default(), tf.compat.v1.Session():
x = np.random.randn(8, 10)
# 1. Run the model.predict(), store the result. Then saved the model
# as a SavedModel.
model = self._createNestedSequentialModel()
y = model.predict(x)
keras.experimental.export_saved_model(model, self._tmp_dir)
# 2. Convert the keras saved model to tfjs format.
tfjs_output_dir = os.path.join(self._tmp_dir, 'tfjs')
# Use incorrect --input_format value: keras
process = subprocess.Popen(
[
'tensorflowjs_converter', '--input_format', 'keras',
self._tmp_dir, tfjs_output_dir
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
_, stderr = process.communicate()
self.assertIn(
b'Expected path to point to an HDF5 file, '
b'but it points to a directory', tf.compat.as_bytes(stderr))
def testConvertTfjsLayersModelIntoShardedWeights(self):
with tf.Graph().as_default(), tf.compat.v1.Session():
x = np.random.randn(8, 10)
# 1. Run the model.predict(), store the result. Then saved the model
# as a SavedModel.
model = self._createNestedSequentialModel()
y = model.predict(x)
weights = model.get_weights()
total_weight_bytes = sum(np.size(w) for w in weights) * 4
keras.experimental.export_saved_model(model, self._tmp_dir)
# 2. Convert the keras saved model to tfjs_layers_model format.
tfjs_output_dir = os.path.join(self._tmp_dir, 'tfjs')
# Implicit value of --output_format: tfjs_layers_model
process = subprocess.Popen([
'tensorflowjs_converter', '--input_format', 'keras_saved_model',
self._tmp_dir, tfjs_output_dir
])
process.communicate()
self.assertEqual(0, process.returncode)
# 3. Convert the tfjs_layers_model to another tfjs_layers_model,
# with sharded weights.
weight_shard_size_bytes = int(total_weight_bytes * 0.3)
# Due to the shard size, there ought to be 4 shards after conversion.
sharded_model_dir = os.path.join(self._tmp_dir, 'tfjs_sharded')
process = subprocess.Popen([
'tensorflowjs_converter', '--input_format', 'tfjs_layers_model',
'--output_format', 'tfjs_layers_model',
'--weight_shard_size_bytes', str(weight_shard_size_bytes),
os.path.join(tfjs_output_dir, 'model.json'), sharded_model_dir
])
process.communicate()
self.assertEqual(0, process.returncode)
# 4. Check the sharded weight files and their sizes.
weight_files = sorted(
glob.glob(os.path.join(sharded_model_dir, 'group*.bin')))
self.assertEqual(len(weight_files), 4)
weight_file_sizes = [os.path.getsize(f) for f in weight_files]
self.assertEqual(sum(weight_file_sizes), total_weight_bytes)
self.assertEqual(weight_file_sizes[0], weight_file_sizes[1])
self.assertEqual(weight_file_sizes[0], weight_file_sizes[2])
self.assertLess(weight_file_sizes[3], weight_file_sizes[0])
# 5. Convert the sharded tfjs_layers_model back into a keras h5 file.
new_h5_path = os.path.join(self._tmp_dir, 'new_h5.h5')
process = subprocess.Popen([
'tensorflowjs_converter', '--input_format', 'tfjs_layers_model',
os.path.join(sharded_model_dir, 'model.json'), new_h5_path
])
process.communicate()
self.assertEqual(0, process.returncode)
with tf.Graph().as_default(), tf.compat.v1.Session():
# 6. Load the keras model and check the predict() output is close to
# before.
new_model = keras.models.load_model(new_h5_path)
new_y = new_model.predict(x)
self.assertAllClose(new_y, y)
def testConvertTfjsLayersModelWithQuantization(self):
with tf.Graph().as_default(), tf.compat.v1.Session():
x = np.random.randn(8, 10)
# 1. Run the model.predict(), store the result. Then saved the model
# as a SavedModel.
model = self._createNestedSequentialModel()
y = model.predict(x)
weights = model.get_weights()
total_weight_bytes = sum(np.size(w) for w in weights) * 4
keras.experimental.export_saved_model(model, self._tmp_dir)
# 2. Convert the keras saved model to tfjs_layers_model format.
tfjs_output_dir = os.path.join(self._tmp_dir, 'tfjs')
# Implicit value of --output_format: tfjs_layers_model
process = subprocess.Popen([
'tensorflowjs_converter', '--input_format', 'keras_saved_model',
self._tmp_dir, tfjs_output_dir
])
process.communicate()
self.assertEqual(0, process.returncode)
# 3. Convert the tfjs_layers_model to another tfjs_layers_model,
# with uint16 quantization.
weight_shard_size_bytes = int(total_weight_bytes * 0.3)
# Due to the shard size, there ought to be 4 shards after conversion.
sharded_model_dir = os.path.join(self._tmp_dir, 'tfjs_sharded')
process = subprocess.Popen([
'tensorflowjs_converter', '--input_format', 'tfjs_layers_model',
'--output_format', 'tfjs_layers_model',
'--quantization_bytes', '2',
os.path.join(tfjs_output_dir, 'model.json'), sharded_model_dir
])
process.communicate()
self.assertEqual(0, process.returncode)
# 4. Check the quantized weight file and its size.
weight_files = sorted(
glob.glob(os.path.join(sharded_model_dir, 'group*.bin')))
self.assertEqual(len(weight_files), 1)
weight_file_size = os.path.getsize(weight_files[0])
# The size of the weight file should reflect the uint16 quantization.
self.assertEqual(weight_file_size, total_weight_bytes // 2)
def testConvertTfjsLayersModelToTfjsGraphModel(self):
x = np.random.randn(8, 10)
# 1. Create a model for testing.
model = keras.Sequential()
model.add(keras.layers.Dense(10, activation='relu', input_shape=[4]))
model.add(keras.layers.Dense(1, activation='sigmoid'))
h5_path = os.path.join(self._tmp_dir, 'model.h5')
model.save(h5_path)
# 2. Convert the keras saved model to tfjs_layers_model format.
layers_model_output_dir = os.path.join(self._tmp_dir, 'tfjs_layers')
# Implicit value of --output_format: tfjs_layers_model
process = subprocess.Popen([
'tensorflowjs_converter', '--input_format', 'keras',
h5_path, layers_model_output_dir
])
process.communicate()
self.assertEqual(0, process.returncode)
# 3. Convert the tfjs_layers_model to another tfjs_graph_model.
graph_model_dir = os.path.join(self._tmp_dir, 'tfjs_graph')
process = subprocess.Popen([
'tensorflowjs_converter', '--input_format', 'tfjs_layers_model',
'--output_format', 'tfjs_graph_model',
os.path.join(layers_model_output_dir, 'model.json'), graph_model_dir
])
process.communicate()
self.assertEqual(0, process.returncode)
# 4. Check the model.json and weight file and its size.
self.assertTrue(os.path.isfile(os.path.join(graph_model_dir, 'model.json')))
weight_files = sorted(
glob.glob(os.path.join(graph_model_dir, 'group*.bin')))
self.assertEqual(len(weight_files), 1)
if __name__ == '__main__':
tf.test.main()
| 2.171875 | 2 |
script.ezclean/resources/lib/modules/skinz.py | rrosajp/script.ezclean | 5 | 1931 | <reponame>rrosajp/script.ezclean<gh_stars>1-10
# -*- coding: UTF-8 -*-
import glob, os, re, shutil, time, xbmc
from resources.lib.modules import control
try: import json as simplejson
except: import simplejson
ADDONS = os.path.join(control.HOMEPATH, 'addons')
def currSkin():
return control.skin
def getOld(old):
try:
old = '"%s"' % old
query = '{"jsonrpc":"2.0", "method":"Settings.GetSettingValue","params":{"setting":%s}, "id":1}' % (old)
response = control.jsonrpc(query)
response = simplejson.loads(response)
        if 'result' in response:
            if 'value' in response['result']:
                return response['result']['value']
except:
pass
return None
def setNew(new, value):
try:
new = '"%s"' % new
value = '"%s"' % value
query = '{"jsonrpc":"2.0", "method":"Settings.SetSettingValue","params":{"setting":%s,"value":%s}, "id":1}' % (new, value)
response = control.jsonrpc(query)
except:
pass
return None
def swapSkins(skin):
old = 'lookandfeel.skin'
value = skin
current = getOld(old)
new = old
setNew(new, value)
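# --- Illustrative usage (not part of the original module; skin ids are examples) ---
# Reading and switching the active Kodi skin goes through the JSON-RPC
# Settings wrappers above, e.g.:
#   current = getOld('lookandfeel.skin')   # -> e.g. 'skin.estuary'
#   swapSkins('skin.confluence')           # queues Settings.SetSettingValue via setNew()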
def lookandFeelData(do='save'):
scan = ['lookandfeel.enablerssfeeds', 'lookandfeel.font', 'lookandfeel.rssedit', 'lookandfeel.skincolors', 'lookandfeel.skintheme', 'lookandfeel.skinzoom', 'lookandfeel.soundskin', 'lookandfeel.startupwindow', 'lookandfeel.stereostrength']
if do == 'save':
for item in scan:
query = '{"jsonrpc":"2.0", "method":"Settings.GetSettingValue","params":{"setting":"%s"}, "id":1}' % (item)
response = control.jsonrpc(query)
if not 'error' in response:
match = re.compile('{"value":(.+?)}').findall(str(response))
control.setSetting(item.replace('lookandfeel', 'default'), match[0])
control.log("%s saved to %s" % (item, match[0]))
else:
for item in scan:
value = setting(item.replace('lookandfeel', 'default'))
query = '{"jsonrpc":"2.0", "method":"Settings.SetSettingValue","params":{"setting":"%s","value":%s}, "id":1}' % (item, value)
response = control.jsonrpc(query)
control.log("%s restored to %s" % (item, value))
def defaultSkin():
control.log("[Default Skin Check]")
tempgui = os.path.join(USERDATAPATH, 'guitemp.xml')
gui = tempgui if os.path.exists(tempgui) else GUISETTINGS
if not os.path.exists(gui): return False
control.log("Reading gui file: %s" % gui)
guif = open(gui, 'r+')
msg = guif.read().replace('\n','').replace('\r','').replace('\t','').replace(' ',''); guif.close()
control.log("Opening gui settings")
match = re.compile('<lookandfeel>.+?<ski.+?>(.+?)</skin>.+?</lookandfeel>').findall(msg)
control.log("Matches: %s" % str(match))
if len(match) > 0:
skinid = match[0]
addonxml = os.path.join(ADDONS, match[0], 'addon.xml')
if os.path.exists(addonxml):
addf = open(addonxml, 'r+')
msg2 = addf.read().replace('\n','').replace('\r','').replace('\t',''); addf.close()
match2 = re.compile('<addon.+?ame="(.+?)".+?>').findall(msg2)
if len(match2) > 0: skinname = match2[0]
else: skinname = 'no match'
else: skinname = 'no file'
control.log("[Default Skin Check] Skin name: %s" % skinname)
control.log("[Default Skin Check] Skin id: %s" % skinid)
control.setSetting('defaultskin', skinid)
control.setSetting('defaultskinname', skinname)
control.setSetting('defaultskinignore', 'false')
if os.path.exists(tempgui):
control.log("Deleting Temp Gui File.")
os.remove(tempgui)
control.log("[Default Skin Check] End")
def checkSkin():
control.loga("Invalid Skin Check Start")
DEFAULTSKIN = setting('defaultskin')
DEFAULTNAME = setting('defaultskinname')
DEFAULTIGNORE = setting('defaultskinignore')
gotoskin = False
if not DEFAULTSKIN == '':
if os.path.exists(os.path.join(ADDONS, DEFAULTSKIN)):
if DIALOG.yesno(AddonTitle, "[COLOR %s]It seems that the skin has been set back to [COLOR %s]%s[/COLOR]" % (COLOR2, COLOR1, SKIN[5:].title()), "Would you like to set the skin back to:[/COLOR]", '[COLOR %s]%s[/COLOR]' % (COLOR1, DEFAULTNAME)):
gotoskin = DEFAULTSKIN
gotoname = DEFAULTNAME
else: control.loga("Skin was not reset"); control.setSetting('defaultskinignore', 'true'); gotoskin = False
else: control.setSetting('defaultskin', ''); control.setSetting('defaultskinname', ''); DEFAULTSKIN = ''; DEFAULTNAME = ''
if DEFAULTSKIN == '':
skinname = []
skinlist = []
for folder in glob.glob(os.path.join(ADDONS, 'skin.*/')):
xml = "%s/addon.xml" % folder
if os.path.exists(xml):
f = open(xml,mode='r'); g = f.read().replace('\n','').replace('\r','').replace('\t',''); f.close();
match = re.compile('<addon.+?id="(.+?)".+?>').findall(g)
match2 = re.compile('<addon.+?name="(.+?)".+?>').findall(g)
control.loga("%s: %s" % (folder, str(match[0])))
if len(match) > 0: skinlist.append(str(match[0])); skinname.append(str(match2[0]))
else: control.loga("ID not found for %s" % folder)
else: control.loga("ID not found for %s" % folder)
if len(skinlist) > 0:
if len(skinlist) > 1:
if DIALOG.yesno(control.AddonTitle, "[COLOR %s]It seems that the skin has been set back to [COLOR %s]%s[/COLOR]" % (COLOR2, COLOR1, SKIN[5:].title()), "Would you like to view a list of avaliable skins?[/COLOR]"):
choice = DIALOG.select("Select skin to switch to!", skinname)
if choice == -1: control.loga("Skin was not reset"); control.setSetting('defaultskinignore', 'true')
else:
gotoskin = skinlist[choice]
gotoname = skinname[choice]
else: control.loga("Skin was not reset"); control.setSetting('defaultskinignore', 'true')
else:
if DIALOG.yesno(control.AddonTitle, "It seems that the skin has been set back to [B]%s[/B]" % (SKIN[5:].title()), "Would you like to set the skin back to: ", '[B] %s [/B]' % (skinname[0])):
gotoskin = skinlist[0]
gotoname = skinname[0]
else: control.loga("Skin was not reset"); control.setSetting('defaultskinignore', 'true')
else: control.loga("No skins found in addons folder."); control.setSetting('defaultskinignore', 'true'); gotoskin = False
if gotoskin:
swapSkins(gotoskin)
x = 0
control.sleep(1000)
while not control.condVisibility("Window.isVisible(yesnodialog)") and x < 150:
x += 1
control.sleep(200)
if control.condVisibility("Window.isVisible(yesnodialog)"):
control.execute('SendClick(11)')
lookandFeelData('restore')
else: control.Notify(control.AddonTitle,'Skin Swap Timed Out!')
control.loga("Invalid Skin Check End")
| 2.21875 | 2 |
pyhap/characteristic.py | bdraco/HAP-python | 0 | 1932 | <filename>pyhap/characteristic.py
"""
All things for a HAP characteristic.
A Characteristic is the smallest unit of the smart home, e.g.
a temperature measuring or a device status.
"""
import logging
from pyhap.const import (
HAP_PERMISSION_READ,
HAP_REPR_DESC,
HAP_REPR_FORMAT,
HAP_REPR_IID,
HAP_REPR_MAX_LEN,
HAP_REPR_PERM,
HAP_REPR_TYPE,
HAP_REPR_VALID_VALUES,
HAP_REPR_VALUE,
)
from .util import hap_type_to_uuid, uuid_to_hap_type
logger = logging.getLogger(__name__)
# ### HAP Format ###
HAP_FORMAT_BOOL = "bool"
HAP_FORMAT_INT = "int"
HAP_FORMAT_FLOAT = "float"
HAP_FORMAT_STRING = "string"
HAP_FORMAT_ARRAY = "array"
HAP_FORMAT_DICTIONARY = "dictionary"
HAP_FORMAT_UINT8 = "uint8"
HAP_FORMAT_UINT16 = "uint16"
HAP_FORMAT_UINT32 = "uint32"
HAP_FORMAT_UINT64 = "uint64"
HAP_FORMAT_DATA = "data"
HAP_FORMAT_TLV8 = "tlv8"
HAP_FORMAT_DEFAULTS = {
HAP_FORMAT_BOOL: False,
HAP_FORMAT_INT: 0,
HAP_FORMAT_FLOAT: 0.0,
HAP_FORMAT_STRING: "",
HAP_FORMAT_ARRAY: "",
HAP_FORMAT_DICTIONARY: "",
HAP_FORMAT_UINT8: 0,
HAP_FORMAT_UINT16: 0,
HAP_FORMAT_UINT32: 0,
HAP_FORMAT_UINT64: 0,
HAP_FORMAT_DATA: "",
HAP_FORMAT_TLV8: "",
}
HAP_FORMAT_NUMERICS = (
HAP_FORMAT_INT,
HAP_FORMAT_FLOAT,
HAP_FORMAT_UINT8,
HAP_FORMAT_UINT16,
HAP_FORMAT_UINT32,
HAP_FORMAT_UINT64,
)
# ### HAP Units ###
HAP_UNIT_ARC_DEGREE = "arcdegrees"
HAP_UNIT_CELSIUS = "celsius"
HAP_UNIT_LUX = "lux"
HAP_UNIT_PERCENTAGE = "percentage"
HAP_UNIT_SECONDS = "seconds"
# ### Properties ###
PROP_FORMAT = "Format"
PROP_MAX_VALUE = "maxValue"
PROP_MIN_STEP = "minStep"
PROP_MIN_VALUE = "minValue"
PROP_PERMISSIONS = "Permissions"
PROP_UNIT = "unit"
PROP_VALID_VALUES = "ValidValues"
PROP_NUMERIC = (PROP_MAX_VALUE, PROP_MIN_VALUE, PROP_MIN_STEP, PROP_UNIT)
class CharacteristicError(Exception):
"""Generic exception class for characteristic errors."""
class Characteristic:
"""Represents a HAP characteristic, the smallest unit of the smart home.
A HAP characteristic is some measurement or state, like battery status or
the current temperature. Characteristics are contained in services.
Each characteristic has a unique type UUID and a set of properties,
like format, min and max values, valid values and others.
"""
__slots__ = (
"broker",
"display_name",
"properties",
"type_id",
"value",
"getter_callback",
"setter_callback",
"service",
"_uuid_str",
"_loader_display_name",
)
def __init__(self, display_name, type_id, properties):
"""Initialise with the given properties.
:param display_name: Name that will be displayed for this
characteristic, i.e. the `description` in the HAP representation.
:type display_name: str
:param type_id: UUID unique to this type of characteristic.
:type type_id: uuid.UUID
:param properties: A dict of properties, such as Format,
ValidValues, etc.
:type properties: dict
"""
self.broker = None
self.display_name = display_name
self.properties = properties
self.type_id = type_id
self.value = self._get_default_value()
self.getter_callback = None
self.setter_callback = None
self.service = None
self._uuid_str = uuid_to_hap_type(type_id)
self._loader_display_name = None
def __repr__(self):
"""Return the representation of the characteristic."""
return "<characteristic display_name={} value={} properties={}>".format(
self.display_name, self.value, self.properties
)
def _get_default_value(self):
"""Return default value for format."""
if self.properties.get(PROP_VALID_VALUES):
return min(self.properties[PROP_VALID_VALUES].values())
value = HAP_FORMAT_DEFAULTS[self.properties[PROP_FORMAT]]
return self.to_valid_value(value)
def get_value(self):
"""This is to allow for calling `getter_callback`
:return: Current Characteristic Value
"""
if self.getter_callback:
# pylint: disable=not-callable
self.value = self.to_valid_value(value=self.getter_callback())
return self.value
def to_valid_value(self, value):
"""Perform validation and conversion to valid value."""
if self.properties.get(PROP_VALID_VALUES):
if value not in self.properties[PROP_VALID_VALUES].values():
error_msg = "{}: value={} is an invalid value.".format(
self.display_name, value
)
logger.error(error_msg)
raise ValueError(error_msg)
elif self.properties[PROP_FORMAT] == HAP_FORMAT_STRING:
value = str(value)[:256]
elif self.properties[PROP_FORMAT] == HAP_FORMAT_BOOL:
value = bool(value)
elif self.properties[PROP_FORMAT] in HAP_FORMAT_NUMERICS:
if not isinstance(value, (int, float)):
error_msg = "{}: value={} is not a numeric value.".format(
self.display_name, value
)
logger.error(error_msg)
raise ValueError(error_msg)
value = min(self.properties.get(PROP_MAX_VALUE, value), value)
value = max(self.properties.get(PROP_MIN_VALUE, value), value)
return value
def override_properties(self, properties=None, valid_values=None):
"""Override characteristic property values and valid values.
:param properties: Dictionary with values to override the existing
properties. Only changed values are required.
:type properties: dict
:param valid_values: Dictionary with values to override the existing
valid_values. Valid values will be set to new dictionary.
:type valid_values: dict
"""
if not properties and not valid_values:
raise ValueError("No properties or valid_values specified to override.")
if properties:
self.properties.update(properties)
if valid_values:
self.properties[PROP_VALID_VALUES] = valid_values
try:
self.value = self.to_valid_value(self.value)
except ValueError:
self.value = self._get_default_value()
def set_value(self, value, should_notify=True):
"""Set the given raw value. It is checked if it is a valid value.
If not set_value will be aborted and an error message will be
displayed.
`Characteristic.setter_callback`
You may also define a `setter_callback` on the `Characteristic`.
This will be called with the value being set as the arg.
.. seealso:: Characteristic.value
:param value: The value to assign as this Characteristic's value.
:type value: Depends on properties["Format"]
:param should_notify: Whether a the change should be sent to
subscribed clients. Notify will be performed if the broker is set.
:type should_notify: bool
"""
logger.debug("set_value: %s to %s", self.display_name, value)
value = self.to_valid_value(value)
self.value = value
if should_notify and self.broker:
self.notify()
def client_update_value(self, value, sender_client_addr=None):
"""Called from broker for value change in Home app.
Change self.value to value and call callback.
"""
logger.debug(
"client_update_value: %s to %s from client: %s",
self.display_name,
value,
sender_client_addr,
)
self.value = value
self.notify(sender_client_addr)
if self.setter_callback:
# pylint: disable=not-callable
self.setter_callback(value)
def notify(self, sender_client_addr=None):
"""Notify clients about a value change. Sends the value.
.. seealso:: accessory.publish
.. seealso:: accessory_driver.publish
"""
self.broker.publish(self.value, self, sender_client_addr)
# pylint: disable=invalid-name
def to_HAP(self):
"""Create a HAP representation of this Characteristic.
Used for json serialization.
:return: A HAP representation.
:rtype: dict
"""
hap_rep = {
HAP_REPR_IID: self.broker.iid_manager.get_iid(self),
HAP_REPR_TYPE: self._uuid_str,
HAP_REPR_PERM: self.properties[PROP_PERMISSIONS],
HAP_REPR_FORMAT: self.properties[PROP_FORMAT],
}
# HAP_REPR_DESC (description) is optional and takes up
# quite a bit of space in the payload. Only include it
# if it has been changed from the default loader version
if (
not self._loader_display_name
or self._loader_display_name != self.display_name
):
hap_rep[HAP_REPR_DESC] = self.display_name
value = self.get_value()
if self.properties[PROP_FORMAT] in HAP_FORMAT_NUMERICS:
hap_rep.update(
{k: self.properties[k] for k in self.properties.keys() & PROP_NUMERIC}
)
if PROP_VALID_VALUES in self.properties:
hap_rep[HAP_REPR_VALID_VALUES] = sorted(
self.properties[PROP_VALID_VALUES].values()
)
elif self.properties[PROP_FORMAT] == HAP_FORMAT_STRING:
if len(value) > 64:
hap_rep[HAP_REPR_MAX_LEN] = min(len(value), 256)
if HAP_PERMISSION_READ in self.properties[PROP_PERMISSIONS]:
hap_rep[HAP_REPR_VALUE] = value
return hap_rep
@classmethod
def from_dict(cls, name, json_dict, from_loader=False):
"""Initialize a characteristic object from a dict.
:param json_dict: Dictionary containing at least the keys `Format`,
`Permissions` and `UUID`
:type json_dict: dict
"""
type_id = hap_type_to_uuid(json_dict.pop("UUID"))
char = cls(name, type_id, properties=json_dict)
if from_loader:
char._loader_display_name = ( # pylint: disable=protected-access
char.display_name
)
return char
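# --- Illustrative usage sketch (not part of the original module) ---
# Characteristics are normally created through the loader, but one can be built
# directly; the UUID and property values below are examples only:
#
#   from uuid import UUID
#   brightness = Characteristic(
#       "Brightness",
#       UUID("00000008-0000-1000-8000-0026BB765291"),
#       {PROP_FORMAT: HAP_FORMAT_INT,
#        PROP_PERMISSIONS: ["pr", "pw", "ev"],
#        PROP_MIN_VALUE: 0, PROP_MAX_VALUE: 100, PROP_MIN_STEP: 1},
#   )
#   brightness.set_value(150)   # clamped by to_valid_value(), so .value becomes 100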
| 2.703125 | 3 |
configs/_base_/datasets/uniter/vqa_dataset_uniter.py | linxi1158/iMIX | 23 | 1933 | <filename>configs/_base_/datasets/uniter/vqa_dataset_uniter.py<gh_stars>10-100
dataset_type = 'UNITER_VqaDataset'
data_root = '/home/datasets/mix_data/UNITER/VQA/'
train_datasets = ['train']
test_datasets = ['minival'] # name not in use, but have defined one to run
vqa_cfg = dict(
train_txt_dbs=[
data_root + 'vqa_train.db',
data_root + 'vqa_trainval.db',
data_root + 'vqa_vg.db',
],
train_img_dbs=[
data_root + 'coco_train2014/',
data_root + 'coco_val2014',
data_root + 'vg/',
],
val_txt_db=data_root + 'vqa_devval.db',
val_img_db=data_root + 'coco_val2014/',
ans2label_file=data_root + 'ans2label.json',
max_txt_len=60,
conf_th=0.2,
max_bb=100,
min_bb=10,
num_bb=36,
train_batch_size=20480, # 5120,
val_batch_size=40960, # 10240,
)
BUCKET_SIZE = 8192
train_data = dict(
samples_per_gpu=vqa_cfg['train_batch_size'],
workers_per_gpu=4,
pin_memory=True,
batch_sampler=dict(
type='TokenBucketSampler',
bucket_size=BUCKET_SIZE,
batch_size=vqa_cfg['train_batch_size'],
drop_last=True,
size_multiple=8,
),
data=dict(
type=dataset_type,
datacfg=vqa_cfg,
train_or_val=True,
),
)
test_data = dict(
samples_per_gpu=vqa_cfg['val_batch_size'],
workers_per_gpu=4,
batch_sampler=dict(
type='TokenBucketSampler',
bucket_size=BUCKET_SIZE,
batch_size=vqa_cfg['val_batch_size'],
drop_last=False,
size_multiple=8,
),
pin_memory=True,
data=dict(
type=dataset_type,
datacfg=vqa_cfg,
train_or_val=False,
),
)
post_processor = dict(
type='Evaluator',
metrics=[dict(type='UNITER_AccuracyMetric')],
dataset_converters=[dict(type='UNITER_DatasetConverter')],
)
| 1.546875 | 2 |
students/k3340/laboratory_works/laboratory_works/Arlakov_Denis/laboratiry_work_2_and_3/lab/django-react-ecommerce-master/home/urls.py | TonikX/ITMO_ICT_-WebProgramming_2020 | 10 | 1934 | from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic import TemplateView
urlpatterns = [
path('api-auth/', include('rest_framework.urls')),
path('rest-auth/', include('rest_auth.urls')),
path('rest-auth/registration/', include('rest_auth.registration.urls')),
path('admin/', admin.site.urls),
path('api/', include('core.api.urls')),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT)
if not settings.DEBUG:
urlpatterns += [re_path(r'^.*',
TemplateView.as_view(template_name='index.html'))]
| 1.726563 | 2 |
20200416_Socialmail/mailserverUi.py | karta1782310/python-docx-automated-report-generation | 0 | 1935 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# All of the basic Qt widgets come from these imports
from PyQt5.QtWebEngineWidgets import QWebEngineView
from PyQt5.QtWidgets import (QApplication, QMainWindow, QWidget, QGridLayout, QMessageBox, QFileDialog,
QLabel, QLineEdit, QPushButton, QComboBox, QCheckBox, QDateTimeEdit,
QTextEdit, QTabWidget, QTableWidget, QTableWidgetItem, QHeaderView)
from PyQt5.QtGui import QPalette, QColor, QBrush
from PyQt5.QtCore import Qt, QDateTime
from pyqtgraph import GraphicsLayoutWidget, setConfigOption, setConfigOptions
import qdarkstyle, sys
import mylibrary.genmail as gm
from GenAndSendMail import insert_send_mail
from server.database import Database
from server.sendmail import Smtp
from server.client import Client
from email import generator
from pandas import DataFrame
from copy import deepcopy
class SubWindow(QWidget):
def __init__(self):
super().__init__()
self.resize(400,100)
self.main_layout = QGridLayout()
self.setLayout(self.main_layout)
self.setStyleSheet(qdarkstyle.load_stylesheet_pyqt5())
self.main_layout.addWidget(QLabel('收件人'), 0, 0, 1, 1)
self.in_recipient = QLineEdit()
self.main_layout.addWidget(self.in_recipient, 0, 1, 1, 5)
self.btn_send = QPushButton('寄送')
self.main_layout.addWidget(self.btn_send, 1, 5, 1, 1)
class MailserverUi(QMainWindow):
def __init__(self):
super().__init__()
setConfigOption('background', '#19232D')
setConfigOption('foreground', 'd')
setConfigOptions(antialias = True)
# self.resize(720,500)
self.init_ui()
self.data_smtp = []
self.data_db = []
self.data_logs = []
self.data_temp_logs = []
# self.sub_win = SubWindow()
        # Default status bar
self.status = self.statusBar()
self.status.showMessage("開發者: 鄭鈺城, 聯絡資訊: <EMAIL>")
        # Title bar
self.setWindowTitle("社交郵件工程")
        self.setWindowOpacity(1) # window opacity
self.main_layout.setSpacing(0)
self.setStyleSheet(qdarkstyle.load_stylesheet_pyqt5())
self.main_widget.setStyleSheet(
"""
QComboBox::item:checked {
height: 12px;
border: 1px solid #32414B;
margin-top: 0px;
margin-bottom: 0px;
padding: 4px;
padding-left: 0px;
}
"""
)
def init_ui(self):
        # Create the main window widget
self.main_widget = QWidget()
        # Create the grid layout for the main widget
self.main_layout = QGridLayout()
        # Set the main widget's layout to the grid layout
self.main_widget.setLayout(self.main_layout)
        # Create the left-side widget
self.left_widget = QWidget()
self.left_widget.setObjectName('left_widget')
self.left_layout = QGridLayout()
self.left_widget.setLayout(self.left_layout)
        # Create the right-side widget
self.right_widget = QWidget()
self.right_widget.setObjectName('right_widget')
self.right_layout = QGridLayout()
self.right_widget.setLayout(self.right_layout)
        # The left widget sits at row 0, column 0, spanning 12 rows and 3 columns
self.main_layout.addWidget(self.left_widget, 0, 0, 12, 3)
        # The right widget sits at row 0, column 3, spanning 12 rows and 8 columns
self.main_layout.addWidget(self.right_widget, 0, 3, 12, 8)
        # Set the central widget of the window
self.setCentralWidget(self.main_widget)
        # Main feature buttons
self.btn_sendmail = QPushButton("發送信件")
self.btn_sendmail.clicked.connect(self.display_send_mail)
self.btn_smtp = QPushButton("系統設定")
self.btn_smtp.clicked.connect(self.display_smtp_setting)
self.btn_db = QPushButton("資料庫設定")
self.btn_db.clicked.connect(self.display_db_setting)
self.btn_update_eml = QPushButton("修改樣板")
self.btn_update_eml.clicked.connect(self.display_update_eml)
self.btn_get_logs = QPushButton("觸發明細")
self.btn_get_logs.clicked.connect(self.display_logs)
self.btn_download_logs = QPushButton("下載觸發明細")
self.btn_download_logs.clicked.connect(self.logs_download)
self.quit_btn = QPushButton("退出")
self.quit_btn.clicked.connect(self.quit_act)
self.left_layout.addWidget(self.btn_sendmail, 2, 0, 1, 3)
self.left_layout.addWidget(self.btn_smtp, 3, 0, 1, 3)
self.left_layout.addWidget(self.btn_db, 4, 0, 1, 3)
self.left_layout.addWidget(self.btn_update_eml, 5, 0, 1, 3)
self.left_layout.addWidget(self.btn_get_logs, 6, 0, 1, 3)
self.left_layout.addWidget(self.btn_download_logs, 7, 0, 1, 3)
self.left_layout.addWidget(self.quit_btn, 8, 0, 1, 3)
        # Main feature search field
self.in_data = QLineEdit()
self.in_data.setPlaceholderText("暫無")
self.left_layout.addWidget(self.in_data, 1, 0, 1, 3)
        # Main feature log
self.query_result = QTableWidget()
self.left_layout.addWidget(self.query_result, 9, 0, 2, 3)
self.query_result.verticalHeader().setVisible(False)
self.right_display = GraphicsLayoutWidget()
self.right_layout.addWidget(self.right_display, 0, 3, 12, 8)
        # Right-side widgets: sendmail
self.in_eml_type = QLineEdit()
self.in_eml_template = QLineEdit()
self.btn_eml_browse = QPushButton('瀏覽')
self.btn_eml_browse.clicked.connect(lambda: self.open_eml(self.in_eml_template))
self.in_recipient_group = QLineEdit()
self.in_recipient_excel = QLineEdit()
self.btn_recipient_browse = QPushButton('瀏覽')
self.btn_recipient_browse.clicked.connect(lambda: self.open_excel(self.in_recipient_excel))
self.in_annex_file = QLineEdit()
self.btn_annex_file = QPushButton('瀏覽')
self.btn_annex_file.clicked.connect(lambda: self.open_word(self.in_annex_file))
self.in_scheduler = QDateTimeEdit(QDateTime.currentDateTime())
self.in_scheduler.setCalendarPopup(True)
self.in_scheduler.setDisplayFormat('yyyy-MM-dd hh:mm')
self.cb_scheduler = QCheckBox('使用')
self.btn_sendmail_start = QPushButton('執行')
self.btn_sendmail_start.clicked.connect(self.send_mail)
        # Right-side widgets: smtp
self.in_smtp_host = QLineEdit()
self.in_smtp_port = QLineEdit()
self.in_smtp_user = QLineEdit()
self.in_smtp_password = QLineEdit()
self.cb_smtp_ssl = QCheckBox('使用')
self.in_smtp_test = QLineEdit()
self.btn_smtp_save = QPushButton('儲存')
self.btn_smtp_save.clicked.connect(lambda: self.save_data(self.data_smtp))
self.btn_smtp_test = QPushButton('測試')
self.btn_smtp_test.clicked.connect(self.show_sub_win)
        # Right-side widgets: db
self.in_db_host = QLineEdit()
self.in_db_port = QLineEdit()
self.in_db_user = QLineEdit()
self.in_db_password = QLineEdit()
self.in_db_database = QLineEdit()
self.in_db_domain = QLineEdit()
self.in_db_domain.setPlaceholderText('回收風險資訊動作的網址')
self.btn_db_save = QPushButton('儲存')
self.btn_db_save.clicked.connect(lambda: self.save_data(self.data_db))
        # Right-side widgets: update eml
self.in_edit_sender = QLineEdit()
self.in_edit_sender_name = QLineEdit()
self.cb_edit_annex = QCheckBox('是')
self.in_edit_annex = QLineEdit()
self.btn_edit_annex = QPushButton('瀏覽')
self.btn_edit_annex.clicked.connect(lambda: self.open_annex(self.in_edit_annex))
self.in_edit_subject = QLineEdit()
self.mail_tab = QTabWidget()
self.mail_tab.setDocumentMode(True)
self.mail_tab.currentChanged.connect(self.print_html)
self.mail_tab_1 = QWidget()
self.mail_tab_2 = QWidget()
self.mail_tab.addTab(self.mail_tab_1, 'Html')
self.mail_tab.addTab(self.mail_tab_2, 'Web')
self.tab_1 = QGridLayout()
self.tab_2 = QGridLayout()
self.tab_1.setContentsMargins(0,0,0,0)
self.tab_2.setContentsMargins(0,0,0,0)
self.mail_tab_1.setLayout(self.tab_1)
self.mail_tab_2.setLayout(self.tab_2)
self.in_edit_html = QTextEdit()
self.in_edit_web = QWebEngineView()
self.tab_1.addWidget(self.in_edit_html, 1, 1, 1, 1)
self.tab_2.addWidget(self.in_edit_web, 1, 1, 1, 1)
self.btn_edit_eml_reset = QPushButton('清除')
self.btn_edit_eml_reset.clicked.connect(self.eml_reset)
self.btn_edit_eml_read = QPushButton('讀取')
self.btn_edit_eml_read.clicked.connect(self.eml_open)
self.btn_edit_eml_save = QPushButton('儲存')
self.btn_edit_eml_save.clicked.connect(self.eml_save)
        # Right-side widgets: logs
self.tbw_logs = QTableWidget()
self.tbw_logs.verticalHeader().setVisible(False)
self.cmb_logs_choice = QComboBox()
self.in_logs_data = QLineEdit()
self.in_logs_data.setPlaceholderText("輸入資料")
self.btn_logs_search = QPushButton('執行')
self.btn_logs_search.clicked.connect(self.logs_change)
def display_send_mail(self):
self.clear_layout(self.right_layout)
labels = [ "信件類型 :", "信件模板 :", " 收件人群組 :", "收件人資料 :", '附件資料 :',"設定排程 :"]
for i, label in enumerate(labels):
self.right_layout.addWidget(QLabel(label), i, 3, 1, 1, Qt.AlignRight)
self.right_layout.addWidget(self.in_eml_type, 0, 4, 1, 7)
self.right_layout.addWidget(self.in_eml_template, 1, 4, 1, 6)
self.right_layout.addWidget(self.btn_eml_browse, 1, 10, 1, 1)
self.right_layout.addWidget(self.in_recipient_group, 2, 4, 1, 7)
self.right_layout.addWidget(self.in_recipient_excel, 3, 4, 1, 6)
self.right_layout.addWidget(self.btn_recipient_browse, 3, 10, 1, 1)
self.right_layout.addWidget(self.in_annex_file , 4, 4, 1, 6)
self.right_layout.addWidget(self.btn_annex_file, 4, 10, 1, 1)
self.right_layout.addWidget(self.in_scheduler, 5, 4, 1, 6)
self.right_layout.addWidget(self.cb_scheduler, 5, 10, 1, 1)
self.right_layout.addWidget(self.btn_sendmail_start, 6, 9, 1, 2)
def display_smtp_setting(self):
self.clear_layout(self.right_layout)
        # Add widgets on the right side
labels = ["SMTP HOST :", "SMTP PORT :", "SMTP 帳號 :", "SMTP 密碼 :", "SMTP SSL :", " 測試信件內容 :"]
for i, label in enumerate(labels):
self.right_layout.addWidget(QLabel(label), i, 3, 1, 1, Qt.AlignRight)
self.right_layout.addWidget(self.in_smtp_host, 0, 4, 1, 7)
self.right_layout.addWidget(self.in_smtp_port, 1, 4, 1, 7)
self.right_layout.addWidget(self.in_smtp_user, 2, 4, 1, 7)
self.right_layout.addWidget(self.in_smtp_password, 3, 4, 1, 7)
self.right_layout.addWidget(self.cb_smtp_ssl, 4, 4, 1, 7)
self.right_layout.addWidget(self.in_smtp_test, 5, 4, 1, 7)
self.right_layout.addWidget(self.btn_smtp_save, 6, 9, 1, 2)
self.right_layout.addWidget(self.btn_smtp_test, 6, 7, 1, 2)
def display_db_setting(self):
self.clear_layout(self.right_layout)
        # Add widgets on the right side
labels = ["資料庫 HOST :", "資料庫 PORT :", "資料庫 帳號 :", "資料庫 密碼 :", "使用資料庫名稱 :", "回收網址 :"]
for i, label in enumerate(labels):
self.right_layout.addWidget(QLabel(label), i, 3, 1, 1, Qt.AlignRight)
self.right_layout.addWidget(self.in_db_host, 0, 4, 1, 7)
self.right_layout.addWidget(self.in_db_port, 1, 4, 1, 7)
self.right_layout.addWidget(self.in_db_user, 2, 4, 1, 7)
self.right_layout.addWidget(self.in_db_password, 3, 4, 1, 7)
self.right_layout.addWidget(self.in_db_database, 4, 4, 1, 7)
self.right_layout.addWidget(self.in_db_domain, 5, 4, 1, 7)
self.right_layout.addWidget(self.btn_db_save, 6, 9, 1, 2)
def display_update_eml(self):
self.clear_layout(self.right_layout)
labels = ["寄件人 :", "寄件人名稱 :", " 是否加入附件 :", "附件名稱 :", "主旨 :", "內容 :"]
for i, label in enumerate(labels):
self.label = QLabel(label)
self.right_layout.addWidget(self.label, i, 3, 1, 1, Qt.AlignRight)
self.right_layout.addWidget(self.in_edit_sender, 0, 4, 1, 7)
self.right_layout.addWidget(self.in_edit_sender_name, 1, 4, 1, 7)
self.right_layout.addWidget(self.cb_edit_annex, 2, 4, 1, 7)
self.right_layout.addWidget(self.in_edit_annex, 3, 4, 1, 6)
self.right_layout.addWidget(self.btn_edit_annex, 3, 10, 1, 1)
self.right_layout.addWidget(self.in_edit_subject, 4, 4, 1, 7)
self.right_layout.addWidget(self.mail_tab, 5, 4, 6, 7)
self.right_layout.addWidget(self.btn_edit_eml_reset, 11, 5, 1, 2)
self.right_layout.addWidget(self.btn_edit_eml_read, 11, 7, 1, 2)
self.right_layout.addWidget(self.btn_edit_eml_save, 11, 9, 1, 2)
def display_logs(self):
self.data_temp_logs = []
self.tbw_logs.setRowCount(0)
self.clear_layout(self.right_layout)
self.right_layout.addWidget(self.tbw_logs, 1, 3, 11, 8)
self.right_layout.addWidget(QLabel('查詢 :'), 0, 3, 1, 1)
self.right_layout.addWidget(self.cmb_logs_choice, 0, 4, 1, 2)
self.right_layout.addWidget(self.in_logs_data, 0, 6, 1, 3)
self.right_layout.addWidget(self.btn_logs_search, 0, 9, 1, 2)
try:
db = Database(self.data_db[0], int(self.data_db[1]), self.data_db[2], self.data_db[3], self.data_db[4]) if self.data_db[:5] else Database()
self.data_logs = db.get_logs()
self.data_temp_logs = deepcopy(self.data_logs)
if self.data_logs:
row_num = len(self.data_logs)
col_num = len(self.data_logs[0])
col_lst = list(self.data_logs[0].keys())
self.cmb_logs_choice.clear()
self.cmb_logs_choice.addItems(col_lst)
self.tbw_logs.setRowCount(row_num)
self.tbw_logs.setColumnCount(col_num)
self.tbw_logs.horizontalHeader().setSectionResizeMode(QHeaderView.ResizeToContents)
self.tbw_logs.setHorizontalHeaderLabels(col_lst)
for i in range(row_num):
row_data = list(self.data_logs[i].values())
for j in range(col_num):
temp_data = row_data[j]
item = QTableWidgetItem(str(temp_data))
item.setForeground(QBrush(QColor(144, 182, 240)))
self.tbw_logs.setItem(i, j, item)
except:
QMessageBox.warning(self, 'Failed!', '資料庫連結失敗!', QMessageBox.Ok)
else:
db.__disconnect__()
def get_items_from_layout(self, layout):
return [layout.itemAt(i).widget() for i in range(layout.count())]
def save_data(self, data):
items = self.get_items_from_layout(self.right_layout)
data.clear()
try:
for item in items:
if type(item) == type(QLineEdit()):
data.append(item.text())
elif type(item) == type(QCheckBox()):
data.append(item.isChecked())
QMessageBox.information(self, 'Success!', '儲存成功!', QMessageBox.Ok)
except:
QMessageBox.warning(self, 'Failed!', '儲存失敗!', QMessageBox.Ok)
print(data)
def clear_layout(self, layout):
for i in reversed(range(layout.count())):
layout.itemAt(i).widget().setParent(None)
def open_eml(self, obj):
file_name, _ = QFileDialog.getOpenFileName(self, "選取檔案", "./", "Eml Files (*.eml)")
obj.setText(file_name)
def open_excel(self, obj):
file_name, _ = QFileDialog.getOpenFileName(self, "選取檔案", "./", "Excel Files (*.xlsx)")
obj.setText(file_name)
def open_word(self, obj):
file_name, _ = QFileDialog.getOpenFileName(self, "選取檔案", "./", "Word Files (*.doc *.docx)")
obj.setText(file_name)
def open_annex(self, obj):
file_name, _ = QFileDialog.getOpenFileName(self, "選取檔案", "./", "Annex Files (*.jpg *.png *.zip)")
org_files = obj.text()
all_files = org_files + ',' + file_name if org_files else file_name
obj.setText(all_files)
def print_html(self, index):
if index:
self.in_edit_web.setHtml(self.in_edit_html.toPlainText())
def send_mail(self):
eml_type = self.in_eml_type.text()
eml_file = self.in_eml_template.text()
user_group = self.in_recipient_group.text()
mail_excel = self.in_recipient_excel.text()
annex_file = self.in_annex_file.text()
url = self.data_db[5] if self.data_db else 'http://yumail.myvnc.com'
try:
if self.cb_scheduler.isChecked():
my_time = self.in_scheduler.text()+':00'
client = Client()
client.send(self.data_smtp[:4], self.data_db[:5], eml_type, eml_file, user_group, mail_excel, annex_file, url, my_time)
QMessageBox.information(self, 'Success!', '排程設定成功!', QMessageBox.Ok)
else:
sm = Smtp(self.data_smtp[0], int(self.data_smtp[1]), self.data_smtp[2], self.data_smtp[3]) if self.data_smtp else Smtp()
db = Database(self.data_db[0], int(self.data_db[1]), self.data_db[2], self.data_db[3], self.data_db[4]) if self.data_db else Database()
insert_send_mail(eml_type, eml_file, user_group, mail_excel, sm, db, annex=annex_file, url=url)
sm.close()
db.__disconnect__()
QMessageBox.information(self, 'Success!', '信件寄出成功!', QMessageBox.Ok)
except:
QMessageBox.warning(self, 'Failed!', '信件寄出失敗!', QMessageBox.Ok)
def show_sub_win(self):
if self.data_smtp:
self.sub_win = SubWindow()
self.sub_win.btn_send.clicked.connect(self.send_test)
self.sub_win.show()
else:
QMessageBox.warning(self, 'Failed!', '請確認有無 SMTP 資料!', QMessageBox.Ok)
def send_test(self):
try:
if self.data_smtp:
mailserver = Smtp(self.data_smtp[0], int(self.data_smtp[1]), self.data_smtp[2], self.data_smtp[3])
mail_msg = gm.gen_test_eml(['Test Email', '測試寄件人', self.data_smtp[2], self.sub_win.in_recipient.text()], self.data_smtp[5])
error = mailserver.send(mail_msg.as_string(), self.data_smtp[2], self.sub_win.in_recipient.text())
mailserver.close()
if error:
                    QMessageBox.warning(self, 'Warning!', '信件寄出成功!\nWarning: '+error, QMessageBox.Ok)
else:
QMessageBox.information(self, 'Success!', '信件寄出成功!', QMessageBox.Ok)
self.sub_win.in_recipient.clear()
except:
QMessageBox.warning(self, 'Failed!', '信件寄出失敗!', QMessageBox.Ok)
def eml_open(self):
self.in_edit_html.clear()
file_name, _ = QFileDialog.getOpenFileName(self, "選取檔案", "./", "Eml Files (*.eml)")
if not file_name:
return
header, html = gm.get_msg(file_name)
self.in_edit_sender.setText(header[2])
self.in_edit_sender_name.setText(header[1])
self.in_edit_subject.setText(header[0])
self.in_edit_html.insertPlainText(html)
def eml_save(self):
header, msg = [], ''
header.append(self.in_edit_subject.text())
header.append(self.in_edit_sender_name.text())
header.append(self.in_edit_sender.text())
header.append('<EMAIL>')
annex_file = self.in_edit_annex.text().split(',')
html = self.in_edit_html.toPlainText()
if not any(header[:3]) or not html:
return
try:
msg = gm.gen_eml(header, html, annex_file) if self.cb_edit_annex.isChecked() else gm.gen_eml(header, html)
file_path, _ = QFileDialog.getSaveFileName(self, '另存為...', './', 'Excel Files (*.eml)')
with open(file_path, 'w') as outfile:
gen = generator.Generator(outfile)
gen.flatten(msg)
QMessageBox.information(self, 'Success!', '儲存成功!', QMessageBox.Ok)
except:
QMessageBox.warning(self, 'Failed!', '儲存失敗!', QMessageBox.Ok)
def eml_reset(self):
items = self.get_items_from_layout(self.right_layout)
for item in items:
if type(item) == type(QLineEdit()):
item.clear()
self.cb_edit_annex.setChecked(False)
self.in_edit_html.clear()
def logs_change(self):
if not self.data_logs or not self.in_logs_data.text():
return
self.data_temp_logs = []
self.tbw_logs.setRowCount(0)
# header = {'郵件類型':'type', '郵件主旨':'subject', '使用者群組':'user_group', '使用者信箱':'user_email'}
condition = self.cmb_logs_choice.currentText()
content = self.in_logs_data.text()
row_num = len(self.data_logs)
col_num = len(self.data_logs[0])
# self.tbw_logs.setRowCount(row_num)
self.tbw_logs.setColumnCount(col_num)
for i in range(row_num):
switch = False
if condition == 'date' and content in str(self.data_logs[i][condition]):
switch = True
elif self.data_logs[i][condition] == content:
switch = True
if switch:
self.tbw_logs.insertRow(self.tbw_logs.rowCount())
row_data = list(self.data_logs[i].values())
self.data_temp_logs.append(self.data_logs[i])
for j in range(col_num):
temp_data = row_data[j]
item = QTableWidgetItem(str(temp_data))
item.setForeground(QBrush(QColor(144, 182, 240)))
self.tbw_logs.setItem(self.tbw_logs.rowCount()-1, j, item)
def logs_download(self):
if self.data_temp_logs:
try:
file_path, _ = QFileDialog.getSaveFileName(self, '另存為...', './', 'Excel Files (*.xlsx)')
if not file_path:
return
df = DataFrame(self.data_temp_logs)
df.to_excel(file_path, index=False)
QMessageBox.information(self, 'Success!', '儲存成功!', QMessageBox.Ok)
except:
QMessageBox.warning(self, 'Failed!', '儲存失敗!', QMessageBox.Ok)
else:
QMessageBox.warning(self, "缺少資料", "請確認是否有資料可以下載", QMessageBox.Ok)
def quit_act(self):
        # sender is the object that emitted the signal
sender = self.sender()
        print(sender.text() + ' button was pressed')
qApp = QApplication.instance()
qApp.quit()
def main():
app = QApplication(sys.argv)
gui = MailserverUi()
gui.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main() | 2.28125 | 2 |
nntools/layers/corrmm.py | 317070/nntools | 0 | 1936 | """
GpuCorrMM-based convolutional layers
"""
import numpy as np
import theano
import theano.tensor as T
from theano.sandbox.cuda.basic_ops import gpu_contiguous
from theano.sandbox.cuda.blas import GpuCorrMM
from .. import init
from .. import nonlinearities
from . import base
# base class for all layers that rely on GpuCorrMM directly
class MMLayer(base.Layer):
pass
class Conv2DMMLayer(MMLayer):
def __init__(self, input_layer, num_filters, filter_size, strides=(1, 1), border_mode=None, untie_biases=False,
W=init.Uniform(), b=init.Constant(0.), nonlinearity=nonlinearities.rectify, pad=None,
flip_filters=False):
super(Conv2DMMLayer, self).__init__(input_layer)
if nonlinearity is None:
self.nonlinearity = nonlinearities.identity
else:
self.nonlinearity = nonlinearity
self.num_filters = num_filters
self.filter_size = filter_size
self.strides = strides
self.untie_biases = untie_biases
self.flip_filters = flip_filters
if border_mode is not None and pad is not None:
raise RuntimeError("You cannot specify both 'border_mode' and 'pad'. To avoid ambiguity, please specify only one of them.")
elif border_mode is None and pad is None:
# no option specified, default to valid mode
self.pad = (0, 0)
elif border_mode is not None:
if border_mode == 'valid':
self.pad = (0, 0)
elif border_mode == 'full':
                self.pad = (self.filter_size[0] - 1, self.filter_size[1] - 1)
elif border_mode == 'same':
# only works for odd filter size, but the even filter size case is probably not worth supporting.
self.pad = ((self.filter_size[0] - 1) // 2, (self.filter_size[1] - 1) // 2)
else:
raise RuntimeError("Unsupported border_mode for Conv2DMMLayer: %s" % border_mode)
else:
self.pad = pad
self.W = self.create_param(W, self.get_W_shape())
if b is None:
self.b = None
elif self.untie_biases:
output_shape = self.get_output_shape()
self.b = self.create_param(b, (num_filters, output_shape[2], output_shape[3]))
else:
self.b = self.create_param(b, (num_filters,))
self.corr_mm_op = GpuCorrMM(subsample=self.strides, pad=self.pad)
def get_W_shape(self):
num_input_channels = self.input_layer.get_output_shape()[1]
return (self.num_filters, num_input_channels, self.filter_size[0], self.filter_size[1])
def get_params(self):
return [self.W] + self.get_bias_params()
def get_bias_params(self):
return [self.b] if self.b is not None else []
def get_output_shape_for(self, input_shape):
batch_size = input_shape[0]
input_width, input_height = input_shape[2:4]
output_width = (input_width + 2*self.pad[0] - self.filter_size[0]) // self.strides[0] + 1
output_height = (input_height + 2*self.pad[1] - self.filter_size[1]) // self.strides[1] + 1
return (batch_size, self.num_filters, output_width, output_height)
def get_output_for(self, input, *args, **kwargs):
filters = self.W
if self.flip_filters:
filters = filters[:, :, ::-1, ::-1] # flip width, height
contiguous_filters = gpu_contiguous(filters)
contiguous_input = gpu_contiguous(input)
conved = self.corr_mm_op(contiguous_input, contiguous_filters)
if self.b is None:
activation = conved
elif self.untie_biases:
activation = conved + self.b.dimshuffle('x', 0, 1, 2)
else:
activation = conved + self.b.dimshuffle('x', 0, 'x', 'x')
return self.nonlinearity(activation)
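

# Illustrative usage sketch (added; not part of the original module). The
# `InputLayer` name and the (128, 3, 32, 32) input shape are assumptions for
# the example only -- the point is the output-shape arithmetic above:
#
#   l_in = InputLayer((128, 3, 32, 32))                 # batch, channels, w, h
#   l_conv = Conv2DMMLayer(l_in, num_filters=16, filter_size=(3, 3),
#                          border_mode='same')
#   # 'same' -> pad = (1, 1); output width = (32 + 2*1 - 3) // 1 + 1 = 32,
#   # so l_conv.get_output_shape() == (128, 16, 32, 32)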
| 2.71875 | 3 |
tests/python/correctness/simple_test_aux_index.py | dubey/weaver | 163 | 1937 | <filename>tests/python/correctness/simple_test_aux_index.py
#! /usr/bin/env python
#
# ===============================================================
# Description: Sanity check for fresh install.
#
# Created: 2014-08-12 16:42:52
#
# Author: <NAME>, <EMAIL>
#
# Copyright (C) 2013, Cornell University, see the LICENSE file
# for licensing agreement
# ===============================================================
#
import sys
try:
import weaver.client as client
except ImportError:
import client
config_file=''
if len(sys.argv) > 1:
config_file = sys.argv[1]
# create client object
c = client.Client('172.16.17.32', 2002, config_file)
# check aux index
assert c.aux_index()
# 1. create node for user ayush
c.begin_tx()
c.create_node('ayush')
c.set_node_properties({'type': 'user', 'age': '25'}, 'ayush')
c.end_tx()
# 2. create node for user egs
c.begin_tx()
c.create_node('egs')
c.set_node_property('type', 'user', 'egs')
c.end_tx()
# 3. ayush follows egs
c.begin_tx()
c.create_edge('ayush', 'egs', 'e1')
c.set_edge_property(edge='e1', key='type', value='follows')
c.create_edge('egs', 'ayush', 'e2')
c.set_edge_property(edge='e2', key='type', value='followed_by')
c.end_tx()
# 4. add a post and restrict visibility to followers only
c.begin_tx()
c.create_node('post')
c.set_node_property('type', 'post', 'post')
c.set_node_property('visibility', 'followers', 'post')
e3 = c.create_edge('egs', 'post')
c.set_edge_property(edge=e3, key='type', value='posted')
c.end_tx()
# 5. 'like' the post
c.begin_tx()
e4 = c.create_edge('post', 'ayush')
c.set_edge_property(edge=e4, key='type', value='liked_by')
c.end_tx()
# 6. list all the people who like egs's post
return_nodes = c.traverse('egs', {'type': 'user'}).out_edge({'type': 'posted'}).node({'type': 'post'}).out_edge({'type': 'liked_by'}).node({'type': 'user'}).execute()
assert len(return_nodes) == 1, 'traversal returned incorrect #nodes'
assert 'ayush' in return_nodes, 'traversal returned bad node handle'
# 7. try to create node with same handle as before
c.begin_tx()
c.create_node('ayush')
try:
c.end_tx()
assert False, 'create node passed'
except client.WeaverError:
pass
# 8. try to create edge with same handle as before
c.begin_tx()
c.create_edge('ayush', 'egs', 'e1')
try:
c.end_tx()
assert False, 'create edge passed'
except client.WeaverError:
pass
# 9. add auxiliary handles to nodes
c.begin_tx()
c.add_alias('ad688', 'ayush')
c.add_alias('el33th4x0r', 'egs')
c.end_tx()
# 10. list all the people who like egs's post
# this time with aliases instead of handles
return_nodes = c.traverse('el33th4x0r', {'type': 'user'}).out_edge({'type': 'posted'}).node({'type': 'post'}).out_edge({'type': 'liked_by'}).node({'type': 'user'}).execute()
assert len(return_nodes) == 1, 'traversal returned incorrect #nodes'
assert 'ayush' in return_nodes, 'traversal returned bad node handle'
# 11. get node and check it is valid
ad = c.get_node('ayush')
assert 'ad688' in ad.aliases
assert 'type' in ad.properties
assert 'user' in ad.properties['type']
assert 'age' in ad.properties
assert '25' in ad.properties['age']
assert 'e1' in ad.out_edges
print 'Correctly executed 11 transactions of varying complexity, pass simple_test.'
print 'Success, you have a working Weaver setup!'
| 2.09375 | 2 |
ldtools/helpers.py | dmr/Ldtools | 3 | 1938 | <reponame>dmr/Ldtools<gh_stars>1-10
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
try:
unicode
except NameError:
basestring = unicode = str # Python 3
import logging
import rdflib
from rdflib import compare
logger = logging.getLogger("ldtools")
RESET_SEQ = "\033[0m"
COLOR_SEQ = "\033[1;%dm"
BOLD_SEQ = "\033[1m"
# The background is set with 40 plus the number of the color, and
# the foreground with 30
# These are the sequences need to get colored ouput
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
COL = {
'DEBUG': BLUE, 'INFO': MAGENTA,
'WARNING': YELLOW, 'CRITICAL': YELLOW, 'ERROR': RED}
def set_colored_logger(verbosity_level):
class ColoredFormatter(logging.Formatter):
def format(self, record):
if record.levelname in COL:
record.levelname = COLOR_SEQ % (
30 + COL[record.levelname]) + record.levelname + RESET_SEQ
record.msg = unicode(record.msg)
record.msg = COLOR_SEQ % (30 + GREEN) + record.msg + RESET_SEQ
return logging.Formatter.format(self, record)
formatter = ColoredFormatter("%(asctime)s %(name)s %(funcName)s:%(lineno)d"
" %(levelname)s: %(message)s")
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger = logging.getLogger()
logger.addHandler(handler)
logger2 = logging.getLogger("ldtools._add_property")
logger2.setLevel(logging.INFO)
mapper = {1: logging.DEBUG,
2: logging.INFO,
3: logging.WARNING,
4: logging.ERROR,
5: None}
try:
log_level = mapper[verbosity_level]
except KeyError:
log_level = mapper[2]
if log_level:
logger.setLevel(log_level)
return logger
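

# Illustrative usage (added; not part of the original module):
#   log = set_colored_logger(2)          # verbosity 2 -> INFO, per `mapper`
#   log.info("colored INFO message")     # rendered via the ColoredFormatter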
def my_graph_diff(graph1, graph2):
"""Compares graph2 to graph1 and highlights everything that changed.
Colored if pygments available"""
# quick fix for wrong type
if not type(graph1) == type(graph2) == rdflib.Graph:
if type(graph1) == rdflib.ConjunctiveGraph:
g1contexts = list(graph1.contexts())
assert len(g1contexts) == 1
graph1 = g1contexts[0]
if type(graph2) == rdflib.ConjunctiveGraph:
g2contexts = list(graph2.contexts())
assert len(g2contexts) == 1
graph2 = g2contexts[0]
# Return if both graphs are isomorphic
iso1 = compare.to_isomorphic(graph1)
iso2 = compare.to_isomorphic(graph2)
if graph1.identifier == graph2.identifier:
str_bit = u"The 2 '%s' Graphs" % graph1.identifier
else:
str_bit = (u"Graphs '%s' and '%s'"
% (graph1.identifier, graph2.identifier))
if iso1 == iso2:
logger.debug(u"%s are isomorphic" % str_bit)
return
print(u"Differences between %s." % str_bit)
in_both, in_first, in_second = compare.graph_diff(iso1, iso2)
def dump_nt_sorted(g):
return sorted(g.serialize(format='nt').splitlines())
sorted_first = dump_nt_sorted(in_first)
sorted_second = dump_nt_sorted(in_second)
import difflib
diff = difflib.unified_diff(
sorted_first,
sorted_second,
u'Original',
u'Current',
lineterm=''
)
try:
from pygments import highlight
from pygments.formatters import terminal
from pygments.lexers import web
lexer = web.XmlLexer()
formatter = terminal.TerminalFormatter()
print(highlight(u'\n'.join(diff), lexer, formatter))
except ImportError:
logger.info("Install pygments for colored diffs")
print(u'\n'.join(diff))
except UnicodeDecodeError:
print(u"Only in first", unicode(sorted_first))
print(u"Only in second", unicode(sorted_second))
| 1.960938 | 2 |
fakenet/diverters/debuglevels.py | AzzOnFire/flare-fakenet-ng | 0 | 1939 | # Debug print levels for fine-grained debug trace output control
DNFQUEUE = (1 << 0) # netfilterqueue
DGENPKT = (1 << 1) # Generic packet handling
DGENPKTV = (1 << 2) # Generic packet handling with TCP analysis
DCB = (1 << 3) # Packet handlign callbacks
DPROCFS = (1 << 4) # procfs
DIPTBLS = (1 << 5) # iptables
DNONLOC = (1 << 6) # Nonlocal-destined datagrams
DDPF = (1 << 7) # DPF (Dynamic Port Forwarding)
DDPFV = (1 << 8) # DPF (Dynamic Port Forwarding) Verbose
DIPNAT = (1 << 9) # IP redirection for nonlocal-destined datagrams
DMANGLE = (1 << 10) # Packet mangling
DPCAP = (1 << 11) # Pcap write logic
DIGN = (1 << 12) # Packet redirect ignore conditions
DFTP = (1 << 13) # FTP checks
DMISC = (1 << 27) # Miscellaneous
DCOMP = 0x0fffffff # Component mask
DFLAG = 0xf0000000 # Flag mask
DEVERY = 0x0fffffff # Log everything, low verbosity
DEVERY2 = 0x8fffffff # Log everything, complete verbosity
DLABELS = {
DNFQUEUE: 'NFQUEUE',
DGENPKT: 'GENPKT',
DGENPKTV: 'GENPKTV',
DCB: 'CB',
DPROCFS: 'PROCFS',
DIPTBLS: 'IPTABLES',
DNONLOC: 'NONLOC',
DDPF: 'DPF',
DDPFV: 'DPFV',
DIPNAT: 'IPNAT',
DMANGLE: 'MANGLE',
DPCAP: 'PCAP',
DIGN: 'IGN',
DFTP: 'FTP',
DIGN | DFTP: 'IGN-FTP',
DMISC: 'MISC',
}
DLABELS_INV = {v.upper(): k for k, v in DLABELS.items()}
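

# Illustrative sketch (added; not part of the original module) of how these
# bitmask levels can be combined and filtered. Only constants defined above
# are used; the filtering pattern itself is an assumed usage.
if __name__ == '__main__':
    debug_level = DIPTBLS | DPCAP            # enable iptables and pcap tracing
    for component, label in DLABELS.items():
        if debug_level & DCOMP & component:  # only IPTABLES and PCAP match
            print('enabled component: %s' % label)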
| 2.1875 | 2 |
multichannel_lstm/train.py | zhr1201/Multi-channel-speech-extraction-using-DNN | 65 | 1940 | <filename>multichannel_lstm/train.py
'''
Script for training the model
'''
import tensorflow as tf
import numpy as np
from input import BatchGenerator
from model import MultiRnn
import time
from datetime import datetime
import os
import matplotlib as mpl
mpl.use('Agg')
from matplotlib import pyplot as plt
sum_dir = 'sum' # dir to write summary
train_dir = 'ckpt' # dir to store the model
data_dir = 'train.pkl' # dir of the data set
NEFF = 129 # effective FFT points
batch_size = 128
num_steps = 20
epochs = 2000
cell_type = 'NL_LSTM'
state_size = 256
output_size = 129
num_layer = 3
learning_rate = 0.0001
# build the model
rnn_model = MultiRnn(
cell_type, state_size, output_size,
batch_size, num_layer, learning_rate, num_steps)
# input data and referene data placeholder
in_data = tf.placeholder(
tf.float32, [batch_size, num_steps, 2 * NEFF])
ref_data = tf.placeholder(
tf.float32, [batch_size, num_steps, NEFF])
# make inference
init_state, final_state, inf_data = rnn_model.inference(in_data)
# compute loss
loss = rnn_model.loss(inf_data, ref_data)
saver = tf.train.Saver(tf.all_variables())
summary_op = tf.merge_all_summaries()
train_op = rnn_model.train(loss)
batch_gen = BatchGenerator(data_dir, batch_size, num_steps, epochs)
with tf.Session() as sess:
summary_writer = tf.train.SummaryWriter(
sum_dir, sess.graph)
sess.run(tf.initialize_all_variables())
steps = 0
# generator for epoch data
for idx, epoch in enumerate(batch_gen.gen_epochs()):
training_state = None
# generator for batch data
for f_data, b_data, r_data, v_data in epoch:
start_time = time.time()
steps += 1
in_data_np = np.concatenate((f_data, b_data), axis=2)
if steps % 100 == 0:
feed_dict = {in_data: in_data_np, ref_data: r_data}
if training_state is not None:
feed_dict[init_state] = training_state
# training the net
loss_value, training_state, _, summary_str, test_inf = sess.run(
[loss, final_state, train_op, summary_op, inf_data], feed_dict)
duration = time.time() - start_time
sec_per_batch = float(duration)
examples_per_sec = batch_size / duration
format_str = (
'%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
'sec/batch, epoch %d)')
print (format_str % (datetime.now(), steps, loss_value,
examples_per_sec, sec_per_batch,
idx))
summary_writer.add_summary(summary_str, steps)
else:
feed_dict = {in_data: in_data_np, ref_data: r_data}
if training_state is not None:
feed_dict[init_state] = training_state
loss_value, training_state, _ = sess.run(
[loss, final_state, train_op], feed_dict)
if steps % 10000 == 0:
checkpoint_path = os.path.join(train_dir, 'model.ckpt')
saver.save(sess, checkpoint_path, global_step=steps)
| 2.828125 | 3 |
python_modules/dagster/dagster/daemon/cli/__init__.py | elsenorbw/dagster | 0 | 1941 | <reponame>elsenorbw/dagster
import os
import sys
import threading
import time
import warnings
from contextlib import ExitStack
import click
import pendulum
from dagster import __version__
from dagster.core.instance import DagsterInstance
from dagster.daemon.controller import (
DEFAULT_DAEMON_HEARTBEAT_TOLERANCE_SECONDS,
DagsterDaemonController,
all_daemons_healthy,
all_daemons_live,
daemon_controller_from_instance,
debug_daemon_heartbeats,
get_daemon_status,
)
from dagster.utils.interrupts import capture_interrupts, raise_interrupts_as
def _get_heartbeat_tolerance():
tolerance = os.getenv(
"DAGSTER_DAEMON_HEARTBEAT_TOLERANCE",
)
return int(tolerance) if tolerance else DEFAULT_DAEMON_HEARTBEAT_TOLERANCE_SECONDS
@click.command(
name="run",
help="Run any daemons configured on the DagsterInstance.",
)
def run_command():
with capture_interrupts():
with DagsterInstance.get() as instance:
if instance.is_ephemeral:
raise Exception(
"dagster-daemon can't run using an in-memory instance. Make sure "
"the DAGSTER_HOME environment variable has been set correctly and that "
"you have created a dagster.yaml file there."
)
with daemon_controller_from_instance(
instance, heartbeat_tolerance_seconds=_get_heartbeat_tolerance()
) as controller:
controller.check_daemon_loop()
@click.command(
name="health-check",
help="DEPRECATED, use liveness-check instead",
)
def health_check_command():
warnings.warn("health-check is deprecated. Use liveness-check instead.")
with DagsterInstance.get() as instance:
if all_daemons_healthy(instance, heartbeat_tolerance_seconds=_get_heartbeat_tolerance()):
click.echo("Daemon healthy")
else:
click.echo("Daemon not healthy")
sys.exit(1)
@click.command(
name="liveness-check",
help="Check for recent heartbeats from the daemon.",
)
@click.option(
"--heartbeat-tolerance",
required=False,
default=DEFAULT_DAEMON_HEARTBEAT_TOLERANCE_SECONDS,
help="How long (in seconds) to allow a daemon to go without heartbeating before failing the dagster-daemon process.",
)
def liveness_check_command(heartbeat_tolerance):
    with DagsterInstance.get() as instance:
        if all_daemons_live(instance, heartbeat_tolerance_seconds=heartbeat_tolerance):
click.echo("Daemon live")
else:
click.echo("Daemon(s) not running")
sys.exit(1)
@click.command(
name="wipe",
help="Wipe all heartbeats from storage.",
)
def wipe_command():
with DagsterInstance.get() as instance:
instance.wipe_daemon_heartbeats()
click.echo("Daemon heartbeats wiped")
@click.command(
name="heartbeat",
help="Read and write a heartbeat",
)
def debug_heartbeat_command():
with DagsterInstance.get() as instance:
debug_daemon_heartbeats(instance)
@click.command(
name="heartbeat-dump",
help="Log all heartbeat statuses",
)
def debug_heartbeat_dump_command():
with DagsterInstance.get() as instance:
for daemon_type in instance.get_required_daemon_types():
click.echo(get_daemon_status(instance, daemon_type))
@click.group(
commands={"heartbeat": debug_heartbeat_command, "heartbeat-dump": debug_heartbeat_dump_command}
)
def debug_group():
"Daemon debugging utils"
def create_dagster_daemon_cli():
commands = {
"run": run_command,
"health-check": health_check_command,
"liveness-check": liveness_check_command,
"wipe": wipe_command,
"debug": debug_group,
}
@click.group(commands=commands)
@click.version_option(version=__version__)
def group():
"CLI tools for working with the dagster daemon process."
return group
cli = create_dagster_daemon_cli()
def main():
cli(obj={}) # pylint:disable=E1123
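

# Example invocations (added for illustration; the command names are the ones
# registered above, while the `dagster-daemon` console-script name is assumed
# from packaging):
#
#   dagster-daemon run              # run all daemons configured on the instance
#   dagster-daemon liveness-check   # exit non-zero if heartbeats are stale
#   dagster-daemon debug heartbeat-dump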
| 2.09375 | 2 |
tests/exhaustive/nfl_tests.py | atklaus/sportsreference | 1 | 1942 | <filename>tests/exhaustive/nfl_tests.py
import sys, os
sys.path.append(os.path.dirname(os.path.dirname(sys.path[0])))
from sportsreference.nfl.teams import Teams
for team in Teams():
print(team.name)
for player in team.roster.players:
print(player.name)
for game in team.schedule:
print(game.dataframe)
print(game.dataframe_extended)
| 2.796875 | 3 |
rust-old/python/examples/map_fields.py | SerebryakovMA/quelea | 3 | 1943 | import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import sys
sys.path.append("../")
from quelea import *
nx = 217
ny = 133
x0 = 0
x1 = 30 # lambdas
y0 = 0
y1 = 20 # lambdas
xs = np.linspace(x0, x1, nx)
ys = np.linspace(y0, y1, ny)
# 2d array of (x, y, z, t)
coords = np.array( [ [x, y, 0, 0] for x in xs for y in ys ] )
# for map_fields function this should be converted from 2D to 1D array
coords = coords.reshape((4 * nx * ny,))
ftype = 1 # plane wave
a0 = 1 # normalized field amplitude
omega = 1 # frequency
fparam = [a0, 1, 0, 0, 0, 1, 0, 0, omega] # parameters of the plane wave
ex, ey, ez, bx, by, bz = map_fields(coords, ftype, fparam)
# now convert to 2d arrays
ex = ex.reshape((nx, ny))
ey = ey.reshape((nx, ny))
ez = ez.reshape((nx, ny))
bx = bx.reshape((nx, ny))
by = by.reshape((nx, ny))
bz = bz.reshape((nx, ny))
ex = ex.transpose()
ey = ey.transpose()
ez = ez.transpose()
bx = bx.transpose()
by = by.transpose()
bz = bz.transpose()
plt.imshow(ey, cmap = 'RdYlBu', origin = 'lower', extent = [x0, x1, y0, y1])
plt.colorbar()
plt.clim(-a0, a0)
plt.savefig("map_fields.pdf")
| 2.390625 | 2 |
test.py | t-kaichi/hyperspoof | 10 | 1944 | <gh_stars>1-10
import os
from absl import app
from absl import flags
import numpy as np
import tqdm
from tensorflow.keras import Model
from albumentations import (
Compose, HorizontalFlip, RandomBrightness,RandomContrast,
ShiftScaleRotate, ToFloat, VerticalFlip)
from utils import reset_tf
from eval_utils import calc_score_variance
from models import build_seg_model, build_pixel_mlp_class_model
from VegetableSequence import VegetableDataset, VegetableSequence
from temporal_random_seed import TemporalRandomSeed
import myFlags
FLAGS = flags.FLAGS
def main(argv):
reset_tf(FLAGS.device)
ds_info = VegetableDataset(FLAGS.data_path)
dim = ds_info.hsi_dims
cats = ds_info.get_categories()
# spoof file path
assert FLAGS.spoof_type == "print" or FLAGS.spoof_type == "replay"
spooffn = "224_224.m.rf.npy"
spoofdir = '03' if FLAGS.spoof_type == 'print' else '04' # "04": replay
spooffns = [os.path.join(ds_info.DATASET_ROOT_PATH, str(i).zfill(2),
"05", spoofdir, spooffn) for i in cats]
# dataset generation
input_shape = (224, 224, dim)
AUGMENTATIONS_ALL = Compose([
HorizontalFlip(p=0.5),
VerticalFlip(p=0.2),
RandomContrast(limit=0.001, p=0.5),
RandomBrightness(limit=0.001, p=0.5),
ShiftScaleRotate(
shift_limit=0.3, scale_limit=0.9,
rotate_limit=30, border_mode=4, p=0.8),# cv2.BORDER_REFLECT_101
ToFloat(max_value=1024)
])
AUGMENTATIONS_SIMPLE = Compose([
ToFloat(max_value=1024)
])
test_aug_gen = VegetableSequence(dataset=ds_info, instance_ids=[5],
sample_ids=[1,2], random_state=2, batch_size=32,
augmentations=AUGMENTATIONS_ALL, isTest=True)
# build and load models
print("building model")
nb_classes = ds_info.object_categories
seg_model = build_seg_model(input_shape=input_shape)
seg_model.load_weights(FLAGS.seg_model)
pix_class_model = build_pixel_mlp_class_model(
nb_classes=nb_classes, input_shape=(1,dim))
pix_class_model.load_weights(FLAGS.class_model)
penultimate_feat_extractor = Model(inputs=pix_class_model.input,
outputs=pix_class_model.get_layer("penultimate").output)
def predict_pixel_merge(xs):
_xs_seg = np.argmax(seg_model.predict(xs), axis=-1)
assert len(_xs_seg) == len(xs)
_var_fs = [] # variance of the penultimate features
for i in range(len(xs)):
_x = xs[i]
_x_seg = _xs_seg[i]
_x_pixels = _x[_x_seg > 0]
_x_pixels = _x_pixels[:, np.newaxis, :]
_f_pixels = penultimate_feat_extractor.predict(_x_pixels,
batch_size=224*224*dim).reshape(-1, FLAGS.penultimate_nodes)
_var_f = np.sum(np.var(_f_pixels, axis=0))
_var_fs.append(_var_f)
return _var_fs
predict_func = predict_pixel_merge
var_fs = []
true_labels = []
# process live images
    for i in tqdm.trange(FLAGS.live_augs, desc="live augmentations"):
        for batch in tqdm.tqdm(test_aug_gen, desc="live augmentations batch"):
xs, ys = batch
var_f = predict_func(xs)
var_fs.extend(var_f)
true_labels.extend(np.argmax(ys, axis=1))
# process spoof images
with TemporalRandomSeed(2021):
for fn in tqdm.tqdm(spooffns, desc="spoofs"):
x = np.load(fn).astype("uint16")
xs_aug = np.array([AUGMENTATIONS_ALL(image=x)["image"]
for i in range(FLAGS.spoof_augs)])
var_f = predict_func(xs_aug)
var_fs.extend(var_f)
true_labels.extend([10000] * FLAGS.spoof_augs) # spoof label: 10000
# calculate accuracy
true_labels = np.array(true_labels)
var_fs = np.array(var_fs)
bin_labels, uncertainties, results = calc_score_variance(true_labels, var_fs)
# save results
expr_name = parentdirname(FLAGS.class_model)
save_result_cache(expr_name, bin_labels, uncertainties, results)
return 0
def save_result_cache(expr_name, labels, uncertainties, results):
dn = os.path.join(FLAGS.out_path, expr_name)
os.makedirs(dn, exist_ok=True)
np.save(os.path.join(dn, "binary_labels.npy"), labels)
np.save(os.path.join(dn, "uncertainties.npy"), uncertainties)
with open(os.path.join(dn, "results.txt"), "w") as f:
for i, result in enumerate(["TNR95: ", "Detection acc.: ", "ROC: "]):
f.write(result + str(results[i]) + "\n")
print("saved to " + dn)
def parentdirname(path):
return os.path.basename(os.path.dirname(path))
if __name__ == "__main__":
app.run(main) | 1.8125 | 2 |
generator/apps.py | TheJacksonLaboratory/jaxid_generator | 2 | 1945 | from django.conf import settings
from suit import apps
from suit.apps import DjangoSuitConfig
from suit.menu import ParentItem, ChildItem
APP_NAME = settings.APP_NAME
WIKI_URL = settings.WIKI_URL
class SuitConfig(DjangoSuitConfig):
name = 'suit'
verbose_name = 'Mbiome Core JAXid Generator'
site_title = 'Mbiome Core JAXid Tracking'
site_header = site_title
index_title = verbose_name
layout = 'vertical'
list_per_page = 35
# header_date_format = 'l, d-M-o'
# header_time_format = 'H:i e'
menu = (
ParentItem('JAX Id Record Lists',
use_first_child_url=True,
url='',
children=[
ChildItem('JAXid Records', model='id_generate.jaxiddetail'),
ChildItem(model='id_generate.boxid'),
ChildItem(model='id_generate.plateid'),
],
icon='fa fa-list-ul'),
ParentItem('Reference Data',
use_first_child_url=True,
url='',
children=[
ChildItem(model='id_generate.projectcode'),
ChildItem(model='id_generate.nucleicacidtype'),
ChildItem(model='id_generate.sampletype'),
ChildItem(model='id_generate.sequencingtype'),
],
icon='fa fa-list'),
ParentItem(
label='Generate new JAXid''s',
url=f'/{APP_NAME}/manage/id_generate/jaxiddetail/import/',
permissions='id_generate.change_jaxiddetail',
icon='fa fa-rocket'),
ParentItem(
            label="Generate new Box ID's",
url=f'/{APP_NAME}/manage/id_generate/boxid/import/',
permissions='id_generate.change_boxid',
icon='fa fa-cube'),
ParentItem(
            label="Generate new Plate ID's",
url=f'/{APP_NAME}/manage/id_generate/plateid/import/',
permissions='id_generate.change_plateid',
icon='fa fa-circle-o-notch'),
ParentItem(
label='Authorization',
children=[
ChildItem('Staff', model='auth.user'),
ChildItem(model='auth.group'),
ChildItem(model='admin.logentry'),
],
icon='fa fa-user-circle'),
ParentItem(
label='SOP and Request Sheet',
use_first_child_url=False,
url='',
children=[
ChildItem('View JAX ID Request SOP',
target_blank=True,
url=f'{WIKI_URL}/Wet%20Lab%20SOPs/Forms/All.aspx?parent=1&id=%2Fsites%2FMicrobiomeCoreWiki%2FWet%20Lab%20SOPs%2FJAX%20ID%20Request%20SOP%2Edocx'),
ChildItem('View JAX ID Request Template Sheet',
url=f'{WIKI_URL}/Sample Sheet Templates/Forms/All.aspx?parent=1&id=%2Fsites%2FMicrobiomeCoreWiki%2FSample Sheet Templates%2FJAX ID Request Template Sample Sheet.xlsx'),
],
icon='fa fa-file'),
)
# menu_handler = None
menu_show_home = False
# Show changelist top actions only if any row is selected
toggle_changelist_top_actions = False
# # Enables two column layout for change forms with submit row on the right
form_submit_on_right = False
# Hide name/"original" column for all tabular inlines.
# May be overridden in Inline class by suit_form_inlines_hide_original = False
#form_inlines_hide_original = False
form_size = {
'default': apps.SUIT_FORM_SIZE_LARGE,
'widgets': {
'AutosizedTextarea': apps.SUIT_FORM_SIZE_X_LARGE,
'Textarea': apps.SUIT_FORM_SIZE_X_LARGE,
},
}
# form_size setting can be overridden in ModelAdmin using suit_form_size parameter
#
# Example:
# ----------------------------------------------
# suit_form_size = {
# 'default': 'col-xs-12 col-sm-2', 'col-xs-12 col-sm-10',
# 'fields': {
# 'field_name': SUIT_FORM_SIZE_LARGE,
# 'field_name2': SUIT_FORM_SIZE_X_LARGE,
# },
# 'widgets': {
# 'widget_class_name': SUIT_FORM_SIZE_FULL,
# 'AdminTextareaWidget': SUIT_FORM_SIZE_FULL,
# },
# 'fieldsets': {
# 'fieldset_name': SUIT_FORM_SIZE_FULL,
# 'fieldset_name2': SUIT_FORM_SIZE_FULL,
# }
# }
| 1.898438 | 2 |
tiled-lutnet/training-software/MNIST-CIFAR-SVHN/models/MNIST/scripts/lutnet_init.py | awai54st/LUTNet | 38 | 1946 | import h5py
import numpy as np
np.set_printoptions(threshold=np.nan)
from shutil import copyfile
copyfile("dummy_lutnet.h5", "pretrained_bin.h5") # create pretrained.h5 using datastructure from dummy.h5
bl = h5py.File("baseline_pruned.h5", 'r')
#dummy = h5py.File("dummy.h5", 'r')
pretrained = h5py.File("pretrained_bin.h5", 'r+')
# dense layer 1
bl_w1 = bl["model_weights"]["binary_dense_1"]["binary_dense_1"]["Variable_1:0"]
bl_pruning_mask = bl["model_weights"]["binary_dense_1"]["binary_dense_1"]["pruning_mask:0"]
bl_gamma = bl["model_weights"]["binary_dense_1"]["binary_dense_1"]["Variable:0"]
zero_fill = np.zeros(np.shape(np.array(bl_w1)))
pret_w1 = pretrained["model_weights"]["binary_dense_1"]["binary_dense_1"]["Variable_1:0"]
pret_pruning_mask = pretrained["model_weights"]["binary_dense_1"]["binary_dense_1"]["pruning_mask:0"]
p_gamma = pretrained["model_weights"]["binary_dense_1"]["binary_dense_1"]["Variable:0"]
pret_w1[...] = np.array(bl_w1)
p_gamma[...] = np.array(bl_gamma)
pret_pruning_mask[...] = np.array(bl_pruning_mask)
print(np.sum(np.array(bl_pruning_mask)), np.prod(np.shape(np.array(bl_pruning_mask))))
# dense layer 2
bl_w1 = bl["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_1:0"]
bl_rand_map_0 = bl["model_weights"]["binary_dense_2"]["binary_dense_2"]["rand_map_0:0"]
bl_pruning_mask = bl["model_weights"]["binary_dense_2"]["binary_dense_2"]["pruning_mask:0"]
bl_gamma = bl["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable:0"]
bl_means = bl["model_weights"]["residual_sign_1"]["residual_sign_1"]["means:0"]
pret_rand_map_0 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["rand_map_0:0"]
pret_rand_map_1 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["rand_map_1:0"]
pret_rand_map_2 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["rand_map_2:0"]
pret_pruning_mask = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["pruning_mask:0"]
p_gamma = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable:0"]
pret_means = pretrained["model_weights"]["residual_sign_1"]["residual_sign_1"]["means:0"]
pret_c1 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_1:0"]
pret_c2 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_2:0"]
pret_c3 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_3:0"]
pret_c4 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_4:0"]
pret_c5 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_5:0"]
pret_c6 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_6:0"]
pret_c7 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_7:0"]
pret_c8 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_8:0"]
pret_c9 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_9:0"]
pret_c10= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_10:0"]
pret_c11= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_11:0"]
pret_c12= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_12:0"]
pret_c13= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_13:0"]
pret_c14= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_14:0"]
pret_c15= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_15:0"]
pret_c16= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_16:0"]
pret_c17= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_17:0"]
pret_c18= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_18:0"]
pret_c19= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_19:0"]
pret_c20= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_20:0"]
pret_c21= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_21:0"]
pret_c22= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_22:0"]
pret_c23= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_23:0"]
pret_c24= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_24:0"]
pret_c25= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_25:0"]
pret_c26= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_26:0"]
pret_c27= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_27:0"]
pret_c28= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_28:0"]
pret_c29= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_29:0"]
pret_c30= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_30:0"]
pret_c31= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_31:0"]
pret_c32= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_32:0"]
pret_w1 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_33:0"]
pret_rand_map_exp_0 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["rand_map_exp_0:0"]
pret_rand_map_exp_1 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["rand_map_exp_1:0"]
pret_rand_map_exp_2 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["rand_map_exp_2:0"]
weight_shape = np.shape(bl_w1)
tile_shape = np.shape(pret_c1)
zero_fill = np.zeros(tile_shape)
one_fill = np.ones(tile_shape)
neg_one_fill = -np.ones(tile_shape)
# randomisation and pruning recovery
bl_w1_unroll = np.array(bl_w1)
bl_w1 = np.array(bl_w1)
rand_map_0 = np.arange(tile_shape[0])
np.random.shuffle(rand_map_0)
rand_map_1 = np.arange(tile_shape[0])
np.random.shuffle(rand_map_1)
rand_map_2 = np.arange(tile_shape[0])
np.random.shuffle(rand_map_2)
pruning_mask = np.array(bl_pruning_mask).astype(bool)
init_mask = np.logical_not(pruning_mask[rand_map_0])
pruning_mask_recover = np.logical_and(pruning_mask, init_mask)[np.argsort(rand_map_0)]
pruning_mask = np.logical_or(pruning_mask, pruning_mask_recover)
init_mask = np.reshape(init_mask, tile_shape)
# expand randomisation map across tiles
rand_map_0_expand = np.tile(rand_map_0,[weight_shape[0]/tile_shape[0]])
rand_map_1_expand = np.tile(rand_map_1,[weight_shape[0]/tile_shape[0]])
rand_map_2_expand = np.tile(rand_map_2,[weight_shape[0]/tile_shape[0]])
for i in range(weight_shape[0]):
rand_map_0_expand[i] = rand_map_0_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_0_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0])
rand_map_1_expand[i] = rand_map_1_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_1_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0])
rand_map_2_expand[i] = rand_map_2_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_2_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0])
bl_w1_rand_0 = bl_w1_unroll[rand_map_0_expand]
bl_w1_rand_0 = np.reshape(bl_w1_rand_0, weight_shape)
w1 = bl_w1
# connect1 only
c1 = one_fill
c2 = neg_one_fill
c3 = one_fill
c4 = neg_one_fill
c5 = one_fill
c6 = neg_one_fill
c7 = one_fill
c8 = neg_one_fill
c9 = one_fill
c10 = neg_one_fill
c11 = one_fill
c12 = neg_one_fill
c13 = one_fill
c14 = neg_one_fill
c15 = one_fill
c16 = neg_one_fill
c17 = neg_one_fill
c18 = one_fill
c19 = neg_one_fill
c20 = one_fill
c21 = neg_one_fill
c22 = one_fill
c23 = neg_one_fill
c24 = one_fill
c25 = neg_one_fill
c26 = one_fill
c27 = neg_one_fill
c28 = one_fill
c29 = neg_one_fill
c30 = one_fill
c31 = neg_one_fill
c32 = one_fill
pret_w1 [...] = w1
pret_c1 [...] = c1
pret_c2 [...] = c2
pret_c3 [...] = c3
pret_c4 [...] = c4
pret_c5 [...] = c5
pret_c6 [...] = c6
pret_c7 [...] = c7
pret_c8 [...] = c8
pret_c9 [...] = c9
pret_c10[...] = c10
pret_c11[...] = c11
pret_c12[...] = c12
pret_c13[...] = c13
pret_c14[...] = c14
pret_c15[...] = c15
pret_c16[...] = c16
pret_c17[...] = c17
pret_c18[...] = c18
pret_c19[...] = c19
pret_c20[...] = c20
pret_c21[...] = c21
pret_c22[...] = c22
pret_c23[...] = c23
pret_c24[...] = c24
pret_c25[...] = c25
pret_c26[...] = c26
pret_c27[...] = c27
pret_c28[...] = c28
pret_c29[...] = c29
pret_c30[...] = c30
pret_c31[...] = c31
pret_c32[...] = c32
pret_rand_map_0[...] = np.reshape(rand_map_0, (-1,1)).astype(float)
pret_rand_map_1[...] = np.reshape(rand_map_1, (-1,1)).astype(float)
pret_rand_map_2[...] = np.reshape(rand_map_2, (-1,1)).astype(float)
p_gamma[...] = np.array(bl_gamma)
pret_means[...] = np.array(bl_means)
pret_pruning_mask[...] = np.array(bl_pruning_mask)
rand_map_0_expand = np.reshape(rand_map_0_expand, [-1,1]).astype(float)
pret_rand_map_exp_0[...] = rand_map_0_expand
rand_map_1_expand = np.reshape(rand_map_1_expand, [-1,1]).astype(float)
pret_rand_map_exp_1[...] = rand_map_1_expand
rand_map_2_expand = np.reshape(rand_map_2_expand, [-1,1]).astype(float)
pret_rand_map_exp_2[...] = rand_map_2_expand
print(np.sum(np.array(bl_pruning_mask)), np.prod(np.shape(np.array(bl_pruning_mask))))
# dense layer 3
bl_w1 = bl["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_1:0"]
bl_rand_map_0 = bl["model_weights"]["binary_dense_3"]["binary_dense_3"]["rand_map_0:0"]
bl_pruning_mask = bl["model_weights"]["binary_dense_3"]["binary_dense_3"]["pruning_mask:0"]
bl_gamma = bl["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable:0"]
bl_means = bl["model_weights"]["residual_sign_2"]["residual_sign_2"]["means:0"]
pret_rand_map_0 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["rand_map_0:0"]
pret_rand_map_1 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["rand_map_1:0"]
pret_rand_map_2 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["rand_map_2:0"]
pret_pruning_mask = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["pruning_mask:0"]
p_gamma = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable:0"]
pret_means = pretrained["model_weights"]["residual_sign_2"]["residual_sign_2"]["means:0"]
pret_c1 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_1:0"]
pret_c2 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_2:0"]
pret_c3 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_3:0"]
pret_c4 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_4:0"]
pret_c5 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_5:0"]
pret_c6 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_6:0"]
pret_c7 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_7:0"]
pret_c8 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_8:0"]
pret_c9 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_9:0"]
pret_c10= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_10:0"]
pret_c11= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_11:0"]
pret_c12= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_12:0"]
pret_c13= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_13:0"]
pret_c14= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_14:0"]
pret_c15= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_15:0"]
pret_c16= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_16:0"]
pret_c17= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_17:0"]
pret_c18= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_18:0"]
pret_c19= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_19:0"]
pret_c20= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_20:0"]
pret_c21= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_21:0"]
pret_c22= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_22:0"]
pret_c23= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_23:0"]
pret_c24= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_24:0"]
pret_c25= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_25:0"]
pret_c26= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_26:0"]
pret_c27= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_27:0"]
pret_c28= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_28:0"]
pret_c29= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_29:0"]
pret_c30= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_30:0"]
pret_c31= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_31:0"]
pret_c32= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_32:0"]
pret_w1 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_33:0"]
pret_rand_map_exp_0 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["rand_map_exp_0:0"]
pret_rand_map_exp_1 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["rand_map_exp_1:0"]
pret_rand_map_exp_2 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["rand_map_exp_2:0"]
weight_shape = np.shape(bl_w1)
tile_shape = np.shape(pret_c1)
zero_fill = np.zeros(tile_shape)
one_fill = np.ones(tile_shape)
neg_one_fill = -np.ones(tile_shape)
# randomisation and pruning recovery
bl_w1_unroll = np.array(bl_w1)
bl_w1 = np.array(bl_w1)
rand_map_0 = np.arange(tile_shape[0])
np.random.shuffle(rand_map_0)
rand_map_1 = np.arange(tile_shape[0])
np.random.shuffle(rand_map_1)
rand_map_2 = np.arange(tile_shape[0])
np.random.shuffle(rand_map_2)
pruning_mask = np.array(bl_pruning_mask).astype(bool)
init_mask = np.logical_not(pruning_mask[rand_map_0])
pruning_mask_recover = np.logical_and(pruning_mask, init_mask)[np.argsort(rand_map_0)]
pruning_mask = np.logical_or(pruning_mask, pruning_mask_recover)
init_mask = np.reshape(init_mask, tile_shape)
# expand randomisation map across tiles
rand_map_0_expand = np.tile(rand_map_0,[weight_shape[0]/tile_shape[0]])
rand_map_1_expand = np.tile(rand_map_1,[weight_shape[0]/tile_shape[0]])
rand_map_2_expand = np.tile(rand_map_2,[weight_shape[0]/tile_shape[0]])
for i in range(weight_shape[0]):
rand_map_0_expand[i] = rand_map_0_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_0_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0])
rand_map_1_expand[i] = rand_map_1_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_1_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0])
rand_map_2_expand[i] = rand_map_2_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_2_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0])
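# Editor's note (sketch, not from the original script): the loop above turns the
# per-tile shuffle into absolute source indices.  With a hypothetical
# tile_shape[0] = 4, weight_shape[0] = 8 and rand_map_0 = [2, 0, 3, 1], np.tile
# yields [2, 0, 3, 1, 2, 0, 3, 1]; for i = 5 the last term evaluates left to
# right as 4 * ((5 % 8) / 4) = 4, so the tiled entry 0 becomes absolute index 4,
# i.e. the same shuffle replayed inside the second tile.  The arithmetic relies
# on Python-2 integer division; under Python 3 each '/' here would need '//' to
# keep the indices integral.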
bl_w1_rand_0 = bl_w1_unroll[rand_map_0_expand]
bl_w1_rand_0 = np.reshape(bl_w1_rand_0, weight_shape)
w1 = bl_w1
# connect1 only
c1 = one_fill
c2 = neg_one_fill
c3 = one_fill
c4 = neg_one_fill
c5 = one_fill
c6 = neg_one_fill
c7 = one_fill
c8 = neg_one_fill
c9 = one_fill
c10 = neg_one_fill
c11 = one_fill
c12 = neg_one_fill
c13 = one_fill
c14 = neg_one_fill
c15 = one_fill
c16 = neg_one_fill
c17 = neg_one_fill
c18 = one_fill
c19 = neg_one_fill
c20 = one_fill
c21 = neg_one_fill
c22 = one_fill
c23 = neg_one_fill
c24 = one_fill
c25 = neg_one_fill
c26 = one_fill
c27 = neg_one_fill
c28 = one_fill
c29 = neg_one_fill
c30 = one_fill
c31 = neg_one_fill
c32 = one_fill
pret_w1 [...] = w1
pret_c1 [...] = c1
pret_c2 [...] = c2
pret_c3 [...] = c3
pret_c4 [...] = c4
pret_c5 [...] = c5
pret_c6 [...] = c6
pret_c7 [...] = c7
pret_c8 [...] = c8
pret_c9 [...] = c9
pret_c10[...] = c10
pret_c11[...] = c11
pret_c12[...] = c12
pret_c13[...] = c13
pret_c14[...] = c14
pret_c15[...] = c15
pret_c16[...] = c16
pret_c17[...] = c17
pret_c18[...] = c18
pret_c19[...] = c19
pret_c20[...] = c20
pret_c21[...] = c21
pret_c22[...] = c22
pret_c23[...] = c23
pret_c24[...] = c24
pret_c25[...] = c25
pret_c26[...] = c26
pret_c27[...] = c27
pret_c28[...] = c28
pret_c29[...] = c29
pret_c30[...] = c30
pret_c31[...] = c31
pret_c32[...] = c32
pret_rand_map_0[...] = np.reshape(rand_map_0, (-1,1)).astype(float)
pret_rand_map_1[...] = np.reshape(rand_map_1, (-1,1)).astype(float)
pret_rand_map_2[...] = np.reshape(rand_map_2, (-1,1)).astype(float)
p_gamma[...] = np.array(bl_gamma)
pret_means[...] = np.array(bl_means)
pret_pruning_mask[...] = np.array(bl_pruning_mask)
rand_map_0_expand = np.reshape(rand_map_0_expand, [-1,1]).astype(float)
pret_rand_map_exp_0[...] = rand_map_0_expand
rand_map_1_expand = np.reshape(rand_map_1_expand, [-1,1]).astype(float)
pret_rand_map_exp_1[...] = rand_map_1_expand
rand_map_2_expand = np.reshape(rand_map_2_expand, [-1,1]).astype(float)
pret_rand_map_exp_2[...] = rand_map_2_expand
print(np.sum(np.array(bl_pruning_mask)), np.prod(np.shape(np.array(bl_pruning_mask))))
# dense layer 4
bl_w1 = bl["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_1:0"]
bl_rand_map_0 = bl["model_weights"]["binary_dense_4"]["binary_dense_4"]["rand_map_0:0"]
bl_pruning_mask = bl["model_weights"]["binary_dense_4"]["binary_dense_4"]["pruning_mask:0"]
bl_gamma = bl["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable:0"]
bl_means = bl["model_weights"]["residual_sign_3"]["residual_sign_3"]["means:0"]
pret_rand_map_0 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["rand_map_0:0"]
pret_rand_map_1 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["rand_map_1:0"]
pret_rand_map_2 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["rand_map_2:0"]
pret_pruning_mask = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["pruning_mask:0"]
p_gamma = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable:0"]
pret_means = pretrained["model_weights"]["residual_sign_3"]["residual_sign_3"]["means:0"]
pret_c1 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_1:0"]
pret_c2 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_2:0"]
pret_c3 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_3:0"]
pret_c4 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_4:0"]
pret_c5 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_5:0"]
pret_c6 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_6:0"]
pret_c7 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_7:0"]
pret_c8 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_8:0"]
pret_c9 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_9:0"]
pret_c10= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_10:0"]
pret_c11= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_11:0"]
pret_c12= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_12:0"]
pret_c13= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_13:0"]
pret_c14= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_14:0"]
pret_c15= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_15:0"]
pret_c16= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_16:0"]
pret_c17= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_17:0"]
pret_c18= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_18:0"]
pret_c19= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_19:0"]
pret_c20= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_20:0"]
pret_c21= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_21:0"]
pret_c22= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_22:0"]
pret_c23= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_23:0"]
pret_c24= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_24:0"]
pret_c25= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_25:0"]
pret_c26= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_26:0"]
pret_c27= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_27:0"]
pret_c28= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_28:0"]
pret_c29= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_29:0"]
pret_c30= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_30:0"]
pret_c31= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_31:0"]
pret_c32= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_32:0"]
pret_w1 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_33:0"]
pret_rand_map_exp_0 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["rand_map_exp_0:0"]
pret_rand_map_exp_1 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["rand_map_exp_1:0"]
pret_rand_map_exp_2 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["rand_map_exp_2:0"]
weight_shape = np.shape(bl_w1)
tile_shape = np.shape(pret_c1)
zero_fill = np.zeros(tile_shape)
one_fill = np.ones(tile_shape)
neg_one_fill = -np.ones(tile_shape)
# randomisation and pruning recovery
bl_w1_unroll = np.array(bl_w1)
bl_w1 = np.array(bl_w1)
rand_map_0 = np.arange(tile_shape[0])
np.random.shuffle(rand_map_0)
rand_map_1 = np.arange(tile_shape[0])
np.random.shuffle(rand_map_1)
rand_map_2 = np.arange(tile_shape[0])
np.random.shuffle(rand_map_2)
pruning_mask = np.array(bl_pruning_mask).astype(bool)
init_mask = np.logical_not(pruning_mask[rand_map_0])
pruning_mask_recover = np.logical_and(pruning_mask, init_mask)[np.argsort(rand_map_0)]
pruning_mask = np.logical_or(pruning_mask, pruning_mask_recover)
init_mask = np.reshape(init_mask, tile_shape)
# expand randomisation map across tiles
rand_map_0_expand = np.tile(rand_map_0,[weight_shape[0]/tile_shape[0]])
rand_map_1_expand = np.tile(rand_map_1,[weight_shape[0]/tile_shape[0]])
rand_map_2_expand = np.tile(rand_map_2,[weight_shape[0]/tile_shape[0]])
for i in range(weight_shape[0]):
rand_map_0_expand[i] = rand_map_0_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_0_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0])
rand_map_1_expand[i] = rand_map_1_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_1_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0])
rand_map_2_expand[i] = rand_map_2_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_2_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0])
bl_w1_rand_0 = bl_w1_unroll[rand_map_0_expand]
bl_w1_rand_0 = np.reshape(bl_w1_rand_0, weight_shape)
w1 = bl_w1
# connect1 only
c1 = one_fill
c2 = neg_one_fill
c3 = one_fill
c4 = neg_one_fill
c5 = one_fill
c6 = neg_one_fill
c7 = one_fill
c8 = neg_one_fill
c9 = one_fill
c10 = neg_one_fill
c11 = one_fill
c12 = neg_one_fill
c13 = one_fill
c14 = neg_one_fill
c15 = one_fill
c16 = neg_one_fill
c17 = neg_one_fill
c18 = one_fill
c19 = neg_one_fill
c20 = one_fill
c21 = neg_one_fill
c22 = one_fill
c23 = neg_one_fill
c24 = one_fill
c25 = neg_one_fill
c26 = one_fill
c27 = neg_one_fill
c28 = one_fill
c29 = neg_one_fill
c30 = one_fill
c31 = neg_one_fill
c32 = one_fill
pret_w1 [...] = w1
pret_c1 [...] = c1
pret_c2 [...] = c2
pret_c3 [...] = c3
pret_c4 [...] = c4
pret_c5 [...] = c5
pret_c6 [...] = c6
pret_c7 [...] = c7
pret_c8 [...] = c8
pret_c9 [...] = c9
pret_c10[...] = c10
pret_c11[...] = c11
pret_c12[...] = c12
pret_c13[...] = c13
pret_c14[...] = c14
pret_c15[...] = c15
pret_c16[...] = c16
pret_c17[...] = c17
pret_c18[...] = c18
pret_c19[...] = c19
pret_c20[...] = c20
pret_c21[...] = c21
pret_c22[...] = c22
pret_c23[...] = c23
pret_c24[...] = c24
pret_c25[...] = c25
pret_c26[...] = c26
pret_c27[...] = c27
pret_c28[...] = c28
pret_c29[...] = c29
pret_c30[...] = c30
pret_c31[...] = c31
pret_c32[...] = c32
pret_rand_map_0[...] = np.reshape(rand_map_0, (-1,1)).astype(float)
pret_rand_map_1[...] = np.reshape(rand_map_1, (-1,1)).astype(float)
pret_rand_map_2[...] = np.reshape(rand_map_2, (-1,1)).astype(float)
p_gamma[...] = np.array(bl_gamma)
pret_means[...] = np.array(bl_means)
pret_pruning_mask[...] = np.array(bl_pruning_mask)
rand_map_0_expand = np.reshape(rand_map_0_expand, [-1,1]).astype(float)
pret_rand_map_exp_0[...] = rand_map_0_expand
rand_map_1_expand = np.reshape(rand_map_1_expand, [-1,1]).astype(float)
pret_rand_map_exp_1[...] = rand_map_1_expand
rand_map_2_expand = np.reshape(rand_map_2_expand, [-1,1]).astype(float)
pret_rand_map_exp_2[...] = rand_map_2_expand
print(np.sum(np.array(bl_pruning_mask)), np.prod(np.shape(np.array(bl_pruning_mask))))
# dense layer 5
bl_w1 = bl["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_1:0"]
bl_rand_map_0 = bl["model_weights"]["binary_dense_5"]["binary_dense_5"]["rand_map_0:0"]
bl_pruning_mask = bl["model_weights"]["binary_dense_5"]["binary_dense_5"]["pruning_mask:0"]
bl_gamma = bl["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable:0"]
bl_means = bl["model_weights"]["residual_sign_4"]["residual_sign_4"]["means:0"]
pret_rand_map_0 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["rand_map_0:0"]
pret_rand_map_1 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["rand_map_1:0"]
pret_rand_map_2 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["rand_map_2:0"]
pret_pruning_mask = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["pruning_mask:0"]
p_gamma = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable:0"]
pret_means = pretrained["model_weights"]["residual_sign_4"]["residual_sign_4"]["means:0"]
pret_c1 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_1:0"]
pret_c2 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_2:0"]
pret_c3 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_3:0"]
pret_c4 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_4:0"]
pret_c5 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_5:0"]
pret_c6 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_6:0"]
pret_c7 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_7:0"]
pret_c8 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_8:0"]
pret_c9 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_9:0"]
pret_c10= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_10:0"]
pret_c11= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_11:0"]
pret_c12= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_12:0"]
pret_c13= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_13:0"]
pret_c14= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_14:0"]
pret_c15= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_15:0"]
pret_c16= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_16:0"]
pret_c17= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_17:0"]
pret_c18= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_18:0"]
pret_c19= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_19:0"]
pret_c20= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_20:0"]
pret_c21= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_21:0"]
pret_c22= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_22:0"]
pret_c23= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_23:0"]
pret_c24= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_24:0"]
pret_c25= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_25:0"]
pret_c26= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_26:0"]
pret_c27= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_27:0"]
pret_c28= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_28:0"]
pret_c29= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_29:0"]
pret_c30= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_30:0"]
pret_c31= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_31:0"]
pret_c32= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_32:0"]
pret_w1 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_33:0"]
pret_rand_map_exp_0 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["rand_map_exp_0:0"]
pret_rand_map_exp_1 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["rand_map_exp_1:0"]
pret_rand_map_exp_2 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["rand_map_exp_2:0"]
weight_shape = np.shape(bl_w1)
tile_shape = np.shape(pret_c1)
zero_fill = np.zeros(tile_shape)
one_fill = np.ones(tile_shape)
neg_one_fill = -np.ones(tile_shape)
# randomisation and pruning recovery
bl_w1_unroll = np.array(bl_w1)
bl_w1 = np.array(bl_w1)
rand_map_0 = np.arange(tile_shape[0])
np.random.shuffle(rand_map_0)
rand_map_1 = np.arange(tile_shape[0])
np.random.shuffle(rand_map_1)
rand_map_2 = np.arange(tile_shape[0])
np.random.shuffle(rand_map_2)
pruning_mask = np.array(bl_pruning_mask).astype(bool)
init_mask = np.logical_not(pruning_mask[rand_map_0])
pruning_mask_recover = np.logical_and(pruning_mask, init_mask)[np.argsort(rand_map_0)]
pruning_mask = np.logical_or(pruning_mask, pruning_mask_recover)
init_mask = np.reshape(init_mask, tile_shape)
# expand randomisation map across tiles
rand_map_0_expand = np.tile(rand_map_0,[weight_shape[0]/tile_shape[0]])
rand_map_1_expand = np.tile(rand_map_1,[weight_shape[0]/tile_shape[0]])
rand_map_2_expand = np.tile(rand_map_2,[weight_shape[0]/tile_shape[0]])
for i in range(weight_shape[0]):
rand_map_0_expand[i] = rand_map_0_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_0_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0])
rand_map_1_expand[i] = rand_map_1_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_1_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0])
rand_map_2_expand[i] = rand_map_2_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_2_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0])
bl_w1_rand_0 = bl_w1_unroll[rand_map_0_expand]
bl_w1_rand_0 = np.reshape(bl_w1_rand_0, weight_shape)
w1 = bl_w1
# connect1 only
c1 = one_fill
c2 = neg_one_fill
c3 = one_fill
c4 = neg_one_fill
c5 = one_fill
c6 = neg_one_fill
c7 = one_fill
c8 = neg_one_fill
c9 = one_fill
c10 = neg_one_fill
c11 = one_fill
c12 = neg_one_fill
c13 = one_fill
c14 = neg_one_fill
c15 = one_fill
c16 = neg_one_fill
c17 = neg_one_fill
c18 = one_fill
c19 = neg_one_fill
c20 = one_fill
c21 = neg_one_fill
c22 = one_fill
c23 = neg_one_fill
c24 = one_fill
c25 = neg_one_fill
c26 = one_fill
c27 = neg_one_fill
c28 = one_fill
c29 = neg_one_fill
c30 = one_fill
c31 = neg_one_fill
c32 = one_fill
pret_w1 [...] = w1
pret_c1 [...] = c1
pret_c2 [...] = c2
pret_c3 [...] = c3
pret_c4 [...] = c4
pret_c5 [...] = c5
pret_c6 [...] = c6
pret_c7 [...] = c7
pret_c8 [...] = c8
pret_c9 [...] = c9
pret_c10[...] = c10
pret_c11[...] = c11
pret_c12[...] = c12
pret_c13[...] = c13
pret_c14[...] = c14
pret_c15[...] = c15
pret_c16[...] = c16
pret_c17[...] = c17
pret_c18[...] = c18
pret_c19[...] = c19
pret_c20[...] = c20
pret_c21[...] = c21
pret_c22[...] = c22
pret_c23[...] = c23
pret_c24[...] = c24
pret_c25[...] = c25
pret_c26[...] = c26
pret_c27[...] = c27
pret_c28[...] = c28
pret_c29[...] = c29
pret_c30[...] = c30
pret_c31[...] = c31
pret_c32[...] = c32
pret_rand_map_0[...] = np.reshape(rand_map_0, (-1,1)).astype(float)
pret_rand_map_1[...] = np.reshape(rand_map_1, (-1,1)).astype(float)
pret_rand_map_2[...] = np.reshape(rand_map_2, (-1,1)).astype(float)
p_gamma[...] = np.array(bl_gamma)
pret_means[...] = np.array(bl_means)
pret_pruning_mask[...] = np.array(bl_pruning_mask)
rand_map_0_expand = np.reshape(rand_map_0_expand, [-1,1]).astype(float)
pret_rand_map_exp_0[...] = rand_map_0_expand
rand_map_1_expand = np.reshape(rand_map_1_expand, [-1,1]).astype(float)
pret_rand_map_exp_1[...] = rand_map_1_expand
rand_map_2_expand = np.reshape(rand_map_2_expand, [-1,1]).astype(float)
pret_rand_map_exp_2[...] = rand_map_2_expand
print(np.sum(np.array(bl_pruning_mask)), np.prod(np.shape(np.array(bl_pruning_mask))))
# bn 1
bl_beta = bl["model_weights"]["batch_normalization_1"]["batch_normalization_1"]["beta:0"]
bl_gamma = bl["model_weights"]["batch_normalization_1"]["batch_normalization_1"]["gamma:0"]
bl_moving_mean = bl["model_weights"]["batch_normalization_1"]["batch_normalization_1"]["moving_mean:0"]
bl_moving_variance = bl["model_weights"]["batch_normalization_1"]["batch_normalization_1"]["moving_variance:0"]
p_beta = pretrained["model_weights"]["batch_normalization_1"]["batch_normalization_1"]["beta:0"]
p_gamma = pretrained["model_weights"]["batch_normalization_1"]["batch_normalization_1"]["gamma:0"]
p_moving_mean = pretrained["model_weights"]["batch_normalization_1"]["batch_normalization_1"]["moving_mean:0"]
p_moving_variance = pretrained["model_weights"]["batch_normalization_1"]["batch_normalization_1"]["moving_variance:0"]
p_beta[...] = np.array(bl_beta)
p_gamma[...] = np.array(bl_gamma)
p_moving_mean[...] = np.array(bl_moving_mean)
p_moving_variance[...] = np.array(bl_moving_variance)
# bn 2
bl_beta = bl["model_weights"]["batch_normalization_2"]["batch_normalization_2"]["beta:0"]
bl_gamma = bl["model_weights"]["batch_normalization_2"]["batch_normalization_2"]["gamma:0"]
bl_moving_mean = bl["model_weights"]["batch_normalization_2"]["batch_normalization_2"]["moving_mean:0"]
bl_moving_variance = bl["model_weights"]["batch_normalization_2"]["batch_normalization_2"]["moving_variance:0"]
p_beta = pretrained["model_weights"]["batch_normalization_2"]["batch_normalization_2"]["beta:0"]
p_gamma = pretrained["model_weights"]["batch_normalization_2"]["batch_normalization_2"]["gamma:0"]
p_moving_mean = pretrained["model_weights"]["batch_normalization_2"]["batch_normalization_2"]["moving_mean:0"]
p_moving_variance = pretrained["model_weights"]["batch_normalization_2"]["batch_normalization_2"]["moving_variance:0"]
p_beta[...] = np.array(bl_beta)
p_gamma[...] = np.array(bl_gamma)
p_moving_mean[...] = np.array(bl_moving_mean)
p_moving_variance[...] = np.array(bl_moving_variance)
# bn 3
bl_beta = bl["model_weights"]["batch_normalization_3"]["batch_normalization_3"]["beta:0"]
bl_gamma = bl["model_weights"]["batch_normalization_3"]["batch_normalization_3"]["gamma:0"]
bl_moving_mean = bl["model_weights"]["batch_normalization_3"]["batch_normalization_3"]["moving_mean:0"]
bl_moving_variance = bl["model_weights"]["batch_normalization_3"]["batch_normalization_3"]["moving_variance:0"]
p_beta = pretrained["model_weights"]["batch_normalization_3"]["batch_normalization_3"]["beta:0"]
p_gamma = pretrained["model_weights"]["batch_normalization_3"]["batch_normalization_3"]["gamma:0"]
p_moving_mean = pretrained["model_weights"]["batch_normalization_3"]["batch_normalization_3"]["moving_mean:0"]
p_moving_variance = pretrained["model_weights"]["batch_normalization_3"]["batch_normalization_3"]["moving_variance:0"]
p_beta[...] = np.array(bl_beta)
p_gamma[...] = np.array(bl_gamma)
p_moving_mean[...] = np.array(bl_moving_mean)
p_moving_variance[...] = np.array(bl_moving_variance)
# bn 4
bl_beta = bl["model_weights"]["batch_normalization_4"]["batch_normalization_4"]["beta:0"]
bl_gamma = bl["model_weights"]["batch_normalization_4"]["batch_normalization_4"]["gamma:0"]
bl_moving_mean = bl["model_weights"]["batch_normalization_4"]["batch_normalization_4"]["moving_mean:0"]
bl_moving_variance = bl["model_weights"]["batch_normalization_4"]["batch_normalization_4"]["moving_variance:0"]
p_beta = pretrained["model_weights"]["batch_normalization_4"]["batch_normalization_4"]["beta:0"]
p_gamma = pretrained["model_weights"]["batch_normalization_4"]["batch_normalization_4"]["gamma:0"]
p_moving_mean = pretrained["model_weights"]["batch_normalization_4"]["batch_normalization_4"]["moving_mean:0"]
p_moving_variance = pretrained["model_weights"]["batch_normalization_4"]["batch_normalization_4"]["moving_variance:0"]
p_beta[...] = np.array(bl_beta)
p_gamma[...] = np.array(bl_gamma)
p_moving_mean[...] = np.array(bl_moving_mean)
p_moving_variance[...] = np.array(bl_moving_variance)
# bn 5
bl_beta = bl["model_weights"]["batch_normalization_5"]["batch_normalization_5"]["beta:0"]
bl_gamma = bl["model_weights"]["batch_normalization_5"]["batch_normalization_5"]["gamma:0"]
bl_moving_mean = bl["model_weights"]["batch_normalization_5"]["batch_normalization_5"]["moving_mean:0"]
bl_moving_variance = bl["model_weights"]["batch_normalization_5"]["batch_normalization_5"]["moving_variance:0"]
p_beta = pretrained["model_weights"]["batch_normalization_5"]["batch_normalization_5"]["beta:0"]
p_gamma = pretrained["model_weights"]["batch_normalization_5"]["batch_normalization_5"]["gamma:0"]
p_moving_mean = pretrained["model_weights"]["batch_normalization_5"]["batch_normalization_5"]["moving_mean:0"]
p_moving_variance = pretrained["model_weights"]["batch_normalization_5"]["batch_normalization_5"]["moving_variance:0"]
p_beta[...] = np.array(bl_beta)
p_gamma[...] = np.array(bl_gamma)
p_moving_mean[...] = np.array(bl_moving_mean)
p_moving_variance[...] = np.array(bl_moving_variance)
pretrained.close()
| 2.265625 | 2 |
pkgs/nltk-3.2-py27_0/lib/python2.7/site-packages/nltk/classify/weka.py | wangyum/anaconda | 0 | 1947 | <reponame>wangyum/anaconda
# Natural Language Toolkit: Interface to Weka Classifiers
#
# Copyright (C) 2001-2015 NLTK Project
# Author: <NAME> <<EMAIL>>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Classifiers that make use of the external 'Weka' package.
"""
from __future__ import print_function
import time
import tempfile
import os
import subprocess
import re
import zipfile
from sys import stdin
from nltk import compat
from nltk.probability import DictionaryProbDist
from nltk.internals import java, config_java
from nltk.classify.api import ClassifierI
_weka_classpath = None
_weka_search = ['.',
'/usr/share/weka',
'/usr/local/share/weka',
'/usr/lib/weka',
'/usr/local/lib/weka',]
def config_weka(classpath=None):
global _weka_classpath
# Make sure java's configured first.
config_java()
if classpath is not None:
_weka_classpath = classpath
if _weka_classpath is None:
searchpath = _weka_search
if 'WEKAHOME' in os.environ:
searchpath.insert(0, os.environ['WEKAHOME'])
for path in searchpath:
if os.path.exists(os.path.join(path, 'weka.jar')):
_weka_classpath = os.path.join(path, 'weka.jar')
version = _check_weka_version(_weka_classpath)
if version:
print(('[Found Weka: %s (version %s)]' %
(_weka_classpath, version)))
else:
print('[Found Weka: %s]' % _weka_classpath)
_check_weka_version(_weka_classpath)
if _weka_classpath is None:
raise LookupError('Unable to find weka.jar! Use config_weka() '
'or set the WEKAHOME environment variable. '
'For more information about Weka, please see '
'http://www.cs.waikato.ac.nz/ml/weka/')
def _check_weka_version(jar):
try:
zf = zipfile.ZipFile(jar)
    except (SystemExit, KeyboardInterrupt):
raise
except:
return None
try:
try:
return zf.read('weka/core/version.txt')
except KeyError:
return None
finally:
zf.close()
class WekaClassifier(ClassifierI):
def __init__(self, formatter, model_filename):
self._formatter = formatter
self._model = model_filename
def prob_classify_many(self, featuresets):
return self._classify_many(featuresets, ['-p', '0', '-distribution'])
def classify_many(self, featuresets):
return self._classify_many(featuresets, ['-p', '0'])
def _classify_many(self, featuresets, options):
# Make sure we can find java & weka.
config_weka()
temp_dir = tempfile.mkdtemp()
try:
# Write the test data file.
test_filename = os.path.join(temp_dir, 'test.arff')
self._formatter.write(test_filename, featuresets)
# Call weka to classify the data.
cmd = ['weka.classifiers.bayes.NaiveBayes',
'-l', self._model, '-T', test_filename] + options
(stdout, stderr) = java(cmd, classpath=_weka_classpath,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Check if something went wrong:
if stderr and not stdout:
if 'Illegal options: -distribution' in stderr:
raise ValueError('The installed version of weka does '
'not support probability distribution '
'output.')
else:
raise ValueError('Weka failed to generate output:\n%s'
% stderr)
# Parse weka's output.
return self.parse_weka_output(stdout.decode(stdin.encoding).split('\n'))
finally:
for f in os.listdir(temp_dir):
os.remove(os.path.join(temp_dir, f))
os.rmdir(temp_dir)
def parse_weka_distribution(self, s):
probs = [float(v) for v in re.split('[*,]+', s) if v.strip()]
probs = dict(zip(self._formatter.labels(), probs))
return DictionaryProbDist(probs)
def parse_weka_output(self, lines):
# Strip unwanted text from stdout
for i,line in enumerate(lines):
if line.strip().startswith("inst#"):
lines = lines[i:]
break
if lines[0].split() == ['inst#', 'actual', 'predicted',
'error', 'prediction']:
return [line.split()[2].split(':')[1]
for line in lines[1:] if line.strip()]
elif lines[0].split() == ['inst#', 'actual', 'predicted',
'error', 'distribution']:
return [self.parse_weka_distribution(line.split()[-1])
for line in lines[1:] if line.strip()]
# is this safe:?
elif re.match(r'^0 \w+ [01]\.[0-9]* \?\s*$', lines[0]):
return [line.split()[1] for line in lines if line.strip()]
else:
for line in lines[:10]:
print(line)
raise ValueError('Unhandled output format -- your version '
'of weka may not be supported.\n'
' Header: %s' % lines[0])
# [xx] full list of classifiers (some may be abstract?):
# ADTree, AODE, BayesNet, ComplementNaiveBayes, ConjunctiveRule,
# DecisionStump, DecisionTable, HyperPipes, IB1, IBk, Id3, J48,
# JRip, KStar, LBR, LeastMedSq, LinearRegression, LMT, Logistic,
# LogisticBase, M5Base, MultilayerPerceptron,
# MultipleClassifiersCombiner, NaiveBayes, NaiveBayesMultinomial,
# NaiveBayesSimple, NBTree, NNge, OneR, PaceRegression, PART,
# PreConstructedLinearModel, Prism, RandomForest,
# RandomizableClassifier, RandomTree, RBFNetwork, REPTree, Ridor,
# RuleNode, SimpleLinearRegression, SimpleLogistic,
# SingleClassifierEnhancer, SMO, SMOreg, UserClassifier, VFI,
# VotedPerceptron, Winnow, ZeroR
_CLASSIFIER_CLASS = {
'naivebayes': 'weka.classifiers.bayes.NaiveBayes',
'C4.5': 'weka.classifiers.trees.J48',
'log_regression': 'weka.classifiers.functions.Logistic',
'svm': 'weka.classifiers.functions.SMO',
'kstar': 'weka.classifiers.lazy.KStar',
'ripper': 'weka.classifiers.rules.JRip',
}
@classmethod
def train(cls, model_filename, featuresets,
classifier='naivebayes', options=[], quiet=True):
# Make sure we can find java & weka.
config_weka()
# Build an ARFF formatter.
formatter = ARFF_Formatter.from_train(featuresets)
temp_dir = tempfile.mkdtemp()
try:
# Write the training data file.
train_filename = os.path.join(temp_dir, 'train.arff')
formatter.write(train_filename, featuresets)
if classifier in cls._CLASSIFIER_CLASS:
javaclass = cls._CLASSIFIER_CLASS[classifier]
elif classifier in cls._CLASSIFIER_CLASS.values():
javaclass = classifier
else:
raise ValueError('Unknown classifier %s' % classifier)
# Train the weka model.
cmd = [javaclass, '-d', model_filename, '-t', train_filename]
cmd += list(options)
if quiet:
stdout = subprocess.PIPE
else: stdout = None
java(cmd, classpath=_weka_classpath, stdout=stdout)
# Return the new classifier.
return WekaClassifier(formatter, model_filename)
finally:
for f in os.listdir(temp_dir):
os.remove(os.path.join(temp_dir, f))
os.rmdir(temp_dir)
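# Usage sketch (illustrative only; the model path and feature names below are
# hypothetical, and a local weka.jar plus Java must be discoverable, e.g. via
# config_weka() or the WEKAHOME environment variable):
#
#   featuresets = [({'last_letter': 'a'}, 'female'),
#                  ({'last_letter': 'k'}, 'male')]
#   classifier = WekaClassifier.train('/tmp/demo.model', featuresets,
#                                     classifier='naivebayes')
#   print(classifier.classify_many([{'last_letter': 'o'}]))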
class ARFF_Formatter:
"""
Converts featuresets and labeled featuresets to ARFF-formatted
strings, appropriate for input into Weka.
Features and classes can be specified manually in the constructor, or may
be determined from data using ``from_train``.
"""
def __init__(self, labels, features):
"""
:param labels: A list of all class labels that can be generated.
:param features: A list of feature specifications, where
each feature specification is a tuple (fname, ftype);
and ftype is an ARFF type string such as NUMERIC or
STRING.
"""
self._labels = labels
self._features = features
def format(self, tokens):
"""Returns a string representation of ARFF output for the given data."""
return self.header_section() + self.data_section(tokens)
def labels(self):
"""Returns the list of classes."""
return list(self._labels)
def write(self, outfile, tokens):
"""Writes ARFF data to a file for the given data."""
if not hasattr(outfile, 'write'):
outfile = open(outfile, 'w')
outfile.write(self.format(tokens))
outfile.close()
@staticmethod
def from_train(tokens):
"""
Constructs an ARFF_Formatter instance with class labels and feature
types determined from the given data. Handles boolean, numeric and
string (note: not nominal) types.
"""
# Find the set of all attested labels.
labels = set(label for (tok, label) in tokens)
# Determine the types of all features.
features = {}
for tok, label in tokens:
for (fname, fval) in tok.items():
if issubclass(type(fval), bool):
ftype = '{True, False}'
elif issubclass(type(fval), (compat.integer_types, float, bool)):
ftype = 'NUMERIC'
elif issubclass(type(fval), compat.string_types):
ftype = 'STRING'
elif fval is None:
continue # can't tell the type.
else:
                    raise ValueError('Unsupported value type %r' % type(fval))
if features.get(fname, ftype) != ftype:
raise ValueError('Inconsistent type for %s' % fname)
features[fname] = ftype
features = sorted(features.items())
return ARFF_Formatter(labels, features)
def header_section(self):
"""Returns an ARFF header as a string."""
# Header comment.
s = ('% Weka ARFF file\n' +
'% Generated automatically by NLTK\n' +
'%% %s\n\n' % time.ctime())
# Relation name
s += '@RELATION rel\n\n'
# Input attribute specifications
for fname, ftype in self._features:
s += '@ATTRIBUTE %-30r %s\n' % (fname, ftype)
# Label attribute specification
s += '@ATTRIBUTE %-30r {%s}\n' % ('-label-', ','.join(self._labels))
return s
def data_section(self, tokens, labeled=None):
"""
Returns the ARFF data section for the given data.
:param tokens: a list of featuresets (dicts) or labelled featuresets
which are tuples (featureset, label).
:param labeled: Indicates whether the given tokens are labeled
or not. If None, then the tokens will be assumed to be
labeled if the first token's value is a tuple or list.
"""
# Check if the tokens are labeled or unlabeled. If unlabeled,
# then use 'None'
if labeled is None:
labeled = tokens and isinstance(tokens[0], (tuple, list))
if not labeled:
tokens = [(tok, None) for tok in tokens]
# Data section
s = '\n@DATA\n'
for (tok, label) in tokens:
for fname, ftype in self._features:
s += '%s,' % self._fmt_arff_val(tok.get(fname))
s += '%s\n' % self._fmt_arff_val(label)
return s
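    # Illustrative output (editor's sketch; the toy featureset is not part of
    # the module).  For tokens = [({'length': 4, 'vowel': True}, 'yes')],
    # ARFF_Formatter.from_train(tokens).format(tokens) yields, roughly (header
    # comments and the @RELATION line omitted):
    #
    #   @ATTRIBUTE 'length'   NUMERIC
    #   @ATTRIBUTE 'vowel'    {True, False}
    #   @ATTRIBUTE '-label-'  {yes}
    #
    #   @DATA
    #   4,True,'yes'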
def _fmt_arff_val(self, fval):
if fval is None:
return '?'
elif isinstance(fval, (bool, compat.integer_types)):
return '%s' % fval
elif isinstance(fval, float):
return '%r' % fval
else:
return '%r' % fval
if __name__ == '__main__':
from nltk.classify.util import names_demo, binary_names_demo_features
def make_classifier(featuresets):
return WekaClassifier.train('/tmp/name.model', featuresets,
'C4.5')
classifier = names_demo(make_classifier, binary_names_demo_features)
| 2.21875 | 2 |
src/si/data/dataset.py | pg428/SIB | 0 | 1948 | import pandas as pd
import numpy as np
from src.si.util.util import label_gen
__all__ = ['Dataset']
class Dataset:
def __init__(self, X=None, Y=None,
xnames: list = None,
yname: str = None):
""" Tabular Dataset"""
if X is None:
raise Exception("Trying to instanciate a DataSet without any data")
self.X = X
self.Y = Y
self.xnames = xnames if xnames else label_gen(X.shape[1])
self.yname = yname if yname else 'Y'
@classmethod
def from_data(cls, filename, sep=",", labeled=True):
"""Creates a DataSet from a data file.
:param filename: The filename
:type filename: str
:param sep: attributes separator, defaults to ","
:type sep: str, optional
:return: A DataSet object
:rtype: DataSet
"""
data = np.genfromtxt(filename, delimiter=sep)
if labeled:
X = data[:, 0:-1]
Y = data[:, -1]
else:
X = data
Y = None
return cls(X, Y)
@classmethod
def from_dataframe(cls, df, ylabel=None):
"""Creates a DataSet from a pandas dataframe.
:param df: [description]
:type df: [type]
:param ylabel: [description], defaults to None
:type ylabel: [type], optional
:return: [description]
:rtype: [type]
"""
if ylabel and ylabel in df.columns:
            X = df.loc[:, df.columns != ylabel].to_numpy()  # convert to a numpy array
Y = df.loc[:, ylabel].to_numpy()
            yname = ylabel
            xnames = [name for name in df.columns if name != yname]
else:
X = df.to_numpy()
Y = None
xnames = df.columns.tolist()
yname = None
return cls(X, Y, xnames, yname)
def __len__(self):
"""Returns the number of data points."""
return self.X.shape[0]
def hasLabel(self):
"""Returns True if the dataset constains labels (a dependent variable)"""
return self.Y is not None
def getNumFeatures(self):
"""Returns the number of features"""
return self.X.shape[1]
def getNumClasses(self):
"""Returns the number of label classes or 0 if the dataset has no dependent variable."""
return len(np.unique(self.Y)) if self.hasLabel() else 0
def writeDataset(self, filename, sep=","):
"""Saves the dataset to a file
:param filename: The output file path
:type filename: str
:param sep: The fields separator, defaults to ","
:type sep: str, optional
"""
fullds = np.hstack((self.X, self.Y.reshape(len(self.Y), 1)))
np.savetxt(filename, fullds, delimiter=sep)
def toDataframe(self):
""" Converts the dataset into a pandas DataFrame"""
if self.hasLabel():
df = pd.DataFrame(np.hstack((self.X, self.Y.reshape(len(self.Y), 1))), columns=self.xnames[:]+[self.yname]) #columns=np.hstack((self.xnames, self.yname)))
else:
df = pd.DataFrame(self.X.copy(), columns=self.xnames[:])
return df
def getXy(self):
return self.X, self.Y
def summary(dataset, format='df'):
""" Returns the statistics of a dataset(mean, std, max, min)
:param dataset: A Dataset object
:type dataset: si.data.Dataset
:param format: Output format ('df':DataFrame, 'dict':dictionary ), defaults to 'df'
:type format: str, optional
"""
if format not in ["df", "dict"]:
raise Exception("Invalid format. Choose between 'df' and 'dict'.")
if dataset.hasLabel():
data = np.hstack((dataset.X, dataset.Y.reshape(len(dataset.Y), 1)))
#data = np.hstack([dataset.X, np.reshape(dataset.Y, (-1, 1))])
columns = dataset.xnames[:] + [dataset.yname]
else:
data = dataset.X
columns = dataset.xnames[:]
stats = {}
    if dataset.hasLabel() and isinstance(dataset.Y[0], str):
        for i in range(data.shape[1]-1):  # iterate over feature columns only (skip the label)
_means = np.mean(data[:, i], axis=0)
_vars = np.var(data[:, i], axis=0)
_maxs = np.max(data[:, i], axis=0)
_mins = np.min(data[:, i], axis=0)
stat = {"mean": _means,
"var": _vars,
"max": _maxs,
"min": _mins
}
stats[columns[i]] = stat
else:
        for i in range(data.shape[1]):  # iterate over all columns
_means = np.mean(data[:, i], axis=0)
_vars = np.var(data[:, i], axis=0)
_maxs = np.max(data[:, i], axis=0)
_mins = np.min(data[:, i], axis=0)
stat = {"mean": _means,
"var": _vars,
"max": _maxs,
"min": _mins
}
stats[columns[i]] = stat
# _means = np.mean(data, axis=0)
# _vars = np.var(data, axis=0)
# _maxs = np.max(data, axis=0)
# _mins = np.min(data, axis=0)
# stats = {}
# for i in range(data.shape[1]):
# stat = {"mean": _means[i],
# "var": _vars[i],
# "max": _maxs[i],
# "min": _mins[i]
# }
# stats[columns[i]] = stat
if format == "dict":
return stats
else:
return pd.DataFrame(stats)
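# Usage sketch (illustrative; 'data.csv' is a hypothetical comma-separated file
# whose last column holds the label):
#
#   ds = Dataset.from_data('data.csv', sep=',', labeled=True)
#   print(len(ds), ds.getNumFeatures(), ds.getNumClasses())
#   print(summary(ds))   # per-column mean/var/max/min as a DataFrame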
| 3.390625 | 3 |
stubs/m5stack_flowui-1_4_0-beta/display.py | RonaldHiemstra/micropython-stubs | 38 | 1949 | <reponame>RonaldHiemstra/micropython-stubs
"""
Module: 'display' on M5 FlowUI v1.4.0-beta
"""
# MCU: (sysname='esp32', nodename='esp32', release='1.11.0', version='v1.11-284-g5d8e1c867 on 2019-08-30', machine='ESP32 module with ESP32')
# Stubber: 1.3.1
class TFT:
''
BLACK = 0
BLUE = 255
BMP = 2
BOTTOM = -9004
CENTER = -9003
COLOR_BITS16 = 16
COLOR_BITS24 = 24
CYAN = 65535
DARKCYAN = 32896
DARKGREEN = 32768
DARKGREY = 8421504
FONT_7seg = 9
FONT_Comic = 4
FONT_Default = 0
FONT_DefaultSmall = 8
FONT_DejaVu18 = 1
FONT_DejaVu24 = 2
FONT_DejaVu40 = 11
FONT_DejaVu56 = 12
FONT_DejaVu72 = 13
FONT_Minya = 5
FONT_Small = 7
FONT_Tooney = 6
FONT_Ubuntu = 3
GREEN = 65280
GREENYELLOW = 11336748
HSPI = 1
JPG = 1
LANDSCAPE = 1
LANDSCAPE_FLIP = 3
LASTX = 7000
LASTY = 8000
LIGHTGREY = 12632256
M5STACK = 6
MAGENTA = 16515327
MAROON = 8388608
NAVY = 128
OLIVE = 8421376
ORANGE = 16557056
PINK = 16564426
PORTRAIT = 0
PORTRAIT_FLIP = 2
PURPLE = 8388736
RED = 16515072
RIGHT = -9004
VSPI = 2
WHITE = 16579836
YELLOW = 16579584
def arc():
pass
def attrib7seg():
pass
def backlight():
pass
def circle():
pass
def clear():
pass
def clearwin():
pass
def compileFont():
pass
def deinit():
pass
def drawCircle():
pass
def drawLine():
pass
def drawPixel():
pass
def drawRect():
pass
def drawRoundRect():
pass
def drawTriangle():
pass
def ellipse():
pass
def fill():
pass
def fillCircle():
pass
def fillRect():
pass
def fillRoundRect():
pass
def fillScreen():
pass
def fillTriangle():
pass
def font():
pass
def fontSize():
pass
def getCursor():
pass
def get_bg():
pass
def get_fg():
pass
def hsb2rgb():
pass
def image():
pass
def init():
pass
def line():
pass
def lineByAngle():
pass
def orient():
pass
def pixel():
pass
def polygon():
pass
def print():
pass
def println():
pass
def qrcode():
pass
def rect():
pass
def resetwin():
pass
def restorewin():
pass
def roundrect():
pass
def savewin():
pass
def screensize():
pass
def setBrightness():
pass
def setColor():
pass
def setCursor():
pass
def setRotation():
pass
def setTextColor():
pass
def set_bg():
pass
def set_fg():
pass
def setwin():
pass
def text():
pass
def textClear():
pass
def textWidth():
pass
def text_x():
pass
def text_y():
pass
def tft_deselect():
pass
def tft_readcmd():
pass
def tft_select():
pass
def tft_setspeed():
pass
def tft_writecmd():
pass
def tft_writecmddata():
pass
def triangle():
pass
def winsize():
pass
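# Usage sketch (illustrative only; this stub records no argument lists, so the
# calls below are assumptions based on typical M5Stack MicroPython firmware and
# may differ on a given build):
#
#   import display
#   lcd = display.TFT()
#   lcd.font(lcd.FONT_DejaVu24)
#   lcd.print('hello', lcd.CENTER, 100, lcd.YELLOW)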
| 2.125 | 2 |
dbclient/__init__.py | dmoore247/db-migration | 0 | 1950 | import json, requests, datetime
from cron_descriptor import get_description
from .dbclient import dbclient
from .JobsClient import JobsClient
from .ClustersClient import ClustersClient
from .WorkspaceClient import WorkspaceClient
from .ScimClient import ScimClient
from .LibraryClient import LibraryClient
from .HiveClient import HiveClient
from .parser import *
| 1.109375 | 1 |
UI/ControlSlider/__init__.py | peerke88/SkinningTools | 7 | 1951 | <filename>UI/ControlSlider/__init__.py
# -*- coding: utf-8 -*-
# SkinWeights command and component editor
# Copyright (C) 2018 <NAME>
# Website: http://www.trevorius.com
#
# pyqt attribute sliders
# Copyright (C) 2018 <NAME>
# Website: http://danieleniero.com/
#
# neighbour finding algorythm
# Copyright (C) 2018 <NAME>
# Website: http://www.janpijpers.com/
#
# skinningTools and UI
# Copyright (C) 2018 <NAME>
# Website: http://www.perryleijten.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# See http://www.gnu.org/licenses/gpl.html for a copy of the GNU General
# Public License.
# --------------------------------------------------------------------------------------
| 1.523438 | 2 |
homeassistant/components/fritz/sensor.py | EuleMitKeule/core | 3 | 1952 | <reponame>EuleMitKeule/core
"""AVM FRITZ!Box binary sensors."""
from __future__ import annotations
from collections.abc import Callable
from dataclasses import dataclass
from datetime import datetime, timedelta
import logging
from typing import Any, Literal
from fritzconnection.core.exceptions import (
FritzActionError,
FritzActionFailedError,
FritzConnectionException,
FritzInternalError,
FritzServiceError,
)
from fritzconnection.lib.fritzstatus import FritzStatus
from homeassistant.components.sensor import (
STATE_CLASS_MEASUREMENT,
STATE_CLASS_TOTAL_INCREASING,
SensorEntity,
SensorEntityDescription,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
DATA_GIGABYTES,
DATA_RATE_KILOBITS_PER_SECOND,
DATA_RATE_KILOBYTES_PER_SECOND,
DEVICE_CLASS_TIMESTAMP,
ENTITY_CATEGORY_DIAGNOSTIC,
SIGNAL_STRENGTH_DECIBELS,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.util.dt import utcnow
from .common import FritzBoxBaseEntity, FritzBoxTools
from .const import DOMAIN, DSL_CONNECTION, UPTIME_DEVIATION
_LOGGER = logging.getLogger(__name__)
def _uptime_calculation(seconds_uptime: float, last_value: datetime | None) -> datetime:
"""Calculate uptime with deviation."""
delta_uptime = utcnow() - timedelta(seconds=seconds_uptime)
if (
not last_value
or abs((delta_uptime - last_value).total_seconds()) > UPTIME_DEVIATION
):
return delta_uptime
return last_value
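# Example (illustrative): polling a router that reports ~120 s of uptime twice,
# a few seconds apart, re-derives a slightly different boot time on each poll;
# the check above keeps the previous value while the drift stays within
# UPTIME_DEVIATION, so the timestamp sensors do not flap on polling jitter.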
def _retrieve_device_uptime_state(
status: FritzStatus, last_value: datetime
) -> datetime:
"""Return uptime from device."""
return _uptime_calculation(status.device_uptime, last_value)
def _retrieve_connection_uptime_state(
status: FritzStatus, last_value: datetime | None
) -> datetime:
"""Return uptime from connection."""
return _uptime_calculation(status.connection_uptime, last_value)
def _retrieve_external_ip_state(status: FritzStatus, last_value: str) -> str:
"""Return external ip from device."""
return status.external_ip # type: ignore[no-any-return]
def _retrieve_kb_s_sent_state(status: FritzStatus, last_value: str) -> float:
"""Return upload transmission rate."""
return round(status.transmission_rate[0] / 1000, 1) # type: ignore[no-any-return]
def _retrieve_kb_s_received_state(status: FritzStatus, last_value: str) -> float:
"""Return download transmission rate."""
return round(status.transmission_rate[1] / 1000, 1) # type: ignore[no-any-return]
def _retrieve_max_kb_s_sent_state(status: FritzStatus, last_value: str) -> float:
"""Return upload max transmission rate."""
return round(status.max_bit_rate[0] / 1000, 1) # type: ignore[no-any-return]
def _retrieve_max_kb_s_received_state(status: FritzStatus, last_value: str) -> float:
"""Return download max transmission rate."""
return round(status.max_bit_rate[1] / 1000, 1) # type: ignore[no-any-return]
def _retrieve_gb_sent_state(status: FritzStatus, last_value: str) -> float:
"""Return upload total data."""
return round(status.bytes_sent / 1000 / 1000 / 1000, 1) # type: ignore[no-any-return]
def _retrieve_gb_received_state(status: FritzStatus, last_value: str) -> float:
"""Return download total data."""
return round(status.bytes_received / 1000 / 1000 / 1000, 1) # type: ignore[no-any-return]
def _retrieve_link_kb_s_sent_state(status: FritzStatus, last_value: str) -> float:
"""Return upload link rate."""
return round(status.max_linked_bit_rate[0] / 1000, 1) # type: ignore[no-any-return]
def _retrieve_link_kb_s_received_state(status: FritzStatus, last_value: str) -> float:
"""Return download link rate."""
return round(status.max_linked_bit_rate[1] / 1000, 1) # type: ignore[no-any-return]
def _retrieve_link_noise_margin_sent_state(
status: FritzStatus, last_value: str
) -> float:
"""Return upload noise margin."""
return status.noise_margin[0] / 10 # type: ignore[no-any-return]
def _retrieve_link_noise_margin_received_state(
status: FritzStatus, last_value: str
) -> float:
"""Return download noise margin."""
return status.noise_margin[1] / 10 # type: ignore[no-any-return]
def _retrieve_link_attenuation_sent_state(
status: FritzStatus, last_value: str
) -> float:
"""Return upload line attenuation."""
return status.attenuation[0] / 10 # type: ignore[no-any-return]
def _retrieve_link_attenuation_received_state(
status: FritzStatus, last_value: str
) -> float:
"""Return download line attenuation."""
return status.attenuation[1] / 10 # type: ignore[no-any-return]
@dataclass
class FritzRequireKeysMixin:
"""Fritz sensor data class."""
value_fn: Callable[[FritzStatus, Any], Any]
@dataclass
class FritzSensorEntityDescription(SensorEntityDescription, FritzRequireKeysMixin):
"""Describes Fritz sensor entity."""
connection_type: Literal["dsl"] | None = None
SENSOR_TYPES: tuple[FritzSensorEntityDescription, ...] = (
FritzSensorEntityDescription(
key="external_ip",
name="External IP",
icon="mdi:earth",
value_fn=_retrieve_external_ip_state,
),
FritzSensorEntityDescription(
key="device_uptime",
name="Device Uptime",
device_class=DEVICE_CLASS_TIMESTAMP,
entity_category=ENTITY_CATEGORY_DIAGNOSTIC,
value_fn=_retrieve_device_uptime_state,
),
FritzSensorEntityDescription(
key="connection_uptime",
name="Connection Uptime",
device_class=DEVICE_CLASS_TIMESTAMP,
entity_category=ENTITY_CATEGORY_DIAGNOSTIC,
value_fn=_retrieve_connection_uptime_state,
),
FritzSensorEntityDescription(
key="kb_s_sent",
name="Upload Throughput",
state_class=STATE_CLASS_MEASUREMENT,
native_unit_of_measurement=DATA_RATE_KILOBYTES_PER_SECOND,
icon="mdi:upload",
value_fn=_retrieve_kb_s_sent_state,
),
FritzSensorEntityDescription(
key="kb_s_received",
name="Download Throughput",
state_class=STATE_CLASS_MEASUREMENT,
native_unit_of_measurement=DATA_RATE_KILOBYTES_PER_SECOND,
icon="mdi:download",
value_fn=_retrieve_kb_s_received_state,
),
FritzSensorEntityDescription(
key="max_kb_s_sent",
name="Max Connection Upload Throughput",
native_unit_of_measurement=DATA_RATE_KILOBITS_PER_SECOND,
icon="mdi:upload",
entity_category=ENTITY_CATEGORY_DIAGNOSTIC,
value_fn=_retrieve_max_kb_s_sent_state,
),
FritzSensorEntityDescription(
key="max_kb_s_received",
name="Max Connection Download Throughput",
native_unit_of_measurement=DATA_RATE_KILOBITS_PER_SECOND,
icon="mdi:download",
entity_category=ENTITY_CATEGORY_DIAGNOSTIC,
value_fn=_retrieve_max_kb_s_received_state,
),
FritzSensorEntityDescription(
key="gb_sent",
name="GB sent",
state_class=STATE_CLASS_TOTAL_INCREASING,
native_unit_of_measurement=DATA_GIGABYTES,
icon="mdi:upload",
value_fn=_retrieve_gb_sent_state,
),
FritzSensorEntityDescription(
key="gb_received",
name="GB received",
state_class=STATE_CLASS_TOTAL_INCREASING,
native_unit_of_measurement=DATA_GIGABYTES,
icon="mdi:download",
value_fn=_retrieve_gb_received_state,
),
FritzSensorEntityDescription(
key="link_kb_s_sent",
name="Link Upload Throughput",
native_unit_of_measurement=DATA_RATE_KILOBITS_PER_SECOND,
icon="mdi:upload",
value_fn=_retrieve_link_kb_s_sent_state,
connection_type=DSL_CONNECTION,
),
FritzSensorEntityDescription(
key="link_kb_s_received",
name="Link Download Throughput",
native_unit_of_measurement=DATA_RATE_KILOBITS_PER_SECOND,
icon="mdi:download",
value_fn=_retrieve_link_kb_s_received_state,
connection_type=DSL_CONNECTION,
),
FritzSensorEntityDescription(
key="link_noise_margin_sent",
name="Link Upload Noise Margin",
native_unit_of_measurement=SIGNAL_STRENGTH_DECIBELS,
icon="mdi:upload",
value_fn=_retrieve_link_noise_margin_sent_state,
connection_type=DSL_CONNECTION,
),
FritzSensorEntityDescription(
key="link_noise_margin_received",
name="Link Download Noise Margin",
native_unit_of_measurement=SIGNAL_STRENGTH_DECIBELS,
icon="mdi:download",
value_fn=_retrieve_link_noise_margin_received_state,
connection_type=DSL_CONNECTION,
),
FritzSensorEntityDescription(
key="link_attenuation_sent",
name="Link Upload Power Attenuation",
native_unit_of_measurement=SIGNAL_STRENGTH_DECIBELS,
icon="mdi:upload",
value_fn=_retrieve_link_attenuation_sent_state,
connection_type=DSL_CONNECTION,
),
FritzSensorEntityDescription(
key="link_attenuation_received",
name="Link Download Power Attenuation",
native_unit_of_measurement=SIGNAL_STRENGTH_DECIBELS,
icon="mdi:download",
value_fn=_retrieve_link_attenuation_received_state,
connection_type=DSL_CONNECTION,
),
)
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up entry."""
_LOGGER.debug("Setting up FRITZ!Box sensors")
fritzbox_tools: FritzBoxTools = hass.data[DOMAIN][entry.entry_id]
if (
not fritzbox_tools.connection
or "WANIPConn1" not in fritzbox_tools.connection.services
):
# Only routers are supported at the moment
return
dsl: bool = False
try:
dslinterface = await hass.async_add_executor_job(
fritzbox_tools.connection.call_action,
"WANDSLInterfaceConfig:1",
"GetInfo",
)
dsl = dslinterface["NewEnable"]
except (
FritzInternalError,
FritzActionError,
FritzActionFailedError,
FritzServiceError,
):
pass
entities = [
FritzBoxSensor(fritzbox_tools, entry.title, description)
for description in SENSOR_TYPES
if dsl or description.connection_type != DSL_CONNECTION
]
async_add_entities(entities, True)
class FritzBoxSensor(FritzBoxBaseEntity, SensorEntity):
"""Define FRITZ!Box connectivity class."""
entity_description: FritzSensorEntityDescription
def __init__(
self,
fritzbox_tools: FritzBoxTools,
device_friendly_name: str,
description: FritzSensorEntityDescription,
) -> None:
"""Init FRITZ!Box connectivity class."""
self.entity_description = description
self._last_device_value: str | None = None
self._attr_available = True
self._attr_name = f"{device_friendly_name} {description.name}"
self._attr_unique_id = f"{fritzbox_tools.unique_id}-{description.key}"
super().__init__(fritzbox_tools, device_friendly_name)
def update(self) -> None:
"""Update data."""
_LOGGER.debug("Updating FRITZ!Box sensors")
try:
status: FritzStatus = self._fritzbox_tools.fritz_status
self._attr_available = True
except FritzConnectionException:
_LOGGER.error("Error getting the state from the FRITZ!Box", exc_info=True)
self._attr_available = False
return
self._attr_native_value = (
self._last_device_value
) = self.entity_description.value_fn(status, self._last_device_value)
| 2.046875 | 2 |
Ifc/IfcBase.py | gsimon75/IFC_parser | 28 | 1953 | from Ifc.ClassRegistry import ifc_class, ifc_abstract_class, ifc_fallback_class
@ifc_abstract_class
class IfcEntity:
"""
Generic IFC entity, only for subclassing from it
"""
def __init__(self, rtype, args):
"""
rtype: Resource type
args: Arguments in *reverse* order, so you can just args.pop() from it
"""
self.rtype = rtype
def __str__(self):
return self.rtype
def __json__(self):
return {'rtype': self.rtype}
@ifc_fallback_class
class IfcGenericEntity(IfcEntity):
"""
Generic IFC entity: type and args
"""
def __init__(self, rtype, args):
IfcEntity.__init__(self, rtype, args)
self.args = args
self.args.reverse()
def __str__(self):
return "Gen<{sup}>{a}".format(
sup=IfcEntity.__str__(self),
a=self.args)
@ifc_class
class IfcScalarValue(IfcEntity):
def __init__(self, rtype, args):
IfcEntity.__init__(self, rtype, args)
self.value = args.pop()
def __str__(self):
return str(self.value)
@ifc_class
class BOOLEAN(IfcScalarValue):
pass
@ifc_class
class REAL(IfcScalarValue):
pass
@ifc_class
class BINARY(IfcScalarValue):
pass
@ifc_class
class INTEGER(IfcScalarValue):
pass
@ifc_class
class NUMBER(IfcScalarValue):
pass
@ifc_class
class STRING(IfcScalarValue):
pass
@ifc_class
class LOGICAL(IfcScalarValue):
pass
class Omitted:
"""
    Marked with '*', it states that some supertype had defined that attribute, but in the subtype it is a derived
    (calculated) value, so it no longer makes sense to explicitly assign a value to it.
"""
# TODO: Haven't tried if it can be handled 'just as expected'
def __init__(self):
pass
def __str__(self):
return "<omitted>"
def __json__(self):
return None
# class-level, enough to reference, no need to create multiple instances (doesn't hurt though)
omitted = Omitted()
class Reference:
"""
Refers to another entity by its index
"""
def __init__(self, index):
self.index = index
def __str__(self):
return "<#{idx}>".format(idx=self.index)
def __json__(self):
return {'ref': self.index}
class EnumValue:
"""
Item from some set of enumerated values.
"""
def __init__(self, value):
self.value = value
def __str__(self):
return "<.{val}.>".format(val=self.value)
def __json__(self):
return self.value
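# Illustrative mapping (editor's sketch; the STEP record below is hypothetical
# and the exact wiring depends on the parser feeding these wrappers): a line
# such as
#
#   #12 = IFCCARTESIANPOINT((0., 0., 0.));
#
# would typically arrive here as an entity whose args hold three REAL values,
# while a field written as '#12' elsewhere becomes Reference(12) and an
# enumeration token such as .ELEMENT. becomes EnumValue('ELEMENT').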
@ifc_class
class STEPHeader(IfcEntity):
def __init__(self):
IfcEntity.__init__(self, "STEPHeader", [])
self.fields = {}
def add(self, e):
self.fields[e.rtype] = e
def __str__(self):
return "STEPHeader({f})".format(f=", ".join(map(lambda f: "{n}: {v}".format(n=f[0], v=str(f[1])), self.fields.iteritems())))
# vim: set sw=4 ts=4 et:
| 2.703125 | 3 |
middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/utils/data_collector.py | xe1gyq/stx-utils | 0 | 1954 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2016 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import logging
import os
from io_monitor.constants import DOMAIN
from io_monitor.utils.data_window import DataCollectionWindow
LOG = logging.getLogger(DOMAIN)
class DeviceDataCollector(object):
# Moving average windows
MA_WINDOW_SMA = 0
MA_WINDOW_MED = 1
MA_WINDOW_LAR = 2
# Device status
STATUS_NORMAL = "N"
STATUS_BUILDING = "B"
STATUS_CONGESTED = "L"
# Data tracked
DATA_IOPS = "iops"
DATA_AWAIT = "await"
def __init__(self, device_node, data_elements,
size_sma, size_med, size_lar):
self.node = device_node
if os.path.exists('/sys/block/' + self.node + '/dm/name'):
self.name = open('/sys/block/' + self.node + '/dm/name',
'r').read().rstrip()
else:
self.name = self.node
self.data_dict = {}
self.data_caps = {self.DATA_AWAIT: -1, self.DATA_IOPS: -1}
self.timestamp = None
self.congestion_status = self.STATUS_NORMAL
self.congestion_await_minimal_spike = -1
self.congestion_await_sustained = -1
for element in data_elements:
self.data_dict.update({element: [
DataCollectionWindow(size_sma, stuck_data_override=True),
DataCollectionWindow(size_med, stuck_data_override=True),
DataCollectionWindow(size_lar, stuck_data_override=True)]})
def update_congestion_status(self):
# Bail if threshold is not set
if self.congestion_await_sustained == -1:
return
ma_sma = self.get_average(self.DATA_AWAIT, self.MA_WINDOW_SMA)
ma_med = self.get_average(self.DATA_AWAIT, self.MA_WINDOW_MED)
ma_lar = self.get_average(self.DATA_AWAIT, self.MA_WINDOW_LAR)
# Set the congestion status based on await moving average
if self.congestion_status is self.STATUS_NORMAL:
if ma_sma > self.congestion_await_sustained:
self.congestion_status = self.STATUS_BUILDING
if self.congestion_status is self.STATUS_BUILDING:
if ma_lar > self.congestion_await_sustained:
self.congestion_status = self.STATUS_CONGESTED
                LOG.warning("Node %s (%s) is experiencing high await times."
                            % (self.node, self.name))
elif ma_sma < self.congestion_await_sustained:
self.congestion_status = self.STATUS_NORMAL
if self.congestion_status is self.STATUS_CONGESTED:
if ma_med < self.congestion_await_sustained:
self.congestion_status = self.STATUS_BUILDING
def update_data(self, ts, element, value):
self.timestamp = ts
# LOG.debug("%s: e = %s, v= %f" % (self.node, element, value))
for w in [self.MA_WINDOW_SMA,
self.MA_WINDOW_MED,
self.MA_WINDOW_LAR]:
self.data_dict[element][w].update(value, self.data_caps[element])
def get_latest(self, element):
if element not in self.data_dict:
LOG.error("Error: invalid element requested = %s" % element)
return 0
return self.data_dict[element][self.MA_WINDOW_SMA].get_latest()
def get_average(self, element, window):
if window not in [self.MA_WINDOW_SMA,
self.MA_WINDOW_MED,
self.MA_WINDOW_LAR]:
LOG.error("WindowError: invalid window requested = %s" % window)
return 0
if element not in self.data_dict:
LOG.error("Error: invalid element requested = %s" % element)
return 0
return self.data_dict[element][window].get_average()
def is_data_stale(self, ts):
return not (ts == self.timestamp)
def get_congestion_status(self, debug=False):
if debug:
ma_sma = self.get_average(self.DATA_AWAIT, self.MA_WINDOW_SMA)
ma_med = self.get_average(self.DATA_AWAIT, self.MA_WINDOW_MED)
ma_lar = self.get_average(self.DATA_AWAIT, self.MA_WINDOW_LAR)
LOG.debug("%s [ %6.2f %6.2f %6.2f ] %d" %
(self.node, ma_sma, ma_med, ma_lar,
self.congestion_await_sustained))
return self.congestion_status
def set_data_caps(self, element, cap):
if element in self.data_caps:
self.data_caps[element] = cap
def set_congestion_thresholds(self, await_minimal_spike,
await_sustained_congestion):
self.congestion_await_minimal_spike = await_minimal_spike
self.congestion_await_sustained = await_sustained_congestion
def get_element_windows_avg_list(self, element):
return [self.get_average(element, self.MA_WINDOW_SMA),
self.get_average(element, self.MA_WINDOW_MED),
self.get_average(element, self.MA_WINDOW_LAR)]
def get_element_windows_avg_string(self, element):
return "%s [ %9.2f, %9.2f, %9.2f ]" % (
element,
self.get_average(element, self.MA_WINDOW_SMA),
self.get_average(element, self.MA_WINDOW_MED),
self.get_average(element, self.MA_WINDOW_LAR))
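# Illustrative sketch (not part of the original module): feeding await samples into a
# collector and reading back the congestion state. The device node, window sizes and
# thresholds below are hypothetical values chosen only for this example.
def _example_collector_usage():
    dc = DeviceDataCollector("sda",
                             [DeviceDataCollector.DATA_AWAIT,
                              DeviceDataCollector.DATA_IOPS],
                             size_sma=10, size_med=60, size_lar=300)
    dc.set_congestion_thresholds(await_minimal_spike=200,
                                 await_sustained_congestion=100)
    dc.update_data(ts=1.0, element=DeviceDataCollector.DATA_AWAIT, value=150.0)
    dc.update_congestion_status()
    return dc.get_congestion_status()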
| 2.046875 | 2 |
examples/language-modeling/debias_lm_hps_tune.py | SoumyaBarikeri/transformers | 1 | 1955 | <filename>examples/language-modeling/debias_lm_hps_tune.py
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for language modeling on a text file (GPT, GPT-2, CTRL, BERT, RoBERTa, XLNet).
GPT, GPT-2 and CTRL are fine-tuned using a causal language modeling (CLM) loss. BERT and RoBERTa are fine-tuned
using a masked language modeling (MLM) loss. XLNet is fine-tuned using a permutation language modeling (PLM) loss.
"""
import logging
import math
import os
from dataclasses import dataclass, field
from typing import Optional
import torch
from transformers.optimization import AdamW, get_linear_schedule_with_warmup
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
HfArgumentParser,
# LineByLineTextDatasetLabels,
LineByLineTextDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
import ray
from ray import tune
from transformers.file_utils import is_torch_tpu_available
from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR
from ray.tune.schedulers import PopulationBasedTraining
from ray.tune import CLIReporter
# if is_wandb_available():
# import wandb
ray.shutdown()
ray.init(log_to_driver=True, ignore_reinit_error=True)
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
"""
model_name_or_path: Optional[str] = field(
default=None,
metadata={
"help": "The model checkpoint for weights initialization. Leave None if you want to train a model from scratch."
},
)
model_type: Optional[str] = field(
default=None,
metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
)
force_pad_token: bool = field(
default=False,
metadata={
"help": "Whether to force the addition of a padding token to tokenizer that does not already have one."
},
)
debiasing_head: Optional[str] = field(
default=None, metadata={"help": "The type of de-biasing head to be used"}
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
train_data_file: Optional[str] = field(
default=None, metadata={"help": "The input training data file (a text file)."}
)
eval_data_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
)
line_by_line: bool = field(
default=False,
metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
)
mlm: bool = field(
default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
)
mlm_probability: float = field(
default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
)
plm_probability: float = field(
default=1 / 6,
metadata={
"help": "Ratio of length of a span of masked tokens to surrounding context length for permutation language modeling."
},
)
max_span_length: int = field(
default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
)
block_size: int = field(
default=-1,
metadata={
"help": "Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens)."
},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
def get_dataset(
args: DataTrainingArguments,
tokenizer: PreTrainedTokenizer,
evaluate: bool = False,
cache_dir: Optional[str] = None,
):
file_path = args.eval_data_file if evaluate else args.train_data_file
if args.line_by_line:
return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
# return LineByLineTextDatasetLabels(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
else:
return TextDataset(
tokenizer=tokenizer,
file_path=file_path,
block_size=args.block_size,
overwrite_cache=args.overwrite_cache,
cache_dir=cache_dir,
)
class TuneTransformerTrainer(Trainer):
def create_optimizer_and_scheduler(self, num_training_steps: int):
if self.optimizer is None:
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": self.args.weight_decay,
},
{
"params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
self.optimizer = AdamW(
optimizer_grouped_parameters,
lr=self.args.learning_rate,
betas=(self.args.adam_beta1, self.args.adam_beta2),
eps=self.args.adam_epsilon,
)
if self.lr_scheduler is None:
self.lr_scheduler = get_linear_schedule_with_warmup(
self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
)
        return self.optimizer, self.lr_scheduler
def evaluate(self,
eval_dataset= None):
eval_dataloader = self.get_eval_dataloader(eval_dataset)
output = self.prediction_loop(
eval_dataloader, description="Evaluation")
self.log(output.metrics)
self.save_state()
tune.report(**output.metrics)
return output.metrics
def save_state(self):
with tune.checkpoint_dir(step=self.global_step) as checkpoint_dir:
self.args.output_dir = checkpoint_dir
# This is the directory name that Huggingface requires.
output_dir = os.path.join(
self.args.output_dir,
f"{PREFIX_CHECKPOINT_DIR}-{self.global_step}")
self.save_model(output_dir)
self.current_optimizer, self.current_scheduler = self.create_optimizer_and_scheduler(360)
if self.is_world_master():
torch.save(self.current_optimizer.state_dict(),
os.path.join(output_dir, "optimizer.pt"))
torch.save(self.current_scheduler.state_dict(),
os.path.join(output_dir, "scheduler.pt"))
def recover_checkpoint(tune_checkpoint_dir, model_name=None):
if tune_checkpoint_dir is None or len(tune_checkpoint_dir) == 0:
return model_name
# Get subdirectory used for Huggingface.
subdirs = [
os.path.join(tune_checkpoint_dir, name)
for name in os.listdir(tune_checkpoint_dir)
if os.path.isdir(os.path.join(tune_checkpoint_dir, name))
]
# There should only be 1 subdir.
assert len(subdirs) == 1, subdirs
return subdirs[0]
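# Illustrative sketch (not part of the original script): expected behaviour of
# recover_checkpoint(). The model name and paths below are hypothetical.
def _example_recover_checkpoint():
    # No Tune checkpoint yet -> fall back to the configured model name.
    assert recover_checkpoint(None, model_name="gpt2") == "gpt2"
    # With a populated checkpoint dir containing exactly one subdirectory
    # (e.g. ".../checkpoint-360"), that subdirectory's path is returned instead.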
# def train_transformer(config, checkpoint_dir=None):
# train_dataset, eval_dataset = get_datasets(config)
#
# training_args = TrainingArguments(
# output_dir=tune.get_trial_dir(),
# learning_rate=config["learning_rate"],
# do_train=True,
# do_eval=True,
# evaluate_during_training=True,
# # Run eval after every epoch.
# eval_steps=(len(train_dataset) // config["per_gpu_train_batch_size"]) +
# 1,
# # We explicitly set save to 0, and do checkpointing in evaluate instead
# save_steps=0,
# num_train_epochs=config["num_epochs"],
# max_steps=config["max_steps"],
# per_device_train_batch_size=config["per_gpu_train_batch_size"],
# per_device_eval_batch_size=config["per_gpu_val_batch_size"],
# warmup_steps=0,
# weight_decay=config["weight_decay"],
# logging_dir="./logs",
# )
#
# model_name_or_path = recover_checkpoint(checkpoint_dir, config["model_name"])
# # num_labels = glue_tasks_num_labels[config["task_name"]]
#
# config = AutoConfig.from_pretrained(
# model_name_or_path,
# num_labels=num_labels,
# finetuning_task=task_name,
# )
# model = AutoModelForSequenceClassification.from_pretrained(
# model_name_or_path,
# config=config,
# )
#
# # Use our modified TuneTransformerTrainer
# tune_trainer = TuneTransformerTrainer(
# model=model,
# args=training_args,
# train_dataset=train_dataset,
# eval_dataset=eval_dataset,
# compute_metrics=utils.build_compute_metrics_fn(task_name),
# )
# tune_trainer.train(model_name_or_path)
def train_transformer(config, checkpoint_dir=None):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
# parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
# model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
"or remove the --do_eval argument."
)
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
training_args.local_rank,
training_args.device,
training_args.n_gpu,
bool(training_args.local_rank != -1),
training_args.fp16,
)
logger.info("Training/evaluation parameters %s", training_args)
# Set seed
set_seed(training_args.seed)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
config_in = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
elif model_args.model_name_or_path:
config_in = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
else:
config_in = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
if model_args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
elif model_args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another script, save it,"
"and load it from here, using --tokenizer_name"
)
if tokenizer.pad_token_id is None:
if model_args.force_pad_token:
            # See PR 3388. Some tokenizers don't have pad tokens, which causes errors at the encoding step in the collate_fn.
            # We give here the option to force the addition of a pad token. The attention mask is used to ignore this token
            # when feeding to the model.
tokenizer.add_special_tokens({"pad_token": "<pad>"})
else:
logger.warning(
"Attempting to train a model whose tokenizer has no padding token. This may result in errors in the encoding step. Set the --force_pad_token flag to fix this."
)
model_name_or_path = recover_checkpoint(checkpoint_dir, config["model_name"])
if model_args.model_name_or_path:
model = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config_in,
cache_dir=model_args.cache_dir,
)
else:
logger.info("Training new model from scratch")
model = AutoModelWithLMHead.from_config(config_in)
special_tokens_dict = {'bos_token': '<bos>', 'eos_token': '<eos>', 'pad_token': '<pad>'}
num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)
model.resize_token_embeddings(len(tokenizer))
if config_in.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
"BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"
"--mlm flag (masked language modeling)."
)
if data_args.block_size <= 0:
data_args.block_size = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
data_args.block_size = min(data_args.block_size, tokenizer.max_len)
# Get datasets
train_dataset = (
get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
)
# print('train_dataset {}'.format(train_dataset.examples[0]))
eval_dataset = (
get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
if training_args.do_eval
else None
)
if config_in.model_type == "xlnet":
data_collator = DataCollatorForPermutationLanguageModeling(
tokenizer=tokenizer,
plm_probability=data_args.plm_probability,
max_span_length=data_args.max_span_length,
)
else:
data_collator = DataCollatorForLanguageModeling(
tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability
)
training_args = TrainingArguments(
output_dir=tune.get_trial_dir(),
learning_rate=config["learning_rate"],
do_train=True,
do_eval=True,
evaluate_during_training=True,
# Run eval after every epoch.
eval_steps=(len(train_dataset) // config["per_gpu_train_batch_size"]) + 1,
# We explicitly set save to 0, and do checkpointing in evaluate instead
save_steps=0,
num_train_epochs=config["num_epochs"],
max_steps=config["max_steps"],
per_device_train_batch_size=config["per_gpu_train_batch_size"],
per_device_eval_batch_size=config["per_gpu_val_batch_size"],
warmup_steps=0,
weight_decay=config["weight_decay"],
logging_dir="./logs")
# Initialize our Trainer
tune_trainer = TuneTransformerTrainer(
model=model,
args=training_args,
data_collator=data_collator,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
prediction_loss_only=True,
# compute_metrics=compute_metrics,
)
if training_args.do_train:
model_path = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
else None
)
tune_trainer.train(model_path=model_path)
if __name__ == "__main__":
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
config = {
# These 3 configs below were defined earlier
"model_name": model_args.model_name_or_path,
"task_name": "CLM",
"data_dir": "",
"per_gpu_val_batch_size": 32,
"per_gpu_train_batch_size": tune.choice([16, 32, 64]),
"learning_rate": tune.uniform(1e-5, 5e-5),
"weight_decay": tune.uniform(0.0, 0.3),
"num_epochs": tune.choice([2, 3, 4, 5]),
"max_steps": -1, # We use num_epochs instead.
"wandb": {
"project": "pbt_transformers",
"reinit": True,
"allow_val_change": True
}
}
logger.info(config)
scheduler = PopulationBasedTraining(
time_attr="training_iteration",
metric="eval_loss",
mode="min",
perturbation_interval=2,
hyperparam_mutations={
"weight_decay": lambda: tune.uniform(0.0, 0.3).func(None),
"learning_rate": lambda: tune.uniform(1e-5, 5e-5).func(None),
"per_gpu_train_batch_size": [16, 32, 64],
})
reporter = CLIReporter(
parameter_columns={
"weight_decay": "w_decay",
"learning_rate": "lr",
"per_gpu_train_batch_size": "train_bs/gpu",
"num_epochs": "num_epochs"
},
metric_columns=[
"eval_acc", "eval_loss", "epoch", "training_iteration"
])
analysis = tune.run(
train_transformer,
resources_per_trial={
"cpu": 1,
"gpu": 1
},
config=config,
num_samples=3,
scheduler=scheduler,
keep_checkpoints_num=3,
checkpoint_score_attr="training_iteration",
progress_reporter=reporter,
local_dir="./ray_results/",
name="tune_trans")
best_config = analysis.get_best_config(metric="eval_loss", mode="min")
print(best_config)
| 1.921875 | 2 |
checksums.py | pgp/RootHelperClientTestInteractions | 1 | 1956 | from net_common import *
import struct
import sys
def getDirHashOpts(withNames=False,
ignoreThumbsFiles=True,
ignoreUnixHiddenFiles=True,
ignoreEmptyDirs=True):
return bytearray([((1 if withNames else 0) +
(2 if ignoreThumbsFiles else 0) +
(4 if ignoreUnixHiddenFiles else 0) +
(8 if ignoreEmptyDirs else 0))])
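# Illustrative note (not part of the original script): the options byte packs four flags
# into the low bits -- withNames=1, ignoreThumbsFiles=2, ignoreUnixHiddenFiles=4,
# ignoreEmptyDirs=8. For example:
# getDirHashOpts() == bytearray([14]) # defaults: 2 + 4 + 8
# getDirHashOpts(withNames=True, ignoreUnixHiddenFiles=False) == bytearray([11]) # 1 + 2 + 8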
if __name__ == "__main__":
sock = get_connected_local_socket()
path = encodeString('/dev/shm/exampleDir')
# path = encodeString('/dev/null')
sock.sendall(bytearray(b'\x0A')) # HASH request
# sock.sendall(bytearray(b'\x01')) # choose MD5 algorithm
sock.sendall(bytearray(b'\x06')) # choose SHA3-224 algorithm
sock.sendall(getDirHashOpts(withNames=True,ignoreUnixHiddenFiles=False)) # send dirHashOpts byte (unused for regular files)
sock.sendall(struct.pack("@H", len(path))) # len of path as unsigned short
sock.sendall(path)
resp = sock.recv(1) # response first byte: \x00 OK or \xFF ERROR
if resp != b'\x00':
print("Error byte received, errno is:", struct.unpack("@i", sock.recv(4))[0])
sys.exit(0)
# print(toHex(sock.recv(16))) # 128 bit (16 byte) md5 digest size
print(toHex(sock.recv(28))) # 224 bit (28 byte) sha3-224 digest size
sock.close()
| 2.078125 | 2 |
investment_report/migrations/0020_auto_20180911_1005.py | uktrade/pir-api | 1 | 1957 | <filename>investment_report/migrations/0020_auto_20180911_1005.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-09-11 10:05
from __future__ import unicode_literals
import config.s3
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('investment_report', '0019_auto_20180820_1304'),
]
operations = [
migrations.AddField(
model_name='contact',
name='website_href',
field=models.URLField(default='https://invest.great.gov.uk/contact/', help_text='Custom link for website (used for tracking)', max_length=255),
preserve_default=False,
)
]
| 1.390625 | 1 |
tests/test-scripts/threadpools.py | whalesalad/filprofiler | 521 | 1958 | <gh_stars>100-1000
"""Validate that number of threads in thread pools is set to 1."""
import numexpr
import blosc
import threadpoolctl
# APIs that return previous number of threads:
assert numexpr.set_num_threads(2) == 1
assert blosc.set_nthreads(2) == 1
for d in threadpoolctl.threadpool_info():
assert d["num_threads"] == 1, d
| 2.296875 | 2 |
scripts/viewStokespat.py | David-McKenna/AntPat | 5 | 1959 | <gh_stars>1-10
#!/usr/bin/env python
"""A simple viewer for Stokes patterns based on two far-field pattern files.
(Possibly based on a single FF pattern file if it contains two requests: one for each
polarization channel.)"""
import os
import argparse
import numpy
import matplotlib.pyplot as plt
from antpat.reps.sphgridfun.tvecfun import TVecFields
from antpat.radfarfield import RadFarField
from antpat.dualpolelem import DualPolElem
FEKOsuffix = 'ffe'
GRASPsuffix = 'swe'
NECsuffix = 'out'
def Jones2Stokes(Jones):
"""Convert Jones matrix to Stokes vector. This assumes dual-pol antenna receiving unpolarized unit
valued radiation i.e. incoming Stokes = (1,0,0,0)."""
brightmat = numpy.matmul(Jones, numpy.swapaxes(numpy.conjugate(Jones),-1,-2))
StokesI = numpy.real(brightmat[...,0,0]+brightmat[...,1,1])
StokesQ = numpy.real(brightmat[...,0,0]-brightmat[...,1,1])
StokesU = numpy.real(brightmat[...,0,1]+brightmat[...,1,0])
StokesV = numpy.imag(brightmat[...,0,1]-brightmat[...,1,0])
return StokesI, StokesQ, StokesU, StokesV
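# Illustrative sketch (not part of the original script): for an identity Jones matrix
# (ideal dual-pol antenna), each channel contributes unit power, so the received
# Stokes vector is (2, 0, 0, 0).
def _example_jones2stokes():
    jones = numpy.eye(2, dtype=complex)
    return Jones2Stokes(jones)  # (2.0, 0.0, 0.0, 0.0)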
def plotStokes_fromFEKOfiles(p_chan_file, q_chan_file, freq):
(tvf_p, tvf_q) = (TVecFields(), TVecFields())
tvf_p.load_ffe(p_chan_file)
tvf_q.load_ffe(q_chan_file)
(ant_p, ant_q) = (RadFarField(tvf_p), RadFarField(tvf_q))
(p_chan_name, q_chan_name) = (os.path.basename(p_chan_file), os.path.basename(q_chan_file))
(ant_p.name, ant_q.name) = (p_chan_name, q_chan_name)
dualpolAnt = DualPolElem(ant_p, ant_q)
THETA, PHI, Jones = dualpolAnt.getJonesPat(freq)
(StokesI, StokesQ, StokesU, StokesV) = Jones2Stokes(Jones)
x = THETA*numpy.cos(PHI)
y = THETA*numpy.sin(PHI)
#x= THETA
#y=PHI
xyNames = ('theta*cos(phi)','theta*sin(phi)')
fig = plt.figure()
ax1 = fig.add_subplot(221)
plt.pcolormesh(x, y, 10*numpy.log10(StokesI), label="I")
#plt.pcolormesh(x, y, StokesI, label="I")
plt.colorbar()
ax1.set_title('I (dB)')
ax2 = fig.add_subplot(222)
plt.pcolormesh(x, y, StokesQ/StokesI, label="Q")
plt.colorbar()
ax2.set_title('Q/I')
ax3 = fig.add_subplot(223)
plt.pcolormesh(x, y, StokesU/StokesI, label="U")
plt.colorbar()
ax3.set_title('U/I')
ax4 = fig.add_subplot(224)
plt.pcolormesh(x, y, StokesV/StokesI, label="V")
plt.colorbar()
ax4.set_title('V/I')
fig.suptitle('Stokes (azimuthal-equidistant proj) @ ' +str(freq/1e9)+' GHz')
plt.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("p_chan_file",
help='Filename of polarization channel p')
parser.add_argument("q_chan_file",
                        help='Filename of polarization channel q')
parser.add_argument("freq", nargs='?', type=float,
help="Frequency in Hertz")
args = parser.parse_args()
if args.p_chan_file.endswith(FEKOsuffix):
plotStokes_fromFEKOfiles(args.p_chan_file, args.q_chan_file, args.freq)
elif args.p_chan_file.endswith(GRASPsuffix):
print("Not implemented yet.")
elif args.p_chan_file.endswith(NECsuffix):
print("Not implemented yet.")
else:
print("Far-field pattern file type not known")
exit(1)
| 2.546875 | 3 |
utils.py | lingjiao10/Facial-Expression-Recognition.Pytorch | 0 | 1960 | '''Some helper functions for PyTorch, including:
- progress_bar: progress bar mimic xlua.progress.
- set_lr : set the learning rate
- clip_gradient : clip gradient
'''
import os
import sys
import time
import math
import torch
import torch.nn as nn
import torch.nn.init as init
from torch.autograd import Function
# Get the number of rows and columns of the console/terminal
if sys.platform == 'win32':
term_width = 80
else:
print('###', os.popen('stty size', 'r').read())
_, term_width = os.popen('stty size', 'r').read().split()
term_width = int(term_width)
TOTAL_BAR_LENGTH = 30.
last_time = time.time()
begin_time = last_time
#[==>........ 19/225 ...........] | Loss: 1.961 | Acc: 22.000% (537/2432)
def progress_bar(current, total, msg=None):
global last_time, begin_time
if current == 0:
begin_time = time.time() # Reset for new bar.
cur_len = int(TOTAL_BAR_LENGTH*current/total)
rest_len = int(TOTAL_BAR_LENGTH - cur_len) - 1
sys.stdout.write(' [')
for i in range(cur_len):
sys.stdout.write('=')
sys.stdout.write('>')
for i in range(rest_len):
sys.stdout.write('.')
sys.stdout.write(']')
cur_time = time.time()
step_time = cur_time - last_time
last_time = cur_time
tot_time = cur_time - begin_time
L = []
if msg:
L.append(' | ' + msg)
msg = ''.join(L)
sys.stdout.write(msg)
for i in range(term_width-int(TOTAL_BAR_LENGTH)-len(msg)-3):
sys.stdout.write(' ')
# Go back to the center of the bar.
for i in range(term_width-int(TOTAL_BAR_LENGTH/2)+2):
sys.stdout.write('\b')
sys.stdout.write(' %d/%d ' % (current+1, total))
if current < total-1:
sys.stdout.write('\r')
else:
sys.stdout.write('\n')
sys.stdout.flush()
def set_lr(optimizer, lr):
for group in optimizer.param_groups:
group['lr'] = lr
def clip_gradient(optimizer, grad_clip):
for group in optimizer.param_groups:
#print(group['params'])
for param in group['params']:
param.grad.data.clamp_(-grad_clip, grad_clip)
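# Illustrative sketch (not part of the original module): typical call order in a training
# loop -- set the learning rate, backpropagate, clip gradients, then step the optimizer.
# The optimizer, loss and hyperparameter values here are hypothetical.
def _example_training_step(optimizer, loss, lr=0.01, grad_clip=0.1):
    set_lr(optimizer, lr)
    loss.backward()
    clip_gradient(optimizer, grad_clip)
    optimizer.step()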
| 2.859375 | 3 |
string-method/src/analysis/FE_analysis/index_converter.py | delemottelab/gpcr-string-method-2019 | 0 | 1961 | from __future__ import absolute_import, division, print_function
import logging
import sys
logging.basicConfig(
stream=sys.stdout,
level=logging.DEBUG,
format='%(asctime)s %(name)s-%(levelname)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
import numpy as np
import utils
logger = logging.getLogger("indexconverter")
class IndexConverter(object):
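    """Maps between flat bin indices and n-dimensional grid indices for a hypercubic
    grid with `ngrid` points (hence `ngrid - 1` bins) along each of `ndim` dimensions."""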
def __init__(self, ndim, ngrid):
self.ndim = ndim
self.ngrid = ngrid
self._modulus = [(ngrid - 1) ** (ndim - j - 1) for j in range(ndim)]
self._zerodim = np.zeros((self.ndim,))
self.nbins = int(np.rint((ngrid - 1) ** ndim))
def convert_to_vector(self, grid):
if grid.shape[0] != self.ngrid - 1:
raise Exception("Wrong dimension of grid. Expect length fo %s got %s" % (self.ngrid - 1, grid.shape[0]))
vector = np.empty((self.nbins,))
for bin_idx in range(self.nbins):
vector[bin_idx] = grid[tuple(self.convert_to_grid_idx(bin_idx))]
return vector
def convert_to_grid(self, vector):
grid_shape = tuple(np.zeros(self.ndim).astype(int) + (self.ngrid - 1))
if len(vector.shape) > 1:
grids = np.empty((len(vector),) + grid_shape)
for idx, v in enumerate(vector):
grids[idx] = self.convert_to_grid(v)
return grids
else:
grid = np.zeros(grid_shape)
for idx in range(len(vector)):
grid[tuple(self.convert_to_grid_idx(idx))] = vector[idx]
return grid
def convert_to_grid_idx(self, bin_idx):
if bin_idx >= self.nbins or bin_idx < 0:
print(self.nbins, self.ndim, self.nbins ** self.ndim)
raise Exception("Invalid index %s. You are probably outside the grid..." % bin_idx)
grid_idx = ((self._zerodim + bin_idx) / self._modulus) % (self.ngrid - 1)
return grid_idx.astype(int)
def convert_to_bin_idx(self, grid_idx):
bin_idx = utils.rint(np.sum(grid_idx * self._modulus))
if bin_idx >= self.nbins or bin_idx < 0:
raise Exception(
"Invalid bin index %s. You are probably outside the grid. Size:%s" % (bin_idx, self.nbins))
return bin_idx
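# Illustrative sketch (not part of the original module): round-tripping between flat bin
# indices and grid indices for a hypothetical 2D grid with 4 points (3 bins) per dimension.
def _example_index_converter():
    ic = IndexConverter(ndim=2, ngrid=4)
    grid_idx = ic.convert_to_grid_idx(5)  # -> array([1, 2])
    assert ic.convert_to_bin_idx(grid_idx) == 5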
| 2.265625 | 2 |
Ex029 Aula 11-Cores no Terminal.py | andersontmachado/ExerciciosPython | 1 | 1962 | <gh_stars>1-10
print('\033[7;30mOla mundo\033[m!!!')
| 1.5625 | 2 |
cirq-pasqal/cirq_pasqal/pasqal_device.py | pavoljuhas/Cirq | 1 | 1963 | # Copyright 2020 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import FrozenSet, Callable, List, Sequence, Any, Union, Dict
import numpy as np
import networkx as nx
import cirq
from cirq import _compat, GridQubit, LineQubit
from cirq.ops import NamedQubit
from cirq_pasqal import ThreeDQubit, TwoDQubit, PasqalGateset
@cirq.value.value_equality
class PasqalDevice(cirq.devices.Device):
"""A generic Pasqal device.
The most general of Pasqal devices, enforcing only restrictions expected to
be shared by all future devices. Serves as the parent class of all Pasqal
devices, but can also be used on its own for hosting a nearly unconstrained
device. When used as a circuit's device, the qubits have to be of the type
cirq.NamedQubit and assumed to be all connected, the idea behind it being
that after submission, all optimization and transpilation necessary for its
execution on the specified device are handled internally by Pasqal.
"""
def __init__(self, qubits: Sequence[cirq.Qid]) -> None:
"""Initializes a device with some qubits.
Args:
qubits (NamedQubit): Qubits on the device, exclusively unrelated to
a physical position.
Raises:
TypeError: If the wrong qubit type is provided.
ValueError: If the number of qubits is greater than the devices maximum.
"""
if len(qubits) > 0:
q_type = type(qubits[0])
for q in qubits:
if not isinstance(q, self.supported_qubit_type):
raise TypeError(
'Unsupported qubit type: {!r}. This device '
'supports qubit types: {}'.format(q, self.supported_qubit_type)
)
if not type(q) is q_type:
raise TypeError("All qubits must be of same type.")
if len(qubits) > self.maximum_qubit_number:
raise ValueError(
'Too many qubits. {} accepts at most {} '
'qubits.'.format(type(self), self.maximum_qubit_number)
)
self.gateset = PasqalGateset()
self.qubits = qubits
self._metadata = cirq.DeviceMetadata(
qubits, nx.from_edgelist([(a, b) for a in qubits for b in qubits if a != b])
)
# pylint: enable=missing-raises-doc
@property
def supported_qubit_type(self):
return (NamedQubit,)
@property
def maximum_qubit_number(self):
return 100
@property
def metadata(self):
return self._metadata
@_compat.deprecated(fix='Use metadata.qubit_set() if applicable.', deadline='v0.15')
def qubit_set(self) -> FrozenSet[cirq.Qid]:
return frozenset(self.qubits)
def qubit_list(self):
return [qubit for qubit in self.qubits]
def is_pasqal_device_op(self, op: cirq.Operation) -> bool:
if not isinstance(op, cirq.Operation):
raise ValueError('Got unknown operation:', op)
return op in self.gateset
def validate_operation(self, operation: cirq.Operation):
"""Raises an error if the given operation is invalid on this device.
Args:
operation: The operation to validate.
Raises:
ValueError: If the operation is not valid.
NotImplementedError: If the operation is a measurement with an invert
mask.
"""
if not isinstance(operation, cirq.GateOperation):
raise ValueError("Unsupported operation")
if not self.is_pasqal_device_op(operation):
raise ValueError(f'{operation.gate!r} is not a supported gate')
for qub in operation.qubits:
if not isinstance(qub, self.supported_qubit_type):
raise ValueError(
'{} is not a valid qubit for gate {!r}. This '
'device accepts gates on qubits of type: '
'{}'.format(qub, operation.gate, self.supported_qubit_type)
)
if qub not in self.metadata.qubit_set:
raise ValueError(f'{qub} is not part of the device.')
if isinstance(operation.gate, cirq.MeasurementGate):
if operation.gate.invert_mask != ():
raise NotImplementedError(
"Measurements on Pasqal devices don't support invert_mask."
)
def validate_circuit(self, circuit: 'cirq.AbstractCircuit') -> None:
"""Raises an error if the given circuit is invalid on this device.
A circuit is invalid if any of its moments are invalid or if there
is a non-empty moment after a moment with a measurement.
Args:
circuit: The circuit to validate
Raises:
ValueError: If the given circuit can't be run on this device
"""
super().validate_circuit(circuit)
# Measurements must be in the last non-empty moment
has_measurement_occurred = False
for moment in circuit:
if has_measurement_occurred:
if len(moment.operations) > 0:
raise ValueError("Non-empty moment after measurement")
for operation in moment.operations:
if isinstance(operation.gate, cirq.MeasurementGate):
has_measurement_occurred = True
def __repr__(self):
return f'pasqal.PasqalDevice(qubits={sorted(self.qubits)!r})'
def _value_equality_values_(self):
return self.qubits
def _json_dict_(self):
return cirq.protocols.obj_to_dict_helper(self, ['qubits'])
class PasqalVirtualDevice(PasqalDevice):
"""A Pasqal virtual device with qubits in 3d.
A virtual representation of a Pasqal device, enforcing the constraints
typically found in a physical device. The qubits can be positioned in 3d
space, although 2d layouts will be supported sooner and are thus
recommended. Only accepts qubits with physical placement.
"""
def __init__(
self, control_radius: float, qubits: Sequence[Union[ThreeDQubit, GridQubit, LineQubit]]
) -> None:
"""Initializes a device with some qubits.
Args:
control_radius: the maximum distance between qubits for a controlled
gate. Distance is measured in units of the coordinates passed
into the qubit constructor.
qubits: Qubits on the device, identified by their x, y, z position.
Must be of type ThreeDQubit, TwoDQubit, LineQubit or GridQubit.
Raises:
ValueError: if the wrong qubit type is provided or if invalid
parameter is provided for control_radius."""
super().__init__(qubits)
if not control_radius >= 0:
raise ValueError('Control_radius needs to be a non-negative float.')
if len(self.qubits) > 1:
if control_radius > 3.0 * self.minimal_distance():
raise ValueError(
'Control_radius cannot be larger than 3 times'
' the minimal distance between qubits.'
)
self.control_radius = control_radius
self.gateset = PasqalGateset(include_additional_controlled_ops=False)
self.controlled_gateset = cirq.Gateset(cirq.AnyIntegerPowerGateFamily(cirq.CZPowGate))
@property
def supported_qubit_type(self):
return (ThreeDQubit, TwoDQubit, GridQubit, LineQubit)
def validate_operation(self, operation: cirq.Operation):
"""Raises an error if the given operation is invalid on this device.
Args:
operation: the operation to validate
Raises:
ValueError: If the operation is not valid
"""
super().validate_operation(operation)
# Verify that a controlled gate operation is valid
if operation in self.controlled_gateset:
for p in operation.qubits:
for q in operation.qubits:
if self.distance(p, q) > self.control_radius:
raise ValueError(f"Qubits {p!r}, {q!r} are too far away")
def validate_moment(self, moment: cirq.Moment):
"""Raises an error if the given moment is invalid on this device.
Args:
moment: The moment to validate.
Raises:
ValueError: If the given moment is invalid.
"""
super().validate_moment(moment)
if len(moment) > 1:
for operation in moment:
if not isinstance(operation.gate, cirq.MeasurementGate):
raise ValueError("Cannot do simultaneous gates. Use cirq.InsertStrategy.NEW.")
def minimal_distance(self) -> float:
"""Returns the minimal distance between two qubits in qubits.
Args:
qubits: qubit involved in the distance computation
Raises:
ValueError: If the device has only one qubit
Returns:
The minimal distance between qubits, in spacial coordinate units.
"""
if len(self.qubits) <= 1:
raise ValueError("Two qubits to compute a minimal distance.")
return min([self.distance(q1, q2) for q1 in self.qubits for q2 in self.qubits if q1 != q2])
def distance(self, p: Any, q: Any) -> float:
"""Returns the distance between two qubits.
Args:
p: qubit involved in the distance computation
q: qubit involved in the distance computation
Raises:
ValueError: If p or q not part of the device
Returns:
The distance between qubits p and q.
"""
all_qubits = self.qubit_list()
if p not in all_qubits or q not in all_qubits:
raise ValueError("Qubit not part of the device.")
if isinstance(p, GridQubit):
return np.sqrt((p.row - q.row) ** 2 + (p.col - q.col) ** 2)
if isinstance(p, LineQubit):
return abs(p.x - q.x)
return np.sqrt((p.x - q.x) ** 2 + (p.y - q.y) ** 2 + (p.z - q.z) ** 2)
def __repr__(self):
return ('pasqal.PasqalVirtualDevice(control_radius={!r}, qubits={!r})').format(
self.control_radius, sorted(self.qubits)
)
def _value_equality_values_(self) -> Any:
return (self.control_radius, self.qubits)
def _json_dict_(self) -> Dict[str, Any]:
return cirq.protocols.obj_to_dict_helper(self, ['control_radius', 'qubits'])
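# Illustrative sketch (not part of the original module): building a small virtual device
# from a 2x2 grid of TwoDQubit positions and validating a CZ operation whose qubits lie
# within the control radius. The coordinates and radius are hypothetical, and CZ is
# assumed here to be part of the device gateset.
def _example_pasqal_virtual_device():
    qubits = [TwoDQubit(x, y) for x in range(2) for y in range(2)]
    device = PasqalVirtualDevice(control_radius=2.0, qubits=qubits)
    device.validate_operation(cirq.CZ(qubits[0], qubits[1]))
    return device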
@_compat.deprecated_class(
deadline='v0.16', fix='Use cirq.optimize_for_target_gateset(circuit, gateset=PasqalGateset()).'
)
class PasqalConverter(cirq.neutral_atoms.ConvertToNeutralAtomGates):
"""A gate converter for compatibility with Pasqal processors.
Modified version of ConvertToNeutralAtomGates, where a new 'convert' method
'pasqal_convert' takes the 'keep' function as an input.
"""
def pasqal_convert(
self, op: cirq.Operation, keep: Callable[[cirq.Operation], bool]
) -> List[cirq.Operation]:
def on_stuck_raise(bad):
return TypeError(
"Don't know how to work with {!r}. "
"It isn't a native PasqalDevice operation, "
"a 1 or 2 qubit gate with a known unitary, "
"or composite.".format(bad)
)
return cirq.protocols.decompose(
op,
keep=keep,
intercepting_decomposer=self._convert_one,
on_stuck_raise=None if self.ignore_failures else on_stuck_raise,
)
| 2.109375 | 2 |
command_line/show.py | huwjenkins/dials | 0 | 1964 | <reponame>huwjenkins/dials
import os
import sys
import numpy as np
import iotbx.phil
from cctbx import uctbx
from dxtbx.model.experiment_list import ExperimentListFactory
from scitbx.math import five_number_summary
import dials.util
from dials.array_family import flex
from dials.util import Sorry, tabulate
help_message = """
Examples::
dials.show models.expt
dials.show image_*.cbf
dials.show observations.refl
"""
phil_scope = iotbx.phil.parse(
"""\
show_scan_varying = False
.type = bool
.help = "Whether or not to show the crystal at each scan point."
show_shared_models = False
.type = bool
.help = "Show which models are linked to which experiments"
show_all_reflection_data = False
.type = bool
.help = "Whether or not to print individual reflections"
show_intensities = False
.type = bool
show_centroids = False
.type = bool
show_profile_fit = False
.type = bool
show_flags = False
.type = bool
.help = "Show a summary table of reflection flags"
show_identifiers = False
.type = bool
.help = "Show experiment identifiers map if set"
image_statistics{
show_corrected = False
.type = bool
.help = "Show statistics on the distribution of values in each corrected image"
show_raw = False
.type = bool
.help = "Show statistics on the distribution of values in each raw image"
}
max_reflections = None
.type = int
.help = "Limit the number of reflections in the output."
""",
process_includes=True,
)
def beam_centre_mm(detector, s0):
x, y = (None, None)
for panel_id, panel in enumerate(detector):
try:
x, y = panel.get_ray_intersection(s0)
except RuntimeError:
continue
else:
if panel.is_coord_valid_mm((x, y)):
break
else:
x, y = (None, None)
return panel_id, (x, y)
def beam_centre_raw_image_px(detector, s0):
panel_id, (x, y) = beam_centre_mm(detector, s0)
panel = detector[panel_id]
x_px, y_px = panel.millimeter_to_pixel((x, y))
offset = panel.get_raw_image_offset()
return x_px + offset[0], y_px + offset[1]
def show_beam(detector, beam):
# standard static beam model string
s = str(beam)
# report whether the beam is scan-varying
if beam.num_scan_points > 0:
s += " s0 sampled at " + str(beam.num_scan_points) + " scan points\n"
# add static model beam centres
panel_id, (x, y) = beam_centre_mm(detector, beam.get_s0())
if panel_id >= 0 and x is not None and y is not None:
x_px, y_px = detector[panel_id].millimeter_to_pixel((x, y))
if len(detector) > 1:
beam_centre_mm_str = " mm: panel %i, (%.2f,%.2f)" % (panel_id, x, y)
beam_centre_px_str = " px: panel %i, (%.2f,%.2f)" % (
panel_id,
x_px,
y_px,
)
x_raw_px, y_raw_px = beam_centre_raw_image_px(detector, beam.get_s0())
beam_centre_raw_px_str = " px, raw image: ({:.2f},{:.2f})".format(
x_raw_px,
y_raw_px,
)
x_raw_mm, y_raw_mm = detector[panel_id].pixel_to_millimeter(
(x_raw_px, y_raw_px)
)
beam_centre_raw_mm_str = " mm, raw image: ({:.2f},{:.2f})".format(
x_raw_mm,
y_raw_mm,
)
else:
beam_centre_mm_str = f" mm: ({x:.2f},{y:.2f})"
beam_centre_px_str = f" px: ({x_px:.2f},{y_px:.2f})"
beam_centre_raw_px_str = ""
beam_centre_raw_mm_str = ""
s += "\nBeam centre: \n"
s += beam_centre_mm_str + "\n" + beam_centre_px_str + "\n"
if beam_centre_raw_mm_str:
s += beam_centre_raw_mm_str + "\n"
if beam_centre_raw_px_str:
s += beam_centre_raw_px_str + "\n"
# report range of scan-varying model beam centres
if beam.num_scan_points > 0:
# get scan-varying beam centres, ensuring all on same panel
sv_s0 = beam.get_s0_at_scan_points()
impacts = [beam_centre_mm(detector, s0) for s0 in sv_s0]
pnl, xy = zip(*impacts)
uniq_pnls = set(pnl)
if len(uniq_pnls) > 1 or min(uniq_pnls) < 0:
return s
if any(e == (None, None) for e in xy):
return s
pnl = list(uniq_pnls)[0]
x_mm, y_mm = zip(*xy)
# convert to pixels
xy = [detector[pnl].millimeter_to_pixel(e) for e in xy]
x_px, y_px = zip(*xy)
s += "Beam centre range (mm): ([{:.2f},{:.2f}],[{:.2f},{:.2f}])\n".format(
min(x_mm),
max(x_mm),
min(y_mm),
max(y_mm),
)
s += "Beam centre range (px): ([{:.2f},{:.2f}],[{:.2f},{:.2f}])\n".format(
min(x_px),
max(x_px),
min(y_px),
max(y_px),
)
return s
def show_goniometer(goniometer):
# standard static goniometer model string
s = str(goniometer)
# report whether the goniometer is scan-varying
if goniometer.num_scan_points > 0:
s += (
" Setting rotation sampled at "
+ str(goniometer.num_scan_points)
+ " scan points\n"
)
return s
@dials.util.show_mail_handle_errors()
def run(args=None):
import dials.util.log
dials.util.log.print_banner()
from dials.util.options import OptionParser, reflections_and_experiments_from_files
usage = "dials.show [options] models.expt | image_*.cbf"
parser = OptionParser(
usage=usage,
phil=phil_scope,
read_experiments=True,
read_experiments_from_images=True,
read_reflections=True,
check_format=False,
epilog=help_message,
)
params, options = parser.parse_args(args=args, show_diff_phil=True)
reflections, experiments = reflections_and_experiments_from_files(
params.input.reflections, params.input.experiments
)
if len(experiments) == 0 and len(reflections) == 0:
parser.print_help()
exit()
if len(experiments):
if not all(e.detector for e in experiments):
sys.exit("Error: experiment has no detector")
if not all(e.beam for e in experiments):
sys.exit("Error: experiment has no beam")
print(show_experiments(experiments, show_scan_varying=params.show_scan_varying))
if params.image_statistics.show_raw:
show_image_statistics(experiments, "raw")
if params.image_statistics.show_corrected:
show_image_statistics(experiments, "corrected")
if params.show_shared_models:
print()
print(model_connectivity(experiments))
if len(reflections):
print(
show_reflections(
reflections,
show_intensities=params.show_intensities,
show_profile_fit=params.show_profile_fit,
show_centroids=params.show_centroids,
show_all_reflection_data=params.show_all_reflection_data,
show_flags=params.show_flags,
max_reflections=params.max_reflections,
show_identifiers=params.show_identifiers,
)
)
def show_experiments(experiments, show_scan_varying=False):
text = []
for i_expt, expt in enumerate(experiments):
text.append("Experiment %i:" % i_expt)
format_class = expt.imageset.get_format_class()
if format_class.__name__ != "Format":
text.append(f"Format class: {format_class.__name__}")
if expt.identifier != "":
text.append(f"Experiment identifier: {expt.identifier}")
try:
template = expt.imageset.get_template()
except AttributeError:
template = None
if template:
text.append(f"Image template: {template}")
text.append(str(expt.detector))
text.append(
"Max resolution (at corners): %f"
% (expt.detector.get_max_resolution(expt.beam.get_s0()))
)
text.append(
"Max resolution (inscribed): %f"
% (expt.detector.get_max_inscribed_resolution(expt.beam.get_s0()))
)
text.append("")
text.append(show_beam(expt.detector, expt.beam))
if expt.scan is not None:
text.append(str(expt.scan))
if expt.goniometer is not None:
text.append(show_goniometer(expt.goniometer))
if expt.crystal is not None:
text.append(expt.crystal.as_str(show_scan_varying=show_scan_varying))
if expt.crystal.num_scan_points:
abc = flex.vec3_double()
angles = flex.vec3_double()
for n in range(expt.crystal.num_scan_points):
(
a,
b,
c,
alpha,
beta,
gamma,
) = expt.crystal.get_unit_cell_at_scan_point(n).parameters()
abc.append((a, b, c))
angles.append((alpha, beta, gamma))
a, b, c = abc.mean()
alpha, beta, gamma = angles.mean()
mean_unit_cell = uctbx.unit_cell((a, b, c, alpha, beta, gamma))
text.append(f" Average unit cell: {mean_unit_cell}")
if expt.profile is not None:
text.append(str(expt.profile))
if expt.scaling_model is not None:
text.append(str(expt.scaling_model))
return "\n".join(text)
def show_image_statistics(experiments, im_type):
if im_type == "raw":
raw = True
elif im_type == "corrected":
raw = False
else:
raise ValueError(f"Unknown im_type: {im_type}")
# To show image statistics, check_format has to be true. So we have to reinstatiate
# the experiment list here
try:
experiments = ExperimentListFactory.from_json(
experiments.as_json(), check_format=True
)
except OSError as e:
raise Sorry(
f"Unable to read image data. Please check {e.filename} is accessible"
)
print(f"Five number summary of the {im_type} images")
for i_expt, expt in enumerate(experiments):
for i in range(len(expt.imageset)):
identifier = os.path.basename(expt.imageset.get_image_identifier(i))
if raw:
pnl_data = expt.imageset.get_raw_data(i)
else:
pnl_data = expt.imageset.get_corrected_data(i)
if not isinstance(pnl_data, tuple):
pnl_data = (pnl_data,)
flat_data = pnl_data[0].as_1d()
for p in pnl_data[1:]:
flat_data.extend(p.as_1d())
fns = five_number_summary(flat_data)
print(
"{}: Min: {:.1f} Q1: {:.1f} Med: {:.1f} Q3: {:.1f} Max: {:.1f}".format(
identifier, *fns
)
)
def model_connectivity(experiments):
def model_connectivity_impl(experiments, model):
text = [""]
text.append(f"{model.capitalize()}:")
models = getattr(experiments, f"{model}s")()
rows = [[""] + [str(j) for j in range(len(models))]]
for j, e in enumerate(experiments):
row = ["Experiment %d" % j]
for m in models:
if getattr(e, model) is m:
row.append("x")
else:
row.append(".")
rows.append(row)
text.append(tabulate(rows, tablefmt="plain"))
return text
if len(experiments) == 1:
return ""
text = []
text.append("Experiment / Models")
text.extend(model_connectivity_impl(experiments, "detector"))
text.extend(model_connectivity_impl(experiments, "crystal"))
text.extend(model_connectivity_impl(experiments, "beam"))
return "\n".join(text)
def _create_flag_count_table(table):
"""Generate a summary table of flag values in a reflection table.
:param table: A reflection table
:returns: A string of the formatted flags table
"""
# Calculate the counts of entries that match each flag
numpy_flags = table["flags"].as_numpy_array()
flag_count = {
flag: np.sum(numpy_flags & value != 0)
for value, flag in table.flags.values.items()
}
# Work out the numeric-value order of the flags
flag_order = sorted(table.flags.values.values(), key=lambda x: x.real)
# Build the actual table
flag_rows = [["Flag", "Count", "%"]]
max_count_len = max(5, len(str(max(flag_count.values()))))
last_flag = None
for flag in flag_order:
indent = ""
# As a hint for reading, indent any 'summary' flags.
# A summary flag is any flag which overlaps with the previous one.
if last_flag and (last_flag.real & flag.real):
indent = " "
last_flag = flag
# Add the row to the table we're building
flag_rows.append(
[
indent + flag.name,
"{:{:d}d}".format(flag_count[flag], max_count_len),
f"{100 * flag_count[flag] / len(table):5.01f}",
]
)
# Build the array of output strings
text = []
text.append("Reflection flags:")
text.append(tabulate(flag_rows, headers="firstrow"))
return "\n".join(text)
def show_reflections(
reflections,
show_intensities=False,
show_profile_fit=False,
show_centroids=False,
show_all_reflection_data=False,
show_flags=False,
max_reflections=None,
show_identifiers=False,
):
text = []
from orderedset import OrderedSet
formats = {
"miller_index": "%i, %i, %i",
"d": "%.2f",
"qe": "%.3f",
"dqe": "%.3f",
"id": "%i",
"imageset_id": "%i",
"panel": "%i",
"flags": "%i",
"background.mean": "%.1f",
"background.dispersion": "%.1f",
"background.mse": "%.1f",
"background.sum.value": "%.1f",
"background.sum.variance": "%.1f",
"intensity.prf.value": "%.1f",
"intensity.prf.variance": "%.1f",
"intensity.sum.value": "%.1f",
"intensity.sum.variance": "%.1f",
"intensity.cor.value": "%.1f",
"intensity.cor.variance": "%.1f",
"intensity.scale.value": "%.1f",
"intensity.scale.variance": "%.1f",
"Ih_values": "%.1f",
"lp": "%.3f",
"num_pixels.background": "%i",
"num_pixels.background_used": "%i",
"num_pixels.foreground": "%i",
"num_pixels.valid": "%i",
"partial_id": "%i",
"partiality": "%.4f",
"profile.correlation": "%.3f",
"profile.rmsd": "%.3f",
"xyzcal.mm": "%.2f, %.2f, %.2f",
"xyzcal.px": "%.2f, %.2f, %.2f",
"delpsical.rad": "%.3f",
"delpsical2": "%.3f",
"delpsical.weights": "%.3f",
"xyzobs.mm.value": "%.2f, %.2f, %.2f",
"xyzobs.mm.variance": "%.4e, %.4e, %.4e",
"xyzobs.px.value": "%.2f, %.2f, %.2f",
"xyzobs.px.variance": "%.4f, %.4f, %.4f",
"s1": "%.4f, %.4f, %.4f",
"s2": "%.4f, %.4f, %.4f",
"shoebox": "%.1f",
"rlp": "%.4f, %.4f, %.4f",
"zeta": "%.3f",
"x_resid": "%.3f",
"x_resid2": "%.3f",
"y_resid": "%.3f",
"y_resid2": "%.3f",
"kapton_absorption_correction": "%.3f",
"kapton_absorption_correction_sigmas": "%.3f",
"inverse_scale_factor": "%.3f",
"inverse_scale_factor_variance": "%.3f",
}
for rlist in reflections:
from dials.algorithms.shoebox import MaskCode
foreground_valid = MaskCode.Valid | MaskCode.Foreground
text.append("")
text.append(f"Reflection list contains {len(rlist)} reflections")
if len(rlist) == 0:
continue
rows = [["Column", "min", "max", "mean"]]
for k, col in rlist.cols():
if k in formats and "%" not in formats.get(k, "%s"):
# Allow blanking out of entries that wouldn't make sense
rows.append(
[
k,
formats.get(k, "%s"),
formats.get(k, "%s"),
formats.get(k, "%s"),
]
)
elif type(col) in (flex.double, flex.int, flex.size_t):
if type(col) in (flex.int, flex.size_t):
col = col.as_double()
rows.append(
[
k,
formats.get(k, "%s") % flex.min(col),
formats.get(k, "%s") % flex.max(col),
formats.get(k, "%s") % flex.mean(col),
]
)
elif type(col) in (flex.vec3_double, flex.miller_index):
if isinstance(col, flex.miller_index):
col = col.as_vec3_double()
rows.append(
[
k,
formats.get(k, "%s") % col.min(),
formats.get(k, "%s") % col.max(),
formats.get(k, "%s") % col.mean(),
]
)
elif isinstance(col, flex.shoebox):
rows.append([k, "", "", ""])
si = col.summed_intensity().observed_value()
rows.append(
[
" summed I",
formats.get(k, "%s") % flex.min(si),
formats.get(k, "%s") % flex.max(si),
formats.get(k, "%s") % flex.mean(si),
]
)
x1, x2, y1, y2, z1, z2 = col.bounding_boxes().parts()
bbox_sizes = ((z2 - z1) * (y2 - y1) * (x2 - x1)).as_double()
rows.append(
[
" N pix",
formats.get(k, "%s") % flex.min(bbox_sizes),
formats.get(k, "%s") % flex.max(bbox_sizes),
formats.get(k, "%s") % flex.mean(bbox_sizes),
]
)
fore_valid = col.count_mask_values(foreground_valid).as_double()
rows.append(
[
" N valid foreground pix",
formats.get(k, "%s") % flex.min(fore_valid),
formats.get(k, "%s") % flex.max(fore_valid),
formats.get(k, "%s") % flex.mean(fore_valid),
]
)
text.append(tabulate(rows, headers="firstrow"))
if show_flags:
text.append(_create_flag_count_table(rlist))
if show_identifiers:
if rlist.experiment_identifiers():
text.append(
"""Experiment identifiers id-map values:\n%s"""
% (
"\n".join(
"id:"
+ str(k)
+ " -> experiment identifier:"
+ str(rlist.experiment_identifiers()[k])
for k in rlist.experiment_identifiers().keys()
)
)
)
intensity_keys = (
"miller_index",
"d",
"intensity.prf.value",
"intensity.prf.variance",
"intensity.sum.value",
"intensity.sum.variance",
"background.mean",
"profile.correlation",
"profile.rmsd",
)
profile_fit_keys = ("miller_index", "d")
centroid_keys = (
"miller_index",
"d",
"xyzcal.mm",
"xyzcal.px",
"xyzobs.mm.value",
"xyzobs.mm.variance",
"xyzobs.px.value",
"xyzobs.px.variance",
)
keys_to_print = OrderedSet()
if show_intensities:
for k in intensity_keys:
keys_to_print.add(k)
if show_profile_fit:
for k in profile_fit_keys:
keys_to_print.add(k)
if show_centroids:
for k in centroid_keys:
keys_to_print.add(k)
if show_all_reflection_data:
for k in formats:
keys_to_print.add(k)
def format_column(key, data, format_strings=None):
if isinstance(data, flex.vec3_double):
c_strings = [
c.as_string(format_strings[i].strip())
for i, c in enumerate(data.parts())
]
elif isinstance(data, flex.miller_index):
c_strings = [
c.as_string(format_strings[i].strip())
for i, c in enumerate(data.as_vec3_double().parts())
]
elif isinstance(data, flex.size_t):
c_strings = [data.as_int().as_string(format_strings[0].strip())]
elif isinstance(data, flex.shoebox):
x1, x2, y1, y2, z1, z2 = data.bounding_boxes().parts()
bbox_sizes = ((z2 - z1) * (y2 - y1) * (x2 - x1)).as_double()
c_strings = [bbox_sizes.as_string(format_strings[0].strip())]
key += " (N pix)"
else:
c_strings = [data.as_string(format_strings[0].strip())]
column = flex.std_string()
max_element_lengths = [c.max_element_length() for c in c_strings]
for i in range(len(c_strings[0])):
column.append(
f"%{len(key)}s"
% ", ".join(
("%%%is" % max_element_lengths[j]) % c_strings[j][i]
for j in range(len(c_strings))
)
)
return column
if keys_to_print:
keys = [k for k in keys_to_print if k in rlist]
if max_reflections is not None:
max_reflections = min(len(rlist), max_reflections)
else:
max_reflections = len(rlist)
columns = []
for k in keys:
columns.append(
format_column(k, rlist[k], format_strings=formats[k].split(","))
)
text.append("")
text.append("Printing %i of %i reflections:" % (max_reflections, len(rlist)))
line = []
for j in range(len(columns)):
key = keys[j]
if key == "shoebox":
key += " (N pix)"
width = max(len(key), columns[j].max_element_length())
line.append("%%%is" % width % key)
text.append(" ".join(line))
for i in range(max_reflections):
line = (c[i] for c in columns)
text.append(" ".join(line))
return "\n".join(text)
if __name__ == "__main__":
run()
| 2.25 | 2 |
app/config/env_jesa.py | OuissalTAIM/jenkins | 0 | 1965 | # -*- coding: utf-8 -*-
from enum import Enum, IntEnum, unique
import os
APP_NAME = "mine2farm"
NETWORK_NAME = "CenterAxis"
LOG_LEVEL_CONSOLE = "WARNING"
LOG_LEVEL_FILE = "INFO"
APP_FOLDER = os.getenv("JESA_MINE2FARM_HOME", "C:/GitRepos/mine2farm/")
LOG_FOLDER = APP_FOLDER + "app/log/"
LOG_FILE = "%(asctime)s_" + APP_NAME + ".log"
OUTPUT_FOLDER = "%s%s" % (APP_FOLDER, "outputs/")
CANVAS_URL = "http://127.0.0.1/canvas.xlsm"
# DB
DB_NAME = None
DB_HOST = "172.29.161.208"
DB_PORT = 5006
DATA_SERVICE_ADD = "172.29.161.208"
DATA_SERVICE_PORT = 5001
# Results
DB_RESULT_NAME = "%s_results" % DB_NAME if DB_NAME is not None else None
DB_DETAILED_RESULT_COLLECTION_NAME = "detailed"
DB_GLOBAL_RESULT_COLLECTION_NAME = "global"
DB_GLOBAL_BEST_RESULT_COLLECTION_NAME = "global_best"
DB_DETAILED_BEST_RESULT_COLLECTION_NAME = "detailed_best"
DB_SENSITIVITY_COLLECTION_NAME = "sensitivity"
RESULT_BATCHES_SIZE = 25
HEAD_DATA_BITS = 17
DB_NAME_BITS = 20
RANDOMIZE_RESULTS = False
# RabbitMQ
RABBITMQ_SERVER = "localhost"
RABBITMQ_SIMULATOR_QUEUE_NAME = "SIMULATE"
RABBITMQ_CYCLE = 3
RABBITMQ_DETAILED_RESULT_QUEUE_NAME = "SAVE_DETAIL"
RABBITMQ_GLOBAL_RESULT_QUEUE_NAME = "SAVE_GLOBAL"
RABBITMQ_MAX_WORKER = RABBITMQ_CYCLE
RABBITMQ_PATH = "C:\\Program Files\\RabbitMQ Server\\rabbitmq_server-3.8.1\\sbin"
# Memcached
MEMCACHED_SERVER = 'localhost'
MEMCACHED_PORT = 11211
# Dashboard
DB_LOAD_FROM_SERVICE = True
# Monitoring
MONITORING_APP_NAME = "mine2farm_monitor"
MONITORING_SERVER = "172.29.161.208"
MONITORING_PORT = 5002
MONITORING_DB_NAME = "task_history"
MONITORING_COLLECTION_HISTORY_NAME = "task"
MONITORING_COLLECTION_HISTORY_BEST_NAME = "best_scenarios_history"
MONITORING_STEP = 1
MONITORING_NB_PAGE = 10
# Mongodb-bi
MONGODB_BI_PATH = "C:\\Program Files\\MongoDB\\Connector for BI\\2.13\\bin"
# Mongodb
MONGO_SERVER_PATH = "C:\\Program Files\\MongoDB\\Server\\4.0\\bin"
# params
LOGISTICS_LP = False
MODE_DEBUG = False
GRANUL_RELAX = False
class HTML_STATUS(IntEnum):
ERROR = -1
OK = 0
# Model
MONIKER_SEPARATOR = "/"
WACC = 0.1
T0 = 2020
TMAX = 2031
class PriceParams(Enum):
WACC = 0
TENOR = 1
VOLUME = 2
class PipelineType(Enum):
COMMON = 0
PRODUCER = 1
TRANSPORT = 2
BALANCE = 3
PRICE = 4
SALES = 5
@unique
class PipelineLayer(IntEnum):
UNDEFINED = -1
MINE = 0
BENEFICIATION = 1
SAP = 2
PAP = 3
GRANULATION = 4
LOGISTICS = 5
RAW_MATERIALS = 8
COMMON = 9
SALES_PLAN = 10
MINE_BENEFICIATION = 11
UNIT_CONVERSION_MATRIX = 12
PIPELINE_SCHEMA = {
PipelineLayer.COMMON: {
"type": PipelineType.COMMON,
"dico": ["location", "opex", "unit", "currency", "output", "names", "products"]
},
PipelineLayer.MINE: {
"type": PipelineType.PRODUCER,
"dico": ["mine.name", "mine.extraction", "mine.quality", "mine.capex"],
"options": "mining_options",
"production": "mining_specific_production",
"opex": "mining_opex___specific_consumptions",
"capex": "mining_capex",
"priority_mines": "prioritymines"
},
PipelineLayer.BENEFICIATION: {
"type": PipelineType.PRODUCER,
"dico": ["beneficiation.name", "beneficitation.process", "beneficitation.quality", "beneficitation.capex"],
"options": "beneficiation_options",
"production": "beneficiation_production",
"opex": "beneficiation_opex___specific_consumptions",
"capex": "beneficiation_capex"
},
PipelineLayer.SAP: {
"type": PipelineType.PRODUCER,
"dico": ["sap.name", "sap.process", "sap.product", "sap.capex", "sap.capacity[kt]"],
"options": "sap___power_plant_options",
"production": "sap___power_plant_production",
"opex": "sap___power_plant_opex___specific_consumptions",
"capex": "sap___power_plant_capex",
"product_type": "sap.product"
},
PipelineLayer.PAP: {
"type": PipelineType.PRODUCER,
"dico": ["pap.name", "pap.process", "pap.product", "pap.capex", "pap.size[kt]", "pap.input"],
"options": "pap_options",
"production": "pap_production",
"opex": "pap_opex___specific_consumptions",
"capex": "pap_capex",
"product_type": "pap.product"
},
PipelineLayer.GRANULATION: {
"type": PipelineType.PRODUCER,
"dico": ["granulation.name", "granulation.process", "granulation.product", "granulation.capex", "granulation.input"],
"options": "granulation_options",
"production": "granulation_production",
"opex": "granulation_opex",
"capex": "granulation_capex"
},
PipelineLayer.LOGISTICS: {
"type": PipelineType.TRANSPORT,
"dico": ["logistics.name", "logistics.process", "logistics.product", "logistics.capex"],
"options": "logistics_options",
"production": None,
"opex": "logistics_opex",
"capex": "logistics_capex"
},
PipelineLayer.RAW_MATERIALS: {
"type": PipelineType.PRICE,
"data": "raw_materials"
},
PipelineLayer.SALES_PLAN: {
"type": PipelineType.SALES,
"data": "sales_plan"
},
PipelineLayer.UNIT_CONVERSION_MATRIX: {
"type": PipelineType.COMMON,
"data": "conv_matrix"
},
}
SUPPLY_CHAIN = "mine2port"
DEPARTURE_ARRIVAL = {SUPPLY_CHAIN: (PipelineLayer.MINE),
"sap2pap": (PipelineLayer.SAP, PipelineLayer.PAP)}
COMBO_NODES = {
PipelineLayer.MINE_BENEFICIATION: {
"url": "mining_wp_connections",
"upstream_layer": PipelineLayer.MINE,
"downstream_layer": PipelineLayer.BENEFICIATION
}
}
COMBO_NODES_SEPARATION = "--"
class FunctionType(Enum):
COST_PV = 0
CASH_COST = 1
FULL_COST = 2
class ScenarioGeneratorType(IntEnum):
FROM_PATHS = 0
FROM_OPTIONS = 1
SPECIFIC_SCENARIOS = 2
SCENARIO_GEN_TYPE = ScenarioGeneratorType.FROM_OPTIONS
PIPELINE_METADATA = {
PipelineLayer.MINE: {
"type": PipelineType.PRODUCER,
"production": ["Name", "Extraction", "Quality", "Unit"],
"opex": ["Name", "Extraction", "Capacity", "Item", "Unit"],
"capex": ["Name", "Extraction", "Capacity", "Item", "Unit", "CAPEX"]
},
PipelineLayer.BENEFICIATION: {
"type": PipelineType.PRODUCER,
"production": ["Process", "InputQuality", "OutputQuality", "Humidity", "Unit"],
"opex": ["Process", "InputQuality", "OutputQuality", "Item", "Unit"],
"capex": ["Name", "Process", "Capacity", "Item", "Unit", "CAPEX"]
},
PipelineLayer.SAP: {
"type": PipelineType.PRODUCER,
"production": ["Location", "Process", "Product", "Unit"],
"opex": ["Location", "Process", "Item", "Unit"],
"capex": ["Location", "Process", "Capacity", "Item", "Unit", "CAPEX"]
},
PipelineLayer.PAP: {
"type": PipelineType.PRODUCER,
"production": ["Process", "Input", "Product", "Unit"],
"opex": ["Location", "Process", "Capacity", "Input", "Item", "Product", "Unit"],
"capex": ["Location", "Process", "Capacity", "Item", "Unit", "CAPEX"]
},
PipelineLayer.GRANULATION: {
"type": PipelineType.PRODUCER,
"production": ["Process", "Input", "Product", "Unit"],
"opex": ["Location", "ProductionSite", "Process", "Capacity", "Product", "Item", "Unit"],
"capex": ["Location", "ProductionSite", "Product", "Process", "Capacity", "Item", "Unit", "CAPEX"]
},
PipelineLayer.LOGISTICS: {
"type": PipelineType.TRANSPORT,
"opex": ["Upstream", "Downstream", "Method", "Product", "Capacity", "Item", "Unit"],
"capex": ["Upstream", "Downstream", "Method", "Product", "Capacity", "Item", "Unit", "CAPEX"]
},
PipelineLayer.RAW_MATERIALS: {
"type": PipelineType.PRICE,
"columns": ["Item", "Unit"]
},
PipelineLayer.SALES_PLAN: {
"type": PipelineType.PRICE,
"columns": ["Type", "Product", "Unit"]
},
PipelineLayer.UNIT_CONVERSION_MATRIX: {
"type": PipelineType.COMMON,
"columns": ["Initial Unit", "Uniform Unit", "Conversion Rate"]
},
}
class ShuffleLevel(IntEnum):
UNDEFINED = 0
SHUFFLE_WITHOUT_PERM = 1
SHUFFLE_WITH_PERMUTATIONS = 2
SHUFFLE_WITH_PERMUTATIONS_WITH_FILTERS = 3
SHUFFLE_WITH_UNNAMED = 4
SHUFFLE_LEVELS = {
PipelineLayer.MINE: ShuffleLevel.UNDEFINED,
PipelineLayer.BENEFICIATION: ShuffleLevel.UNDEFINED,
PipelineLayer.SAP: ShuffleLevel.SHUFFLE_WITH_UNNAMED,
PipelineLayer.PAP: ShuffleLevel.SHUFFLE_WITH_UNNAMED,
PipelineLayer.GRANULATION: ShuffleLevel.UNDEFINED,
PipelineLayer.LOGISTICS: ShuffleLevel.UNDEFINED,
PipelineLayer.MINE_BENEFICIATION: ShuffleLevel.UNDEFINED
} | 2.09375 | 2 |
myFirstApp/travello/models.py | cankush625/Django | 0 | 1966 | from django.db import models
# Create your models here.
class Destination(models.Model) :
name = models.CharField(max_length = 100)
img = models.ImageField(upload_to = 'pics')
desc = models.TextField()
price = models.IntegerField()
offer = models.BooleanField(default = False)
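# Minimal usage sketch (assumptions: run inside a configured Django project with
# migrations applied; 'pics/goa.jpg' is only a placeholder upload path).
def _example_destination():
    return Destination.objects.create(
        name='Goa', img='pics/goa.jpg', desc='Beach trip', price=700, offer=True)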
class News() :
id : int
img : str
date : int
month : str
headline : str
category : str
desc : str | 2.265625 | 2 |
app/app.py | Moustique-bot/hands-on-2021 | 0 | 1967 | import base64
import io
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output
import numpy as np
import tensorflow as tf
from PIL import Image
from constants import CLASSES
import yaml
with open('app.yaml') as yaml_data :
params = yaml.safe_load(yaml_data)
IMAGE_WIDTH = params['IMAGE_WIDTH']
IMAGE_HEIGHT = params['IMAGE_HEIGHT']
PATH_MODEL = params['PATH_MODEL']
# Load DNN model
classifier = tf.keras.models.load_model(PATH_MODEL)
def classify_image(image, model, image_box=None):
"""Classify image by model
Parameters
----------
    image: PIL image to classify
    model: tf/keras classifier
    image_box: optional (x1, y1, x2, y2) crop box applied during the resize
Returns
-------
class id returned by model classifier
"""
images_list = []
image = image.resize((IMAGE_WIDTH, IMAGE_HEIGHT), box=image_box)
# box argument clips image to (x1, y1, x2, y2)
image = np.array(image)
images_list.append(image)
return model.predict_classes(np.array(images_list))
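# Minimal usage sketch (assumptions: 'example.png' is a placeholder path to a local
# RGB test image, and the global `classifier` loaded above is the model to query).
def _example_classification(path='example.png'):
    image = Image.open(path)
    predicted = classify_image(image, classifier)[0]
    return CLASSES[predicted]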
app = dash.Dash('Traffic Signs Recognition', external_stylesheets=[dbc.themes.BOOTSTRAP])
pre_style = {
    'wordBreak': 'break-all',
    'whiteSpace': 'normal'
}
# Define application layout
navbar = dbc.NavbarSimple(
children=[
dbc.DropdownMenu(
children=[
dbc.DropdownMenuItem('Réseau de Neurones', header=True),
dbc.DropdownMenuItem('SVM', href="#"),
],
nav=True,
in_navbar=True,
label='Modèle',
),
],
brand="Menu",
brand_href="#",
color= "#d90054",
dark=True
)
cards = html.Div(
[
dbc.Card(
dbc.CardBody(
[
html.H5("Présentation", className="card-title"),
html.P(
[
'Cette application à pour but de réaliser des modèles capables de classer des panneaux de signalisation allemand à partir d\'une image. L\'application fonctionne de la manière suivante : vous déposer une image à l\'emplacement indiqué et la prédiction du modèle apparait immédiatement en dessous. En haut à droite vous pouvez sélectionner le modèle que vous voulez tester.',
],
className='card-text',
),
]
),
className='w-75 mb-3',
color='#f1cbd1',
outline='Black',
style={
'margin-top': '75px',
'margin-left': '185px'},
),
]
)
app.layout = html.Div([
html.Div([navbar]),
html.Div(cards),
dcc.Upload(
id='bouton-chargement',
children=html.Div([
'Cliquer-déposer ou ',
html.A('sélectionner une image')
]),
style={
'width': '50%',
'height': '60px',
'lineHeight': '60px',
'borderWidth': '1px',
'borderStyle': 'dashed',
'borderRadius': '5px',
'textAlign': 'center',
'margin-top': '75px',
'margin-left': '370px',
}
),
html.Div(id='mon-image'),
html.Div(id='ma-zone-resultat')
])
@app.callback(Output('mon-image', 'children'),
[Input('bouton-chargement', 'contents')])
def update_output(contents):
if contents is not None:
content_type, content_string = contents.split(',')
if 'image' in content_type:
image = Image.open(io.BytesIO(base64.b64decode(content_string)))
predicted_class = classify_image(image, classifier)[0]
return html.Div([
html.Hr(style={'margin-top': '75px'}),
html.Img(src=contents, style={'margin-left': '750px'}),
html.H4('Classe prédite : {}'.format(CLASSES[predicted_class]), style={'textAlign': 'center'}),
html.Hr(),
#html.Div('Raw Content'),
#html.Pre(contents, style=pre_style)
])
else:
try:
                # Decode the image transmitted as base 64 (ppm file case)
                # base-64 string --> PIL image
image = Image.open(io.BytesIO(base64.b64decode(content_string)))
                # PIL image --> PNG conversion --> in-memory buffer
buffer = io.BytesIO()
image.save(buffer, format='PNG')
                # in-memory buffer --> base-64 string
buffer.seek(0)
img_bytes = buffer.read()
content_string = base64.b64encode(img_bytes).decode('ascii')
                # Call the classification model
predicted_class = classify_image(image, classifier)[0]
                # Display the image
return html.Div([
html.Hr(style={'margin-top': '75px'}),
html.Img(src='data:image/png;base64,' + content_string, style={'margin-left': '750px'}),
html.H4('Classe prédite : {}'.format(CLASSES[predicted_class]), style={'textAlign': 'center'}),
html.Hr(),
])
except:
return html.Div([
html.Hr(),
html.Div('Uniquement des images svp : {}'.format(content_type)),
html.Hr(),
html.Div('Raw Content'),
html.Pre(contents, style=pre_style)
])
# Manage interactions with callbacks
@app.callback(
Output(component_id='ma-zone-resultat', component_property='children'),
[Input(component_id='mon-champ-texte', component_property='value')]
)
def update_output_div(input_value):
return html.H3('Valeur saisie ici "{}"'.format(input_value))
# Start the application
if __name__ == '__main__':
app.run_server(debug=True) | 2.4375 | 2 |
books/rakutenapi.py | NobukoYano/LibraryApp | 1 | 1968 | <gh_stars>1-10
import json
import requests
from django.conf import settings
class rakuten:
def get_json(self, isbn: str) -> dict:
appid = settings.RAKUTEN_APP_ID
# API request template
api = "https://app.rakuten.co.jp/services/api/BooksTotal/"\
"Search/20170404?format=json&isbnjan={isbnjan}&"\
"applicationId={appid}"
# format get api URL
url = api.format(isbnjan=isbn, appid=appid)
# execute
r = requests.get(url)
        # Check the status code
        status_code = r.status_code
        if status_code != 200:
            # if failed
            return None
        # decode to json
        data = json.loads(r.text)
if data['count'] == 0:
return None
json_data = {}
json_data['isbn'] = data['Items'][0]['Item']['isbn']
json_data['title'] = data['Items'][0]['Item']['title']
json_data['publisher'] = data['Items'][0]['Item']['publisherName']
json_data['pubdate'] = data['Items'][0]['Item']['salesDate']
json_data['cover'] = data['Items'][0]['Item']['largeImageUrl']
json_data['author'] = data['Items'][0]['Item']['author']
return json_data
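# Minimal usage sketch (assumptions: Django settings with RAKUTEN_APP_ID are already
# configured, and the ISBN below is only an illustrative placeholder).
def _example_lookup(isbn='9784873119328'):
    client = rakuten()
    return client.get_json(isbn)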
| 2.5 | 2 |
Random-Programs/optimization/root/v4.py | naumoff0/Archive | 0 | 1969 | print(int(input(""))**0.5) | 2.90625 | 3 |
sdk/authorization/azure-mgmt-authorization/azure/mgmt/authorization/v2018_01_01_preview/models/_models_py3.py | rsdoherty/azure-sdk-for-python | 2,728 | 1970 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, List, Optional
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class ErrorAdditionalInfo(msrest.serialization.Model):
"""The resource management error additional info.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar type: The additional info type.
:vartype type: str
:ivar info: The additional info.
:vartype info: any
"""
_validation = {
'type': {'readonly': True},
'info': {'readonly': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'info': {'key': 'info', 'type': 'object'},
}
def __init__(
self,
**kwargs
):
super(ErrorAdditionalInfo, self).__init__(**kwargs)
self.type = None
self.info = None
class ErrorDetail(msrest.serialization.Model):
"""The error detail.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar code: The error code.
:vartype code: str
:ivar message: The error message.
:vartype message: str
:ivar target: The error target.
:vartype target: str
:ivar details: The error details.
:vartype details: list[~azure.mgmt.authorization.v2018_01_01_preview.models.ErrorDetail]
:ivar additional_info: The error additional info.
:vartype additional_info:
list[~azure.mgmt.authorization.v2018_01_01_preview.models.ErrorAdditionalInfo]
"""
_validation = {
'code': {'readonly': True},
'message': {'readonly': True},
'target': {'readonly': True},
'details': {'readonly': True},
'additional_info': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'details': {'key': 'details', 'type': '[ErrorDetail]'},
'additional_info': {'key': 'additionalInfo', 'type': '[ErrorAdditionalInfo]'},
}
def __init__(
self,
**kwargs
):
super(ErrorDetail, self).__init__(**kwargs)
self.code = None
self.message = None
self.target = None
self.details = None
self.additional_info = None
class ErrorResponse(msrest.serialization.Model):
"""Common error response for all Azure Resource Manager APIs to return error details for failed operations. (This also follows the OData error response format.).
:param error: The error object.
:type error: ~azure.mgmt.authorization.v2018_01_01_preview.models.ErrorDetail
"""
_attribute_map = {
'error': {'key': 'error', 'type': 'ErrorDetail'},
}
def __init__(
self,
*,
error: Optional["ErrorDetail"] = None,
**kwargs
):
super(ErrorResponse, self).__init__(**kwargs)
self.error = error
class Permission(msrest.serialization.Model):
"""Role definition permissions.
:param actions: Allowed actions.
:type actions: list[str]
:param not_actions: Denied actions.
:type not_actions: list[str]
:param data_actions: Allowed Data actions.
:type data_actions: list[str]
:param not_data_actions: Denied Data actions.
:type not_data_actions: list[str]
"""
_attribute_map = {
'actions': {'key': 'actions', 'type': '[str]'},
'not_actions': {'key': 'notActions', 'type': '[str]'},
'data_actions': {'key': 'dataActions', 'type': '[str]'},
'not_data_actions': {'key': 'notDataActions', 'type': '[str]'},
}
def __init__(
self,
*,
actions: Optional[List[str]] = None,
not_actions: Optional[List[str]] = None,
data_actions: Optional[List[str]] = None,
not_data_actions: Optional[List[str]] = None,
**kwargs
):
super(Permission, self).__init__(**kwargs)
self.actions = actions
self.not_actions = not_actions
self.data_actions = data_actions
self.not_data_actions = not_data_actions
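# Minimal construction sketch (the action string below is illustrative only, not an
# authoritative permission set for any built-in role).
def _example_permission():
    return Permission(
        actions=["Microsoft.Compute/virtualMachines/read"],
        not_actions=[],
        data_actions=[],
        not_data_actions=[],
    )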
class PermissionGetResult(msrest.serialization.Model):
"""Permissions information.
:param value: An array of permissions.
:type value: list[~azure.mgmt.authorization.v2018_01_01_preview.models.Permission]
:param next_link: The URL to use for getting the next set of results.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[Permission]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["Permission"]] = None,
next_link: Optional[str] = None,
**kwargs
):
super(PermissionGetResult, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class ProviderOperation(msrest.serialization.Model):
"""Operation.
:param name: The operation name.
:type name: str
:param display_name: The operation display name.
:type display_name: str
:param description: The operation description.
:type description: str
:param origin: The operation origin.
:type origin: str
:param properties: The operation properties.
:type properties: any
:param is_data_action: The dataAction flag to specify the operation type.
:type is_data_action: bool
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'origin': {'key': 'origin', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'object'},
'is_data_action': {'key': 'isDataAction', 'type': 'bool'},
}
def __init__(
self,
*,
name: Optional[str] = None,
display_name: Optional[str] = None,
description: Optional[str] = None,
origin: Optional[str] = None,
properties: Optional[Any] = None,
is_data_action: Optional[bool] = None,
**kwargs
):
super(ProviderOperation, self).__init__(**kwargs)
self.name = name
self.display_name = display_name
self.description = description
self.origin = origin
self.properties = properties
self.is_data_action = is_data_action
class ProviderOperationsMetadata(msrest.serialization.Model):
"""Provider Operations metadata.
:param id: The provider id.
:type id: str
:param name: The provider name.
:type name: str
:param type: The provider type.
:type type: str
:param display_name: The provider display name.
:type display_name: str
:param resource_types: The provider resource types.
:type resource_types: list[~azure.mgmt.authorization.v2018_01_01_preview.models.ResourceType]
:param operations: The provider operations.
:type operations: list[~azure.mgmt.authorization.v2018_01_01_preview.models.ProviderOperation]
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'resource_types': {'key': 'resourceTypes', 'type': '[ResourceType]'},
'operations': {'key': 'operations', 'type': '[ProviderOperation]'},
}
def __init__(
self,
*,
id: Optional[str] = None,
name: Optional[str] = None,
type: Optional[str] = None,
display_name: Optional[str] = None,
resource_types: Optional[List["ResourceType"]] = None,
operations: Optional[List["ProviderOperation"]] = None,
**kwargs
):
super(ProviderOperationsMetadata, self).__init__(**kwargs)
self.id = id
self.name = name
self.type = type
self.display_name = display_name
self.resource_types = resource_types
self.operations = operations
class ProviderOperationsMetadataListResult(msrest.serialization.Model):
"""Provider operations metadata list.
:param value: The list of providers.
:type value:
list[~azure.mgmt.authorization.v2018_01_01_preview.models.ProviderOperationsMetadata]
:param next_link: The URL to use for getting the next set of results.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[ProviderOperationsMetadata]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["ProviderOperationsMetadata"]] = None,
next_link: Optional[str] = None,
**kwargs
):
super(ProviderOperationsMetadataListResult, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class ResourceType(msrest.serialization.Model):
"""Resource Type.
:param name: The resource type name.
:type name: str
:param display_name: The resource type display name.
:type display_name: str
:param operations: The resource type operations.
:type operations: list[~azure.mgmt.authorization.v2018_01_01_preview.models.ProviderOperation]
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'operations': {'key': 'operations', 'type': '[ProviderOperation]'},
}
def __init__(
self,
*,
name: Optional[str] = None,
display_name: Optional[str] = None,
operations: Optional[List["ProviderOperation"]] = None,
**kwargs
):
super(ResourceType, self).__init__(**kwargs)
self.name = name
self.display_name = display_name
self.operations = operations
class RoleAssignment(msrest.serialization.Model):
"""Role Assignments.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The role assignment ID.
:vartype id: str
:ivar name: The role assignment name.
:vartype name: str
:ivar type: The role assignment type.
:vartype type: str
:param scope: The role assignment scope.
:type scope: str
:param role_definition_id: The role definition ID.
:type role_definition_id: str
:param principal_id: The principal ID.
:type principal_id: str
:param can_delegate: The Delegation flag for the role assignment.
:type can_delegate: bool
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'scope': {'key': 'properties.scope', 'type': 'str'},
'role_definition_id': {'key': 'properties.roleDefinitionId', 'type': 'str'},
'principal_id': {'key': 'properties.principalId', 'type': 'str'},
'can_delegate': {'key': 'properties.canDelegate', 'type': 'bool'},
}
def __init__(
self,
*,
scope: Optional[str] = None,
role_definition_id: Optional[str] = None,
principal_id: Optional[str] = None,
can_delegate: Optional[bool] = None,
**kwargs
):
super(RoleAssignment, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.scope = scope
self.role_definition_id = role_definition_id
self.principal_id = principal_id
self.can_delegate = can_delegate
class RoleAssignmentCreateParameters(msrest.serialization.Model):
"""Role assignment create parameters.
All required parameters must be populated in order to send to Azure.
:param role_definition_id: Required. The role definition ID used in the role assignment.
:type role_definition_id: str
:param principal_id: Required. The principal ID assigned to the role. This maps to the ID
inside the Active Directory. It can point to a user, service principal, or security group.
:type principal_id: str
:param can_delegate: The delegation flag used for creating a role assignment.
:type can_delegate: bool
"""
_validation = {
'role_definition_id': {'required': True},
'principal_id': {'required': True},
}
_attribute_map = {
'role_definition_id': {'key': 'properties.roleDefinitionId', 'type': 'str'},
'principal_id': {'key': 'properties.principalId', 'type': 'str'},
'can_delegate': {'key': 'properties.canDelegate', 'type': 'bool'},
}
def __init__(
self,
*,
role_definition_id: str,
principal_id: str,
can_delegate: Optional[bool] = None,
**kwargs
):
super(RoleAssignmentCreateParameters, self).__init__(**kwargs)
self.role_definition_id = role_definition_id
self.principal_id = principal_id
self.can_delegate = can_delegate
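# Minimal construction sketch (the subscription-scoped role definition ID and the
# principal ID below are zeroed placeholders, not real identifiers).
def _example_role_assignment_create_parameters():
    return RoleAssignmentCreateParameters(
        role_definition_id=(
            "/subscriptions/00000000-0000-0000-0000-000000000000"
            "/providers/Microsoft.Authorization/roleDefinitions/"
            "00000000-0000-0000-0000-000000000000"
        ),
        principal_id="00000000-0000-0000-0000-000000000000",
        can_delegate=False,
    )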
class RoleAssignmentFilter(msrest.serialization.Model):
"""Role Assignments filter.
:param principal_id: Returns role assignment of the specific principal.
:type principal_id: str
:param can_delegate: The Delegation flag for the role assignment.
:type can_delegate: bool
"""
_attribute_map = {
'principal_id': {'key': 'principalId', 'type': 'str'},
'can_delegate': {'key': 'canDelegate', 'type': 'bool'},
}
def __init__(
self,
*,
principal_id: Optional[str] = None,
can_delegate: Optional[bool] = None,
**kwargs
):
super(RoleAssignmentFilter, self).__init__(**kwargs)
self.principal_id = principal_id
self.can_delegate = can_delegate
class RoleAssignmentListResult(msrest.serialization.Model):
"""Role assignment list operation result.
:param value: Role assignment list.
:type value: list[~azure.mgmt.authorization.v2018_01_01_preview.models.RoleAssignment]
:param next_link: The URL to use for getting the next set of results.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[RoleAssignment]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["RoleAssignment"]] = None,
next_link: Optional[str] = None,
**kwargs
):
super(RoleAssignmentListResult, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class RoleDefinition(msrest.serialization.Model):
"""Role definition.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The role definition ID.
:vartype id: str
:ivar name: The role definition name.
:vartype name: str
:ivar type: The role definition type.
:vartype type: str
:param role_name: The role name.
:type role_name: str
:param description: The role definition description.
:type description: str
:param role_type: The role type.
:type role_type: str
:param permissions: Role definition permissions.
:type permissions: list[~azure.mgmt.authorization.v2018_01_01_preview.models.Permission]
:param assignable_scopes: Role definition assignable scopes.
:type assignable_scopes: list[str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'role_name': {'key': 'properties.roleName', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'role_type': {'key': 'properties.type', 'type': 'str'},
'permissions': {'key': 'properties.permissions', 'type': '[Permission]'},
'assignable_scopes': {'key': 'properties.assignableScopes', 'type': '[str]'},
}
def __init__(
self,
*,
role_name: Optional[str] = None,
description: Optional[str] = None,
role_type: Optional[str] = None,
permissions: Optional[List["Permission"]] = None,
assignable_scopes: Optional[List[str]] = None,
**kwargs
):
super(RoleDefinition, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.role_name = role_name
self.description = description
self.role_type = role_type
self.permissions = permissions
self.assignable_scopes = assignable_scopes
class RoleDefinitionFilter(msrest.serialization.Model):
"""Role Definitions filter.
:param role_name: Returns role definition with the specific name.
:type role_name: str
:param type: Returns role definition with the specific type.
:type type: str
"""
_attribute_map = {
'role_name': {'key': 'roleName', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
*,
role_name: Optional[str] = None,
type: Optional[str] = None,
**kwargs
):
super(RoleDefinitionFilter, self).__init__(**kwargs)
self.role_name = role_name
self.type = type
class RoleDefinitionListResult(msrest.serialization.Model):
"""Role definition list operation result.
:param value: Role definition list.
:type value: list[~azure.mgmt.authorization.v2018_01_01_preview.models.RoleDefinition]
:param next_link: The URL to use for getting the next set of results.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[RoleDefinition]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["RoleDefinition"]] = None,
next_link: Optional[str] = None,
**kwargs
):
super(RoleDefinitionListResult, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
| 2.03125 | 2 |
school/views.py | pa-one-patel/college_managenment | 1 | 1971 | <filename>school/views.py
from django.shortcuts import render,redirect,reverse
from . import forms,models
from django.db.models import Sum
from django.contrib.auth.models import Group
from django.http import HttpResponseRedirect
from django.contrib.auth.decorators import login_required,user_passes_test
from django.core.mail import send_mail
from django.conf import settings
def home_view(request):
if request.user.is_authenticated:
return HttpResponseRedirect('afterlogin')
return render(request,'school/index.html')
#for showing signup/login button for admin(by sumit)
def adminclick_view(request):
if request.user.is_authenticated:
return HttpResponseRedirect('afterlogin')
return render(request,'school/adminclick.html')
#for showing signup/login button for teacher(by sumit)
def teacherclick_view(request):
if request.user.is_authenticated:
return HttpResponseRedirect('afterlogin')
return render(request,'school/teacherclick.html')
#for showing signup/login button for student(by sumit)
def studentclick_view(request):
if request.user.is_authenticated:
return HttpResponseRedirect('afterlogin')
return render(request,'school/studentclick.html')
def admin_signup_view(request):
form=forms.AdminSigupForm()
if request.method=='POST':
form=forms.AdminSigupForm(request.POST)
if form.is_valid():
user=form.save()
user.set_password(<PASSWORD>)
user.save()
my_admin_group = Group.objects.get_or_create(name='ADMIN')
my_admin_group[0].user_set.add(user)
return HttpResponseRedirect('adminlogin')
return render(request,'school/adminsignup.html',{'form':form})
def student_signup_view(request):
form1=forms.StudentUserForm()
form2=forms.StudentExtraForm()
mydict={'form1':form1,'form2':form2}
if request.method=='POST':
form1=forms.StudentUserForm(request.POST)
form2=forms.StudentExtraForm(request.POST)
if form1.is_valid() and form2.is_valid():
user=form1.save()
user.set_password(<PASSWORD>)
user.save()
f2=form2.save(commit=False)
f2.user=user
user2=f2.save()
my_student_group = Group.objects.get_or_create(name='STUDENT')
my_student_group[0].user_set.add(user)
return HttpResponseRedirect('studentlogin')
return render(request,'school/studentsignup.html',context=mydict)
def teacher_signup_view(request):
form1=forms.TeacherUserForm()
form2=forms.TeacherExtraForm()
mydict={'form1':form1,'form2':form2}
if request.method=='POST':
form1=forms.TeacherUserForm(request.POST)
form2=forms.TeacherExtraForm(request.POST)
if form1.is_valid() and form2.is_valid():
user=form1.save()
user.set_password(<PASSWORD>)
user.save()
f2=form2.save(commit=False)
f2.user=user
user2=f2.save()
my_teacher_group = Group.objects.get_or_create(name='TEACHER')
my_teacher_group[0].user_set.add(user)
return HttpResponseRedirect('teacherlogin')
return render(request,'school/teachersignup.html',context=mydict)
#for checking user is teacher, student or admin(by sumit)
def is_admin(user):
return user.groups.filter(name='ADMIN').exists()
def is_teacher(user):
return user.groups.filter(name='TEACHER').exists()
def is_student(user):
return user.groups.filter(name='STUDENT').exists()
def afterlogin_view(request):
if is_admin(request.user):
return redirect('admin-dashboard')
elif is_teacher(request.user):
accountapproval=models.TeacherExtra.objects.all().filter(user_id=request.user.id,status=True)
if accountapproval:
return redirect('teacher-dashboard')
else:
return render(request,'school/teacher_wait_for_approval.html')
elif is_student(request.user):
accountapproval=models.StudentExtra.objects.all().filter(user_id=request.user.id,status=True)
if accountapproval:
return redirect('student-dashboard')
else:
return render(request,'school/student_wait_for_approval.html')
#for dashboard of admin(by sumit)
@login_required(login_url='adminlogin')
@user_passes_test(is_admin)
def admin_dashboard_view(request):
teachercount=models.TeacherExtra.objects.all().filter(status=True).count()
pendingteachercount=models.TeacherExtra.objects.all().filter(status=False).count()
studentcount=models.StudentExtra.objects.all().filter(status=True).count()
pendingstudentcount=models.StudentExtra.objects.all().filter(status=False).count()
teachersalary=models.TeacherExtra.objects.filter(status=True).aggregate(Sum('salary'))
pendingteachersalary=models.TeacherExtra.objects.filter(status=False).aggregate(Sum('salary'))
studentfee=models.StudentExtra.objects.filter(status=True).aggregate(Sum('fee',default=0))
pendingstudentfee=models.StudentExtra.objects.filter(status=False).aggregate(Sum('fee'))
notice=models.Notice.objects.all()
    #aggregate function returns dictionary so fetch data from dictionary(by sumit)
mydict={
'teachercount':teachercount,
'pendingteachercount':pendingteachercount,
'studentcount':studentcount,
'pendingstudentcount':pendingstudentcount,
'teachersalary':teachersalary['salary__sum'],
'pendingteachersalary':pendingteachersalary['salary__sum'],
'studentfee':studentfee['fee__sum'],
'pendingstudentfee':pendingstudentfee['fee__sum'],
'notice':notice
}
return render(request,'school/admin_dashboard.html',context=mydict)
#for teacher section by admin(by sumit)
@login_required(login_url='adminlogin')
@user_passes_test(is_admin)
def admin_teacher_view(request):
return render(request,'school/admin_teacher.html')
@login_required(login_url='adminlogin')
@user_passes_test(is_admin)
def admin_add_teacher_view(request):
form1=forms.TeacherUserForm()
form2=forms.TeacherExtraForm()
mydict={'form1':form1,'form2':form2}
if request.method=='POST':
form1=forms.TeacherUserForm(request.POST)
form2=forms.TeacherExtraForm(request.POST)
if form1.is_valid() and form2.is_valid():
user=form1.save()
user.set_password(<PASSWORD>)
user.save()
f2=form2.save(commit=False)
f2.user=user
f2.status=True
f2.save()
my_teacher_group = Group.objects.get_or_create(name='TEACHER')
my_teacher_group[0].user_set.add(user)
return HttpResponseRedirect('admin-teacher')
return render(request,'school/admin_add_teacher.html',context=mydict)
@login_required(login_url='adminlogin')
@user_passes_test(is_admin)
def admin_view_teacher_view(request):
teachers=models.TeacherExtra.objects.all().filter(status=True)
return render(request,'school/admin_view_teacher.html',{'teachers':teachers})
@login_required(login_url='adminlogin')
@user_passes_test(is_admin)
def admin_approve_teacher_view(request):
teachers=models.TeacherExtra.objects.all().filter(status=False)
return render(request,'school/admin_approve_teacher.html',{'teachers':teachers})
@login_required(login_url='adminlogin')
@user_passes_test(is_admin)
def approve_teacher_view(request,pk):
teacher=models.TeacherExtra.objects.get(id=pk)
teacher.status=True
teacher.save()
return redirect(reverse('admin-approve-teacher'))
@login_required(login_url='adminlogin')
@user_passes_test(is_admin)
def delete_teacher_view(request,pk):
teacher=models.TeacherExtra.objects.get(id=pk)
user=models.User.objects.get(id=teacher.user_id)
user.delete()
teacher.delete()
return redirect('admin-approve-teacher')
@login_required(login_url='adminlogin')
@user_passes_test(is_admin)
def delete_teacher_from_school_view(request,pk):
teacher=models.TeacherExtra.objects.get(id=pk)
user=models.User.objects.get(id=teacher.user_id)
user.delete()
teacher.delete()
return redirect('admin-view-teacher')
@login_required(login_url='adminlogin')
@user_passes_test(is_admin)
def update_teacher_view(request,pk):
teacher=models.TeacherExtra.objects.get(id=pk)
user=models.User.objects.get(id=teacher.user_id)
form1=forms.TeacherUserForm(instance=user)
form2=forms.TeacherExtraForm(instance=teacher)
mydict={'form1':form1,'form2':form2}
if request.method=='POST':
form1=forms.TeacherUserForm(request.POST,instance=user)
form2=forms.TeacherExtraForm(request.POST,instance=teacher)
print(form1)
if form1.is_valid() and form2.is_valid():
user=form1.save()
user.set_password(<PASSWORD>)
user.save()
f2=form2.save(commit=False)
f2.status=True
f2.save()
return redirect('admin-view-teacher')
return render(request,'school/admin_update_teacher.html',context=mydict)
@login_required(login_url='adminlogin')
@user_passes_test(is_admin)
def admin_view_teacher_salary_view(request):
teachers=models.TeacherExtra.objects.all()
return render(request,'school/admin_view_teacher_salary.html',{'teachers':teachers})
#for student by admin(by sumit)
@login_required(login_url='adminlogin')
@user_passes_test(is_admin)
def admin_student_view(request):
return render(request,'school/admin_student.html')
@login_required(login_url='adminlogin')
@user_passes_test(is_admin)
def admin_add_student_view(request):
form1=forms.StudentUserForm()
form2=forms.StudentExtraForm()
mydict={'form1':form1,'form2':form2}
if request.method=='POST':
form1=forms.StudentUserForm(request.POST)
form2=forms.StudentExtraForm(request.POST)
if form1.is_valid() and form2.is_valid():
print("form is valid")
user=form1.save()
user.set_password(<PASSWORD>)
user.save()
f2=form2.save(commit=False)
f2.user=user
f2.status=True
f2.save()
my_student_group = Group.objects.get_or_create(name='STUDENT')
my_student_group[0].user_set.add(user)
else:
print("form is invalid")
return HttpResponseRedirect('admin-student')
return render(request,'school/admin_add_student.html',context=mydict)
@login_required(login_url='adminlogin')
@user_passes_test(is_admin)
def admin_view_student_view(request):
students=models.StudentExtra.objects.all().filter(status=True)
return render(request,'school/admin_view_student.html',{'students':students})
@login_required(login_url='adminlogin')
@user_passes_test(is_admin)
def delete_student_from_school_view(request,pk):
student=models.StudentExtra.objects.get(id=pk)
user=models.User.objects.get(id=student.user_id)
user.delete()
student.delete()
return redirect('admin-view-student')
@login_required(login_url='adminlogin')
@user_passes_test(is_admin)
def delete_student_view(request,pk):
student=models.StudentExtra.objects.get(id=pk)
user=models.User.objects.get(id=student.user_id)
user.delete()
student.delete()
return redirect('admin-approve-student')
@login_required(login_url='adminlogin')
@user_passes_test(is_admin)
def update_student_view(request,pk):
student=models.StudentExtra.objects.get(id=pk)
user=models.User.objects.get(id=student.user_id)
form1=forms.StudentUserForm(instance=user)
form2=forms.StudentExtraForm(instance=student)
mydict={'form1':form1,'form2':form2}
if request.method=='POST':
form1=forms.StudentUserForm(request.POST,instance=user)
form2=forms.StudentExtraForm(request.POST,instance=student)
print(form1)
if form1.is_valid() and form2.is_valid():
user=form1.save()
user.set_password(<PASSWORD>)
user.save()
f2=form2.save(commit=False)
f2.status=True
f2.save()
return redirect('admin-view-student')
return render(request,'school/admin_update_student.html',context=mydict)
@login_required(login_url='adminlogin')
@user_passes_test(is_admin)
def admin_approve_student_view(request):
students=models.StudentExtra.objects.all().filter(status=False)
return render(request,'school/admin_approve_student.html',{'students':students})
@login_required(login_url='adminlogin')
@user_passes_test(is_admin)
def approve_student_view(request,pk):
students=models.StudentExtra.objects.get(id=pk)
students.status=True
students.save()
return redirect(reverse('admin-approve-student'))
@login_required(login_url='adminlogin')
@user_passes_test(is_admin)
def admin_view_student_fee_view(request):
students=models.StudentExtra.objects.all()
return render(request,'school/admin_view_student_fee.html',{'students':students})
#attendance related views(by sumit)
@login_required(login_url='adminlogin')
@user_passes_test(is_admin)
def admin_attendance_view(request):
return render(request,'school/admin_attendance.html')
@login_required(login_url='adminlogin')
@user_passes_test(is_admin)
def admin_take_attendance_view(request,cl):
students=models.StudentExtra.objects.all().filter(cl=cl)
print(students)
aform=forms.AttendanceForm()
if request.method=='POST':
form=forms.AttendanceForm(request.POST)
if form.is_valid():
Attendances=request.POST.getlist('present_status')
date=form.cleaned_data['date']
for i in range(len(Attendances)):
AttendanceModel=models.Attendance()
AttendanceModel.cl=cl
AttendanceModel.date=date
AttendanceModel.present_status=Attendances[i]
AttendanceModel.roll=students[i].roll
AttendanceModel.save()
return redirect('admin-attendance')
else:
print('form invalid')
return render(request,'school/admin_take_attendance.html',{'students':students,'aform':aform})
@login_required(login_url='adminlogin')
@user_passes_test(is_admin)
def admin_view_attendance_view(request,cl):
form=forms.AskDateForm()
if request.method=='POST':
form=forms.AskDateForm(request.POST)
if form.is_valid():
date=form.cleaned_data['date']
attendancedata=models.Attendance.objects.all().filter(date=date,cl=cl)
studentdata=models.StudentExtra.objects.all().filter(cl=cl)
mylist=zip(attendancedata,studentdata)
return render(request,'school/admin_view_attendance_page.html',{'cl':cl,'mylist':mylist,'date':date})
else:
print('form invalid')
return render(request,'school/admin_view_attendance_ask_date.html',{'cl':cl,'form':form})
#fee related view by admin(by sumit)
@login_required(login_url='adminlogin')
@user_passes_test(is_admin)
def admin_fee_view(request):
return render(request,'school/admin_fee.html')
@login_required(login_url='adminlogin')
@user_passes_test(is_admin)
def admin_view_fee_view(request,cl):
feedetails=models.StudentExtra.objects.all().filter(cl=cl)
return render(request,'school/admin_view_fee.html',{'feedetails':feedetails,'cl':cl})
#notice related views(by sumit)
@login_required(login_url='adminlogin')
@user_passes_test(is_admin)
def admin_notice_view(request):
form=forms.NoticeForm()
if request.method=='POST':
form=forms.NoticeForm(request.POST)
if form.is_valid():
form=form.save(commit=False)
form.by=request.user.first_name
form.save()
return redirect('admin-dashboard')
return render(request,'school/admin_notice.html',{'form':form})
#for TEACHER LOGIN SECTION(by sumit)
@login_required(login_url='teacherlogin')
@user_passes_test(is_teacher)
def teacher_dashboard_view(request):
teacherdata=models.TeacherExtra.objects.all().filter(status=True,user_id=request.user.id)
notice=models.Notice.objects.all()
mydict={
'salary':teacherdata[0].salary,
'mobile':teacherdata[0].mobile,
'date':teacherdata[0].joindate,
'notice':notice
}
return render(request,'school/teacher_dashboard.html',context=mydict)
@login_required(login_url='teacherlogin')
@user_passes_test(is_teacher)
def teacher_attendance_view(request):
return render(request,'school/teacher_attendance.html')
@login_required(login_url='teacherlogin')
@user_passes_test(is_teacher)
def teacher_take_attendance_view(request,cl):
students=models.StudentExtra.objects.all().filter(cl=cl)
aform=forms.AttendanceForm()
if request.method=='POST':
form=forms.AttendanceForm(request.POST)
if form.is_valid():
Attendances=request.POST.getlist('present_status')
date=form.cleaned_data['date']
for i in range(len(Attendances)):
AttendanceModel=models.Attendance()
AttendanceModel.cl=cl
AttendanceModel.date=date
AttendanceModel.present_status=Attendances[i]
AttendanceModel.roll=students[i].roll
AttendanceModel.save()
return redirect('teacher-attendance')
else:
print('form invalid')
return render(request,'school/teacher_take_attendance.html',{'students':students,'aform':aform})
@login_required(login_url='teacherlogin')
@user_passes_test(is_teacher)
def teacher_view_attendance_view(request,cl):
form=forms.AskDateForm()
if request.method=='POST':
form=forms.AskDateForm(request.POST)
if form.is_valid():
date=form.cleaned_data['date']
attendancedata=models.Attendance.objects.all().filter(date=date,cl=cl)
studentdata=models.StudentExtra.objects.all().filter(cl=cl)
mylist=zip(attendancedata,studentdata)
return render(request,'school/teacher_view_attendance_page.html',{'cl':cl,'mylist':mylist,'date':date})
else:
print('form invalid')
return render(request,'school/teacher_view_attendance_ask_date.html',{'cl':cl,'form':form})
@login_required(login_url='teacherlogin')
@user_passes_test(is_teacher)
def teacher_notice_view(request):
form=forms.NoticeForm()
if request.method=='POST':
form=forms.NoticeForm(request.POST)
if form.is_valid():
form=form.save(commit=False)
form.by=request.user.first_name
form.save()
return redirect('teacher-dashboard')
else:
print('form invalid')
return render(request,'school/teacher_notice.html',{'form':form})
#FOR STUDENT AFTER THEIR LOGIN(by sumit)
@login_required(login_url='studentlogin')
@user_passes_test(is_student)
def student_dashboard_view(request):
studentdata=models.StudentExtra.objects.all().filter(status=True,user_id=request.user.id)
notice=models.Notice.objects.all()
mydict={
'roll':studentdata[0].roll,
'mobile':studentdata[0].mobile,
'fee':studentdata[0].fee,
'notice':notice
}
return render(request,'school/student_dashboard.html',context=mydict)
@login_required(login_url='studentlogin')
@user_passes_test(is_student)
def student_attendance_view(request):
form=forms.AskDateForm()
if request.method=='POST':
form=forms.AskDateForm(request.POST)
if form.is_valid():
date=form.cleaned_data['date']
studentdata=models.StudentExtra.objects.all().filter(user_id=request.user.id,status=True)
attendancedata=models.Attendance.objects.all().filter(date=date,cl=studentdata[0].cl,roll=studentdata[0].roll)
mylist=zip(attendancedata,studentdata)
return render(request,'school/student_view_attendance_page.html',{'mylist':mylist,'date':date})
else:
print('form invalid')
return render(request,'school/student_view_attendance_ask_date.html',{'form':form})
# for aboutus and contactus (by sumit)
def aboutus_view(request):
return render(request,'school/aboutus.html')
def contactus_view(request):
sub = forms.ContactusForm()
if request.method == 'POST':
sub = forms.ContactusForm(request.POST)
if sub.is_valid():
email = sub.cleaned_data['Email']
name=sub.cleaned_data['Name']
message = sub.cleaned_data['Message']
            send_mail(str(name)+' || '+str(email),message, settings.EMAIL_HOST_USER, ['<EMAIL>'], fail_silently = False)
return render(request, 'school/contactussuccess.html')
return render(request, 'school/contactus.html', {'form':sub})
| 2.15625 | 2 |
PaddleCV/tracking/pytracking/features/deep.py | weiwei1115/models | 2 | 1972 | <reponame>weiwei1115/models
import os
import numpy as np
from paddle import fluid
from ltr.models.bbreg.atom import atom_resnet50, atom_resnet18
from ltr.models.siamese.siam import siamfc_alexnet
from ltr.models.siam.siam import SiamRPN_AlexNet, SiamMask_ResNet50_sharp, SiamMask_ResNet50_base
from pytracking.admin.environment import env_settings
from pytracking.features.featurebase import MultiFeatureBase
from pytracking.libs import TensorList
from pytracking.libs.paddle_utils import n2p
class ResNet18(MultiFeatureBase):
"""ResNet18 feature.
args:
output_layers: List of layers to output.
net_path: Relative or absolute net path (default should be fine).
use_gpu: Use GPU or CPU.
"""
def __init__(self,
output_layers=('block2', ),
net_path='atom_iou',
use_gpu=True,
*args,
**kwargs):
super().__init__(*args, **kwargs)
self.output_layers = list(output_layers)
self.use_gpu = use_gpu
self.net_path = net_path
def initialize(self):
with fluid.dygraph.guard():
if os.path.isabs(self.net_path):
net_path_full = self.net_path
else:
net_path_full = os.path.join(env_settings().network_path,
self.net_path)
self.net = atom_resnet18(
backbone_pretrained=False,
backbone_is_test=True,
iounet_is_test=True)
state_dictsm, _ = fluid.load_dygraph(net_path_full)
self.net.load_dict(state_dictsm)
self.net.train()
self.iou_predictor = self.net.bb_regressor
self.layer_stride = {
'conv0': 2,
'conv1': 2,
'block0': 4,
'block1': 8,
'block2': 16,
'block3': 32,
'classification': 16,
'fc': None
}
self.layer_dim = {
'conv0': 64,
'conv1': 64,
'block0': 64,
'block1': 128,
'block2': 256,
'block3': 512,
'classification': 256,
'fc': None
}
self.iounet_feature_layers = self.net.bb_regressor_layer
if isinstance(self.pool_stride, int) and self.pool_stride == 1:
self.pool_stride = [1] * len(self.output_layers)
self.feature_layers = sorted(
list(set(self.output_layers + self.iounet_feature_layers)))
self.mean = np.reshape([0.485, 0.456, 0.406], [1, -1, 1, 1])
self.std = np.reshape([0.229, 0.224, 0.225], [1, -1, 1, 1])
def free_memory(self):
if hasattr(self, 'net'):
del self.net
if hasattr(self, 'iou_predictor'):
del self.iou_predictor
if hasattr(self, 'iounet_backbone_features'):
del self.iounet_backbone_features
if hasattr(self, 'iounet_features'):
del self.iounet_features
def dim(self):
return TensorList([self.layer_dim[l] for l in self.output_layers])
def stride(self):
return TensorList([
s * self.layer_stride[l]
for l, s in zip(self.output_layers, self.pool_stride)
])
def extract(self, im: np.ndarray, debug_save_name=None):
with fluid.dygraph.guard():
if debug_save_name is not None:
np.savez(debug_save_name, im)
im = im / 255. # don't use im /= 255. since we don't want to alter the input
im -= self.mean
im /= self.std
im = n2p(im)
output_features = self.net.extract_features(im, self.feature_layers)
# Store the raw resnet features which are input to iounet
iounet_backbone_features = TensorList([
output_features[layer] for layer in self.iounet_feature_layers
])
self.iounet_backbone_features = iounet_backbone_features.numpy()
# Store the processed features from iounet, just before pooling
self.iounet_features = TensorList([
f.numpy()
for f in self.iou_predictor.get_iou_feat(
iounet_backbone_features)
])
output = TensorList([
output_features[layer].numpy() for layer in self.output_layers
])
return output
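# Minimal usage sketch (assumptions: the 'atom_iou' weights exist under
# env_settings().network_path, MultiFeatureBase supplies defaults for any pooling
# parameters, and `im` is a 1x3xHxW float ndarray of 0-255 RGB values).
def _example_resnet18_features(im: np.ndarray):
    feature = ResNet18(output_layers=('block2',), net_path='atom_iou')
    feature.initialize()
    return feature.extract(im)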
class ResNet50(MultiFeatureBase):
"""ResNet50 feature.
args:
output_layers: List of layers to output.
net_path: Relative or absolute net path (default should be fine).
use_gpu: Use GPU or CPU.
"""
def __init__(self,
output_layers=('block2', ),
net_path='atom_iou',
use_gpu=True,
*args,
**kwargs):
super().__init__(*args, **kwargs)
self.output_layers = list(output_layers)
self.use_gpu = use_gpu
self.net_path = net_path
def initialize(self):
with fluid.dygraph.guard():
if os.path.isabs(self.net_path):
net_path_full = self.net_path
else:
net_path_full = os.path.join(env_settings().network_path,
self.net_path)
self.net = atom_resnet50(
backbone_pretrained=False,
backbone_is_test=True,
iounet_is_test=True)
state_dictsm, _ = fluid.load_dygraph(net_path_full)
self.net.load_dict(state_dictsm)
self.net.train()
self.iou_predictor = self.net.bb_regressor
self.layer_stride = {
'conv0': 2,
'conv1': 2,
'block0': 4,
'block1': 8,
'block2': 16,
'block3': 32,
'classification': 16,
'fc': None
}
self.layer_dim = {
'conv0': 64,
'conv1': 64,
'block0': 256,
'block1': 512,
'block2': 1024,
'block3': 2048,
'classification': 256,
'fc': None
}
self.iounet_feature_layers = self.net.bb_regressor_layer
if isinstance(self.pool_stride, int) and self.pool_stride == 1:
self.pool_stride = [1] * len(self.output_layers)
self.feature_layers = sorted(
list(set(self.output_layers + self.iounet_feature_layers)))
self.mean = np.reshape([0.485, 0.456, 0.406], [1, -1, 1, 1])
self.std = np.reshape([0.229, 0.224, 0.225], [1, -1, 1, 1])
def free_memory(self):
if hasattr(self, 'net'):
del self.net
if hasattr(self, 'iou_predictor'):
del self.iou_predictor
if hasattr(self, 'iounet_backbone_features'):
del self.iounet_backbone_features
if hasattr(self, 'iounet_features'):
del self.iounet_features
def dim(self):
return TensorList([self.layer_dim[l] for l in self.output_layers])
def stride(self):
return TensorList([
s * self.layer_stride[l]
for l, s in zip(self.output_layers, self.pool_stride)
])
def extract(self, im: np.ndarray, debug_save_name=None):
with fluid.dygraph.guard():
if debug_save_name is not None:
np.savez(debug_save_name, im)
im = im / 255. # don't use im /= 255. since we don't want to alter the input
im -= self.mean
im /= self.std
im = n2p(im)
output_features = self.net.extract_features(im, self.feature_layers)
# Store the raw resnet features which are input to iounet
iounet_backbone_features = TensorList([
output_features[layer] for layer in self.iounet_feature_layers
])
self.iounet_backbone_features = iounet_backbone_features.numpy()
# Store the processed features from iounet, just before pooling
self.iounet_features = TensorList([
f.numpy()
for f in self.iou_predictor.get_iou_feat(
iounet_backbone_features)
])
output = TensorList([
output_features[layer].numpy() for layer in self.output_layers
])
return output
class SFCAlexnet(MultiFeatureBase):
"""Alexnet feature.
args:
output_layers: List of layers to output.
net_path: Relative or absolute net path (default should be fine).
use_gpu: Use GPU or CPU.
"""
def __init__(self,
output_layers=('conv5', ),
net_path='estimator',
use_gpu=True,
*args,
**kwargs):
super().__init__(*args, **kwargs)
self.output_layers = list(output_layers)
self.use_gpu = use_gpu
self.net_path = net_path
def initialize(self):
with fluid.dygraph.guard():
if os.path.isabs(self.net_path):
net_path_full = self.net_path
else:
net_path_full = os.path.join(env_settings().network_path,
self.net_path)
self.net = siamfc_alexnet(
backbone_pretrained=False,
backbone_is_test=True,
estimator_is_test=True)
state_dictsm, _ = fluid.load_dygraph(net_path_full)
self.net.load_dict(state_dictsm)
self.net.train()
self.target_estimator = self.net.target_estimator
self.layer_stride = {'conv5': 8}
self.layer_dim = {'conv5': 256}
self.estimator_feature_layers = self.net.target_estimator_layer
if isinstance(self.pool_stride, int) and self.pool_stride == 1:
self.pool_stride = [1] * len(self.output_layers)
self.feature_layers = sorted(
list(set(self.output_layers + self.estimator_feature_layers)))
self.mean = np.reshape([0., 0., 0.], [1, -1, 1, 1])
self.std = np.reshape([1 / 255., 1 / 255., 1 / 255.], [1, -1, 1, 1])
def free_memory(self):
if hasattr(self, 'net'):
del self.net
if hasattr(self, 'target_estimator'):
del self.target_estimator
if hasattr(self, 'estimator_backbone_features'):
del self.estimator_backbone_features
def dim(self):
return TensorList([self.layer_dim[l] for l in self.output_layers])
def stride(self):
return TensorList([
s * self.layer_stride[l]
for l, s in zip(self.output_layers, self.pool_stride)
])
def extract(self, im: np.ndarray, debug_save_name=None):
with fluid.dygraph.guard():
if debug_save_name is not None:
np.savez(debug_save_name, im)
im = im / 255. # don't use im /= 255. since we don't want to alter the input
im -= self.mean
im /= self.std
im = n2p(im)
output_features = self.net.extract_features(im, self.feature_layers)
# Store the raw backbone features which are input to estimator
estimator_backbone_features = TensorList([
output_features[layer]
for layer in self.estimator_feature_layers
])
            self.estimator_backbone_features = estimator_backbone_features.numpy()
output = TensorList([
output_features[layer].numpy() for layer in self.output_layers
])
return output
class SRPNAlexNet(MultiFeatureBase):
"""Alexnet feature.
args:
output_layers: List of layers to output.
net_path: Relative or absolute net path (default should be fine).
use_gpu: Use GPU or CPU.
"""
def __init__(self,
net_path='estimator',
use_gpu=True,
*args, **kwargs):
super().__init__(*args, **kwargs)
self.use_gpu = use_gpu
self.net_path = net_path
def initialize(self):
with fluid.dygraph.guard():
if os.path.isabs(self.net_path):
net_path_full = self.net_path
else:
net_path_full = os.path.join(env_settings().network_path, self.net_path)
self.net = SiamRPN_AlexNet(backbone_pretrained=False, is_test=True)
state_dict, _ = fluid.load_dygraph(net_path_full)
self.net.load_dict(state_dict)
self.net.eval()
def free_memory(self):
if hasattr(self, 'net'):
del self.net
def extract(self, im: np.ndarray, debug_save_name=None):
with fluid.dygraph.guard():
if debug_save_name is not None:
np.savez(debug_save_name, im)
im = n2p(im)
output_features = self.net.extract_backbone_features(im)
# Store the raw backbone features which are input to estimator
output = TensorList([layer.numpy() for layer in output_features])
return output
class SMaskResNet50_base(MultiFeatureBase):
"""Resnet50-dilated feature.
args:
output_layers: List of layers to output.
net_path: Relative or absolute net path (default should be fine).
use_gpu: Use GPU or CPU.
"""
def __init__(self,
net_path='estimator',
use_gpu=True,
*args, **kwargs):
super().__init__(*args, **kwargs)
self.use_gpu = use_gpu
self.net_path = net_path
def initialize(self):
with fluid.dygraph.guard():
if os.path.isabs(self.net_path):
net_path_full = self.net_path
else:
net_path_full = os.path.join(env_settings().network_path, self.net_path)
self.net = SiamMask_ResNet50_base(backbone_pretrained=False, is_test=True)
state_dict, _ = fluid.load_dygraph(net_path_full)
self.net.load_dict(state_dict)
self.net.eval()
def free_memory(self):
if hasattr(self, 'net'):
del self.net
def extract(self, im: np.ndarray, debug_save_name=None):
with fluid.dygraph.guard():
if debug_save_name is not None:
np.savez(debug_save_name, im)
im = n2p(im)
output_features = self.net.extract_backbone_features(im)
# Store the raw backbone features which are input to estimator
output = TensorList([layer.numpy() for layer in output_features])
return output
class SMaskResNet50_sharp(MultiFeatureBase):
"""Resnet50-dilated feature.
args:
output_layers: List of layers to output.
net_path: Relative or absolute net path (default should be fine).
use_gpu: Use GPU or CPU.
"""
def __init__(self,
net_path='estimator',
use_gpu=True,
*args, **kwargs):
super().__init__(*args, **kwargs)
self.use_gpu = use_gpu
self.net_path = net_path
def initialize(self):
with fluid.dygraph.guard():
if os.path.isabs(self.net_path):
net_path_full = self.net_path
else:
net_path_full = os.path.join(env_settings().network_path, self.net_path)
self.net = SiamMask_ResNet50_sharp(backbone_pretrained=False, is_test=True)
state_dict, _ = fluid.load_dygraph(net_path_full)
self.net.load_dict(state_dict)
self.net.eval()
def free_memory(self):
if hasattr(self, 'net'):
del self.net
def extract(self, im: np.ndarray, debug_save_name=None):
with fluid.dygraph.guard():
if debug_save_name is not None:
np.savez(debug_save_name, im)
im = n2p(im)
output_features = self.net.extract_backbone_features(im)
# Store the raw backbone features which are input to estimator
output = TensorList([layer.numpy() for layer in output_features])
return output
| 1.914063 | 2 |
netesto/local/psPlot.py | fakeNetflix/facebook-repo-fbkutils | 346 | 1973 | #!/usr/bin/env python2
import sys
import random
import os.path
import shutil
import commands
import types
import math
#gsPath = '/usr/local/bin/gs'
gsPath = 'gs'
logFile = '/dev/null'
#logFile = 'plot.log'
#--- class PsPlot(fname, pageHeader, pageSubHeader, plotsPerPage)
#
class PsPlot(object):
def __init__(self, fname, pageHeader, pageSubHeader, plotsPerPage):
self.foutPath = os.path.dirname(fname)+'/'
if self.foutPath == '/':
self.foutPath = ''
self.foutName = os.path.basename(fname)
self.fname = fname+'.ps'
self.pageHeader = pageHeader
self.pageSubHeader = pageSubHeader
self.plotsPerPage = plotsPerPage
self.yfix1 = ''
self.yfix2 = ''
self.xGrid = 1
self.yGrid = 1
self.xUniform = False
self.xLen = 6.5 #inches
self.seriesTitle = ' '
self.x0 = 0
self.xInc = 0
self.xCount = 0
self.xList = []
self.xDict = {}
self.y1Inc = 0
self.y1Count = 0
self.y1LogScale = 0
self.y2Inc = 0
self.y2Count = 0
self.y2LogScale = 0
self.xOffset = 0
self.colors = [ (0.7,0.7,0.7), (0,0,0.8), (0.8,0,0),
(0.42,0.55,0.14), (0.6,0.5,0.3), (0.6,0.2,0.8),
(0,0.8,0),
(0.4,0.3,0.5), (0.5,0.5,0.5), (0.8,0.0,0.0), (0,0,0) ]
self.colorsN = 11
self.colorRed = (0.8,0,0)
self.colorGreen = (0,0.8,0)
self.colorBlue = (0,0,0.8)
self.colorAqua = (0,0.5,0.5)
self.colorWhite = (1,1,1)
self.ColorBlack = (0,0,0)
self.xSize = 1800
self.ySize = 900
shutil.copy('plot-header.ps', self.fname)
self.fout = open(self.fname, 'a')
self.flog = open(logFile, 'a')
# self.flog = open('./psPlot.out', 'a')
if plotsPerPage == 4:
print >>self.fout, '/doGraph { graph4v } def'
print >>self.fout, '/nextGraph { nextGraph4v } def'
elif plotsPerPage == 3:
print >>self.fout, '/doGraph { graph3v } def'
print >>self.fout, '/nextGraph { nextGraph3v } def'
elif plotsPerPage == 2:
print >>self.fout, '/doGraph { graph2v } def'
print >>self.fout, '/nextGraph { nextGraph2v } def'
else:
print >>self.fout, '/doGraph { graph1v } def'
print >>self.fout, '/nextGraph { nextGraph1v } def'
print >>self.fout, '/showpage {\n 40 742 moveto'
print >>self.fout, '/Helvetica findfont 12 scalefont setfont'
if self.pageHeader != '':
print >>self.fout, '(',self.pageHeader,') show'
if self.pageSubHeader != '':
print >>self.fout, '40 726 moveto\n (',self.pageSubHeader,') show'
print >>self.fout, 'showpage\n} bind def'
print >>self.fout, 'doGraph'
#--- End()
#
def End(self):
print >>self.fout, '\nshowpage\nend'
self.fout.close()
#--- GetInc(vMin, vMax)
def GetInc(self,vMin, vMax):
ff = 1.0
while vMax <= 1 and vMax > 0:
ff *= 0.10
vMin *= 10
vMax *= 10
v0 = int(vMin)
v1 = int(vMax+0.99)
f = 1
w = v1 - v0
if w == 0:
v1 = v0 + 1
w = 1
while w/f >= 100:
f *= 10
# w = int(w/f)
v0 = int(v0/f)
v1 = int(v1/f)
if (vMin % f) != 0 and vMax == v1:
v1 += 1
w = v1 - v0
if w <= 10:
vInc = 1
elif w <= 20:
vInc = 2
else:
m = 10
while w/m > 100:
m *= 10
if (v0 >= 0) and (v0 % m) != 0:
v0 = int(v0 / m) * m
if (v1 % m) != 0:
v1 = int(v1 / m) * m + m
w = v1 - v0
if w <= 5*m:
vInc = m/2
else:
vInc = m
else:
vInc = m
# if (vMax/f)%vInc != 0 or v1 % vInc != 0:
if v1 % vInc != 0:
v1 = int(v1/vInc)*vInc + vInc
if (v0 % vInc) != 0:
v0 = int(v0/vInc)*vInc
v0 += vInc
v0 *= (f*ff)
v1 *= (f*ff)
vInc *= (f*ff)
return v0, v1, vInc
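    # Worked example (sketch): GetInc(0, 97) walks the branches above and
    # returns (0, 100, 10), i.e. an axis from 0 to 100 with a tick every 10.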
#--- ValueConvert(v)
#
def ValueConvert(self, v, inc):
if inc > 0:
logInc = int(math.log10(v/inc))
d = math.pow(10,logInc)
if d == 0:
d = 10.0
else:
d = 10.0
if d == 1 and float(v)/inc > 1.0:
d = 10.0
if v >= 1000000000 and inc > 1:
s = int(v/(1000000000/d))/d
if s*d == int(s)*d:
s = int(s)
r = str(s) + 'G'
elif v >= 1000000 and inc > 1:
s = int(v/(1000000/d))/d
if s*d == int(s)*d:
s = int(s)
r = str(s) + 'M'
elif v >= 1000 and inc > 1:
s = int(v/(1000/d))/d
if s*d == int(s)*d:
s = int(s)
r = str(s) + 'K'
elif v >= 1:
s = int(v*d)/d
if s*d == int(s)*d:
s = int(s)
r = str(s)
else:
r = str(int(v*100)/100.0)
return r
#--- GetAxis(vBeg, vEnd, vInc, logFlag)
#
def GetAxis(self, vBeg, vEnd, vInc, logFlag):
fix = '{ 0 add }'
if isinstance(vBeg,list):
vList = vBeg
vList.append(' ')
self.xUniform = True
v0 = 1
v1 = len(vList)
vi = 1
fix = '{ '+str(v0-vi)+' sub '+str(vi)+' div }'
logFlag = 0
else:
if vInc == 0:
v0,v1,vi = self.GetInc(vBeg,vEnd)
else:
v0 = vBeg
v1 = vEnd
vi = vInc
if vBeg > 0 and (logFlag==1 or (logFlag==0 and (vEnd/vBeg > 100))):
v0 = vBeg
v1 = vEnd
logFlag = 1
v0Log = math.log10(v0)
t = math.ceil(v0Log)
ff = math.modf(v0Log)
if math.fabs(ff[0]) < math.fabs(v0Log)/1000 and t < 0:
t += 1
logOffset = 0
while t < 1:
logOffset += 1
t += 1
v0 = math.pow(10,math.floor(v0Log)+1)
v1 = math.pow(10,math.ceil(math.log10(v1)))
vi = 1
vList = []
v = v0
while v <= v1:
vList.append(self.ValueConvert(v,0))
v *= 10
if v0 > 1:
logOffset -= (math.log10(v0) - 1)
                # subtract 1 from above inside parent?
fix = '{ dup 0 eq { } { log '+str(logOffset)+' add } ifelse }'
else:
logFlag = 0
v = v0
vList = []
n = 0
while True:
vList.append(self.ValueConvert(v,vi))
if v > vEnd:
break
n += 1
v = v0 + n*vi
fix = '{ '+str(v0-vi)+' sub '+str(vi)+' div }'
print >>self.flog, 'v0:',v0,' vi:',vi,' v1:',v1,' (',vEnd,')'
print >>self.flog, 'vList: ', vList
print >>self.flog, 'logFlag: ', logFlag, ' fix: ', fix
return v0,v1,vi,vList,fix,logFlag
#--- SetXLen(xlen)
def SetXLen(self, xlen):
self.xLen = xlen
print >>self.fout, '/xAxisLen %.2f def' % self.xLen
print >>self.fout, 'doGraph'
return
#--- SetXSize(xsize)
def SetXSize(self, xsize):
self.xSize = xsize
return
#--- SetYSize(ysize)
def SetYSize(self, ysize):
self.ySize = ysize
return
#--- SetPlotBgLevel(level)
#
def SetPlotBgLevel(self,level):
print >>self.fout, '/plotBgLevel ', level, 'def\n'
return
#--- SetPlotPercentDir(value)
def SetPlotPercentDir(self,value):
if value == 'Vertical':
print >>self.fout, '/plotNumPercentDir 1 def\n'
else:
print >>self.fout, '/plotNumPercentDir 0 def\n'
return
#--- SetPlotYLogScale(axis,value)
#
def SetPlotYLogScale(self,axis,value):
if value == 'Off':
v = -1
elif value == 'On':
v = 1
else:
v = 0;
if axis == 1:
self.y1LogScale = v
else:
self.y2LogScale = v
return
#--- SetPlot(xbeg,xend,xinc,ybeg,yend,yinc,xtitle,ytitle,title)
#
def SetPlot(self,xbeg,xend,xinc,ybeg,yend,yinc,xtitle,ytitle,title):
print >>self.fout, '\n\nnextGraph\n1 setlinewidth\n'
(x0,x1,xi,xList,fix,logFlag) = self.GetAxis(xbeg,xend,xinc,0)
self.x0 = x0
self.xInc = xi
self.xCount = len(xList)
self.xList = xList
self.xDict = {}
k = 1
for x in xList:
self.xDict[x] = k
k=k+1
print >>self.fout, '/xfix ', fix, ' def\n'
(y0,y1,yi,yList,fix,logFlag) = self.GetAxis(ybeg,yend,yinc,
self.y1LogScale)
self.y1Inc = yi
self.y1Count = len(yList)
self.yfix1 = '/yfix '+fix+' def\n /yinc yinc1 def'
print >>self.fout, self.yfix1
print >>self.fout, '[ '
for x in xList:
self.fout.write('('+str(x)+') ')
self.fout.write(' ]\n[ ')
for y in yList:
self.fout.write('('+str(y)+') ')
print >>self.fout, ' ]'
print >>self.fout, '('+xtitle+')\n('+ytitle+')\naxes\n'
print >>self.fout, self.xGrid, self.yGrid, ' grid\n'
print >>self.fout, '/ymtitle ypos ylen add 10 add def\n'
# Multiple lines in title are separated by '|'
print >>self.flog, 'Main Title: '+title
titleLines = title.split('|')
for t in titleLines:
if len(t) > 0:
print >>self.flog, ' '+t
print >>self.fout, '('+t+')\n'
print >>self.fout, 'Mtitles\n'
# print >>self.fout, '('+title+')\nMtitles\n'
if logFlag == 1:
print >>self.fout, 'beginFunction\n'
for ys in yList:
factor = 1
if ys[-1:] == 'K':
yss = ys[:-1]
factor = 1000
elif ys[-1:] == 'M':
yss = ys[:-1]
factor = 1000000
else:
yss = ys
y = float(yss)*factor/10.0
k = 2
while k < 10:
print >>self.fout, 0, k*y
k += 1
print >>self.fout, 'endFunction\n'
print >>self.fout, '19 { 0 0 0 setrgbcolor } plotSymbolsC\n'
return y1
#--- SetPlot2(xbeg,xend,xinc,ybeg,yend,yinc,zbeg,zend,zinc,
# xtitle,ytitle,ztitle,title)
#
def SetPlot2(self,xbeg,xend,xinc,ybeg,yend,yinc,zbeg,zend,zinc,
xtitle,ytitle,ztitle,title):
rv = self.SetPlot(xbeg,xend,xinc,ybeg,yend,yinc,xtitle,ytitle,title)
(z0,z1,zi,zList,fix,logFlag) = self.GetAxis(zbeg,zend,zinc,self.y2LogScale)
self.y2Inc = zi
self.y2Count = len(zList)
print >>self.fout, '/Flag2Yaxes 1 def'
self.yfix2 = '/yfix '+fix+' def\n/yinc yinc2 def'
print >>self.fout, 'axpos axlen add aypos aylen'
self.fout.write('[ ')
for z in zList:
self.fout.write('('+str(z)+') ')
self.fout.write(' ]')
if ztitle != '':
print >>self.fout, '('+ztitle+') vaxis2'
if logFlag == 1:
print >>self.fout, self.yfix2
print >>self.fout, 'beginFunction\n'
for zs in zList:
factor = 1
if zs[-1:] == 'K':
zss = zs[:-1]
factor = 1000
elif zs[-1:] == 'M':
zss = zs[:-1]
factor = 1000000
else:
zss = zs
y = float(zss)*factor/10.0
k = 2
while k < 10:
print >>self.fout, self.xCount, k*y
k += 1
print >>self.fout, 'endFunction\n'
print >>self.fout, '18 { 0.72 0.52 0.5 setrgbcolor } plotSymbolsC\n'
return rv
#--- SetColor(color)
#
def SetColor(self, color):
rv = ' { '+str(color[0])+' '+str(color[1])+' '+str(color[2])+ \
' setrgbcolor } '
return rv
#--- GetColorIndx(indx)
#
def GetColorIndx(self, indx):
color = self.colors[indx % self.colorsN]
rv = ' { '+str(color[0])+' '+str(color[1])+' '+str(color[2])+ \
' setrgbcolor } '
return rv
#--- SetColorIndx(indx, r, g, b)
#
    def SetColorIndx(self, indx, r, g, b):
        # entries of self.colors are tuples, so replace the whole entry
        self.colors[indx] = (r, g, b)
        return self.colors[indx]
#--- outputPS(string)
#
def outputPS(self, s):
print >>self.fout, s
#--- SeriesNames(names)
#
def SeriesNames(self, names):
indx = len(names) - 1
if indx == 0:
return
print >>self.fout, '('+self.seriesTitle+')'
while indx >= 0:
if names[indx] != None:
print >>self.fout, '('+names[indx]+') '
print >>self.fout, self.SetColor(self.colors[indx % self.colorsN])
indx -= 1
print >>self.fout, 'fdescriptionsC'
#--- PlotVBars(xList, type)
#
def PlotVBars(self, xList, type):
flog = self.flog
print >>self.fout, self.yfix1
print >>self.fout, 'beginFunction\n'
endFun = 'endFunction\n'
indx = 0
for x in xList:
if x == ' ' and indx == len(xList)-1:
continue
indx += 1
print >>self.fout, x, 0.0
if (indx != 0) and (indx % 1000) == 0:
print >>self.fout, endFun+type+'\nbeginFunction\n'
print >>self.fout, x
print >>self.fout, endFun, type, '\n'
return
#--- PlotData(axis, xList, yList, zList, id, type)
#
def PlotData(self, axis, xList, yList, zList, id, type):
flog = self.flog
print >>flog, 'graph xList: ', self.xList, ' xList: ', xList, \
' yList: ', yList
print >>self.fout, '%\n% Plot '+id+'\n%\n'
print >>self.fout, '/xfix { ', self.x0 - self.xInc - self.xOffset,' sub ', self.xInc, ' div ', 0.0,' add } def\n'
if axis == 2:
print >>self.fout, self.yfix2
elif axis == 1:
print >>self.fout, self.yfix1
# else:
# print >>self.fout, '/yfix { 0 add } def\n'
print >>self.fout, 'beginFunction\n'
if isinstance(zList,list):
endFun = 'endFunctionW\n'
else:
endFun = 'endFunction\n'
indx = 0
for x in xList:
if x == ' ' and indx == len(xList)-1:
continue
if len(yList) <= indx:
continue
y = yList[indx]
if isinstance(zList,list):
if len(zList) <= indx:
continue
z = zList[indx]
else:
z = ''
indx += 1
if self.xUniform == True:
g_indx = self.xDict[x]
print >>self.fout, g_indx, y, z
else:
print >>self.fout, x, y, z
if (indx != 0) and (indx % 1000) == 0:
print >>self.fout, endFun+type+'\nbeginFunction\n'
if self.xUniform == True:
print >>self.fout, g_indx, y, z
else:
print >>self.fout, x, y, z
print >>self.fout, endFun, type, '\n'
return
#--- GetImage()
#
def GetImage(self):
flog = self.flog
print >>self.fout, 'showpage\n'
self.fout.flush()
os.fsync(self.fout)
if self.plotsPerPage == 1:
# size = ' -g1200x550 '
size = ' -g%dx%d ' % (self.xSize, self.ySize)
xres = int(100 * self.xSize * 6.5 / (1200 * self.xLen))
yres = int(110 * self.ySize / 550)
res = ' -r%dx%d ' % (xres, yres)
cmdStr = gsPath + ' -sDEVICE=jpeg'+size+'-sOutputFile='+self.foutPath+self.foutName+'.jpg -dNOPAUSE '+ res +self.fname+' -c quit'
# cmdStr = gsPath + ' -sDEVICE=jpeg'+size+'-sOutputFile='+self.foutPath+self.foutName+'.jpg -dNOPAUSE -r100x100 '+self.fname+' -c quit'
else:
size = ' -g1200x1100 '
cmdStr = gsPath + ' -sDEVICE=jpeg'+size+'-sOutputFile='+self.foutPath+self.foutName+'%d.jpg -dNOPAUSE -r100x100 '+self.fname+' -c quit'
print >>flog, 'cmdStr: ', cmdStr
output = commands.getoutput(cmdStr)
print >>flog, 'output from gs command: ', output
return self.foutPath+self.foutName+'.jpg'
#--- Main
#
def main():
tMin = 0
tMax = 100000
stateList = [0,1,2,2,3,3,3,3,4]
fname = 'sched.txt'
if len(sys.argv) == 2:
fname = sys.argv[1]
elif len(sys.argv) == 3:
tMin = int(sys.argv[1])
tMax = int(sys.argv[2])
elif len(sys.argv) == 4:
tMin = int(sys.argv[1])
tMax = int(sys.argv[2])
fname = sys.argv[3]
elif len(sys.argv) != 1:
print 'USAGE: psPlot.py [tMin tMax] [fname]'
sys.exit(1)
print 'tMin,tMax: ', tMin, tMax, 'fname: ', fname
p = PsPlot('./p', 'Header', 'SubHeader', 1)
fromStateList = []
toStateList = []
time1List = []
time2List = []
indx = 0
oldTime = 0
fin = open(fname, 'r')
for inputLine in fin:
inputLine = inputLine.replace(' ','')
inputLine = inputLine.replace("'", '')
i1 = inputLine.find('(')
i2 = inputLine.find(')')
inputList = inputLine[i1+1:i2-1].split(',')
s1 = stateList[int(inputList[0])]
s2 = stateList[int(inputList[1])]
t = int(inputList[2])
if indx != 0 and t >= tMin and t <= tMax:
fromStateList.append(s1)
toStateList.append(s2)
time1List.append(oldTime)
time2List.append(t)
oldTime = t
indx += 1
p.SetPlot(tMin, tMax, 0, 0, 2, 0, 'Time', 'Socket/State', 'Chavey\'s Plot')
state = 0
while state <= 4:
t1List = []
t2List = []
sList = []
indx = 0
for s in toStateList:
if s == state:
t1List.append(time1List[indx])
t2List.append(time2List[indx])
sList.append(0.10 + s*0.20)
indx += 1
        p.PlotData(1, t1List, t2List, sList, 'Test',
                   '0.1 in 0 '+p.SetColor(p.colors[state])+' plotWbarsC')
state += 1
    image = p.GetImage()
print 'Image file: ', image
p.End()
if __name__ == "__main__":
main()
| 2.28125 | 2 |
physio2go/exercises/migrations/0003_auto_20161128_1753.py | hamole/physio2go | 0 | 1974 | <filename>physio2go/exercises/migrations/0003_auto_20161128_1753.py
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-28 06:53
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('exercises', '0002_auto_20161128_1718'),
]
operations = [
migrations.RenameModel(
old_name='Exercises',
new_name='Exercise',
),
]
| 1.585938 | 2 |
setup.py | pasinskim/mender-python-client | 0 | 1975 | # Copyright 2021 Northern.tech AS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import setuptools
import re
VERSIONFILE = "src/mender/_version.py"
version_string_line = open(VERSIONFILE, "rt").read()
VSRE = r"^__version__ = ['\"]([^'\"]*)['\"]"
match = re.search(VSRE, version_string_line, re.M)
if match:
version_string = match.group(1)
else:
raise RuntimeError("Unable to find version string in %s." % (VERSIONFILE,))
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="mender-python-client-mendersoftware",
version=version_string,
license="Apache 2.0",
author="Mendersoftware",
author_email="<EMAIL>",
description="A Python implementation of the Mender client interface",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/mendersoftware/mender-python-client",
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
],
keywords=["mender", "OTA", "updater"],
packages=setuptools.find_packages(where="src"),
install_requires=["cryptography", "requests", "msgpack", "websockets"],
entry_points={"console_scripts": ["mender-python-client=mender.mender:main"]},
package_dir={"": "src"},
python_requires=">=3.6",
zip_safe=False,
include_package_data=True,
)
| 1.554688 | 2 |
Q295-v2.py | Linchin/python_leetcode_git | 0 | 1976 | """
295
find median from data stream
hard
"""
from heapq import *
class MedianFinder:
# max heap and min heap
def __init__(self):
"""
initialize your data structure here.
"""
self.hi = []
self.lo = []
def addNum(self, num: int) -> None:
heappush(self.lo, -heappushpop(self.hi, num))
while len(self.lo) > len(self.hi):
heappush(self.hi, -heappop(self.lo))
def findMedian(self) -> float:
if len(self.hi) > len(self.lo):
return self.hi[0]
if len(self.hi) == len(self.lo):
return (self.hi[0] - self.lo[0]) / 2.0
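# Invariant (note): self.hi is a min-heap holding the larger half of the numbers,
# self.lo is a max-heap of the smaller half stored as negated values, and
# len(self.hi) is always len(self.lo) or len(self.lo) + 1. The median is therefore
# hi[0], or the average of hi[0] and -lo[0] (written as (hi[0] - lo[0]) / 2 above).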
sol = MedianFinder()
sol.addNum(1)
print(sol.findMedian())
sol.addNum(2)
print(sol.findMedian()) | 3.40625 | 3 |
raisimPy/examples/newtonsCradle.py | mstoelzle/raisimLib | 0 | 1977 | <reponame>mstoelzle/raisimLib
import os
import numpy as np
import raisimpy as raisim
import math
import time
raisim.World.setLicenseFile(os.path.dirname(os.path.abspath(__file__)) + "/../../rsc/activation.raisim")
world = raisim.World()
ground = world.addGround()
world.setTimeStep(0.001)
world.setMaterialPairProp("steel", "steel", 0.1, 1.0, 0.0)
pin1 = world.addSphere(0.1, 0.8)
pin1.setAppearance("1,0,0,0.3")
pin1.setPosition(0.0, 0.0, 3.0)
pin1.setBodyType(raisim.BodyType.STATIC)
pin2 = world.addSphere(0.1, 0.8)
pin2.setAppearance("0,1,0,0.3")
pin2.setPosition(0.3, 0.0, 3.0)
pin2.setBodyType(raisim.BodyType.STATIC)
pin3 = world.addSphere(0.1, 0.8)
pin3.setAppearance("0,0,1,0.3")
pin3.setPosition(0.6, 0.0, 3.0)
pin3.setBodyType(raisim.BodyType.STATIC)
pin4 = world.addSphere(0.1, 0.8)
pin4.setAppearance("1,0,0,0.3")
pin4.setPosition(0.9, 0.0, 3.0)
pin4.setBodyType(raisim.BodyType.STATIC)
pin5 = world.addSphere(0.1, 0.8)
pin5.setPosition(0.9, 0.0, 6.0)
pin5.setBodyType(raisim.BodyType.STATIC)
pin6 = world.addSphere(0.1, 0.8)
pin6.setPosition(-3., 0.0, 7.0)
pin6.setBodyType(raisim.BodyType.STATIC)
pin7 = world.addSphere(0.1, 0.8)
pin7.setPosition(-4., 0.0, 7.0)
pin7.setBodyType(raisim.BodyType.STATIC)
anymalB_urdf_file = os.path.dirname(os.path.abspath(__file__)) + "/../../rsc/anymal/urdf/anymal.urdf"
anymalC_urdf_file = os.path.dirname(os.path.abspath(__file__)) + "/../../rsc/anymal_c/urdf/anymal.urdf"
anymalC = world.addArticulatedSystem(anymalC_urdf_file)
anymalB = world.addArticulatedSystem(anymalB_urdf_file)
jointNominalConfig = np.array([-3, 0, 4.54, 1.0, 0.0, 0.0, 0.0, 0.03, 0.4, -0.8, -0.03, 0.4, -0.8, 0.03, -0.4, 0.8, -0.03, -0.4, 0.8])
jointVelocityTarget = np.zeros([anymalC.getDOF()])
jointPgain = np.ones(anymalC.getDOF()) * 100.0
jointDgain = np.ones(anymalC.getDOF()) * 1.0
anymalC.setGeneralizedCoordinate(jointNominalConfig)
anymalC.setPdGains(jointPgain, jointDgain)
anymalC.setPdTarget(jointNominalConfig, jointVelocityTarget)
anymalC.setName("anymalC")
jointNominalConfig[0] = -4
anymalB.setGeneralizedCoordinate(jointNominalConfig)
anymalB.setPdGains(jointPgain, jointDgain)
anymalB.setPdTarget(jointNominalConfig, jointVelocityTarget)
anymalB.setName("anymalB")
ball1 = world.addSphere(0.1498, 0.8, "steel")
ball1.setPosition(0, 0.0, 1.0)
ball2 = world.addSphere(0.1499, 0.8, "steel")
ball2.setPosition(0.3, 0.0, 1.0)
ball3 = world.addSphere(0.1499, 0.8, "steel")
ball3.setPosition(0.6, 0.0, 1.0)
ball4 = world.addSphere(0.1499, 0.8, "steel")
ball4.setPosition(2.9, 0.0, 3.0)
box = world.addBox(.1, .1, .1, 1)
box.setPosition(0.9, 0.0, 4.2)
world.addStiffWire(pin1, 0, np.zeros(3), ball1, 0, np.zeros(3), 2.0)
world.addStiffWire(pin2, 0, np.zeros(3), ball2, 0, np.zeros(3), 2.0)
world.addStiffWire(pin3, 0, np.zeros(3), ball3, 0, np.zeros(3), 2.0)
world.addStiffWire(pin4, 0, np.zeros(3), ball4, 0, np.zeros(3), 2.0)
wire5 = world.addCompliantWire(pin5, 0, np.zeros(3), box, 0, np.zeros(3), 2.0, 200)
wire5.setStretchType(raisim.StretchType.BOTH)
wire6 = world.addCompliantWire(pin6, 0, np.zeros(3), anymalC, 0, np.zeros(3), 2.0, 1000)
wire6.setStretchType(raisim.StretchType.BOTH)
wire7 = world.addCustomWire(pin7, 0, np.zeros(3), anymalB, 0, np.zeros(3), 2.0)
wire7.setTension(310)
server = raisim.RaisimServer(world)
server.launchServer(8080)
for i in range(500000):
time.sleep(0.001)
server.integrateWorldThreadSafe()
if i == 5000:
world.removeObject(wire7)
server.killServer()
| 1.789063 | 2 |
nova/virt/hyperv/volumeops.py | viveknandavanam/nova | 1 | 1978 | <gh_stars>1-10
# Copyright 2012 <NAME>
# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for Storage-related functions (attach, detach, etc).
"""
import time
from os_brick.initiator import connector
from os_win import utilsfactory
from oslo_log import log as logging
from oslo_utils import strutils
import nova.conf
from nova import exception
from nova.i18n import _, _LE, _LI, _LW
from nova import utils
from nova.virt import driver
from nova.virt.hyperv import constants
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
class VolumeOps(object):
"""Management class for Volume-related tasks
"""
def __init__(self):
self._vmutils = utilsfactory.get_vmutils()
self._default_root_device = 'vda'
self.volume_drivers = {
constants.STORAGE_PROTOCOL_SMBFS: SMBFSVolumeDriver(),
constants.STORAGE_PROTOCOL_ISCSI: ISCSIVolumeDriver(),
constants.STORAGE_PROTOCOL_FC: FCVolumeDriver()}
def _get_volume_driver(self, connection_info):
driver_type = connection_info.get('driver_volume_type')
if driver_type not in self.volume_drivers:
raise exception.VolumeDriverNotFound(driver_type=driver_type)
return self.volume_drivers[driver_type]
def attach_volumes(self, volumes, instance_name):
for vol in volumes:
self.attach_volume(vol['connection_info'], instance_name)
def disconnect_volumes(self, block_device_info):
mapping = driver.block_device_info_get_mapping(block_device_info)
for vol in mapping:
self.disconnect_volume(vol['connection_info'])
def attach_volume(self, connection_info, instance_name,
disk_bus=constants.CTRL_TYPE_SCSI):
tries_left = CONF.hyperv.volume_attach_retry_count + 1
while tries_left:
try:
self._attach_volume(connection_info,
instance_name,
disk_bus)
break
except Exception as ex:
tries_left -= 1
if not tries_left:
LOG.exception(
_LE("Failed to attach volume %(connection_info)s "
"to instance %(instance_name)s. "),
{'connection_info': strutils.mask_dict_password(
connection_info),
'instance_name': instance_name})
self.disconnect_volume(connection_info)
raise exception.VolumeAttachFailed(
volume_id=connection_info['serial'],
reason=ex)
else:
LOG.warning(
_LW("Failed to attach volume %(connection_info)s "
"to instance %(instance_name)s. "
"Tries left: %(tries_left)s."),
{'connection_info': strutils.mask_dict_password(
connection_info),
'instance_name': instance_name,
'tries_left': tries_left})
time.sleep(CONF.hyperv.volume_attach_retry_interval)
def _attach_volume(self, connection_info, instance_name,
disk_bus=constants.CTRL_TYPE_SCSI):
LOG.debug(
"Attaching volume: %(connection_info)s to %(instance_name)s",
{'connection_info': strutils.mask_dict_password(connection_info),
'instance_name': instance_name})
volume_driver = self._get_volume_driver(connection_info)
volume_driver.attach_volume(connection_info,
instance_name,
disk_bus)
qos_specs = connection_info['data'].get('qos_specs') or {}
if qos_specs:
volume_driver.set_disk_qos_specs(connection_info,
qos_specs)
def disconnect_volume(self, connection_info):
volume_driver = self._get_volume_driver(connection_info)
volume_driver.disconnect_volume(connection_info)
def detach_volume(self, connection_info, instance_name):
LOG.debug("Detaching volume: %(connection_info)s "
"from %(instance_name)s",
{'connection_info': strutils.mask_dict_password(
connection_info),
'instance_name': instance_name})
volume_driver = self._get_volume_driver(connection_info)
volume_driver.detach_volume(connection_info, instance_name)
volume_driver.disconnect_volume(connection_info)
def fix_instance_volume_disk_paths(self, instance_name, block_device_info):
# Mapping containing the current disk paths for each volume.
actual_disk_mapping = self.get_disk_path_mapping(block_device_info)
if not actual_disk_mapping:
return
# Mapping containing virtual disk resource path and the physical
# disk path for each volume serial number. The physical path
# associated with this resource may not be the right one,
# as physical disk paths can get swapped after host reboots.
vm_disk_mapping = self._vmutils.get_vm_physical_disk_mapping(
instance_name)
for serial, vm_disk in vm_disk_mapping.items():
actual_disk_path = actual_disk_mapping[serial]
if vm_disk['mounted_disk_path'] != actual_disk_path:
self._vmutils.set_disk_host_res(vm_disk['resource_path'],
actual_disk_path)
def get_volume_connector(self):
# NOTE(lpetrut): the Windows os-brick connectors
# do not use a root helper.
conn = connector.get_connector_properties(
root_helper=None,
my_ip=CONF.my_block_storage_ip,
multipath=CONF.hyperv.use_multipath_io,
enforce_multipath=True,
host=CONF.host)
return conn
def connect_volumes(self, block_device_info):
mapping = driver.block_device_info_get_mapping(block_device_info)
for vol in mapping:
connection_info = vol['connection_info']
volume_driver = self._get_volume_driver(connection_info)
volume_driver.connect_volume(connection_info)
def get_disk_path_mapping(self, block_device_info):
block_mapping = driver.block_device_info_get_mapping(block_device_info)
disk_path_mapping = {}
for vol in block_mapping:
connection_info = vol['connection_info']
disk_serial = connection_info['serial']
disk_path = self.get_disk_resource_path(connection_info)
disk_path_mapping[disk_serial] = disk_path
return disk_path_mapping
def get_disk_resource_path(self, connection_info):
volume_driver = self._get_volume_driver(connection_info)
return volume_driver.get_disk_resource_path(connection_info)
@staticmethod
def bytes_per_sec_to_iops(no_bytes):
# Hyper-v uses normalized IOPS (8 KB increments)
# as IOPS allocation units.
return (
(no_bytes + constants.IOPS_BASE_SIZE - 1) //
constants.IOPS_BASE_SIZE)
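        # Example (sketch, assuming IOPS_BASE_SIZE is 8 KB / 8192 bytes as the
        # comment above states): 1 MB/s -> (1000000 + 8191) // 8192 = 123
        # normalized IOPS.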
@staticmethod
def validate_qos_specs(qos_specs, supported_qos_specs):
unsupported_specs = set(qos_specs.keys()).difference(
supported_qos_specs)
if unsupported_specs:
msg = (_LW('Got unsupported QoS specs: '
'%(unsupported_specs)s. '
'Supported qos specs: %(supported_qos_specs)s') %
{'unsupported_specs': unsupported_specs,
'supported_qos_specs': supported_qos_specs})
LOG.warning(msg)
class BaseVolumeDriver(object):
_is_block_dev = True
_protocol = None
_extra_connector_args = {}
def __init__(self):
self._conn = None
self._diskutils = utilsfactory.get_diskutils()
self._vmutils = utilsfactory.get_vmutils()
@property
def _connector(self):
if not self._conn:
scan_attempts = CONF.hyperv.mounted_disk_query_retry_count
scan_interval = CONF.hyperv.mounted_disk_query_retry_interval
self._conn = connector.InitiatorConnector.factory(
protocol=self._protocol,
root_helper=None,
use_multipath=CONF.hyperv.use_multipath_io,
device_scan_attempts=scan_attempts,
device_scan_interval=scan_interval,
**self._extra_connector_args)
return self._conn
def connect_volume(self, connection_info):
return self._connector.connect_volume(connection_info['data'])
def disconnect_volume(self, connection_info):
self._connector.disconnect_volume(connection_info['data'])
def get_disk_resource_path(self, connection_info):
disk_paths = self._connector.get_volume_paths(connection_info['data'])
if not disk_paths:
vol_id = connection_info['serial']
err_msg = _("Could not find disk path. Volume id: %s")
raise exception.DiskNotFound(err_msg % vol_id)
return self._get_disk_res_path(disk_paths[0])
def _get_disk_res_path(self, disk_path):
if self._is_block_dev:
# We need the Msvm_DiskDrive resource path as this
# will be used when the disk is attached to an instance.
disk_number = self._diskutils.get_device_number_from_device_name(
disk_path)
disk_res_path = self._vmutils.get_mounted_disk_by_drive_number(
disk_number)
else:
disk_res_path = disk_path
return disk_res_path
def attach_volume(self, connection_info, instance_name,
disk_bus=constants.CTRL_TYPE_SCSI):
dev_info = self.connect_volume(connection_info)
serial = connection_info['serial']
disk_path = self._get_disk_res_path(dev_info['path'])
ctrller_path, slot = self._get_disk_ctrl_and_slot(instance_name,
disk_bus)
if self._is_block_dev:
# We need to tag physical disk resources with the volume
# serial number, in order to be able to retrieve them
# during live migration.
self._vmutils.attach_volume_to_controller(instance_name,
ctrller_path,
slot,
disk_path,
serial=serial)
else:
self._vmutils.attach_drive(instance_name,
disk_path,
ctrller_path,
slot)
def detach_volume(self, connection_info, instance_name):
disk_path = self.get_disk_resource_path(connection_info)
LOG.debug("Detaching disk %(disk_path)s "
"from instance: %(instance_name)s",
dict(disk_path=disk_path,
instance_name=instance_name))
self._vmutils.detach_vm_disk(instance_name, disk_path,
is_physical=self._is_block_dev)
def _get_disk_ctrl_and_slot(self, instance_name, disk_bus):
if disk_bus == constants.CTRL_TYPE_IDE:
# Find the IDE controller for the vm.
ctrller_path = self._vmutils.get_vm_ide_controller(
instance_name, 0)
# Attaching to the first slot
slot = 0
else:
# Find the SCSI controller for the vm
ctrller_path = self._vmutils.get_vm_scsi_controller(
instance_name)
slot = self._vmutils.get_free_controller_slot(ctrller_path)
return ctrller_path, slot
def set_disk_qos_specs(self, connection_info, disk_qos_specs):
LOG.info(_LI("The %(protocol)s Hyper-V volume driver "
"does not support QoS. Ignoring QoS specs."),
dict(protocol=self._protocol))
class ISCSIVolumeDriver(BaseVolumeDriver):
_is_block_dev = True
_protocol = constants.STORAGE_PROTOCOL_ISCSI
def __init__(self, *args, **kwargs):
self._extra_connector_args = dict(
initiator_list=CONF.hyperv.iscsi_initiator_list)
super(ISCSIVolumeDriver, self).__init__(*args, **kwargs)
class SMBFSVolumeDriver(BaseVolumeDriver):
_is_block_dev = False
_protocol = constants.STORAGE_PROTOCOL_SMBFS
_extra_connector_args = dict(local_path_for_loopback=True)
def export_path_synchronized(f):
def wrapper(inst, connection_info, *args, **kwargs):
export_path = inst._get_export_path(connection_info)
@utils.synchronized(export_path)
def inner():
return f(inst, connection_info, *args, **kwargs)
return inner()
return wrapper
def _get_export_path(self, connection_info):
return connection_info['data']['export'].replace('/', '\\')
@export_path_synchronized
def attach_volume(self, *args, **kwargs):
super(SMBFSVolumeDriver, self).attach_volume(*args, **kwargs)
@export_path_synchronized
def disconnect_volume(self, *args, **kwargs):
# We synchronize those operations based on the share path in order to
# avoid the situation when a SMB share is unmounted while a volume
# exported by it is about to be attached to an instance.
super(SMBFSVolumeDriver, self).disconnect_volume(*args, **kwargs)
def set_disk_qos_specs(self, connection_info, qos_specs):
supported_qos_specs = ['total_iops_sec', 'total_bytes_sec']
VolumeOps.validate_qos_specs(qos_specs, supported_qos_specs)
total_bytes_sec = int(qos_specs.get('total_bytes_sec') or 0)
total_iops_sec = int(qos_specs.get('total_iops_sec') or
VolumeOps.bytes_per_sec_to_iops(
total_bytes_sec))
if total_iops_sec:
disk_path = self.get_disk_resource_path(connection_info)
self._vmutils.set_disk_qos_specs(disk_path, total_iops_sec)
class FCVolumeDriver(BaseVolumeDriver):
_is_block_dev = True
_protocol = constants.STORAGE_PROTOCOL_FC
| 1.679688 | 2 |
-Loan-Approval-Analysis/code.py | lakshit-sharma/greyatom-python-for-data-science | 0 | 1979 | # --------------
# Importing header files
import numpy as np
import pandas as pd
from scipy.stats import mode
# code starts here
bank = pd.read_csv(path)
categorical_var = bank.select_dtypes(include = 'object')
print(categorical_var)
numerical_var = bank.select_dtypes(include = 'number')
print(numerical_var)
banks = bank.drop(columns=['Loan_ID'])
bank_mode = banks.mode()
banks = banks.fillna(bank_mode.iloc[0])
print(banks.isnull().sum())
avg_loan_amount = pd.pivot_table(banks, index=['Gender', 'Married', 'Self_Employed'], values='LoanAmount', aggfunc = 'mean')
print(avg_loan_amount)
loan_approved_se = banks[ (banks['Self_Employed'] == "Yes") & (banks['Loan_Status'] == "Y") ]
loan_approved_nse = banks[ (banks['Self_Employed'] == "No") & (banks['Loan_Status'] == "Y") ]
percentage_se = (len(loan_approved_se) / 614) * 100
percentage_nse = (len(loan_approved_nse) / 614) * 100
# loan amount term
loan_term = banks['Loan_Amount_Term'].apply(lambda x: int(x)/12 )
big_loan_term=len(loan_term[loan_term>=25])
print(big_loan_term)
columns_to_show = ['ApplicantIncome', 'Credit_History']
loan_groupby=banks.groupby(['Loan_Status'])[columns_to_show]
# Check the mean value
mean_values=loan_groupby.agg([np.mean])
print(mean_values)
# code ends here
| 3.03125 | 3 |
others/train_RNN.py | jacobswan1/Video2Commonsense | 31 | 1980 | <filename>others/train_RNN.py
''' Training Scropt for V2C captioning task. '''
__author__ = '<NAME>'
import os
import numpy as np
from opts import *
from utils.utils import *
import torch.optim as optim
from model.Model import Model
from torch.utils.data import DataLoader
from utils.dataloader import VideoDataset
from model.transformer.Optim import ScheduledOptim
def train(loader, model, optimizer, opt, cap_vocab, cms_vocab):
model.train()
for epoch in range(opt['epochs']):
iteration = 0
for data in loader:
torch.cuda.synchronize()
if opt['cms'] == 'int':
cms_labels = data['int_labels']
elif opt['cms'] == 'eff':
cms_labels = data['eff_labels']
else:
cms_labels = data['att_labels']
if opt['cuda']:
fc_feats = data['fc_feats'].cuda()
cap_labels = data['cap_labels'].cuda()
cms_labels = cms_labels.cuda()
optimizer.zero_grad()
# cap_probs, cms_probs = model(fc_feats, cap_labels, cap_pos, cms_labels, cms_pos)
cap_probs, _, cms_probs, _ = model(fc_feats, cap_labels, cms_labels)
            # note: we currently use plain cross-entropy as the training objective;
            # advanced losses (e.g. self-critical sequence training), different loss
            # weights or stronger video features may boost performance, but that is
            # not the goal of this work. (A hedged sketch of the label-smoothed
            # cross-entropy assumed to be behind cal_performance appears after this
            # function.)
cap_loss, cap_n_correct = cal_performance(cap_probs.view(-1, cap_probs.shape[-1]),
cap_labels[:, 1:], smoothing=True)
cms_loss, cms_n_correct = cal_performance(cms_probs.view(-1, cms_probs.shape[-1]),
cms_labels[:, 1:], smoothing=True)
# compute the token prediction Acc.
non_pad_mask = cap_labels[:, 1:].ne(Constants.PAD)
n_word = non_pad_mask.sum().item()
cms_non_pad_mask = cms_labels[:, 1:].ne(Constants.PAD)
cms_n_word = cms_non_pad_mask.sum().item()
cap_loss /= n_word
cms_loss /= n_word
loss = cms_loss + cap_loss
loss.backward()
optimizer.step_and_update_lr()
torch.nn.utils.clip_grad_norm_(filter(lambda p: p.requires_grad, model.parameters()), 1)
# update parameters
cap_train_loss = cap_loss.item()
cms_train_loss = cms_loss.item()
# multi-gpu case, not necessary in newer PyTorch version or on single GPU.
if opt['cuda']: torch.cuda.synchronize()
iteration += 1
if iteration % opt['print_loss_every'] ==0:
print('iter %d (epoch %d), cap_train_loss = %.6f, cms_train_loss = %.6f,'
' current step = %d, current lr = %.3E, cap_acc = %.3f, cms_acc = %.3f'
% (iteration, epoch, cap_train_loss, cms_train_loss, optimizer.n_current_steps,
optimizer._optimizer.param_groups[0]['lr'],
cap_n_correct/n_word, cms_n_correct/cms_n_word))
# show the intermediate generations
if opt['show_predict']:
cap_pr, cap_gt = show_prediction(cap_probs, cap_labels[:, :-1], cap_vocab, caption=True)
cms_pr, cms_gt = show_prediction(cms_probs, cms_labels[:, :-1], cms_vocab, caption=False)
print(' \n')
with open(opt['info_path'], 'a') as f:
f.write('model_%d, cap_loss: %.6f, cms_loss: %.6f\n'
% (epoch, cap_train_loss, cms_train_loss))
f.write('\n %s \n %s' % (cap_pr, cap_gt))
f.write('\n %s \n %s' % (cms_pr, cms_gt))
f.write('\n')
if epoch % opt['save_checkpoint_every'] == 0:
# save the checkpoint
model_path = os.path.join(opt['output_dir'],
'CMS_CAP_MODEL_{}_lr_{}_BS_{}_Layer_{}_ATTHEAD_{}_HID_{}_RNNLayer_{}_epoch_{}.pth'
.format(opt['cms'], opt['init_lr'], opt['batch_size'], opt['num_layer'],
opt['num_head'], opt['dim_model'], opt['rnn_layer'], epoch))
torch.save(model.state_dict(), model_path)
print('model saved to %s' % model_path)
with open(opt['model_info_path'], 'a') as f:
f.write('model_%d, cap_loss: %.6f, cms_loss: %.6f\n'
% (epoch, cap_train_loss/n_word, cms_train_loss/n_word))
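# Assumption: a minimal sketch of the label-smoothed cross-entropy that
# cal_performance (imported via the star imports above) is believed to compute
# when smoothing=True. Illustrative only -- neither the helper name nor the body
# comes from the original project. pred: (N, vocab) unnormalized scores,
# gold: (N,) target token indices, pad_idx: index of the padding token.
import torch
import torch.nn.functional as F
def _label_smoothed_ce_sketch(pred, gold, pad_idx, eps=0.1):
    n_class = pred.size(1)
    # one-hot targets softened by eps, with eps spread over the other classes
    one_hot = torch.zeros_like(pred).scatter(1, gold.unsqueeze(1), 1)
    one_hot = one_hot * (1 - eps) + (1 - one_hot) * eps / (n_class - 1)
    log_prb = F.log_softmax(pred, dim=1)
    # sum per-token losses over non-padding positions only
    loss = -(one_hot * log_prb).sum(dim=1)
    return loss.masked_select(gold.ne(pad_idx)).sum()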
def main(opt):
# load and define dataloader
dataset = VideoDataset(opt, 'train')
dataloader = DataLoader(dataset, batch_size=opt['batch_size'], shuffle=True)
opt['cms_vocab_size'] = dataset.get_cms_vocab_size()
opt['cap_vocab_size'] = dataset.get_cap_vocab_size()
if opt['cms'] == 'int':
cms_text_length = opt['int_max_len']
elif opt['cms'] == 'eff':
cms_text_length = opt['eff_max_len']
else:
cms_text_length = opt['att_max_len']
# model initialization.
from model.S2VTModel import S2VTModel
model = S2VTModel(
dataset.get_cap_vocab_size(),
dataset.get_cms_vocab_size(),
opt['cap_max_len'],
cms_text_length,
opt["dim_model"],
opt["dim_word"],
opt['dim_vis_feat'],
n_layers=opt['rnn_layer'])
# number of parameters
model_parameters = filter(lambda p: p.requires_grad, model.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
print('number of learnable parameters are {}'.format(params))
if opt['cuda']: model = model.cuda()
# resume from previous checkpoint if indicated
if opt['load_checkpoint'] and opt['resume']:
cap_state_dict = torch.load(opt['load_checkpoint'])
model_dict = model.state_dict()
model_dict.update(cap_state_dict)
model.load_state_dict(model_dict)
optimizer = ScheduledOptim(optim.Adam(filter(lambda x: x.requires_grad, model.parameters()),
betas=(0.9, 0.98), eps=1e-09), 512, opt['warm_up_steps'])
# note: though we set the init learning rate as np.power(d_model, -0.5),
# grid search indicates different LR may improve the results.
opt['init_lr'] = round(optimizer.init_lr, 3)
# create checkpoint output directory
dir = os.path.join(opt['checkpoint_path'], 'S2VT_CMS_CAP_MODEL_{}_lr_{}_BS_{}_Layer_{}_ATTHEAD_{}_HID_{}_RNNLayer_{}'
.format(opt['cms'], opt['init_lr'], opt['batch_size'], opt['num_layer'],
opt['num_head'], opt['dim_model'], opt['rnn_layer']))
if not os.path.exists(dir): os.makedirs(dir)
    # save a snapshot of the model architecture to a local info log
info_path = os.path.join(dir, 'iteration_info_log.log')
print('model architecture saved to {} \n {}'.format(info_path, str(model)))
with open(info_path, 'a') as f:
f.write(str(model))
f.write('\n')
f.write(str(params))
f.write('\n')
# log file directory
opt['output_dir'] = dir
opt['info_path'] = info_path
opt['model_info_path'] = os.path.join(opt['output_dir'], 'checkpoint_loss_log.log')
train(dataloader, model, optimizer, opt, dataset.get_cap_vocab(), dataset.get_cms_vocab())
if __name__ == '__main__':
opt = parse_opt()
opt = vars(opt)
main(opt) | 2.296875 | 2 |
new-influx-client.py | benlamonica/energy-monitor | 0 | 1981 | <filename>new-influx-client.py<gh_stars>0
import influxdb_client
from influxdb_client import InfluxDBClient
bucket = "python-client-sandbox"
org = "Energy Monitor"
token = "miQdAvNXHiNDVVzPzV5FpkCaR_8qdQ-L1FlPCOXQPI325Kbrh1fgfhkcDUZ4FepaebDdpZ-A1gmtnnjU0_hViA=="
url = "http://localhost:9999"
client = InfluxDBClient(url=url, token=token, org=org)
write_api = client.write_api()
# write a single point into the sandbox bucket/org defined above
write_api.write(bucket, org, [{"measurement": "h2o_feet", "tags": {"location": "coyote_creek"}, "fields": {"water_level": 1}, "time": 1}])
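# A minimal sketch of the same write using the synchronous write API and the
# Point builder (both part of the influxdb-client package); it reuses the
# sandbox bucket/org defined above, and the measurement values are only
# illustrative.
from influxdb_client import Point
from influxdb_client.client.write_api import SYNCHRONOUS

sync_write_api = client.write_api(write_options=SYNCHRONOUS)
point = Point("h2o_feet").tag("location", "coyote_creek").field("water_level", 1.0)
sync_write_api.write(bucket=bucket, org=org, record=point)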
| 2.328125 | 2 |
tests/test_agent/test_manhole.py | guidow/pyfarm-agent | 0 | 1982 | # No shebang line, this module is meant to be imported
#
# Copyright 2014 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from collections import namedtuple
from pprint import pprint
from random import randint
from StringIO import StringIO
from textwrap import dedent
try:
from unittest.mock import patch
except ImportError: # pragma: no cover
from mock import patch
from twisted.internet.protocol import ServerFactory
from twisted.cred.portal import Portal
from twisted.conch.telnet import (
ITelnetProtocol, TelnetBootstrapProtocol, TelnetTransport)
from pyfarm.agent.testutil import TestCase
from pyfarm.agent.manhole import (
LoggingManhole, TransportProtocolFactory, TelnetRealm,
manhole_factory, show)
Peer = namedtuple("Peer", ("host", "port"))
class FakeLoggingManhole(LoggingManhole):
QUIT = False
GET_PEER_CALLS = 0
class terminal(object):
RIGHT_ARROW, LEFT_ARROW = None, None
class transport(object):
@classmethod
def getPeer(cls):
FakeLoggingManhole.GET_PEER_CALLS += 1
return Peer(os.urandom(12).encode("hex"), randint(1024, 65535))
def handle_QUIT(self):
self.QUIT = True
class TestManholeBase(TestCase):
def setUp(self):
TelnetRealm.NAMESPACE = None
FakeLoggingManhole.GET_PEER_CALLS = 0
FakeLoggingManhole.QUIT = False
class TestManholeFactory(TestManholeBase):
def test_assertions(self):
with self.assertRaises(AssertionError):
manhole_factory(None, "", "")
with self.assertRaises(AssertionError):
manhole_factory({}, None, "")
with self.assertRaises(AssertionError):
manhole_factory({}, "", None)
def test_instance_one(self):
namespace = {"bob": None}
username = os.urandom(32).encode("hex")
password = os.urandom(32).encode("hex")
manhole_factory(namespace, username, password)
with self.assertRaises(AssertionError):
manhole_factory(namespace, username, password)
def test_instance(self):
namespace = {"bob": None}
username = os.urandom(32).encode("hex")
        password = os.urandom(32).encode("hex")
manhole = manhole_factory(namespace, username, password)
self.assertEqual(namespace, {"bob": None})
self.assertEqual(
TelnetRealm.NAMESPACE,
{"bob": None, "pp": pprint, "show": show})
self.assertIsInstance(manhole, ServerFactory)
self.assertIsInstance(manhole.protocol, TransportProtocolFactory)
self.assertIsInstance(manhole.protocol.portal, Portal)
# There could be multiple password checkers, check for the one
# we know we should have added.
for _, instance in manhole.protocol.portal.checkers.items():
found = False
for user, passwd in instance.users.items():
if user == username and passwd == password:
found = True
if found:
break
else:
self.fail("Failed to find correct username and password.")
def test_request_avatar(self):
realm = TelnetRealm()
avatar = realm.requestAvatar(None, ITelnetProtocol)
self.assertEqual(len(avatar), 3)
self.assertIs(avatar[0], ITelnetProtocol)
self.assertIsInstance(avatar[1], TelnetBootstrapProtocol)
self.assertTrue(callable(avatar[2]))
def test_request_avatar_error(self):
realm = TelnetRealm()
with self.assertRaises(NotImplementedError):
realm.requestAvatar(None, None)
def test_protocol_factory(self):
factory = TransportProtocolFactory(None)
transport = factory()
self.assertIsInstance(transport, TelnetTransport)
class TestManholeShow(TestManholeBase):
def test_uses_namespace(self):
namespace = {"bob": None}
username = os.urandom(32).encode("hex")
password = os.urandom(32).encode("hex")
manhole_factory(namespace, username, password)
output = StringIO()
with patch("sys.stdout", output):
show()
output.seek(0)
output = output.getvalue().strip()
self.assertEqual(output, "objects: ['bob', 'pp', 'show']")
def test_custom_object(self):
class Foobar(object):
a, b, c, d, e = True, 1, "yes", {}, 0.0
output = StringIO()
with patch("sys.stdout", output):
show(Foobar)
output.seek(0)
output = output.getvalue().strip()
self.assertEqual(
output,
dedent("""
data attributes of <class 'tests.test_agent.test_manhole.Foobar'>
a : True
b : 1
c : yes
d : {} (0 elements)
e : 0.0
""").strip())
def test_wrap_long_line(self):
class Foobar(object):
a = " " * 90
output = StringIO()
with patch("sys.stdout", output):
show(Foobar)
output.seek(0)
output = output.getvalue().strip()
self.assertEqual(
output,
dedent("""
data attributes of <class 'tests.test_agent.test_manhole.Foobar'>
a : ' """ +
""" '...
""").strip())
class TestLoggingManhole(TestManholeBase):
def test_line_received(self):
f = FakeLoggingManhole()
f.lineReceived("exit")
self.assertTrue(f.QUIT)
| 1.992188 | 2 |
func-button/klSigmode.py | xcgoo/uiKLine | 232 | 1983 | <gh_stars>100-1000
# coding: utf-8
"""
Import all required libraries and functions
"""
#----------------------------------------------------------------------
def klSigmode(self):
    """Switch the signal display mode"""
if self.mode == 'deal':
self.canvas.updateSig(self.signalsOpen)
self.mode = 'dealOpen'
else:
self.canvas.updateSig(self.signals)
self.mode = 'deal'
| 2.140625 | 2 |
utils/thin.py | BnF-jadis/projet | 5 | 1984 | <filename>utils/thin.py
# 2020, BackThen Maps
# Coded by <NAME> https://github.com/RPetitpierre
# For Bibliothèque nationale de France (BnF)
import cv2, thinning, os
import numpy as np
import pandas as pd
import shapefile as shp
from skimage.measure import approximate_polygon
from PIL import Image, ImageDraw
from utils.utils import *
from utils.match import toLatLon
Image.MAX_IMAGE_PIXELS = 500000000
def skeletonize(road_network: np.ndarray, path: str = "workshop/vectorized.png", largest_component: bool = False):
''' Thinning/skeletonization of the road network image to a wired model.
Input(s):
road_network: black and white image of the road network (streets in white)
path: path where the skeletonized image should be saved
largest_component: if True, only the largest road network component will be kept
Output(s):
vectorized: skeletonized image
'''
assert len(road_network.shape) == 2, 'ERROR: road_network must be grayscale image'
img = cv2.resize(road_network, (road_network.shape[1]//2, road_network.shape[0]//2))
vectorized = thinning.guo_hall_thinning(img)
vectorized[vectorized > 100] = 255
vectorized[vectorized <= 100] = 0
if largest_component:
try:
_, labels, stats, _ = cv2.connectedComponentsWithStats(vectorized.copy(), connectivity=8, stats=cv2.CC_STAT_AREA)
stats = stats[1:]
main_component = (np.argmax(stats[:,4])+1).astype('int32')
vectorized = (labels == main_component).astype('uint8')*255
except:
'Warning: Skeletonization failed to apply largest_component = True param. Skipping.'
cv2.imwrite(path, vectorized)
return vectorized
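# Minimal usage sketch for skeletonize() (assumes 'road_mask.png' is a grayscale
# image with streets in white; the file name is only an example):
#   mask = cv2.imread('road_mask.png', cv2.IMREAD_GRAYSCALE)
#   skeleton = skeletonize(mask, path='workshop/vectorized.png', largest_component=True)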
def findNodes(image: np.ndarray):
''' Find the nodes in the road network skeleton image.
Input(s):
image: skeletonized image
Output(s):
nodes: array of nodes coordinates (x, y)
degree: degrees of the nodes (2=endpoint, 4=crossroads of 3 streets, 5=crossroads of 4 streets, etc.)
addresses: directions of the crossing roads, with regard to the node
'''
img = image.copy()
# Find row and column locations that are non-zero
(rows, cols) = np.nonzero(img)
nodes, degree, addresses = [], [], []
for (r,c) in zip(rows, cols):
if r > 0 and c > 0 and r < image.shape[0]-1 and c < image.shape[1]-1:
# Extract an 8-connected neighbourhood
(col_neigh, row_neigh) = np.meshgrid(np.array([c-1, c, c+1]), np.array([r-1, r, r+1]))
# Cast to int to index into image
col_neigh = col_neigh.astype('int')
row_neigh = row_neigh.astype('int')
# Convert into a single 1D array and check for non-zero locations
pix_neighbourhood = img[row_neigh, col_neigh].ravel() != 0
# If the number of non-zero locations equals 2, add this to our list of coordinates
n_neighbours = np.sum(pix_neighbourhood)
if (n_neighbours == 2) or (n_neighbours >= 4):
nodes.append((r, c))
degree.append(n_neighbours)
direction_set = np.where(pix_neighbourhood == True)[0]
direction_set = direction_set[direction_set != 4]
addresses.append(direction_set)
nodes = np.asarray(nodes)
return nodes, degree, addresses
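# Usage sketch (mirrors what thinImage() does below; 'skeleton' is the output of
# skeletonize() above):
#   nodes, degree, addresses = findNodes(skeleton)
#   df_nodes = pd.DataFrame({'x': nodes[:, 0], 'y': nodes[:, 1], 'degree': degree, 'address': addresses})
#   df_nodes = cleanNodesEdges(df_nodes)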
def cleanNodesEdges(df_nodes: pd.DataFrame):
df = df_nodes.copy()
new_addresses, new_degree = [], []
for ind, address in df['address'].iteritems():
new_address = avoidDiagonalEdges(address)
new_addresses.append(new_address)
new_degree.append(len(new_address) + 1)
df['address'] = new_addresses
df['degree'] = new_degree
return df
def avoidDiagonalEdges(address: list, direction: int = None):
right, diagonal = [1, 3, 5, 7], {0: [1, 3], 2: [1, 5], 6: [3, 7], 8: [5, 7]}
new_address = []
for r in right:
if r in address:
new_address.append(r)
for d in diagonal.keys():
if d in address:
if not(diagonal[d][0] in address) and not(diagonal[d][1] in address):
if direction != None:
if not((8-direction) in diagonal[d]):
new_address.append(d)
else:
new_address.append(d)
return new_address
def explorePath(start_x: int, start_y: int, start_dir: int, image: np.ndarray, nodes_grid: np.ndarray):
''' Follow the path from one given start node and direction until the next node, and stores the pixels
on the way.
Input(s):
start_x: start node x-coordinate
start_y: start node y-coordinate
start_dir: starting direction ({0, 1, 2,
3, -, 5,
6, 7, 8})
image: skeletonized image of the road network
nodes_grid: grid of the nodes of the skeletonized image
Output(s):
way: list of pixel coordinates on the way
direction: last direction to reach the 2nd node
nodes_grid[x, y]: degree of the arrival node
'''
def absoluteWay(x: int, y: int, way: int):
if way == 0:
x_, y_ = x-1, y-1
elif way == 1:
x_, y_ = x-1, y
elif way == 2:
x_, y_ = x-1, y+1
elif way == 3:
x_, y_ = x, y-1
elif way == 5:
x_, y_ = x, y+1
elif way == 6:
x_, y_ = x+1, y-1
elif way == 7:
x_, y_ = x+1, y
elif way == 8:
x_, y_ = x+1, y+1
else:
raise AttributeError('Parameters invalid: (' + str(x) + ',' + str(y) + ',' + str(way) + '), way \
should be comprised between 0 and 8, and != 4. x, y and way should be of type int.')
return x_, y_
def noTurnBack(direction: int):
wrong_paths = []
if direction == 0:
wrong_paths = [5, 7]
elif direction == 1:
wrong_paths = [6, 8]
elif direction == 2:
wrong_paths = [3, 7]
elif direction == 3:
wrong_paths = [2, 8]
elif direction == 5:
wrong_paths = [0, 6]
elif direction == 6:
wrong_paths = [1, 5]
elif direction == 7:
wrong_paths = [0, 2]
elif direction == 8:
wrong_paths = [1, 3]
return wrong_paths
direction = start_dir
x, y = start_x, start_y
assert image[x, y] != 0, 'ERROR: start point is not white'
end = False
way = [(x, y)]
# First iteration
new_x, new_y = absoluteWay(x, y, direction)
assert image[new_x, new_y] != 0, 'ERROR: 2nd point is not white'
way.append((new_x, new_y))
x, y = new_x, new_y
wrong_paths = noTurnBack(direction)
wrong_paths_active = True
if nodes_grid[x, y]:
end = True
direction = 8-start_dir
while not(end):
if x > 0 and y > 0 and x < image.shape[0]-1 and y < image.shape[1]-1:
# Extract an 8-connected neighbourhood
(row_neigh, col_neigh) = np.meshgrid(np.array([x-1, x, x+1]), np.array([y-1, y, y+1]))
# Cast to int to index into image
col_neigh, row_neigh = col_neigh.astype('int'), row_neigh.astype('int')
# Convert into a single 1D array and check for non-zero locations
try:
pix_neighbourhood = image[row_neigh, col_neigh].transpose().ravel() != 0
except:
print(x, y, image.shape, )
raise AssertionError()
# If the number of non-zero locations equals 2, add this to our list of coordinates
n_neighbours = np.sum(pix_neighbourhood)
direction_set = np.where(pix_neighbourhood == True)[0]
last_ds = [wrong_paths]
last_ds.append(direction_set)
direction_set = direction_set[direction_set != 4]
last_ds.append(direction_set)
direction_set = direction_set[direction_set != (8-direction)]
last_ds.append(direction_set)
direction_set = np.asarray(avoidDiagonalEdges(direction_set, direction))
last_ds.append(direction_set)
if wrong_paths_active:
for wrong_path in wrong_paths:
direction_set = direction_set[direction_set != wrong_path]
wrong_paths_active = False
if len(direction_set) != 1:
end = True
break
direction = direction_set[0]
new_x, new_y = absoluteWay(x, y, direction)
way.append((new_x, new_y))
x, y = new_x, new_y
if nodes_grid[x, y]:
end = True
else:
end = True
return way, direction, nodes_grid[x, y]
def findSegments(df_nodes: pd.DataFrame, image: np.ndarray, min_length: int = 30, return_simple_ways: bool = True):
''' Find all the road segments in the network. Keep the ones that are longer than a given length or non-terminal.
Optionally, compute the Douglas-Peucker simple itinerary of each segment and return it.
Input(s):
df_nodes: list of nodes
image: skeletonized image of the road network
min_length: min segment length if the segment is terminal
return_simple_ways: if True, compute the Douglas-Peucker simple itinerary of each segment and return it
Output(s):
(Optional)(simple_ways: the Douglas-Peucker simple itinerary of each segmenty)
ways: list of segments, containing all the pixels on the way between each couple of nodes
nodes_grid: image containing all the nodes found in the image and their degree
'''
img = image.copy()
done, ways = [], []
df_nodes = df_nodes.sort_values(by='degree').reset_index(drop=True)
nodes_grid = np.zeros(image.shape)
for ind, row in df_nodes[['x', 'y', 'degree']].iterrows():
nodes_grid[row['x'], row['y']] = row['degree']
nodes_grid = nodes_grid.astype('int')
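    # Each segment joins two nodes. Once a segment has been walked from one end, the
    # (x, y, direction) code of its opposite end is appended to `done` so that the same
    # segment is not traversed a second time from the other node.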
for ind, node in df_nodes.iterrows():
for direct in node['address']:
code = str(node['x']) + '_' + str(node['y']) + '_' + str(direct)
if not(code in done):
way, last_direct, degree = explorePath(start_x=node['x'], start_y=node['y'],
start_dir=direct, image=img, nodes_grid=nodes_grid)
if not((len(way) <= min_length) and ((node['degree'] == 2) or (degree == 2))):
done.append(str(way[-1][0]) + '_' + str(way[-1][1]) + '_' + str(8-last_direct))
ways.append(way)
if return_simple_ways:
simple_ways = []
for way in ways:
inv_way = np.asarray([np.asarray(way)[:,1], image.shape[0]-np.asarray(way)[:,0]]).transpose()
simple_ways.append(approximate_polygon(np.asarray(inv_way), tolerance=1.6).tolist())
return simple_ways, ways, nodes_grid
else:
return ways, nodes_grid
def thinImage(image: np.ndarray, image_name: str, export_file_path: str, exportPNG: bool = False,
exportJSON: bool = False, exportSVG: bool = False, exportSHP: bool = False, geoloc: bool = False):
assert (exportPNG or exportJSON or exportSVG or exportSHP)
# Convert to B&W
road_network = image.copy()
road_network[road_network < 254] = 0
road_network[road_network < 255/2] = 0
road_network[road_network >= 255/2] = 255
vectorized = skeletonize(road_network, largest_component = True)
nodes, degree, addresses = findNodes(vectorized)
    if len(degree) == 0:  # no nodes were found in the skeleton
return [], [], np.zeros((image.shape[1], image.shape[0]))
df_nodes = pd.DataFrame({'x': nodes[:,0], 'y': nodes[:,1], 'degree': degree, 'address': addresses })
df_nodes = df_nodes.sort_values(by='degree').reset_index(drop=True)
df_nodes = cleanNodesEdges(df_nodes)
df_nodes = df_nodes[df_nodes['degree'] != 3]
if (exportJSON or exportSHP):
simple_segments, full_segments, nodes_grid = findSegments(df_nodes, vectorized, min_length = 15,
return_simple_ways = True)
else:
full_segments, nodes_grid = findSegments(df_nodes, vectorized, min_length = 15,
return_simple_ways = False)
simple_segments = []
if exportPNG:
toPNG(full_segments, vectorized, export_file_path)
elif exportSVG:
toPNG(full_segments, vectorized, os.path.join('workshop', 'thin.png'))
if geoloc:
if exportJSON:
project_name = getProjectName()
try:
with open(os.path.join('save', project_name, 'match' , 'primary', image_name + '.json')) as data:
data = json.load(data)
M = np.asarray(data['M'])
simple_segments_JSON = []
for segment in simple_segments:
s = np.asarray([2*np.asarray(segment)[:,0], image.shape[0]-(2*np.asarray(segment)[:,1])]).T
simple_segments_JSON.append(toLatLon((s@M[:, :2]) + M[:, 2:3].transpose()).tolist())
except:
print("La géolocalisation de l'image {} n'a pas encore été calculée. Par conséquent, \
il n'est pas possible de calculer la géolocalisation de son réseau filaire".format(image_name))
simple_segments_JSON = simple_segments
else:
            print('Geolocation of the road network is currently only available for the JSON format.')
else:
simple_segments_JSON = simple_segments
if exportJSON:
with open(export_file_path.replace('png', 'json'), 'w') as outfile:
json.dump(simple_segments_JSON, outfile)
if exportSHP:
os.makedirs(export_file_path.replace('.png', ''), exist_ok=True)
toShapefile(simple_segments, os.path.join(export_file_path.replace('.png', ''), image_name))
if exportSVG:
print("\nAvertissement: Si vous n'avez jamais utilisé cette commande, \
installez d'abord Homebrew, ImageMagick et Potrace via le terminal.\n")
print('Pour installer Homebrew:\n',
' /usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"')
print('Pour installer ImageMagick:\n', ' brew install imagemagick')
print('Pour installer Potrace: \n', ' brew install potrace\n')
if exportPNG:
png_path = export_file_path
else:
png_path = os.path.join('workshop', 'thin.png')
pnm_path = os.path.join('workshop', 'thin.pnm')
svg_path = export_file_path.replace('png', 'svg')
        os.system('convert ' + png_path + ' ' + pnm_path)
os.system('potrace ' + pnm_path + ' -s -o ' + svg_path)
return simple_segments, full_segments, nodes_grid
def toPNG(segments: list, vectorized: np.ndarray, out_path: str):
''' Save a given set of segments as a bitmap image from the road network.
Input(s):
segments: list of segments, containing all the pixels on the way between each couple of nodes
vectorized: skeletonized image of the road network
        out_path: the path where the output bitmap image should be saved
'''
canvas = (np.ones(vectorized.shape)*255).astype('uint8')
cv2.imwrite('workshop/canvas.png', canvas);
bitmap = Image.open('workshop/canvas.png')
draw = ImageDraw.Draw(bitmap)
for segment in segments:
coords = []
for point in segment:
coords.append((point[1], point[0]))
draw.line(coords, fill = 'black', width=0)
bitmap.save(out_path)
def toShapefile(simple_ways, out_path):
w = shp.Writer(out_path)
w.field('DeletionFlag', 'C', 1, 0)
w.field('gid', 'N', 11, 0)
w.field('streetname', 'C', 41, 0)
w.field('note', 'C', 32, 0)
for i in range(len(simple_ways)):
w.line([simple_ways[i]])
w.record('01', i, '', '')
w.close()
| 2.859375 | 3 |
easy2fa/tests/test_checkinput.py | lutostag/otp | 3 | 1985 | from unittest import TestCase
from unittest.mock import patch
from easy2fa import cli
class TestCheckInput(TestCase):
@patch('builtins.input')
def test_default(self, mock_input):
mock_input.return_value = ''
self.assertEquals(cli.check_input('prompt', default='one'), 'one')
mock_input.return_value = 'two'
self.assertEquals(cli.check_input('prompt', default='one'), 'two')
@patch('builtins.input')
@patch('builtins.print')
def test_assertions(self, mock_print, mock_input):
def assertion(value):
if value not in ['yes', 'no']:
return 'use yes or no'
mock_input.side_effect = ['input', '', 'no']
self.assertEquals(cli.check_input('prompt', assertion=assertion),
'no')
mock_print.assert_called_with('\tInvalid input: use yes or no')
| 3.1875 | 3 |
bert_finetuning/data_loader.py | nps1ngh/adversarial-bert-german-attacks-defense | 0 | 1986 | from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from bert_finetuning.data import GermanData
class GermanDataLoader:
def __init__(
self,
data_paths,
model_name,
do_cleansing,
max_sequence_length,
batch_size=8,
dataset_cls=GermanData,
):
self.german_data = dataset_cls(
data_paths,
model_name,
max_sequence_length=max_sequence_length,
do_cleansing=do_cleansing,
)
self.batch_size = batch_size
self.create_loaders()
def create_loaders(self):
"""
Create Torch dataloaders for data splits
"""
self.german_data.text_to_tensors()
print("creating dataloaders")
train_data = TensorDataset(
self.german_data.train_inputs,
self.german_data.train_masks,
self.german_data.train_labels,
)
train_sampler = RandomSampler(train_data)
self.train_dataloader = DataLoader(
train_data, sampler=train_sampler, batch_size=self.batch_size
)
validation_data = TensorDataset(
self.german_data.validation_inputs,
self.german_data.validation_masks,
self.german_data.validation_labels,
)
validation_sampler = SequentialSampler(validation_data)
self.validation_dataloader = DataLoader(
validation_data, sampler=validation_sampler, batch_size=self.batch_size
)
test_data = TensorDataset(
self.german_data.test_inputs,
self.german_data.test_masks,
self.german_data.test_labels,
)
test_sampler = SequentialSampler(test_data)
self.test_dataloader = DataLoader(
test_data, sampler=test_sampler, batch_size=self.batch_size
)
print("finished creating dataloaders")
"""
** FOR DEBUGGING **
if __name__ == "__main__":
## define data paths
germeval_data_paths = {
"train": "./datasets/hasoc_dataset/hasoc_german_train.csv",
"dev": "./datasets/hasoc_dataset/hasoc_german_validation.csv",
"test": "./datasets/hasoc_dataset/hasoc_german_test.csv",
}
hasoc_german_data_paths = {
"train": "./datasets/hasoc_dataset/hasoc_german_train.csv",
"dev": "./datasets/hasoc_dataset/hasoc_german_validation.csv",
"test": "./datasets/hasoc_dataset/hasoc_german_test.csv",
}
## create dataloaders
print("creating germeval dataloaders...")
germ_eval_dataloader = GermanDataLoader(germeval_data_paths)
print("creating hasoc dataloaders...")
hasoc_german_dataloader = GermanDataLoader(hasoc_german_data_paths)
"""
| 2.671875 | 3 |
data/dirty_mnist.py | Karthik-Ragunath/DDU | 43 | 1987 | <reponame>Karthik-Ragunath/DDU<filename>data/dirty_mnist.py
import torch
import numpy as np
import torch.utils.data as data
from torch.utils.data import Subset
from data.fast_mnist import create_MNIST_dataset
from data.ambiguous_mnist.ambiguous_mnist_dataset import AmbiguousMNIST
def get_train_valid_loader(root, batch_size, val_seed=1, val_size=0.1, **kwargs):
error_msg = "[!] val_size should be in the range [0, 1]."
assert (val_size >= 0) and (val_size <= 1), error_msg
# load the dataset
mnist_train_dataset, _ = create_MNIST_dataset()
# AmbiguousMNIST does whiten the data itself
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
train_dataset = data.ConcatDataset(
[mnist_train_dataset, AmbiguousMNIST(root=root, train=True, device=device),]
)
valid_dataset = data.ConcatDataset(
[mnist_train_dataset, AmbiguousMNIST(root=root, train=True, device=device),]
)
num_train = len(train_dataset)
indices = list(range(num_train))
split = int(np.floor(val_size * num_train))
np.random.seed(val_seed)
np.random.shuffle(indices)
train_idx, valid_idx = indices[split:], indices[:split]
train_subset = Subset(train_dataset, train_idx)
valid_subset = Subset(valid_dataset, valid_idx)
train_loader = torch.utils.data.DataLoader(train_subset, batch_size=batch_size, num_workers=0, shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid_subset, batch_size=batch_size, num_workers=0, shuffle=False)
return train_loader, valid_loader
def get_test_loader(root, batch_size, **kwargs):
# load the dataset
_, mnist_test_dataset = create_MNIST_dataset()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
test_dataset = data.ConcatDataset(
[mnist_test_dataset, AmbiguousMNIST(root=root, train=False, device=device),]
)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False, num_workers=0)
return test_loader
| 2.6875 | 3 |
vantage6/server/resource/recover.py | jaspersnel/vantage6-server | 2 | 1988 | # -*- coding: utf-8 -*-
import logging
import datetime
from flask import request, render_template
from flask_jwt_extended import (
create_access_token,
decode_token
)
from jwt.exceptions import DecodeError
from flasgger import swag_from
from http import HTTPStatus
from pathlib import Path
from sqlalchemy.orm.exc import NoResultFound
from vantage6.common import logger_name
from vantage6.server import db
from vantage6.server.resource import (
ServicesResources
)
module_name = logger_name(__name__)
log = logging.getLogger(module_name)
def setup(api, api_base, services):
path = "/".join([api_base, module_name])
log.info(f'Setting up "{path}" and subdirectories')
api.add_resource(
ResetPassword,
path+'/reset',
endpoint="reset_password",
methods=('POST',),
resource_class_kwargs=services
)
api.add_resource(
RecoverPassword,
path+'/lost',
endpoint='recover_password',
methods=('POST',),
resource_class_kwargs=services
)
# ------------------------------------------------------------------------------
# Resources / API's
# ------------------------------------------------------------------------------
class ResetPassword(ServicesResources):
"""user can use recover token to reset their password."""
@swag_from(str(Path(r"swagger/post_reset_password.yaml")),
endpoint='reset_password')
def post(self):
""""submit email-adress receive token."""
# retrieve user based on email or username
body = request.get_json()
reset_token = body.get("reset_token")
password = body.get("password")
if not reset_token or not password:
return {"msg": "reset token and/or password is missing!"}, \
HTTPStatus.BAD_REQUEST
# obtain user
try:
user_id = decode_token(reset_token)['identity'].get('id')
except DecodeError:
return {"msg": "Invalid recovery token!"}, HTTPStatus.BAD_REQUEST
log.debug(user_id)
user = db.User.get(user_id)
# set password
user.set_password(password)
user.save()
log.info(f"Successfull password reset for '{user.username}'")
return {"msg": "password successfully been reset!"}, \
HTTPStatus.OK
class RecoverPassword(ServicesResources):
"""send a mail containing a recover token"""
@swag_from(str(Path(r"swagger/post_recover_password.yaml")),
endpoint='recover_password')
def post(self):
"""username or email generates a token which is mailed."""
# default return string
ret = {"msg": "If the username or email is our database you "
"will soon receive an email"}
# obtain username/email from request'
body = request.get_json()
username = body.get("username")
email = body.get("email")
if not (email or username):
return {"msg": "No username or email provided!"}, \
HTTPStatus.BAD_REQUEST
# find user in the database, if not here we stop!
try:
if username:
user = db.User.get_by_username(username)
else:
user = db.User.get_by_email(email)
except NoResultFound:
# we do not tell them.... But we won't continue either
return ret
log.info(f"Password reset requested for '{user.username}'")
# generate a token that can reset their password
expires = datetime.timedelta(hours=1)
reset_token = create_access_token(
{"id": str(user.id)}, expires_delta=expires
)
self.mail.send_email(
"password reset",
sender="<EMAIL>",
recipients=[user.email],
text_body=render_template("mail/reset_password_token.txt",
token=reset_token),
html_body=render_template("mail/reset_password_token.html",
token=reset_token)
)
return ret
| 2.0625 | 2 |
examples/basic_examples/aws_sns_sqs_middleware_service.py | tranvietanh1991/tomodachi | 1 | 1989 | import os
from typing import Any, Callable, Dict
import tomodachi
from tomodachi import aws_sns_sqs, aws_sns_sqs_publish
from tomodachi.discovery import AWSSNSRegistration
from tomodachi.envelope import JsonBase
async def middleware_function(
func: Callable, service: Any, message: Any, topic: str, context: Dict, *args: Any, **kwargs: Any
) -> Any:
# Functionality before function is called
service.log("middleware before")
return_value = await func(*args, **kwargs)
# There's also the possibility to pass in extra arguments or keywords arguments, for example:
# return_value = await func(*args, id='overridden', **kwargs)
    # Functionality after the function is called
service.log("middleware after")
return return_value
class ExampleAWSSNSSQSService(tomodachi.Service):
name = "example-aws-sns-sqs-service"
log_level = "INFO"
uuid = str(os.environ.get("SERVICE_UUID") or "")
# Build own "discovery" functions, to be run on start and stop
# See tomodachi/discovery/aws_sns_registration.py for example
discovery = [AWSSNSRegistration]
# The message envelope class defines how a message should be processed when sent and received
# See tomodachi/envelope/json_base.py for a basic example using JSON and transferring some metadata
message_envelope = JsonBase
# Adds a middleware function that is run on every incoming message.
# Several middlewares can be chained.
message_middleware = [middleware_function]
# Some options can be specified to define credentials, used ports, hostnames, access log, etc.
options = {
"aws_sns_sqs": {
"region_name": None, # specify AWS region (example: 'eu-west-1')
"aws_access_key_id": None, # specify AWS access key (example: '<KEY>')
"aws_secret_access_key": None, # specify AWS secret key (example: 'f7sha92hNotarealsecretkeyn29ShnSYQi3nzgA')
},
"aws_endpoint_urls": {
"sns": None, # For example 'http://localhost:4575' if localstack is used for testing
"sqs": None, # For example 'http://localhost:4576' if localstack is used for testing
},
}
@aws_sns_sqs("example-route1")
async def route1a(self, data: Any) -> None:
self.log('Received data (function: route1a) - "{}"'.format(data))
async def _started_service(self) -> None:
async def publish(data: Any, topic: str) -> None:
self.log('Publish data "{}"'.format(data))
await aws_sns_sqs_publish(self, data, topic=topic, wait=False)
await publish("友達", "example-route1")
| 2.375 | 2 |
ex9.py | ThitsarAung/python-exercises | 0 | 1990 | <reponame>ThitsarAung/python-exercises
types_of_people = 10
x = f"There are {types_of_people} types of people."
binary = "binary"
do_not = "don't"
y = f"Those who know {binary} and those who {do_not}."
print(x)
print(y)
print(f"I said: {x}")
print(f"I also said: '{y}'")
hilarious = False
joke_evaluation = "Isn't that joke so funny?! {}"
print(joke_evaluation.format(hilarious))
w="This is the left side of..."
e="a string with a right side."
print(w + e)
| 4 | 4 |
mmdnn/conversion/caffe/writer.py | 2yz/MMdnn | 3,442 | 1991 | <reponame>2yz/MMdnn
import base64
from google.protobuf import json_format
from importlib import import_module
import json
import numpy as np
import os
import sys
from mmdnn.conversion.caffe.errors import ConversionError
from mmdnn.conversion.caffe.common_graph import fetch_attr_value
from mmdnn.conversion.caffe.utils import get_lower_case, get_upper_case, get_real_name
class JsonFormatter(object):
    '''Dump a DL graph into a JSON file.'''
def __init__(self, graph):
self.graph_def = graph.as_graph_def()
def dump(self, json_path):
json_txt = json_format.MessageToJson(self.graph_def)
parsed = json.loads(json_txt)
formatted = json.dumps(parsed, indent=4, sort_keys=True)
with open(json_path, 'w') as f:
f.write(formatted)
class PyWriter(object):
    '''Dump a DL graph into a Python script.'''
def __init__(self, graph, data, target):
self.graph = graph
self.data = data
self.tab = ' ' * 4
self.prefix = ''
target = target.lower()
if target == 'tensorflow':
self.target = target
self.net = 'TensorFlowNetwork'
elif target == 'keras':
self.target = target
self.net = 'KerasNetwork'
elif target == 'caffe':
self.target = target
self.net = 'CaffeNetwork'
else:
raise ConversionError('Target %s is not supported yet.' % target)
def indent(self):
self.prefix += self.tab
def outdent(self):
self.prefix = self.prefix[:-len(self.tab)]
def statement(self, s):
return self.prefix + s + '\n'
def emit_imports(self):
return self.statement('from dlconv.%s import %s\n' % (self.target, self.net))
def emit_class_def(self, name):
return self.statement('class %s(%s):' % (name, self.net))
def emit_setup_def(self):
return self.statement('def setup(self):')
def emit_node(self, node):
'''Emits the Python source for this node.'''
def pair(key, value):
return '%s=%s' % (key, value)
args = []
for input in node.input:
input = input.strip().split(':')
name = ''.join(input[:-1])
idx = int(input[-1])
assert name in self.graph.node_dict
parent = self.graph.get_node(name)
args.append(parent.output[idx])
#FIXME:
output = [node.output[0]]
# output = node.output
for k, v in node.attr:
if k == 'cell_type':
args.append(pair(k, "'" + fetch_attr_value(v) + "'"))
else:
args.append(pair(k, fetch_attr_value(v)))
args.append(pair('name', "'" + node.name + "'")) # Set the node name
args = ', '.join(args)
return self.statement('%s = self.%s(%s)' % (', '.join(output), node.op, args))
def dump(self, code_output_dir):
if not os.path.exists(code_output_dir):
os.makedirs(code_output_dir)
file_name = get_lower_case(self.graph.name)
code_output_path = os.path.join(code_output_dir, file_name + '.py')
data_output_path = os.path.join(code_output_dir, file_name + '.npy')
with open(code_output_path, 'w') as f:
f.write(self.emit())
with open(data_output_path, 'wb') as f:
np.save(f, self.data)
return code_output_path, data_output_path
def emit(self):
# Decompose DAG into chains
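        # A chain is a maximal linear run of nodes in which each node's only input is the
        # previous node of the chain; every chain is later emitted as one contiguous block
        # of statements, with blank lines separating the blocks.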
chains = []
for node in self.graph.topologically_sorted():
attach_to_chain = None
if len(node.input) == 1:
parent = get_real_name(node.input[0])
for chain in chains:
if chain[-1].name == parent: # Node is part of an existing chain.
attach_to_chain = chain
break
if attach_to_chain is None: # Start a new chain for this node.
attach_to_chain = []
chains.append(attach_to_chain)
attach_to_chain.append(node)
# Generate Python code line by line
source = self.emit_imports()
source += self.emit_class_def(self.graph.name)
self.indent()
source += self.emit_setup_def()
self.indent()
blocks = []
for chain in chains:
b = ''
for node in chain:
b += self.emit_node(node)
blocks.append(b[:-1])
source += '\n\n'.join(blocks)
return source
class ModelSaver(object):
def __init__(self, code_output_path, data_output_path):
self.code_output_path = code_output_path
self.data_output_path = data_output_path
def dump(self, model_output_dir):
'''Return the file path containing graph in generated model files.'''
if not os.path.exists(model_output_dir):
os.makedirs(model_output_dir)
sys.path.append(os.path.dirname(self.code_output_path))
file_name = os.path.splitext(os.path.basename(self.code_output_path))[0]
module = import_module(file_name)
class_name = get_upper_case(file_name)
net = getattr(module, class_name)
return net.dump(self.data_output_path, model_output_dir)
class GraphDrawer(object):
def __init__(self, toolkit, meta_path):
self.toolkit = toolkit.lower()
self.meta_path = meta_path
def dump(self, graph_path):
if self.toolkit == 'tensorflow':
from dlconv.tensorflow.visualizer import TensorFlowVisualizer
if self._is_web_page(graph_path):
TensorFlowVisualizer(self.meta_path).dump_html(graph_path)
else:
raise NotImplementedError('Image format or %s is unsupported!' % graph_path)
elif self.toolkit == 'keras':
from dlconv.keras.visualizer import KerasVisualizer
png_path, html_path = (None, None)
if graph_path.endswith('.png'):
png_path = graph_path
elif self._is_web_page(graph_path):
png_path = graph_path + ".png"
html_path = graph_path
else:
raise NotImplementedError('Image format or %s is unsupported!' % graph_path)
KerasVisualizer(self.meta_path).dump_png(png_path)
if html_path:
self._png_to_html(png_path, html_path)
os.remove(png_path)
else:
raise NotImplementedError('Visualization of %s is unsupported!' % self.toolkit)
def _is_web_page(self, path):
return path.split('.')[-1] in ('html', 'htm')
def _png_to_html(self, png_path, html_path):
with open(png_path, "rb") as f:
encoded = base64.b64encode(f.read()).decode('utf-8')
source = """<!DOCTYPE>
<html>
<head>
<meta charset="utf-8">
<title>Keras</title>
</head>
<body>
<img alt="Model Graph" src="data:image/png;base64,{base64_str}" />
</body>
</html>""".format(base64_str=encoded)
with open(html_path, 'w', encoding='utf-8') as f:
f.write(source) | 2.453125 | 2 |
week1/85-maximal-rectangle.py | LionTao/algo_weekend | 0 | 1992 | <gh_stars>0
"""
leetcode-85
Given a rows x cols binary matrix containing only 0s and 1s, find the largest rectangle containing only 1s and return its area.
"""
from typing import List
class Solution:
def maximalRectangle(self, matrix: List[List[str]]) -> int:
"""
        Build a histogram of column heights row by row, then find the largest rectangle in each histogram with a monotonic increasing stack.
"""
rows = len(matrix)
if rows == 0:
return 0
columns = len(matrix[0])
res = 0
heights = [0]*columns
for r in range(rows):
for c in range(columns):
if matrix[r][c]=="1":
heights[c]+=1
else:
heights[c]=0
            # update the best area once the whole row's histogram has been built
            res = max(res, self.largestRectangleArea(heights))
        return res

    def largestRectangleArea(self, heights: List[int]) -> int:
        # Monotonic increasing stack of bar indices
heights = [-1] + heights + [-1]
res = 0
ascend_stack = []
for i in range(len(heights)):
while ascend_stack and heights[ascend_stack[-1]] > heights[i]:
window_L_height_min_height = heights[ascend_stack.pop(-1)]
window_L = ascend_stack[-1] + 1
window_R = i - 1
cur_area = window_L_height_min_height * (window_R - window_L + 1)
res = max(res, cur_area)
ascend_stack.append(i)
return res | 3.484375 | 3 |
pandapower/test/opf/test_costs_pwl.py | mathildebadoual/pandapower | 1 | 1993 | <reponame>mathildebadoual/pandapower
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2018 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
import numpy as np
import pytest
from pandapower.optimal_powerflow import OPFNotConverged
import pandapower as pp
try:
import pplog as logging
except ImportError:
import logging
logger = logging.getLogger(__name__)
logger.setLevel("DEBUG")
def test_cost_piecewise_linear_gen():
""" Testing a very simple network for the resulting cost value
constraints with OPF """
# boundaries:
vm_max = 1.05
vm_min = 0.95
# create net
net = pp.create_empty_network()
pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.)
pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4)
pp.create_gen(net, 1, p_kw=-100, controllable=True, max_p_kw=-5, min_p_kw=-150, max_q_kvar=50,
min_q_kvar=-50)
pp.create_ext_grid(net, 0)
pp.create_load(net, 1, p_kw=20, controllable=False)
pp.create_line_from_parameters(net, 0, 1, 50, name="line2", r_ohm_per_km=0.876,
c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876,
max_loading_percent=100 * 690)
pp.create_piecewise_linear_cost(net, 0, "gen", np.array([[-150, -100], [-75, -50], [0, 0]]))
# run OPF
pp.runopp(net, verbose=False)
assert net["OPF_converged"]
assert net.res_cost - net.res_gen.p_kw.values / 1.5 < 1e-3
def test_cost_piecewise_linear_eg():
""" Testing a very simple network for the resulting cost value
constraints with OPF """
# boundaries:
vm_max = 1.05
vm_min = 0.95
# create net
net = pp.create_empty_network()
pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.)
pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10)
pp.create_ext_grid(net, 0, max_p_kw=0, min_p_kw=-50)
pp.create_gen(net, 1, p_kw=-10, max_p_kw=0, min_p_kw=-50, controllable=True)
# pp.create_ext_grid(net, 0)
pp.create_load(net, 1, p_kw=20, controllable=False)
pp.create_line_from_parameters(net, 0, 1, 50, name="line2", r_ohm_per_km=0.876,
c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876,
max_loading_percent=100 * 690)
pp.create_piecewise_linear_cost(net, 0, "ext_grid", np.array([[-50, -500], [0, 0]]))
# run OPF
pp.runopp(net, verbose=False)
assert net["OPF_converged"]
assert net.res_cost - - net.res_ext_grid.p_kw.values * 10 < 1e-3
# check and assert result
def test_get_costs():
""" Testing a very simple network for the resulting cost value
constraints with OPF """
# boundaries:
vm_max = 1.05
vm_min = 0.95
# create net
net = pp.create_empty_network()
pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.)
pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4)
pp.create_gen(net, 1, p_kw=-100, controllable=True, max_p_kw=-5, min_p_kw=-150, max_q_kvar=50,
min_q_kvar=-50)
pp.create_ext_grid(net, 0)
pp.create_load(net, 1, p_kw=20, controllable=False)
pp.create_line_from_parameters(net, 0, 1, 50, name="line2", r_ohm_per_km=0.876,
c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876,
max_loading_percent=100 * 690)
pp.create_piecewise_linear_cost(net, 0, "gen", np.array([[-150, -300], [0, 0]]))
# run OPF
pp.runopp(net, verbose=False)
assert net["OPF_converged"]
assert net.res_cost == 2 * net.res_gen.p_kw.values
# check and assert result
def test_cost_piecewise_linear_sgen():
""" Testing a very simple network for the resulting cost value
constraints with OPF """
# boundaries:
vm_max = 1.05
vm_min = 0.95
# create net
net = pp.create_empty_network()
pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.)
pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4)
pp.create_sgen(net, 1, p_kw=-100, controllable=True, max_p_kw=-5, min_p_kw=-150, max_q_kvar=50,
min_q_kvar=-50)
pp.create_ext_grid(net, 0)
pp.create_load(net, 1, p_kw=20, controllable=False)
pp.create_line_from_parameters(net, 0, 1, 50, name="line2", r_ohm_per_km=0.876,
c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876,
max_loading_percent=100 * 690)
pp.create_piecewise_linear_cost(net, 0, "sgen", np.array([[-150, -100], [-75, -50], [0, 0]]))
# run OPF
pp.runopp(net, verbose=False)
assert net["OPF_converged"]
assert net.res_cost - net.res_sgen.p_kw.values / 1.5 < 1e-3
def test_cost_piecewise_linear_load():
""" Testing a very simple network for the resulting cost value
constraints with OPF """
# boundaries:
vm_max = 1.05
vm_min = 0.95
# create net
net = pp.create_empty_network()
pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.)
pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4)
pp.create_load(net, 1, p_kw=100, controllable=True, max_p_kw=150, min_p_kw=50, max_q_kvar=0,
min_q_kvar=0)
pp.create_ext_grid(net, 0)
pp.create_line_from_parameters(net, 0, 1, 50, name="line2", r_ohm_per_km=0.876,
c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876,
max_loading_percent=100 * 690)
pp.create_piecewise_linear_cost(net, 0, "load", np.array([[0, 0], [75, 50], [150, 100]]))
# run OPF
pp.runopp(net, verbose=False)
assert net["OPF_converged"]
assert abs(net.res_cost - net.res_load.p_kw.values / 1.5) < 1e-3
def test_cost_piecewise_linear_sgen_uneven_slopes():
""" Testing a very simple network for the resulting cost value
constraints with OPF """
# boundaries:
vm_max = 1.05
vm_min = 0.95
# create net
net = pp.create_empty_network()
pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.)
pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4)
pp.create_sgen(net, 1, p_kw=-100, controllable=True, max_p_kw=-5, min_p_kw=-150, max_q_kvar=50,
min_q_kvar=-50)
pp.create_ext_grid(net, 0)
pp.create_load(net, 1, p_kw=20, controllable=False)
pp.create_line_from_parameters(net, 0, 1, 50, name="line2", r_ohm_per_km=0.876,
c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876,
max_loading_percent=100 * 690)
pp.create_piecewise_linear_cost(net, 0, "sgen", np.array([[-150, -200], [-75, -50], [0, 0]]))
# run OPF
pp.runopp(net, verbose=False)
assert net["OPF_converged"]
assert net.res_cost - net.res_sgen.p_kw.values / 1.5 < 1e-3
def test_cost_piecewise_linear_load_uneven_slopes():
""" Testing a very simple network for the resulting cost value
constraints with OPF """
# boundaries:
vm_max = 1.05
vm_min = 0.95
# create net
net = pp.create_empty_network()
pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.)
pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4)
pp.create_load(net, 1, p_kw=100, controllable=True, max_p_kw=150, min_p_kw=50, max_q_kvar=0,
min_q_kvar=0)
pp.create_ext_grid(net, 0)
pp.create_line_from_parameters(net, 0, 1, 50, name="line2", r_ohm_per_km=0.876,
c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876,
max_loading_percent=100 * 690)
pp.create_piecewise_linear_cost(net, 0, "load", np.array([[0, 0], [75, 51], [150, 101]]))
# run OPF
with pytest.raises(OPFNotConverged):
pp.runopp(net, verbose=False)
assert net["OPF_converged"]
assert abs(net.res_cost - net.res_load.p_kw.values / 1.5) < 1e-3
def test_cost_piecewise_linear_sgen_very_unsteady_slopes():
""" Testing a very simple network for the resulting cost value
constraints with OPF """
# boundaries:
vm_max = 1.5
vm_min = 0.5
# create net
net = pp.create_empty_network()
pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.)
pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4)
pp.create_sgen(net, 1, p_kw=-1000, controllable=True, max_p_kw=0, min_p_kw=-1500,
max_q_kvar=50,
min_q_kvar=-50)
pp.create_ext_grid(net, 0)
pp.create_load(net, 1, p_kw=20, controllable=False)
pp.create_line_from_parameters(net, 0, 1, 50, name="line2", r_ohm_per_km=0.876,
c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876,
max_loading_percent=100 * 690)
pp.create_piecewise_linear_cost(net, 0, "sgen", np.array([[-1500, 2],[-750,1 ], [0,2]]))
# run OPF
pp.runopp(net, verbose=False)
assert net["OPF_converged"]
# assert net.res_cost - net.res_sgen.p_kw.values / 1.5 < 1e-3
if __name__ == "__main__":
# test_cost_piecewise_linear_sgen_very_unsteady_slopes()
pytest.main(["test_costs_pwl.py", "-s"])
| 2.484375 | 2 |
cookie_refresh.py | guoxianru/cookie_pool_lite | 0 | 1994 | # -*- coding: utf-8 -*-
# @Author: GXR
# @CreateTime: 2022-01-20
# @UpdateTime: 2022-01-20
import redis
import config
import cookie_login
from cookie_api import app
red = redis.Redis(
host=config.REDIS_HOST,
port=config.REDIS_PORT,
db=config.REDIS_DB,
decode_responses=True,
)
# Replenish cookies until the pool reaches the configured size
def cookie_refresh():
while 1:
cookie_list = red.smembers(config.REDIS_KEY_COOKIE)
if len(cookie_list) >= config.COOKIE_COUNT:
break
cookie_login.run_cookie_login(1)
app.logger.info("[cookie数量正常]-[%s]" % len(cookie_list))
def run_cookie_refresh():
cookie_refresh()
if __name__ == "__main__":
run_cookie_refresh()
| 2.703125 | 3 |
feemodel/app/__init__.py | bitcoinfees/feemodel | 12 | 1995 | from feemodel.app.transient import TransientOnline
from feemodel.app.pools import PoolsOnlineEstimator
from feemodel.app.predict import Prediction
from feemodel.app.simonline import SimOnline
__all__ = [
'TransientOnline',
'PoolsOnlineEstimator',
'Prediction',
'SimOnline'
]
| 1.1875 | 1 |
examples/server/models/image_file_upload.py | ParikhKadam/django-angular | 941 | 1996 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
# start tutorial
from django.db import models
from djng.forms import NgModelFormMixin, NgFormValidationMixin
from djng.styling.bootstrap3.forms import Bootstrap3ModelForm
class SubscribeUser(models.Model):
full_name = models.CharField(
"<NAME>",
max_length=99)
avatar = models.ImageField("Avatar", blank=False, null=True)
permit = models.FileField("Permit", blank=True, null=True)
class SubscribeForm(NgModelFormMixin, NgFormValidationMixin, Bootstrap3ModelForm):
use_required_attribute = False
scope_prefix = 'subscribe_data'
form_name = 'my_form'
class Meta:
model = SubscribeUser
fields = ['full_name', 'avatar', 'permit']
| 2.296875 | 2 |
python/tvm/topi/hexagon/slice_ops/add_subtract_multiply.py | yangulei/tvm | 4,640 | 1997 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Compute and schedule for add, multiply, subtract slice op
Please note the following assumptions made by the implementation:
1) The inputs will be multiples of the crouton layout except for the axis that needs broadcasting."""
from tvm import te
from tvm import tir
from tvm import topi
from ..utils import get_layout_transform_fn
def add_broadcast_compute(input_a, input_b):
"""Call the add op from topi"""
return topi.add(input_a, input_b)
def subtract_broadcast_compute(input_a, input_b):
"""Call the subtract op from topi"""
return topi.subtract(input_a, input_b)
def multiply_broadcast_compute(input_a, input_b):
"""Call the multiply op from topi"""
return topi.multiply(input_a, input_b)
def tir_broadcast_schedule(
out_m,
input_a,
input_b,
output_layout: str,
input_a_layout: str,
input_b_layout: str,
op_name: str,
):
"""Schedule for input and output layout nhwc-8h2w32c2w-2d considering broadcast"""
func = te.create_prim_func([input_a, input_b, out_m])
s = tir.Schedule(func)
block_dict = {"add": "T_add", "subtract": "T_subtract", "multiply": "T_multiply"}
block = s.get_block(block_dict[op_name])
if input_a_layout == "nhwc-8h2w32c2w-2d":
input_a_transformed_layout = get_layout_transform_fn(input_a_layout)
s.transform_layout(block, buffer=("read", 0), index_map=input_a_transformed_layout)
if input_b_layout == "nhwc-8h2w32c2w-2d":
input_b_transformed_layout = get_layout_transform_fn(input_b_layout)
s.transform_layout(block, buffer=("read", 1), index_map=input_b_transformed_layout)
output_transformed_layout = get_layout_transform_fn(output_layout)
s.transform_layout(block, buffer=("write", 0), index_map=output_transformed_layout)
n, h, w, c = s.get_loops(block)
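    # Tile the loops to mirror the nhwc-8h2w32c2w crouton layout (presumably 8 in h,
    # 2 x 2 in w and 32 in c), then vectorize over the fused innermost c/w pair.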
h_o, h_i = s.split(h, [None, 8])
w_o, w_i = s.split(w, [None, 4])
c_o, c_i = s.split(c, [None, 32])
wio, wii = s.split(w_i, [None, 2])
s.reorder(n, h_o, w_o, c_o, h_i, wio, c_i, wii)
fused = s.fuse(c_i, wii)
s.vectorize(fused)
return s
| 2.140625 | 2 |
python_modules/automation/automation/docker/dagster_docker.py | jrouly/dagster | 0 | 1998 | <filename>python_modules/automation/automation/docker/dagster_docker.py
import contextlib
import os
from collections import namedtuple
import yaml
from dagster import __version__ as current_dagster_version
from dagster import check
from .ecr import ecr_image, get_aws_account_id, get_aws_region
from .utils import (
execute_docker_build,
execute_docker_push,
execute_docker_tag,
python_version_image_tag,
)
# Default repository prefix used for local images
DEFAULT_LOCAL_PREFIX = "dagster"
# Location of the template assets used here
IMAGES_PATH = os.path.join(os.path.dirname(__file__), "images")
@contextlib.contextmanager
def do_nothing(_cwd):
yield
class DagsterDockerImage(namedtuple("_DagsterDockerImage", "image build_cm path")):
"""Represents a Dagster image.
Properties:
image (str): Name of the image
build_cm (function): function that is a context manager for build (e.g. for populating a
build cache)
path (Optional(str)): The path to the image's path. Defaults to docker/images/<IMAGE NAME>
"""
def __new__(cls, image, build_cm=do_nothing, path=None):
return super(DagsterDockerImage, cls).__new__(
cls,
check.str_param(image, "image"),
check.callable_param(build_cm, "build_cm"),
check.opt_str_param(
path, "path", default=os.path.join(os.path.dirname(__file__), "images", image)
),
)
@property
def python_versions(self):
"""List of Python versions supported for this image."""
with open(os.path.join(self.path, "versions.yaml"), "r") as f:
versions = yaml.safe_load(f.read())
return list(versions.keys())
def _get_last_updated_for_python_version(self, python_version):
"""Retrieve the last_updated timestamp for a particular python_version of this image."""
check.str_param(python_version, "python_version")
with open(os.path.join(self.path, "last_updated.yaml"), "r") as f:
last_updated = yaml.safe_load(f.read())
return last_updated[python_version]
def _set_last_updated_for_python_version(self, timestamp, python_version):
"""Update the last_updated timestamp for a particular python_version of this image."""
check.str_param(timestamp, "timestamp")
check.str_param(python_version, "python_version")
last_updated = {}
last_updated_path = os.path.join(self.path, "last_updated.yaml")
if os.path.exists(last_updated_path):
with open(last_updated_path, "r") as f:
last_updated = yaml.safe_load(f.read())
last_updated[python_version] = timestamp
with open(os.path.join(self.path, "last_updated.yaml"), "w") as f:
yaml.dump(last_updated, f, default_flow_style=False)
def local_image(self, python_version):
"""Generates the local image name, like: "dagster/foo:some-tag" """
check.str_param(python_version, "python_version")
last_updated = self._get_last_updated_for_python_version(python_version)
tag = python_version_image_tag(python_version, last_updated)
return "{}/{}:{}".format(DEFAULT_LOCAL_PREFIX, self.image, tag)
def aws_image(self, python_version=None, custom_tag=None):
"""Generates the AWS ECR image name, like:
"1234567890.dkr.ecr.us-west-1.amazonaws.com/foo:some-tag"
"""
check.invariant(not (python_version and custom_tag))
check.opt_str_param(python_version, "python_version")
check.opt_str_param(custom_tag, "custom_tag")
if python_version:
last_updated = self._get_last_updated_for_python_version(python_version)
tag = python_version_image_tag(python_version, last_updated)
else:
tag = custom_tag
return ecr_image(
self.image,
tag,
aws_account_id=get_aws_account_id(),
aws_region=get_aws_region(),
)
def _get_docker_args(self, python_version):
"""Retrieve Docker arguments from this image's versions.yaml, and update with latest Dagster
version.
Also, we allow references in the image versions.yaml to another Dagster image to use as a
base image. If defined, set the BASE_IMAGE Docker arg from the full name of the parent
image.
"""
with open(os.path.join(self.path, "versions.yaml"), "r") as f:
versions = yaml.safe_load(f.read())
image_info = versions.get(python_version, {})
docker_args = image_info.get("docker_args", {})
if "base_image" in image_info:
check.invariant(
"BASE_IMAGE" not in docker_args, "Cannot override an existing BASE_IMAGE"
)
base_image = DagsterDockerImage(image_info["base_image"]["name"])
source = image_info["base_image"]["source"]
if source == "aws":
docker_args["BASE_IMAGE"] = base_image.aws_image(python_version)
elif source == "local":
docker_args["BASE_IMAGE"] = base_image.local_image(python_version)
else:
raise Exception("Unrecognized source {}".format(source))
# Set Dagster version
docker_args["DAGSTER_VERSION"] = current_dagster_version
return docker_args
def build(self, timestamp, dagster_version, python_version):
check.str_param(timestamp, "timestamp")
check.str_param(python_version, "python_version")
check.invariant(
dagster_version == current_dagster_version,
desc="Current dagster version ({}) does not match provided arg ({})".format(
current_dagster_version, dagster_version
),
)
with self.build_cm(self.path):
self._set_last_updated_for_python_version(timestamp, python_version)
execute_docker_build(
self.local_image(python_version),
docker_args=self._get_docker_args(python_version),
cwd=self.path,
)
def push(self, python_version, custom_tag=None):
"""Push this image to ECR."""
if custom_tag:
execute_docker_tag(
self.local_image(python_version),
self.aws_image(python_version=None, custom_tag=custom_tag),
)
execute_docker_push(self.aws_image(python_version=None, custom_tag=custom_tag))
else:
execute_docker_tag(self.local_image(python_version), self.aws_image(python_version))
execute_docker_push(self.aws_image(python_version))
| 2.390625 | 2 |
chrome/test/telemetry/chromeos/login_unittest.py | Fusion-Rom/android_external_chromium_org | 231 | 1999 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import logging
import os
import unittest
from telemetry.core import browser_finder
from telemetry.core import exceptions
from telemetry.core import extension_to_load
from telemetry.core import util
from telemetry.core.backends.chrome import cros_interface
from telemetry.unittest import options_for_unittests
class CrOSAutoTest(unittest.TestCase):
def setUp(self):
options = options_for_unittests.GetCopy()
self._cri = cros_interface.CrOSInterface(options.cros_remote,
options.cros_ssh_identity)
self._is_guest = options.browser_type == 'cros-chrome-guest'
self._username = '' if self._is_guest else options.browser_options.username
self._password = options.browser_options.password
def _IsCryptohomeMounted(self):
"""Returns True if cryptohome is mounted"""
cryptohomeJSON, _ = self._cri.RunCmdOnDevice(['/usr/sbin/cryptohome',
'--action=status'])
cryptohomeStatus = json.loads(cryptohomeJSON)
return (cryptohomeStatus['mounts'] and
cryptohomeStatus['mounts'][0]['mounted'])
def _CreateBrowser(self, autotest_ext=False, auto_login=True):
"""Finds and creates a browser for tests. if autotest_ext is True,
also loads the autotest extension"""
options = options_for_unittests.GetCopy()
if autotest_ext:
extension_path = os.path.join(os.path.dirname(__file__), 'autotest_ext')
self._load_extension = extension_to_load.ExtensionToLoad(
path=extension_path,
browser_type=options.browser_type,
is_component=True)
options.extensions_to_load = [self._load_extension]
browser_to_create = browser_finder.FindBrowser(options)
self.assertTrue(browser_to_create)
options.browser_options.create_browser_with_oobe = True
options.browser_options.auto_login = auto_login
b = browser_to_create.Create()
b.Start()
return b
def _GetAutotestExtension(self, browser):
"""Returns the autotest extension instance"""
extension = browser.extensions[self._load_extension]
self.assertTrue(extension)
return extension
def _GetLoginStatus(self, browser):
extension = self._GetAutotestExtension(browser)
self.assertTrue(extension.EvaluateJavaScript(
"typeof('chrome.autotestPrivate') != 'undefined'"))
extension.ExecuteJavaScript('''
window.__login_status = null;
chrome.autotestPrivate.loginStatus(function(s) {
window.__login_status = s;
});
''')
return util.WaitFor(
lambda: extension.EvaluateJavaScript('window.__login_status'), 10)
def testCryptohomeMounted(self):
"""Verifies cryptohome mount status for regular and guest user and when
logged out"""
with self._CreateBrowser() as b:
self.assertEquals(1, len(b.tabs))
self.assertTrue(b.tabs[0].url)
self.assertTrue(self._IsCryptohomeMounted())
chronos_fs = self._cri.FilesystemMountedAt('/home/chronos/user')
self.assertTrue(chronos_fs)
if self._is_guest:
self.assertEquals(chronos_fs, 'guestfs')
else:
home, _ = self._cri.RunCmdOnDevice(['/usr/sbin/cryptohome-path',
'user', self._username])
self.assertEquals(self._cri.FilesystemMountedAt(home.rstrip()),
chronos_fs)
self.assertFalse(self._IsCryptohomeMounted())
self.assertEquals(self._cri.FilesystemMountedAt('/home/chronos/user'),
'/dev/mapper/encstateful')
def testLoginStatus(self):
"""Tests autotestPrivate.loginStatus"""
with self._CreateBrowser(autotest_ext=True) as b:
login_status = self._GetLoginStatus(b)
self.assertEquals(type(login_status), dict)
self.assertEquals(not self._is_guest, login_status['isRegularUser'])
self.assertEquals(self._is_guest, login_status['isGuest'])
self.assertEquals(login_status['email'], self._username)
self.assertFalse(login_status['isScreenLocked'])
def _IsScreenLocked(self, browser):
return self._GetLoginStatus(browser)['isScreenLocked']
def _LockScreen(self, browser):
self.assertFalse(self._IsScreenLocked(browser))
extension = self._GetAutotestExtension(browser)
self.assertTrue(extension.EvaluateJavaScript(
"typeof chrome.autotestPrivate.lockScreen == 'function'"))
logging.info('Locking screen')
extension.ExecuteJavaScript('chrome.autotestPrivate.lockScreen();')
logging.info('Waiting for the lock screen')
def ScreenLocked():
return (browser.oobe and
browser.oobe.EvaluateJavaScript("typeof Oobe == 'function'") and
browser.oobe.EvaluateJavaScript(
"typeof Oobe.authenticateForTesting == 'function'"))
util.WaitFor(ScreenLocked, 10)
self.assertTrue(self._IsScreenLocked(browser))
def _AttemptUnlockBadPassword(self, browser):
logging.info('Trying a bad password')
def ErrorBubbleVisible():
return not browser.oobe.EvaluateJavaScript('''
document.getElementById('bubble').hidden
''')
self.assertFalse(ErrorBubbleVisible())
browser.oobe.ExecuteJavaScript('''
Oobe.authenticateForTesting('%s', 'bad');
''' % self._username)
util.WaitFor(ErrorBubbleVisible, 10)
self.assertTrue(self._IsScreenLocked(browser))
def _UnlockScreen(self, browser):
logging.info('Unlocking')
browser.oobe.ExecuteJavaScript('''
Oobe.authenticateForTesting('%s', '%s');
''' % (self._username, self._password))
util.WaitFor(lambda: not browser.oobe, 10)
self.assertFalse(self._IsScreenLocked(browser))
def testScreenLock(self):
"""Tests autotestPrivate.screenLock"""
with self._CreateBrowser(autotest_ext=True) as browser:
self._LockScreen(browser)
self._AttemptUnlockBadPassword(browser)
self._UnlockScreen(browser)
def testLogout(self):
"""Tests autotestPrivate.logout"""
with self._CreateBrowser(autotest_ext=True) as b:
extension = self._GetAutotestExtension(b)
try:
extension.ExecuteJavaScript('chrome.autotestPrivate.logout();')
except (exceptions.BrowserConnectionGoneException,
exceptions.BrowserGoneException):
pass
util.WaitFor(lambda: not self._IsCryptohomeMounted(), 20)
def _SwitchRegion(self, region):
self._cri.RunCmdOnDevice(['stop', 'ui'])
# Change VPD (requires RW-enabled firmware).
# To save time, region and initial_timezone are not set.
vpd = {'initial_locale': region.language_code,
'keyboard_layout': region.keyboard}
for (key, value) in vpd.items():
self._cri.RunCmdOnDevice(['vpd', '-s', '"%s"="%s"' % (key, value)])
# Remove cached files to clear initial locale info and force regeneration.
self._cri.RunCmdOnDevice(['rm', '/home/chronos/Local\ State'])
self._cri.RunCmdOnDevice(['rm', '/home/chronos/.oobe_completed'])
self._cri.RunCmdOnDevice(['dump_vpd_log', '--force'])
self._cri.RunCmdOnDevice(['start', 'ui'])
def _OobeHasOption(self, browser, selectId, value):
hasOptionJs = '''
// Check that the option is present, and selected if it is the default.
(function hasOption(selectId, value, isDefault) {
var options = document.getElementById(selectId).options;
for (var i = 0; i < options.length; i++) {
if (options[i].value == value) {
// The option is present. Make sure it's selected if necessary.
return !isDefault || options.selectedIndex == i;
}
}
return false;
})("%s", "%s", %s);
'''
return browser.oobe.EvaluateJavaScript(
hasOptionJs % (selectId, value, 'true'))
def _ResolveLanguage(self, locale):
# If the locale matches a language but not the country, fall back to
# an existing locale. See ui/base/l10n/l10n_util.cc.
lang, _, region = map(str.lower, locale.partition('-'))
if not region:
return ""
# Map from other countries to a localized country
if lang == 'es' and region == 'es':
return 'es-419'
if lang == 'zh':
if region in ('hk', 'mo'):
return 'zh-TW'
return 'zh-CN'
if lang == 'en':
if region in ('au', 'ca', 'nz', 'za'):
return 'en-GB'
return 'en-US'
# No mapping found
return ""
def testOobeLocalization(self):
"""Tests different region configurations at OOBE"""
# Save the original device localization settings.
# To save time, only read initial_locale and keyboard_layout.
initial_region = self.Region('', '', '', '', '')
initial_region.language_code, _ = self._cri.RunCmdOnDevice(
['vpd', '-g', 'initial_locale'])
initial_region.keyboard, _ = self._cri.RunCmdOnDevice(
['vpd', '-g', 'keyboard_layout'])
for region in self.REGIONS_LIST:
self._SwitchRegion(region)
with self._CreateBrowser(auto_login=False) as browser:
# Ensure the dropdown lists have been created.
util.WaitFor(lambda: browser.oobe.EvaluateJavaScript(
'document.getElementById("language-select") != null'),
10)
# Find the language, or an acceptable fallback value.
languageFound = self._OobeHasOption(browser,
'language-select',
region.language_code)
if not languageFound:
fallback = self._ResolveLanguage(region.language_code)
self.assertTrue(fallback and
self._OobeHasOption(browser,
'language-select',
fallback))
# Find the keyboard layout.
self.assertTrue(self._OobeHasOption(
browser, 'keyboard-select', region.keyboard))
# Test is finished. Restore original region settings.
self._SwitchRegion(initial_region)
# The Region class and region list will be available in regions.py.
class Region(object):
def __init__(self, region_code, keyboard, time_zone, language_code,
keyboard_mechanical_layout, description=None, notes=None):
self.region_code = region_code
self.keyboard = keyboard
self.time_zone = time_zone
self.language_code = language_code
self.keyboard_mechanical_layout = keyboard_mechanical_layout
self.description = description or region_code
self.notes = notes
class Enum(frozenset):
def __getattr__(self, name):
if name in self:
return name
raise AttributeError
KeyboardMechanicalLayout = Enum(['ANSI', 'ISO', 'JIS', 'ABNT2'])
_KML = KeyboardMechanicalLayout
REGIONS_LIST = [
Region('au', 'xkb:us::eng', 'Australia/Sydney', 'en-AU', _KML.ANSI,
'Australia'),
Region('ca.ansi', 'xkb:us::eng', 'America/Toronto', 'en-CA', _KML.ANSI,
'Canada (US keyboard)',
'Canada with US (ANSI) keyboard; see http://goto/cros-canada'),
Region('ca.fr', 'xkb:ca::fra', 'America/Toronto', 'fr-CA', _KML.ISO,
'Canada (French keyboard)',
('Canadian French (ISO) keyboard. The most common configuration for '
'Canadian French SKUs. See http://goto/cros-canada')),
Region('ca.hybrid', 'xkb:ca:eng:eng', 'America/Toronto', 'en-CA', _KML.ISO,
'Canada (hybrid)',
('Canada with hybrid xkb:ca:eng:eng + xkb:ca::fra keyboard (ISO), '
'defaulting to English language and keyboard. Used only if there '
'needs to be a single SKU for all of Canada. See '
'http://goto/cros-canada')),
Region('ca.multix', 'xkb:ca:multix:fra', 'America/Toronto', 'fr-CA',
_KML.ISO, 'Canada (multilingual)',
("Canadian Multilingual keyboard; you probably don't want this. See "
"http://goto/cros-canada")),
Region('de', 'xkb:de::ger', 'Europe/Berlin', 'de', _KML.ISO, 'Germany'),
Region('fi', 'xkb:fi::fin', 'Europe/Helsinki', 'fi', _KML.ISO, 'Finland'),
Region('fr', 'xkb:fr::fra', 'Europe/Paris', 'fr', _KML.ISO, 'France'),
Region('gb', 'xkb:gb:extd:eng', 'Europe/London', 'en-GB', _KML.ISO, 'UK'),
Region('ie', 'xkb:gb:extd:eng', 'Europe/Dublin', 'en-GB', _KML.ISO,
'Ireland'),
Region('in', 'xkb:us::eng', 'Asia/Calcutta', 'en-US', _KML.ANSI, 'India'),
Region('my', 'xkb:us::eng', 'Asia/Kuala_Lumpur', 'ms', _KML.ANSI,
'Malaysia'),
Region('nl', 'xkb:us:intl:eng', 'Europe/Amsterdam', 'nl', _KML.ANSI,
'Netherlands'),
Region('nordic', 'xkb:se::swe', 'Europe/Stockholm', 'en-US', _KML.ISO,
'Nordics',
('Unified SKU for Sweden, Norway, and Denmark. This defaults '
'to Swedish keyboard layout, but starts with US English language '
'for neutrality. Use if there is a single combined SKU for Nordic '
'countries.')),
Region('se', 'xkb:se::swe', 'Europe/Stockholm', 'sv', _KML.ISO, 'Sweden',
("Use this if there separate SKUs for Nordic countries (Sweden, "
"Norway, and Denmark), or the device is only shipping to Sweden. "
"If there is a single unified SKU, use 'nordic' instead.")),
Region('sg', 'xkb:us::eng', 'Asia/Singapore', 'en-GB', _KML.ANSI,
'Singapore'),
Region('us', 'xkb:us::eng', 'America/Los_Angeles', 'en-US', _KML.ANSI,
'United States'),
]
| 2.015625 | 2 |