import sys, os
#--------------------------------------------------------------------------------------------------
if len(sys.argv) != 2:
raise Exception('Usage: python ./mob_uploader.py input_path')
if not os.path.exists(sys.argv[1]):
raise Exception('Path does not exist: ' + sys.argv[1])
if not os.path.exists(os.path.join(sys.argv[1], '__SETTINGS__.py')):
raise Exception('Path does not contain __SETTINGS__.py: ' + sys.argv[1])
sys.path.append(sys.argv[1])
#--------------------------------------------------------------------------------------------------
from edx_gen import _util
import __SETTINGS__
#--------------------------------------------------------------------------------------------------
# Text strings
WARNING = " WARNING:"
#--------------------------------------------------------------------------------------------------
# process one course
def main():
print("Start processing")
    # get the main mooc input folder, which is given as the first command-line argument
root_folder = os.path.normpath(sys.argv[1])
course_path = os.path.join(root_folder, 'Course')
# loop
for [section, section_path] in _util.getSubFolders(course_path):
print("- section", section)
section_filename = section.lower()
for [subsection, subsection_path] in _util.getSubFolders(section_path):
print("-- subsection", subsection)
subsection_filename = section_filename + '_' + subsection.lower()
for [unit, unit_path] in _util.getSubFolders(subsection_path):
print("--- unit", unit)
unit_filename = subsection_filename + '_' + unit.lower()
for [component, component_path] in _util.getFiles(unit_path):
[component_name, component_ext] = component.split('.')
component_filename = unit_filename + '_' + component_name
# create the filename on s3
# this matches the url created in _mob_iframe.py
mob_filename = component_filename + '.' + component_ext
# upload an answer to a private repo
if _util.ends(mob_filename, __SETTINGS__.MOB_ANSWER_FILENAMES):
_util.upload_s3_answer(component_path, mob_filename)
# upload an example to a public repo
elif _util.ends(mob_filename, __SETTINGS__.MOB_EXAMPLE_FILENAMES):
_util.upload_s3_example(component_path, mob_filename)
# ignore files with wrong extension
else:
pass
print("Finished processing")
#--------------------------------------------------------------------------------------------------
main()
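# Example invocation (hypothetical path; the input folder must contain __SETTINGS__.py
# and a 'Course' subfolder, as checked above):
#   python ./mob_uploader.py /path/to/mooc_input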
|
import numpy as np
import pandas as pd
import torch
import torchvision
import torch.nn.functional as F
from torchvision import datasets,transforms,models
import matplotlib.pyplot as plt
#from train import get_pretrained_model
from torch import nn,optim
from PIL import Image
#%matplotlib inline
def load_dataset(data_direc):
data_dir = data_direc
train_dir = data_dir + '/train'
valid_dir = data_dir + '/valid'
test_dir = data_dir + '/test'
train_transforms = transforms.Compose([transforms.RandomRotation(30),
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
valid_transforms=transforms.Compose([transforms.Resize(255),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485,0.456,0.406],
[0.229,0.224,0.225])])
test_transforms = transforms.Compose([transforms.Resize(255),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
train_data=datasets.ImageFolder(train_dir,transform=train_transforms)
valid_data=datasets.ImageFolder(valid_dir,transform=valid_transforms)
test_data=datasets.ImageFolder(test_dir,transform=test_transforms)
trainloader =torch.utils.data.DataLoader(train_data,batch_size=64,shuffle=True)
validloader =torch.utils.data.DataLoader(valid_data,batch_size=64)
testloader =torch.utils.data.DataLoader(test_data,batch_size=64)
    return trainloader, validloader, testloader, train_data
def load_checkpoint(filepath,device):
# Checkpoint for when using GPU
if torch.cuda.is_available():
map_location=lambda storage, loc: storage.cuda()
else:
map_location='cpu'
pretrained_model= 'vgg16' #get_pretrained_model()
checkpoint = torch.load(filepath, map_location=map_location)
model = getattr(models,pretrained_model)(pretrained=True)
model.classifier=checkpoint['classifier']
model.class_to_idx=checkpoint['class_to_idx']
model.load_state_dict(checkpoint['state_dict'])
#device=torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
return model
def process_image(image):
''' Scales, crops, and normalizes a PIL image for a PyTorch model,
        returns a NumPy array
'''
# TODO: Process a PIL image for use in a PyTorch model
im=Image.open(image)
transform = transforms.Compose([transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
## Transforming image for use with network
trans_im = transform(im)
# Converting to Numpy array
array_im_tfd = np.array(trans_im)
return array_im_tfd
def imshow(image, ax=None, title=None):
"""Imshow for Tensor."""
if ax is None:
fig, ax = plt.subplots()
# PyTorch tensors assume the color channel is the first dimension
    # but matplotlib assumes it is the third dimension
#image = image.numpy().transpose((1, 2, 0))
image=image.transpose((1,2,0))
# Undo preprocessing
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
image = std * image + mean
# Image needs to be clipped between 0 and 1 or it looks like noise when displayed
image = np.clip(image, 0, 1)
ax.imshow(image)
return ax
def predict_im(image_path, model, topk):
''' Predict the class (or classes) of an image using a trained deep learning model.
'''
# TODO: Implement the code to predict the class from an image file
k=topk
test_img=process_image(image_path)
test_img=torch.from_numpy(test_img)
batch_img=torch.unsqueeze(test_img,0)
outputs=model(batch_img)
top_ps,top_indices = torch.topk(outputs,k)
top_ps=torch.exp(top_ps)
class_to_idx_inv={k:v for v,k in model.class_to_idx.items()}
top_ps=top_ps.view(k).tolist()
top_indices=top_indices.view(k).tolist()
final_indices=[class_to_idx_inv[x] for x in top_indices]
return top_ps,final_indices
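# Minimal usage sketch (hypothetical file names; assumes a checkpoint saved with the
# 'classifier', 'class_to_idx' and 'state_dict' keys expected by load_checkpoint above):
# model = load_checkpoint('checkpoint.pth', 'cpu')
# probs, classes = predict_im('test_image.jpg', model, topk=5)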
|
"""AyudaEnPython: https://www.facebook.com/groups/ayudapython
"""
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
def load_data(min: int = 0, max: int = 10, size: int = 20) -> np.ndarray:
return np.random.randint(min, max, size=size)
def statistics(ser: pd.Series) -> None:
    for name in ("mean", "mode", "median"):
        print(f"{name}: {getattr(ser, name)()}")
def show_plots(ser: pd.Series, arr: np.ndarray) -> None:
    _, axs = plt.subplots(1, 3, figsize=(15, 5))
    for i, kind in enumerate(("bar", "pie")):
        ser.value_counts().plot(kind=kind, ax=axs[i])
    ser.quantile(arr).plot(kind="box", ax=axs[2])
    plt.show()
def main():
data = load_data()
ser = pd.Series(data)
print(ser.value_counts())
statistics(ser)
show_plots(ser, np.arange(0.1, 1, 0.1))
if __name__ == "__main__":
main()
|
__copyright__ = "Copyright (C) 2019 Zachary J Weiner"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import numpy as np
import pyopencl as cl
import pyopencl.array as cla
import pyopencl.clrandom as clr
import pystella as ps
import pytest
from pyopencl.tools import ( # noqa
pytest_generate_tests_for_pyopencl as pytest_generate_tests)
from pystella.multigrid import JacobiIterator, NewtonIterator
@pytest.mark.parametrize("h", [1])
@pytest.mark.parametrize("dtype", [np.float64])
@pytest.mark.parametrize("Solver", [JacobiIterator, NewtonIterator])
def test_relax(ctx_factory, grid_shape, proc_shape, h, dtype, Solver, timing=False):
if min(grid_shape) < 128:
pytest.skip("test_relax needs larger grids, for now")
ctx = ctx_factory()
queue = cl.CommandQueue(ctx)
rank_shape = tuple(Ni // pi for Ni, pi in zip(grid_shape, proc_shape))
mpi = ps.DomainDecomposition(proc_shape, h, rank_shape)
L = 10
dx = L / grid_shape[0]
dk = 2 * np.pi / L
fft = ps.DFT(mpi, ctx, queue, grid_shape, dtype)
spectra = ps.PowerSpectra(mpi, fft, (dk,)*3, L**3)
statistics = ps.FieldStatistics(mpi, h, rank_shape=rank_shape,
grid_size=np.product(grid_shape))
def get_laplacian(f):
from pystella.derivs import _lap_coefs, centered_diff
lap_coefs = _lap_coefs[h]
from pymbolic import var
return sum([centered_diff(f, lap_coefs, direction=mu, order=2)
for mu in range(1, 4)]) / var("dx")**2
test_problems = {}
from pystella import Field
f = Field("f", offset="h")
rho = Field("rho", offset="h")
test_problems[f] = (get_laplacian(f), rho)
f = Field("f2", offset="h")
rho = Field("rho2", offset="h")
test_problems[f] = (get_laplacian(f) - f, rho)
solver = Solver(mpi, queue, test_problems, halo_shape=h, dtype=dtype,
fixed_parameters=dict(omega=1/2))
def zero_mean_array():
f0 = clr.rand(queue, grid_shape, dtype)
f = clr.rand(queue, tuple(ni + 2*h for ni in rank_shape), dtype)
mpi.scatter_array(queue, f0, f, root=0)
avg = statistics(f)["mean"]
f = f - avg
mpi.share_halos(queue, f)
return f
f = zero_mean_array()
rho = zero_mean_array()
tmp = cla.zeros_like(f)
f2 = zero_mean_array()
rho2 = zero_mean_array()
tmp2 = cla.zeros_like(f)
num_iterations = 1000
errors = {"f": [], "f2": []}
first_mode_zeroed = {"f": [], "f2": []}
for _ in range(0, num_iterations, 2):
solver(mpi, queue, iterations=2, dx=np.array(dx),
f=f, tmp_f=tmp, rho=rho,
f2=f2, tmp_f2=tmp2, rho2=rho2)
err = solver.get_error(queue,
f=f, r_f=tmp, rho=rho,
f2=f2, r_f2=tmp2, rho2=rho2, dx=np.array(dx))
for k, v in err.items():
errors[k].append(v)
for key, resid in zip(["f", "f2"], [tmp, tmp2]):
spectrum = spectra(resid, k_power=0)
if mpi.rank == 0:
max_amp = np.max(spectrum)
first_zero = np.argmax(spectrum[1:] < 1e-30 * max_amp)
first_mode_zeroed[key].append(first_zero)
for _, errs in errors.items():
errs = np.array(errs)
iters = np.arange(1, errs.shape[0]+1)
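        # The error should decay at least like 1/iteration: multiplied by the iteration
        # count and divided by the initial error it must stay below 1.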
assert (errs[10:, 0] * iters[10:] / errs[0, 0] < 1.).all(), \
"relaxation not converging at least linearly for " \
f"{grid_shape=}, {h=}, {proc_shape=}"
first_mode_zeroed = mpi.bcast(first_mode_zeroed, root=0)
for _, x in first_mode_zeroed.items():
x = np.array(list(x))[2:]
assert (x[1:] <= x[:-1]).all() and np.min(x) < np.max(x) / 5, \
f"relaxation not smoothing error {grid_shape=}, {h=}, {proc_shape=}"
if __name__ == "__main__":
from common import parser
parser.set_defaults(grid_shape=(128,)*3)
args = parser.parse_args()
test_relax(
ps.choose_device_and_make_context,
grid_shape=args.grid_shape, proc_shape=args.proc_shape,
h=args.h, dtype=args.dtype, timing=args.timing,
Solver=NewtonIterator
)
|
"""
vmagnify.py : abstract class for the processing of the data.
"""
class VMagnify:
""" VMagnify is an abstract class for the processing of the data """
EDSR_MODEL_X2_PATH = "static/models/EDSR/EDSR_x2.pb"
EDSR_MODEL_X3_PATH = "static/models/EDSR/EDSR_x3.pb"
EDSR_MODEL_X4_PATH = "static/models/EDSR/EDSR_x4.pb"
def __init__(self) -> None:
self.url = ""
|
"""
.. codeauthor:: David Zwicker <[email protected]>
"""
import glob
import os
import subprocess as sp
import sys
from pathlib import Path
from typing import List # @UnusedImport
import pytest
from pde.tools.misc import module_available
PACKAGE_PATH = Path(__file__).resolve().parents[2]
EXAMPLES = glob.glob(str(PACKAGE_PATH / "examples" / "*.py"))
SKIP_EXAMPLES: List[str] = []
if not module_available("matplotlib"):
SKIP_EXAMPLES.append("plot_emulsion.py")
@pytest.mark.no_cover
@pytest.mark.skipif(sys.platform == "win32", reason="Assumes unix setup")
@pytest.mark.parametrize("path", EXAMPLES)
def test_example(path):
"""runs an example script given by path"""
if os.path.basename(path).startswith("_"):
pytest.skip("skip examples starting with an underscore")
if any(name in path for name in SKIP_EXAMPLES):
pytest.skip(f"Skip test {path}")
env = os.environ.copy()
env["PYTHONPATH"] = str(PACKAGE_PATH) + ":" + env.get("PYTHONPATH", "")
proc = sp.Popen([sys.executable, path], env=env, stdout=sp.PIPE, stderr=sp.PIPE)
try:
outs, errs = proc.communicate(timeout=30)
except sp.TimeoutExpired:
proc.kill()
outs, errs = proc.communicate()
msg = "Script `%s` failed with following output:" % path
if outs:
msg = "%s\nSTDOUT:\n%s" % (msg, outs)
if errs:
msg = "%s\nSTDERR:\n%s" % (msg, errs)
assert proc.returncode <= 0, msg
|
import pytest
from traitlets.tests.utils import check_help_all_output
from jupyter_server.utils import url_escape, url_unescape
def test_help_output():
check_help_all_output('jupyter_server')
@pytest.mark.parametrize(
'unescaped,escaped',
[
(
'/this is a test/for spaces/',
'/this%20is%20a%20test/for%20spaces/'
),
(
'notebook with space.ipynb',
'notebook%20with%20space.ipynb'
),
(
'/path with a/notebook and space.ipynb',
'/path%20with%20a/notebook%20and%20space.ipynb'
),
(
'/ !@$#%^&* / test %^ notebook @#$ name.ipynb',
'/%20%21%40%24%23%25%5E%26%2A%20/%20test%20%25%5E%20notebook%20%40%23%24%20name.ipynb'
)
]
)
def test_url_escaping(unescaped, escaped):
# Test escaping.
path = url_escape(unescaped)
assert path == escaped
# Test unescaping.
path = url_unescape(escaped)
assert path == unescaped
|
import re
with open('input.txt', 'r') as fh:
lines = [l.strip() for l in fh.readlines()]
vowels = 'aeiou'
bad = ['ab', 'cd', 'pq', 'xy']
def nice(word):
vowel_count = sum(word.count(v) for v in vowels) >= 3
double = re.match('.*(([a-z])\\2{1}).*', word) is not None
no_bads = not any(b in word for b in bad)
return all([vowel_count, double, no_bads])
print 'part 1', len(filter(nice, lines))
def nicer(word):
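    # repeat_pair: some two-letter pair occurs at least twice (backreference \1);
    # repeat_letter: some letter repeats with exactly one character in between.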
repeat_pair = re.match('.*([a-z]{2}).*\\1.*', word) is not None
repeat_letter = re.match('.*([a-z])[a-z]\\1.*', word) is not None
return repeat_pair and repeat_letter
print 'part 2', len(filter(nicer, lines))
|
import komand
from .schema import FindIssuesInput, FindIssuesOutput, Input, Output, Component
# Custom imports below
from ...util import *
class FindIssues(komand.Action):
def __init__(self):
super(self.__class__, self).__init__(
name='find_issues',
description=Component.DESCRIPTION,
input=FindIssuesInput(),
output=FindIssuesOutput())
def run(self, params={}):
"""Search for issues"""
max = params.get(Input.MAX)
get_attachments = params.get(Input.GET_ATTACHMENTS, False)
issues = self.connection.client.search_issues(jql_str=params['jql'], maxResults=max)
results = list(map(lambda issue: normalize_issue(issue, get_attachments=get_attachments,logger=self.logger), issues))
results = komand.helper.clean(results)
return {Output.ISSUES: results}
|
#!/usr/bin/env python3
import numpy as np
import random
import os
from scipy.io import loadmat
from PIL import Image
from collections import defaultdict
from sklearn.model_selection import train_test_split
import cv2
from sklearn.utils import shuffle
import imgaug.augmenters as iaa
from few_shot.constants import DATA_DIR
def path_to_image(path, image_shape):
'''
    Loads an image from disk and resizes it so it will work with a pre-trained network
'''
car_image = cv2.imread(path)
car_image = cv2.cvtColor(car_image, cv2.COLOR_BGR2RGB)
return np.array(Image.fromarray(car_image).resize(
(image_shape[0], image_shape[1])))
def augment_pair(image_a, image_b):
'''
Augments a pair of input images in the same way
'''
seq = iaa.Sequential([
iaa.Crop(percent=(0, 0.1)), # random crops
# Small gaussian blur with random sigma between 0 and 0.5.
# But we only blur about 50% of all images.
iaa.Sometimes(0.5, iaa.GaussianBlur(sigma=(0, 0.1))),
# Strengthen or weaken the contrast in each image.
iaa.ContrastNormalization((0.75, 1.5)),
# flip the image left to right half the time
iaa.Fliplr(0.5),
# Add gaussian noise.
# For 50% of all images, we sample the noise once per pixel.
# For the other 50% of all images, we sample the noise per pixel AND
# channel. This can change the color (not only brightness) of the
# pixels.
iaa.AdditiveGaussianNoise(loc=0,
scale=(0.0, 0.05*255),
per_channel=0.5),
# Make some images brighter and some darker.
# In 20% of all cases, we sample the multiplier once per channel,
# which can end up changing the color of the images.
iaa.Multiply((0.8, 1.2), per_channel=0.2),
# Apply affine transformations to each image.
# Scale/zoom them, translate/move them, rotate them and shear them.
iaa.Affine(
scale={"x": (0.95, 1.05), "y": (0.95, 1.05)},
translate_percent={"x": (-0.1, 0.1), "y": (-0.1, 0.1)},
rotate=(-5, 5),
shear=(-4, 4),
# mode='edge',
),
iaa.PerspectiveTransform(scale=(0.01, 0.05)),
], random_order=True) # apply augmenters in random order
image_a, image_b = seq(images=[image_a, image_b])
return image_a, image_b
def make_pairs_batch(dataset, batch_size, image_shape):
'''
Given the features and labels of a dataset, return a batch of matching and
not matching pairs
'''
cars_dict = defaultdict(list)
for car in dataset:
cars_dict[car.class_id].append(car.filename)
pairs = []
pair_labels = []
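    # label convention used below: 0 marks a matching pair, 1 a non-matching pair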
for i in range(batch_size // 2):
# select a random feature
class_id = random.choice(list(cars_dict.keys()))
anchor_car_path = random.choice(list(cars_dict[class_id]))
matching_car_path = random.choice(list(cars_dict[class_id]))
anchor_car = path_to_image(anchor_car_path, image_shape)
matching_car = path_to_image(matching_car_path, image_shape)
augmented_anchor, augmented_match = augment_pair(
anchor_car, matching_car)
pairs.append([augmented_anchor, augmented_match])
pair_labels.append(0)
# adding a non-matching example
non_matching_class_id = random.choice(list(cars_dict.keys()))
while non_matching_class_id == class_id:
non_matching_class_id = random.choice(list(cars_dict.keys()))
# get a sample from a different class
non_matching_car_path = random.choice(list(
cars_dict[non_matching_class_id]))
non_matching_car = path_to_image(non_matching_car_path, image_shape)
augmented_anchor, augmented_non_match = augment_pair(
anchor_car, non_matching_car)
pairs.append([augmented_anchor, augmented_non_match])
pair_labels.append(1)
pairs, pair_labels = shuffle(pairs, pair_labels)
pairs = np.array(pairs)
return [pairs[:, 0], pairs[:, 1]], np.array(pair_labels)
class Car():
'''
Creates car instance
'''
def __init__(self, np_data, classnames):
self._filename = np_data[0][0]
self.lr_x = np_data[1][0][0]
self.lr_y = np_data[2][0][0]
self.ul_x = np_data[3][0][0]
        self.ul_y = np_data[4][0][0]
self.class_id = np_data[5][0][0]
self.class_name = classnames[self.class_id]
@property
def filename(self):
return os.path.join(DATA_DIR, self._filename)
def __repr__(self):
'''
String representation of the Car
'''
return '<Car {} | {} />'.format(self.class_id, self.filename)
def np_to_cars(numpy_dataset, classnames):
'''
Converts the raw numpy array to Car instances
'''
cars = []
for sample in numpy_dataset['annotations'][0][:]:
car = Car(sample, classnames)
cars.append(car)
return cars
def car_generator(dataset, batch_size, image_shape):
'''
Given a dataset of Car instances, yield matching and non-matching pairs
of car instances
'''
while True:
features, labels = make_pairs_batch(dataset, batch_size, image_shape)
yield features, labels
def get_car_generators(batch_size, image_shape):
'''
Return the training and validation car generators
'''
train, validation, test = get_car_data()
return (
car_generator(train, batch_size, image_shape),
car_generator(validation, batch_size, image_shape),
car_generator(test, batch_size, image_shape))
def get_car_data():
'''
Extracts the car data
'''
metadata = loadmat(os.path.join(DATA_DIR, 'car_data', 'cars_meta.mat'))
classnames = {i + 1: a[0] for i, a in enumerate(metadata['class_names'][0])}
car_data_raw = loadmat(os.path.join(DATA_DIR, 'car_data', 'cars_annos.mat'))
random.seed(9001)
car_data = np_to_cars(car_data_raw, classnames)
random.shuffle(car_data)
train, validation = train_test_split(car_data, test_size=0.3)
validation, test = train_test_split(validation, test_size=0.5)
return train, validation, test
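# Minimal usage sketch (hypothetical values; assumes the Stanford Cars annotation files
# cars_meta.mat and cars_annos.mat live under DATA_DIR/car_data, as loaded above):
# train_gen, valid_gen, test_gen = get_car_generators(batch_size=32, image_shape=(224, 224, 3))
# (anchors, others), labels = next(train_gen)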
|
import tensorflow as tf
from keras.callbacks import LearningRateScheduler
from callbacks import *
from help_functions import *
from plots import *
from networks import *
from dataset_preparation import get_dataset
def train_model(model, X_train, y_train, X_test, y_test, num_of_epochs, nn_cnn, batch_size=32, validation_share=0.0,
mode='normal', context_matrices=None, task_index=None):
"""
Train and evaluate Keras model.
:param model: Keras model instance
:param X_train: train input data
:param y_train: train output labels
:param X_test: test input data
:param y_test: test output labels
:param num_of_epochs: number of epochs to train the model
:param nn_cnn: usage of (convolutional) neural network (possible values: 'nn' or 'cnn')
:param batch_size: batch size - number of samples per gradient update (default = 32)
:param validation_share: share of examples to be used for validation (default = 0)
:param mode: string for learning mode, important for callbacks - possible values: 'normal', 'superposition'
:param context_matrices: multidimensional numpy array with random context (binary superposition), only used when mode = 'superposition'
:param task_index: index of current task, only used when mode = 'superposition'
:return: History object and 2 lists of test accuracies for every training epoch (normal and superposition)
"""
lr_callback = LearningRateScheduler(lr_scheduler)
test_callback = TestPerformanceCallback(X_test, y_test, model)
if nn_cnn == 'nn':
test_superposition_callback = TestSuperpositionPerformanceCallback(X_test, y_test, context_matrices, model, task_index)
elif nn_cnn == 'cnn':
test_superposition_callback = TestSuperpositionPerformanceCallback_CNN(X_test, y_test, context_matrices, model, task_index)
callbacks = [lr_callback]
if mode == 'normal':
callbacks.append(test_callback)
elif mode == 'superposition':
callbacks.append(test_superposition_callback)
history = model.fit(X_train, y_train, epochs=num_of_epochs, batch_size=batch_size, verbose=2,
validation_split=validation_share, callbacks=callbacks)
return history, test_callback.accuracies, test_superposition_callback.accuracies
def normal_training_mnist(model, X_train, y_train, X_test, y_test, num_of_epochs, num_of_tasks, nn_cnn, batch_size=32):
"""
Train model for 'num_of_tasks' tasks, each task is a different permutation of input images.
Check how accuracy for original images is changing through tasks using normal training.
:param model: Keras model instance
:param X_train: train input data
:param y_train: train output labels
:param X_test: test input data
:param y_test: test output labels
:param num_of_epochs: number of epochs to train the model
:param num_of_tasks: number of different tasks (permutations of original images)
:param nn_cnn: usage of (convolutional) neural network (possible values: 'nn' or 'cnn')
:param batch_size: batch size - number of samples per gradient update (default = 32)
:return: list of test accuracies for 10 epochs for each task
"""
original_accuracies = []
# first training task - original MNIST images
history, accuracies, _ = train_model(model, X_train, y_train, X_test, y_test, num_of_epochs, nn_cnn, batch_size, validation_share=0.1)
original_accuracies.extend(accuracies)
print_validation_acc(history, 0)
# other training tasks - permuted MNIST data
for i in range(num_of_tasks - 1):
print("\n\n Task: %d \n" % (i + 1))
permuted_X_train = permute_images(X_train)
history, accuracies, _ = train_model(model, permuted_X_train, y_train, X_test, y_test, num_of_epochs, nn_cnn, batch_size, validation_share=0.1)
original_accuracies.extend(accuracies)
print_validation_acc(history, i + 1)
return original_accuracies
def superposition_training_mnist(model, X_train, y_train, X_test, y_test, num_of_epochs, num_of_tasks, context_matrices, nn_cnn, batch_size=32):
"""
Train model for 'num_of_tasks' tasks, each task is a different permutation of input images.
Check how accuracy for original images is changing through tasks using superposition training.
:param model: Keras model instance
:param X_train: train input data
:param y_train: train output labels
:param X_test: test input data
:param y_test: test output labels
:param num_of_epochs: number of epochs to train the model
:param num_of_tasks: number of different tasks (permutations of original images)
:param context_matrices: multidimensional numpy array with random context (binary superposition)
:param nn_cnn: usage of (convolutional) neural network (possible values: 'nn' or 'cnn')
:param batch_size: batch size - number of samples per gradient update (default = 32)
:return: list of test accuracies for 10 epochs for each task
"""
original_accuracies = []
# context_multiplication(model, context_matrices, 0)
# first training task - original MNIST images
history, _, accuracies = train_model(model, X_train, y_train, X_test, y_test, num_of_epochs, nn_cnn, batch_size, validation_share=0.1,
mode='superposition', context_matrices=context_matrices, task_index=0)
original_accuracies.extend(accuracies)
print_validation_acc(history, 0)
# other training tasks - permuted MNIST data
for i in range(num_of_tasks - 1):
print("\n\n Task: %d \n" % (i + 1))
# multiply current weights with context matrices for each layer (without changing weights from bias node)
if nn_cnn == 'nn':
context_multiplication(model, context_matrices, i + 1)
elif nn_cnn == 'cnn':
context_multiplication_CNN(model, context_matrices, i + 1)
permuted_X_train = permute_images(X_train)
history, _, accuracies = train_model(model, permuted_X_train, y_train, X_test, y_test, num_of_epochs, nn_cnn, batch_size, validation_share=0.1,
mode='superposition', context_matrices=context_matrices, task_index=i + 1)
original_accuracies.extend(accuracies)
print_validation_acc(history, i + 1)
return original_accuracies
def normal_training_cifar(model, datasets, num_of_epochs, num_of_tasks, nn_cnn, batch_size=32):
"""
Train model for 'num_of_tasks' tasks, each task is a different disjoint set of CIFAR-100 images.
Check how accuracy for the first set of images is changing through tasks using normal training.
:param model: Keras model instance
:param datasets: list of disjoint datasets with corresponding train and test set
:param num_of_epochs: number of epochs to train the model
:param num_of_tasks: number of different tasks
:param nn_cnn: usage of (convolutional) neural network (possible values: 'nn' or 'cnn')
:param batch_size: batch size - number of samples per gradient update (default = 32)
:return: list of test accuracies for 10 epochs for each task
"""
original_accuracies = []
# first training task - 10 classes of CIFAR-100 dataset
X_train, y_train, X_test, y_test = datasets[0] # these X_test and y_test are used for testing all tasks
history, accuracies, _ = train_model(model, X_train, y_train, X_test, y_test, num_of_epochs, nn_cnn, batch_size, validation_share=0.1)
original_accuracies.extend(accuracies)
print_validation_acc(history, 0)
# other training tasks
for i in range(num_of_tasks - 1):
print("\n\n Task: %d \n" % (i + 1))
X_train, y_train, _, _ = datasets[i + 1] # use X_test and y_test from the first task to get its accuracy
history, accuracies, _ = train_model(model, X_train, y_train, X_test, y_test, num_of_epochs, nn_cnn, batch_size, validation_share=0.1)
original_accuracies.extend(accuracies)
print_validation_acc(history, i + 1)
return original_accuracies
def superposition_training_cifar(model, datasets, num_of_epochs, num_of_tasks, context_matrices, nn_cnn, batch_size=32):
"""
Train model for 'num_of_tasks' tasks, each task is a different disjoint set of CIFAR-100 images.
Check how accuracy for the first set of images is changing through tasks using superposition training.
:param model: Keras model instance
:param datasets: list of disjoint datasets with corresponding train and test set
:param num_of_epochs: number of epochs to train the model
:param num_of_tasks: number of different tasks
:param context_matrices: multidimensional numpy array with random context (binary superposition)
:param nn_cnn: usage of (convolutional) neural network (possible values: 'nn' or 'cnn')
:param batch_size: batch size - number of samples per gradient update (default = 32)
:return: list of test accuracies for 10 epochs for each task
"""
original_accuracies = []
# first training task - 10 classes of CIFAR-100 dataset
X_train, y_train, X_test, y_test = datasets[0] # these X_test and y_test are used for testing all tasks
history, _, accuracies = train_model(model, X_train, y_train, X_test, y_test, num_of_epochs, nn_cnn, batch_size, validation_share=0.1,
mode='superposition', context_matrices=context_matrices, task_index=0)
original_accuracies.extend(accuracies)
print_validation_acc(history, 0)
# other training tasks
for i in range(num_of_tasks - 1):
print("\n\n i: %d \n" % i)
# multiply current weights with context matrices for each layer (without changing weights from bias node)
if nn_cnn == 'nn':
context_multiplication(model, context_matrices, i + 1)
elif nn_cnn == 'cnn':
context_multiplication_CNN(model, context_matrices, i + 1)
X_train, y_train, _, _ = datasets[i + 1] # use X_test and y_test from the first task to get its accuracy
history, _, accuracies = train_model(model, X_train, y_train, X_test, y_test, num_of_epochs, nn_cnn, batch_size, validation_share=0.1,
mode='superposition', context_matrices=context_matrices, task_index=i + 1)
original_accuracies.extend(accuracies)
print_validation_acc(history, i + 1)
return original_accuracies
if __name__ == '__main__':
# to avoid cuDNN error (https://github.com/tensorflow/tensorflow/issues/24496)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
dataset = 'mnist' # 'mnist' or 'cifar'
nn_cnn = 'nn' # 'nn' or 'cnn' ('cifar' is only implemented with 'cnn')
input_size = (28, 28) if dataset == 'mnist' else (32, 32, 3) # input sizes for MNIST and CIFAR images
num_of_units = 1000
num_of_classes = 10
num_of_tasks = 50 if dataset == 'mnist' else 10
num_of_epochs = 10
batch_size = 600 if dataset == 'mnist' else 50
train_normal = True
train_superposition = True
if train_normal:
if nn_cnn == 'nn':
model = nn(input_size, num_of_units, num_of_classes)
elif nn_cnn == 'cnn':
model = cnn(input_size, num_of_classes)
else:
raise ValueError("'nn_cnn' variable must have value 'nn' or 'cnn'")
d = get_dataset(dataset, nn_cnn, input_size, num_of_classes)
if dataset == 'mnist':
X_train, y_train, X_test, y_test = d
acc_normal = normal_training_mnist(model, X_train, y_train, X_test, y_test, num_of_epochs, num_of_tasks, nn_cnn, batch_size)
elif dataset == 'cifar':
acc_normal = normal_training_cifar(model, d, num_of_epochs, num_of_tasks, nn_cnn, batch_size)
else:
raise ValueError("'dataset' variable must have value 'mnist' or 'cifar'")
if train_superposition:
if nn_cnn == 'nn':
model = nn(input_size, num_of_units, num_of_classes)
context_matrices = get_context_matrices(input_size, num_of_units, num_of_tasks)
elif nn_cnn == 'cnn':
model = cnn(input_size, num_of_classes)
context_matrices = get_context_matrices_CNN(model, num_of_tasks)
else:
raise ValueError("nn_cnn variable must have value 'nn' or 'cnn'")
d = get_dataset(dataset, nn_cnn, input_size, num_of_classes)
if dataset == 'mnist':
X_train, y_train, X_test, y_test = d
acc_superposition = superposition_training_mnist(model, X_train, y_train, X_test, y_test, num_of_epochs,
num_of_tasks, context_matrices, nn_cnn, batch_size)
elif dataset == 'cifar':
acc_superposition = superposition_training_cifar(model, d, num_of_epochs, num_of_tasks, context_matrices, nn_cnn, batch_size)
else:
raise ValueError("'dataset' variable must have value 'mnist' or 'cifar'")
plot_general(acc_superposition, acc_normal, ['Superposition model', 'Baseline model'],
'Superposition vs. baseline model with ' + nn_cnn.upper() + ' model', 'Epoch', 'Accuracy (%)', [10], 0, 100)
|
# encoding: utf8
from test_base import DisplaySDKTest
from utils import *
SLEEP_INTERVAL = 2
class MRAIDTest(DisplaySDKTest):
def test_mraid_single_expand(self):
self.driver.orientation = 'PORTRAIT'
set_channel_id(self, "24338")
click_load_ad_btn(self, "BANNER")
accept_location(self)
block_until_webview(self)
sleep(SLEEP_INTERVAL)
click_on_webview(self)
sleep(SLEEP_INTERVAL)
assert_exists(self, "Lock to Landscape")
# Rotate phone
self.driver.orientation = 'LANDSCAPE'
sleep(SLEEP_INTERVAL)
assert_exists(self, "Rotate To Portrait")
# Rotate phone back
self.driver.orientation = 'PORTRAIT'
sleep(SLEEP_INTERVAL)
assert_exists(self, "Lock to Landscape")
click_btn(self, "Lock to Landscape")
sleep(SLEEP_INTERVAL)
assert_exists(self, "Lock to Portrait")
self.driver.orientation = 'LANDSCAPE'
click_btn(self, "Lock to Portrait")
sleep(SLEEP_INTERVAL)
assert_exists(self, "Release Lock")
# Rotate phone
#self.driver.orientation = 'LANDSCAPE'
#sleep(SLEEP_INTERVAL)
assert_exists(self, "Release Lock")
# Rotate phone back
#self.driver.orientation = 'PORTRAIT'
sleep(SLEEP_INTERVAL)
assert_exists(self, "Release Lock")
click_btn(self, "Release Lock")
sleep(SLEEP_INTERVAL)
assert_not_exists(self, "Lock to Portrait")
assert_not_exists(self, "Lock To Landscape")
assert_not_exists(self, "Release Lock")
# Rotate phone
self.driver.orientation = 'LANDSCAPE'
sleep(SLEEP_INTERVAL)
assert_not_exists(self, "Lock To Portrait")
assert_not_exists(self, "Lock to Landscape")
assert_not_exists(self, "Release Lock")
# Rotate phone back
self.driver.orientation = 'PORTRAIT'
sleep(SLEEP_INTERVAL)
click_x_btn(self)
def test_mraid_two_part_expand(self):
self.driver.orientation = 'PORTRAIT'
set_channel_id(self, "24343")
click_load_ad_btn(self, "Banner")
accept_location(self)
block_until_webview(self)
save_source(self)
block_until_element(self, ["Two Part Expand", "Two Part Expand Link"])
click_btn(self, "Two Part Expand")
click_btn(self, "Two Part Expand Link")
sleep(SLEEP_INTERVAL)
switch_to_web_context(self)
block_until_css_element(self, "#openiab")
self.driver.orientation = 'LANDSCAPE'
sleep(SLEEP_INTERVAL)
self.driver.orientation = 'PORTRAIT'
sleep(SLEEP_INTERVAL)
# Click on open iab.com button
click_btn(self, "#openiab")
sleep(SLEEP_INTERVAL)
if len(self.driver.find_elements_by_id("android:id/resolver_grid")) != 0:
click_btn(self, "Chrome")
sleep(SLEEP_INTERVAL)
click_btn(self, "Always")
sleep(SLEEP_INTERVAL)
click_btn(self, "OK")
switch_to_native_context(self)
sleep(SLEEP_INTERVAL)
assert_viewing_browser(self)
# Close browser
click_back_btn(self)
sleep(SLEEP_INTERVAL)
switch_to_web_context(self)
sleep(SLEEP_INTERVAL)
# Play video
click_btn(self, "#openvideo")
sleep(SLEEP_INTERVAL*3)
assert_viewing_video(self)
# Close video
click_back_btn(self)
# Assert expand again does nothing
click_btn(self, "Expand Again")
sleep(SLEEP_INTERVAL)
assert_not_viewing_browser(self)
# Close expanded view
click_btn(self, "Click here to close.")
sleep(SLEEP_INTERVAL)
switch_to_native_context(self)
block_until_element(self, ["Two Part Expand", "Two Part Expand Link"])
sleep(SLEEP_INTERVAL)
click_btn(self, "Two Part Expand")
click_btn(self, "Two Part Expand Link")
click_btn(self, "Two Part Expand")
click_btn(self, "Two Part Expand Link")
sleep(SLEEP_INTERVAL)
block_until_element(self, "Click here to close.")
# TODO Click upper top corner and assert close
click_btn(self, "Click here to close.")
sleep(SLEEP_INTERVAL)
save_source(self)
assert_href_called(self, r"mraid://useCustomClose")
assert_href_called(self, r"mraid://setOrientationProperties")
assert_href_called(self, r"mraid://expand\?url=")
assert_href_called(self, r"mraid://open\?url=")
assert_href_called(self, r"mraid://playVideo")
assert_href_called(self, r"mraid://close")
def test_mraid_resize(self):
set_channel_id(self, "24348")
click_load_ad_btn(self, "Banner")
accept_location(self)
block_until_webview(self)
sleep(SLEEP_INTERVAL)
block_until_element(self, "Click to Resize")
click_btn(self, "Click to Resize")
sleep(SLEEP_INTERVAL)
assert_href_called(self, r"mraid://resize")
# Click open url
click_btn(self, "Open URL")
sleep(SLEEP_INTERVAL)
assert_viewing_browser(self)
# Close browser
click_back_btn(self)
sleep(SLEEP_INTERVAL)
assert_href_called(self, r"mraid://open\?url=.*www\.iab\.net")
# Open map
click_btn(self, "Click to Map")
sleep(SLEEP_INTERVAL)
if len(self.driver.find_elements_by_id("android:id/resolver_grid")) != 0:
click_btn(self, "Maps")
sleep(SLEEP_INTERVAL)
click_btn(self, "Always")
sleep(SLEEP_INTERVAL)
click_btn(self, "OK")
sleep(SLEEP_INTERVAL)
assert_viewing_maps(self)
# Close map
click_back_btn(self)
sleep(SLEEP_INTERVAL)
assert_href_called(self, r"mraid://open\?url=.*maps\.google\.com")
# Open app
click_btn(self, "Click to App")
sleep(SLEEP_INTERVAL)
if len(self.driver.find_elements_by_id("android:id/resolver_grid")) != 0:
click_btn(self, "Play Store")
sleep(SLEEP_INTERVAL)
click_btn(self, "Always")
sleep(SLEEP_INTERVAL)
click_btn(self, "OK")
sleep(SLEEP_INTERVAL)
assert_viewing_google_play(self)
click_back_btn(self)
sleep(SLEEP_INTERVAL)
assert_href_called(self, r"mraid://open\?url=.*play.google.com")
# Open video
click_btn(self, "Play Video")
sleep(SLEEP_INTERVAL)
assert_viewing_video(self)
sleep(SLEEP_INTERVAL)
# Close video
click_back_btn(self)
sleep(SLEEP_INTERVAL)
self.driver.orientation = 'PORTRAIT'
sleep(SLEEP_INTERVAL)
assert_href_called(self, r"mraid://playVideo")
"""
click_btn(self, "Click to Resize")
# Send sms
click_btn(self, "SMS")
sleep(SLEEP_INTERVAL)
assert_viewing_sms(self)
sleep(SLEEP_INTERVAL)
click_back_btn(self)
sleep(SLEEP_INTERVAL)
assert_href_called(self, r"mraid://open\?url=sms")
# Click to call
click_btn(self, "Click to Call")
sleep(SLEEP_INTERVAL)
assert_viewing_call(self)
# Close call
click_back_btn(self)
sleep(SLEEP_INTERVAL)
click_back_btn(self)
sleep(SLEEP_INTERVAL)
assert_href_called(self, r"mraid://open\?url=tel")
"""
def test_mraid_full_page(self):
set_channel_id(self, "24353")
click_load_ad_btn(self, "Banner")
accept_location(self)
block_until_webview(self)
sleep(SLEEP_INTERVAL)
save_source(self)
click_btn(self, "HIDE")
click_btn(self, "Hide")
sleep(SLEEP_INTERVAL)
click_btn(self, "SHOW")
click_btn(self, "Show")
sleep(SLEEP_INTERVAL)
switch_to_web_context(self)
sleep(SLEEP_INTERVAL)
# Assert that off screen timer is not all zeros
timer = self.driver.find_elements_by_css_selector("#offscreentimer")[0]
self.assertNotEquals(timer.text, "00:00:00")
# Assert that on screen timer is not all zeros
timer = self.driver.find_elements_by_css_selector("#onscreentimer")[0]
self.assertNotEquals(timer.text, "00:00:00")
def test_mraid_resize_error(self):
set_channel_id(self, "24358")
# Call specific size
click_load_ad_btn(self, "Banner", "300x250")
accept_location(self)
block_until_webview(self)
sleep(SLEEP_INTERVAL)
# Click bad timing
click_parent_btn(self, "bad timing")
sleep(SLEEP_INTERVAL)
click_parent_btn(self, "bad values")
sleep(SLEEP_INTERVAL)
click_parent_btn(self, "small")
sleep(SLEEP_INTERVAL)
click_parent_btn(self, "big")
sleep(SLEEP_INTERVAL)
click_parent_btn(self, "←")
sleep(SLEEP_INTERVAL)
click_parent_btn(self, "→")
sleep(SLEEP_INTERVAL)
click_parent_btn(self, "↑")
sleep(SLEEP_INTERVAL)
click_parent_btn(self, "↓")
sleep(SLEEP_INTERVAL)
click_parent_btn(self, "TRUE")
sleep(SLEEP_INTERVAL)
click_parent_btn(self, "←")
sleep(SLEEP_INTERVAL)
click_parent_btn(self, "→")
sleep(SLEEP_INTERVAL)
click_parent_btn(self, "↑")
sleep(SLEEP_INTERVAL)
click_parent_btn(self, "↓")
sleep(SLEEP_INTERVAL)
click_parent_btn(self, "X")
def test_mraid_video_interstitial(self):
set_channel_id(self, "24363")
click_load_ad_btn(self, "Interstitial")
sleep(SLEEP_INTERVAL)
accept_location(self)
block_until_webview(self)
switch_to_web_context(self)
sleep(SLEEP_INTERVAL)
block_until_css_element(self, "video")
sleep(SLEEP_INTERVAL)
# Assert landscape view
assert_landscape_view(self)
switch_to_native_context(self)
sleep(30)
assert_href_called(self, r"mraid://useCustomClose")
assert_href_called(self, r"mraid://close")
|
#Q1: WAP to check if the entered character is a vowel or a consonant
def Ques1():
ch = raw_input("Enter a letter: ");
if (ch=='a' or ch=='e' or ch=='i' or ch=='o' or ch=='u' or ch=='A' or ch=='E' or ch=='I' or ch=='O' or ch=='U'):
print ch,"is a vowel";
else:
print ch,"is a consonant";
#Q2: WAP to check the validity of triangle and then classify it on the basis of its sides
def Ques2():
side1, side2, side3 = input("Enter the length of side 1: "), input("Enter the length of side 2: "), input("Enter the length of side 3: ");
print "The Triangle exists" if (side1+side2>side3 and side2+side3>side1 and side3+side1>side2) else "The Triangle doesn't exist.",;
if(side1==side2 and side2==side3):
print "and is equilateral.";
elif(side1==side2 or side2==side3 or side3==side1):
print "and is isosceles.";
elif(side1!=side2 and side2!=side3 and side3!=side1 and side1+side2>side3 and side2+side3>side1 and side3+side1>side2):
print "and is scalene.";
#Q3: WAP to find the minimum number of notes or coins of different denominations
# required for the given amount (Rs. 2000, 500, 200, 100, 50, 20, 10, 5, 2, 1)
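# e.g. for Rs. 2751 the greedy count is 2000 x 1 + 500 x 1 + 200 x 1 + 50 x 1 + 1 x 1 = 5 notes/coins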
def Ques3():
amt = input("Enter the amount: ");
totalNumOfNotes=0
denominitions = [2000,500,200,100,50,20,10,5,2,1];
for i in range (0,10):
if(amt >= denominitions[i]):
numOfNotes = amt//denominitions[i];
print denominitions[i],":",numOfNotes;
amt = amt-(numOfNotes*denominitions[i]);
totalNumOfNotes+=numOfNotes;
print "The total number of notes needed are",totalNumOfNotes;
#O4: WAP to perform operations such as multiply divide add subtract and modulus as per user's choice
def Ques4():
num1, num2 = input("Enter a number: "), input("Enter a number: ");
choice = input("Enter:\n1 for Addition\n2 for Subtraction\n3 for Multiplication\n4 for Division\n5 for Remainder\n");
if choice == 1:
print num1,"+",num2,"=",num1+num2;
elif choice == 2:
print num1,"-",num2,"=",num1-num2;
elif choice == 3:
print num1,"X",num2,"=",num1*num2;
elif choice == 4:
print num1,"/",num2,"=",num1/num2;
elif choice == 5:
print num1,"mod",num2,"=",num1%num2;
else:
print("Wrong Choice!!");
#PSV main()
iterate = True;
while(iterate):
quesChoice = input("Enter the question number (1 to 4) you want to view(Press 0 to terminate): ");
if(quesChoice==1):
Ques1();
elif(quesChoice==2):
Ques2();
elif(quesChoice==3):
Ques3();
elif(quesChoice==4):
Ques4();
elif(quesChoice==0):
iterate=False;
else:
print "Enter a correct choice!!!"
|
#
# from shell import main as run
|
import math
import sys
def hours_to_min_sec(time, milliseconds=False, txt=False):
hours = math.floor(time)
minutes = (time - hours) * 60
seconds = (minutes % 1) * 60
data = {
"hours": int(hours),
"minutes": int(math.floor(minutes)),
"seconds": int(math.floor(seconds))
}
if txt:
data["txt"] = str(data["hours"]) + ' hour ' + \
str(data["minutes"]) + " min " + str(data["seconds"]) + " sec" if hours else str(
data["minutes"]) + " min " + str(data["seconds"]) + " sec"
if milliseconds:
data["milliseconds"] = int(math.floor((seconds % 1) * 1000))
if data["milliseconds"]:
data["txt"] = data["txt"] + " " + \
str(data["milliseconds"]) + " millisecond"
return data
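# e.g. hours_to_min_sec(1.5, txt=True) returns
# {'hours': 1, 'minutes': 30, 'seconds': 0, 'txt': '1 hour 30 min 0 sec'}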
def main():
length = len(sys.argv)
if length > 1:
try:
print(hours_to_min_sec(float(sys.argv[1])))
        except ValueError:
print("argument must be a number")
else:
print("missing argument")
if __name__ == '__main__':
main()
|
import requests
import urllib3
import json
from global_secrets import api_get_url,api_default_group
# Disable urllib3 warnings when certificate verification is turned off:
verify = False
if not verify:
    urllib3.disable_warnings()
def _url(path):
url=api_get_url()
return url + path
#Operations on authorization
#Authenticate:
def get_auth_token(username, password):
auth_json={
"username": username,
"password": password ,
"cookie": "true",
"csrfToken": "false"
}
return requests.post(_url('/api/v3/authorize'), json=auth_json, verify=verify)
def get_tenant_token (username, password, accountId):
auth_json={
"accountId": accountId,
"username": username,
"password": password ,
"cookie": "true",
"csrfToken": "false"
}
return requests.post(_url('/api/v3/authorize'), json=auth_json, verify=verify)
#Operations at tenant level:
#Get tenants accounts
def get_tenants_accounts(authtoken):
headers={'Authorization': 'Bearer ' + authtoken }
return requests.get (_url('/api/v3/grid/accounts?limit=250'), headers=headers , verify=verify)
def get_tenant_by_name (authtoken, tenant_name):
    #Returns the tenant id that matches the name
query=get_tenants_accounts(authtoken)
for items in query.json()['data']:
if items['name']==tenant_name:
return items['id']
#Gets the storage usage information for the Storage Tenant Account
def get_storage_usage_in_tenant(tenant_id, authtoken):
headers={'Authorization': 'Bearer ' + authtoken }
return requests.get (_url('/api/v3/grid/accounts/{}/usage'.format(tenant_id)), headers=headers , verify=verify)
#Creates a new Storage Tenant Account
def create_new_tenant(authtoken, account_name, quota, root_password):
    headers={'Authorization': 'Bearer ' + authtoken }
    data = {"name": account_name, "capabilities": ["management", "s3"],
            "policy": {"useAccountIdentitySource": False, "allowPlatformServices": False,
                       "quotaObjectBytes": quota},
            "password": root_password, "grantRootAccessToGroup": api_default_group()}
    return requests.post(_url('/api/v3/grid/accounts'), json=data, headers=headers, verify=verify)
#Operations on alarms:
def get_alarms(authtoken):
headers={'Authorization': 'Bearer ' + authtoken }
return requests.get(_url('/api/v3/grid/alarms'), headers=headers, verify=verify)
def get_health(authtoken):
headers={'Authorization': 'Bearer ' + authtoken }
return requests.get(_url('/api/v3/grid/health'), headers=headers, verify=verify)
def get_health_topology(authtoken):
headers={'Authorization': 'Bearer ' + authtoken }
return requests.get(_url('/api/v3/grid/health/topology'), headers=headers, verify=verify)
#Operations on Users
#Lists Grid Administrator Users
def get_admin_users(authtoken):
headers={'Authorization': 'Bearer ' + authtoken }
return requests.get(_url('/api/v3/grid/users'), headers=headers, verify=verify)
#operations on tenants... needs a X-Csrf-Token
def get_usage(tenant_authtoken):
headers={'Authorization': 'Bearer ' + tenant_authtoken }
return requests.get(_url('/api/v3/org/usage'), headers=headers, verify=verify)
#Inside a Tenant
def create_new_tenant_user_group(tenant_authtoken, group_name, bucket_name):
#/org/groups Creates a new Tenant User Group only with access to a specific bucket.
#This group can operate over the bucket but not over the tenant, and can generate S3 keys.
headers={'Authorization': 'Bearer ' + tenant_authtoken }
body={
"displayName": group_name,
"policies": {
"management": {
"manageAllContainers": False,
"manageEndpoints": False,
"manageOwnS3Credentials": True,
"rootAccess": False },
"s3": {
"Statement": [
{
"Effect": "Allow",
"Action": "s3:*",
"Resource": [
"arn:aws:s3:::"+bucket_name,
"arn:aws:s3:::"+bucket_name+"/*"
],
}
]
}
},
"uniqueName": "federated-group/"+group_name
}
data=body
#For debug:
print (json.dumps(data, indent=1))
return requests.post(_url('/api/v3/org/groups'), json=data, headers=headers, verify=verify)
def create_new_tenant_user_group_noS3access(tenant_authtoken, group_name, bucket_name):
#/org/groups Creates a new Tenant User Group only with access to a specific bucket.
#This group can operate over the bucket but not over the tenant, and can generate S3 keys.
headers={'Authorization': 'Bearer ' + tenant_authtoken }
body={
"displayName": group_name,
"policies": {
"management": {
"manageAllContainers": False,
"manageEndpoints": False,
"manageOwnS3Credentials": True,
"rootAccess": False }
},
"uniqueName": "federated-group/"+group_name
}
data=body
#For debug:
print (json.dumps(data, indent=1))
return requests.post(_url('/api/v3/org/groups'), json=data, headers=headers, verify=verify)
def create_new_bucket(tenant_authtoken,bucket_name, region):
#/org/containers
#Create a bucket for an S3 tenant account
headers={'Authorization': 'Bearer ' + tenant_authtoken }
data={
"name": bucket_name,
"region": region,
}
#For debug:
print (json.dumps(data, indent=1))
return requests.post(_url('/api/v3/org/containers'), json=data, headers=headers, verify=verify)
#/org/containers/{bucketName}/last-access-time
#Determines if last access time (LAT) updates are enabled on a bucket
def get_last_access_time(tenant_authtoken,bucket_name):
headers={'Authorization': 'Bearer ' + tenant_authtoken }
return requests.get (_url('/api/v3/org/containers/{}/last-access-time'.format(bucket_name)), headers=headers , verify=verify)
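# Minimal usage sketch (hypothetical credentials; assumes api_get_url() points at a
# StorageGRID Admin Node and that the authorize response returns the token under 'data'):
# grid_token = get_auth_token('root', 'password').json()['data']
# print(get_health(grid_token).json())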
|
import os
from modulefinder import Module
from pathlib import Path
from typing import Iterable
import fiona
import geopandas as gpd
import pyproj
import rasterio
from fiona.errors import DriverError
from rasterio.errors import RasterioIOError
from shapely.geometry import Polygon, box
from arcpy2foss.utils import reproject
def extent_from_file(filename: Path, as_wgs84: bool = True, lib: Module = fiona) -> Polygon:
"""Get the spatial extent from a file
Parameters
----------
filename : Path
Path of file
as_wgs84 : bool, optional
Return extent in WGS84, by default True
lib : Module
The library use to open the file, e.g. fiona, rasterio
Returns
-------
Polygon
Bounding box extent
"""
with lib.open(filename) as src:
bounds = box(*src.bounds)
prj = pyproj.CRS.from_user_input(src.crs)
epsg_code = prj.to_epsg(min_confidence=20)
if as_wgs84 and epsg_code != 4326:
bounds = reproject(from_crs=prj, to_crs="EPSG:4326", geom=bounds)
return bounds
def get_extent(filename: Path, as_wgs84: bool = True) -> Polygon:
"""Get the bounding box extent from a file
Parameters
----------
filename : Path
Path to raster or vector file
as_wgs84 : bool, optional
Return the Polygonal extent in the WGS84 CRS, by default True
Returns
-------
Polygon
A bounding box extent of the data in ``filename``
"""
try:
return extent_from_file(filename, as_wgs84=as_wgs84, lib=fiona)
except DriverError:
pass
try:
return extent_from_file(filename, as_wgs84=as_wgs84, lib=rasterio)
except RasterioIOError:
pass
    # raise if neither OGR nor GDAL can read the data
raise NotImplementedError(f"Could not open file as either raster or vector: {filename}")
def extents_to_features(input_files: Iterable[Path], output_file: Path, output_format: str = "GeoJSON") -> None:
"""Create a new vector file that contains the bounding box extents of each
given file in ``input_files``.
Parameters
----------
input_files : Iterable[Path]
        List of input files (raster or vector)
output_file : Path
Path to output file (vector) that will be created with the bounding
        boxes of each file from ``input_files``. The CRS will be WGS84.
output_format : str
The output format (or Driver) to use when writing ``output_file``.
        See also ``fiona.supported_drivers``.
"""
extents = []
for fn in input_files:
extents.append({"filename": os.path.basename(fn), "geometry": get_extent(fn, as_wgs84=True)})
out = gpd.GeoDataFrame(extents, geometry="geometry", crs="EPSG:4326")
out.to_file(output_file, driver=output_format)
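# Minimal usage sketch (hypothetical file names):
# extents_to_features([Path("elevation.tif"), Path("parcels.gpkg")], Path("extents.geojson"))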
|
# -*- coding: utf-8 -*-
import json
from oauthlib.oauth2 import RequestValidator
from oauthlib.oauth2.rfc6749 import errors, utils
from oauthlib.oauth2.rfc6749.grant_types.base import GrantTypeBase
import logging
log = logging.getLogger(__name__)
JWT_BEARER = 'urn:ietf:params:oauth:grant-type:jwt-bearer'
class JWTBearerGrant(GrantTypeBase):
def __init__(self, request_validator=None):
self.request_validator = request_validator or RequestValidator()
def create_token_response(self, request, token_handler):
headers = {
'Content-Type': 'application/json',
'Cache-Control': 'no-store',
'Pragma': 'no-cache',
}
try:
log.debug('Validating access token request, %r.', request)
self.validate_token_request(request)
except errors.OAuth2Error as e:
log.debug('Client error in token request. %s.', e)
return headers, e.json, e.status_code
token = token_handler.create_token(request, refresh_token=False)
return headers, json.dumps(token), 200
def validate_token_request(self, request):
if request.grant_type != JWT_BEARER:
raise errors.UnsupportedGrantTypeError(request=request)
if request.assertion is None:
raise errors.InvalidRequestError('Missing assertion parameter.',
request=request)
for param in ('grant_type', 'scope'):
if param in request.duplicate_params:
raise errors.InvalidRequestError(
'Duplicate %s parameter.' % param,
request=request)
# Since the JSON Web Token is signed by its issuer client
# authentication is not strictly required when the token is used as
# an authorization grant. However, if client credentials are provided
# they should be validated as describe in Section 3.1.
# https://tools.ietf.org/html/draft-ietf-oauth-jwt-bearer-12#section-3.1
if self.request_validator.client_authentication_required(request):
log.debug('Authenticating client, %r.', request)
if not self.request_validator.authenticate_client(request):
log.debug('Invalid client (%r), denying access.', request)
raise errors.InvalidClientError(request=request)
# REQUIRED. The web token issued by the client.
log.debug('Validating assertion %s.', request.assertion)
if not self.request_validator.validate_bearer_token(
request.assertion, request.scopes, request):
log.debug('Invalid assertion, %s, for client %r.',
request.assertion, request.client)
raise errors.InvalidGrantError('Invalid assertion.',
request=request)
original_scopes = utils.scope_to_list(
self.request_validator.get_original_scopes(
request.assertion, request))
if request.scope:
request.scopes = utils.scope_to_list(request.scope)
if (not all((s in original_scopes for s in request.scopes)) and
not self.request_validator.is_within_original_scope(
request.scopes, request.refresh_token, request)):
                log.debug('Refresh token %s lacks requested scopes, %r.',
request.refresh_token, request.scopes)
raise errors.InvalidScopeError(request=request)
else:
request.scopes = original_scopes
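# Minimal registration sketch (assumptions: a concrete RequestValidator subclass called
# MyValidator and the stock oauthlib TokenEndpoint/BearerToken classes, none of which are
# defined in this module):
# from oauthlib.oauth2.rfc6749.endpoints import TokenEndpoint
# from oauthlib.oauth2.rfc6749.tokens import BearerToken
# validator = MyValidator()
# endpoint = TokenEndpoint(default_grant_type=JWT_BEARER,
#                          default_token_type=BearerToken(validator),
#                          grant_types={JWT_BEARER: JWTBearerGrant(validator)})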
|
{
"targets": [
{
"include_dirs": [
"<!@(node -p \"require('node-addon-api').include\")",
"lib/emokit-c/include",
"/usr/local/include"
],
"dependencies": [
"<!(node -p \"require('node-addon-api').gyp\")"
],
"target_name": "emotiv",
"sources": [
"lib/fprintf_override.c",
"lib/bindings.cc",
"lib/emokit-c/src/emokit.c"
],
"conditions": [
['OS=="linux"', {
'libraries': [
"/usr/lib/x86_64-linux-gnu/libhidapi-hidraw.so",
"/usr/lib/libmcrypt.so"
]
}],
['OS=="mac"', {
"libraries": [
"/usr/local/lib/libhidapi.0.dylib",
"/usr/local/lib/libmcrypt.4.4.8.dylib"
]
}]
],
"defines": [
"EXTERNAL_API"
],
"cflags!": [
"-fno-exceptions"
],
"cflags_cc!": [
"-fno-exceptions"
],
"xcode_settings": {
"GCC_ENABLE_CPP_EXCEPTIONS": "YES",
"CLANG_CXX_LIBRARY": "libc++",
"MACOSX_DEPLOYMENT_TARGET": "10.7",
"OTHER_LDFLAGS": [
"-framework IOKit"
]
},
"msvs_settings": {
"VCCLCompilerTool": {
"ExceptionHandling": 1
}
}
}
]
}
|
import sys
from pyspark.sql import SparkSession
from pyspark.sql.functions import count
if __name__ == "__main__":
if len(sys.argv) != 2:
print("Usage: mnmcount <file>", file=sys.stderr)
sys.exit(-1)
spark = (SparkSession
.builder
.appName("PythonMnMCount")
.getOrCreate())
mnm_file = sys.argv[1]
mnm_df = (spark.read.format("csv")
.option("header", "true")
.option("inferSchema", "true")
.load(mnm_file))
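    # The input CSV is assumed to have at least the columns State, Color and Count
    # (header row and column types are inferred by the reader options above).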
count_mnm_df = (mnm_df
.select("State", "Color", "Count")
.groupby("State", "Color")
.agg(count("Count").alias("Total"))
.orderBy("Total", ascending=False))
count_mnm_df.show(n=60, truncate=False)
print("Total Rows = %d" % (count_mnm_df.count()))
ca_count_mnm_df = (mnm_df
.select("State", "Color", "Count")
.where(mnm_df.State == "CA")
.groupby("State", "Color")
.agg(count("Count").alias("Total"))
.orderBy("Total", ascending=False))
ca_count_mnm_df.show(n=10, truncate=False)
spark.stop()
|
from django.contrib import admin
from leaflet.admin import LeafletGeoAdmin
from pothole.models import Pothole, PotholeRepair
# Register your models here.
admin.site.register(Pothole, LeafletGeoAdmin)
admin.site.register(PotholeRepair)
|
from typing import TypeVar, Generic, Callable, Type, cast
from PySide2.QtCore import Signal
_T = TypeVar('_T')
class SimpleSignal:
def __init__(self):
pass
def emit(self):
pass
def connect(self, c: Callable[[], None]):
pass
class TypedSignal(Generic[_T]):
def __init__(self, _: Type[_T]):
pass
def emit(self, o: _T):
pass
def connect(self, c: Callable[[_T], None]):
pass
def typedsignal(t: Type[_T]) -> TypedSignal[_T]:
return cast(TypedSignal[_T], Signal(t))
def signal() -> SimpleSignal:
return cast(SimpleSignal, Signal())
__all__ = ['SimpleSignal', 'TypedSignal', 'typedsignal', 'signal']
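# Illustrative usage sketch (hypothetical QObject subclass; like plain Signal,
# these typed wrappers are meant to be declared as class attributes):
# from PySide2.QtCore import QObject
# class Worker(QObject):
#     finished = signal()              # type checkers see SimpleSignal
#     progress = typedsignal(int)      # type checkers see TypedSignal[int]
# Worker().progress.connect(lambda value: print(value))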
|
import sys
# Models
from lokki.model import AdaBoost
from lokki.model import GradientBoosting
from lokki.model import RandomForest
from lokki.model import LogisticRegressionModel
from lokki.model import RidgeClassifierModel
from lokki.model import SVM
from lokki.model import DecisionTree
from lokki.model import ExtraTree
from lokki.model import LDA
from lokki.model import QDA
# Data Transforms
from lokki.data_transform import NoPreprocessing
from lokki.data_transform import Log
from lokki.data_transform import ZScore
# Feature Tranforms
from lokki.feature_transform import FactorAnalysis
from lokki.feature_transform import ICA
from lokki.feature_transform import NMF
from lokki.feature_transform import PCA
from lokki.feature_transform import ChiSquare
from lokki.feature_transform import MutualInformation
from lokki.feature_transform import HFE
from lokki.feature_transform import Void
class PipelineComponents:
def __init__(self, dataset_shape, parameters, taxonomy = None):
self.dataset_shape = dataset_shape
self.parameters = parameters
self.taxonomy = taxonomy
def get_component(self, name, component_type):
if component_type.strip().lower() == 'data_transform':
if name.lower() == 'none' or name.lower() == 'no_data_transform':
return NoPreprocessing()
elif name.lower() == 'log':
return Log()
elif name.lower() == 'zscore':
return ZScore()
else:
sys.exit('ERROR: ' + ' Could not find data transform method "' + name + '"')
if component_type.strip().lower() == 'feature_transform':
if name.lower() == 'none' or name.lower() == 'no_feature_transform':
return Void(self.dataset_shape, self.parameters)
elif name.lower() == 'chi_square':
return ChiSquare(self.dataset_shape, self.parameters)
elif name.lower() == 'mutual_information':
return MutualInformation(self.dataset_shape, self.parameters)
elif name.lower() == 'hfe':
return HFE(self.dataset_shape, self.taxonomy)
elif name.lower() == 'factor_analysis':
return FactorAnalysis(self.dataset_shape, self.parameters)
elif name.lower() == 'ica':
return ICA(self.dataset_shape, self.parameters)
elif name.lower() == 'nmf':
return NMF(self.dataset_shape, self.parameters)
elif name.lower() == 'pca':
return PCA(self.dataset_shape, self.parameters)
else:
sys.exit('ERROR: ' + ' Could not find feature transform method "' + name + '"')
if component_type.strip().lower() == 'model':
if name.lower() == 'random_forest':
return RandomForest()
elif name.lower() == 'decision_tree':
return DecisionTree()
elif name.lower() == 'lda':
return LDA()
            elif name.lower() == 'qda':
                return QDA()
            elif name.lower() == 'extra_tree':
                return ExtraTree()
            elif name.lower() == 'logistic_regression':
                return LogisticRegressionModel()
            elif name.lower() == 'ridge_regression':
                return RidgeClassifierModel()
            elif name.lower() == 'adaboost':
                return AdaBoost()
            elif name.lower() == 'gradient_boosting':
                return GradientBoosting()
            elif name.lower() == 'svm':
                return SVM()
else:
sys.exit('ERROR: ' + ' Could not find model "' + name + '"')
# Description: Returns a dictionary mapping the name to the component type
def get_name_to_component_map(self):
def no_space(string):
return '_'.join(string.split(' '))
return { no_space(NoPreprocessing.get_name('').lower()) : 'data_transform',
no_space(Log.get_name('').lower()) : 'data_transform',
no_space(ZScore.get_name('').lower()) : 'data_transform',
no_space(Void.get_name('').lower()) : 'feature_transform',
no_space(ChiSquare.get_name('').lower()) : 'feature_transform',
no_space(MutualInformation.get_name('').lower()) : 'feature_transform',
no_space(HFE.get_name('').lower()) : 'feature_transform',
no_space(FactorAnalysis.get_name('').lower()) : 'feature_transform',
no_space(ICA.get_name('').lower()) : 'feature_transform',
no_space(NMF.get_name('').lower()) : 'feature_transform',
no_space(PCA.get_name('').lower()) : 'feature_transform',
no_space(RandomForest.get_name('').lower()) : 'model',
no_space(DecisionTree.get_name('').lower()) : 'model',
no_space(LDA.get_name('').lower()) : 'model',
no_space(QDA.get_name('').lower()) : 'model',
no_space(ExtraTree.get_name('').lower()) : 'model',
no_space(LogisticRegressionModel.get_name('').lower()) : 'model',
no_space(RidgeClassifierModel.get_name('').lower()) : 'model',
no_space(AdaBoost.get_name('').lower()) : 'model',
no_space(GradientBoosting.get_name('').lower()) : 'model',
no_space(SVM.get_name('').lower()) : 'model'}
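# Illustrative usage sketch (hypothetical dataset shape and parameters; the real
# values come from the surrounding lokki pipeline):
# components = PipelineComponents(dataset_shape=(100, 20), parameters={})
# model = components.get_component('random_forest', 'model')
# scaler = components.get_component('zscore', 'data_transform')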
|
# coding: utf-8
# Copyright (c) Henniggroup.
# Distributed under the terms of the MIT License.
from __future__ import division, print_function, unicode_literals, \
absolute_import
"""
This script demonstrates the usage of the module
mpinterfaces/calibrate.py to set up and run VASP jobs
"""
import os
from math import sqrt
from collections import OrderedDict
from pymatgen.io.vasp.inputs import Incar, Poscar
from pymatgen.io.vasp.inputs import Potcar, Kpoints
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from mpinterfaces import get_struct_from_mp
from mpinterfaces.calibrate import CalibrateSlab
from mpinterfaces.interface import Interface
from mpinterfaces.utils import get_run_cmmnd
MAPI_KEY = os.environ.get("MAPI_KEY", "")
# get structure from materialsproject, use your own key
strt = get_struct_from_mp('PbS', MAPI_KEY=MAPI_KEY)
# convert from fcc primitive to conventional cell
# the conventional unit cell is used to create the slab
# this is important because the hkl specification for the required slab
# is with respect to the provided unit cell
sa = SpacegroupAnalyzer(strt)
structure_conventional = sa.get_conventional_standard_structure()
strt = structure_conventional.copy()
# create slab
iface = Interface(strt, hkl=[1, 1, 1],
min_thick=10, min_vac=10,
supercell=[1, 1, 1])
# sort the structure into groups of element atoms for Potcar mapping
iface.sort()
# vasp input
incar_dict = {
'SYSTEM': 'test',
'ENCUT': 500,
'ISIF': 2,
'IBRION': 2,
'ISMEAR': 1,
'EDIFF': 1e-06,
'NPAR': 8,
'SIGMA': 0.1,
'NSW': 100,
'PREC': 'Accurate'
}
incar = Incar.from_dict(incar_dict)
poscar = Poscar(iface)
potcar = Potcar(poscar.site_symbols)
kpoints = Kpoints.automatic(20)
# set job list. if empty a single job will be run
encut_list = [] # range(400,800,100)
turn_knobs = OrderedDict(
[
('ENCUT', encut_list)
])
# job directory
job_dir = 'vasp_job'
# run settings
qadapter = None
job_cmd = None
nprocs = 16
nnodes = 1
walltime = '24:00:00'
mem = 1000
incar['NPAR'] = int(sqrt(nprocs))
job_bin = '/home/km468/Software/VASP/vasp.5.3.5/vasp'
qadapter, job_cmd = get_run_cmmnd(nnodes=nnodes, nprocs=nprocs,
walltime=walltime,
job_bin=job_bin, mem=mem)
# setup calibration jobs and run
cal = CalibrateSlab(incar, poscar, potcar, kpoints,
system=iface.as_dict(),
turn_knobs=turn_knobs, qadapter=qadapter,
job_cmd=job_cmd, job_dir=job_dir)
cal.setup()
cal.run()
|
from pathlib import Path
from time import sleep
from kubernetes import client, config
from kubernetes.client.rest import ApiException
from kubernetes.config.config_exception import ConfigException
from retrying import retry
SIG_DIR = '.openmpi-controller'
SIG_TERM = f'{SIG_DIR}/term.sig'
POLL_STATUS_INTERVAL = 10
TERMINATED_PHASES = ('Succeeded', 'Failed')
class Controller:
"""
Controller is a sidecar container that extends the "main" container (openmpi-job).
It communicates with the main container using a shared volume mounted at the working directory.
Right before it finishes its work, it creates a semaphore file "term.sig" to signal the main container to terminate.
"""
def __init__(self, namespace, master):
self.namespace = namespace
self.master = master
Path(SIG_DIR).mkdir()
def __enter__(self):
log('controller entered')
try:
config.load_incluster_config()
except ConfigException:
config.load_kube_config()
self.api = client.CoreV1Api()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
log('controller exited')
Path(SIG_TERM).touch()
def wait_master_terminated(self):
while True:
phase = self._get_master_phase()
log(f'{self.master} is in "{phase}" phase')
if phase in TERMINATED_PHASES:
break
sleep(POLL_STATUS_INTERVAL)
@retry(stop_max_attempt_number=5,
wait_exponential_multiplier=1000,
retry_on_exception=lambda e: isinstance(e, ApiException))
def _get_master_phase(self):
pod = self.api.read_namespaced_pod(self.master, self.namespace)
return pod.status.phase
def log(msg):
print(msg, flush=True)
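# Illustrative usage sketch (hypothetical namespace and pod names; the real
# entrypoint is assumed to live elsewhere in the project):
# with Controller(namespace='default', master='openmpi-job-master') as controller:
#     controller.wait_master_terminated()
# On exit the controller touches '.openmpi-controller/term.sig', signalling the
# main container that it may terminate.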
|
import os
import time
import shutil
import logging
import subprocess
import torch
def init_dist(args):
args.distributed = False
if 'WORLD_SIZE' in os.environ:
args.distributed = int(os.environ['WORLD_SIZE']) > 1
if args.slurm:
args.distributed = True
if not args.distributed:
# task with single GPU also needs to use distributed module
os.environ['MASTER_ADDR'] = '127.0.0.1'
os.environ['WORLD_SIZE'] = '1'
os.environ['LOCAL_RANK'] = '0'
os.environ['RANK'] = '0'
args.local_rank = 0
args.distributed = True
args.device = 'cuda:0'
args.world_size = 1
args.rank = 0 # global rank
if args.distributed:
if args.slurm:
# processes are created with slurm
proc_id = int(os.environ['SLURM_PROCID'])
ntasks = int(os.environ['SLURM_NTASKS'])
node_list = os.environ['SLURM_NODELIST']
num_gpus = torch.cuda.device_count()
torch.cuda.set_device(proc_id % num_gpus)
addr = subprocess.getoutput(
f'scontrol show hostname {node_list} | head -n1')
os.environ['MASTER_ADDR'] = addr
os.environ['WORLD_SIZE'] = str(ntasks)
args.local_rank = proc_id % num_gpus
os.environ['LOCAL_RANK'] = str(proc_id % num_gpus)
os.environ['RANK'] = str(proc_id)
print(f'Using slurm with master node: {addr}, rank: {proc_id}, world size: {ntasks}')
os.environ['MASTER_PORT'] = args.dist_port
args.device = 'cuda:%d' % args.local_rank
torch.distributed.init_process_group(backend='nccl', init_method='env://')
args.world_size = torch.distributed.get_world_size()
args.rank = torch.distributed.get_rank()
if not args.slurm:
torch.cuda.set_device(args.rank)
        print(f'Training in distributed mode with multiple processes, 1 GPU per process. Process {args.rank}, total {args.world_size}.')
else:
print('Training with a single process on 1 GPU.')
# create logger file handler for rank 0,
# ignore the outputs of the other ranks
def init_logger(args):
logger = logging.getLogger()
if args.rank == 0:
if not os.path.exists(args.exp_dir):
os.makedirs(args.exp_dir)
logger.setLevel(logging.INFO)
fh = logging.FileHandler(os.path.join(args.exp_dir, f'log_{time.strftime("%Y%m%d_%H%M%S", time.localtime())}.txt'))
fh.setFormatter(logging.Formatter(fmt='%(asctime)s %(levelname)s %(message)s',
datefmt='%y-%m-%d %H:%M:%S'))
logger.addHandler(fh)
logger.info(f'Experiment directory: {args.exp_dir}')
else:
logger.setLevel(logging.ERROR)
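# Illustrative usage sketch (hypothetical argparse.Namespace; the attribute names
# mirror the ones accessed above and are assumptions about the project's CLI):
# import argparse
# args = argparse.Namespace(slurm=False, dist_port='29500', exp_dir='./exp')
# init_dist(args)    # fills in args.rank / args.world_size / args.device
# init_logger(args)  # only rank 0 writes a log file under args.exp_dir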
|
import os
import sys
import BaseHTTPServer
import threading
sys.path.append(os.path.join(os.path.dirname(
__file__), '../../../libbeat/tests/system'))
from beat.beat import TestCase
class BaseTest(TestCase):
@classmethod
def setUpClass(self):
self.beat_name = "heartbeat"
self.beat_path = os.path.abspath(
os.path.join(os.path.dirname(__file__), "../../"))
super(BaseTest, self).setUpClass()
def start_server(self, content, status_code):
class HTTPHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(status_code)
self.send_header('Content-Type', 'application/json')
self.end_headers()
self.wfile.write(content)
server = BaseHTTPServer.HTTPServer(('localhost', 8185), HTTPHandler)
thread = threading.Thread(target=server.serve_forever)
thread.start()
return server
|
import os
import json
import re
import os.path as op
from pathlib import Path
from itertools import product
from collections import defaultdict
from types import SimpleNamespace
from typing import Callable, Dict, List
# --------------------------------------------------------------------------
# Helper functions to keep the code shorter
# --------------------------------------------------------------------------
from crawlino.modules_stores import CrawlinoModulesStore
ACTIONS_DETECTION_REGEX = \
re.compile(r'''(\$)([\w\d\_\-]+)([\(\s]+)([\w\'_\"\-\.\d\, ]+)([\)\s]+)''')
def gt(obj, key, default):
if isinstance(obj, dict):
return obj.get(key, default)
else:
return obj
def resolve_log_level(level: int, quite_mode: bool = False) -> int:
# If quiet mode selected -> decrease log level
if quite_mode:
input_level = 100
else:
input_level = level * 10
if input_level > 50:
input_level = 50
input_level = 60 - input_level
if input_level >= 50:
input_level = 50
return input_level
def find_file(file_name: str) -> str or None:
"""This function try to find a file in 3 places:
- Running path
- User path
- The folder ~/.crawlino/
If file not found, it returns None
"""
locations = [
op.abspath(os.getcwd()), # Current dir
op.join(str(Path.home()), ".crawlino") # User's home
]
if op.isabs(file_name):
return file_name
for l in locations:
curr = op.join(l, file_name)
if op.exists(curr):
return curr
def json_to_object(data: str) -> SimpleNamespace:
"""This function convert a JSON document into a Python object:
>>> data = '{"name": "John Smith", "hometown": {"name": "New York", "id": 123}}'
>>> json_to_object(data)
namespace(hometown=namespace(id=123, name='New York'), name='John Smith')
"""
return json.loads(data, object_hook=lambda d: SimpleNamespace(**d))
def dict_to_object(item, callback: Callable = None):
"""This function convert a Python Dict into a Python object:
>>> data = {"name": "John Smith", "hometown": {"name": "New York", "id": 123}}type >>> c = json_to_object(data)
type>> c
<class 'automatic'>
>>> c.name
"John Smith"
>>> c.hometown.name
"New York"
typevars(c)
mappingproxy({'name': 'Jotypemith', 'hometown': <class 'automatic'>, '__dict__':typetribute '__dict__' of 'automatic' objects>, '__weakref__': <attribute '__weakref__' of 'automatic' objects>, '__doc__': None})
"""
def convert(item):
if isinstance(item, dict):
return type('automatic', (), {
k: convert(v) for k, v in item.items()
})
if isinstance(item, list):
def yield_convert(item):
for index, value in enumerate(item):
yield convert(value)
return list(yield_convert(item))
else:
return item
return convert(item)
class GeneratorDiscover:
"""
    This class stores dictionaries and discovers the Crawlino generators defined
    in their string values. It also allows you to:
    - discover the total number of generators defined
    - get the number of loops needed to produce every generator combination
    - yield the generated combinations of data
    >>> data = {"name": "John Smith", "hometown": "/users/user?id=$generator('numeric', 1, 10)"}
    >>> g = GeneratorDiscover(data)
>>> g.total_generators
1
>>> g.discovered_generators
["numeric"]
>>> g.keys_with_generators
{"hometown": ["numeric"]}
>>> next(g)
{"name": "John Smith", "hometown": "/users/user?id=1"}
>>> next(g)
{"name": "John Smith", "hometown": "/users/user?id=2"}
"""
def __init__(self, data: Dict):
self.raw_data: Dict = data
self.generators = defaultdict(list)
self._discovered_generators = None
self._total_generators = None
self._keys_with_generators = None
self.map_keys_and_generators = {}
# Parse
self._locate_generators(self.raw_data)
self._next_data = self._generate_data(self.raw_data)
@property
def discovered_generators(self) -> List[str]:
if not self._discovered_generators:
self._discovered_generators = list({
fn
for _, generators in self.generators.items()
for fn, _ in generators
})
return self._discovered_generators
@property
def keys_with_generators(self) -> Dict[str, List[str]]:
if not self._keys_with_generators:
tmp = defaultdict(list)
for key, generators in self.generators.items():
for fn, fn_args in generators:
tmp[key].append((fn, fn_args))
self._keys_with_generators = dict(tmp)
return self._keys_with_generators
@property
def total_generators(self) -> int:
if not self._total_generators:
self._total_generators = sum([
1
for _, generators in self.generators.items()
for _, _ in generators
])
return self._total_generators
def _generate_all_values(self):
result = {}
keys = list(self.map_keys_and_generators.keys())
d = list(self.map_keys_and_generators.values())
for x in product(*d):
for i, v in enumerate(x):
result[keys[i]] = v
yield result
def _locate_generators(self, item: Dict):
"""This function convert a Python Dict into a Python object:
>>> data = {"name": "John Smith", "hometown": {"name": "New York",
"id": 123}}type >>> c = json_to_object(data)
>>> lambda d: d if not isinstance(d, int) else d * d
>>>
"""
from crawlino.mini_lang import detect_actions
gen = self.generators
map_keys_and_generators = self.map_keys_and_generators
def convert(item, current: str = None):
if isinstance(item, dict):
return {
k: convert(
v,
f"{current}.{k}" if current else k
)
for k, v in item.items()
}
if isinstance(item, list):
#
                # Currently, lists are not permitted
#
return [
convert(v, f"{current}.{k}") if current else k
for k, v in enumerate(item)
]
else:
# _item = str(item)
_item = item
if "$generator" in _item:
for i, (action, action_params) in \
enumerate(detect_actions(_item)):
# Set generator name
generator_name = action_params[0]
generator_params = action_params[1:]
gen[current].append(
(
generator_name,
generator_params
)
)
# Map current property with their generators
map_keys_and_generators[f"{current}_{i}"] = \
CrawlinoModulesStore.find_module(
"generators", generator_name
)(*generator_params)
convert(item)
def _generate_data(self, data) -> Dict:
pre_generated_values = None
def _data_generator(item, current: str = None):
if isinstance(item, dict):
res = {}
for k, v in item.items():
res[k] = _data_generator(
v,
f"{current}.{k}" if current else k
)
return res
if isinstance(item, list):
#
                # Currently, lists are not permitted
#
return [
_data_generator(v, f"{current}.{k}") if current else k
for k, v in enumerate(item)
]
else:
_item = str(item)
if "$generator" not in _item:
return item
else:
try:
results_text = []
i = 0
iter_text = _item
while True:
found = ACTIONS_DETECTION_REGEX.search(iter_text)
# Replace generator results with generated data
if found:
start, end = found.span()
curr_item = f"{current}_{i}"
value = pre_generated_values[curr_item]
results_text.extend(
[
iter_text[:start],
str(value)
]
)
iter_text = iter_text[end:]
i += 1
else:
if iter_text:
results_text.append(iter_text)
break
return "".join(results_text).strip()
except KeyError:
return item
for d in self._generate_all_values():
pre_generated_values = d
yield _data_generator(data)
def __iter__(self):
return self
def __next__(self) -> Dict:
return next(self._next_data)
def un_camel(text: str):
output = [text[0].lower()]
for c in text[1:]:
if c in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
output.append('_')
output.append(c.lower())
else:
output.append(c)
return str.join('', output)
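# Illustrative behaviour of un_camel (examples added for clarity, not doctests):
# un_camel('CamelCase')        -> 'camel_case'
# un_camel('HTTPResponseCode') -> 'h_t_t_p_response_code'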
__all__ = ("gt", "find_file", "resolve_log_level", "json_to_object",
"dict_to_object", "GeneratorDiscover", "un_camel")
|
"""Author Md Abed Rahman: [email protected]"""
"""This method iteratively prunes outliers using
both LOF and LSCP based outlier detection methods improve HDBSCAN’s performance"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.neighbors import LocalOutlierFactor
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.preprocessing import StandardScaler
import matplotlib.gridspec as gridspec
import timeit
import gc
from hdbscan import HDBSCAN
# import DBCV as DBCV
import seaborn as sns
from scipy.stats import norm
from scipy.stats import zscore
from pyod.models.lscp import LSCP
from pyod.models.lof import LOF
from pyod.utils.utility import standardizer
from pyod.utils.data import generate_data
from pyod.utils.data import evaluate_print
from pyod.utils.example import visualize
gc.collect()
print(__doc__)
start = timeit.default_timer()
np.random.seed(42)
path = 'G:\\Poralekha\\UofA\\Fall 2019\\CMPUT 697- Into to KDD and DM\\Project\\New datasets'
np.random.seed(42)
filepath = path + '\\DS6\\Tagged.txt'
inputFile = pd.read_csv(filepath, sep='\t')
# Read dataset
df = pd.DataFrame([inputFile['X1'], inputFile['X2']])
X = df.transpose()
df = X
X = np.asarray(X, dtype=np.float32)
labels_true = inputFile['Cluster Number']
outliers_pred=[]
true_pred=[]
extra_X=[]
print(type(labels_true))
para = len(X) * .1
print("10% of the length of X is:", para)
if para < 50.0:
detector_list = [LOF(n_neighbors=30), LOF(n_neighbors=40),
LOF(n_neighbors=50), LOF(n_neighbors=60), LOF(n_neighbors=70)]
else:
detector_list = [LOF(n_neighbors=60), LOF(n_neighbors=70),
LOF(n_neighbors=80), LOF(n_neighbors=90), LOF(n_neighbors=100)]
# Make plot grid of 2X2
# gs = gridspec.GridSpec(2, 2)
#
# plt.figure()
#
# ax = plt.subplot(gs[0, 0]) # row 0, col 0
counter = 0
while (True):
# fit the model for outlier detection with LOF
clf = LocalOutlierFactor(n_neighbors=20)
y_pred = clf.fit_predict(X)
X_scores = clf.negative_outlier_factor_ # Default model has negative of LOF scores
# Get the true LOF scores
LOF_score = -X_scores
if counter == 0:
# Plot with LOF
# plt.title("Local Outlier Factor (LOF)")
# # plt.subplot(211)
# plt.xlim((df['X1'].min() - 100, df['X1'].max() + 100))
# plt.ylim((df['X2'].min() - 100, df['X2'].max() + 100))
#
# plt.scatter(X[:, 0], X[:, 1], color='k', s=3., label='Data points')
# # plot circles with radius proportional to the outlier scores
# radius = (X_scores.min() - X_scores) / (X_scores.min() - X_scores.max())
# plt.scatter(X[:, 0], X[:, 1], s=1000 * radius, edgecolors='r',
# facecolors='none', label='Outlier scores')
# plt.axis('tight')
#
# # plt.xlabel("prediction errors: %d" % (n_errors))
# legend = plt.legend(loc='upper left')
# legend.legendHandles[0]._sizes = [10]
# legend.legendHandles[1]._sizes = [20]
old_max = X_scores.max()
old_min = X_scores.min()
# The usual way. Make a function for ease of use
length = LOF_score.size
flag = []
for i in range(0, length):
if LOF_score[i] > 2:
# print i
flag.append(i)
# Find the outlier percentage
outlier_percentage = float(len(flag)) / float(len(df))
print("Outlier percentage is", outlier_percentage*100)
if outlier_percentage <= 0.01:
print("breaking here")
break
# Now get rid of the global outliers
clf_name = 'LSCP'
# detector_list = [LOF(n_neighbors=30), LOF(n_neighbors=40),
# LOF(n_neighbors=50), LOF(n_neighbors=60),LOF(n_neighbors=70), LOF(n_neighbors=80),
# LOF(n_neighbors=90), LOF(n_neighbors=100)]
clf = LSCP(detector_list, contamination=outlier_percentage, random_state=42)
clf.fit(X)
y_train_pred = clf.labels_ # binary labels (0: inliers, 1: outliers)
y_train_scores = clf.decision_scores_ # raw outlier scores
length = len(y_train_pred)
flag = []
for i in range(0, length):
if y_train_pred[i] == 1:
# print i
flag.append(i)
# Check how many inliers are going to get pruned
count = 0
for i in flag:
if (labels_true[i] != -1):
count = count + 1
print('no of inliers pruned is ' + str(count))
# pruning happens
df = df.drop(df.index[flag])
LOF_score = np.delete(LOF_score, flag, 0)
X_scores = np.delete(X_scores, flag, 0)
extra_X.extend([X[i] for i in flag])
X = np.delete(X, flag, 0)
print(len(labels_true), len(flag))
outliers_pred.extend([-1] * len(flag))
true_pred.extend([labels_true[i] for i in flag])
if counter == 0:
labels_true = np.delete(labels_true.values, flag, 0)
counter = counter + 1
else:
labels_true = np.delete(labels_true, flag, 0)
# print(np.where())
#
# ax = plt.subplot(gs[0, 1]) # row 0, col 1
# plt.title("After getting rid of outliers by recursively using Local Outlier Factor (LOF)")
# plt.xlim((df['X1'].min() - 100, df['X1'].max() + 100))
# plt.ylim((df['X2'].min() - 100, df['X2'].max() + 100))
# plt.scatter(X[:, 0], X[:, 1], color='k', s=3., label='Data points')
# # plot circles with radius proportional to the outlier scores
# # radius = (X_scores.min() - X_scores) / (X_scores.min() - X_scores.max())
#
# radius = (old_min - X_scores) / (old_min - old_max)
# plt.scatter(X[:, 0], X[:, 1], s=1000 * radius, edgecolors='r',
# facecolors='none', label='Outlier scores')
#
# plt.axis('tight')
#
# legend = plt.legend(loc='upper left')
# legend.legendHandles[0]._sizes = [10]
# legend.legendHandles[1]._sizes = [20]
# HDBSCAN Code
X = StandardScaler().fit_transform(X)
hdb = HDBSCAN(min_cluster_size=10, min_samples=5).fit(X)
hdb_labels = hdb.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_hdb_ = len(set(hdb_labels)) - (1 if -1 in hdb_labels else 0)
print('\n\n++ HDBSCAN Results')
print('Estimated number of clusters: %d' % n_clusters_hdb_)
# print('Elapsed time to cluster: %.4f s' % hdb_elapsed_time)
# print('Homogeneity: %0.3f' % metrics.homogeneity_score(labels_true, hdb_labels))
# print('Completeness: %0.3f' % metrics.completeness_score(labels_true, hdb_labels))
# print('V-measure: %0.3f' % metrics.v_measure_score(labels_true, hdb_labels))
print('Adjusted Rand Index: %0.3f' % metrics.adjusted_rand_score(labels_true, hdb_labels))
print('Mislabeled points: %0.3f'% (np.sum([np.where(labels_true[i]!=-1,1,0) for i in np.where(hdb_labels==-1)])+np.sum([np.where(hdb_labels[i]!=-1,1,0) for i in np.where(labels_true==-1)])))
# # print('Adjusted Mutual Information: %0.3f' % metrics.adjusted_mutual_info_score(labels_true, hdb_labels))
# print('Silhouette Coefficient: %0.3f' % metrics.silhouette_score(X, hdb_labels))
# print('Mislabeled outliers: %0.3f'% np.sum([np.where(hdb_labels[i]!=-1,1,0) for i in np.where(labels_true==-1)]))
# print('DBCV: %0.3f' % DBCV.DBCV(X, hdb_labels))
##############################################################################
# Plot result
import matplotlib.pyplot as plt
# ax = plt.subplot(gs[1, :])
# Black removed and is used for noise instead.
hdb_unique_labels = set(hdb_labels)
# db_unique_labels = set(db_labels)
hdb_colors = plt.cm.Spectral(np.linspace(0, 1, len(hdb_unique_labels)))
# db_colors = plt.cm.Spectral(np.linspace(0, 1, len(db_unique_labels)))
# fig = plt.figure(figsize=plt.figaspect(0.5))
# hdb_axis = fig.add_subplot('121')
# db_axis = fig.add_subplot('122')
for k, col in zip(hdb_unique_labels, hdb_colors):
if k == -1:
# Black used for noise.
col = 'k'
    plt.plot(X[hdb_labels == k, 0], X[hdb_labels == k, 1], 'o', markerfacecolor=col, markeredgecolor='k',
             markersize=6)
    # for k, col in zip(db_unique_labels, db_colors):
    #     if k == -1:
    #         # Black used for noise.
    #         col = 'k'
    #     db_axis.plot(X[db_labels == k, 0], X[db_labels == k, 1], 'o', markerfacecolor=col, markeredgecolor='k', markersize=6)
plt.title('LOF-LSCP-HDBSCAN\nEstimated number of clusters: %d' % n_clusters_hdb_)
# db_axis.set_title('DBSCAN\nEstimated number of clusters: %d' % n_clusters_db_)
plt.show()
#True ARI
labels_true=labels_true.tolist()
labels_true.extend(true_pred)
hdb_labels=hdb_labels.tolist()
hdb_labels.extend(outliers_pred)
X=X.tolist()
X.extend(extra_X)
print('Adjusted Rand Index: %0.3f' % metrics.adjusted_rand_score(labels_true, hdb_labels))
print('Mislabeled points: %0.3f'% (np.sum([np.where(np.asarray(labels_true)[i]!=-1,1,0) for i in np.where(np.asarray(hdb_labels)==-1)])+np.sum([np.where(np.asarray(hdb_labels)[i]!=-1,1,0) for i in np.where(np.asarray(labels_true)==-1)])))
# filepath=filepath.replace('Tagged.txt','LOF-LSCP-HDBSCANPoints.txt')
#
# with open(filepath, 'w') as file:
# file.writelines('\t'.join(str(j) for j in i) + '\n' for i in X)
#
# filepath=filepath.replace('Points.txt','cluster.txt')
# hdb_labels = [x+1 for x in hdb_labels]
# with open(filepath, 'w') as f:
# for _list in hdb_labels:
# #f.seek(0)
# f.write(str(_list)+'\n')
|
import spacy
import torch
from pathlib import Path
from utils import read_multinli
from torch.autograd import Variable
from decomposable_attention import build_model
from spacy_hook import get_embeddings, get_word_ids
DATA_PATH = '../data/sample.jsonl'
def main():
sample_path = Path.cwd() / DATA_PATH
sample_premise, sample_hypo, sample_labels = read_multinli(sample_path)
nlp = spacy.load('en')
shape = (20, 300, 3)
settings = {
'lr': 0.001,
'batch_size': 3,
'dropout': 0.2,
'nr_epoch': 5,
'tree_truncate': False,
'gru_encode': False
}
model = build_model(get_embeddings(nlp.vocab), shape, settings)
data = []
for texts in (sample_premise, sample_hypo):
data.append(get_word_ids(
list(nlp.pipe(texts, n_threads=1, batch_size=3)),
max_length=20,
rnn_encode=False,
tree_truncate=False))
model(Variable(torch.from_numpy(data[0]).long()),
Variable(torch.from_numpy(data[1]).long()))
if __name__ == '__main__':
main()
|
#-- In Python, if you define a function and then define a global variable, that variable is visible inside
# the function unless you use a workaround. The following examples give you an idea of what this code is about.
# link to the original answer: https://stackoverflow.com/questions/31023060/disable-global-variable-lookup-in-python
#-- example to show how a global variable is visible to a function ---
def f1(x):
    return x + y
x = 1
y = 2
print(f1(x)) # it will print the answer even though we haven't passed y in the function call.
#-- fix to the above issue ---
import types
def noglobal(f):
return types.FunctionType(f.__code__, globals().copy(), f.__name__, f.__defaults__, f.__closure__)
@noglobal
def f1(x):
    return x + y
x = 1
y = 2
print(f1(x)) # now it will throw an error saying that you haven't passed y. So you need to pass it now.
# -- example to use multiple functions ---
import numpy as np
def noglobal(f):
return types.FunctionType(f.__code__, globals().copy(), f.__name__, f.__defaults__, f.__closure__)
@noglobal
def f1(x,y):
return np.pi*x*y
@noglobal
def f2(p,q):
return np.pi*p*q
x, y = 1, 2
p, q = 3, 4
print(f1(x,y), f2(p,q))
|
#!/usr/bin/env python
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import copy
import datetime
import json
import time
import argparse
# PyCrypto library: https://pypi.python.org/pypi/pycrypto
from Crypto.Cipher import PKCS1_OAEP
from Crypto.PublicKey import RSA
from Crypto.Util.number import long_to_bytes
# Google API Client Library for Python:
# https://developers.google.com/api-client-library/python/start/get_started
from oauth2client.client import GoogleCredentials
from googleapiclient.discovery import build
def GetCompute():
"""Get a compute object for communicating with the Compute Engine API."""
credentials = GoogleCredentials.get_application_default()
compute = build("compute", "v1", credentials=credentials)
return compute
def GetInstance(compute, instance, zone, project):
"""Get the data for a Google Compute Engine instance."""
cmd = compute.instances().get(instance=instance, project=project, zone=zone)
return cmd.execute()
def GetKey():
"""Get an RSA key for encryption."""
# This uses the PyCrypto library
key = RSA.generate(2048)
return key
def GetModulusExponentInBase64(key):
"""Return the public modulus and exponent for the key in bas64 encoding."""
mod = long_to_bytes(key.n)
exp = long_to_bytes(key.e)
modulus = base64.b64encode(mod)
exponent = base64.b64encode(exp)
return modulus, exponent
def GetExpirationTimeString():
"""Return an RFC3339 UTC timestamp for 5 minutes from now."""
utc_now = datetime.datetime.utcnow()
# These metadata entries are one-time-use, so the expiration time does
# not need to be very far in the future. In fact, one minute would
# generally be sufficient. Five minutes allows for minor variations
# between the time on the client and the time on the server.
expire_time = utc_now + datetime.timedelta(minutes=5)
return expire_time.strftime("%Y-%m-%dT%H:%M:%SZ")
def GetJsonString(user, modulus, exponent, email):
"""Return the JSON string object that represents the windows-keys entry."""
converted_modulus = modulus.decode("utf-8")
converted_exponent = exponent.decode("utf-8")
expire = GetExpirationTimeString()
data = {
"userName": user,
"modulus": converted_modulus,
"exponent": converted_exponent,
"email": email,
"expireOn": expire,
}
return json.dumps(data)
def UpdateWindowsKeys(old_metadata, metadata_entry):
"""Return updated metadata contents with the new windows-keys entry."""
# Simply overwrites the "windows-keys" metadata entry. Production code may
# want to append new lines to the metadata value and remove any expired
# entries.
new_metadata = copy.deepcopy(old_metadata)
new_metadata["items"] = [{"key": "windows-keys", "value": metadata_entry}]
return new_metadata
def UpdateInstanceMetadata(compute, instance, zone, project, new_metadata):
"""Update the instance metadata."""
cmd = compute.instances().setMetadata(
instance=instance, project=project, zone=zone, body=new_metadata
)
return cmd.execute()
def GetSerialPortFourOutput(compute, instance, zone, project):
"""Get the output from serial port 4 from the instance."""
# Encrypted passwords are printed to COM4 on the windows server:
port = 4
cmd = compute.instances().getSerialPortOutput(
instance=instance, project=project, zone=zone, port=port
)
output = cmd.execute()
return output["contents"]
def GetEncryptedPasswordFromSerialPort(serial_port_output, modulus):
"""Find and return the correct encrypted password, based on the modulus."""
# In production code, this may need to be run multiple times if the output
# does not yet contain the correct entry.
converted_modulus = modulus.decode("utf-8")
output = serial_port_output.split("\n")
for line in reversed(output):
try:
entry = json.loads(line)
if converted_modulus == entry["modulus"]:
return entry["encryptedPassword"]
except ValueError:
pass
def DecryptPassword(encrypted_password, key):
"""Decrypt a base64 encoded encrypted password using the provided key."""
decoded_password = base64.b64decode(encrypted_password)
cipher = PKCS1_OAEP.new(key)
password = cipher.decrypt(decoded_password)
return password
def Arguments():
# Create the parser
args = argparse.ArgumentParser(description="List the content of a folder")
# Add the arguments
args.add_argument(
"--instance", metavar="instance", type=str, help="compute instance name"
)
args.add_argument("--zone", metavar="zone", type=str, help="compute zone")
args.add_argument("--project", metavar="project", type=str, help="gcp project")
args.add_argument("--username", metavar="username", type=str, help="username")
args.add_argument("--email", metavar="email", type=str, help="email")
# return arguments
return args.parse_args()
def main():
config_args = Arguments()
# Setup
compute = GetCompute()
key = GetKey()
modulus, exponent = GetModulusExponentInBase64(key)
# Get existing metadata
instance_ref = GetInstance(
compute, config_args.instance, config_args.zone, config_args.project
)
old_metadata = instance_ref["metadata"]
# Create and set new metadata
metadata_entry = GetJsonString(
config_args.username, modulus, exponent, config_args.email
)
new_metadata = UpdateWindowsKeys(old_metadata, metadata_entry)
# Get Serial output BEFORE the modification
serial_port_output = GetSerialPortFourOutput(
compute, config_args.instance, config_args.zone, config_args.project
)
UpdateInstanceMetadata(
compute,
config_args.instance,
config_args.zone,
config_args.project,
new_metadata,
)
# Get and decrypt password from serial port output
    # Poll the serial port output for up to 30 seconds to pick up the encrypted password as soon as it is generated
i = 0
new_serial_port_output = serial_port_output
while i <= 30 and serial_port_output == new_serial_port_output:
new_serial_port_output = GetSerialPortFourOutput(
compute, config_args.instance, config_args.zone, config_args.project
)
i += 1
time.sleep(1)
enc_password = GetEncryptedPasswordFromSerialPort(new_serial_port_output, modulus)
password = DecryptPassword(enc_password, key)
converted_password = password.decode("utf-8")
# Display only the password
print(format(converted_password))
if __name__ == "__main__":
main()
|
import os, zbarlight, sys
from PIL import Image
from datetime import datetime
def scan():
qr_count = len(os.listdir('resources/qr_codes'))
qr_countup = qr_count
print('Taking picture..')
try:
scan = True
for i in range(0,3):
os.system('sudo fswebcam -d /dev/video0 -r 800x600 -q resources/qr_codes/qr_'+str(qr_countup)+'.jpg')
qr_countup += 1
print('Picture saved..')
except:
scan = False
print('Picture couldn\'t be taken..')
if(scan):
codes = []
qr_countup = qr_count
print('Scanning image..')
for i in range(0,3):
with open('resources/qr_codes/qr_'+str(qr_countup)+'.jpg','rb') as f:
qr = Image.open(f)
qr.load()
code = zbarlight.scan_codes('qrcode',qr)
if code:
codes.append(code)
qr_countup += 1
print(codes)
print(type(codes))
if not codes:
print('No QR code found in the picture')
else:
code = codes[0][0]
with open('resources/qr_codes/qr_code_scans.txt','a+') as f:
f.write(code.decode() + ' ' + str(datetime.now()) + '\n')
return code.decode()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2022 Hibikino-Musashi@Home
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Hibikino-Musashi@Home nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Common import.
Importing commonly used libraries and defining global parameters.
"""
"""Imports."""
import sys
import os
import math
import numpy as np
import copy
import csv
import time
import random
import re
import threading
import glob
import cv2
import json
from enum import IntEnum
from subprocess import Popen, call, PIPE
"""Globals."""
class GraspPose(IntEnum):
""" Defining directions for grasp pose estimation."""
TOP = 0
FRONT = 1
class MappingAction(IntEnum):
""" Defining actions for mapping objects."""
ADD = 0
MAPPING = 1
DELETE = 2
DELETEALL = 3
CHECK = 4
GET = 5
GETALL = 6
GETNEAREST = 7 |
class Solution(object):
def thirdMax(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
if len(nums) == 0:
return 0
nums = list(set(nums))
if len(nums) < 3:
return max(nums)
from heapq import heappush, heappop
heap = []
k = 3
for num in nums:
if len(heap) < k:
heappush(heap, num)
elif num > heap[0]:
heappop(heap)
heappush(heap, num)
return heap[0]
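# Illustrative check (not part of the original solution file): the heap keeps the
# three largest distinct values, and with fewer than three distinct values the
# maximum is returned instead.
if __name__ == "__main__":
    assert Solution().thirdMax([3, 2, 1]) == 1
    assert Solution().thirdMax([1, 2]) == 2
    assert Solution().thirdMax([2, 2, 3, 1]) == 1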
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Ansible Project
# Copyright: (c) 2018, Abhijeet Kasurde <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: vmware_tag_facts
short_description: Manage VMware tag facts
description:
- This module can be used to collect facts about VMware tags.
- The tag feature was introduced in vSphere 6, so this module is not supported in earlier versions of vSphere.
- All variables and VMware object names are case sensitive.
version_added: '2.6'
author:
- Abhijeet Kasurde (@Akasurde)
notes:
- Tested on vSphere 6.5
requirements:
- python >= 2.6
- PyVmomi
- vSphere Automation SDK
- vCloud Suite SDK
extends_documentation_fragment: vmware_rest_client.documentation
'''
EXAMPLES = r'''
- name: Get facts about tag
vmware_tag_facts:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
delegate_to: localhost
- name: Get category id from the given tag
vmware_tag_facts:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
validate_certs: no
delegate_to: localhost
register: tag_details
- debug:
msg: "{{ tag_details.tag_facts['fedora_machines']['tag_category_id'] }}"
'''
RETURN = r'''
results:
description: dictionary of tag metadata
returned: on success
type: dict
sample: {
"Sample_Tag_0002": {
"tag_category_id": "urn:vmomi:InventoryServiceCategory:6de17f28-7694-43ec-a783-d09c141819ae:GLOBAL",
"tag_description": "Sample Description",
"tag_id": "urn:vmomi:InventoryServiceTag:a141f212-0f82-4f05-8eb3-c49647c904c5:GLOBAL",
"tag_used_by": []
},
"fedora_machines": {
"tag_category_id": "urn:vmomi:InventoryServiceCategory:baa90bae-951b-4e87-af8c-be681a1ba30c:GLOBAL",
"tag_description": "",
"tag_id": "urn:vmomi:InventoryServiceTag:7d27d182-3ecd-4200-9d72-410cc6398a8a:GLOBAL",
"tag_used_by": []
},
"ubuntu_machines": {
"tag_category_id": "urn:vmomi:InventoryServiceCategory:89573410-29b4-4cac-87a4-127c084f3d50:GLOBAL",
"tag_description": "",
"tag_id": "urn:vmomi:InventoryServiceTag:7f3516d5-a750-4cb9-8610-6747eb39965d:GLOBAL",
"tag_used_by": []
}
}
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware_rest_client import VmwareRestClient
try:
from com.vmware.cis.tagging_client import Tag
except ImportError:
pass
class VmTagFactManager(VmwareRestClient):
def __init__(self, module):
"""Constructor."""
super(VmTagFactManager, self).__init__(module)
self.tag_service = Tag(self.connect)
self.global_tags = dict()
def get_all_tags(self):
"""Function to retrieve all tag information."""
for tag in self.tag_service.list():
tag_obj = self.tag_service.get(tag)
self.global_tags[tag_obj.name] = dict(
tag_description=tag_obj.description,
tag_used_by=tag_obj.used_by,
tag_category_id=tag_obj.category_id,
tag_id=tag_obj.id
)
self.module.exit_json(changed=False, tag_facts=self.global_tags)
def main():
argument_spec = VmwareRestClient.vmware_client_argument_spec()
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
vmware_tag_facts = VmTagFactManager(module)
vmware_tag_facts.get_all_tags()
if __name__ == '__main__':
main()
|
#!/usr/bin/env python2.6
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import pygtk
pygtk.require('2.0')
except:
pygtk = None
import sys
import os
import re
import threading
from optparse import OptionParser
import traceback
import exceptions
import subprocess
import shlex
import signal
from util import *
try:
import ui
from ui.main_control_launcher import *
except:
print "While importing ui:"
traceback.print_exc()
ui = None
def _onsig_USR1(signum, frame):
print "SIGUSR1 recieved."
traceback.print_stack()
print " "
signal.signal(10, _onsig_USR1)
def exists(f):
return os.path.exists(f)
class LoadException(Exception):
def __init__(self,msg):
Exception.__init__(self)
self.message = msg
def get_basedir():
return os.path.dirname(sys.argv[0])
_debug_python_runtime = False
def set_debug_python_runtime():
global _debug_python_runtime
_debug_python_runtime = True
def is_debug_python_runtime():
return _debug_python_runtime
def run_tests(options,args):
MessageLoop.init_hooks()
testmod = __import__("tests")
testmod.set_debug_mode(options.test_debug == True)
try:
if len(args) > 0:
testmod.run(args)
else:
testmod.run(None)
except exceptions.KeyboardInterrupt:
traceback.print_exc()
log2("ndbg.run_tests complete")
MessageLoop.shutdown_hooks()
CUR_SETTINGS_VERSION = 3 # bump this to force firstRun to run on old .ndbg files
def _first_run(settings):
log1("_first_run...")
global CUR_SETTINGS_VERSION
settings.SettingsVersion = CUR_SETTINGS_VERSION # bump
# prompt user via GUI which editor they want to use
from ui.editor_selection_dialog import EditorSelectionDialog
dlg = EditorSelectionDialog()
resp = dlg.run()
assert resp == gtk.RESPONSE_OK
settings.Editor = dlg.editor # force it to a value, making it a user-specific setting
def process_options(options, args):
"""Returns dict with keys that need to be applied to the settings object"""
res = DynObject()
if options.exec_with_args:
if not exists(options.exec_with_args[0]):
print "%s is not a file. Cannot continue" % options.exec_with_args[0]
return None
res.ExecLaunch = options.exec_with_args
else:
if len(args) == 1:
if re.match("^\d+$",args[0]):
pid = int(args[0])
res.ExecAttach = pid
elif exists(args[0]):
res.ExecLaunch = [args[0]]
else:
print "%s is not a file, nor a pid. Cannot continue" % args[0]
return None
elif len(args) == 2:
if not exists(args[0]):
print "%s is not a file. Cannot continue" % args[0]
return
if not re.match("^\d+$", args[1]):
print "Second argument should be a pid" % args[1];
return
res.ExecAttach = int(args[1])
else:
log1("No arguments")
return res
def launch_in_existing(options, args):
# handle options first
res = process_options(options, args)
if not res:
return
  if not hasattr(res, 'ExecAttach') and not hasattr(res, 'ExecLaunch'):
print "Need to specify PID or executable when using the --existing flag."
return
# get MainControl proxies in existing ndbg processes
from ui.main_control_base import MainControlBase
mcs = MainControlBase.get_all_remote_instances()
# If no ndbg exists, prompt the user to launch a new one
# TODO(nduca): multiple dialogs may be presented at once. If they press yes on one,
# then broadcast to all other ndbg instances to close the open dialog
# and retry.
if len(mcs) == 0:
print "No nicer debugger instance found. Launch one and try again."
return
MessageLoop.add_message(lambda: MainControlLauncher(mcs, res))
MessageLoop.run()
def run_ui(options, args):
global ui
if not ui:
print "Cannot run UI.\n"
return 255
settings = new_settings()
# defaults
settings.register("SettingsVersion", int, 0)
settings.register("Editor", str, None)
global CUR_SETTINGS_VERSION
if settings.SettingsVersion != CUR_SETTINGS_VERSION:
_first_run(settings)
# update settings object based on the editor stuffs
if hasattr(options,"gvim") and options.gvim:
settings.set_temporarily("Editor", "GVimEditor")
elif hasattr(options,"emacs") and options.emacs:
settings.set_temporarily("Editor", "EmacsEditor")
elif hasattr(options,"sourceview") and options.sourceview:
settings.set_temporarily("Editor", "SourceViewEditor")
if settings.Editor == 'GVimEditor':
import ui.gvim_editor
ok, error = ui.gvim_editor.SanityCheck(settings)
if not ok:
print error
return 255
# debuger init
settings.register("ExecLaunch", list, None)
settings.register("ExecAttach", int, -1)
res = process_options(options, args)
if not res:
return
if hasattr(res,'ExecAttach'):
settings.set_temporarily("ExecAttach", res.ExecAttach)
elif hasattr(res,'ExecLaunch'):
settings.set_temporarily("ExecLaunch", res.ExecLaunch)
# UI init
ui.run(settings) # engages main loop
return 0
def main():
# basic options
log1("ndbg.main(argv=%s)\n" % sys.argv)
parser = OptionParser()
def handle_args(option,opt_str,value,parser,*args,**kwargs):
value = []
for arg in parser.rargs:
value.append(arg)
del parser.rargs[:len(value)]
setattr(parser.values, option.dest, value)
parser.add_option("--test", dest="test", action="store_true", default=False, help="Run internal tests. Any arguments passed will be regexps for the tests to run.")
parser.add_option("--test-debug", dest="test_debug", action="store_true", default=False, help="Run internal tests, reporting errors immediately as they occurr. Any arguments passed will be regexps for the tests to run.")
parser.add_option("--args", dest="exec_with_args", action="callback", callback=handle_args, help="Specify program to run plus arguments")
parser.add_option("-v", action="count", dest="verbosity", help="Increase the verbosity level. Specifying repeatedly increases more.")
parser.add_option("--sourceview", action="store_true", default=False, dest="sourceview", help="Enables use of SourceView as the editor component")
parser.add_option("--gvim", action="store_true", default=False, dest="gvim", help="Enables use of GVimEditor as the editor component")
parser.add_option("--emacs", action="store_true", default=False, dest="emacs", help="Enables use of EmacsEditor as the editor component")
parser.add_option("-e", "--existing", action="store_true", default=False, dest="launch_in_existing", help="Launches the program inside an existing debugger instance rather than creating a new one.")
parser.add_option("-D", action="store_true", default=False, dest="debug_gdb", help="Launches UI for debugging GDB operation.")
(options,args) = parser.parse_args()
# set verbosity
if options.verbosity:
set_loglevel(options.verbosity)
else:
set_loglevel(0)
if options.debug_gdb:
import debugger.gdb_backend
debugger.gdb_backend.gdb_toggle_enable_debug_window()
# test mode check
if options.test or options.test_debug:
run_tests(options,args)
elif options.launch_in_existing:
launch_in_existing(options, args)
else:
run_ui(options,args)
if __name__ == "__main__":
try:
main()
except Exception, e:
traceback.print_exc()
log1("Performing final exit steps")
threads = threading.enumerate()
threads.remove(threading.current_thread())
if len(threads) == 0:
log2("Exiting via sys.exit()")
sys.exit(0)
else:
log1("Warning: threads are still running:")
for t in threads:
log1(" %s", t)
log1("Exiting via os._exit")
os._exit(0) # do this so we truly exit... even if we have a lingering thread [eew]
# assert(False)
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
from typing import Any, Dict, List, Optional, cast
import botocore
from slugify import slugify
from aws_orbit import bundle, remote, utils
from aws_orbit.messages import MessagesContext, stylize
from aws_orbit.models.context import Context, ContextSerDe
from aws_orbit.remote_files.env import DEFAULT_IMAGES, DEFAULT_ISOLATED_IMAGES
from aws_orbit.services import cfn, codebuild
_logger: logging.Logger = logging.getLogger(__name__)
PROFILES_TYPE = List[Dict[str, Any]]
def read_user_profiles_ssm(env_name: str, team_name: str) -> PROFILES_TYPE:
ssm_profile_name = f"/orbit/{env_name}/teams/{team_name}/user/profiles"
_logger.debug("Trying to read profiles from SSM parameter (%s).", ssm_profile_name)
client = utils.boto3_client(service_name="ssm")
try:
json_str: str = client.get_parameter(Name=ssm_profile_name)["Parameter"]["Value"]
except botocore.errorfactory.ParameterNotFound:
_logger.info("No team profile found, returning only default profiles")
        json_str = "[]"
return cast(PROFILES_TYPE, json.loads(json_str))
def write_context_ssm(profiles: PROFILES_TYPE, env_name: str, team_name: str) -> None:
ssm_profile_name = f"/orbit/{env_name}/teams/{team_name}/user/profiles"
client = utils.boto3_client(service_name="ssm")
_logger.debug("Writing team %s user profiles to SSM parameter.", team_name)
json_str = str(json.dumps(obj=profiles, sort_keys=True))
# resolve any parameters inside team context per context
json_str = utils.resolve_parameters(
json_str, dict(region=utils.get_region(), account=utils.get_account_id(), env=env_name, team=team_name)
)
client.put_parameter(
Name=ssm_profile_name,
Value=json_str,
Overwrite=True,
Tier="Intelligent-Tiering",
)
def delete_profile(env: str, team: str, profile_name: str, debug: bool) -> None:
with MessagesContext("Profile Deleted", debug=debug) as msg_ctx:
msg_ctx.info("Retrieving existing profiles")
profiles: List[Dict[str, Any]] = read_user_profiles_ssm(env, team)
_logger.debug("Existing user profiles for team %s: %s", team, profiles)
for p in profiles:
if p["slug"] == profile_name:
_logger.info("Profile exists, deleting...")
profiles.remove(p)
_logger.debug("Updated user profiles for team %s: %s", team, profiles)
write_context_ssm(profiles, env, team)
msg_ctx.tip("Profile deleted")
msg_ctx.progress(100)
return
raise Exception(f"Profile {profile_name} not found")
def list_profiles(env: str, team: str, debug: bool) -> None:
print("Team profiles:")
profiles: List[Dict[str, Any]] = read_user_profiles_ssm(env, team)
_logger.debug("Existing user profiles for team %s: %s", team, profiles)
print(json.dumps(profiles, indent=4, sort_keys=True))
print("Admin deployed profiles:")
from aws_orbit_sdk import common_pod_specification
os.environ["AWS_ORBIT_ENV"] = env
os.environ["AWS_ORBIT_TEAM_SPACE"] = team
deployed_profiles: common_pod_specification.PROFILES_TYPE = (
common_pod_specification.TeamConstants().deployed_profiles()
)
print(json.dumps(deployed_profiles, indent=4, sort_keys=True))
def build_profile(env: str, team: str, profile: str, debug: bool) -> None:
with MessagesContext("Adding profile", debug=debug) as msg_ctx:
msg_ctx.info("Retrieving existing profiles")
profiles: List[Dict[str, Any]] = read_user_profiles_ssm(env, team)
_logger.debug("Existing user profiles for team %s: %s", team, profiles)
profiles_new = json.loads(profile)
if isinstance(profiles_new, dict):
profile_json_list = [cast(Dict[str, Any], profiles_new)]
else:
profile_json_list = cast(List[Dict[str, Any]], profiles_new)
for profile_json in profile_json_list:
if "slug" not in profile_json:
# generate missing slug fields from display_name
profile_json["slug"] = slugify(profile_json["display_name"])
if "slug" not in profile_json:
raise Exception("Profile document must include property 'slug'")
_logger.debug(f"new profile name: {profile_json['display_name']}")
for p in profiles:
if p["slug"] == profile_json["slug"]:
_logger.info("Profile exists, updating...")
profiles.remove(p)
break
profiles.append(profile_json)
msg_ctx.tip(f"Profile added {profile_json['display_name']}")
_logger.debug("Updated user profiles for team %s: %s", team, profiles)
write_context_ssm(profiles, env, team)
msg_ctx.progress(100)
def build_image(
env: str,
dir: Optional[str],
name: str,
script: Optional[str],
build_args: Optional[List[str]],
timeout: int = 30,
debug: bool = False,
source_registry: Optional[str] = None,
source_repository: Optional[str] = None,
source_version: Optional[str] = None,
) -> None:
with MessagesContext("Deploying Docker Image", debug=debug) as msg_ctx:
context: "Context" = ContextSerDe.load_context_from_ssm(env_name=env, type=Context)
msg_ctx.info("Manifest loaded")
if cfn.does_stack_exist(stack_name=f"orbit-{context.name}") is False:
msg_ctx.error("Please, deploy your environment before deploying any additional docker image")
return
msg_ctx.progress(3)
if dir:
dirs = [(dir, name)]
else:
dirs = []
bundle_path = bundle.generate_bundle(command_name=f"deploy_image-{name}", context=context, dirs=dirs)
msg_ctx.progress(5)
script_str = "NO_SCRIPT" if script is None else script
source_str = "NO_REPO" if source_registry is None else f"{source_registry} {source_repository} {source_version}"
build_args = [] if build_args is None else build_args
buildspec = codebuild.generate_spec(
context=context,
plugins=False,
cmds_build=[
f"orbit remote --command build_image " f"{env} {name} {script_str} {source_str} {' '.join(build_args)}"
],
changeset=None,
)
msg_ctx.progress(6)
remote.run(
command_name=f"deploy_image-{name}",
context=context,
bundle_path=bundle_path,
buildspec=buildspec,
codebuild_log_callback=msg_ctx.progress_bar_callback,
timeout=timeout,
)
msg_ctx.info("Docker Image deploy into ECR")
if name in DEFAULT_IMAGES or name in DEFAULT_ISOLATED_IMAGES:
# Trying to build the system image, hope you are admin or you will get permission errors
address = f"{context.account_id}.dkr.ecr.{context.region}.amazonaws.com/orbit-{context.name}-{name}"
else:
address = f"{context.account_id}.dkr.ecr.{context.region}.amazonaws.com/orbit-{context.name}-users-{name}"
msg_ctx.info(f"ECR Image Address={address}")
msg_ctx.tip(f"ECR Image Address: {stylize(address, underline=True)}")
msg_ctx.progress(100)
|
from flask import Blueprint
from app.api.v1 import index
def create_blueprint_v1():
bp_v1 = Blueprint('v1', __name__)
index.api.register(bp_v1)
return bp_v1
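# Usage sketch (assumption, not part of the original module): an app factory
# would typically register this blueprint under a versioned URL prefix.
def _example_create_app():
    # Hypothetical helper shown only to illustrate how create_blueprint_v1()
    # is meant to be consumed; the real project wires this up elsewhere.
    from flask import Flask
    app = Flask(__name__)
    app.register_blueprint(create_blueprint_v1(), url_prefix='/v1')
    return app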
|
import torch.nn as nn
import torch.nn.init as init
class MLP(nn.Module):
def __init__(self, input_dim, num_hidden_layers, hidden_dim, dropout=0.5, activation_fn=nn.ReLU):
super(MLP, self).__init__()
self.num_hidden_layers = num_hidden_layers
self.input_to_hidden = nn.Sequential(
nn.Dropout(p=dropout),
nn.Linear(in_features=input_dim, out_features=hidden_dim),
activation_fn(),
nn.BatchNorm1d(num_features=hidden_dim, affine=False)
)
        init.xavier_normal_(self.input_to_hidden[1].weight)
self.input_to_hidden[1].bias.data.zero_()
if num_hidden_layers > 1:
self.hiddens = nn.ModuleList(
[nn.Sequential(nn.Linear(hidden_dim, hidden_dim),
activation_fn(),
nn.BatchNorm1d(num_features=hidden_dim, affine=False)
) for i in range(num_hidden_layers - 1)]
)
for i in range(num_hidden_layers - 1):
                init.xavier_normal_(self.hiddens[i][0].weight)
self.hiddens[i][0].bias.data.zero_()
self.output_logit = nn.Linear(in_features=hidden_dim, out_features=2)
        init.xavier_normal_(self.output_logit.weight)
self.output_logit.bias.data.zero_()
def forward(self, x):
x = self.input_to_hidden(x)
if self.num_hidden_layers > 1:
for hidden in self.hiddens:
x = hidden(x)
x = self.output_logit(x)
return x
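# Minimal usage sketch (assumption, not part of the original module): build a
# small MLP and run a forward pass on random data. BatchNorm1d requires a
# batch size greater than 1 while the model is in training mode.
if __name__ == "__main__":
    import torch
    model = MLP(input_dim=16, num_hidden_layers=2, hidden_dim=32)
    dummy_batch = torch.randn(4, 16)   # 4 samples, 16 features each
    logits = model(dummy_batch)        # expected shape: (4, 2)
    print(logits.shape)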
|
import tkinter
from pytube import YouTube
root = tkinter.Tk()
root.geometry('500x300')
root.resizable(0,0)
root.title("Youtube Video Downloader")
link = tkinter.StringVar()
tkinter.Label(root, text ='Paste Link Here:', font ='arial 15 bold').place(x= 160, y = 60)
link_enter = tkinter.Entry(root, width = 70, textvariable = link).place(x = 32, y = 90)
def Downloader():
url =YouTube(str(link.get()))
video = url.streams.first()
video.download()
tkinter.Label(root, text ='DOWNLOADED', font ='arial 15').place(x= 180, y = 210)
tkinter.Button(root, text ='DOWNLOAD', font ='arial 15 bold', bg ='white', padx = 2, command = Downloader).place(x=180, y = 150)
root.mainloop()
|
"""
Deploy Artifacts to Anaconda and Quay
"""
import os
import subprocess as sp
import logging
from . import utils
logger = logging.getLogger(__name__)
def anaconda_upload(package: str, token: str = None, label: str = None) -> bool:
"""
Upload a package to anaconda.
Args:
package: Filename to built package
token: If None, use the environment variable ``ANACONDA_TOKEN``,
otherwise, use this as the token for authenticating the
anaconda client.
label: Optional label to add
Returns:
        True if the upload succeeded, False otherwise (including temporary
        failures such as a gateway timeout, which the caller may retry)
Raises:
ValueError
"""
label_arg = []
if label is not None:
label_arg = ['--label', label]
if not os.path.exists(package):
logger.error("UPLOAD ERROR: package %s cannot be found.",
package)
return False
if token is None:
token = os.environ.get('ANACONDA_TOKEN')
if token is None:
raise ValueError("Env var ANACONDA_TOKEN not found")
logger.info("UPLOAD uploading package %s", package)
try:
cmds = ["anaconda", "-t", token, 'upload', package] + label_arg
utils.run(cmds, mask=[token])
logger.info("UPLOAD SUCCESS: uploaded package %s", package)
return True
except sp.CalledProcessError as e:
if "already exists" in e.stdout:
# ignore error assuming that it is caused by
# existing package
logger.warning(
"UPLOAD WARNING: tried to upload package, got:\n "
"%s", e.stdout)
return True
elif "Gateway Timeout" in e.stdout:
logger.warning("UPLOAD TEMP FAILURE: Gateway timeout")
return False
else:
logger.error('UPLOAD ERROR: command: %s', e.cmd)
logger.error('UPLOAD ERROR: stdout+stderr: %s', e.stdout)
return False
def mulled_upload(image: str, quay_target: str) -> sp.CompletedProcess:
"""
Upload the build Docker images to quay.io with ``mulled-build push``.
Calls ``mulled-build push <image> -n <quay_target>``
Args:
image: name of image to push
        quay_target: name of image on quay
"""
cmd = ['mulled-build', 'push', image, '-n', quay_target]
mask = []
if os.environ.get('QUAY_OAUTH_TOKEN', False):
token = os.environ['QUAY_OAUTH_TOKEN']
cmd.extend(['--oauth-token', token])
mask = [token]
return utils.run(cmd, mask=mask)
def skopeo_upload(image_file: str, target: str,
creds: str, registry: str = "quay.io",
timeout: int = 600) -> bool:
"""
    Upload an image to a docker registry
    Uses ``skopeo`` to upload tar archives of docker images as created
    with e.g. ``docker save`` to a docker registry.
The image name and tag are read from the archive.
Args:
image_file: path to the file to be uploaded (may be gzip'ed)
target: namespace/repo for the image
creds: login credentials (``USER:PASS``)
registry: url of the registry. defaults to "quay.io"
timeout: timeout in seconds
"""
cmd = ['skopeo',
'--insecure-policy', # disable policy checks
'--command-timeout', str(timeout) + "s",
'copy',
'docker-archive:{}'.format(image_file),
'docker://{}/{}'.format(registry, target),
'--dest-creds', creds]
try:
utils.run(cmd, mask=creds.split(':'))
return True
except sp.CalledProcessError as exc:
logger.error("Failed to upload %s to %s", image_file, target)
for line in exc.stdout.splitlines():
logger.error("> %s", line)
return False
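# Usage sketch (assumption; the file name, target and credential variable are
# placeholders, not part of the original module):
#
#   ok = skopeo_upload("pkg-1.0.tar.gz", "myorg/pkg:1.0",
#                      creds=os.environ["QUAY_LOGIN"])
#   if not ok:
#       logger.error("upload failed, see log above")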
|
import os, py
from rpython.memory.gc import env
from rpython.rlib.rarithmetic import r_uint
from rpython.tool.udir import udir
class FakeEnviron:
def __init__(self, value):
self._value = value
def get(self, varname):
assert varname == 'FOOBAR'
return self._value
def check_equal(x, y):
assert x == y
assert type(x) == type(y)
def test_get_total_memory_darwin():
# this only tests clipping
BIG = 2 * env.addressable_size
SMALL = env.addressable_size / 2
assert env.addressable_size == env.get_total_memory_darwin(0)
assert env.addressable_size == env.get_total_memory_darwin(-1)
assert env.addressable_size == env.get_total_memory_darwin(BIG)
assert SMALL == env.get_total_memory_darwin(SMALL)
def test_get_total_memory():
# total memory should be at least a megabyte
assert env.get_total_memory() > 1024*1024
def test_read_from_env():
saved = os.environ
try:
os.environ = FakeEnviron(None)
check_equal(env.read_from_env('FOOBAR'), 0)
check_equal(env.read_uint_from_env('FOOBAR'), r_uint(0))
check_equal(env.read_float_from_env('FOOBAR'), 0.0)
#
os.environ = FakeEnviron('')
check_equal(env.read_from_env('FOOBAR'), 0)
check_equal(env.read_uint_from_env('FOOBAR'), r_uint(0))
check_equal(env.read_float_from_env('FOOBAR'), 0.0)
#
os.environ = FakeEnviron('???')
check_equal(env.read_from_env('FOOBAR'), 0)
check_equal(env.read_uint_from_env('FOOBAR'), r_uint(0))
check_equal(env.read_float_from_env('FOOBAR'), 0.0)
#
os.environ = FakeEnviron('1')
check_equal(env.read_from_env('FOOBAR'), 1)
check_equal(env.read_uint_from_env('FOOBAR'), r_uint(1))
check_equal(env.read_float_from_env('FOOBAR'), 1.0)
#
os.environ = FakeEnviron('12345678')
check_equal(env.read_from_env('FOOBAR'), 12345678)
check_equal(env.read_uint_from_env('FOOBAR'), r_uint(12345678))
check_equal(env.read_float_from_env('FOOBAR'), 12345678.0)
#
os.environ = FakeEnviron('1234B')
check_equal(env.read_from_env('FOOBAR'), 1234)
check_equal(env.read_uint_from_env('FOOBAR'), r_uint(1234))
check_equal(env.read_float_from_env('FOOBAR'), 1234.0)
#
os.environ = FakeEnviron('1.5')
check_equal(env.read_float_from_env('FOOBAR'), 1.5)
#
os.environ = FakeEnviron('1.5Kb')
check_equal(env.read_from_env('FOOBAR'), 1536)
check_equal(env.read_uint_from_env('FOOBAR'), r_uint(1536))
check_equal(env.read_float_from_env('FOOBAR'), 0.0)
#
os.environ = FakeEnviron('1.5mB')
check_equal(env.read_from_env('FOOBAR'), int(1.5*1024*1024))
check_equal(env.read_uint_from_env('FOOBAR'), r_uint(1.5*1024*1024))
check_equal(env.read_float_from_env('FOOBAR'), 0.0)
#
os.environ = FakeEnviron('1.5g')
check_equal(env.read_from_env('FOOBAR'), int(1.5*1024*1024*1024))
check_equal(env.read_uint_from_env('FOOBAR'), r_uint(1.5*1024*1024*1024))
check_equal(env.read_float_from_env('FOOBAR'), 0.0)
#
finally:
os.environ = saved
def test_get_total_memory_linux2():
filepath = udir.join('get_total_memory_linux2')
filepath.write("""\
MemTotal: 1976804 kB
MemFree: 32200 kB
Buffers: 144092 kB
Cached: 1385196 kB
SwapCached: 8408 kB
Active: 1181436 kB
etc.
""")
result = env.get_total_memory_linux2(str(filepath))
assert result == 1976804 * 1024
def test_get_total_memory_linux2_32bit_limit():
filepath = udir.join('get_total_memory_linux2')
filepath.write("""\
MemTotal: 3145728 kB
etc.
""")
saved = env.addressable_size
try:
env.addressable_size = float(2**31)
result = env.get_total_memory_linux2(str(filepath))
check_equal(result, float(2**31)) # limit hit
#
env.addressable_size = float(2**32)
result = env.get_total_memory_linux2(str(filepath))
check_equal(result, float(3145728 * 1024)) # limit not hit
finally:
env.addressable_size = saved
def test_estimate_best_nursery_size_linux2():
filepath = udir.join('estimate_best_nursery_size_linux2')
filepath.write("""\
processor : 0
vendor_id : GenuineIntel
cpu family : 6
model : 37
model name : Intel(R) Core(TM) i5 CPU M 540 @ 2.53GHz
stepping : 5
cpu MHz : 1199.000
cache size : 3072 KB
physical id : 0
siblings : 4
core id : 0
cpu cores : 2
apicid : 0
initial apicid : 0
fpu : yes
fpu_exception : yes
cpuid level : 11
wp : yes
flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx rdtscp lm constant_tsc arch_perfmon pebs bts rep_good xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm sse4_1 sse4_2 popcnt aes lahf_lm ida arat tpr_shadow vnmi flexpriority ept vpid
bogomips : 5054.78
clflush size : 64
cache_alignment : 64
address sizes : 36 bits physical, 48 bits virtual
power management:
processor : 1
vendor_id : GenuineIntel
cpu family : 6
model : 37
model name : Intel(R) Core(TM) i5 CPU M 540 @ 2.53GHz
stepping : 5
cpu MHz : 2534.000
cache size : 3072 KB
physical id : 0
siblings : 4
core id : 0
cpu cores : 2
apicid : 1
initial apicid : 1
fpu : yes
etc.
""")
result = env.get_L2cache_linux2_cpuinfo(str(filepath))
assert result == 3072 * 1024
def test_estimate_nursery_s390x():
filepath = udir.join('estimate_best_nursery_size_linux2')
filepath.write("""\
vendor_id : IBM/S390
# processors : 2
bogomips per cpu: 20325.00
...
cache2 : level=2 type=Data scope=Private size=2048K line_size=256 associativity=8
cache3 : level=2 type=Instruction scope=Private size=2048K line_size=256 associativity=8
...
""")
result = env.get_L2cache_linux2_cpuinfo_s390x(str(filepath))
assert result == 2048 * 1024
filepath = udir.join('estimate_best_nursery_size_linux3')
filepath.write("""\
vendor_id : IBM/S390
# processors : 2
bogomips per cpu: 9398.00
...
cache2 : level=2 type=Unified scope=Private size=1536K line_size=256 associativity=12
cache3 : level=3 type=Unified scope=Shared size=24576K line_size=256 associativity=12
...
""")
result = env.get_L2cache_linux2_cpuinfo_s390x(str(filepath), label='cache3')
assert result == 24576 * 1024
result = env.get_L2cache_linux2_cpuinfo_s390x(str(filepath), label='cache2')
assert result == 1536 * 1024
|
'''
Author: saleem
Minimal scheduler for running functions at a set interval.
'''
import time
class scheduler:
def __init__(self, h=0, m=0, s=0):
self.h = h
self.m = m
self.s = s
def runit(self, fn, *args):
time_step = self.h*3600 + self.m*60 + self.s
while True:
starttime = time.time()
fn(*args)
exec_time = time.time()-starttime
            tlambda = int(exec_time) // time_step
ttime = (tlambda+1)*time_step
time.sleep(ttime - ((time.time()-starttime)%ttime))
if __name__ == "__main__":
def fn(a, b):
        print(a + b)
return
ss = scheduler(s=3)
ss.runit(fn, 1, 3)
|
# try-finally support
import sys
# Basic finally support:
print "basic_finally"
def basic_finally(n):
try:
1/n
print "1"
except:
print "2"
else:
print "3"
finally:
print "4"
print "5"
print basic_finally(1)
print basic_finally(0)
print
# If we return from inside the try part of a try-finally, we have to save the return value,
# execute the finally block, then do the actual return.
print "finally_after_return"
def finally_after_return():
try:
print 1
return 2
finally:
print 3
print 4
print finally_after_return()
print
# Return from a finally will disable any exception propagation:
print "return_from_finally"
def return_from_finally(to_throw=None):
try:
if to_throw:
raise to_throw
return 1
except:
print "except"
return 2
else:
print "else"
return 3
finally:
print "finally"
return 4
print return_from_finally()
print return_from_finally(Exception)
print
# A break in a finally will disable any exception propagation
# Note: "break" is allowed in a finally, but "continue" is not (see finally_continue.py)
print "break_from_finally"
def break_from_finally(to_throw=None):
for i in xrange(5):
print i
try:
if to_throw:
raise to_throw
return 1
except:
return 2
else:
return 3
finally:
break
return 4
print break_from_finally(None)
print break_from_finally(Exception)
print
# I guess you're allowed to yield from a finally block.
# Once execution is returned, exception propagation will continue
print "yield_from_finally"
def yield_from_finally(to_throw=None):
for i in xrange(5):
try:
if to_throw:
raise to_throw
finally:
yield i
for i in yield_from_finally():
print i
try:
for i in yield_from_finally(Exception("ex")):
print i
# Throw a different exception just for fun:
try:
raise Exception()
except:
pass
except Exception, e:
print e
print
# Similarly for continues
print "finally_after_continue"
def finally_after_continue():
for i in xrange(5):
try:
continue
finally:
print 3
print finally_after_continue()
print
# And breaks
print "finally_after_break"
def finally_after_break():
for i in xrange(5):
try:
break
finally:
print 3
print finally_after_break()
print
# Exceptions thrown in the else or except blocks of a try-finally still run the finally
print "exception_in_elseexcept"
def exception_in_elseexcept(throw0=None, throw1=None, throw2=None):
try:
if throw0:
raise throw0
except:
print "except"
if throw1:
raise throw1
else:
print "else"
if throw2:
raise throw2
finally:
print "in finally"
for t0 in [None, Exception("exc 0")]:
for t1 in [None, Exception("exc 1")]:
for t2 in [None, Exception("exc 2")]:
print "throwing:", t0, t1, t2
try:
exception_in_elseexcept(t0, t1, t2)
print "no exception"
except Exception, e:
print "threw:", e
print
# An exception thrown and caught inside a finally doesn't hide the current exception propagation
print "exception_in_finally"
def exception_in_finally():
try:
1/0
finally:
print sys.exc_info()[0]
try:
raise KeyError()
except KeyError:
pass
print sys.exc_info()[0]
try:
print exception_in_finally()
print "no exception"
except ZeroDivisionError, e:
print e
print
# sys.exc_clear() doesn't stop finally-exception propagation
print "sysclear_in_finally"
def sysclear_in_finally():
try:
1/0
finally:
sys.exc_clear()
try:
print sysclear_in_finally()
print "no exception"
except ZeroDivisionError, e:
print e
print
# An uncaught exception from a finally will override the previous exception:
print "raise_from_finally"
def raise_from_finally():
try:
raise Exception("exception 1")
finally:
raise Exception("exception 2")
try:
raise_from_finally()
except Exception, e:
print e
print
# Make sure we can handle various nestings of try-finally
print "nested_finally"
def nested_finally():
try:
try:
for i in xrange(5):
pass
finally:
print "finally1"
finally:
print "finally2"
try:
for j in xrange(5):
pass
finally:
print "finally3"
nested_finally()
print
# finally blocks hide their exceptions even from bare "raise" statements:
print "bare_raise_in_finally"
def bare_raise_in_finally():
try:
raise Exception("first exception")
except:
pass
try:
1/0
finally:
raise # raises the "first exception" exception above
try:
bare_raise_in_finally()
except Exception, e:
print e
print
# Some older tests. Keep them around, because why not
def f3():
print
print "f3"
def f():
print "getting the exc handler type"
raise AssertionError()
try:
print "in the first try"
# f() won't get evaluated until the exception is actually thrown:
try:
print "in the second try"
raise Exception()
except f():
print "In the inner exception block??"
finally:
# This will get called even though there was an exception in
# evaluating the exception-handler type:
print "inner finally"
except Exception:
# This will print "AssertionError", from the f() call, *not* the Exception
# that was thrown in the inner try block.
print "In the outer exception block:", sys.exc_info()[0].__name__
finally:
print "outer finally"
f3()
def f6():
print
print "f6"
# A finally block must somehow track how it was entered, because it's not based
# on the value of sys.exc_info at the end of the finally block:
def inner(nested_throw, reraise):
try:
pass
finally:
if nested_throw:
try:
raise AttributeError()
except:
pass
print sys.exc_info()[0]
if reraise:
raise
inner(False, False) # no exception raised
inner(True, False) # no exception raised
try:
inner(True, True)
# Shouldn't get here
raise Exception()
except AttributeError:
print "the thrown AttributeError raised as expected"
# Have to call this, because the inner throw can reraise the out-of-except
# exception from this scope!
sys.exc_clear()
try:
inner(False, True)
# Shouldn't get here
raise Exception()
except TypeError, e:
print "Got TypeError as expected, since exc_info was None"
print e
f6()
def f7():
print
print "f7"
# Similar test to f6, but this time with an exception propagating
# up through a finally block.
# An exception thrown inside that finally shouldn't change the exception
# that will end up getting propagated
def inner():
try:
raise AttributeError()
finally:
try:
raise NotImplementedError()
except:
pass
print sys.exc_info()[0].__name__
try:
inner()
except:
print sys.exc_info()[0].__name__
f7()
def f9():
print
print "f9"
# Exceptions thrown inside a catch block should still go through the finally,
# but not other catch blocks.
try:
try:
raise Exception()
except Exception:
print "here"
raise AttributeError()
except AttributeError:
print "shouldn't get here"
finally:
print "in finally"
except AttributeError:
pass
f9()
|
xh = input("Enter Hours: ")
xr = input("Enter Rate: ")
xp = float(xh) * float(xr)
print("Pay: ") |
from polyphony import pure
from polyphony import testbench
@pure
def rand(seed, x, y):
import random
random.seed(seed)
return random.randint(x, y)
@testbench
def test():
assert rand(0, 1, 1000) == rand(0, 1, 1000)
assert rand(0, -1000, 1000) == rand(0, -1000, 1000)
test()
|
from django.contrib import admin
from django.urls import include, path
from rest_framework.routers import DefaultRouter
from basic import viewsets
from drf_triad_permissions.views import triad_permissions_js
router = DefaultRouter()
router.register("users", viewsets.UserViewSet)
router.register(r"entities-by-user/(?P<username>\w+)", viewsets.EntityByUserViewSet)
urlpatterns = [
path("admin/", admin.site.urls),
path("permissions.js", triad_permissions_js, name="triad_permissions_js"),
path("api/", include(router.urls)),
]
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_account
short_description: Manages accounts on Apache CloudStack based clouds.
description:
- Create, disable, lock, enable and remove accounts.
author: René Moser (@resmo)
options:
name:
description:
- Name of account.
type: str
required: true
username:
description:
- Username of the user to be created if account did not exist.
- Required on I(state=present).
type: str
password:
description:
- Password of the user to be created if account did not exist.
- Required on I(state=present) if I(ldap_domain) is not set.
type: str
first_name:
description:
- First name of the user to be created if account did not exist.
- Required on I(state=present) if I(ldap_domain) is not set.
type: str
last_name:
description:
- Last name of the user to be created if account did not exist.
- Required on I(state=present) if I(ldap_domain) is not set.
type: str
email:
description:
- Email of the user to be created if account did not exist.
- Required on I(state=present) if I(ldap_domain) is not set.
type: str
timezone:
description:
- Timezone of the user to be created if account did not exist.
type: str
network_domain:
description:
- Network domain of the account.
type: str
account_type:
description:
- Type of the account.
type: str
choices: [ user, root_admin, domain_admin ]
default: user
domain:
description:
- Domain the account is related to.
type: str
default: ROOT
role:
description:
- Creates the account under the specified role name or id.
type: str
ldap_domain:
description:
- Name of the LDAP group or OU to bind.
- If set, account will be linked to LDAP.
type: str
ldap_type:
description:
- Type of the ldap name. GROUP or OU, defaults to GROUP.
type: str
choices: [ GROUP, OU ]
default: GROUP
state:
description:
- State of the account.
- C(unlocked) is an alias for C(enabled).
type: str
choices: [ present, absent, enabled, disabled, locked, unlocked ]
default: present
poll_async:
description:
- Poll async jobs until job has finished.
type: bool
default: yes
extends_documentation_fragment:
- community.general.cloudstack
'''
EXAMPLES = '''
- name: create an account in domain 'CUSTOMERS'
cs_account:
name: customer_xy
username: customer_xy
password: S3Cur3
last_name: Doe
first_name: John
email: [email protected]
domain: CUSTOMERS
role: Domain Admin
delegate_to: localhost
- name: Lock an existing account in domain 'CUSTOMERS'
cs_account:
name: customer_xy
domain: CUSTOMERS
state: locked
delegate_to: localhost
- name: Disable an existing account in domain 'CUSTOMERS'
cs_account:
name: customer_xy
domain: CUSTOMERS
state: disabled
delegate_to: localhost
- name: Enable an existing account in domain 'CUSTOMERS'
cs_account:
name: customer_xy
domain: CUSTOMERS
state: enabled
delegate_to: localhost
- name: Remove an account in domain 'CUSTOMERS'
cs_account:
name: customer_xy
domain: CUSTOMERS
state: absent
delegate_to: localhost
- name: Create a single user LDAP account in domain 'CUSTOMERS'
cs_account:
name: customer_xy
username: customer_xy
domain: CUSTOMERS
ldap_domain: cn=customer_xy,cn=team_xy,ou=People,dc=domain,dc=local
delegate_to: localhost
- name: Create a LDAP account in domain 'CUSTOMERS' and bind it to a LDAP group
cs_account:
name: team_xy
username: customer_xy
domain: CUSTOMERS
ldap_domain: cn=team_xy,ou=People,dc=domain,dc=local
delegate_to: localhost
'''
RETURN = '''
---
id:
description: UUID of the account.
returned: success
type: str
sample: 87b1e0ce-4e01-11e4-bb66-0050569e64b8
name:
description: Name of the account.
returned: success
type: str
sample: [email protected]
account_type:
description: Type of the account.
returned: success
type: str
sample: user
state:
description: State of the account.
returned: success
type: str
sample: enabled
network_domain:
description: Network domain of the account.
returned: success
type: str
sample: example.local
domain:
  description: Domain the account is related to.
returned: success
type: str
sample: ROOT
role:
description: The role name of the account
returned: success
type: str
sample: Domain Admin
'''
# import cloudstack common
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.cloudstack import (
AnsibleCloudStack,
cs_argument_spec,
cs_required_together
)
class AnsibleCloudStackAccount(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackAccount, self).__init__(module)
self.returns = {
'networkdomain': 'network_domain',
'rolename': 'role',
}
self.account = None
self.account_types = {
'user': 0,
'root_admin': 1,
'domain_admin': 2,
}
def get_role_id(self):
role_param = self.module.params.get('role')
role_id = None
if role_param:
role_list = self.query_api('listRoles')
for role in role_list['role']:
if role_param in [role['name'], role['id']]:
role_id = role['id']
if not role_id:
self.module.fail_json(msg="Role not found: %s" % role_param)
return role_id
def get_account_type(self):
account_type = self.module.params.get('account_type')
return self.account_types[account_type]
def get_account(self):
if not self.account:
args = {
'listall': True,
'domainid': self.get_domain(key='id'),
'fetch_list': True,
}
accounts = self.query_api('listAccounts', **args)
if accounts:
account_name = self.module.params.get('name')
for a in accounts:
if account_name == a['name']:
self.account = a
break
return self.account
def enable_account(self):
account = self.get_account()
if not account:
account = self.present_account()
if account['state'].lower() != 'enabled':
self.result['changed'] = True
args = {
'id': account['id'],
'account': self.module.params.get('name'),
'domainid': self.get_domain(key='id')
}
if not self.module.check_mode:
res = self.query_api('enableAccount', **args)
account = res['account']
return account
def lock_account(self):
return self.lock_or_disable_account(lock=True)
def disable_account(self):
return self.lock_or_disable_account()
def lock_or_disable_account(self, lock=False):
account = self.get_account()
if not account:
account = self.present_account()
# we need to enable the account to lock it.
if lock and account['state'].lower() == 'disabled':
account = self.enable_account()
if (lock and account['state'].lower() != 'locked' or
not lock and account['state'].lower() != 'disabled'):
self.result['changed'] = True
args = {
'id': account['id'],
'account': self.module.params.get('name'),
'domainid': self.get_domain(key='id'),
'lock': lock,
}
if not self.module.check_mode:
account = self.query_api('disableAccount', **args)
poll_async = self.module.params.get('poll_async')
if poll_async:
account = self.poll_job(account, 'account')
return account
def present_account(self):
account = self.get_account()
if not account:
self.result['changed'] = True
if self.module.params.get('ldap_domain'):
required_params = [
'domain',
'username',
]
self.module.fail_on_missing_params(required_params=required_params)
account = self.create_ldap_account(account)
else:
required_params = [
'email',
'username',
'password',
'first_name',
'last_name',
]
self.module.fail_on_missing_params(required_params=required_params)
account = self.create_account(account)
return account
def create_ldap_account(self, account):
args = {
'account': self.module.params.get('name'),
'domainid': self.get_domain(key='id'),
'accounttype': self.get_account_type(),
'networkdomain': self.module.params.get('network_domain'),
'username': self.module.params.get('username'),
'timezone': self.module.params.get('timezone'),
'roleid': self.get_role_id()
}
if not self.module.check_mode:
res = self.query_api('ldapCreateAccount', **args)
account = res['account']
args = {
'account': self.module.params.get('name'),
'domainid': self.get_domain(key='id'),
'accounttype': self.get_account_type(),
'ldapdomain': self.module.params.get('ldap_domain'),
'type': self.module.params.get('ldap_type')
}
self.query_api('linkAccountToLdap', **args)
return account
def create_account(self, account):
args = {
'account': self.module.params.get('name'),
'domainid': self.get_domain(key='id'),
'accounttype': self.get_account_type(),
'networkdomain': self.module.params.get('network_domain'),
'username': self.module.params.get('username'),
'password': self.module.params.get('password'),
'firstname': self.module.params.get('first_name'),
'lastname': self.module.params.get('last_name'),
'email': self.module.params.get('email'),
'timezone': self.module.params.get('timezone'),
'roleid': self.get_role_id()
}
if not self.module.check_mode:
res = self.query_api('createAccount', **args)
account = res['account']
return account
def absent_account(self):
account = self.get_account()
if account:
self.result['changed'] = True
if not self.module.check_mode:
res = self.query_api('deleteAccount', id=account['id'])
poll_async = self.module.params.get('poll_async')
if poll_async:
self.poll_job(res, 'account')
return account
def get_result(self, account):
super(AnsibleCloudStackAccount, self).get_result(account)
if account:
if 'accounttype' in account:
for key, value in self.account_types.items():
if value == account['accounttype']:
self.result['account_type'] = key
break
return self.result
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
name=dict(required=True),
state=dict(choices=['present', 'absent', 'enabled', 'disabled', 'locked', 'unlocked'], default='present'),
account_type=dict(choices=['user', 'root_admin', 'domain_admin'], default='user'),
network_domain=dict(),
domain=dict(default='ROOT'),
email=dict(),
first_name=dict(),
last_name=dict(),
username=dict(),
password=dict(no_log=True),
timezone=dict(),
role=dict(),
ldap_domain=dict(),
ldap_type=dict(choices=['GROUP', 'OU'], default='GROUP'),
poll_async=dict(type='bool', default=True),
))
module = AnsibleModule(
argument_spec=argument_spec,
required_together=cs_required_together(),
supports_check_mode=True
)
acs_acc = AnsibleCloudStackAccount(module)
state = module.params.get('state')
if state in ['absent']:
account = acs_acc.absent_account()
elif state in ['enabled', 'unlocked']:
account = acs_acc.enable_account()
elif state in ['disabled']:
account = acs_acc.disable_account()
elif state in ['locked']:
account = acs_acc.lock_account()
else:
account = acs_acc.present_account()
result = acs_acc.get_result(account)
module.exit_json(**result)
if __name__ == '__main__':
main()
|
import asyncio
from typing import Optional
from tests import APITestCase, MAINNET_WS_URI, WEBSOCKET_TIMEOUT_GET_REQUEST
from tradehub.websocket_client import DemexWebsocket
class TestWSGetRecentTrades(APITestCase):
def test_get_recent_trades_structure(self):
"""
Check if response match expected dict structure.
:return:
"""
expect: dict = {
"id": str,
"sequence_number": int,
"result": [
{
"id": str,
"block_created_at": str,
"taker_id": str,
"taker_address": str,
"taker_fee_amount": str,
"taker_fee_denom": str,
"taker_side": str,
"maker_id": str,
"maker_address": str,
"maker_fee_amount": str,
"maker_fee_denom": str,
"maker_side": str,
"market": str,
"price": str,
"quantity": str,
"liquidation": str,
"taker_username": str,
"maker_username": str,
"block_height": str
},
]
}
# connect to websocket
client = DemexWebsocket(uri=MAINNET_WS_URI)
# little work around to save the response
self.response: Optional[dict] = None
async def on_connect():
await client.get_recent_trades("recent_trades", "swth_eth1")
async def on_message(message: dict):
# save response into self
self.response = message
await client.disconnect()
try:
loop = asyncio.get_event_loop()
loop.run_until_complete(asyncio.wait_for(client.connect(on_connect_callback=on_connect,
on_receive_message_callback=on_message),
WEBSOCKET_TIMEOUT_GET_REQUEST))
except asyncio.TimeoutError:
raise TimeoutError("Test did not complete in time.")
if not self.response:
raise RuntimeError("Did not receive a response.")
self.assertDictStructure(expect, self.response)
|
from datetime import datetime, date
import os
import re
import subprocess
import yaml
from boto.s3.connection import S3Connection
bucket_name = 'canvas_public_ugc'
results_bucket_name = 'canvas-ugc-backup-logging'
bucket_path = 'original'
base_dest = '/Volumes/Backup/ugc-backups'
prefix_dir_length = 3
use_date_directories = False
def datestr():
today = date.today()
return "{0}{1}{2}".format(today.year, str(today.month).zfill(2), str(today.day).zfill(2))
def destination_directory():
if use_date_directories:
return "{0}/{1}".format(base_dest, datestr())
else:
return base_dest
def ensure_destination_directory(name):
if not os.path.exists(name):
os.makedirs(name)
def shasum(fname):
hash_output = subprocess.Popen(['shasum', fname], stdout=subprocess.PIPE).communicate()[0]
try:
return hash_output.split()[0]
except IndexError:
print hash_output
return "doesnotmatch"
def check_hash(full_path, filename):
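    # The UGC objects appear to be keyed by their checksum, so the expected
    # digest is everything in the file name before the first dot,
    # e.g. "ab12cd.png" -> "ab12cd".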
expected = re.sub(r'^([^\.]+)\..+$', r'\1', filename)
actual = shasum(full_path)
correct = (expected == actual)
if not correct:
print "{0}-{1}:{2}".format(full_path, actual, correct)
return correct
def get_dir_size(name):
p1 = subprocess.Popen(['df', '-h'], stdout=subprocess.PIPE)
p2 = subprocess.Popen(['grep', 'Backup'], stdin=p1.stdout, stdout=subprocess.PIPE)
p1.stdout.close()
return p2.communicate()[0]
def store_results(start_time, end_time, stored, skipped, failed, failed_list):
keyname = 'ugc-backup-results-{0}'.format(datestr())
conn = S3Connection()
bucket = conn.get_bucket(results_bucket_name)
key = bucket.new_key(keyname)
backup_size_str = get_dir_size(destination_directory())
report = {
'start_time': start_time,
'end_time': end_time,
'stored': stored,
'skipped': skipped,
'failed': failed,
'size': backup_size_str,
'failed_list': failed_list,
}
key.set_contents_from_string(yaml.dump(report))
def do_backup():
stored = 0
skipped = 0
failed = 0
failed_list = []
start_time = datetime.utcnow()
dest = destination_directory()
ensure_destination_directory(dest)
conn = S3Connection()
bucket = conn.get_bucket(bucket_name)
try:
for k in iter(bucket.list(prefix=bucket_path)):
try:
if not k.name.endswith("/"):
key_part = k.name.split('/')[1]
sdir = "{0}/{1}".format(dest, key_part[:prefix_dir_length])
fname = "{0}/{1}".format(sdir, key_part)
ensure_destination_directory(sdir)
if not os.path.exists(fname):
print "Getting {0}/{1} ...".format(bucket.name, k.name),
k.get_contents_to_filename(fname)
print "done"
if check_hash(fname, key_part):
stored += 1
else:
raise Exception("File content does not match hash")
else:
skipped += 1
except KeyboardInterrupt as e:
raise e
except Exception as e:
failed_list.append(k.name)
failed += 1
except KeyboardInterrupt:
print
print "finishing up..."
finally:
store_results(start_time, datetime.utcnow(), stored, skipped, failed, failed_list)
if __name__ == "__main__":
do_backup()
|
from django.contrib import admin
from django.utils.translation import ugettext
from django.utils.encoding import force_unicode
from models import LogEntry, LogAggregate
from djangologdb import settings as djangologdb_settings
class LogEntryInline(admin.TabularInline):
model = LogEntry
class LogAggregateOptions(admin.ModelAdmin):
list_display = ('name', 'module', 'function_name', 'line_number', 'level', 'last_seen', 'times_seen',)
list_filter = ('name', 'level',)
date_hierarchy = 'last_seen'
ordering = ('-last_seen',)
inlines = (LogEntryInline,)
def change_view(self, request, object_id, extra_context=None):
djangologdb_context = {
'djangologdb_settings': djangologdb_settings,
'aggregate': 'checksum',
'title': ugettext('View %s') % force_unicode(self.opts.verbose_name),
}
return super(LogAggregateOptions, self).change_view(request, object_id, extra_context=djangologdb_context)
def changelist_view(self, request, extra_context=None):
djangologdb_context = {
'djangologdb_settings': djangologdb_settings,
'aggregate': 'checksum',
'title': ugettext('Select %s to view') % force_unicode(self.opts.verbose_name),
}
return super(LogAggregateOptions, self).changelist_view(request, extra_context=djangologdb_context)
class LogEntryOptions(admin.ModelAdmin):
list_display = ('created', 'level', 'name', 'module', 'function_name', 'line_number', 'process', 'thread', 'get_message_display', 'extra')
list_filter = ('name', 'level',)
date_hierarchy = 'created'
ordering = ('-created',)
def change_view(self, request, object_id, extra_context=None):
djangologdb_context = {
'djangologdb_settings': djangologdb_settings,
'title': ugettext('View %s') % force_unicode(self.opts.verbose_name),
}
return super(LogEntryOptions, self).change_view(request, object_id, extra_context=djangologdb_context)
def changelist_view(self, request, extra_context=None):
djangologdb_context = {
'djangologdb_settings': djangologdb_settings,
'aggregate': 'level',
'title': ugettext('Select %s to view') % force_unicode(self.opts.verbose_name),
}
return super(LogEntryOptions, self).changelist_view(request, extra_context=djangologdb_context)
admin.site.register(LogAggregate, LogAggregateOptions)
admin.site.register(LogEntry, LogEntryOptions)
|
# compute_rnaQuast.py
#
# Laura Tung
#
# Usage: compute_rnaQuast.py <result_dir>
#
# <result_dir> is the directory containing rnaQUAST_output and rnaQUAST_output_1
import sys
import numpy as np
def load_data(dataset):
loaded_isoform = np.loadtxt(dataset+"/isoform_data", dtype='int', usecols=(2, 3, 4))
loaded_matched = np.loadtxt(dataset+"/matched_data", dtype='int', usecols=(1, 2, 3))
return loaded_isoform, loaded_matched
if __name__ == "__main__":
# load rnaQUAST_output (75% and 95%)
loaded_isoform, loaded_matched = load_data(sys.argv[1]+"/rnaQUAST_output")
# load rnaQUAST_output_1 (0% and 50%)
loaded_isoform_1, loaded_matched_1 = load_data(sys.argv[1]+"/rnaQUAST_output_1")
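    # Each loaded array appears to hold cumulative counts at two thresholds:
    # rnaQUAST_output rows correspond to the >=75% and >=95% thresholds, and
    # rnaQUAST_output_1 rows to the >=0% and >=50% thresholds (columns:
    # Scallop, StringTie, Isoseq). The prints below turn these cumulative
    # counts into per-bin counts by subtracting adjacent thresholds.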
print("\t\t\t\tScallop\t\tStringTie\tIsoseq")
# assembled isoforms
print("0-50%-assembled-isoforms\t"+str(loaded_isoform_1[0][0]-loaded_isoform_1[1][0])+"\t\t"+str(loaded_isoform_1[0][1]-loaded_isoform_1[1][1])+"\t\t"+str(loaded_isoform_1[0][2]-loaded_isoform_1[1][2]))
print("50-75%-assembled-isoforms\t"+str(loaded_isoform_1[1][0]-loaded_isoform[0][0])+"\t\t"+str(loaded_isoform_1[1][1]-loaded_isoform[0][1])+"\t\t"+str(loaded_isoform_1[1][2]-loaded_isoform[0][2]))
print("75-95%-assembled-isoforms\t"+str(loaded_isoform[0][0]-loaded_isoform[1][0])+"\t\t"+str(loaded_isoform[0][1]-loaded_isoform[1][1])+"\t\t"+str(loaded_isoform[0][2]-loaded_isoform[1][2]))
print("95-100%-assembled-isoforms\t"+str(loaded_isoform[1][0])+"\t\t"+str(loaded_isoform[1][1])+"\t\t"+str(loaded_isoform[1][2]))
# matched
print("0-50%-matched-transcripts\t"+str(loaded_matched_1[0][0]-loaded_matched_1[1][0])+"\t\t"+str(loaded_matched_1[0][1]-loaded_matched_1[1][1])+"\t\t"+str(loaded_matched_1[0][2]-loaded_matched_1[1][2]))
print("50-75%-matched-transcripts\t"+str(loaded_matched_1[1][0]-loaded_matched[0][0])+"\t\t"+str(loaded_matched_1[1][1]-loaded_matched[0][1])+"\t\t"+str(loaded_matched_1[1][2]-loaded_matched[0][2]))
print("75-95%-matched-transcripts\t"+str(loaded_matched[0][0]-loaded_matched[1][0])+"\t\t"+str(loaded_matched[0][1]-loaded_matched[1][1])+"\t\t"+str(loaded_matched[0][2]-loaded_matched[1][2]))
print("95-100%-matched-transcripts\t"+str(loaded_matched[1][0])+"\t\t"+str(loaded_matched[1][1])+"\t\t"+str(loaded_matched[1][2]))
|
import csv
import os
import random
import tempfile
from itertools import groupby
from operator import itemgetter
from django.conf import settings
from django_rq import job
from datetime import datetime
from terra.utils import gsutilCopy
from estimators.models import Annotation, Estimator, ImageTile
from tasks.models import Task, TaskLogEntry
from . import run_cloudml
@job("default")
def start_training_job(task_id):
task = Task.objects.get(pk=task_id)
try:
prepare_artifacts(task)
job_name = f'train_{task_id}_{datetime.now().strftime("%Y%m%d_%H%M%S")}'
task.internal_metadata.update(uses_cloudml=True)
run_cloudml(task, './submit_job.sh', job_name)
task.internal_metadata.update(cloudml_job_name=job_name)
task.save(update_fields=["internal_metadata"])
except Exception as err:
err_msg = str(err)
TaskLogEntry.objects.create(task=task,
log=dict(error=err_msg),
logged_at=datetime.now())
print(f"Error: {err_msg}")
task.mark_as_failed(reason=err_msg)
def prepare_artifacts(task):
generate_annotations_csv(task)
generate_classes_csv(task)
upload_image_tiles(task)
def train_val_split_rows(rows, val_size=0.2):
# FIXME Consider splitting in a stratified manner...
# Class balancing?
random.shuffle(rows)
n_val_size = round(len(rows) * val_size)
return rows[n_val_size:], rows[:n_val_size]
def constrain_and_scale(coord, max_value):
# FIXME !! When Analytics starts saving annotations with scaled coordinates
# (from 0 to 1), replace with:
# return round(min(max(coord, 0), 1) * max_value)
# if coord < 0 or coord > IMAGE_TILE_SIZE:
# import pdb
# pdb.set_trace()
# pass
return round(min(max(coord, 0), max_value))
def build_annotations_csv_rows(annotations):
rows = []
for annotation in annotations:
tile = annotation.image_tile
w, h = tile.width, tile.height
for s in annotation.segments:
print(annotation, s)
row = {}
x1, x2 = sorted([s['x'], s['x'] + s['width']])
y1, y2 = sorted([s['y'], s['y'] + s['height']])
row['x1'] = constrain_and_scale(x1, w)
row['x2'] = constrain_and_scale(x2, w)
row['y1'] = constrain_and_scale(y1, h)
row['y2'] = constrain_and_scale(y2, h)
row['tile_path'] = 'img/{basename}'.format(basename=os.path.join(
os.path.basename(os.path.normpath(tile.source_tile_path)),
os.path.basename(tile.tile_file.name)))
row['label'] = s['label']
rows.append(row)
return rows
def generate_annotations_csv(task):
annotations = Annotation.objects.filter(
estimator__uuid=task.kwargs["estimator"])
if sum([len(a.segments)
for a in annotations]) < settings.MIN_ANNOTATION_NEEDED:
raise Exception("Not enough labels for training. "
"You need at least {} objects of each class.".format(
settings.MIN_ANNOTATION_NEEDED))
rows = build_annotations_csv_rows(annotations)
rows_train, rows_val = train_val_split_rows(rows)
urls = []
for name, rows in zip(['train', 'val'], [rows_train, rows_val]):
url = os.path.join(task.input_artifacts_url, '{}.csv'.format(name))
upload_csv(url, rows, ('tile_path', 'x1', 'y1', 'x2', 'y2', 'label'))
urls.append(url)
return urls
def generate_classes_csv(job):
estimator = Estimator.objects.get(uuid=job.kwargs["estimator"])
rows = [
dict(label=label, class_id=i)
for i, label in enumerate(estimator.classes)
]
url = os.path.join(job.input_artifacts_url, 'classes.csv')
upload_csv(url, rows, ('label', 'class_id'))
def upload_csv(url, rows, fieldnames):
with tempfile.NamedTemporaryFile() as tmpfile:
with open(tmpfile.name, 'w') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
for row in rows:
writer.writerow(row)
gsutilCopy(tmpfile.name, url)
def upload_image_tiles(job):
annotations = Annotation.objects.filter(
estimator__uuid=job.kwargs["estimator"]).all()
image_tiles = [a.image_tile for a in annotations]
if len(image_tiles) == 0:
raise Exception(
"There are no tiles with labels. Please check your input.")
image_tile_urls = [
'gs://{bucket}/{name}'.format(bucket=settings.GS_BUCKET_NAME,
name=t.tile_file.name)
for t in image_tiles
]
image_file_names = [
os.path.dirname(t.tile_file.name).split("/")[-1] for t in image_tiles
]
seq = sorted(zip(image_file_names, image_tile_urls), key=itemgetter(0))
groups = groupby(seq, itemgetter(0))
for img_file_name, urls in groups:
urls = [url for _, url in urls]
dst_url = os.path.join(job.input_artifacts_url, 'img/', img_file_name)
gsutilCopy(' '.join(urls), dst_url)
|
import unittest
from intuitquickbooks.helpers import qb_date_format, qb_datetime_format, qb_datetime_utc_offset_format
from datetime import datetime, date
class HelpersTests(unittest.TestCase):
def test_qb_date_format(self):
result = qb_date_format(date(2016, 7, 22))
        self.assertEqual(result, '2016-07-22')
def test_qb_datetime_format(self):
result = qb_datetime_format(datetime(2016, 7, 22, 10, 35, 00))
        self.assertEqual(result, '2016-07-22T10:35:00')
def test_qb_datetime_utc_offset_format(self):
result = qb_datetime_utc_offset_format(datetime(2016, 7, 22, 10, 35, 00), '-06:00')
        self.assertEqual(result, '2016-07-22T10:35:00-06:00')
|
import re
import json
import math
import time
import random
import pandas as pd
import bs4
import requests
from tqdm.notebook import tqdm
from .formatting.text_tools import title_to_snake_case
from .pandas.data import dict_to_col
pd.set_option('mode.chained_assignment', None)
# Objects included in this file:
# grades_converter
# Functions included in this file:
# # preprocess
# # get_ratings_from_mp
# # transform_features
grades_converter = {'V-easy': -1, 'V-easy PG13': -1, '5.9 V-easy': -1, 'V-easy R': -0.75,
'V0-': -0.25, '5.9 V0-': -0.25, 'V0- PG13': -0.25, 'V0- R': -0.25,
'5.8+ V0': 0, '5.9 V0': 0, 'V0': 0, 'V0 PG13': 0,
'5.10a V0 R': 0.25, 'V0 R': 0.25, 'V0 X': 0.25, '5.8+ V0 X': 0.25, 'V0+': 0.25, 'V0+ PG13': 0.25,
'V0+ R': 0.5, 'V0-1': 0.5, '5.10- V0-1': 0.5,
'V1-': 0.75, 'V1': 1, 'V1 PG13': 1, '5.9 V1': 1,
'V1 R': 1.25, 'V1 X':1.25, 'V1+': 1.25, 'V1+ PG13': 1.25, 'V1-2': 1.5,
'V2-': 1.75, '5.10+ V2': 2, 'V2': 2, 'V2 PG13': 2, '5.11b V2': 2,
'V2 R': 2.25, 'V2+': 2.25, 'V2-3': 2.5, '5.10- V2-3': 2.5, 'V2+ X': 2.5,
'V3-': 2.75, 'V3- R': 3, 'V3': 3, 'V3 PG13': 3,
'V3 R': 3.25, '5.11c V3 R': 3.25, 'V3+': 3.25, 'V3+ R':3.5, 'V3-4': 3.5,
'V4-': 3.75, 'V4': 4, 'V4 PG13': 4,
'V4 R': 4.25, 'V4 X': 4.25, 'V4+': 4.25, 'V4+ PG13': 4.25, 'V4-5': 4.5,
'V5-': 4.75, 'V5': 5, 'V5 PG13': 5, '5.12c V5 X': 5,
'V5 R': 5.25, 'V5+': 5.25, 'V5+ X': 5.5, 'V5-6': 5.5,
'V6-': 5.75, 'V6- PG13': 5.75, 'V6- R': 6, 'V6': 6, 'V6 PG13': 6,
'V6 R': 6.25, 'V6+': 6.25, 'V6-7': 6.5, 'V6-7 PG13': 6.5,
'V7-': 6.75, 'V7': 7, 'V7 PG13': 7,
'V7 R': 7.25, 'V7+':7.25, 'V7-8': 7.5,
'V7-8 R': 7.75, 'V8-': 7.75, 'V8': 8, 'V8 PG13': 8,
'V8 R':8.25, 'V8 X': 8.25, 'V8+': 8.25, 'V8-9': 8.5,
'V9-': 8.75, 'V9': 9, 'V9 PG13': 9,
'V9 R': 9.25, 'V9+': 9.25, 'V9+ PG13': 9.25, 'V9 X': 9.25, 'V9-10':9.5, 'V9-10 PG13': 9.5,
'V10-': 9.75, 'V10': 10, 'V10 PG13': 10,
'V10 R': 10.25, 'V10 X': 10.25, 'V10+': 10.25, 'V10-11': 10.5, 'V10-11 PG13': 10.5,
'V11-': 10.75, 'V11': 11,
'V11 R': 11.25, 'V11 X': 11.25, 'V11-12': 11.5,
'V12-': 11.75, 'V12': 12, 'V12 PG13': 12,
'V12+': 12.25, 'V12-13 R': 12.5,
'V13': 13, 'V13 PG13': 13, 'V13 R': 13.25, 'V13-14 PG13': 13.5,
'V14': 14} # should find a way to generate these automatically
def preprocess(df, grade_converter):
"""Use this to clean data prior to storage in postgres
"""
# format columns names
df.columns = [title_to_snake_case(col) for col in df.columns]
df = df.rename(columns = {"length": "length_"})
# Collect the route ID (to ensure there are no duplicates)
df['url_id'] = df['url'].apply(lambda x: re.sub("https://www.mountainproject.com/route/", "", x))
df['url_id'] = df['url_id'].apply(lambda x: int(re.sub("/(?<=/).*", "", x)))
# Fix bad avg_stars rating
df.avg_stars = df.avg_stars.replace(to_replace = -1.0, value = 0)
# Convert V grades into numerical values
df['grade'] = df.rating.replace(grade_converter)
return df
def get_ratings_from_mp(df):
"""Uses the link provided in the CSV file to open and grab data from the following categories:
Star Ratings, Suggested Ratings, On To Do Lists, and Ticks
"""
df["ratings"] = None # add empty column to df
# Chose to do it this way to prevent potential auto-ban
for row in tqdm(df.iterrows()):
# url
url = row[1]['url']
        url_items = re.match(r'https://www.mountainproject.com/route/(?P<url_id>\d+)/(?P<route>.*)', url)
url_id, route = url_items['url_id'], url_items['route']
route_stats_url = f'https://www.mountainproject.com/route/stats/{url_id}/{route}'
# get data
html = bs4.BeautifulSoup((requests.get(route_stats_url).text), "lxml")
h3_sections = list(html.find('div', {"id":"route-stats"}).find_all("h3")) # ratings only
ratings = {title_to_snake_case(h3_section.contents[0].strip()): int(h3_section.contents[1].text)
for h3_section in h3_sections}
# add data to df
df.loc[row[0], ['ratings']] = json.dumps(ratings)
# Delay is 1.5 +/- 1.0 s, so code should run for about 10-15 min for 400 rows
        time.sleep(.5 + 2*random.random())  # average delay of 1.5 s, at least 0.5 s between requests
df['ratings'] = df['ratings'].apply(json.loads)
df = dict_to_col(df, 'ratings')
return df
def transform_features(df):
""" Add log and sqrt values
"""
# add log values for ols linear regression
df['log_star_ratings'] = df['star_ratings'].apply(lambda x: math.log(x+1, 10))
df['log_ticks'] = df['ticks'].apply(lambda x: math.log(x+1, 10))
df['log_avg_stars'] = df['avg_stars'].apply(lambda x: math.log(x+1, 10))
df['log_length'] = df['length_'].apply(lambda x: math.log(x+1, 10))
df['log_grade'] = df['grade'].apply(lambda x: math.log(x+2, 10))
df['log_on_to_do_lists'] = df['on_to_do_lists'].apply(lambda x: math.log(x+1, 10)) # Target
# add sqrt values for Poisson regression
df['sqrt_star_ratings'] = df['star_ratings'].apply(lambda x: math.sqrt(x))
df['sqrt_ticks'] = df['ticks'].apply(lambda x: math.sqrt(x))
df['sqrt_avg_stars'] = df['avg_stars'].apply(lambda x: math.sqrt(x))
df['sqrt_length'] = df['length_'].apply(lambda x: math.sqrt(x))
df['sqrt_grade'] = df['grade'].apply(lambda x: math.sqrt(x+1))
return df
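# Usage sketch (assumption; "routes.csv" is a placeholder for a Mountain
# Project CSV export): the three steps above are meant to be chained.
#
#   df = pd.read_csv("routes.csv")
#   df = preprocess(df, grades_converter)
#   df = get_ratings_from_mp(df)   # scrapes route stats, ~1.5 s delay per row
#   df = transform_features(df)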
|
from django.http import Http404, HttpResponse
import datetime
def hello(request):
return HttpResponse("Hello World")
def current_datetime(request):
now = datetime.datetime.now()
html = "<html><body>It is now %s.</body></html>" % now
return HttpResponse(html)
def hours_ahead(request, offset):
try:
        offset = int(offset)
except ValueError:
raise Http404()
dt = datetime.datetime.now() + datetime.timedelta(hours=offset)
html = "<html><body>In %s hour(s),it will be %s.</body></html>" %(offset,dt)
return HttpResponse(html) |
# pylint: disable=missing-docstring,no-self-use
import re
import pytest
from context import esperanto_analyzer
from esperanto_analyzer.speech import Preposition
from esperanto_analyzer.analyzers.morphological import PrepositionMorphologicalAnalyzer
class TestPrepositionMorphologicalAnalyzerBasic():
TEST_WORD = 'anstataŭ'
def test_import(self):
assert PrepositionMorphologicalAnalyzer
def test_initialize_default_options(self):
analyzer = PrepositionMorphologicalAnalyzer(self.TEST_WORD)
assert analyzer.options == dict()
def test_initialize_overwrite_options(self):
analyzer = PrepositionMorphologicalAnalyzer(self.TEST_WORD, dict(option='ok'))
assert analyzer.options == dict(option='ok')
def test_initialize_raw_word(self):
analyzer = PrepositionMorphologicalAnalyzer(self.TEST_WORD)
assert analyzer.raw_word == self.TEST_WORD
def test_initialize_word(self):
analyzer = PrepositionMorphologicalAnalyzer(self.TEST_WORD)
# analyzer.word is only populated after calling `analyze()` method
assert analyzer.word is None
def test_initialize_matches(self):
analyzer = PrepositionMorphologicalAnalyzer(self.TEST_WORD)
# analyzer.matches is only populated after calling `analyze()` method
assert analyzer.matches is None
def test_initialize_processed(self):
analyzer = PrepositionMorphologicalAnalyzer(self.TEST_WORD)
# analyzer.matches is only populated after calling `analyze()` method
assert analyzer.processed is False
def test_match_regexp(self):
assert PrepositionMorphologicalAnalyzer.MATCH_REGEXP is not None
def test_word_class(self):
assert isinstance(PrepositionMorphologicalAnalyzer.word_class()(self.TEST_WORD), Preposition)
class TestPrepositionMorphologicalAnalyzerMatchMethod():
VALID_WORDS = ['K', 'al', 'anstataŭ', 'antaŭ', 'antaŭ ol', 'apud', 'da', 'de', 'disde',
'du vortoj', 'dum', 'ekde', 'ekster', 'eksteren', 'el', 'en', 'ene',
'estiel', 'far', 'fare de', 'flanke de', 'for de', 'graŭ', 'inter', 'je',
'kaj ankaŭ', 'kiel', 'kontraŭ', 'kontraŭe de', 'krom', 'kun', 'laŭ',
'mala', 'malantaŭ', 'malgraŭ', 'malkiel', 'malsupre de', 'malsupren',
'meze de', 'na', 'nome de', 'ol', 'per', 'pere de', 'plus', 'po', 'por',
'post', 'preter', 'pri', 'pro', 'proksime de', 'samkiel', 'sed', 'sekva',
'sen', 'sub', 'suben', 'super', 'supren', 'sur', 'tiu', 'tiuj', 'tra',
'trans', 'tri vortoj', 'tuj post', 'tutĉirkaŭ',
'ĉe', 'ĉi tiu', 'ĉi tiuj', 'ĉirkaŭ', 'ĝis'
]
INVALID_WORDS = ['io', 'bela', 'domo', 'hundoj', 'kiu', 'vi', 'multe', 'ankoraŭ', 'dek',
'du', 'ĉar', 'aŭ', '?', '!']
def test_match(self):
for word in self.VALID_WORDS:
analyzer = PrepositionMorphologicalAnalyzer(word)
matches = analyzer.match()
assert matches is not None
assert len(matches.span()) == 2
def test_match_empty(self):
for word in self.INVALID_WORDS:
analyzer = PrepositionMorphologicalAnalyzer(word)
matches = analyzer.match()
assert matches is None
class TestPrepositionMorphologicalAnalyzerAnalyzeMethod():
VALID_WORDS = ['K', 'al', 'anstataŭ', 'antaŭ', 'antaŭ ol', 'apud', 'da', 'de', 'disde',
'du vortoj', 'dum', 'ekde', 'ekster', 'eksteren', 'el', 'en', 'ene',
'estiel', 'far', 'fare de', 'flanke de', 'for de', 'graŭ', 'inter', 'je',
'kaj ankaŭ', 'kiel', 'kontraŭ', 'kontraŭe de', 'krom', 'kun', 'laŭ',
'mala', 'malantaŭ', 'malgraŭ', 'malkiel', 'malsupre de', 'malsupren',
'meze de', 'na', 'nome de', 'ol', 'per', 'pere de', 'plus', 'po', 'por',
'post', 'preter', 'pri', 'pro', 'proksime de', 'samkiel', 'sed', 'sekva',
'sen', 'sub', 'suben', 'super', 'supren', 'sur', 'tiu', 'tiuj', 'tra',
'trans', 'tri vortoj', 'tuj post', 'tutĉirkaŭ',
'ĉe', 'ĉi tiu', 'ĉi tiuj', 'ĉirkaŭ', 'ĝis',
]
INVALID_WORDS = ['io', 'bela', 'domo', 'hundoj', 'kiu', 'vi', 'multe', 'ankoraŭ', 'dek',
'du', 'ĉar', 'aŭ', '?', '!']
def test_invalid_analyze(self):
for word in self.INVALID_WORDS:
analyzer = PrepositionMorphologicalAnalyzer(word)
result = analyzer.analyze()
assert not result
def test_invalid_analyze_word(self):
for word in self.INVALID_WORDS:
analyzer = PrepositionMorphologicalAnalyzer(word)
analyzer.analyze()
assert analyzer.word is None
def test_invalid_analyze_match(self):
for word in self.INVALID_WORDS:
analyzer = PrepositionMorphologicalAnalyzer(word)
analyzer.analyze()
assert analyzer.matches is None
def test_analyze(self):
for word in self.VALID_WORDS:
analyzer = PrepositionMorphologicalAnalyzer(word)
assert analyzer.analyze()
def test_prepositions_list(self):
for word in PrepositionMorphologicalAnalyzer.PREPOSITIONS_LIST:
analyzer = PrepositionMorphologicalAnalyzer(word)
assert analyzer.analyze()
def test_analyze_word(self):
for word in self.VALID_WORDS:
analyzer = PrepositionMorphologicalAnalyzer(word)
analyzer.analyze()
assert isinstance(analyzer.word, Preposition)
assert analyzer.word.content == word
def test_analyze_match(self):
for word in self.VALID_WORDS:
analyzer = PrepositionMorphologicalAnalyzer(word)
analyzer.analyze()
assert analyzer.matches is not None
def test_analyze_return_false(self):
for word in self.INVALID_WORDS:
analyzer = PrepositionMorphologicalAnalyzer(word)
assert analyzer.analyze() is False
def test_analyze_return_true(self):
for word in self.VALID_WORDS:
analyzer = PrepositionMorphologicalAnalyzer(word)
assert analyzer.analyze()
def test_analyze_processed(self):
for word in self.VALID_WORDS:
analyzer = PrepositionMorphologicalAnalyzer(word)
assert analyzer.processed is False
analyzer.analyze()
assert analyzer.processed is True
def test_analyze_processed_response(self):
for word in self.VALID_WORDS:
analyzer = PrepositionMorphologicalAnalyzer(word)
analyzer.analyze()
assert analyzer.analyze() is None
assert analyzer.analyze() is None
class TestPrepositionMorphologicalAnalyzerPrepositionsList:
PREPOSITIONS_LIST = ['K', 'al', 'anstataŭ', 'antaŭ', 'antaŭ ol', 'apud', 'da', 'de', 'disde',
'du vortoj', 'dum', 'ekde', 'ekster', 'eksteren', 'el', 'en', 'ene',
'estiel', 'far', 'fare de', 'flanke de', 'for de', 'graŭ', 'inter', 'je',
'kaj ankaŭ', 'kiel', 'kontraŭ', 'kontraŭe de', 'krom', 'kun', 'laŭ',
'mala', 'malantaŭ', 'malgraŭ', 'malkiel', 'malsupre de', 'malsupren',
'meze de', 'na', 'nome de', 'ol', 'per', 'pere de', 'plus', 'po', 'por',
'post', 'preter', 'pri', 'pro', 'proksime de', 'samkiel', 'sed', 'sekva',
'sen', 'sub', 'suben', 'super', 'supren', 'sur', 'tiu', 'tiuj', 'tra',
'trans', 'tri vortoj', 'tuj post', 'tutĉirkaŭ',
'ĉe', 'ĉi tiu', 'ĉi tiuj', 'ĉirkaŭ', 'ĝis']
def test_preposition_list_not_changed(self):
assert PrepositionMorphologicalAnalyzer.PREPOSITIONS_LIST == self.PREPOSITIONS_LIST
def test_prepositions_not_empty(self):
assert PrepositionMorphologicalAnalyzer.PREPOSITIONS_LIST is not None
def test_prepositions_not_size(self):
assert len(PrepositionMorphologicalAnalyzer.PREPOSITIONS_LIST) == 73
def test_prepositions_match_list(self):
for word in PrepositionMorphologicalAnalyzer.PREPOSITIONS_LIST:
assert PrepositionMorphologicalAnalyzer.PROPOSITIONS_MATCH_REGEXP.match(word)
def test_prepositions_match_final_regexp_list(self):
for word in PrepositionMorphologicalAnalyzer.PREPOSITIONS_LIST:
assert PrepositionMorphologicalAnalyzer.MATCH_REGEXP.match(word)
|
class PC:
def __init__(self, Name, Combat_Class, Race, Eye_Color, Skin_Tone, Hair_Color, Size, Weight, Trademarks, STR, DEX, CON, INT, WIS, CHA, Muscle, Wrestle, Brawl, Coordination, Finesse, Sleight_of_Hand, Stealth, Endurance, Concentration, Vitality, Academic, Arcana, Culture, Analyze, Nature, Aggressive, Suave, Diplomatic, Sincere, Tier_1, Tier_2, Tier_3, Offensive_Ability, Defensive_Ability, Movement_Ability, Special_Ability, Main_Hand, Off_Hand, Utility, Armor, Combat_Special_Move, Backpack, Gold, Rations, Kits, Adventuring_Special_Moves, Value_Self, Value_Others, Value_Society, Mission, Interests, Talents, Quirks, Fears, Family, Friends, Professional, Nemesis, Factions, Home, Profession, Skill, Race_Bonus, Story, Roleplaying_Special_Moves):
# Basic information and appearance
self.Name = Name
self.Combat_Class = Combat_Class
self.Race = Race
self.Eye_Color = Eye_Color
self.Skin_Tone = Skin_Tone
self.Hair_Color = Hair_Color
self.Size = Size
self.Weight = Weight
self.Trademarks = Trademarks
# STATS and SKILLS
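        # Ability modifiers below follow (score // 5) - 2; e.g. a STR of 26 gives (26 // 5) - 2 = +3.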
self.STR = STR
STR_Mod = (STR // 5) - 2
self.DEX = DEX
DEX_Mod = (DEX // 5) - 2
self.CON = CON
CON_Mod = (CON // 5) - 2
self.INT = INT
INT_Mod = (INT // 5) - 2
self.WIS = WIS
        WIS_Mod = (WIS // 5) - 2
self.CHA = CHA
CHA_Mod = (CHA // 5) - 2
self.Muscle = Muscle
self.Wrestle = Wrestle
self.Brawl = Brawl
self.Coordination = Coordination
self.Finesse = Finesse
self.Sleight_of_Hand = Sleight_of_Hand
self.Stealth = Stealth
self.Endurance = Endurance
self.Concentration = Concentration
self.Vitality = Vitality
self.Academic = Academic
self.Arcana = Arcana
self.Culture = Culture
self.Analyze = Analyze
self.Nature = Nature
self.Aggressive = Aggressive
self.Suave = Suave
self.Diplomatic = Diplomatic
self.Sincere = Sincere
#Health information
self.Tier_1 = Tier_1
self.Tier_2 = Tier_2
self.Tier_3 = Tier_3
self.Max_Vitality = Tier_3
self.Current_Vitality = Tier_3
#Statuses - always start at 0 and can be added during combat
self.blinded_status_stage = 0
self.burning_status_stage = 0
self.crippled_arm_status_stage = 0
self.crippled_leg_status_stage = 0
self.deafened_status_stage = 0
self.fatigued_status_stage = 0
self.impaled_status_stage = 0
self.captivated_status_stage = 0
self.confused_status_stage = 0
self.frightened_status_stage = 0
self.stunned_status_stage = 0
self.momentum_status_stage = 0
self.prone_status_stage = 0
self.restrained_status_stage = 0
self.suffocating_status_stage = 0
self.surprised_status_stage = 0
# Combat stats
self.Attack_Bonus = Muscle + STR_Mod
self.Dodge_Bonus = Coordination + DEX_Mod
self.Defend_Bonus = Endurance + CON_Mod
self.Offensive_Ability = Offensive_Ability
self.Defensive_Ability = Defensive_Ability
self.Movement_Ability = Movement_Ability
self.Special_Ability = Special_Ability
self.Free_Movement = 4
self.AP = 12
self.RP = 4
self.Power_Rank = 0
#equipment
self.Main_Hand = Main_Hand
self.Off_Hand = Off_Hand
self.Utility = Utility
self.Armor = Armor
self.Combat_Special_Move = Combat_Special_Move
# adventuring conditions - set at 0 until added to after combat or during adventuring
self.bleeding_condition_stage = 0
self.blinded_condition_stage = 0
self.crippled_condition_stage = 0
self.deafened_condition_stage = 0
self.drowsy_condition_stage = 0
self.fatigued_condition_stage = 0
self.fractured_condition_stage = 0
self.sickened_condition_stage = 0
self.Short_Rest = 0
self.Carry_Weight = 5 + STR_Mod
self.Backpack = Backpack
self.Gold = Gold
self.Rations = Rations
self.Kits = Kits
self.Adventuring_Special_Moves = Adventuring_Special_Moves
self.Value_Self = Value_Self
self.Value_Others = Value_Others
self.Value_Society = Value_Society
self.Mission = Mission
self.Interests = Interests
self.Talents = Talents
self.Quirks = Quirks
self.Fears = Fears
self.Family = Family
self.Friends = Friends
self.Professional = Professional
self.Nemesis = Nemesis
self.Factions = Factions
self.Home = Home
self.Profession = Profession
self.Skill = Skill
self.Race_Bonus = Race_Bonus
self.Story = Story
self.Roleplaying_Special_Moves = Roleplaying_Special_Moves
Dougey = PC("Dougey", "Brute", "Human", "Blue", "Skin_Tone", "Hair_Color", 1, 4, "Trademarks", 26, 21, 18, 6, 10, 13, 4, 0, 2, 1, 0, 0, 2, 3, 0, 0, 0, 0, 1, 2, 1, 3, 0, 0, 0, 3, 5, 7, "Offensive_Ability", "Defensive_Ability", "Movement_Ability", "Special_Ability", "Maul", "Off_Hand", "Utility", "ChainMail", "Combat_Special_Move", "Backpack", 100, 5, "Kits", "Adventuring_Special_Moves", "Loyalty", "Compassion", "Cooperation", "Mission", "Interests", "Talents", "Quirks", "Fears", "Family", "Friends", "Professional", "Nemesis", "Factions", "Home", "Profession", "Skill", "Race_Bonus", "Story", "Roleplaying_Special_Moves")
Maincharacter = Dougey.Name
print(Maincharacter)
 |
from collections import defaultdict
import os
with open(os.path.join(os.path.dirname(__file__), "input.txt"), "r") as file:
report_lines = [l.strip() for l in file.readlines()]
def count_occurances(report_numbers):
counts = defaultdict(lambda: {"0": 0, "1": 0})
for line in report_numbers:
for idx, value in enumerate(line):
counts[idx][value] += 1
return counts
def filter_list(l, mode):
idx = 0
while len(l) > 1:
counts = count_occurances(l)
if counts[idx]["0"] > counts[idx]["1"]:
l = list(filter(lambda b: b[idx] == ("0" if mode == "most" else "1"), l))
if counts[idx]["0"] < counts[idx]["1"]:
l = list(filter(lambda b: b[idx] == ("1" if mode == "most" else "0"), l))
if counts[idx]["0"] == counts[idx]["1"]:
l = list(filter(lambda b: b[idx] == ("1" if mode == "most" else "0"), l))
idx += 1
return l
def part1():
gamma_rate = epsilon_rate = ""
counts = count_occurances(report_lines)
for value in counts.values():
if value["0"] > value["1"]:
gamma_rate += "0"
epsilon_rate += "1"
else:
gamma_rate += "1"
epsilon_rate += "0"
return int(gamma_rate, 2) * int(epsilon_rate, 2)
def part2():
oxygen_list = filter_list(report_lines.copy(), "most")
co2_list = filter_list(report_lines.copy(), "least")
return int(oxygen_list[0], 2) * int(co2_list[0], 2)
print(f"Part 1: {part1()}") # 1997414
print(f"Part 2: {part2()}") # 1032597
|
from azure_storage.methods import client_prep, delete_container, delete_file, delete_folder, extract_account_name
from azure_storage.azure_delete import AzureDelete, cli, container_delete, file_delete, \
folder_delete
from unittest.mock import patch
import argparse
import pytest
import azure
import os
@pytest.fixture(name='variables', scope='module')
def setup():
class Variables:
def __init__(self):
self.passphrase = 'AzureStorage'
self.container_name = '000000container'
self.account_name = extract_account_name(passphrase=self.passphrase)
return Variables()
def test_client_prep(variables):
variables.container_name, variables.connect_str, variables.blob_service_client, variables.container_client = \
client_prep(container_name=variables.container_name,
passphrase=variables.passphrase,
account_name=variables.account_name)
assert variables.connect_str.startswith('DefaultEndpointsProtocol')
@pytest.mark.parametrize('file_name',
['file_1.txt',
'container_integration/file_2.txt',
'nested_container/nested_folder/nested_folder_2/nested_folder_test_1.txt',
'ABC/123/nested_folder_test_1.txt'])
def test_delete_file(variables, file_name):
delete_file(container_client=variables.container_client,
object_name=file_name,
blob_service_client=variables.blob_service_client,
container_name=variables.container_name)
blobs = variables.container_client.list_blobs()
assert file_name not in [blob.name for blob in blobs]
@pytest.mark.parametrize('file_name',
['file_3.txt',
'container_integration/file_2.txt',
'nested_container/nested_folder/nested_folder_2/nested_folder_test_1.txt',
'ABC/123/nested_folder_test_1.txt'])
def test_delete_file_missing(variables, file_name):
with pytest.raises(SystemExit):
delete_file(container_client=variables.container_client,
object_name=file_name,
blob_service_client=variables.blob_service_client,
container_name=variables.container_name)
def test_delete_file_invalid_category(variables):
with pytest.raises(SystemExit):
del_file = AzureDelete(object_name='file_1.txt',
container_name=variables.container_name,
account_name=variables.account_name,
passphrase=variables.passphrase,
retention_time=8,
category='container')
del_file.main()
@patch('argparse.ArgumentParser.parse_args')
def test_delete_file_integration(mock_args, variables):
file_name = 'nested/file_2.txt'
mock_args.return_value = argparse.Namespace(passphrase=variables.passphrase,
account_name=variables.account_name,
container_name=variables.container_name,
verbosity='info',
file=file_name,
retention_time=1)
arguments = cli()
file_delete(arguments)
blobs = variables.container_client.list_blobs()
assert os.path.basename(file_name) not in [blob.name for blob in blobs]
@pytest.mark.parametrize('retention_time',
[0,
1000])
@patch('argparse.ArgumentParser.parse_args')
def test_delete_file_integration_invalid_retention_time(mock_args, variables, retention_time):
file_name = 'nested/file_2.txt'
with pytest.raises(SystemExit):
mock_args.return_value = argparse.Namespace(passphrase=variables.passphrase,
account_name=variables.account_name,
container_name=variables.container_name,
verbosity='info',
file=file_name,
retention_time=retention_time)
arguments = cli()
file_delete(arguments)
@patch('argparse.ArgumentParser.parse_args')
def test_delete_file_integration_missing(mock_args, variables):
file_name = 'nested/file_2.txt'
with pytest.raises(SystemExit):
mock_args.return_value = argparse.Namespace(passphrase=variables.passphrase,
account_name=variables.account_name,
container_name=variables.container_name,
verbosity='info',
file=file_name,
retention_time=1)
arguments = cli()
file_delete(arguments)
@pytest.mark.parametrize('folder_name,check_file',
[('container_integration/', 'nested_folder_test_1.txt'),
('nested_container/nested_folder/', 'nested_file_2.txt'),
('ABC/', 'nested_folder_test_1.txt')])
def test_delete_folder(variables, folder_name, check_file):
delete_folder(container_client=variables.container_client,
object_name=folder_name,
blob_service_client=variables.blob_service_client,
container_name=variables.container_name,
account_name=variables.account_name)
blobs = variables.container_client.list_blobs()
assert os.path.join(folder_name, check_file) not in [blob.name for blob in blobs]
@pytest.mark.parametrize('folder_name,check_file',
[('container_integration/', 'nested_folder_test_1.txt'),
('nested_container/nested_folder/', 'nested_file_2.txt'),
('ABC/', 'nested_folder_test_1.txt')])
def test_delete_folder_missing(variables, folder_name, check_file):
with pytest.raises(SystemExit):
delete_folder(container_client=variables.container_client,
object_name=folder_name,
blob_service_client=variables.blob_service_client,
container_name=variables.container_name,
account_name=variables.account_name)
@patch('argparse.ArgumentParser.parse_args')
def test_delete_folder_integration(mock_args, variables):
folder_name = 'nested_folder_3'
mock_args.return_value = argparse.Namespace(passphrase=variables.passphrase,
account_name=variables.account_name,
container_name=variables.container_name,
verbosity='info',
folder=folder_name,
retention_time=1)
arguments = cli()
folder_delete(arguments)
blobs = variables.container_client.list_blobs()
assert os.path.join(folder_name, 'nested_folder_test_1.txt') not in [blob.name for blob in blobs]
@patch('argparse.ArgumentParser.parse_args')
def test_delete_folder_integration_missing(mock_args, variables):
folder_name = 'nested_folder_3'
with pytest.raises(SystemExit):
mock_args.return_value = argparse.Namespace(passphrase=variables.passphrase,
account_name=variables.account_name,
container_name=variables.container_name,
verbosity='info',
folder=folder_name,
retention_time=1)
arguments = cli()
folder_delete(arguments)
def test_delete_container_missing(variables):
with pytest.raises(SystemExit):
delete_container(blob_service_client=variables.blob_service_client,
container_name='000000000container',
account_name=variables.account_name)
@patch('argparse.ArgumentParser.parse_args')
def test_delete_container_integration(mock_args, variables):
mock_args.return_value = argparse.Namespace(passphrase=variables.passphrase,
account_name=variables.account_name,
container_name=variables.container_name,
verbosity='info')
arguments = cli()
container_delete(arguments)
with pytest.raises(azure.core.exceptions.ResourceExistsError):
variables.blob_service_client.create_container(variables.container_name)
|
from django.core.management.base import BaseCommand, CommandError
from extractor.models import DocumentationUnit, MappingUnitToUser
import random
from django.contrib.auth.models import User
"""
map each unit to 2 different users
"""
class Command(BaseCommand):
help = 'maps a sample to the 7 students with their id'
    def create_mapping(self, object_list, offset, expected_per_user=None):
        # `offset` is the id of the first of the seven users (randint(0, 6) + offset below).
        # `expected_per_user` matches the third value passed from handle(); it appears to be the
        # intended number of mappings per user and is kept for reference only (not used here).
        howfull = [0 for x in range(7)]
user1 = 0
user2 = 0
for each in object_list:
random.seed()
user1=random.randint(0,6)
howfull[user1] += 1
user2=howfull.index(min(howfull))
howfull[user2] += 1
user_object1 = User.objects.get(id=(user1)+offset)
user_object2 = User.objects.get(id=(user2)+offset)
mapUnitToUser = MappingUnitToUser.objects.create(
user=user_object1,
documentation_unit=each,
already_marked=False
)
mapUnitToUser = MappingUnitToUser.objects.create(
user=user_object2,
documentation_unit=each,
already_marked=False
)
self.stdout.write(str(howfull))
self.stdout.write(str(howfull))
self.stdout.write("***DONE***")
def handle(self, *args, **options):
self.stdout.write("***START***")
ClassOrException = (DocumentationUnit.objects.filter(type='class') | \
DocumentationUnit.objects.filter(type='exception')).order_by('?')[:265]
AttributeOrData = (DocumentationUnit.objects.filter(type='attribute') | \
DocumentationUnit.objects.filter(type='data')).order_by('?')[:306]
MethodOrFunction = (DocumentationUnit.objects.filter(type='method') | \
DocumentationUnit.objects.filter(type='staticmethod') | \
DocumentationUnit.objects.filter(type='classmethod') | \
DocumentationUnit.objects.filter(type='function')).order_by('?')[:651]
Describe = (DocumentationUnit.objects.filter(type='describe')).order_by('?')[:16]
Section = (DocumentationUnit.objects.filter(type='section')).order_by('?')[:310]
self.stdout.write('Counted ClassOrException: "%s"' % ClassOrException.count())
self.stdout.write('Counted AttributeOrData: "%s"' % AttributeOrData.count())
self.stdout.write('Counted MethodOrFunction: "%s"' % MethodOrFunction.count())
self.stdout.write('Counted Describe: "%s"' % Describe.count())
self.stdout.write('Counted Section: "%s"' % Section.count())
self.stdout.write('Random-Test: "%s"' % ClassOrException[0].id)
        self.create_mapping(ClassOrException, 3, 76)
        self.create_mapping(AttributeOrData, 3, 88)
        self.create_mapping(MethodOrFunction, 3, 186)
        self.create_mapping(Describe, 3, 5)
        self.create_mapping(Section, 3, 89)
|
# -*- coding: utf-8 -*-
import argparse
import multiprocessing
from tox import hookimpl
@hookimpl
def tox_addoption(parser):
def positive_integer(value):
ivalue = int(value)
if ivalue <= 0:
raise argparse.ArgumentTypeError(
"%s is an invalid positive int value" % value)
return ivalue
try:
num_proc = multiprocessing.cpu_count()
except Exception:
num_proc = 2
parser.add_argument(
"-n", "--num",
type=positive_integer,
action="store",
default=num_proc,
dest="numproc",
help="set the number of concurrent processes "
"(default %s)." % num_proc)
|
import pyaudio
p = pyaudio.PyAudio()
def getValidDevicesList():
output = []
for i in range(p.get_device_count()):
currentDevice = p.get_device_info_by_index(i)
isInput = currentDevice["maxInputChannels"] > 0
isWASAPI = (p.get_host_api_info_by_index(
currentDevice["hostApi"])["name"]).find("WASAPI") != -1
if isWASAPI and not isInput:
output.append((i, currentDevice['name']))
return output
def getDeviceInfoString(deviceIndex):
device = p.get_device_info_by_index(deviceIndex)
return f"""Device Name: {device['name']}\nDevice Index: {device['index']}\nSample Rate: {device['defaultSampleRate']}"""
|
import socket
import os
from six.moves import urllib
import netifaces
def guess_external_ip():
gateways = netifaces.gateways()
try:
ifnet = gateways['default'][netifaces.AF_INET][1]
return netifaces.ifaddresses(ifnet)[netifaces.AF_INET][0]['addr']
except (KeyError, IndexError):
return
def bind_zmq_socket(sock, address, port=None):
endpoint = address if '://' in address else 'tcp://%s' % address
p = urllib.parse.urlparse(endpoint)
if port and p.port and p.port != port:
raise ValueError('two port numbers given: %s and %s' % (p.port, port))
if p.port:
sock.bind(endpoint)
port = p.port
elif port:
endpoint = '%s:%s' % (endpoint, port)
sock.bind(endpoint)
else:
port = sock.bind_to_random_port(endpoint)
endpoint = '%s:%s' % (endpoint, port)
return endpoint, port
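# Usage sketch for bind_zmq_socket (assumes pyzmq is installed; the PUB socket type is an
# illustrative choice):
#
#   import zmq
#   sock = zmq.Context.instance().socket(zmq.PUB)
#   endpoint, port = bind_zmq_socket(sock, '127.0.0.1')   # binds to a random free port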
# adapted from https://github.com/mozilla-services/chaussette/
def create_socket(host, family=socket.AF_INET, type=socket.SOCK_STREAM,
backlog=2048, blocking=True, inheritable=False):
if family == socket.AF_UNIX and not host.startswith('unix:'):
raise ValueError('Your host needs to have the unix:/path form')
if host.startswith('unix:'):
family = socket.AF_UNIX
if host.startswith('fd://'):
fd = int(host[5:])
sock = socket.fromfd(fd, family, type)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
else:
sock = socket.socket(family, type)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if host.startswith('unix:'):
filename = host[len('unix:'):]
try:
os.remove(filename)
except OSError:
pass
sock.bind(filename)
else:
if ':' in host:
host, port = host.rsplit(':', 1)
port = int(port)
else:
host, port = '0.0.0.0', int(host)
sock.bind((host, port))
sock.listen(backlog)
if blocking:
sock.setblocking(1)
else:
sock.setblocking(0)
# Required since Python 3.4 to be able to share a socket with a child
# process.
if inheritable and hasattr(os, 'set_inheritable'):
os.set_inheritable(sock.fileno(), True)
return sock
def get_unused_port(host="127.0.0.1", family=socket.AF_INET, socktype=socket.SOCK_STREAM):
tempsock = socket.socket(family, socktype)
tempsock.bind((host, 0))
port = tempsock.getsockname()[1]
tempsock.close()
del tempsock
return port
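# Usage sketch for create_socket / get_unused_port (values are illustrative; note the small
# race window between reserving the port number and re-binding it):
#
#   port = get_unused_port()
#   server = create_socket('127.0.0.1:%d' % port)
#   ...
#   server.close()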
|
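# One-liner below: prints a centred diamond of asterisks ("* " cells) with 2*N - 1 rows,
# where N = int(sys.argv[1]) is the first command-line argument.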
[print('\n'.join(" "*abs(c)+"* "*(a-abs(c))for c in range(-a+1,a)))for a in[int(__import__('sys').argv[1])]]
|
import argparse
import os
from PIL import Image
import numpy as np
def read_grayscale(input_file, width):
"""
    Reads the raw bytes from the input_file and returns a 2D numpy array of np.uint8 pixel values.
"""
with open(input_file, 'rb') as file:
raw_bytes = file.read()
flat_integers = [int(byte) for byte in raw_bytes]
int_array = np.array([flat_integers[n*width:(n+1)*width]
for n in range(len(flat_integers)//width)], dtype=np.uint8)
return int_array
def grayscale_to_jpg(input_file, output_dir, width):
"""
Reads grayscale image, converts it to jpg and writes it back into the output dir.
"""
int_array = read_grayscale(input_file, width)
image = Image.fromarray(int_array)
image.save(os.path.join(output_dir, os.path.splitext(os.path.basename(input_file))[0]+'.jpg'))
def grayscale_dir_to_jpg(input_dir, output_dir, width, grayscale_extension='gs'):
"""
Converts all images in the input_dir with the grayscale_extension to jpg in the output_dir.
"""
print('converting grayscale images in {} to jpg'.format(input_dir))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
for file_name in os.listdir(input_dir):
if os.path.splitext(file_name)[-1] != ('.' + grayscale_extension):
continue
grayscale_to_jpg(input_file=os.path.join(input_dir, file_name),
output_dir=output_dir,
width=width)
print('jpg images written to {}'.format(output_dir))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--input_dir')
parser.add_argument('--output_dir')
args = parser.parse_args()
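    # Example invocation (script and directory names are illustrative):
    #   python grayscale_to_jpg.py --input_dir ./frames_gs --output_dir ./frames_jpg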
grayscale_dir_to_jpg(args.input_dir, args.output_dir, width=96, grayscale_extension='gs')
|
import unittest
import sys
import os
sys.path.append('../../')
from etk.core import Core
import json
import codecs
class TestTableExtractions(unittest.TestCase):
def setUp(self):
self.e_config = {
"document_id": "doc_id",
"extraction_policy": "replace",
"error_handling": "raise_error",
"resources": {
"dictionaries": {
},
"landmark": [
],
"pickle": {
}
},
"content_extraction": {
"input_path": "raw_content",
"extractors": {
"table": {
"field_name": "table",
"config": {
},
"extraction_policy": "replace"
}
}
},
"data_extraction": [
],
"kg_enhancement": [
]
}
file_path = os.path.join(os.path.dirname(__file__), "ground_truth/table.jl")
table_out = os.path.join(os.path.dirname(__file__), "ground_truth/table_out.jl")
no_table_file = os.path.join(os.path.dirname(__file__), "ground_truth/1_content_extracted.jl")
self.doc = json.load(codecs.open(file_path, "r", "utf-8"))
self.table_ex = json.load(codecs.open(table_out, "r", "utf-8"))
self.no_table = json.load(codecs.open(no_table_file, "r", "utf-8"))
def test_table_extractor(self):
c = Core(extraction_config=self.e_config)
r = c.process(self.doc)
with open("table_out.jl", "w") as f:
f.write(json.dumps(r["content_extraction"]["table"]["tables"]))
self.assertTrue("content_extraction" in r)
self.assertTrue("table" in r["content_extraction"])
ex = json.loads(json.JSONEncoder().encode(r["content_extraction"]["table"]["tables"]))
self.assertEqual(ex, self.table_ex)
def test_table_extractor_no_field_name(self):
c = Core(extraction_config=self.e_config)
r = c.process(self.doc)
self.assertTrue("content_extraction" in r)
self.assertTrue("table" in r["content_extraction"])
ex = json.loads(json.JSONEncoder().encode(r["content_extraction"]["table"]["tables"]))
self.assertEqual(ex, self.table_ex)
def test_table_extractor_empty_config(self):
c = Core(extraction_config=self.e_config)
r = c.process(self.doc)
self.assertTrue("content_extraction" in r)
self.assertTrue("table" in r["content_extraction"])
ex = json.loads(json.JSONEncoder().encode(r["content_extraction"]["table"]["tables"]))
self.assertEqual(ex, self.table_ex)
def test_table_extractor_no_table(self):
c = Core(extraction_config=self.e_config)
r = c.process(self.no_table)
self.assertTrue("content_extraction" in r)
self.assertEqual(len(r["content_extraction"]["table"]["tables"]), 0)
if __name__ == '__main__':
unittest.main()
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from .web_view import VizSeqWebView
from .data_view import VizSeqDataPageView, DEFAULT_PAGE_SIZE, DEFAULT_PAGE_NO
from .data_filter import VizSeqFilter
from .data_sorters import (VizSeqSortingType, VizSeqRandomSorter,
VizSeqByLenSorter, VizSeqByStrOrderSorter,
VizSeqByMetricSorter, VizSeqByMetricDiffSorter)
|
from typing import DefaultDict, List, Optional
import itertools
class CachedParamMixin:
    data: Optional[DefaultDict[float, List[float]]] = None
def __init__(self):
self._all_keys = None
self._keysmax = None
self._keysmin = None
self._valmax = None
self._valmin = None
@property
def all_keys(self):
if self._all_keys is None:
self._all_keys = list(self.data.keys())
return self._all_keys
@property
def keysmax(self):
if self._keysmax is None:
self._keysmax = max(self.data.keys())
return self._keysmax
@property
def keysmin(self):
if self._keysmin is None:
self._keysmin = min(self.data.keys())
return self._keysmin
@property
def valmax(self):
if self._valmax is None:
self._valmax = max(itertools.chain.from_iterable(self.data.values()))
return self._valmax
@property
def valmin(self):
if self._valmin is None:
self._valmin = min(itertools.chain.from_iterable(self.data.values()))
return self._valmin
def dirty(self):
self._valmax = None
self._valmin = None
self._keysmax = None
self._keysmin = None
        self._all_keys = None
 |
# Python Object Oriented Programming by Joe Marini course example
# Using composition to build complex objects
# Inheritance models an "is-a" relationship.
# Composition is different from inheritance: with composition we build objects out of other
# objects, which models a "has-a" relationship (e.g. a Book object has an Author object).
# The Author object is passed into __init__, so each Book has an author associated with it.
# Composition helps separate responsibilities between classes.
class Book:
def __init__(self, title, price, author=None):
self.title = title
self.price = price
self.author = author
        # holds the book's list of Chapter objects.
self.chapters = []
# book object now takes in a chapter object.
# book has a collection of chapter objects.
def addchapter(self, chapter):
        self.chapters.append(chapter)
def getbookpagecount(self):
result = 0
for ch in self.chapters:
result += ch.pagecount
return result
class Author:
def __init__(self, fname, lname):
self.fname = fname
self.lname = lname
def __str__(self):
return f"{self.fname} {self.lname}"
class Chapter:
def __init__(self, name, pagecount):
self.name = name
self.pagecount = pagecount
auth = Author("Leo", "Tolstoy")
b1 = Book("War and Peace", 39.0, auth)
b1.addchapter(Chapter("Chapter 1", 125))
b1.addchapter(Chapter("Chapter 2", 97))
b1.addchapter(Chapter("Chapter 3", 143))
print(b1.title)
print(b1.author)
print(b1.getbookpagecount())
|
from dns.rdtypes.ANY.TXT import TXT
def query(records):
context = {}
context['i'] = -1
def mockable(_, __, ___):
if context['i'] + 1 < len(records):
context['i'] += 1
rdclass = 1
rdtype = 1
strings = records[context['i']]
return [
TXT(rdclass, rdtype, strings)
]
return mockable
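# Usage sketch (the TXT payloads are illustrative; each call to the mock yields the next
# record set until the list is exhausted):
#
#   resolver_mock = query([[b'v=spf1 -all'], [b'hello']])
#   answers = resolver_mock(None, None, None)   # -> [TXT rdata for [b'v=spf1 -all']]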
|
import numpy as np
import matplotlib.pyplot as plt
from random import randint
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
from keras.utils import np_utils
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.preprocessing import LabelEncoder
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split
# fix random seed for reproducibility
seed = 7
np.random.seed(seed)
def generate_word_one():
features = np.zeros(81)
features_n = np.zeros((10,8))
for i in range(0, 10):
d = randint(50,100)
features[i*8] = d
features_n[i][0] = d
t = randint(350, 400)
features[i*8 + 1] = t
features_n[i][1] = t
lA = randint(750, 800)
features[i*8 + 2] = lA
features_n[i][2] = lA
hA = randint(1000, 1099)
features[i*8 + 3] = hA
features_n[i][3] = hA
lB = randint(1300, 1490)
features[i*8 + 4] = lB
features_n[i][4] = lB
hB = randint(1800, 2000)
features[i*8 + 5] = hB
features_n[i][5] = hB
lG = randint(3100, 3300)
features[i*8 + 6] = lG
features_n[i][6] = lG
hG = randint(4100, 4500)
features[i*8 + 7] = hG
features_n[i][7] = hG
f = features_n/np.linalg.norm(features_n, ord=1, axis=0, keepdims=True)
features[80] = 1
return features, np.append(f.reshape(80), 1)
def generate_word_two():
features = np.zeros(81)
features_n = np.zeros((10,8))
for i in range(0, 10):
d = randint(80,200)
features[i*8] = d
features_n[i][0] = d
t = randint(330, 550)
features[i*8 + 1] = t
features_n[i][1] = t
lA = randint(780, 900)
features[i*8 + 2] = lA
features_n[i][2] = lA
hA = randint(1080, 1130)
features[i*8 + 3] = hA
features_n[i][3] = hA
lB = randint(1450, 1600)
features[i*8 + 4] = lB
features_n[i][4] = lB
hB = randint(1900, 2400)
features[i*8 + 5] = hB
features_n[i][5] = hB
lG = randint(3200, 3805)
features[i*8 + 6] = lG
features_n[i][6] = lG
hG = randint(4400, 4700)
features[i*8 + 7] = hG
features_n[i][7] = hG
f = features_n/np.linalg.norm(features_n, ord=1, axis=0, keepdims=True)
features[80] = 2
return features, np.append(f.reshape(80), 2)
def generate_word_three():
features = np.zeros(81)
features_n = np.zeros((10,8))
for i in range(0, 10):
d = randint(180,275)
features[i*8] = d
features_n[i][0] = d
t = randint(480, 675)
features[i*8 + 1] = t
features_n[i][1] = t
lA = randint(850, 925)
features[i*8 + 2] = lA
features_n[i][2] = lA
hA = randint(1100, 1175)
features[i*8 + 3] = hA
features_n[i][3] = hA
lB = randint(1550, 1675)
features[i*8 + 4] = lB
features_n[i][4] = lB
hB = randint(2200, 2975)
features[i*8 + 5] = hB
features_n[i][5] = hB
lG = randint(3500, 3975)
features[i*8 + 6] = lG
features_n[i][6] = lG
hG = randint(4600, 4975)
features[i*8 + 7] = hG
features_n[i][7] = hG
f = features_n/np.linalg.norm(features_n, ord=1, axis=0, keepdims=True)
features[80] = 3
return features, np.append(f.reshape(80), 3)
sample_count = 10
word1 = []
word2 = []
word3 = []
for i in range(0, sample_count):
f1_1, f1_2 = generate_word_one()
f2_1, f2_2 = generate_word_two()
f3_1, f3_2 = generate_word_three()
#print(f1_1.shape, f1_2.shape)
#print(f1_1, f1_2)
word1.append(f1_1)
word2.append(f2_1)
word3.append(f3_1)
word_1 = np.array(word1)
word_2 = np.array(word2)
word_3 = np.array(word3)
feature_set = np.vstack([word_1, word_2, word_3])
labels = np.array([0]*10 + [1]*10 + [2]*10)
one_hot_labels = np.zeros((30, 3))
for i in range(30):
one_hot_labels[i, labels[i]] = 1
#X_b = feature_set[:,0:80].astype(float)
X = feature_set[:,0:80].astype(float)
Y = feature_set[:,80]
#word_1_r = word_1.reshape((100, 8))
#word_2_r = word_2.reshape((100, 8))
#word_3_r = word_3.reshape((100, 8))
#print("word_1", word_1.shape)
#print("word_2", word_2.shape)
#print("word_3", word_3.shape)
#print("word_1_r", word_1_r.shape)
#print("word_2_r", word_2_r.shape)
#print("word_3_r", word_3_r.shape)
#print("word_1_r", word_1_r)
#print("word_2_r", word_2_r)
#print("word_3_r", word_3_r)
#word_1 = np.random.randn(10, 80) + np.array([0, -3, 0, -3, 0, -3, 0, -3])
#word_2 = np.random.randn(10, 80) + np.array([3, 3, 3, 3, 3, 3, 3, 3])
#word_3 = np.random.randn(10, 80) + np.array([-3, 3, -3, 3, -3, 3, -3, 3])
#word_1_n = word_1_r/np.linalg.norm(word_1_r, ord=2, axis=0, keepdims=True)
#word_2_n = word_2_r/np.linalg.norm(word_2_r, ord=2, axis=0, keepdims=True)
#word_3_n = word_3_r/np.linalg.norm(word_3_r, ord=2, axis=0, keepdims=True)
#print("word_1_n", word_1_n.shape)
#print("word_2_n", word_2_n.shape)
#print("word_3_n", word_3_n.shape)
#print("word_n_r", word_1_n)
#print("word_n_r", word_2_n)
#print("word_n_3", word_3_n)
#word_1_b = word_1_n.reshape((10, 80))
#word_2_b = word_2_n.reshape((10, 80))
#word_3_b = word_3_n.reshape((10, 80))
#X = X_b.reshape((100, 8))
#X = X_b/np.linalg.norm(X_b, ord=2, axis=0, keepdims=True)
#X = word_1_n.reshape((10, 80))
encoder = LabelEncoder()
encoder.fit(Y)
encoded_Y = encoder.transform(Y)
# convert integers to dummy variables (i.e. one hot encoded)
dummy_y = np_utils.to_categorical(encoded_Y)
def baseline_model():
# create model
model = Sequential()
model.add(Dense(8, input_dim=80, activation='relu'))
model.add(Dense(20, input_dim=8, activation='relu'))
#model.add(Dense(30, input_dim=20, activation='relu'))
#model.add(Dense(10, input_dim=30, activation='relu'))
model.add(Dense(3, activation='softmax'))
# Compile model
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
#X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.33, random_state=seed)
#baseline_model().fit(X_train, y_train, validation_data=(X_test,y_test), epochs=150, batch_size=1)
estimator = KerasClassifier(build_fn=baseline_model, epochs=100, batch_size=5, verbose=0)
kfold = KFold(n_splits=10, shuffle=True, random_state=seed)
results = cross_val_score(estimator, X, dummy_y, cv=kfold)
print("Baseline: %.2f%% (%.2f%%)" % (results.mean()*100, results.std()*100)) |
import re
class DisambiguatorPrefixRule19(object):
"""Disambiguate Prefix Rule 19
Original Rule 19 : mempV -> mem-pV where V != 'e'
Modified Rule 19 by ECS : mempA -> mem-pA where A != 'e' in order to stem memproteksi
"""
def disambiguate(self, word):
"""Disambiguate Prefix Rule 19
Original Rule 19 : mempV -> mem-pV where V != 'e'
Modified Rule 19 by ECS : mempA -> mem-pA where A != 'e' in order to stem memproteksi
"""
matches = re.match(r'^memp([abcdfghijklmopqrstuvwxyz])(.*)$', word)
if matches:
return 'p' + matches.group(1) + matches.group(2)
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Custom modules built on top of nn layers that can do sequence classification
"""
import logging
from abc import abstractmethod
from collections import OrderedDict
from typing import List
from .classification import BaseClassification
from .helpers import BatchData, EmbedderType, SequenceClassificationType
from .layers import (
EmbeddingLayer,
CnnLayer,
LstmLayer,
PoolingLayer
)
from ..containers import HuggingfaceTransformersContainer
try:
import torch
import torch.nn as nn
import torch.nn.functional as F
except ImportError:
pass
logger = logging.getLogger(__name__)
class BaseSequenceClassification(BaseClassification):
"""Base class that defines all the necessary elements to successfully train/infer
custom pytorch modules wrapped on top of this base class. Classes derived from
this base can be trained for sequence classification.
"""
def _prepare_labels(self, labels: List[int], max_length: int = None):
# for sequence classification, the length of an example doesn't matter as we have only one
# label for each example. hence, no need to do any padding or validation checks.
del max_length
return torch.as_tensor(labels, dtype=torch.long)
def _init_graph(self):
# initialize the beginning layers of the graph
self._init_core()
# init the underlying params and architectural components
try:
assert self.out_dim > 0
self.params.update({"out_dim": self.out_dim})
except (AttributeError, AssertionError) as e:
msg = f"Derived class '{self.name}' must indicate its hidden size for dense layer " \
f"classification by having an attribute 'self.out_dim', which must be a " \
f"positive integer greater than 1"
raise ValueError(msg) from e
# init the peripheral architecture params
if not self.params.num_labels:
msg = f"Invalid number of labels ({self.params.num_labels}) inputted to '{self.name}'"
raise ValueError(msg)
# init the peripheral architectural components and the criterion to compute loss
self.dense_layer_dropout = nn.Dropout(
p=1 - self.params.output_keep_prob
)
if self.params.num_labels == 2:
# sigmoid criterion
self.classifier_head = nn.Linear(self.out_dim, 1)
self.criterion = nn.BCEWithLogitsLoss(reduction='mean')
elif self.params.num_labels > 2:
# cross-entropy criterion
self.classifier_head = nn.Linear(self.out_dim, self.params.num_labels)
self.criterion = nn.CrossEntropyLoss(reduction='mean')
else:
msg = f"Invalid number of labels specified: {self.params.num_labels}. " \
f"A valid number is equal to or greater than 2"
raise ValueError(msg)
msg = f"{self.name} is initialized"
logger.info(msg)
def forward(self, batch_data):
batch_data = self.to_device(batch_data)
batch_data = self._forward_core(batch_data)
seq_embs = batch_data["seq_embs"]
seq_embs = self.dense_layer_dropout(seq_embs)
logits = self.classifier_head(seq_embs)
batch_data.update({"logits": logits})
targets = batch_data.pop("_labels", None)
if targets is not None:
if self.params.num_labels == 2:
loss = self.criterion(logits.view(-1), targets.float())
else: # self.params.num_labels > 2:
loss = self.criterion(logits, targets)
batch_data.update({"loss": loss})
return batch_data
def predict(self, examples):
logits = self._forward_with_batching_and_no_grad(examples)
if self.params.num_labels == 2:
predictions = (logits >= 0.5).long().view(-1)
else: # self.params.num_labels > 2:
predictions = torch.argmax(logits, dim=-1)
return predictions.tolist()
def predict_proba(self, examples):
logits = self._forward_with_batching_and_no_grad(examples)
if self.params.num_labels == 2:
probs = F.sigmoid(logits)
# extending the results from shape [N,1] to [N,2] to give out class probs distinctly
probs = torch.cat((1 - probs, probs), dim=-1)
else: # self.params.num_labels > 2:
probs = F.softmax(logits, dim=-1)
return probs.tolist()
def _forward_with_batching_and_no_grad(self, examples):
logits = None
was_training = self.training
self.eval()
with torch.no_grad():
for start_idx in range(0, len(examples), self.params.batch_size):
batch_examples = examples[start_idx:start_idx + self.params.batch_size]
batch_data = self.encoder.batch_encode(
batch_examples,
padding_length=self.params.padding_length,
add_terminals=self.params.add_terminals,
)
batch_logits = self.forward(batch_data)["logits"]
logits = torch.cat((logits, batch_logits)) if logits is not None else batch_logits
if was_training:
self.train()
return logits
@abstractmethod
def _init_core(self) -> None:
raise NotImplementedError
@abstractmethod
def _forward_core(self, batch_data: BatchData) -> BatchData:
raise NotImplementedError
class EmbedderForSequenceClassification(BaseSequenceClassification):
"""An embedder pooling module that operates on a batched sequence of token ids. The
tokens could be characters or words or sub-words. This module finally outputs one 1D
representation for each instance in the batch (i.e. [BS, EMB_DIM]).
    The `forward` method of this module expects padded token ids along with number of tokens
per instance in the batch.
Additionally, one can set different coefficients for different tokens of the embedding
matrix (e.g. tf-idf weights).
"""
def _init_core(self):
self.emb_layer = EmbeddingLayer(
self.params._num_tokens,
self.params.emb_dim,
self.params._padding_idx,
self.params.pop("_embedding_weights", None),
self.params.update_embeddings,
1 - self.params.embedder_output_keep_prob
)
self.emb_layer_pooling = PoolingLayer(
self.params.embedder_output_pooling_type
)
self.out_dim = self.params.emb_dim
def _forward_core(self, batch_data):
seq_ids = batch_data["seq_ids"] # [BS, SEQ_LEN]
flattened_split_lengths = [
sum(_split_lengths) +
(self.encoder.number_of_terminal_tokens if self.params.add_terminals else 0)
for _split_lengths in batch_data["split_lengths"]
]
flattened_split_lengths = torch.as_tensor(flattened_split_lengths, dtype=torch.long) # [BS]
encodings = self.emb_layer(seq_ids) # [BS, SEQ_LEN, EMD_DIM]
encodings = self.emb_layer_pooling(encodings, flattened_split_lengths) # [BS, self.out_dim]
batch_data.update({"seq_embs": encodings})
return batch_data
class CnnForSequenceClassification(BaseSequenceClassification):
"""A CNN module that operates on a batched sequence of token ids. The tokens could be
characters or words or sub-words. This module finally outputs one 1D representation
for each instance in the batch (i.e. [BS, EMB_DIM]).
The `forward` method of this module expects only padded token ids as input.
"""
def _init_core(self):
self.emb_layer = EmbeddingLayer(
self.params._num_tokens,
self.params.emb_dim,
self.params._padding_idx,
self.params.pop("_embedding_weights", None),
self.params.update_embeddings,
1 - self.params.embedder_output_keep_prob
)
self.conv_layer = CnnLayer(
self.params.emb_dim,
self.params.window_sizes,
self.params.number_of_windows
)
self.out_dim = sum(self.params.number_of_windows)
def _forward_core(self, batch_data):
seq_ids = batch_data["seq_ids"] # [BS, SEQ_LEN]
encodings = self.emb_layer(seq_ids) # [BS, SEQ_LEN, EMD_DIM]
encodings = self.conv_layer(encodings) # [BS, self.out_dim]
batch_data.update({"seq_embs": encodings})
return batch_data
class LstmForSequenceClassification(BaseSequenceClassification):
"""A LSTM module that operates on a batched sequence of token ids. The tokens could be
characters or words or sub-words. This module finally outputs one 1D representation
for each instance in the batch (i.e. [BS, EMB_DIM]).
    The `forward` method of this module expects padded token ids along with number of tokens
per instance in the batch.
"""
def _init_core(self):
self.emb_layer = EmbeddingLayer(
self.params._num_tokens,
self.params.emb_dim,
self.params._padding_idx,
self.params.pop("_embedding_weights", None),
self.params.update_embeddings,
1 - self.params.embedder_output_keep_prob
)
self.lstm_layer = LstmLayer(
self.params.emb_dim,
self.params.lstm_hidden_dim,
self.params.lstm_num_layers,
1 - self.params.lstm_keep_prob,
self.params.lstm_bidirectional
)
self.lstm_layer_pooling = PoolingLayer(
self.params.lstm_output_pooling_type
)
self.out_dim = (
self.params.lstm_hidden_dim * 2 if self.params.lstm_bidirectional
else self.params.lstm_hidden_dim
)
def _forward_core(self, batch_data):
seq_ids = batch_data["seq_ids"] # [BS, SEQ_LEN]
flattened_split_lengths = [
sum(_split_lengths) +
(self.encoder.number_of_terminal_tokens if self.params.add_terminals else 0)
for _split_lengths in batch_data["split_lengths"]
]
flattened_split_lengths = torch.as_tensor(flattened_split_lengths, dtype=torch.long) # [BS]
encodings = self.emb_layer(seq_ids) # [BS, SEQ_LEN, EMD_DIM]
encodings = self.lstm_layer(encodings, flattened_split_lengths) # [BS,SEQ_LEN,self.out_dim]
encodings = self.lstm_layer_pooling(encodings, flattened_split_lengths) # [BS,self.out_dim]
batch_data.update({"seq_embs": encodings})
return batch_data
class BertForSequenceClassification(BaseSequenceClassification):
def fit(self, examples, labels, **params):
# overriding base class' method to set params, and then calling base class' .fit()
embedder_type = params.get("embedder_type", EmbedderType.BERT.value)
if EmbedderType(embedder_type) != EmbedderType.BERT:
msg = f"{self.name} can only be used with 'embedder_type': " \
f"'{EmbedderType.BERT.value}'. " \
f"Other values passed through config params are not allowed."
raise ValueError(msg)
safe_values = {
"num_warmup_steps": 50,
"learning_rate": 2e-5,
"optimizer": "AdamW",
"max_grad_norm": 1.0,
}
for k, v in safe_values.items():
v_inputted = params.get(k, v)
if v != v_inputted:
msg = f"{self.name} can be best used with '{k}' equal to '{v}' but found " \
f"the value '{v_inputted}'. Use the non-default value with caution as it " \
f"may lead to unexpected results and longer training times depending on " \
f"the choice of pretrained model."
logger.warning(msg)
else:
params.update({k: v})
params.update({
"embedder_type": embedder_type,
"save_frozen_bert_weights": params.get("save_frozen_bert_weights", False) # if True,
# frozen set of bert weights are also dumped
})
super().fit(examples, labels, **params)
def _create_optimizer_and_scheduler(self, num_training_steps):
num_warmup_steps = min(int(0.1 * num_training_steps), self.params.num_warmup_steps)
self.params.update({"num_warmup_steps": num_warmup_steps})
# https://github.com/huggingface/transformers/blob/master/src/transformers/optimization.py
# refer `get_linear_schedule_with_warmup` method
def lr_lambda(current_step: int):
if current_step < num_warmup_steps:
return float(current_step) / float(max(1, num_warmup_steps))
return max(
0.0, float(num_training_steps - current_step) / float(
max(1, num_training_steps - num_warmup_steps))
)
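        # e.g. with num_training_steps=1000 and num_warmup_steps=50, the multiplier rises
        # linearly from 0 to 1 over the first 50 steps and then decays linearly to 0 at
        # step 1000.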
# load a torch optimizer
optimizer = self._create_optimizer()
# load a lr scheduler
scheduler = getattr(torch.optim.lr_scheduler, "LambdaLR")(optimizer, lr_lambda)
return optimizer, scheduler
def _create_optimizer(self):
params = list(self.named_parameters())
no_decay = ["bias", 'LayerNorm.bias', "LayerNorm.weight",
'layer_norm.bias', 'layer_norm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in params if not any(nd in n for nd in no_decay)],
'weight_decay': 0.01},
{'params': [p for n, p in params if any(nd in n for nd in no_decay)],
'weight_decay': 0.0}
]
optimizer = getattr(torch.optim, self.params.optimizer)(
optimizer_grouped_parameters,
lr=self.params.learning_rate,
eps=1e-08,
weight_decay=0.01
)
return optimizer
def _get_dumpable_state_dict(self):
if not self.params.update_embeddings and not self.params.save_frozen_bert_weights:
state_dict = OrderedDict(
{k: v for k, v in self.state_dict().items() if not k.startswith("bert_model")}
)
return state_dict
return self.state_dict()
def _init_core(self):
self.bert_model = HuggingfaceTransformersContainer(
self.params.pretrained_model_name_or_path,
cache_lookup=False
).get_transformer_model()
if not self.params.update_embeddings:
for param in self.bert_model.parameters():
param.requires_grad = False
self.dropout = nn.Dropout(
p=1 - self.params.embedder_output_keep_prob
)
if self.params.embedder_output_pooling_type != "first":
self.emb_layer_pooling = PoolingLayer(
self.params.embedder_output_pooling_type
)
self.out_dim = self.params.emb_dim
def _forward_core(self, batch_data):
# refer to https://huggingface.co/docs/transformers/master/en/main_classes/output
# for more details on huggingface's bert outputs
bert_outputs = self.bert_model(**batch_data["hgf_encodings"], return_dict=True)
if self.params.embedder_output_pooling_type != "first":
# 'last_hidden_state' refers to the tensor output of the final transformer layer (i.e.
# before the logit layer) and hence its dimension [BS, SEQ_LEN, EMD_DIM]
last_hidden_state = bert_outputs.get("last_hidden_state") # [BS, SEQ_LEN, EMD_DIM]
if last_hidden_state is None:
# TODO: Do all huggingface models have this key? Are there any alternatives for this
# key? If so, we can enumerate the set of key names instead of just one key name?
msg = f"The choice of pretrained bert model " \
f"({self.params.pretrained_model_name_or_path}) " \
f"has no key 'last_hidden_state' in its output dictionary."
raise ValueError(msg)
last_hidden_state = self.dropout(last_hidden_state)
flattened_split_lengths = [
sum(_split_lengths) +
(self.encoder.number_of_terminal_tokens if self.params.add_terminals else 0)
for _split_lengths in batch_data["split_lengths"]
]
flattened_split_lengths = torch.as_tensor(flattened_split_lengths, dtype=torch.long)
encodings = self.emb_layer_pooling(
last_hidden_state, flattened_split_lengths) # [BS, self.out_dim]
else:
# 'pooler_output' refers to the first token's (aka. CLS) representation obtained form
# the last hidden layer, and hence its dimension [BS, self.out_dim]
pooler_output = bert_outputs.get("pooler_output") # [BS, self.out_dim]
if pooler_output is None:
msg = f"The chosen pretrained bert ({self.params.pretrained_model_name_or_path}) " \
"has no key 'pooler_output' in its output dictionary (maybe the selected " \
"choice of transformers model has no CLS kind-of token?). " \
"Continuing to obtain the first representation of the last hidden state " \
"due to the passed-in value for embedder output pooling type param: " \
f"{self.params.embedder_output_pooling_type}. " \
f"If you wish to pool differently (eg. mean pooling), change the param " \
f"value in configs and run training."
logger.error(msg)
last_hidden_state = bert_outputs.get("last_hidden_state") # [BS, SEQ_LEN, EMD_DIM]
if last_hidden_state is None:
msg = f"The choice of pretrained bert model " \
f"({self.params.pretrained_model_name_or_path}) " \
f"has no key 'last_hidden_state' in its output dictionary."
raise ValueError(msg)
pooler_output = last_hidden_state[:, 0, :] # [BS, self.out_dim]
encodings = self.dropout(pooler_output)
batch_data.update({"seq_embs": encodings})
return batch_data
def get_sequence_classifier_cls(classifier_type: str, embedder_type: str = None):
try:
classifier_type = SequenceClassificationType(classifier_type)
except ValueError as e:
msg = f"Neural Nets' sequence classification module expects classifier_type to be amongst" \
f" {[v.value for v in SequenceClassificationType.__members__.values()]}" \
f" but found '{classifier_type}'."
raise ValueError(msg) from e
try:
embedder_type = EmbedderType(embedder_type)
except ValueError as e:
msg = f"Neural Nets' sequence classification module expects embedder_type to be amongst " \
f" {[v.value for v in EmbedderType.__members__.values()]} " \
f" but found '{embedder_type}'."
raise ValueError(msg) from e
if (
embedder_type == EmbedderType.BERT and
classifier_type not in [SequenceClassificationType.EMBEDDER]
):
msg = f"To use the embedder_type '{EmbedderType.BERT.value}', " \
f"classifier_type must be '{SequenceClassificationType.EMBEDDER.value}'."
raise ValueError(msg)
# disambiguation between glove, bert and non-pretrained embedders
def _resolve_and_return_embedder_class(_embedder_type):
return {
EmbedderType.NONE: EmbedderForSequenceClassification,
EmbedderType.GLOVE: EmbedderForSequenceClassification,
EmbedderType.BERT: BertForSequenceClassification
}[_embedder_type]
return {
SequenceClassificationType.EMBEDDER: _resolve_and_return_embedder_class(embedder_type),
SequenceClassificationType.CNN: CnnForSequenceClassification,
SequenceClassificationType.LSTM: LstmForSequenceClassification,
}[classifier_type]
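# Resolution sketch (illustrative; uses only the enums and classes defined above):
#
#   cls = get_sequence_classifier_cls(
#       classifier_type=SequenceClassificationType.LSTM.value,
#       embedder_type=EmbedderType.GLOVE.value,
#   )
#   assert cls is LstmForSequenceClassification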
|
"""Tests for citrine.informatics.rows."""
import pytest
from citrine.gemtables.rows import MaterialRunByTemplate, Row
from gemd.entity.link_by_uid import LinkByUID
@pytest.fixture(params=[
MaterialRunByTemplate(templates=[
LinkByUID(scope="templates", id="density"), LinkByUID(scope="templates", id="ingredients")
]),
MaterialRunByTemplate(templates=[
LinkByUID(scope="templates", id="density"), LinkByUID(scope="templates", id="ingredients")
],
tags=[
"foo::bar", "some::tag"
]
),
])
def row(request):
return request.param
def test_deser_from_parent(row):
# Serialize and deserialize the rows, making sure they are round-trip serializable
row_data = row.dump()
row_deserialized = Row.build(row_data)
assert row == row_deserialized
def test_invalid_eq(row):
other = None
assert not row == other
def test_invalid_deser():
with pytest.raises(ValueError):
Row.build({})
with pytest.raises(ValueError):
Row.build({"type": "foo"})
|
#!/usr/bin/env python3
# Copyright (C) 2019 Freie Universität Berlin
#
# This file is subject to the terms and conditions of the GNU Lesser
# General Public License v2.1. See the file LICENSE in the top level
# directory for more details.
import sys
from testrunner import run
def testfunc(child):
child.expect_exact("UDP event triggered: 0030")
child.expect_exact("Received UDP packet from [fe80::2]:38663:")
child.expect_exact("00000000 01 23 45 67 89 AB CD EF")
child.expect_exact("UDP message successfully sent")
child.expect_exact("IP event triggered: 0030")
child.expect_exact("Received IP packet from [fe80::2]:")
child.expect_exact("00000000 01 23 45 67 89 AB CD EF")
child.expect_exact("IP message successfully sent")
if __name__ == "__main__":
sys.exit(run(testfunc))
|
"""
Check that multiple BEP can be assigned to a single cast listener.
"""
import support
from java import awt
support.compileJava("test006j.java")
import test006j
def f(evt):
pass
m = test006j()
m.componentShown = f
m.componentHidden = f
m.fireComponentShown(awt.event.ComponentEvent(awt.Container(), 0))
m.fireComponentMoved(awt.event.ComponentEvent(awt.Container(), 0))
|
import sys
from configparser import ConfigParser
from steam.client import SteamClient
from dota2.client import Dota2Client
import logging
from random import randint
import os
# -> Change the directory here if necessary
paginaspath = os.path.dirname(os.path.abspath(__file__))
botspath = os.path.join(paginaspath, 'bots.ini')
bot = ConfigParser()
bot.read(botspath)
############VARS###########
counterslot = 0
########BOTCONFIGS#########
bot_user = sys.argv[1]
bot_pass = sys.argv[2]
bot_sec = sys.argv[3]
lobby_name = sys.argv[4]
##########PLAYERS##########
# - A for loop could be used here instead
player1 = int(sys.argv[5])#--------
player1time = sys.argv[6]
player2 = int(sys.argv[7])#--------
player2time = sys.argv[8]
player3 = int(sys.argv[9])#--------
player3time = sys.argv[10]
player4 = int(sys.argv[11])#--------
player4time = sys.argv[12]
player5 = int(sys.argv[13])#--------
player5time = sys.argv[14]
player6 = int(sys.argv[15])#--------
player6time = sys.argv[16]
player7 = int(sys.argv[17])#--------
player7time = sys.argv[18]
player8 = int(sys.argv[19])#--------
player8time = sys.argv[20]
player9 = int(sys.argv[21])#--------
player9time = sys.argv[22]
player10 = int(sys.argv[23])#--------
player10time = sys.argv[24]
############################
playerslot = {
1: {
'id': player1,
'slot': None,
},
2:{
'id': player2,
'slot': None,
},
3:{
'id': player3,
'slot': None,
},
4:{
'id': player4,
'slot': None,
},
5:{
'id': player5,
'slot': None,
},
6:{
'id': player6,
'slot': None,
},
7:{
'id': player7,
'slot': None,
},
8:{
'id': player8,
'slot': None,
},
9:{
'id': player9,
'slot': None,
},
10:{
'id': player10,
'slot': None,
},
}
############################
client = SteamClient()
dota = Dota2Client(client)
dota.verbose_debug = False
def converteridto32(steamid):
cc = int(steamid) - 76561197960265728
return cc
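# e.g. converteridto32(76561197960287930) -> 22202 (SteamID64 minus the 64-bit base offset)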
@client.on('logged_on')
def start_dota():
dota.launch()
    #print('Dota started')
@dota.on('ready')
def create_dolex_lobby():
global counterslot
#bot[bot_sec]['estado'] = 'ocupado'
#with open(botspath, 'w') as configfile:
# bot.write(configfile)
bot_name = client.username
pass_value = randint(0, 9999)
dota.abandon_current_game()
dota.leave_practice_lobby()
dota.create_tournament_lobby(password=str(pass_value), tournament_game_id=None, tournament_id=0, options={
'allow_cheats': False,
'visibility': 0,
'server_region': 10, #10-> Brazil
'game_mode': 2, # 2-> CAPTAINS MODE, 1-> ALL PICK
'game_name': lobby_name,
})
dota.sleep(1)
dota.join_practice_lobby_team()
dota.sleep(1)
    # A for loop could be used here later
dota.invite_to_lobby(player1)
dota.invite_to_lobby(player2)
dota.invite_to_lobby(player3)
dota.invite_to_lobby(player4)
dota.invite_to_lobby(player5)
dota.invite_to_lobby(player6)
dota.invite_to_lobby(player7)
dota.invite_to_lobby(player8)
dota.invite_to_lobby(player9)
dota.invite_to_lobby(player10)
dota.channels.join_lobby_channel()
bot[bot_sec]['chatipo'] = 'lobbycriado'
bot[bot_sec]['conteudo'] = 'O bot criou a sala '+lobby_name+' com a seguinte senha: '+str(pass_value)+'.'
with open(botspath, 'w') as configfile:
bot.write(configfile)
dota.sleep(1)
bot[bot_sec]['chatipo'] = 'nada'
bot[bot_sec]['conteudo'] = 'nada'
with open(botspath, 'w') as configfile:
bot.write(configfile)
dota.sleep(9)
if dota.lobby:
if dota.channels.join_lobby_channel() is not None:
lobby_chat = dota.channels.lobby
lobby_chat.send('Obrigado por jogar na Dolex.')
else:
None
            #print('Could not join the lobby chat.')
else:
None
        #print('Could not join the lobby.')
dota.sleep(1)
for cont in range(0,180,+1):
if cont == 0:
dota.channels.lobby.send('Faltam 3 minutos para começar a partida')
if cont == 60:
dota.channels.lobby.send('Faltam 2 minutos para começar a partida')
if cont == 120:
dota.channels.lobby.send('Falta 1 minuto para começar a partida')
        if cont == 170:
            dota.channels.lobby.send('Partida começa em:')
            for n in range(10,0,-1):
                dota.channels.lobby.send(str(n))
                dota.sleep(1)
        # sleep one second per iteration so the loop actually spans the announced ~3 minutes
        dota.sleep(1)
for x in range(0, len(playerslot), +1):
if playerslot[x+1]['slot'] is not None:
counterslot += 1
if playerslot[x+1]['slot'] is None:
bot[bot_sec]['chatipo'] = 'banirplayerdapla'#'tirarpontosplayer'
bot[bot_sec]['conteudo'] = str(playerslot[x+1]['id'])
with open(botspath, 'w') as configfile:
bot.write(configfile)
dota.sleep(1)
    if counterslot == 10:  # later: '== 10'
dota.launch_practice_lobby()
elif counterslot < 10:
bot[bot_sec]['estado'] = 'livre'
bot[bot_sec]['comecou'] = 'nao'
with open(botspath, 'w') as configfile:
bot.write(configfile)
dota.sleep(1)
bot[bot_sec]['chatipo'] = 'playersnaoforamparaolobby'
bot[bot_sec]['conteudo'] = 'O bot não conseguiu iniciar partida por falta de jogadores.'
with open(botspath, 'w') as configfile:
bot.write(configfile)
dota.sleep(1)
bot[bot_sec]['chatipo'] = 'nada'
bot[bot_sec]['conteudo'] = 'nada'
with open(botspath, 'w') as configfile:
bot.write(configfile)
dota.channels.lobby.send('Sem jogadores suficientes para começar partida.')
dota.leave_practice_lobby()
dota.sleep(1)
dota.exit()
exit()
dota.sleep(1)
@dota.socache.on(('updated', dota.socache.ESOType.CSODOTALobby))
def lobby_match_update(obj):
    #print('Some lobby has been updated...')
if obj.match_outcome:
if obj.match_outcome == 2:
bot[bot_sec]['chatipo'] = 'radganhou'
bot[bot_sec]['conteudo'] = 'O time Radiant ganhou.'
with open(botspath, 'w') as configfile:
bot.write(configfile)
dota.sleep(1)
bot[bot_sec]['chatipo'] = 'nada'
bot[bot_sec]['conteudo'] = 'nada'
with open(botspath, 'w') as configfile:
bot.write(configfile)
dota.sleep(1)
dota.exit()
bot[bot_sec]['comecou'] = 'nao'
bot[bot_sec]['estado'] = 'livre'
with open(botspath, 'w') as configfile:
bot.write(configfile)
client.sleep(1)
exit()
elif obj.match_outcome == 3:
bot[bot_sec]['chatipo'] = 'dirganhou'
bot[bot_sec]['conteudo'] = 'O time Dire ganhou.'
with open(botspath, 'w') as configfile:
bot.write(configfile)
dota.sleep(1)
bot[bot_sec]['chatipo'] = 'nada'
bot[bot_sec]['conteudo'] = 'nada'
with open(botspath, 'w') as configfile:
bot.write(configfile)
dota.sleep(1)
dota.exit()
bot[bot_sec]['comecou'] = 'nao'
bot[bot_sec]['estado'] = 'livre'
with open(botspath, 'w') as configfile:
bot.write(configfile)
client.sleep(1)
exit()
        elif obj.match_outcome in (66, 68, 67, 0, 65, 64):
            # match was not scored (server crash, cancelled, never started, poor network,
            # leaver) or the outcome is unknown: mark the bot as free again
            bot[bot_sec]['comecou'] = 'nao'
            bot[bot_sec]['estado'] = 'livre'
            with open(botspath, 'w') as configfile:
                bot.write(configfile)
            client.sleep(1)
            exit()
########################################
# obj.match_outcome :
# -DireVictory = 3
# -NotScored_Canceled = 68
# -NotScored_Leaver = 65
# -NotScored_NeverStarted = 67
# -NotScored_PoorNetworkConditions = 64
# -NotScored_ServerCrash = 66
# -RadVictory = 2
# -Unknown = 0
##########################################
if obj.members:
global playerslot
for i in range(len(obj.members)):
            # could also use a for loop here later if desired
if obj.members[i].id == player1:
steam32 = converteridto32(player1)
if player1time == 'rad':
if obj.members[i].team == 0:
playerslot[1]['slot'] = obj.members[i].name
if obj.members[i].team == 1:
playerslot[1]['slot'] = None
dota.practice_lobby_kick(account_id=steam32)
if obj.members[i].team == 4:
playerslot[1]['slot'] = None
elif player1time == 'dir':
if obj.members[i].team == 1:
playerslot[1]['slot'] = obj.members[i].name
if obj.members[i].team == 0:
playerslot[1]['slot'] = None
dota.practice_lobby_kick(account_id=steam32)
if obj.members[i].team == 4:
playerslot[1]['slot'] = None
if obj.members[i].id == player2:
steam32 = converteridto32(player2)
if player2time == 'rad':
if obj.members[i].team == 0:
playerslot[2]['slot'] = obj.members[i].name
if obj.members[i].team == 1:
playerslot[2]['slot'] = None
dota.practice_lobby_kick(account_id=steam32)
if obj.members[i].team == 4:
playerslot[2]['slot'] = None
elif player2time == 'dir':
if obj.members[i].team == 1:
playerslot[2]['slot'] = obj.members[i].name
if obj.members[i].team == 0:
playerslot[2]['slot'] = None
dota.practice_lobby_kick(account_id=steam32)
if obj.members[i].team == 4:
playerslot[2]['slot'] = None
if obj.members[i].id == player3:
steam32 = converteridto32(player3)
if player3time == 'rad':
if obj.members[i].team == 0:
playerslot[3]['slot'] = obj.members[i].name
if obj.members[i].team == 1:
playerslot[3]['slot'] = None
dota.practice_lobby_kick(account_id=steam32)
if obj.members[i].team == 4:
playerslot[3]['slot'] = None
elif player3time == 'dir':
if obj.members[i].team == 1:
playerslot[3]['slot'] = obj.members[i].name
if obj.members[i].team == 0:
playerslot[3]['slot'] = None
dota.practice_lobby_kick(account_id=steam32)
if obj.members[i].team == 4:
playerslot[3]['slot'] = None
if obj.members[i].id == player4:
steam32 = converteridto32(player4)
if player4time == 'rad':
if obj.members[i].team == 0:
playerslot[4]['slot'] = obj.members[i].name
if obj.members[i].team == 1:
playerslot[4]['slot'] = None
dota.practice_lobby_kick(account_id=steam32)
if obj.members[i].team == 4:
playerslot[4]['slot'] = None
elif player4time == 'dir':
if obj.members[i].team == 1:
playerslot[4]['slot'] = obj.members[i].name
if obj.members[i].team == 0:
playerslot[4]['slot'] = None
dota.practice_lobby_kick(account_id=steam32)
if obj.members[i].team == 4:
playerslot[4]['slot'] = None
if obj.members[i].id == player5:
steam32 = converteridto32(player5)
if player5time == 'rad':
if obj.members[i].team == 0:
playerslot[5]['slot'] = obj.members[i].name
if obj.members[i].team == 1:
playerslot[5]['slot'] = None
dota.practice_lobby_kick(account_id=steam32)
if obj.members[i].team == 4:
playerslot[5]['slot'] = None
elif player5time == 'dir':
if obj.members[i].team == 1:
playerslot[5]['slot'] = obj.members[i].name
if obj.members[i].team == 0:
playerslot[5]['slot'] = None
dota.practice_lobby_kick(account_id=steam32)
if obj.members[i].team == 4:
playerslot[5]['slot'] = None
if obj.members[i].id == player6:
steam32 = converteridto32(player6)
if player6time == 'rad':
if obj.members[i].team == 0:
playerslot[6]['slot'] = obj.members[i].name
if obj.members[i].team == 1:
playerslot[6]['slot'] = None
dota.practice_lobby_kick(account_id=steam32)
if obj.members[i].team == 4:
playerslot[6]['slot'] = None
elif player6time == 'dir':
if obj.members[i].team == 1:
playerslot[6]['slot'] = obj.members[i].name
if obj.members[i].team == 0:
playerslot[6]['slot'] = None
dota.practice_lobby_kick(account_id=steam32)
if obj.members[i].team == 4:
playerslot[6]['slot'] = None
if obj.members[i].id == player7:
steam32 = converteridto32(player7)
if player7time == 'rad':
if obj.members[i].team == 0:
playerslot[7]['slot'] = obj.members[i].name
if obj.members[i].team == 1:
playerslot[7]['slot'] = None
dota.practice_lobby_kick(account_id=steam32)
if obj.members[i].team == 4:
playerslot[7]['slot'] = None
elif player7time == 'dir':
if obj.members[i].team == 1:
playerslot[7]['slot'] = obj.members[i].name
if obj.members[i].team == 0:
playerslot[7]['slot'] = None
dota.practice_lobby_kick(account_id=steam32)
if obj.members[i].team == 4:
playerslot[7]['slot'] = None
if obj.members[i].id == player8:
steam32 = converteridto32(player8)
if player8time == 'rad':
if obj.members[i].team == 0:
playerslot[8]['slot'] = obj.members[i].name
if obj.members[i].team == 1:
playerslot[8]['slot'] = None
dota.practice_lobby_kick(account_id=steam32)
if obj.members[i].team == 4:
playerslot[8]['slot'] = None
elif player8time == 'dir':
if obj.members[i].team == 1:
playerslot[8]['slot'] = obj.members[i].name
if obj.members[i].team == 0:
playerslot[8]['slot'] = None
dota.practice_lobby_kick(account_id=steam32)
if obj.members[i].team == 4:
playerslot[8]['slot'] = None
if obj.members[i].id == player9:
steam32 = converteridto32(player9)
if player9time == 'rad':
if obj.members[i].team == 0:
playerslot[9]['slot'] = obj.members[i].name
if obj.members[i].team == 1:
playerslot[9]['slot'] = None
dota.practice_lobby_kick(account_id=steam32)
if obj.members[i].team == 4:
playerslot[9]['slot'] = None
elif player9time == 'dir':
if obj.members[i].team == 1:
playerslot[9]['slot'] = obj.members[i].name
if obj.members[i].team == 0:
playerslot[9]['slot'] = None
dota.practice_lobby_kick(account_id=steam32)
if obj.members[i].team == 4:
playerslot[9]['slot'] = None
if obj.members[i].id == player10:
steam32 = converteridto32(player10)
if player10time == 'rad':
if obj.members[i].team == 0:
playerslot[10]['slot'] = obj.members[i].name
if obj.members[i].team == 1:
playerslot[10]['slot'] = None
dota.practice_lobby_kick(account_id=steam32)
if obj.members[i].team == 4:
playerslot[10]['slot'] = None
elif player10time == 'dir':
if obj.members[i].team == 1:
playerslot[10]['slot'] = obj.members[i].name
if obj.members[i].team == 0:
playerslot[10]['slot'] = None
dota.practice_lobby_kick(account_id=steam32)
if obj.members[i].team == 4:
playerslot[10]['slot'] = None
client.cli_login(username=bot_user, password=bot_pass)
client.run_forever()
|
from collections import defaultdict
from dataclasses import dataclass
from math import ceil
from typing import Dict, List
@dataclass
class Reactant:
quantity: int
ID: str
@dataclass
class Reaction:
output: Reactant
dependencies: List[Reactant]
def ore_required_for(target: Reactant, reactions: Dict[str, Reaction]) -> int:
"""Return the units of ORE needed to produce the target."""
ore = 0
excess: Dict[str, int] = defaultdict(int)
needed = [target]
while needed:
cur = needed.pop()
if cur.ID == "ORE":
ore += cur.quantity
continue
# Account for excess:
take = min(excess[cur.ID], cur.quantity)
excess[cur.ID] -= take
cur.quantity -= take
if not cur.quantity:
continue
producing_reaction = reactions[cur.ID]
needed_reactions = ceil(cur.quantity / producing_reaction.output.quantity)
excess[cur.ID] += (
producing_reaction.output.quantity * needed_reactions - cur.quantity
)
for dep in producing_reaction.dependencies:
needed.append(Reactant(ID=dep.ID, quantity=dep.quantity * needed_reactions))
return ore
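# Tiny worked example for ore_required_for (made-up reactions, not the puzzle input):
# with "10 ORE => 10 A", "1 ORE => 1 B" and "7 A, 1 B => 1 FUEL", one FUEL needs one
# batch of A (10 ORE, leaving 3 A as excess) plus one B (1 ORE), so
# ore_required_for(Reactant(ID="FUEL", quantity=1), reactions) returns 11.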
def binary_search(max_ore: int, lower: int, upper: int, reactions: Dict[str, Reaction]):
"""Return the max fuel possible to produce with the given ore supply."""
assert lower < upper
while lower + 1 < upper:
mid = (lower + upper) // 2
value = ore_required_for(Reactant(ID="FUEL", quantity=mid), reactions)
if value > max_ore:
upper = mid
elif value < max_ore:
lower = mid
else:
lower = upper = mid
return lower
# Parse input:
reactions: Dict[str, Reaction] = {}
with open("input.txt") as f:
for line in f:
inp, out = line.strip().split("=>")
result = Reactant(quantity=int(out.split()[0]), ID=out.split()[1])
reactants = [
Reactant(quantity=int(part.split()[0]), ID=part.split()[1])
for part in inp.split(",")
]
reactions[result.ID] = Reaction(result, reactants)
ore_to_fuel = ore_required_for(Reactant(ID="FUEL", quantity=1), reactions)
print("Part 1:", ore_to_fuel)
# For the binary search, ore // ore_per_fuel is a guaranteed lower bound on the producible fuel, and twice that is a comfortable upper bound in practice.
ore_available = 1000000000000
print(
"Part 2:",
binary_search(
max_ore=ore_available,
lower=ore_available // ore_to_fuel,
upper=ore_available // ore_to_fuel * 2,
reactions=reactions,
),
)
|
from .di import DiContainer as _DiContainer
def di_container(main_file_path):
"""
:type main_file_path: str
:rtype: gold_digger.di.DiContainer
"""
return _DiContainer(main_file_path)
_DiContainer.set_up_root_logger()
|
import os
import sys
import boto3
from dateutil import parser
from botocore.exceptions import ClientError
def get_latest_ami(ec2_conn, ami_filter, nodetype):
try:
latest = None
response = ec2_conn.describe_images(Filters=ami_filter)
if len(response['Images']) != 0:
print('[INFO] AMIs found:')
for image in response['Images']:
print('ImageId: '+image['ImageId']+' | Name: '+image['Name']+' | CreationDate: '+image['CreationDate'])
if not latest:
latest = image
continue
if parser.parse(image['CreationDate']) > parser.parse(latest['CreationDate']):
latest = image
return latest
else:
print('[ERR] No AMIs found!')
sys.exit('[ERR] No AMIs found!')
except ClientError as error:
print('[ERR] '+str(error))
raise
def run_ec2(ec2_conn, aws_region, image_id, instance_type, subnet_id, security_group_id, instance_profile_arn, assessment_template_arn, timeout_s, tags):
try:
response = ec2_conn.run_instances(
ImageId=image_id,
InstanceType=instance_type,
MaxCount=1,
MinCount=1,
TagSpecifications=[{'ResourceType': 'instance', 'Tags': tags}],
SubnetId=subnet_id,
SecurityGroupIds=[security_group_id],
UserData="\n".join([
"#!/bin/bash",
"curl -O https://d1wk0tztpsntt1.cloudfront.net/linux/latest/install",
"/bin/bash install",
"/etc/init.d/awsagent start",
"echo -e '\n[INFO] Starting assessment...'",
"assessment_run_arn=`aws inspector start-assessment-run --assessment-template-arn {} --region {} | jq -r .assessmentRunArn`",
"status='unknown'",
"timeout_s={}",
"sleep_s=60",
"repeates=$((timeout_s/sleep_s))",
"echo -e '\n[INFO] Monitoring assessment status...'",
"while [[ $status != 'COMPLETED' && $status != 'FAILED' && $repeates -gt 1 ]]; do",
"let repeates=$repeates-1",
"status=`aws inspector describe-assessment-runs --assessment-run $assessment_run_arn --region {} | jq -r .assessmentRuns[].state`",
"echo `date` '| STATUS:' $status",
"sleep $sleep_s",
"done",
"echo -e '\n[INFO] Assessment finished.'",
"echo '[INFO] Shutting down...'",
"/sbin/shutdown -h now"
]).format(assessment_template_arn, aws_region, timeout_s, aws_region),
IamInstanceProfile={'Arn': instance_profile_arn},
InstanceInitiatedShutdownBehavior='terminate'
)
instance_id = response.get("Instances")[0].get("InstanceId")
return instance_id
except ClientError as error:
print('[ERR] '+str(error))
raise
def handler(event, context):
# event received
print('[INFO] Event received: '+str(event))
# prep
session = boto3.session.Session()
aws_region = session.region_name
nodetype = event.get('nodetype')
# environment variables capture
instance_type = os.environ['instance_type']
subnet_id = os.environ['subnet_id']
security_group_id = os.environ['security_group_id']
instance_profile_arn = os.environ['instance_profile_arn']
assessment_template_arn = os.environ['assessment_template_arn']
timeout_s = os.environ['timeout_s']
# create ec2 service client
ec2_conn = boto3.client('ec2', region_name=aws_region)
# get latest AMI ID based on filters
ami_filter = [
{ 'Name': 'tag:nodetype', 'Values': [nodetype] },
{ 'Name': 'state', 'Values': ['available'] }
]
latest_ami = get_latest_ami(ec2_conn, ami_filter, nodetype)
print('[INFO] The latest AMI:')
print('ImageId: '+latest_ami['ImageId']+' | Name: '+latest_ami['Name']+' | CreationDate: '+latest_ami['CreationDate'])
# run ec2 instance and start AWS Inspector assessment
tags = [
{ 'Key': 'Name', 'Value': 'aws-inspector-test-'+nodetype },
{ 'Key': 'AWSInspectorScan', 'Value': 'true' },
{ 'Key': 'Nodetype', 'Value': nodetype },
{ 'Key': 'ImageId', 'Value': latest_ami['ImageId'] }
]
test_instance_id = run_ec2(
ec2_conn,
aws_region,
latest_ami['ImageId'],
instance_type,
subnet_id,
security_group_id,
instance_profile_arn,
assessment_template_arn,
timeout_s,
tags
)
print('[INFO] Test Instance started: '+test_instance_id)
# the end
print('[INFO] All good! Quitting...')
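# Example invocation event for the handler above (hypothetical value):
#   {"nodetype": "webserver"}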
|
# The for loop statement
for i in range(5):
    print("Hello")  # As a result, "Hello" will be printed five times
lst = [1, 3, 5, 7, 9]
for i in lst:
    print(i ** 2)  # You can also iterate over every character of a string.
word_str = "Hello, world!"
for l in word_str:
    print(l)  # The string "Hello, world!" is printed one character per line.
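# A small added illustration (not in the original tutorial text): range() also accepts
# start, stop and step arguments, which is convenient for counting with a stride.
for k in range(2, 11, 2):
    print(k)  # prints the even numbers 2, 4, 6, 8, 10, one per line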
|
import pandas as pd
import os
import numpy as np
import json
import glob
import sys
import time
#from git import Repo
def find_range(x,a,b,option='within'):
"""
Find indices of data within or outside range [a,b]
Inputs:
-------
x - numpy.ndarray
Data to search
a - float or int
Minimum value
b - float or int
Maximum value
option - String
'within' or 'outside'
Output:
-------
inds - numpy.ndarray
Indices of x that fall within or outside specified range
"""
if option=='within':
return np.where(np.logical_and(x>=a, x<=b))[0]
elif option=='outside':
return np.where(np.logical_or(x < a, x > b))[0]
else:
        raise ValueError('unrecognized option parameter: {}'.format(option))
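# Minimal usage sketch for find_range (illustrative, not part of the original module):
#   x = np.array([0.5, 1.5, 2.5, 3.5])
#   find_range(x, 1, 3)             # -> array([1, 2]), values within [1, 3]
#   find_range(x, 1, 3, 'outside')  # -> array([0, 3]), values outside [1, 3]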
def rms(data):
"""
Computes root-mean-squared voltage of a signal
Input:
-----
data - numpy.ndarray
Output:
------
rms_value - float
"""
return np.power(np.mean(np.power(data.astype('float32'),2)),0.5)
def write_probe_json(output_file, channels, offset, scaling, mask, surface_channel, air_channel, vertical_pos, horizontal_pos):
"""
Writes a json file containing information about one Neuropixels probe.
Inputs:
-------
output_file : file path
Location for writing the json file
channels : numpy.ndarray (384 x 0)
Probe channel numbers
offset : numpy.ndarray (384 x 0)
Offset of each channel from zero
scaling : numpy.ndarray (384 x 0)
Relative noise level on each channel
mask : numpy.ndarray (384 x 0)
1 if channel contains valid data, 0 otherwise
surface_channel : Int
Index of channel at brain surface
air_channel : Int
Index of channel at interface between saline/agar and air
vertical_pos : numpy.ndarray (384 x 0)
Distance (in microns) of each channel from the probe tip
horizontal_pos : numpy.ndarray (384 x 0)
Distance (in microns) of each channel from the probe edge
Outputs:
--------
output_file.json (written to disk)
"""
with open(output_file, 'w') as outfile:
json.dump(
{
'channel' : channels.tolist(),
'offset' : offset.tolist(),
'scaling' : scaling.tolist(),
'mask' : mask.tolist(),
'surface_channel' : surface_channel,
'air_channel' : air_channel,
'vertical_pos' : vertical_pos.tolist(),
'horizontal_pos' : horizontal_pos.tolist()
},
outfile,
indent = 4, separators = (',', ': ')
)
def read_probe_json(input_file):
"""
Reads a json file containing information about one Neuropixels probe.
Inputs:
-------
input_file : file path
Location of file to read
Outputs:
--------
mask : numpy.ndarray (384 x 0)
1 if channel contains valid data, 0 otherwise
offset : numpy.ndarray (384 x 0)
Offset of each channel from zero
scaling : numpy.ndarray (384 x 0)
Relative noise level on each channel
surface_channel : Int
Index of channel at brain surface
air_channel : Int
Index of channel at interface between saline/agar and air
"""
with open(input_file) as data_file:
data = json.load(data_file)
scaling = np.array(data['scaling'])
mask = np.array(data['mask'])
offset = np.array(data['offset'])
surface_channel = data['surface_channel']
air_channel = data['air_channel']
return mask, offset, scaling, surface_channel, air_channel
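# Illustrative round trip for the two probe-JSON helpers (hypothetical values):
#   ch = np.arange(384)
#   write_probe_json('probe_info.json', ch, np.zeros(384), np.ones(384), np.ones(384),
#                    350, 383, ch * 10, (ch % 2) * 16)
#   mask, offset, scaling, surface_channel, air_channel = read_probe_json('probe_info.json')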
def write_cluster_group_tsv(IDs, quality, output_directory, filename = 'cluster_group.tsv'):
"""
Writes a tab-separated cluster_group.tsv file
Inputs:
-------
IDs : list
List of cluster IDs
quality : list
Quality ratings for each unit (same size as IDs)
output_directory : String
Location to save the file
Outputs:
--------
cluster_group.tsv (written to disk)
"""
df = pd.DataFrame(data={'cluster_id' : IDs, 'group': quality})
print('Saving data...')
df.to_csv(os.path.join(output_directory, filename), sep='\t', index=False)
def read_cluster_group_tsv(filename):
"""
Reads a tab-separated cluster_group.tsv file from disk
Inputs:
-------
filename : String
Full path of file
Outputs:
--------
IDs : list
List of cluster IDs
quality : list
Quality ratings for each unit (same size as IDs)
"""
info = pd.read_csv(filename, sep='\t')
cluster_ids = info['cluster_id'].values.astype('int')
try:
cluster_quality = info['group'].values
except KeyError:
cluster_quality = info['KSLabel'].values
return cluster_ids, cluster_quality
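# Illustrative usage of the cluster_group.tsv helpers (hypothetical IDs and labels):
#   write_cluster_group_tsv([0, 1, 2], ['good', 'mua', 'noise'], '/tmp')
#   ids, quality = read_cluster_group_tsv('/tmp/cluster_group.tsv')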
def load(folder, filename):
"""
Loads a numpy file from a folder.
Inputs:
-------
folder : String
Directory containing the file to load
filename : String
Name of the numpy file
Outputs:
--------
data : numpy.ndarray
File contents
"""
return np.load(os.path.join(folder, filename))
def load_kilosort_data(folder,
sample_rate = None,
convert_to_seconds = True,
use_master_clock = False,
include_pcs = False,
template_zero_padding= 21):
"""
Loads Kilosort output files from a directory
Inputs:
-------
folder : String
Location of Kilosort output directory
sample_rate : float (optional)
AP band sample rate in Hz
convert_to_seconds : bool (optional)
Flags whether to return spike times in seconds (requires sample_rate to be set)
use_master_clock : bool (optional)
Flags whether to load spike times that have been converted to the master clock timebase
include_pcs : bool (optional)
Flags whether to load spike principal components (large file)
template_zero_padding : int (default = 21)
Number of zeros added to the beginning of each template
Outputs:
--------
spike_times : numpy.ndarray (N x 0)
Times for N spikes
spike_clusters : numpy.ndarray (N x 0)
Cluster IDs for N spikes
spike_templates : numpy.ndarray (N x 0)
Template IDs for N spikes
amplitudes : numpy.ndarray (N x 0)
Amplitudes for N spikes
unwhitened_temps : numpy.ndarray (M x samples x channels)
Templates for M units
channel_map : numpy.ndarray
Channels from original data file used for sorting
cluster_ids : Python list
Cluster IDs for M units
cluster_quality : Python list
Quality ratings from cluster_group.tsv file
pc_features (optinal) : numpy.ndarray (N x channels x num_PCs)
PC features for each spike
pc_feature_ind (optional) : numpy.ndarray (M x channels)
Channels used for PC calculation for each unit
"""
if use_master_clock:
spike_times = load(folder,'spike_times_master_clock.npy')
else:
spike_times = load(folder,'spike_times.npy')
spike_clusters = load(folder,'spike_clusters.npy')
spike_templates = load(folder, 'spike_templates.npy')
amplitudes = load(folder,'amplitudes.npy')
templates = load(folder,'templates.npy')
unwhitening_mat = load(folder,'whitening_mat_inv.npy')
channel_map = np.squeeze(load(folder, 'channel_map.npy'))
if include_pcs:
pc_features = load(folder, 'pc_features.npy')
pc_feature_ind = load(folder, 'pc_feature_ind.npy')
templates = templates[:,template_zero_padding:,:] # remove zeros
spike_clusters = np.squeeze(spike_clusters) # fix dimensions
spike_templates = np.squeeze(spike_templates) # fix dimensions
spike_times = np.squeeze(spike_times)# fix dimensions
if convert_to_seconds and sample_rate is not None:
spike_times = spike_times / sample_rate
unwhitened_temps = np.zeros((templates.shape))
for temp_idx in range(templates.shape[0]):
unwhitened_temps[temp_idx,:,:] = np.dot(np.ascontiguousarray(templates[temp_idx,:,:]),np.ascontiguousarray(unwhitening_mat))
try:
cluster_ids, cluster_quality = read_cluster_group_tsv(os.path.join(folder, 'cluster_group.tsv'))
except OSError:
cluster_ids = np.unique(spike_clusters)
cluster_quality = ['unsorted'] * cluster_ids.size
if not include_pcs:
return spike_times, spike_clusters, spike_templates, amplitudes, unwhitened_temps, channel_map, cluster_ids, cluster_quality
else:
return spike_times, spike_clusters, spike_templates, amplitudes, unwhitened_temps, channel_map, cluster_ids, cluster_quality, pc_features, pc_feature_ind
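# Illustrative call (hypothetical output folder; 30 kHz AP-band sample rate assumed):
#   (spike_times, spike_clusters, spike_templates, amplitudes, templates,
#    channel_map, cluster_ids, cluster_quality, pc_features, pc_feature_ind) = \
#       load_kilosort_data('/data/ks2_output', sample_rate=30000., include_pcs=True)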
def get_spike_depths(spike_templates, pc_features, pc_feature_ind):
"""
Calculates the distance (in microns) of individual spikes from the probe tip
This implementation is based on Matlab code from github.com/cortex-lab/spikes
Input:
-----
spike_templates : numpy.ndarray (N x 0)
Template IDs for N spikes
pc_features : numpy.ndarray (N x channels x num_PCs)
PC features for each spike
pc_feature_ind : numpy.ndarray (M x channels)
Channels used for PC calculation for each unit
Output:
------
spike_depths : numpy.ndarray (N x 0)
Distance (in microns) from each spike waveform from the probe tip
"""
pc_features_copy = np.copy(pc_features)
pc_features_copy = np.squeeze(pc_features_copy[:,0,:])
pc_features_copy[pc_features_copy < 0] = 0
pc_power = pow(pc_features_copy, 2)
spike_feat_ind = pc_feature_ind[spike_templates, :]
spike_depths = np.sum(spike_feat_ind * pc_power, 1) / np.sum(pc_power,1)
return spike_depths * 10
def get_spike_amplitudes(spike_templates, templates, amplitudes):
"""
Calculates the amplitude of individual spikes, based on the original template
plus a scaling factor
This implementation is based on Matlab code from github.com/cortex-lab/spikes
Inputs:
-------
spike_templates : numpy.ndarray (N x 0)
Template IDs for N spikes
templates : numpy.ndarray (M x samples x channels)
Unwhitened templates for M units
amplitudes : numpy.ndarray (N x 0)
Amplitudes for N spikes
Outputs:
--------
spike_amplitudes : numpy.ndarray (N x 0)
Amplitudes for N spikes
"""
template_amplitudes = np.max(np.max(templates,1) - np.min(templates,1),1)
spike_amplitudes = template_amplitudes[spike_templates] * amplitudes
return np.squeeze(spike_amplitudes)
def get_repo_commit_date_and_hash(repo_location):
"""
Finds the date and hash of the latest commit in a git repository
Input:
------
repo_location - String
Local directory containing the git repository
Outputs:
--------
commit_date - String
Date string of the latest commit
commit_hash - String
Hash of the latest commit
"""
if os.path.exists(repo_location):
        from git import Repo  # local import so gitpython is only required when a repo is actually read
        repo = Repo(repo_location)
headcommit = repo.head.commit
commit_date = time.strftime("%a, %d %b %Y %H:%M", time.gmtime(headcommit.committed_date))
commit_hash = headcommit.hexsha
else:
commit_date = 'repository not available'
commit_hash = 'repository not available'
return commit_date, commit_hash
def printProgressBar(iteration, total, prefix = '', suffix = '', decimals = 0, length = 40, fill = '▒'):
"""
Call in a loop to create terminal progress bar
Code from https://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console
Inputs:
-------
iteration - Int
Current iteration
total - Int
Total iterations
prefix - Str (optional)
Prefix string
suffix - Str (optional)
Suffix string
decimals - Int (optional)
Positive number of decimals in percent complete
length - Int (optional)
Character length of bar
fill - Str (optional)
Bar fill character
Outputs:
--------
None
"""
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
filledLength = int(length * iteration // total)
bar = fill * filledLength + '░' * (length - filledLength)
sys.stdout.write('\r%s %s %s%% %s' % (prefix, bar, percent, suffix))
sys.stdout.flush()
if iteration == total:
        print()
|
#!/usr/bin/env python
import sys
from setuptools import find_packages, setup
requirements = ['torch']
assert sys.version_info[0] == 3
if sys.version_info[1] < 7:
requirements.append('dataclasses')
dev_requirements = {'dev': ['mypy>=0.660', 'pycodestyle>=2.4.0']}
test_requirements = ['pytest>=4.1.1']
setup(
name='ML-ToolBox',
version='0.1',
description='ML ToolBox for PyTorch',
author='Zonglin Li',
author_email='[email protected]',
url='https://github.com/zli117/ML-ToolBox',
packages=find_packages(exclude=('test',)),
install_requires=requirements,
extras_require=dev_requirements,
tests_require=test_requirements,
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
])
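# Typical local install commands for this setup.py (illustrative):
#   pip install -e .           # library plus the torch requirement
#   pip install -e ".[dev]"    # additionally pulls in mypy and pycodestyle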
|
import os
from meaningless.bible_base_extractor import BaseExtractor
from meaningless.utilities import xml_file_interface
class XMLExtractor(BaseExtractor):
"""
    An extractor object that retrieves Bible passages from an XML file
"""
def __init__(self, translation='NIV', show_passage_numbers=True, output_as_list=False,
strip_excess_whitespace_from_list=False, default_directory=os.getcwd(),
use_ascii_punctuation=False):
super().__init__(xml_file_interface.read, translation, show_passage_numbers, output_as_list,
strip_excess_whitespace_from_list, default_directory, use_ascii_punctuation,
file_extension='.xml', read_key_as_string=True)
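# Hedged usage sketch (method names follow the BaseExtractor interface and may differ):
#   extractor = XMLExtractor(default_directory='bibles/xml')
#   text = extractor.get_passage('Genesis', 1, 1)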
|
from sympy.core.numbers import RealNumber
from ..syms import syms
import numpy as np
from sklearn.linear_model import LinearRegression
from ..sym_predict import register_sym_predict
def sym_predict_linear(estimator):
if hasattr(estimator, 'intercept_'):
expression = RealNumber(estimator.intercept_[0])
else:
expression = RealNumber(0)
symbols = syms(estimator)
for coef, sym in zip(np.ravel(estimator.coef_), symbols):
expression += RealNumber(coef) * sym
return expression
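# Illustrative outcome (hedged; the actual symbols come from this package's syms()):
# for a fitted LinearRegression with coef_ = [2.0, -1.0] and intercept_ = [0.5],
# the returned sympy expression is 0.5 + 2.0*x0 - 1.0*x1 for symbols x0, x1.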
register_sym_predict(LinearRegression, sym_predict_linear)
|
import sys
sys.path.append("../")
from settings import (NES_PALETTE_HEX, animation_settings)
from core import sprite
ColScottOConnorWalkRight01 = sprite(
palette = {
"b":NES_PALETTE_HEX[0, 13],
"d":NES_PALETTE_HEX[0, 6],
"l":NES_PALETTE_HEX[1, 6],
"w":NES_PALETTE_HEX[3, 0],
},
matrix = [
"x14b6x2",
"x11b3d1l5b1x1",
"x7b4d1l9b1",
"x4b3d1l13b1",
"x2b2d1l16b1",
"x1b1d1l14w1l1w1l1b1",
"b1d1l1d3l7d3l3w1b1x1",
"x1b1d1b3d8w2d1l1b3x1",
"x2b1x3b6d1w4d2b1x2",
"x11b1d1w3d3b1x2",
"x10b1d1w3d4b1x2",
"x10b1d1w2d1w2d2b1x2",
"x10b1d1w5d1l2b1x1",
"x11b1d1w4d1l2b1x1",
"x10b1d4w3d1b1x2",
"x9b1d4w1d4b1x2",
"x10b1d2w4d1b1x3",
"x11b1d1w5d1b1x2",
"x12b1d1w4d1b1x2",
"x12b1d2w4b1x2",
"x11b1d1w6b1x2",
"x10b1d1w7b1x2",
"x10b1d1w1d1w4b1x3",
"x9b1d1l1d1w1d2w2b1x3",
"x8b1d1l3d1w3b1x4",
"x8b1d1l3d2b2x5",
"x9b1d1l3d1b1x6",
"x10b1d5b1x5",
"x11b2d1l2b1x5",
"x13b3x6",
]
)
ColScottOConnorWalkRight02 = sprite(
palette = {
"b":NES_PALETTE_HEX[0, 13],
"d":NES_PALETTE_HEX[0, 6],
"l":NES_PALETTE_HEX[1, 6],
"w":NES_PALETTE_HEX[3, 0],
},
matrix = [
"x15b5x5",
"x13b2d1l4b1x4",
"x11b2d1l7b1x3",
"x9b2d1l9b1x3",
"x7b2d1l11b1x3",
"x4b3d1l9w1l1w1l1b1x3",
"x1b3d1l9d3l2w1b1x4",
"b1d1l10d4w1d2b1x2b2x1",
"x1b1d4l4d2w1d2w3d1b1x1b1l2b1",
"x2b4d4w2d3w3d1b3l2b1",
"x6b2d1w3d3w4b1w2d1l1b1",
"x8b1d1w1d5w7d1b1",
"x9b1d6w6d1b1x1",
"x10b2w2d4w3d1b1x2",
"x10b1d4w1d4b1x4",
"x9b1d7b3x5",
"x9b1d2w2d4b1x6",
"x7b2d1w5d2w2b1x5",
"x5b2d2w6d2w2b1x5",
"x2b3d2w7d2w4b1x4",
"x1b1d3w1d1w7b1d1w4b1x4",
"b1d1l2d1w8b2d1w5b1x3",
"b1d1l2d1w1d1w5b1x1b1d1w5b1x3",
"b1d1l2b1w1d1w4b1x3b1d1w3d1b1x3",
"b1d1l1b3w1d1w1b2x4b1d4w1b1x3",
"x1b2x3b3x6b1d1w3b1x4",
"x16b1d3b1x4",
"x17b1d1l2b1x3",
"x17b1d1l3b1x2",
"x17b6x2",
]
)
ColScottOConnorWalkRight03 = sprite(
palette = {
"b":NES_PALETTE_HEX[0, 13],
"d":NES_PALETTE_HEX[0, 6],
"l":NES_PALETTE_HEX[1, 6],
"w":NES_PALETTE_HEX[3, 0],
},
matrix = [
"x14b5x2",
"x12b2d1l4b1x1",
"x10b2d1l7b1",
"x9b1d1l9b1",
"x8b1d1l10b1",
"x7b1d1l7w1l1w1l1b1",
"x1b2x2b2d1l6d2l2w1b1x1",
"b1d1l1b2d1l6d1w2d1l1b3x1",
"x1b1d1l8d1w4d2b1x2",
"x2b1d2l4d3w3d3b1x2",
"x3b2d4b1d1w3d4b1x2",
"x5b5d1w2d1w3b2x2",
"x9b1d2w5l2b1x1",
"x10b1w1d1w4l2b1x1",
"x9b1d7b2x2",
"x9b1d2w3d2b1x3",
"x8b1d2w5b1x4",
"x9b1d1w6b1x3",
"x10b1d1w5b1x3",
"x10b1d1w6b1x2",
"x10b1d2w5b1x2",
"x9b1d3w5b1x2",
"x8b1d3w6b1x2",
"x8b1d2w1d1w4b1x3",
"x8b1d3w2d2w1b1x3",
"x9b1d3w3b1x4",
"x9b1d2l1d2b1x5",
"x9b1d1l3b1x6",
"x10b1l4b1x5",
"x11b4x6",
]
)
ColScottOConnorWalkRight04 = sprite(
palette = {
"b":NES_PALETTE_HEX[0, 13],
"d":NES_PALETTE_HEX[0, 6],
"l":NES_PALETTE_HEX[1, 6],
"w":NES_PALETTE_HEX[3, 0],
},
matrix = [
"x16b4x5",
"x13b3l4b1x4",
"x11b2d1l7b1x3",
"x8b3d1l9b1x3",
"x6b2d1l12b1x3",
"x4b2d1l10w1l1w1l1b1x3",
"x1b3d1l9d3l2w1b1x4",
"b1d1l10d2w2d1l1b3x4",
"x1b1d5l3d2w5d2b1x2b2x1",
"x2b5d3b1d1w4d3b1x1b1l2b1",
"x7b4w2d6b2d1l2b1",
"x9b1d1w1d1w3d2b1d2w1d2b1",
"x9b1d1w5l2d1w5b1",
"x9b1d2w4l2d2w3b1x1",
"x10b1d3w2d4w1d2b1x1",
"x9b1d6w1d4b2x2",
"x9b1d3w5b3x4",
"x7b2d1w1d2w6b1x5",
"x5b2d2w3d2w6b1x4",
"x4b1d2w6d1w6b1x4",
"x3b1d1w8d1w7b1x3",
"x2b1d2w7b2d1w6b1x3",
"x1b1d1l2d1w5b1x1b1d1w7b1x2",
"x1b1d1l2b1w4b1x2b1d1w6d1b1x2",
"x1b1d1l1b1x1b4x4b1d1w3d2w1b1x2",
"x2b2x10b1d1w1d2w2b2x2",
"x15b1d1w3d1l1b1x2",
"x16b1d3l3b1x1",
"x17b2d1l4b1",
"x18b1d4b1x1",
"x19b4x2",
]
)
KabukiQuantumFighterBG01 = sprite(
palette = {
"b":NES_PALETTE_HEX[0, 13],
"p":NES_PALETTE_HEX[0, 4],
"o":NES_PALETTE_HEX[1, 7],
"l":NES_PALETTE_HEX[2, 7],
"g":NES_PALETTE_HEX[3, 11],
},
matrix = [
"b16" + "b4p1o1l1o3b1p1b2p1b1",
"b4p1b3p1b3p1b1p1b1" + "b1o1b2p1l1o3p1o1p1b4",
"b2p1b13" + "b1o1b1p1o1l1o5p2b1p1b1",
"b16" + "b1p1b1p1o2b1o2b1o2b4",
"b5p1b5l1b4" + "b1p1b1p2b2o1p1b2p2b3",
"b1p1b5p1b5p1b2" + "b1o1b1p1o1b2l1o1b2o1p1b1p1b1",
"b5p1b5p1b4" + "b1o1b1o1p1b2l1o1b2o2b3",
"b2o1b2p1b2p1b2p1b2p1b1" + "b5o1l1b2o2b3p1b1",
"p1b1l1b5p1b5p1b1" + "b1o1b4o1b2o1b6",
"b2o1b1p3b1p1b1o1l1o1b1p1b1" + "b1p1b2o1b2l1o1b2o1b2p1b1",
"b16" + "b5o1b4o1b5",
"b1p2o1p1b1p4b1p1l1p2b1" + "b1o1b1o1b1o1b4o1b3p1b1",
"b16" + "b1p1b1l1b2o1b2o1b2p1b1p1b1",
"b1p4b1p2o1p1b1p4b1" + "b1p1b1o2b2l1o1b2p2b3",
"b16" + "b1p1b1l1o1b1l1o3b1o1p1b1p1b1",
"b1l1p3b1p4b1p2o1p1b1" + "b1p1b1o2b1o1l1o1p1b1p1o1b3",
"b16" + "b3o2b1o3p1b1o1p1b1p1b1",
"b2p1b1o1l1o1b1p1b1p3b1p1b1" + "b1o1b1l1o1b1l1o3b1p1b2p1b1",
"p1b1p1b5p1b5p1b1" + "b1o1b1o2b1o3p1b1p2b1p1b1",
"b2p1b2p1b2p1b2o1b2p1b1" + "b1p1b1o2b1l1o3b1p2b3",
"b5p1b5o1b4" + "b1p1b1o2b1o1l1o2b1o1p1b1p1b1",
"b1p3b3p1o1p1b3p3" + "b1o1b1l1o1b1l1o2p1b1p1b2p1b1",
"b5p1b5p1b4" + "b1p1b1o2b1l2o1p1b1o1p1b3",
"b2o1b2p1b2p1b2p1b2p1b1" +"b4o1b1o1p1l1o1b1p1o1b1p1b1",
"p1b1l1b5p1b5p1b1" + "b3p1b2p4b1p2b3",
"b2o1b1p3b1p1b1o1l1o1b1p1b1" + "b1p1b2p1b2o1b1p1b1p1b2p1b1",
"b16" + "b3p1b2p1b1p1b3p1b1p1b1",
"b1p2o1p1b1p4b1p1l1p2b1" + "b1p1b1p1b12",
"b16" + "b7p1b4p1b3",
"b1p4b1p2o1p1b1p4b1" + "b16",
"b16" + "b16",
"b1g15" + "b1g15",
]
)
KabukiQuantumFighterBG02 = sprite(
palette = {
"b":NES_PALETTE_HEX[0, 13],
"r":NES_PALETTE_HEX[2, 8],
"l":NES_PALETTE_HEX[2, 9],
"g":NES_PALETTE_HEX[1, 11],
},
matrix = [
"b1g1b4g4b6" + "b16",
"b1g1b1g1l1b1g2b1g1b2g1b3" + "b16",
"b3g2b6g1b4" + "b16",
"b1g1b1g1l1b1g2b1g1b1g2b1g1b1" + "b16",
"b1g1b4l1g2b7" + "b16",
"b1g1b1g1l1b1g2b1g1b2g1b1g1b1" + "b16",
"b3g2b6g1b4" + "b16",
"b1g1b1g1l1b1l1g2b2g1b2g1b1" + "b16",
"b1g1b4g4b6" + "b16",
"b1g1b1g1l1b1g2b1g1b2g1b3" + "b8g8",
"b3g2b6g1b4" + "b8g1l1g1l2g2l1",
"b1g1b1g1l1b1g2b1g1b1g2b1g1b1" + "b8g2b1g3b1g1",
"b1g1b4l1g2b7" + "b8g1b1g2b2g2",
"b1g1b1g1l1b1g2b1g1b2g1b1g1b1" + "b16",
"b3g2b6g1b4" + "b16",
"b1g1b1g1l1b1l1g2b2g1b2g1b1" + "b16",
"b1g1b4g4b6" + "b16",
"b1g1b1g1l1b1g2b1g1b2g1b3" + "b16",
"b3g2b6g1b4" + "b16",
"b1g1b1g1l1b1g2b1g1b1g2b1g1b1" + "b16",
"b1g1b4l1g2b7" + "b16",
"b1g1b1g1l1b1g2b1g1b2g1b1g1b1" + "b16",
"b3g2b6g1b4" + "b16",
"b1g1b1g1l1b1l1g2b2g1b2g1b1" + "b16",
"b1g1b4g4b6" + "b16",
"b1g1b1g1l1b1g2b1g1b2g1b3" + "b8g8",
"b3g2b6g1b4" + "b8g1l1g1l2g2l1",
"b1g1b1g1l1b1g2b1g1b1g2b1g1b1" + "b8g2b1g3b1g1",
"b1g1b4l1g2b7" + "b8g1b1g2b2g2",
"b1g1b1g1l1b1g2b1g1b2g1b1g1b1" + "b16",
"b3g2b6g1b4" + "b16",
"r16" + "r16",
]
)
kabukiquantumfighter_animation = animation_settings(
sprite_list=[[ColScottOConnorWalkRight01,
ColScottOConnorWalkRight02,
ColScottOConnorWalkRight03,
ColScottOConnorWalkRight04],
],
bg_sprites=[KabukiQuantumFighterBG01,
KabukiQuantumFighterBG02,],
xoffs=[[0, -1, 0, -1],
],
yoffs=[[0, -1, 0, 0],
],
frame_time=0.030,
spbg_ratio=5,
center=True,
bg_scroll_speed=(1, 0),
cycles_per_char=5,
reversible="horizontal",
)
|
"""Numerical timestepping methods for stochastic differential equation (SDE) systems."""
import sympy
import symnum.numpy as snp
import symnum.diffops.symbolic as diffops
def euler_maruyama_step(drift_func, diff_coeff):
"""Construct Euler-Maruyama integrator step function."""
def forward_func(z, x, v, δ):
return x + δ * drift_func(x, z) + δ ** 0.5 * diff_coeff(x, z) @ v
return forward_func
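# The returned step implements x_next = x + δ·a(x, z) + √δ·B(x, z) @ v with v ~ N(0, I),
# i.e. the standard Euler-Maruyama discretisation of dX = a(X) dt + B(X) dW.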
def milstein_step(drift_func, diff_coeff, noise_type="diagonal"):
"""Construct Milstein scheme step function."""
if noise_type in ("scalar", "diagonal"):
def forward_func(z, x, v, δ):
δω = snp.sqrt(δ) * v
a = drift_func(x, z)
B = diff_coeff(x, z)
if noise_type == "diagonal":
B_dB_dx = snp.array(
[B[i, i] * B[i, i].diff(x[i]) for i in range(v.shape[0])]
)
else:
B_dB_dx = snp.array(
[(B * B[i].diff(x)).sum() for i in range(x.shape[0])]
)
x_ = x + δ * a + B @ δω + B_dB_dx * (δω ** 2 - δ) / 2
return snp.array([sympy.simplify(x_[i]) for i in range(x.shape[0])])
else:
raise NotImplementedError(f"Noise type {noise_type} not implemented.")
return forward_func
def strong_order_1p5_step(drift_func, diff_coeff, noise_type="additive"):
"""Construct strong-order 1.5 Taylor scheme step function."""
if noise_type == "additive":
def forward_func(z, x, v, δ):
dim_noise = v.shape[0] // 2
δω = snp.sqrt(δ) * v[:dim_noise]
δζ = δ * snp.sqrt(δ) * (v[:dim_noise] + v[dim_noise:] / snp.sqrt(3)) / 2
x_ = (
x
+ δ * drift_func(x, z)
+ diff_coeff(x, z) @ δω
+ (δ ** 2 / 2)
* diffusion_operator(drift_func, diff_coeff)(drift_func)(x, z)
+ sum(
Lj_operator(diff_coeff, j)(drift_func)(x, z) * δζ[j]
for j in range(dim_noise)
)
)
return snp.array([sympy.simplify(x_[i]) for i in range(x.shape[0])])
elif noise_type == "scalar":
def forward_func(z, x, v, δ):
δω = snp.sqrt(δ) * v[:1]
δζ = δ * snp.sqrt(δ) * (v[:1] + v[1:] / snp.sqrt(3)) / 2
x_ = (
x
+ δ * drift_func(x, z)
+ diff_coeff(x, z) @ δω
+ Lj_operator(diff_coeff, 0)(diff_coeff)(x, z) @ (δω ** 2 - δ) / 2
+ Lj_operator(diff_coeff, 0)(drift_func)(x, z) * δζ
+ diffusion_operator(drift_func, diff_coeff)(
lambda x, z: diff_coeff(x, z)[:, 0]
)(x, z)
* (δω * δ - δζ)
+ (δ ** 2 / 2)
* diffusion_operator(drift_func, diff_coeff)(drift_func)(x, z)
+ Lj_operator(diff_coeff, 0)(Lj_operator(diff_coeff, 0)(diff_coeff))(
x, z
)
@ (δω ** 3 / 3 - δ * δω)
)
return snp.array([sympy.simplify(x_[i]) for i in range(x.shape[0])])
else:
raise NotImplementedError(f"Noise type {noise_type} not implemented.")
return forward_func
def diffusion_operator(drift_func, diff_coeff):
"""Construct diffusion operator for autonomous Ito stochastic differential equation.
Diffusion operator here refers to the partial differential operator which is the
    infinitesimal generator of the stochastic process.
Args:
drift_func (Callable[[SymbolicArray, SymbolicArray], SymbolicArray]): Function
defining drift term of diffusion, accepting symbolic state and parameter
vectors (1D arrays) as arguments and returning a symbolic vector (1D array)
drift term.
diff_coeff (Callable[[SymbolicArray, SymbolicArray], SymbolicArray]): Function
defining diffusion coefficient term, accepting symbolic state and parameter
vectors (1D arrays) as arguments and returning a symbolic matrix (2D array)
diffusion coefficient term.
"""
def _diffusion_operator(func):
def diffusion_operator_func(x, z):
a = drift_func(x, z)
B = diff_coeff(x, z)
return (
diffops.jacobian_vector_product(func)(x, z)(a)
+ diffops.matrix_hessian_product(func)(x, z)(B @ B.T) / 2
)
return diffusion_operator_func
return _diffusion_operator
def Lj_operator(diff_coeff, j=0):
"""Construct Lj operator for autonomous Ito stochastic differential equation.
Lj operator here refers to the Lʲ partial differential operator defined in Equation
3.2 in Chapter 5 of Kloeden and Platen (1992)
Lʲf(x) = ∑ₖ Bₖⱼ(x) ∂ₖf(x)
Args:
diff_coeff (Callable[[SymbolicArray, SymbolicArray], SymbolicArray]): Function
defining diffusion coefficient term, accepting symbolic state and parameter
vectors (1D arrays) as arguments and returning a symbolic matrix (2D array)
diffusion coefficient term.
j (int): Column index of diffusion coefficient term (zero-based).
"""
def Lj(func):
def Lj_func(x, z):
B = diff_coeff(x, z)
return diffops.jacobian_vector_product(func)(x, z)(B[:, j])
return Lj_func
return Lj
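# Hedged usage sketch (symbolic state/parameter/noise vectors from symnum are assumed):
#   forward = euler_maruyama_step(drift_func, diff_coeff)
#   x_next = forward(z, x, v, δ)  # params z, current state x, standard normal draw v, step size δ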
|
__author__ = 'lorenzo'
import dtk
import pickle
import sentence_encoder
import dataset_reader
from tree import Tree
import numpy
class DatasetCreator:
def __init__(self, file = "/Users/lorenzo/Documents/Universita/PHD/Lavori/DSTK/RTE/RTE3_dev_processed.xml.svm",
dtk_params={"LAMBDA":1.0, "dimension":1024},
encoder_params=[1024, 1, "pos"]):
self.file = file
self.dtk_params = dtk_params
self.encoder_params = encoder_params
dt = dtk.DT(**dtk_params)
#dir2 = "/Users/lorenzo/Documents/Universita/PHD/Lavori/DSTK/RTE"
#file = "RTE3_dev_processed.xml.svm"
#test = "RTE3_test_processed.xml"
filename = "dtk " + str(sorted(list(dtk_params.items())))
filename2 = "encoder " + str(list(encoder_params))
try:
D1 = pickle.load(open(filename, "rb"))
self.D1 = D1
except:
print("No file found for dtk, generating one")
Data = dataset_reader.Dataset(self.file)
trees = [Tree(string=pairs[0]) for pairs in Data.pairs]
D1 = []
for t in trees[1:]:
D1.append(dt.dt(t))
D1 = numpy.array(D1)
pickle.dump(D1, open(filename, "wb"))
self.D1 = D1
#raise
try:
self.D2 = pickle.load(open(filename2, "rb"))
except:
print("No file found for sentence encoder, generating one")
Data = dataset_reader.Dataset(self.file)
trees = [Tree(string=pairs[0]) for pairs in Data.pairs]
D2 = []
for t in trees[1:]:
D2.append(sentence_encoder.encoder(t.sentence, *encoder_params))
D2 = numpy.array(D2)
pickle.dump(D2, open(filename2, "wb"))
self.D2 = D2
#raise
def get_d(self):
return [self.D1, self.D2]
|
from decouple import config
from flask import Flask, request
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = config('POSTGRES')
db = SQLAlchemy(app)
# entity needs to be placed after app and db is created
from entity.output import Output
from entity.input import Input
# can be used to creade tables from the entity classes
db.create_all()
@app.route('/output')
def show_output():
output = Output.query.all()
results = [
{
"id": res.id,
"userId": res.user_id,
"amount": res.amount,
"note": res.note,
"time_created": res.time_created
} for res in output]
return {"count": len(results), "output": results}
@app.route('/input')
def show_input():
input = Input.query.all()
results = [
{
"id": res.id,
"userId": res.user_id,
"amount": res.amount,
"note": res.note,
"time_created": res.time_created
} for res in input]
return {"count": len(results), "input": results}
@app.route('/output/new', methods=['GET'])
def new_output():
    if 'user_id' in request.args and 'amount' in request.args:
user_id = str(request.args.get('user_id'))
amount = float(request.args.get('amount'))
# note is optional, if no value present it will use an empty string
note = str(request.args.get('note', ''))
output = Output(user_id=user_id, amount=amount, note=note)
db.session.add(output)
db.session.commit()
return 'Output created!'
else:
return "No User and/or amount provided!"
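# Example request for this endpoint (hypothetical host/port; note is optional):
#   GET http://localhost:5000/output/new?user_id=42&amount=9.99&note=coffee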
@app.route('/input/new', methods=['GET'])
def new_input():
    if 'user_id' in request.args and 'amount' in request.args:
user_id = str(request.args.get('user_id'))
amount = float(request.args.get('amount'))
# note is optional, if no value present it will use an empty string
note = str(request.args.get('note', ''))
input = Input(user_id=user_id, amount=amount, note=note)
db.session.add(input)
db.session.commit()
return 'Input created!'
else:
return "No User and/or amount provided!"
@app.route('/input/delete', methods=['GET'])
def delete_input():
if 'id' in request.args:
id = str(request.args.get('id'))
Input.query.filter_by(id=id).delete()
db.session.commit()
return 'Input deleted!'
else:
return "No ID provided!"
@app.route('/output/delete', methods=['GET'])
def delete_output():
if 'id' in request.args:
id = str(request.args.get('id'))
Output.query.filter_by(id=id).delete()
db.session.commit()
return 'Output deleted!'
else:
return "No ID provided!"
if __name__ == '__main__':
app.run()
|
"""Diagnostics support for Synology DSM."""
from __future__ import annotations
from synology_dsm.api.surveillance_station.camera import SynoCamera
from homeassistant.components.diagnostics import async_redact_data
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import HomeAssistant
from .const import CONF_DEVICE_TOKEN, DOMAIN
from .models import SynologyDSMData
TO_REDACT = {CONF_USERNAME, CONF_PASSWORD, CONF_DEVICE_TOKEN}
async def async_get_config_entry_diagnostics(
hass: HomeAssistant, entry: ConfigEntry
) -> dict:
"""Return diagnostics for a config entry."""
data: SynologyDSMData = hass.data[DOMAIN][entry.unique_id]
syno_api = data.api
dsm_info = syno_api.dsm.information
diag_data = {
"entry": async_redact_data(entry.as_dict(), TO_REDACT),
"device_info": {
"model": dsm_info.model,
"version": dsm_info.version_string,
"ram": dsm_info.ram,
"uptime": dsm_info.uptime,
"temperature": dsm_info.temperature,
},
"network": {"interfaces": {}},
"storage": {"disks": {}, "volumes": {}},
"surveillance_station": {"cameras": {}},
"upgrade": {},
"utilisation": {},
"is_system_loaded": True,
"api_details": {
"fetching_entities": syno_api._fetching_entities, # pylint: disable=protected-access
},
}
if syno_api.network is not None:
intf: dict
for intf in syno_api.network.interfaces:
diag_data["network"]["interfaces"][intf["id"]] = { # type: ignore[index]
"type": intf["type"],
"ip": intf["ip"],
}
if syno_api.storage is not None:
disk: dict
for disk in syno_api.storage.disks:
diag_data["storage"]["disks"][disk["id"]] = { # type: ignore[index]
"name": disk["name"],
"vendor": disk["vendor"],
"model": disk["model"],
"device": disk["device"],
"temp": disk["temp"],
"size_total": disk["size_total"],
}
volume: dict
for volume in syno_api.storage.volumes:
diag_data["storage"]["volumes"][volume["id"]] = { # type: ignore[index]
"name": volume["fs_type"],
"size": volume["size"],
}
if syno_api.surveillance_station is not None:
camera: SynoCamera
for camera in syno_api.surveillance_station.get_all_cameras():
diag_data["surveillance_station"]["cameras"][camera.id] = { # type: ignore[index]
"name": camera.name,
"is_enabled": camera.is_enabled,
"is_motion_detection_enabled": camera.is_motion_detection_enabled,
"model": camera.model,
"resolution": camera.resolution,
}
if syno_api.upgrade is not None:
diag_data["upgrade"] = {
"update_available": syno_api.upgrade.update_available,
"available_version": syno_api.upgrade.available_version,
"reboot_needed": syno_api.upgrade.reboot_needed,
"service_restarts": syno_api.upgrade.service_restarts,
}
if syno_api.utilisation is not None:
diag_data["utilisation"] = {
"cpu": syno_api.utilisation.cpu,
"memory": syno_api.utilisation.memory,
"network": syno_api.utilisation.network,
}
return diag_data
|
# Copyright 2018-2021 The glTF-Blender-IO authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import bpy
import typing
from io_scene_gltf2_msfs.io.com import gltf2_io
from io_scene_gltf2_msfs.blender.exp.gltf2_blender_gather_cache import cached
from io_scene_gltf2_msfs.blender.exp import gltf2_blender_gather_nodes
from io_scene_gltf2_msfs.blender.exp import gltf2_blender_gather_joints
from io_scene_gltf2_msfs.blender.exp import gltf2_blender_gather_skins
from io_scene_gltf2_msfs.io.exp.gltf2_io_user_extensions import export_user_extensions
@cached
def gather_animation_channel_target(
channels: typing.Tuple[bpy.types.FCurve],
blender_object: bpy.types.Object,
bake_bone: typing.Union[str, None],
bake_channel: typing.Union[str, None],
driver_obj,
export_settings,
) -> gltf2_io.AnimationChannelTarget:
animation_channel_target = gltf2_io.AnimationChannelTarget(
extensions=__gather_extensions(
channels, blender_object, export_settings, bake_bone
),
extras=__gather_extras(channels, blender_object, export_settings, bake_bone),
node=__gather_node(
channels, blender_object, export_settings, bake_bone, driver_obj
),
path=__gather_path(
channels, blender_object, export_settings, bake_bone, bake_channel
),
)
export_user_extensions(
"gather_animation_channel_target_hook",
export_settings,
animation_channel_target,
channels,
blender_object,
bake_bone,
bake_channel,
)
return animation_channel_target
def __gather_extensions(
channels: typing.Tuple[bpy.types.FCurve],
blender_object: bpy.types.Object,
export_settings,
bake_bone: typing.Union[str, None],
) -> typing.Any:
return None
def __gather_extras(
channels: typing.Tuple[bpy.types.FCurve],
blender_object: bpy.types.Object,
export_settings,
bake_bone: typing.Union[str, None],
) -> typing.Any:
return None
def __gather_node(
channels: typing.Tuple[bpy.types.FCurve],
blender_object: bpy.types.Object,
export_settings,
bake_bone: typing.Union[str, None],
driver_obj,
) -> gltf2_io.Node:
if driver_obj is not None:
return gltf2_blender_gather_nodes.gather_node(
driver_obj,
driver_obj.library.name if driver_obj.library else None,
None,
None,
export_settings,
)
if blender_object.type == "ARMATURE":
# TODO: get joint from fcurve data_path and gather_joint
if bake_bone is not None:
blender_bone = blender_object.pose.bones[bake_bone]
else:
blender_bone = blender_object.path_resolve(
channels[0].data_path.rsplit(".", 1)[0]
)
if isinstance(blender_bone, bpy.types.PoseBone):
if export_settings["gltf_def_bones"] is False:
obj = blender_object.proxy if blender_object.proxy else blender_object
return gltf2_blender_gather_joints.gather_joint(
obj, blender_bone, export_settings
)
else:
bones, _, _ = gltf2_blender_gather_skins.get_bone_tree(
None, blender_object
)
if blender_bone.name in [b.name for b in bones]:
obj = (
blender_object.proxy if blender_object.proxy else blender_object
)
return gltf2_blender_gather_joints.gather_joint(
obj, blender_bone, export_settings
)
return gltf2_blender_gather_nodes.gather_node(
blender_object,
blender_object.library.name if blender_object.library else None,
None,
None,
export_settings,
)
def __gather_path(
channels: typing.Tuple[bpy.types.FCurve],
blender_object: bpy.types.Object,
export_settings,
bake_bone: typing.Union[str, None],
bake_channel: typing.Union[str, None],
) -> str:
if bake_channel is None:
# Note: channels has some None items only for SK if some SK are not animated
target = [c for c in channels if c is not None][0].data_path.split(".")[-1]
else:
target = bake_channel
path = {
"delta_location": "translation",
"delta_rotation_euler": "rotation",
"location": "translation",
"rotation_axis_angle": "rotation",
"rotation_euler": "rotation",
"rotation_quaternion": "rotation",
"scale": "scale",
"value": "weights",
}.get(target)
if target is None:
return None
return path
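# Example of the mapping above: an F-curve whose data_path ends in "rotation_quaternion"
# (or "rotation_euler"/"rotation_axis_angle") yields the glTF target path "rotation",
# while "value" F-curves on shape keys become "weights".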
|
import smartpy as sp
class SoccerBetFactory(sp.Contract):
def __init__(self, admin):
self.init(
admin=admin,
games=sp.map(tkey=sp.TInt),
archived_games = sp.map(tkey = sp.TInt),
remainder=sp.tez(0)
)
@sp.entry_point
def new_game(self, params):
sp.verify_equal(sp.sender, self.data.admin,message="Error: you cannot initialize a new game")
sp.verify(~ self.data.games.contains(params.game_id),message="Error: this game id already exists")
self.data.games[params.game_id] = sp.record(
team_a=params.team_a,
team_b=params.team_b,
status=sp.int(0),
match_timestamp = params.match_timestamp,
outcome=sp.int(-1),
total_bet_amount=sp.tez(0),
bet_amount_on=sp.record(team_a=sp.tez(0), team_b=sp.tez(0), tie=sp.tez(0)),
redeemed=sp.int(0),
bets_by_choice=sp.record(team_a=sp.int(0), team_b=sp.int(0), tie=sp.int(0)),
bet_amount_by_user=sp.map(tkey=sp.TAddress, tvalue=sp.TRecord(timestamp=sp.TTimestamp,team_a=sp.TMutez,team_b=sp.TMutez, tie=sp.TMutez)),
jackpot=sp.tez(0)
)
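    # Outcome encoding, inferred from the entry points below:
    # -1 = not yet set, 0 = team_a won, 1 = team_b won, 2 = tie,
    # 10 = match postponed/cancelled (every bettor can redeem their stake).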
@sp.entry_point
def bet_on_team_a(self, game_id):
self.add_bet(sp.record(game_id=game_id, choice=sp.int(0)))
@sp.entry_point
def bet_on_team_b(self, game_id):
self.add_bet(sp.record(game_id=game_id, choice=sp.int(1)))
@sp.entry_point
def bet_on_tie(self, game_id):
self.add_bet(sp.record(game_id=game_id, choice=sp.int(2)))
@sp.private_lambda(with_storage="read-write", with_operations=False, wrap_call=True)
def add_bet(self, params):
sp.verify(self.data.games.contains(params.game_id))
game = self.data.games[params.game_id]
sp.verify(sp.now < game.match_timestamp,message = "Error, you cannot place a bet anymore")
bet_by_user=game.bet_amount_by_user[sp.sender]
sp.if ~game.bet_amount_by_user.contains(sp.sender):
game.bet_amount_by_user[sp.sender] = sp.record(
timestamp=sp.now,
team_a=sp.tez(0),
team_b=sp.tez(0),
tie=sp.tez(0))
sp.if params.choice == 0:
bet_by_user.team_a += sp.amount
game.bet_amount_on.team_a += sp.amount
game.bets_by_choice.team_a += sp.int(1)
sp.if params.choice == 1:
bet_by_user.team_b += sp.amount
game.bet_amount_on.team_b += sp.amount
game.bets_by_choice.team_b += sp.int(1)
sp.if params.choice == 2:
bet_by_user.tie += sp.amount
game.bet_amount_on.tie += sp.amount
game.bets_by_choice.tie += sp.int(1)
game.total_bet_amount = game.bet_amount_on.team_a + game.bet_amount_on.team_b + game.bet_amount_on.tie
@sp.entry_point
def unbet_on_team_a(self, game_id):
self.remove_bet(sp.record(game_id=game_id, choice=sp.int(0)))
@sp.entry_point
def unbet_on_team_b(self, game_id):
self.remove_bet(sp.record(game_id=game_id, choice=sp.int(1)))
@sp.entry_point
def unbet_on_tie(self, game_id):
self.remove_bet(sp.record(game_id=game_id, choice=sp.int(2)))
@sp.entry_point
def unbet_all(self, game_id):
self.remove_bet(sp.record(game_id=game_id, choice=sp.int(-1)))
@sp.private_lambda(with_storage="read-write", with_operations=True, wrap_call=True)
def remove_bet(self, params):
sp.verify(self.data.games.contains(params.game_id),message="Error: this match does not exist")
game = self.data.games[params.game_id]
sp.verify(game.bet_amount_by_user.contains(sp.sender),message="Error: you do not have any bets to remove")
sp.verify(sp.now < game.match_timestamp, message = "Error, you cannot remove a bet anymore")
amount_to_send = sp.local("amount_to_send", sp.tez(0))
bet_by_user = game.bet_amount_by_user[sp.sender]
        one_day = sp.int(86400)  # seconds in one day
service_fee = sp.local("service_fee", sp.tez(0))
fee_multiplier = sp.local("fee_multiplier", sp.nat(0))
time_diff = sp.local("time_diff", sp.int(0))
time_diff.value = self.data.games[params.game_id].match_timestamp - bet_by_user.timestamp
sp.if time_diff.value < one_day:
time_diff.value = sp.fst(sp.ediv(time_diff.value,3600).open_some())
fee_multiplier.value = sp.as_nat(2000-sp.mul(83,time_diff.value))
sp.if params.choice == 0:
sp.verify(bet_by_user.team_a > sp.tez(0), message="Error: you have not placed any bets on this outcome")
game.bet_amount_on.team_a -= bet_by_user.team_a
amount_to_send.value = bet_by_user.team_a
self.data.games[params.game_id].bets_by_choice.team_a -= sp.int(1)
service_fee.value = sp.mul(fee_multiplier.value, bet_by_user.team_a)
bet_by_user.team_a = sp.tez(0)
sp.if params.choice == 1:
sp.verify(bet_by_user.team_b > sp.tez(0), message="Error: you have not placed any bets on this outcome")
game.bet_amount_on.team_b -= bet_by_user.team_b
amount_to_send.value = bet_by_user.team_b
self.data.games[params.game_id].bets_by_choice.team_b -= sp.int(1)
service_fee.value = sp.mul(fee_multiplier.value, bet_by_user.team_b)
bet_by_user.team_b = sp.tez(0)
sp.if params.choice == 2:
sp.verify(bet_by_user.tie > sp.tez(0), message="Error: you have not placed any bets on this outcome")
game.bet_amount_on.tie -= bet_by_user.tie
amount_to_send.value = bet_by_user.tie
self.data.games[params.game_id].bets_by_choice.tie -= sp.int(1)
service_fee.value = sp.mul(fee_multiplier.value, bet_by_user.tie)
bet_by_user.tie = sp.tez(0)
sp.if params.choice == -1:
sp.if bet_by_user.team_a>sp.tez(0):
game.bet_amount_on.team_a -= bet_by_user.team_a
amount_to_send.value = bet_by_user.team_a
self.data.games[params.game_id].bets_by_choice.team_a -= sp.int(1)
sp.if bet_by_user.team_b>sp.tez(0):
game.bet_amount_on.team_b -= bet_by_user.team_b
amount_to_send.value += bet_by_user.team_b
self.data.games[params.game_id].bets_by_choice.team_b -= sp.int(1)
sp.if bet_by_user.tie>sp.tez(0):
game.bet_amount_on.tie -= bet_by_user.tie
amount_to_send.value += bet_by_user.tie
self.data.games[params.game_id].bets_by_choice.tie -= sp.int(1)
service_fee.value = sp.mul(fee_multiplier.value, bet_by_user.team_a+bet_by_user.team_b+bet_by_user.tie)
bet_by_user.team_a = sp.tez(0)
bet_by_user.team_b = sp.tez(0)
bet_by_user.tie = sp.tez(0)
sp.if time_diff.value < one_day:
service_fee.value = sp.split_tokens(service_fee.value, 1, 10000)
game.jackpot+=service_fee.value
sp.send(sp.sender, amount_to_send.value - service_fee.value)
game.total_bet_amount = game.bet_amount_on.team_a + game.bet_amount_on.team_b + game.bet_amount_on.tie
sp.if (bet_by_user.team_a == sp.mutez(0)) & (bet_by_user.team_b == sp.tez(0)) & (bet_by_user.tie == sp.tez(0)):
del game.bet_amount_by_user[sp.sender]
@sp.private_lambda(with_storage="read-write", with_operations=False, wrap_call=True)
def archive_game(self, params):
sp.verify(self.data.games.contains(params.game_id), message = "Error: this match does not exist")
game = self.data.games[params.game_id]
        sp.verify(game.outcome != -1, message = "Error: cannot archive a game whose outcome has not been set")
self.data.archived_games[params.game_id] = game
@sp.entry_point
def redeem_tez(self, game_id):
sp.verify(self.data.games.contains(game_id),message="Error: this match does not exist anymore!")
game = self.data.games[game_id]
sp.verify(game.bet_amount_by_user.contains(sp.sender),message="Error: you did not place a bet on this match")
sp.verify(game.outcome != -1, message = "Error, you cannot redeem your winnings yet")
bet_by_user = game.bet_amount_by_user[sp.sender]
total_bet_by_user=bet_by_user.team_a + bet_by_user.team_b + bet_by_user.tie
sp.verify((game.outcome == sp.int(10)) | ((game.outcome == sp.int(0)) & (bet_by_user.team_a > sp.tez(0))) | ((game.outcome == sp.int(1)) & (bet_by_user.team_b > sp.tez(0))) | ((game.outcome == sp.int(2)) & (bet_by_user.tie > sp.tez(0))), message="Error: you have lost your bet! :(")
amount_to_send = sp.local("amount_to_send", sp.tez(0))
jackpot_share = sp.local("jackpot_share", sp.tez(0))
        repayment_allowed = sp.local("repayment_allowed", sp.bool(False))
# If a game is postponed or delayed, each player gets his money back
sp.if game.outcome == sp.int(10):
amount_to_send.value = bet_by_user.team_a + bet_by_user.team_b + bet_by_user.tie
jackpot_share.value+=sp.split_tokens(game.jackpot,sp.utils.mutez_to_nat(bet_by_user.team_a+bet_by_user.team_b+bet_by_user.tie),sp.utils.mutez_to_nat(game.total_bet_amount))
bet_by_user.team_a = sp.tez(0)
bet_by_user.team_b = sp.tez(0)
bet_by_user.tie = sp.tez(0)
sp.if game.outcome == sp.int(0):
sp.if game.bet_amount_on.team_a>sp.tez(0):
amount_to_send.value = sp.split_tokens(bet_by_user.team_a, sp.utils.mutez_to_nat(game.total_bet_amount), sp.utils.mutez_to_nat(game.bet_amount_on.team_a))
jackpot_share.value+=sp.split_tokens(game.jackpot,sp.utils.mutez_to_nat(bet_by_user.team_a),sp.utils.mutez_to_nat(game.bet_amount_on.team_a))
bet_by_user.team_a = sp.tez(0)
sp.else:
amount_to_send.value=total_bet_by_user
jackpot_share.value+=sp.split_tokens(game.jackpot,sp.utils.mutez_to_nat(total_bet_by_user),sp.utils.mutez_to_nat(game.total_bet_amount))
bet_by_user.team_b=sp.tez(0)
bet_by_user.tie=sp.tez(0)
                repayment_allowed.value = True
sp.if game.outcome == sp.int(1):
sp.if game.bet_amount_on.team_b>sp.tez(0):
amount_to_send.value = sp.split_tokens(bet_by_user.team_b, sp.utils.mutez_to_nat(game.total_bet_amount), sp.utils.mutez_to_nat(game.bet_amount_on.team_b))
jackpot_share.value+=sp.split_tokens(game.jackpot,sp.utils.mutez_to_nat(bet_by_user.team_b),sp.utils.mutez_to_nat(game.bet_amount_on.team_b))
bet_by_user.team_b = sp.tez(0)
sp.else:
amount_to_send.value=total_bet_by_user
jackpot_share.value+=sp.split_tokens(game.jackpot,sp.utils.mutez_to_nat(total_bet_by_user),sp.utils.mutez_to_nat(game.total_bet_amount))
bet_by_user.team_a=sp.tez(0)
bet_by_user.tie=sp.tez(0)
                repayment_allowed.value = True
sp.if game.outcome == sp.int(2):
sp.if game.bet_amount_on.tie>sp.tez(0):
amount_to_send.value = sp.split_tokens(bet_by_user.tie, sp.utils.mutez_to_nat(game.total_bet_amount), sp.utils.mutez_to_nat(game.bet_amount_on.tie))
jackpot_share.value+=sp.split_tokens(game.jackpot,sp.utils.mutez_to_nat(bet_by_user.tie),sp.utils.mutez_to_nat(game.bet_amount_on.tie))
bet_by_user.tie = sp.tez(0)
sp.else:
amount_to_send.value=total_bet_by_user
jackpot_share.value+=sp.split_tokens(game.jackpot,sp.utils.mutez_to_nat(total_bet_by_user),sp.utils.mutez_to_nat(game.total_bet_amount))
bet_by_user.team_a=sp.tez(0)
bet_by_user.team_b=sp.tez(0)
repayment_allowed.value = sp.bool(True)
game.jackpot-=jackpot_share.value
sp.send(sp.sender, amount_to_send.value+jackpot_share.value)
game.redeemed += 1
sp.if (bet_by_user.team_a == sp.tez(0)) & (bet_by_user.team_b == sp.tez(0)) & (bet_by_user.tie == sp.tez(0)):
del game.bet_amount_by_user[sp.sender]
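# Clean-up (skipped when bets were merely refunded): delete the game once everyone who bet on the winning outcome has redeemed, or once no individual bet records remain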
sp.if repayment_allowed.value == sp.bool(False):
sp.if (game.outcome == sp.int(0)) & (game.redeemed == game.bets_by_choice.team_a):
del self.data.games[game_id]
sp.else:
sp.if (game.outcome == sp.int(1)) & (game.redeemed == game.bets_by_choice.team_b):
del self.data.games[game_id]
sp.else:
sp.if (game.outcome == sp.int(2)) & (game.redeemed == game.bets_by_choice.tie):
del self.data.games[game_id]
sp.else:
sp.if sp.len(game.bet_amount_by_user)==0:
del self.data.games[game_id]
# The entry points below mimic the future oracle behaviour and are not meant to stay
@sp.entry_point
def set_outcome(self, params):
sp.verify(self.data.games.contains(params.game_id), message = "Error: this match does not exist")
sp.verify_equal(self.data.games[params.game_id].outcome, -1, message = "Error: current game outcome has already been set")
sp.verify_equal(sp.sender, self.data.admin, message = "Error: you cannot update the game status")
sp.verify((params.choice == 0) | (params.choice == 1) | (params.choice == 2) | (params.choice == 10), message = "Error: choice must be one of {0;1;2;10}")
game = self.data.games[params.game_id]
sp.if params.choice != 10:
sp.verify(sp.now > game.match_timestamp, message = "Error: match has not started yet")
game.outcome = params.choice
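# If nobody bet on this game, move any jackpot into the contract remainder and delete the game; otherwise archive it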
sp.if (game.bet_amount_on.team_a == sp.tez(0)) & (game.bet_amount_on.team_b == sp.tez(0)) & (game.bet_amount_on.tie == sp.tez(0)):
sp.if game.jackpot>sp.tez(0):
self.data.remainder+=game.jackpot
game.jackpot=sp.tez(0)
del self.data.games[params.game_id]
sp.else:
self.archive_game(params)
# The entry points above mimic the future oracle behaviour and are not meant to stay
@sp.add_test(name="Test Match Contract")
def test():
scenario = sp.test_scenario()
admin = sp.test_account("Admin")
alice = sp.test_account("Alice")
bob = sp.test_account("Bob")
gabriel = sp.test_account("Gabriel")
eloi = sp.test_account("Eloi")
pierre_antoine = sp.test_account("Pierre-Antoine")
victor = sp.test_account("Victor")
jean_francois = sp.test_account("Jean-Francois")
mathis = sp.test_account("Mathis")
enguerrand = sp.test_account("Enguerrand")
hennequin = sp.test_account("Hennequin")
berger = sp.test_account("Berger")
levillain = sp.test_account("Levillain")
olivier = sp.test_account("Olivier")
pascal = sp.test_account("Pascal")
game1 = 1
factory = SoccerBetFactory(admin.address)
scenario += factory
scenario.h1("Testing game initialization")
scenario += factory.new_game(sp.record(
game_id=game1,
team_a="France",
team_b="Angleterre",
match_timestamp = sp.timestamp_from_utc(2022, 1, 1, 1, 1, 1)
)).run(sender=admin)
game2 = 2
scenario += factory.new_game(sp.record(
game_id=game2,
team_a="Nice",
team_b="Marseille",
match_timestamp = sp.timestamp_from_utc(2022, 1, 1, 1, 1, 1)
)).run(sender=admin)
game3 = 3
scenario += factory.new_game(sp.record(
game_id=game3,
team_a="Lorient",
team_b="Vannes",
match_timestamp = sp.timestamp_from_utc(2022, 1, 1, 1, 1, 1)
)).run(sender=admin)
game5 = 5
scenario += factory.new_game(sp.record(
game_id=game5,
team_a="Olympique Lyonnais",
team_b="PSG",
match_timestamp = sp.timestamp_from_utc(2022, 1, 1, 1, 1, 1)
)).run(sender=admin)
game6 = 6
scenario += factory.new_game(sp.record(
game_id=game6,
team_a="Luxembourg",
team_b="Malte",
match_timestamp = sp.timestamp_from_utc(2022, 1, 1, 1, 1, 1)
)).run(sender=admin)
game7 = 7
scenario += factory.new_game(sp.record(
game_id=game7,
team_a="Irlande",
team_b="Ecosse",
match_timestamp = sp.timestamp_from_utc(2022, 1, 1, 1, 1, 40)
)).run(sender=admin)
game8 = 8
scenario += factory.new_game(sp.record(
game_id=game8,
team_a="Allemagne",
team_b="Pologne",
match_timestamp = sp.timestamp_from_utc(2022, 1, 1, 1, 1, 3)
)).run(sender=admin)
scenario.h1("Testing bet placing")
# Betting on game 1
scenario += factory.bet_on_team_a(game1).run(
sender=pierre_antoine.address, amount=sp.tez(2000))
scenario += factory.bet_on_team_b(game1).run(
sender=victor.address, amount=sp.tez(5000))
scenario += factory.bet_on_team_a(game1).run(sender=alice.address, amount=sp.tez(100), now=sp.timestamp_from_utc(2022, 1, 1, 1, 1, 0))
scenario += factory.bet_on_team_b(game1).run(
sender=mathis.address, amount=sp.tez(1000))
scenario += factory.bet_on_tie(game1).run(
sender=bob.address,amount=sp.tez(2000))
# Betting on game 2
scenario += factory.bet_on_team_b(game2).run(
sender=mathis.address, amount=sp.tez(7500))
scenario += factory.bet_on_team_b(game2).run(
sender=enguerrand.address, amount=sp.tez(500))
scenario += factory.bet_on_team_b(game2).run(
sender=alice.address, amount=sp.tez(1000))
scenario += factory.bet_on_team_b(game2).run(
sender=bob.address, amount=sp.tez(1000))
scenario += factory.bet_on_team_a(game2).run(
sender=gabriel.address, amount=sp.tez(10000))
# Betting on game 3
scenario += factory.bet_on_team_a(game3).run(
sender = alice.address, amount=sp.tez(3000), now = sp.timestamp(1546297200))
scenario += factory.bet_on_team_b(game3).run(
sender = bob.address, amount = sp.tez(1000), now = sp.timestamp(1546297200))
scenario += factory.bet_on_team_b(game3).run(
sender = eloi.address, amount = sp.tez(2000), now = sp.timestamp(1546297200))
scenario += factory.bet_on_team_a(game3).run(
sender = gabriel.address, amount = sp.tez(4000), now = sp.timestamp(1546297200))
scenario += factory.bet_on_team_a(game3).run(
sender = levillain.address, amount = sp.tez(4000), now = sp.timestamp(1546297200))
scenario += factory.bet_on_team_a(game3).run(
sender = pascal.address, amount = sp.tez(4000), now = sp.timestamp(1546297200))
scenario += factory.bet_on_team_a(game3).run(
sender = olivier.address, amount = sp.tez(4000), now = sp.timestamp(1546297200))
# Betting on game 5
scenario += factory.bet_on_team_b(game5).run(sender=mathis.address, amount=sp.tez(100), now=sp.timestamp_from_utc(2022, 1, 1, 1, 1, 0))
scenario += factory.unbet_on_team_b(game5).run(sender=mathis.address)
scenario += factory.bet_on_tie(game5).run(sender=mathis.address, amount=sp.tez(7500))
scenario += factory.bet_on_team_a(game5).run(sender=enguerrand.address, amount=sp.tez(500))
scenario += factory.bet_on_team_b(game5).run(sender=enguerrand.address, amount=sp.tez(2500))
# Testing an outcome cannot be set twice
scenario += factory.set_outcome(sp.record(
game_id = game1,
choice = 2,
)).run(sender = admin.address, valid=False)
scenario.h1("Testing bet removal")
scenario += factory.unbet_on_tie(game1).run(sender=bob.address)
scenario.h1("Testing outcome")
scenario += factory.set_outcome(sp.record(game_id = game1, choice = 1)).run(sender=admin.address, now = sp.timestamp(1640998862))
scenario += factory.set_outcome(sp.record(game_id = game2, choice = 1)).run(sender=admin.address, now = sp.timestamp(1640998862))
# Testing the deletion of games with no bet records
scenario += factory.set_outcome(sp.record(game_id = game6, choice = 1)).run(sender=admin.address, now = sp.timestamp(1640998862))
# Testing cancelled/postponed outcome
scenario += factory.set_outcome(sp.record(game_id = game5, choice = 10)).run(sender=admin.address, now = sp.timestamp(1640998862))
scenario.h1("Testing losers can recover their bet amount when there is no bet on the actual outcome")
# scenario += factory.bet_on_team_a(game8).run(sender=enguerrand.address, amount=sp.tez(2500), now=sp.timestamp_from_utc(2022, 1, 1, 1, 1, 1))
# scenario += factory.set_outcome(sp.record(game_id = game8, choice = 1)).run(sender=admin.address, now=sp.timestamp_from_utc(2022, 1, 1, 1, 1, 3))
# scenario += factory.redeem_tez(game8).run(sender=enguerrand.address, now=sp.timestamp_from_utc(2022, 1, 1, 1, 1, 4))
scenario.h1("Testing players can remove all their bets at once")
scenario += factory.bet_on_team_a(game8).run(sender=enguerrand.address, amount=sp.tez(2500), now=sp.timestamp_from_utc(2022, 1, 1, 1, 1, 1))
scenario += factory.bet_on_team_b(game8).run(sender=enguerrand.address, amount=sp.tez(2500), now=sp.timestamp_from_utc(2022, 1, 1, 1, 1, 1))
scenario += factory.bet_on_tie(game8).run(sender=enguerrand.address, amount=sp.tez(2500), now=sp.timestamp_from_utc(2022, 1, 1, 1, 1, 1))
scenario += factory.unbet_all(game8).run(sender=enguerrand.address, now=sp.timestamp_from_utc(2022, 1, 1, 1, 1, 2))
scenario.h1("Testing contract's remainder increase when no-bet games are deleted")
scenario += factory.bet_on_team_b(game7).run(sender=enguerrand.address, amount=sp.tez(2500), now=sp.timestamp_from_utc(2022, 1, 1, 1, 1, 1))
scenario += factory.unbet_on_team_b(game7).run(sender=enguerrand.address, now=sp.timestamp_from_utc(2022, 1, 1, 1, 1, 20))
scenario += factory.set_outcome(sp.record(game_id = game7, choice = 1)).run(sender=admin.address, now=sp.timestamp_from_utc(2022, 1, 1, 1, 1, 59))
scenario.verify(factory.data.remainder>sp.tez(0))
scenario.h1("Testing winnings withdrawal ")
scenario += factory.redeem_tez(game1).run(sender=mathis.address)
scenario += factory.redeem_tez(game1).run(sender=victor.address)
scenario += factory.redeem_tez(game2).run(sender=alice.address)
scenario += factory.redeem_tez(game2).run(sender=mathis.address)
scenario += factory.redeem_tez(game2).run(sender=enguerrand.address)
# Testing Bob can redeem a winning bet while having lost another one
scenario += factory.redeem_tez(game2).run(sender=bob.address)
# Testing Alice cannot redeem gains from a game she did not bet on
scenario += factory.redeem_tez(game1).run(sender=alice.address, valid=False)
# Testing Pierre-Antoine cannot redeem gains from a bet he lost
scenario += factory.redeem_tez(game1).run(sender=pierre_antoine.address, valid=False)
# Testing players can recover their bet amount when a game is cancelled/postponed
scenario += factory.redeem_tez(game5).run(sender=mathis.address)
scenario += factory.redeem_tez(game5).run(sender=enguerrand.address)
scenario.h1("Setting outcome but match has not started")
scenario += factory.set_outcome(sp.record(
game_id=game2,
choice=2,
)).run(sender=admin.address, valid=False)
scenario += factory.set_outcome(sp.record(
game_id=game1,
choice=2,
)).run(sender=admin.address, valid=False) |
# Generated by Django 2.2.1 on 2019-05-01 16:13
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('mapper', '0002_auto_20190430_2053'),
]
operations = [
migrations.CreateModel(
name='Peptide',
fields=[
('sequence', models.CharField(max_length=64, primary_key=True, serialize=False)),
('requests', models.IntegerField()),
],
),
migrations.CreateModel(
name='Region',
fields=[
('id', models.CharField(max_length=10, primary_key=True, serialize=False)),
('chromosome', models.CharField(max_length=3)),
('start', models.IntegerField()),
('end', models.IntegerField()),
('strand', models.IntegerField()),
],
),
migrations.CreateModel(
name='SpanGroup',
fields=[
('id', models.CharField(max_length=10, primary_key=True, serialize=False)),
('chromosome', models.CharField(max_length=3)),
('start', models.IntegerField()),
('end', models.IntegerField()),
],
),
migrations.CreateModel(
name='SpanGroup_Regions',
fields=[
('id', models.CharField(max_length=10, primary_key=True, serialize=False)),
('region', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mapper.Region')),
('span_group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mapper.SpanGroup')),
],
),
migrations.CreateModel(
name='Peptide_SpanGroups',
fields=[
('id', models.CharField(max_length=10, primary_key=True, serialize=False)),
('peptide', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mapper.Peptide')),
('span_group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mapper.SpanGroup')),
],
),
]
|
from .abt_buy import AbtBuyBenchmark # noqa: F401
from .amazon_google import AmazonGoogleBenchmark # noqa: F401
from .beer import BeerBenchmark # noqa: F401
from .company import CompanyBenchmark # noqa: F401
from .dblp_acm_structured import DBLP_ACM_StructuredBenchmark # noqa: F401
from .dblp_scholar_structured import DBLPScholarStructuredBenchmark # noqa: F401
from .fodors_zagats import FodorsZagatsBenchmark # noqa: F401
from .itunes_amazon_structured import ITunesAmazonStructuredBenchmark # noqa: F401
from .walmart_amazon_structured import WalmartAmazonStructuredBenchmark # noqa: F401
|
import time
import ipaddress
from nubia import command, argument
import pandas as pd
from suzieq.cli.sqcmds.command import SqCommand
from suzieq.sqobjects.routes import RoutesObj
@command("route", help="Act on Routes")
class RouteCmd(SqCommand):
def __init__(
self,
engine: str = "",
hostname: str = "",
start_time: str = "",
end_time: str = "",
view: str = "latest",
namespace: str = "",
format: str = "",
query_str: str = ' ',
columns: str = "default",
) -> None:
super().__init__(
engine=engine,
hostname=hostname,
start_time=start_time,
end_time=end_time,
view=view,
namespace=namespace,
columns=columns,
format=format,
query_str=query_str,
sqobj=RoutesObj,
)
self.json_print_handler = self._json_print_handler
def _json_print_handler(self, input):
"""This handler calls the code to print the IPNetwork as a string"""
if isinstance(input, ipaddress.IPv4Network):
return ipaddress.IPv4Network.__str__(input)
elif isinstance(input, ipaddress.IPv6Network):
return ipaddress.IPv6Network.__str__(input)
return input
def _get_ipvers(self, value: str) -> int:
"""Return the IP version in use"""
if ':' in value:
ipvers = 6
elif '.' in value:
ipvers = 4
else:
ipvers = ''
return ipvers
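# The show, summarize and lpm commands below wrap RoutesObj.get, RoutesObj.summarize and RoutesObj.lpm respectively, and record the query execution time in the CLI context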
@command("show")
@argument("prefix", description="Prefix, in quotes, to filter show on")
@argument("vrf", description="VRF to qualify")
@argument("protocol", description="routing protocol to qualify")
@argument("prefixlen", description="must be of the form "
"[<|<=|>=|>|!] length")
def show(self, prefix: str = "", vrf: str = '', protocol: str = "",
prefixlen: str = ""):
"""
Show Routes info
"""
if self.columns is None:
return
# Get the default display field names
now = time.time()
if self.columns != ["default"]:
self.ctxt.sort_fields = None
else:
self.ctxt.sort_fields = []
df = self._invoke_sqobj(self.sqobj.get,
hostname=self.hostname,
prefix=prefix.split(),
vrf=vrf.split(),
protocol=protocol.split(),
columns=self.columns,
namespace=self.namespace,
prefixlen=prefixlen,
query_str=self.query_str,
)
self.ctxt.exec_time = "{:5.4f}s".format(time.time() - now)
return self._gen_output(df)
@command("summarize")
@argument("vrf", description="VRF to qualify")
def summarize(self, vrf: str = ''):
"""
Summarize Routes info
"""
# Get the default display field names
now = time.time()
df = self._invoke_sqobj(self.sqobj.summarize,
hostname=self.hostname,
vrf=vrf.split(),
namespace=self.namespace,
query_str=self.query_str,
)
self.ctxt.exec_time = "{:5.4f}s".format(time.time() - now)
return self._gen_output(df, json_orient='columns')
@command('lpm')
@argument("address", description="IP Address, in quotes, for lpm query")
@argument("vrf", description="specific VRF to qualify")
def lpm(self, address: str = '', vrf: str = ''):
"""
Show the Longest Prefix Match for a given address and optional VRF
"""
if self.columns is None:
return
now = time.time()
if self.columns != ["default"]:
self.ctxt.sort_fields = None
else:
self.ctxt.sort_fields = []
if not address:
print('address is a mandatory parameter')
return
df = self._invoke_sqobj(self.sqobj.lpm,
hostname=self.hostname,
address=address,
vrf=vrf.split(),
ipvers=self._get_ipvers(address),
columns=self.columns,
namespace=self.namespace,
query_str=self.query_str,
)
self.ctxt.exec_time = "{:5.4f}s".format(time.time() - now)
return self._gen_output(df)
|
"""
Author: Anthony Perez
A collection of ImageDatasource classes which allow loading of generic image collections by name.
"""
import ee
from gee_tools.datasources.interface import MultiImageDatasource, GlobalImageDatasource, SingleImageDatasource, DatasourceError
class GenericSingleImageDatasource(SingleImageDatasource):
"""Generic SingleImageDatasource for loading an image by name or arguments to ee.Image."""
def build_img_coll(self, image_args=None):
if image_args is None:
raise ValueError('image_args must be provided, but was None')
self.img = ee.Image(image_args)
self.coll = ee.ImageCollection(self.img)
def get_img_coll(self):
return self.coll
class GenericGlobalImageDatasource(GlobalImageDatasource):
"""Generic GlobalImageDatasource for loading an image by name or arguments to ee.ImageCollection."""
def build_img_coll(self, coll_args=None):
if coll_args is None:
raise ValueError('coll_args must be provided, but was None')
self.coll = ee.ImageCollection(coll_args) \
.filterDate(self.start_date, self.end_date) \
.sort('system:time_start')
def get_img_coll(self):
return self.coll
class GenericMultiImageDatasource(MultiImageDatasource):
"""Generic GlobalImageDatasource for loading an image by name or arguments to ee.ImageCollection."""
def build_img_coll(self, coll_args=None):
if coll_args is None:
raise ValueError('coll_args must be provided, but was None')
self.coll = ee.ImageCollection(coll_args) \
.filterDate(self.start_date, self.end_date) \
.filterBounds(self.filterpoly) \
.sort('system:time_start')
def get_img_coll(self):
return self.coll
|
import os
class Config:
BASE_DIR = os.path.dirname(__file__)
DATA_DIR = os.path.join(BASE_DIR, 'data')
VOCABS_DIR = os.path.join(DATA_DIR, 'vocabs')
CHECKPOINTS_DIR = os.path.join(DATA_DIR, 'checkpoints')
class UniEnViConfig:
PROBLEM = 'uni_en_vi'
MODEL = 'transformer'
CHECKPOINT_PATH = os.path.join(Config.CHECKPOINTS_DIR, 'unienvi', 'model.ckpt-best')
VOCAB_DIR = Config.VOCABS_DIR
HPARAMS = 'transformer_base_single_gpu'
class UniViEnConfig:
PROBLEM = 'uni_vi_en'
MODEL = 'transformer'
CHECKPOINT_PATH = os.path.join(Config.CHECKPOINTS_DIR, 'univien', 'model.ckpt-best')
VOCAB_DIR = Config.VOCABS_DIR
HPARAMS = 'transformer_base_single_gpu'
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
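# Preorder serialization: emit the node value, then "(left subtree)" and "(right subtree)";
# an empty right child can be omitted, but an empty left child must keep "()" so the string stays unambiguous.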
class Solution:
def tree2str(self, t: TreeNode) -> str:
def dfs(root):
if not root:
return ''
# Left subtree empty, right subtree non-empty: keep an empty "()" for the left child
if not root.left and root.right:
return str(root.val) + '()' + '(' + dfs(root.right) + ')'
# Left subtree non-empty, right subtree empty: the empty right child can be omitted
elif root.left and not root.right:
return str(root.val) + '(' + dfs(root.left) + ')'
# Both subtrees empty: just the value
elif not root.left and not root.right:
return str(root.val)
return str(root.val) + '(' + dfs(root.left) + ')' + '(' + dfs(root.right) + ')'
return dfs(t) |