max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5)
---|---|---|---|---|---|---|
yolox/data/datasets/mot.py | ldelzott/ByteTrack | 0 | 5800 | import cv2
import numpy as np
from pycocotools.coco import COCO
import os
from ..dataloading import get_yolox_datadir
from .datasets_wrapper import Dataset
class MOTDataset(Dataset):
"""
COCO dataset class.
"""
    # This constructor is called from the exps config yolox_x_mot17_half.py roughly like this:
    #     dataset = MOTDataset(
    #         data_dir=os.path.join(get_yolox_datadir(), "mot"),
    #         json_file=self.train_ann,
    #         name='train',
    #         img_size=self.input_size,
    #         preproc=TrainTransform(rgb_means=(0.485, 0.456, 0.406),
    #                                std=(0.229, 0.224, 0.225),
    #                                max_labels=500,),)
    def __init__(
        self,
        data_dir=None,
        json_file="train_half.json",
        name="train",
        img_size=(608, 1088),
        preproc=None,
    ):
"""
COCO dataset initialization. Annotation data are read into memory by COCO API.
Args:
data_dir (str): dataset root directory
json_file (str): COCO json file name
name (str): COCO data name (e.g. 'train2017' or 'val2017')
            img_size (tuple): target image size after pre-processing
preproc: data augmentation strategy
"""
super().__init__(img_size)
if data_dir is None:
data_dir = os.path.join(get_yolox_datadir(), "mot")
self.data_dir = data_dir
self.json_file = json_file
self.coco = COCO(os.path.join(self.data_dir, "annotations", self.json_file))
self.ids = self.coco.getImgIds()
self.class_ids = sorted(self.coco.getCatIds())
cats = self.coco.loadCats(self.coco.getCatIds())
self._classes = tuple([c["name"] for c in cats])
self.annotations = self._load_coco_annotations()
self.name = name
self.img_size = img_size
self.preproc = preproc
def __len__(self):
return len(self.ids)
def _load_coco_annotations(self):
return [self.load_anno_from_ids(_ids) for _ids in self.ids]
def load_anno_from_ids(self, id_):
im_ann = self.coco.loadImgs(id_)[0]
width = im_ann["width"]
height = im_ann["height"]
        #frame_id = im_ann["frame_id"]: the default value '1' avoids breaking the augmentation & evaluation processes
        frame_id = 1
        #video_id = im_ann["video_id"]: the default value '1' avoids breaking the augmentation & evaluation processes
        video_id = 1
anno_ids = self.coco.getAnnIds(imgIds=[int(id_)], iscrowd=False)
annotations = self.coco.loadAnns(anno_ids)
objs = []
for obj in annotations:
x1 = obj["bbox"][0]
y1 = obj["bbox"][1]
x2 = x1 + obj["bbox"][2]
y2 = y1 + obj["bbox"][3]
if obj["area"] > 0 and x2 >= x1 and y2 >= y1:
obj["clean_bbox"] = [x1, y1, x2, y2]
objs.append(obj)
num_objs = len(objs)
res = np.zeros((num_objs, 6))
for ix, obj in enumerate(objs):
cls = self.class_ids.index(obj["category_id"])
res[ix, 0:4] = obj["clean_bbox"]
res[ix, 4] = cls
            #res[ix, 5] = obj["track_id"]  # hard-coded to 1 for the same reason as frame_id/video_id above
            res[ix, 5] = 1
file_name = im_ann["file_name"] if "file_name" in im_ann else "{:012}".format(id_) + ".jpg"
img_info = (height, width, frame_id, video_id, file_name)
del im_ann, annotations
return (res, img_info, file_name)
def load_anno(self, index):
return self.annotations[index][0]
def pull_item(self, index):
id_ = self.ids[index]
res, img_info, file_name = self.annotations[index]
# load image and preprocess
img_file = os.path.join(
self.data_dir, self.name, file_name
)
img = cv2.imread(img_file)
assert img is not None
return img, res.copy(), img_info, np.array([id_])
@Dataset.resize_getitem
def __getitem__(self, index):
"""
One image / label pair for the given index is picked up and pre-processed.
Args:
index (int): data index
Returns:
img (numpy.ndarray): pre-processed image
padded_labels (torch.Tensor): pre-processed label data.
The shape is :math:`[max_labels, 5]`.
each label consists of [class, xc, yc, w, h]:
class (float): class index.
xc, yc (float) : center of bbox whose values range from 0 to 1.
w, h (float) : size of bbox whose values range from 0 to 1.
info_img : tuple of h, w, nh, nw, dx, dy.
h, w (int): original shape of the image
nh, nw (int): shape of the resized image without padding
dx, dy (int): pad size
img_id (int): same as the input index. Used for evaluation.
"""
img, target, img_info, img_id = self.pull_item(index)
if self.preproc is not None:
img, target = self.preproc(img, target, self.input_dim)
return img, target, img_info, img_id
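# Example usage (an illustrative sketch; it assumes the MOT images and the COCO-style
# annotation json are already present under <get_yolox_datadir()>/mot):
#
#     dataset = MOTDataset(json_file="train_half.json", name="train", img_size=(608, 1088))
#     img, labels, img_info, img_id = dataset[0]
#     # without a preproc transform, labels has shape (num_objects, 6):
#     # [x1, y1, x2, y2, class_index, track_id (hard-coded to 1 here)]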
| 2.703125 | 3 |
src/poetry/console/commands/remove.py | pkoch/poetry | 0 | 5801 | <reponame>pkoch/poetry
from __future__ import annotations
from typing import Any
from cleo.helpers import argument
from cleo.helpers import option
from tomlkit.toml_document import TOMLDocument
try:
from poetry.core.packages.dependency_group import MAIN_GROUP
except ImportError:
MAIN_GROUP = "default"
from poetry.console.commands.installer_command import InstallerCommand
class RemoveCommand(InstallerCommand):
name = "remove"
description = "Removes a package from the project dependencies."
arguments = [argument("packages", "The packages to remove.", multiple=True)]
options = [
option("group", "G", "The group to remove the dependency from.", flag=False),
option("dev", "D", "Remove a package from the development dependencies."),
option(
"dry-run",
None,
"Output the operations but do not execute anything "
"(implicitly enables --verbose).",
),
]
help = """The <info>remove</info> command removes a package from the current
list of installed packages
<info>poetry remove</info>"""
loggers = ["poetry.repositories.pypi_repository", "poetry.inspection.info"]
def handle(self) -> int:
packages = self.argument("packages")
if self.option("dev"):
self.line_error(
"<warning>The --dev option is deprecated, "
"use the `--group dev` notation instead.</warning>"
)
group = "dev"
else:
group = self.option("group", self.default_group)
content: dict[str, Any] = self.poetry.file.read()
poetry_content = content["tool"]["poetry"]
if group is None:
removed = []
group_sections = [
(group_name, group_section.get("dependencies", {}))
for group_name, group_section in poetry_content.get("group", {}).items()
]
for group_name, section in [
(MAIN_GROUP, poetry_content["dependencies"])
] + group_sections:
removed += self._remove_packages(packages, section, group_name)
if group_name != MAIN_GROUP:
if not section:
del poetry_content["group"][group_name]
else:
poetry_content["group"][group_name]["dependencies"] = section
elif group == "dev" and "dev-dependencies" in poetry_content:
# We need to account for the old `dev-dependencies` section
removed = self._remove_packages(
packages, poetry_content["dev-dependencies"], "dev"
)
if not poetry_content["dev-dependencies"]:
del poetry_content["dev-dependencies"]
else:
removed = self._remove_packages(
packages, poetry_content["group"][group].get("dependencies", {}), group
)
if not poetry_content["group"][group]:
del poetry_content["group"][group]
if "group" in poetry_content and not poetry_content["group"]:
del poetry_content["group"]
removed_set = set(removed)
not_found = set(packages).difference(removed_set)
if not_found:
raise ValueError(
"The following packages were not found: " + ", ".join(sorted(not_found))
)
# Refresh the locker
self.poetry.set_locker(
self.poetry.locker.__class__(self.poetry.locker.lock.path, poetry_content)
)
self._installer.set_locker(self.poetry.locker)
# Update packages
self._installer.use_executor(
self.poetry.config.get("experimental.new-installer", False)
)
self._installer.dry_run(self.option("dry-run", False))
self._installer.verbose(self._io.is_verbose())
self._installer.update(True)
self._installer.whitelist(removed_set)
status = self._installer.run()
if not self.option("dry-run") and status == 0:
assert isinstance(content, TOMLDocument)
self.poetry.file.write(content)
return status
def _remove_packages(
self, packages: list[str], section: dict[str, Any], group_name: str
) -> list[str]:
removed = []
group = self.poetry.package.dependency_group(group_name)
section_keys = list(section.keys())
for package in packages:
for existing_package in section_keys:
if existing_package.lower() == package.lower():
del section[existing_package]
removed.append(package)
group.remove_dependency(package)
return removed
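# Example invocations (illustrative; the flags shown are the ones defined in `options` above):
#
#     poetry remove requests
#     poetry remove requests --group dev
#     poetry remove requests --dry-run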
| 2.109375 | 2 |
orrinjelo/aoc2021/day_11.py | orrinjelo/AdventOfCode2021 | 0 | 5802 | <filename>orrinjelo/aoc2021/day_11.py
from orrinjelo.utils.decorators import timeit
import numpy as np
def parse(lines):
return np.array([[int(c) for c in line.strip()] for line in lines])
visited = []
def flash(a, x, y):
    global visited
    if (x,y) in visited:
        return
    # mark this octopus as flashed before propagating, so recursive calls cannot re-flash it
    visited.append((x,y))
    for dx in range(-1,2):
        for dy in range(-1,2):
            if dx == 0 and dy == 0:
                continue
            if x+dx < 0 or x+dx >= a.shape[0]:
                continue
            if y+dy < 0 or y+dy >= a.shape[1]:
                continue
            a[x+dx, y+dy] += 1
            if a[x+dx, y+dy] > 9:
                flash(a, x+dx, y+dy)
def progress(a):
global visited
a += 1
x,y = np.where(a > 9)
visited = []
for i in range(len(x)):
flash(a,x[i],y[i])
count = np.sum(a > 9)
# print('a:\n', a)
a[a > 9] = 0
return a, count
@timeit("Day 11 Part 1")
def part1(input_str, use_rust=False):
octomap = parse(input_str)
total_count = 0
for i in range(100):
octomap, count = progress(octomap)
total_count += count
return total_count
@timeit("Day 11 Part 2")
def part2(input_str, use_rust=False):
octomap = parse(input_str)
step = 0
while True:
step += 1
octomap, count = progress(octomap)
if count == octomap.shape[0]*octomap.shape[1]:
break
return step
# = Test ================================================
inputlist = [
'5483143223',
'2745854711',
'5264556173',
'6141336146',
'6357385478',
'4167524645',
'2176841721',
'6882881134',
'4846848554',
'5283751526',
]
def test_part1():
# import matplotlib.pyplot as plt
# plt.imshow(parse(inputlist))
# plt.show()
assert part1(inputlist) == 1656
def test_part2():
assert part2(inputlist) == 195
import pygame
import sys
def plot(input_str):
# octomap = parse(input_str)
octomap = np.random.randint(0,9,(100,100))
pygame.init()
clock = pygame.time.Clock()
scale = 5
screen = pygame.display.set_mode((octomap.shape[0]*scale,octomap.shape[1]*scale))
surface = pygame.Surface((octomap.shape[0]*scale,octomap.shape[1]*scale))
frame = 0
history = []
for i in range(500):
print('Generating frame #', i)
octomap, _ = progress(octomap)
history.append(np.copy(octomap))
input()
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit(); sys.exit();
# erase the screen
screen.fill((255,0,0))
        try:
            octomap = history[frame]
        except IndexError:
            frame = 0
for i in range(octomap.shape[0]):
for j in range(octomap.shape[1]):
if octomap[i,j] == 0:
brightness = 255
else:
brightness = int(255.0 * octomap[i,j]/10.0)
print(i*scale, j*scale, brightness)
pygame.draw.rect(
screen,
(brightness,brightness,brightness),
pygame.Rect(i*scale, j*scale, scale, scale)
)
pygame.display.update()
# surface.blit(screen, (0,0))
clock.tick(30)
frame += 1 | 2.890625 | 3 |
exercise_2/exercise_2.1.py | lukaszbinden/ethz-iacv-2020 | 0 | 5803 |
camera_width = 640
camera_height = 480
film_back_width = 1.417
film_back_height = 0.945
x_center = 320
y_center = 240
P_1 = (-0.023, -0.261, 2.376)
p_11 = P_1[0]
p_12 = P_1[1]
p_13 = P_1[2]
P_2 = (0.659, -0.071, 2.082)
p_21 = P_2[0]
p_22 = P_2[1]
p_23 = P_2[2]
p_1_prime = (52, 163)
x_1 = p_1_prime[0]
y_1 = p_1_prime[1]
p_2_prime = (218, 216)
x_2 = p_2_prime[0]
y_2 = p_2_prime[1]
f = 1.378
k_x = camera_width / film_back_width
k_y = camera_height / film_back_height
# f_k_x = f * k_x
f_k_x = f
# f_k_y = f * k_y
f_k_y = f
u_1_prime = (x_1 - x_center) / k_x
v_1_prime = (y_1 - y_center) / k_y
u_2_prime = (x_2 - x_center) / k_x
v_2_prime = (y_2 - y_center) / k_y
c_1_prime = (f_k_x * p_21 + (p_13 - p_23) * u_2_prime - u_2_prime/u_1_prime * f_k_x * p_11) / (f_k_x * (1 - u_2_prime/u_1_prime))
c_2_prime = (f_k_y * p_22 - (p_23 - (p_13*u_1_prime - f_k_x*(p_11 - c_1_prime))/u_1_prime) * v_2_prime) / f_k_y
c_2_prime_alt = (f_k_y * p_12 - (p_13 - (p_13*u_1_prime - f_k_x*(p_11 - c_1_prime))/u_1_prime) * v_1_prime) / f_k_y
c_3_prime = p_13 - (f_k_x / u_1_prime) * (p_11 - c_1_prime)
rho_1_prime = p_13 - c_3_prime
rho_2_prime = p_23 - c_3_prime
print(f"C' = ({c_1_prime}, {c_2_prime}, {c_3_prime})")
print(f"c_2_prime_alt = {c_2_prime_alt}")
print(f"rho_1_prime = {rho_1_prime}")
print(f"rho_2_prime = {rho_2_prime}")
print("------------------")
r_11 = f_k_x * (p_11 - c_1_prime)
r_12 = f_k_y * (p_12 - c_2_prime)
r_13 = 1 * (p_13 - c_3_prime)
l_11 = rho_1_prime * u_1_prime
l_12 = rho_1_prime * v_1_prime
l_13 = rho_1_prime * 1
print(f"L: ({l_11}, {l_12}, {l_13})")
print(f"R: ({r_11}, {r_12}, {r_13})")
print("------------------")
r_21 = f_k_x * (p_21 - c_1_prime)
r_22 = f_k_y * (p_22 - c_2_prime)
r_23 = 1 * (p_23 - c_3_prime)
l_21 = rho_2_prime * u_2_prime
l_22 = rho_2_prime * v_2_prime
l_23 = rho_2_prime * 1
print(f"L: ({l_11}, {l_12}, {l_13})")
print(f"R: ({r_11}, {r_12}, {r_13})") | 1.757813 | 2 |
services/train/single.py | paper2code/torch2vec-restful-service | 2 | 5804 | <reponame>paper2code/torch2vec-restful-service
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 26 19:15:34 2020
@author: deviantpadam
"""
import pandas as pd
import numpy as np
import concurrent.futures
import os
import tqdm
from collections import Counter
from torch2vec.data import DataPreparation
from torch2vec.torch2vec import DM
# train = pd.read_csv('/home/deviantpadam/Downloads/example.csv',delimiter='\t')
# train = pd.read_csv('/home/deviantpadam/Downloads/example (1).csv')
train = pd.read_csv('../data/suggest_dump.txt',delimiter='\t')
def cleaner(train):
sub=(train['subjects'].str.lower()).str.split(',',expand=True)
sub.drop([2,3],axis=1,inplace=True)
sub.columns = ['subject1','subject2']
sub.fillna('none',inplace=True)
tasks = (train['tasks'].str.lower()).str.split(',',expand=True)[0]
tasks.fillna('none',inplace=True)
tasks.name = 'task'
train = pd.concat([train,sub,tasks],axis=1).drop(['subjects','tasks'],axis=1)
train.fillna('none',inplace=True)
return train
train = cleaner(train)
corpus = train['authors']+' '+train['title']+' '+train['summary']+' '+train['subject1']+' '+train['subject2']+' '+train['task']
corpus.name = 'text'
corpus = pd.concat([train['subject1'],train['subject2'],train['task'],corpus],axis=1)
def phraser(corpus,workers=-1):
if workers==-1:
workers = os.cpu_count()
chunks = np.array_split(corpus,workers)
with concurrent.futures.ProcessPoolExecutor(workers) as executor:
result = np.concatenate(list(tqdm.tqdm(executor.map(_add_bigrams,chunks),total=workers,desc='Phrasing using {} cores'.format(workers))),axis=0)
executor.shutdown(wait=True)
# result = _add_bigrams(data)
global bigrams
del bigrams
return pd.DataFrame({'text':np.array(result)})['text']
def _add_bigrams(text):
for idx in range(len(text)):
length=len(text[idx])-1
word_count=0
while word_count<length:
if text[idx][word_count]+' '+text[idx][word_count+1] in bigrams:
text[idx][word_count] = text[idx][word_count]+' '+text[idx][word_count+1]
text[idx].remove(text[idx][word_count+1])
length = len(text[idx])-1
# print(cor[i][j]+' '+cor[i][j+1])
word_count+=1
return text
def _get_bigrams(corpus,min_count):
text = np.copy(corpus)
vocab = [word for sen in text for word in sen]
ngram = [(i,j) for i,j in zip(vocab[:-1],vocab[1:])]
freq = Counter(ngram)
filterbi = [bigram for bigram in freq.most_common() if bigram[1]>min_count]
bigrams = [" ".join(bigram[0]) for bigram in filterbi]
return bigrams
data = DataPreparation(corpus.reset_index(),f_size=3)
data.tokenize()
bigrams = _get_bigrams(data.corpus.values,min_count=700)
data.corpus = phraser(data.corpus.values)
bigrams = _get_bigrams(data.corpus.values,min_count=500)
data.corpus = phraser(data.corpus.values)
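# The two phrasing passes above first merge very frequent word pairs (min_count=700), then
# re-run on the merged corpus with a lower threshold (min_count=500), so that longer common
# phrases can form as bigrams-of-bigrams before the vocabulary is built.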
data.vocab_builder()
doc, context, target_noise_ids = data.get_data(window_size=5,num_noise_words=10)
model = DM(vec_dim=100,num_docs=len(data),num_words=data.vocab_size).cuda()
num_workers = os.cpu_count()
model.fit(doc_ids=doc,context=context,target_noise_ids=target_noise_ids,epochs=20,batch_size=8000,num_workers=num_workers)
model.save_model(data.document_ids,data.args,file_name='weights')
| 2.6875 | 3 |
tests/sources/test_document_oereblex.py | geo-bl-ch/pyramid_oereb | 0 | 5805 | <reponame>geo-bl-ch/pyramid_oereb
# -*- coding: utf-8 -*-
import datetime
import pytest
import requests_mock
from geolink_formatter.entity import Document, File
from requests.auth import HTTPBasicAuth
from pyramid_oereb.contrib.sources.document import OEREBlexSource
from pyramid_oereb.lib.records.documents import DocumentRecord, LegalProvisionRecord
from pyramid_oereb.lib.records.office import OfficeRecord
from tests.mockrequest import MockParameter
@pytest.mark.parametrize('valid,cfg', [
(True, {
'host': 'http://oereblex.example.com',
'language': 'de',
'canton': 'BL'
}),
(False, {
'language': 'de',
'canton': 'BL'
}),
(False, {
'host': 'http://oereblex.example.com',
'language': 'german',
'canton': 'BL'
}),
(False, {
'host': 'http://oereblex.example.com',
'language': 'de'
})
])
def test_init(valid, cfg):
if valid:
assert isinstance(OEREBlexSource(**cfg), OEREBlexSource)
else:
with pytest.raises(AssertionError):
OEREBlexSource(**cfg)
@pytest.mark.parametrize('key,language,result', [
('official_title', None, None),
('municipality', None, 'Liestal'),
('municipality', 'de', {'de': 'Liestal'})
])
def test_get_mapped_value(key, language, result):
file_ = File('Test', '/api/attachments/1', 'main')
document = Document(id='test', title='Test', category='main', doctype='decree', files=[file_],
enactment_date=datetime.date.today(), subtype='Liestal', authority='Office')
source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL',
mapping={'municipality': 'subtype'})
assert source._get_mapped_value(document, key, language=language) == result
@pytest.mark.parametrize('i,document', [
(1, Document(
id='doc1',
title='Document 1',
category='main',
doctype='edict',
authority='Office',
files=[File('File 1', '/api/attachments/1', 'main')],
enactment_date=datetime.date.today()
)),
(2, Document(
id='doc2',
title='Document 2',
category='main',
doctype='decree',
authority='Office',
files=[
File('File 2', '/api/attachments/2', 'main'),
File('File 3', '/api/attachments/3', 'additional')
],
enactment_date=datetime.date.today()
)),
(3, Document(
id='doc1',
title='Document 1',
category='main',
doctype='invalid',
authority='Office',
files=[File('File 1', '/api/attachments/1', 'main')],
enactment_date=datetime.date.today()
)),
(4, Document(
id='doc1',
title='Document 1',
category='main',
doctype='decree',
authority='Office',
files=[],
enactment_date=datetime.date.today()
))
])
def test_get_document_records(i, document):
language = 'de'
source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL')
references = [
Document(
id='ref',
title='Reference',
category='related',
doctype='edict',
authority='Office',
files=[File('Reference file', '/api/attachments/4', 'main')],
enactment_date=datetime.date.today()
)
]
if i == 3:
with pytest.raises(TypeError):
source._get_document_records(document, language, references)
elif i == 4:
assert source._get_document_records(document, language, references) == []
else:
records = source._get_document_records(document, language, references)
assert len(records) == i
for idx, record in enumerate(records):
if i == 1:
assert isinstance(record, DocumentRecord)
elif i == 2:
assert isinstance(record, LegalProvisionRecord)
assert record.title == {'de': 'Document {0}'.format(i)}
assert record.published_from == datetime.date.today()
assert record.canton == 'BL'
assert record.text_at_web == {'de': '/api/attachments/{fid}'.format(fid=i + idx)}
assert len(record.references) == 1
reference = record.references[0]
assert isinstance(reference, DocumentRecord)
assert reference.title == {'de': 'Reference'}
assert reference.canton == 'BL'
assert reference.text_at_web == {'de': '/api/attachments/4'}
def test_read():
with requests_mock.mock() as m:
with open('./tests/resources/geolink_v1.1.1.xml', 'rb') as f:
m.get('http://oereblex.example.com/api/geolinks/100.xml', content=f.read())
source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL')
source.read(MockParameter(), 100)
assert len(source.records) == 2
document = source.records[0]
assert isinstance(document, DocumentRecord)
assert isinstance(document.responsible_office, OfficeRecord)
assert document.responsible_office.name == {'de': 'Landeskanzlei'}
assert document.canton == 'BL'
assert document.text_at_web == {
'de': 'http://oereblex.example.com/api/attachments/313'
}
assert len(document.references) == 5
def test_read_related_decree_as_main():
with requests_mock.mock() as m:
with open('./tests/resources/geolink_v1.1.1.xml', 'rb') as f:
m.get('http://oereblex.example.com/api/geolinks/100.xml', content=f.read())
source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL',
related_decree_as_main=True)
source.read(MockParameter(), 100)
assert len(source.records) == 3
document = source.records[0]
assert isinstance(document, DocumentRecord)
assert isinstance(document.responsible_office, OfficeRecord)
assert document.responsible_office.name == {'de': 'Landeskanzlei'}
assert document.canton == 'BL'
assert document.text_at_web == {
'de': 'http://oereblex.example.com/api/attachments/313'
}
assert len(document.references) == 4
def test_read_with_version_in_url():
with requests_mock.mock() as m:
with open('./tests/resources/geolink_v1.1.1.xml', 'rb') as f:
m.get('http://oereblex.example.com/api/1.1.1/geolinks/100.xml', content=f.read())
source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL',
pass_version=True)
source.read(MockParameter(), 100)
assert len(source.records) == 2
def test_read_with_specified_version():
with requests_mock.mock() as m:
with open('./tests/resources/geolink_v1.0.0.xml', 'rb') as f:
m.get('http://oereblex.example.com/api/1.0.0/geolinks/100.xml', content=f.read())
source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL',
pass_version=True, version='1.0.0')
source.read(MockParameter(), 100)
assert len(source.records) == 2
def test_read_with_specified_language():
with requests_mock.mock() as m:
with open('./tests/resources/geolink_v1.1.1.xml', 'rb') as f:
m.get('http://oereblex.example.com/api/geolinks/100.xml?locale=fr', content=f.read())
source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL')
params = MockParameter()
params.set_language('fr')
source.read(params, 100)
assert len(source.records) == 2
document = source.records[0]
assert document.responsible_office.name == {'fr': 'Landeskanzlei'}
assert document.text_at_web == {
'fr': 'http://oereblex.example.com/api/attachments/313'
}
def test_authentication():
auth = {
'username': 'test',
'password': '<PASSWORD>'
}
source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL', auth=auth)
assert isinstance(source._auth, HTTPBasicAuth)
def test_get_document_title():
document = Document([], id='1', title='Test')
result = {'de': 'Test'}
assert OEREBlexSource._get_document_title(document, File(), 'de') == result
| 2.125 | 2 |
apps/zsh/singletons.py | codecat555/codecat555-fidgetingbits_knausj_talon | 4 | 5806 | <reponame>codecat555/codecat555-fidgetingbits_knausj_talon<filename>apps/zsh/singletons.py
# A rarely-updated module to assist in writing reload-safe talon modules using
# things like threads, which are not normally safe for reloading with talon.
# If this file is ever updated, you'll need to restart talon.
import logging
_singletons = {}
def singleton(fn):
name = f"{fn.__module__}.{fn.__name__}"
# Do any cleanup actions from before.
if name in _singletons:
old = _singletons.pop(name)
try:
next(old)
except StopIteration:
pass
else:
logging.error(
f"the old @singleton function {name} had more than one yield!"
)
# Do the startup actions on the new object.
it = iter(fn())
obj = next(it)
# Remember the iterator so we can call the cleanup actions later.
_singletons[name] = it
# We want the object yielded by the iterator to be available at the name
# of the function, so instead of returning a function we return an object.
return obj
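# Example usage (an illustrative sketch; the helper names are hypothetical). A @singleton
# function yields exactly once: the code before the yield runs at (re)load time, the yielded
# object is bound to the function's name, and the code after the yield runs as cleanup the
# next time the decorated function's module is reloaded.
#
#     @singleton
#     def worker():
#         thread = start_background_thread()   # hypothetical setup helper
#         yield thread                          # `worker` now refers to this thread object
#         thread.stop()                         # hypothetical cleanup on reload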
| 2.375 | 2 |
trainNN/run_bichrom.py | yztxwd/Bichrom | 3 | 5807 | import argparse
import yaml
from subprocess import call
from train import train_bichrom
if __name__ == '__main__':
# parsing
parser = argparse.ArgumentParser(description='Train and Evaluate Bichrom')
parser.add_argument('-training_schema_yaml', required=True,
help='YAML file with paths to train, test and val data')
parser.add_argument('-len', help='Size of genomic windows',
required=True, type=int)
parser.add_argument('-outdir', required=True, help='Output directory')
parser.add_argument('-nbins', type=int, required=True, help='Number of bins')
args = parser.parse_args()
# load the yaml file with input data paths:
with open(args.training_schema_yaml, 'r') as f:
try:
data_paths = yaml.safe_load(f)
except yaml.YAMLError as exc:
print(exc)
# create the output directory:
outdir = args.outdir
call(['mkdir', outdir])
train_bichrom(data_paths=data_paths, outdir=outdir, seq_len=args.len,
bin_size=int(args.len/args.nbins)) | 2.609375 | 3 |
setup.py | Fronius-SED/rapidyaml | 0 | 5808 | <reponame>Fronius-SED/rapidyaml
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: MIT
import os
import shutil
import sys
from pathlib import Path
from distutils import log
from setuptools import setup
from setuptools.command.sdist import sdist as SdistCommand
from cmake_build_extension import BuildExtension, CMakeExtension
TOP_DIR = (Path(__file__).parent).resolve()
# Where the Python library is actually found.
PYTHON_DIR = "api/python"
setup_kw = {}
# Read in the package version when not in a git repository.
VERSION_FILE = os.path.join(PYTHON_DIR, 'ryml', 'version.py')
if not (TOP_DIR / '.git').exists() and os.path.exists(VERSION_FILE):
exec(open(VERSION_FILE).read())
setup_kw['version'] = version
else:
setup_kw['use_scm_version']= {
"version_scheme": "post-release",
"local_scheme": "no-local-version",
"write_to": VERSION_FILE,
}
# Read in the module description from the README.md file.
README_FILE = TOP_DIR / "README.md"
if README_FILE.exists():
with open(TOP_DIR / "README.md", "r") as fh:
setup_kw['long_description'] = fh.read()
setup_kw['long_description_content_type'] = "text/markdown"
# define a CMake package
cmake_args = dict(
name='ryml.ryml',
install_prefix='',
source_dir='',
cmake_component='python',
cmake_configure_options=[
"-DRYML_BUILD_API:BOOL=ON",
# Force cmake to use the Python interpreter we are currently using to
# run setup.py
"-DPython3_EXECUTABLE:FILEPATH="+sys.executable,
],
)
try:
ext = CMakeExtension(**cmake_args)
except TypeError:
del cmake_args['cmake_component']
ext = CMakeExtension(**cmake_args)
# If the CMakeExtension doesn't support `cmake_component` then we have to
# do some manual cleanup.
_BuildExtension=BuildExtension
class BuildExtension(_BuildExtension):
def build_extension(self, ext):
_BuildExtension.build_extension(self, ext)
ext_dir = Path(self.get_ext_fullpath(ext.name)).parent.absolute()
cmake_install_prefix = ext_dir / ext.install_prefix
assert cmake_install_prefix.exists(), cmake_install_prefix
try:
lib_path = cmake_install_prefix / "lib"
assert lib_path.exists(), lib_path
log.info("Removing everything under: %s", lib_path)
shutil.rmtree(lib_path)
inc_path = cmake_install_prefix / "include"
assert inc_path.exists(), inc_path
log.info("Removing everything under: %s", inc_path)
shutil.rmtree(inc_path)
# Windows only
cm_path = cmake_install_prefix / "cmake"
if cm_path.exists():
log.info("Removing everything under: %s", cm_path)
shutil.rmtree(cm_path)
except:
log.info('Found following installed files:')
for f in cmake_install_prefix.rglob("*"):
log.info(' - %s', f)
raise
setup(
# Package human readable information
name='rapidyaml',
#author='<NAME>',
description='Rapid YAML - a library to parse and emit YAML, and do it fast.',
url='https://github.com/biojppm/rapidyaml',
license='MIT',
license_files=['LICENSE.txt'],
# Package contents control
cmdclass={
"build_ext": BuildExtension,
},
package_dir={"": PYTHON_DIR},
packages=['ryml'],
ext_modules=[ext],
include_package_data=True,
# Requirements
python_requires=">=3.7",
setup_requires=['setuptools_scm'],
# Extra arguments
**setup_kw,
)
| 1.632813 | 2 |
litex_boards/targets/digilent_arty_z7.py | machdyne/litex-boards | 0 | 5809 | <filename>litex_boards/targets/digilent_arty_z7.py
#!/usr/bin/env python3
#
# This file is part of LiteX-Boards.
#
# Copyright (c) 2021 <NAME> <<EMAIL>>
# SPDX-License-Identifier: BSD-2-Clause
import argparse
import os
import subprocess
from migen import *
from litex_boards.platforms import digilent_arty_z7
from litex.build import tools
from litex.build.xilinx import common as xil_common
from litex.build.xilinx.vivado import vivado_build_args, vivado_build_argdict
from litex.soc.interconnect import axi
from litex.soc.interconnect import wishbone
from litex.soc.cores.clock import *
from litex.soc.integration.soc_core import *
from litex.soc.integration.soc import SoCRegion
from litex.soc.integration.builder import *
from litex.soc.cores.led import LedChaser
# CRG ----------------------------------------------------------------------------------------------
class _CRG(Module):
def __init__(self, platform, sys_clk_freq, use_ps7_clk=False):
self.rst = Signal()
self.clock_domains.cd_sys = ClockDomain()
# # #
if use_ps7_clk:
self.comb += ClockSignal("sys").eq(ClockSignal("ps7"))
self.comb += ResetSignal("sys").eq(ResetSignal("ps7") | self.rst)
else:
# Clk.
clk125 = platform.request("clk125")
# PLL.
self.submodules.pll = pll = S7PLL(speedgrade=-1)
self.comb += pll.reset.eq(self.rst)
pll.register_clkin(clk125, 125e6)
pll.create_clkout(self.cd_sys, sys_clk_freq)
# Ignore sys_clk to pll.clkin path created by SoC's rst.
platform.add_false_path_constraints(self.cd_sys.clk, pll.clkin)
# BaseSoC ------------------------------------------------------------------------------------------
class BaseSoC(SoCCore):
def __init__(self, variant="z7-20", toolchain="vivado", sys_clk_freq=int(125e6),
with_led_chaser=True, **kwargs):
platform = digilent_arty_z7.Platform(variant=variant, toolchain=toolchain)
if kwargs.get("cpu_type", None) == "zynq7000":
kwargs['integrated_sram_size'] = 0
kwargs['with_uart'] = False
self.mem_map = {
'csr': 0x4000_0000, # Zynq GP0 default
}
# SoCCore ----------------------------------------------------------------------------------
SoCCore.__init__(self, platform, sys_clk_freq,
ident = "LiteX SoC on Arty Z7",
**kwargs)
# Zynq7000 Integration ---------------------------------------------------------------------
if kwargs.get("cpu_type", None) == "zynq7000":
assert toolchain == "vivado", ' not tested / specific vivado cmds'
preset_name = "arty_z7_20.tcl" if variant == "z7-20" else "arty_z7_10.tcl"
os.system("wget http://kmf2.trabucayre.com/" + preset_name)
self.cpu.set_ps7(preset=preset_name)
# Connect AXI GP0 to the SoC
wb_gp0 = wishbone.Interface()
self.submodules += axi.AXI2Wishbone(
axi = self.cpu.add_axi_gp_master(),
wishbone = wb_gp0,
base_address = self.mem_map['csr'])
self.add_wb_master(wb_gp0)
use_ps7_clk = True
else:
use_ps7_clk = False
# CRG --------------------------------------------------------------------------------------
self.submodules.crg = _CRG(platform, sys_clk_freq, use_ps7_clk)
# Leds -------------------------------------------------------------------------------------
if with_led_chaser:
self.submodules.leds = LedChaser(
pads = platform.request_all("user_led"),
sys_clk_freq = sys_clk_freq)
# Build --------------------------------------------------------------------------------------------
def main():
parser = argparse.ArgumentParser(description="LiteX SoC on Arty Z7")
parser.add_argument("--toolchain", default="vivado", help="FPGA toolchain (vivado, symbiflow or yosys+nextpnr).")
parser.add_argument("--build", action="store_true", help="Build bitstream.")
parser.add_argument("--load", action="store_true", help="Load bitstream.")
parser.add_argument("--variant", default="z7-20", help="Board variant (z7-20 or z7-10).")
parser.add_argument("--sys-clk-freq", default=125e6, help="System clock frequency.")
builder_args(parser)
soc_core_args(parser)
vivado_build_args(parser)
parser.set_defaults(cpu_type="zynq7000")
args = parser.parse_args()
soc = BaseSoC(
variant = args.variant,
toolchain = args.toolchain,
sys_clk_freq=int(float(args.sys_clk_freq)),
**soc_core_argdict(args)
)
builder = Builder(soc, **builder_argdict(args))
builder_kwargs = vivado_build_argdict(args) if args.toolchain == "vivado" else {}
builder.build(**builder_kwargs, run=args.build)
if args.load:
prog = soc.platform.create_programmer()
prog.load_bitstream(os.path.join(builder.gateware_dir, soc.build_name + ".bit"))
if __name__ == "__main__":
main()
| 1.710938 | 2 |
goose/parsers.py | allmalaysianews/article-extractor | 0 | 5810 | # -*- coding: utf-8 -*-
"""\
This is a python port of "Goose" orignialy licensed to Gravity.com
under one or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership.
Python port was written by <NAME>
Gravity.com licenses this file
to you under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import lxml.html as lxmlhtml
from lxml.html import soupparser
from lxml import etree
from copy import deepcopy
from goose.text import innerTrim
from goose.text import encodeValue
class Parser(object):
@classmethod
def xpath_re(self, node, expression):
regexp_namespace = "http://exslt.org/regular-expressions"
items = node.xpath(expression, namespaces={'re': regexp_namespace})
return items
@classmethod
def drop_tag(self, nodes):
if isinstance(nodes, list):
for node in nodes:
node.drop_tag()
else:
nodes.drop_tag()
@classmethod
def css_select(self, node, selector):
return node.cssselect(selector)
@classmethod
def fromstring(self, html):
html = encodeValue(html)
self.doc = lxmlhtml.fromstring(html)
return self.doc
@classmethod
def nodeToString(self, node):
return etree.tostring(node)
@classmethod
def replaceTag(self, node, tag):
node.tag = tag
@classmethod
def stripTags(self, node, *tags):
etree.strip_tags(node, *tags)
@classmethod
def getElementById(self, node, idd):
selector = '//*[@id="%s"]' % idd
elems = node.xpath(selector)
if elems:
return elems[0]
return None
@classmethod
def getElementsByTag(self, node, tag=None, attr=None, value=None, childs=False):
NS = "http://exslt.org/regular-expressions"
# selector = tag or '*'
selector = 'descendant-or-self::%s' % (tag or '*')
if attr and value:
selector = '%s[re:test(@%s, "%s", "i")]' % (selector, attr, value)
elems = node.xpath(selector, namespaces={"re": NS})
# remove the root node
# if we have a selection tag
if node in elems and (tag or childs):
elems.remove(node)
return elems
@classmethod
def appendChild(self, node, child):
node.append(child)
@classmethod
def childNodes(self, node):
return list(node)
@classmethod
def childNodesWithText(self, node):
root = node
# create the first text node
# if we have some text in the node
if root.text:
t = lxmlhtml.HtmlElement()
t.text = root.text
t.tag = 'text'
root.text = None
root.insert(0, t)
# loop childs
for c, n in enumerate(list(root)):
idx = root.index(n)
# don't process texts nodes
if n.tag == 'text':
continue
# create a text node for tail
if n.tail:
t = self.createElement(tag='text', text=n.tail, tail=None)
root.insert(idx + 1, t)
return list(root)
@classmethod
def textToPara(self, text):
return self.fromstring(text)
@classmethod
def getChildren(self, node):
return node.getchildren()
@classmethod
def getElementsByTags(self, node, tags):
selector = ','.join(tags)
elems = self.css_select(node, selector)
# remove the root node
# if we have a selection tag
if node in elems:
elems.remove(node)
return elems
@classmethod
def createElement(self, tag='p', text=None, tail=None):
t = lxmlhtml.HtmlElement()
t.tag = tag
t.text = text
t.tail = tail
return t
@classmethod
def getComments(self, node):
return node.xpath('//comment()')
@classmethod
def getParent(self, node):
return node.getparent()
@classmethod
def remove(self, node):
parent = node.getparent()
if parent is not None:
if node.tail:
prev = node.getprevious()
if prev is None:
if not parent.text:
parent.text = ''
parent.text += u' ' + node.tail
else:
if not prev.tail:
prev.tail = ''
prev.tail += u' ' + node.tail
node.clear()
parent.remove(node)
@classmethod
def getTag(self, node):
return node.tag
@classmethod
def getText(self, node):
txts = [i for i in node.itertext()]
return innerTrim(u' '.join(txts).strip())
@classmethod
def previousSiblings(self, node):
nodes = []
for c, n in enumerate(node.itersiblings(preceding=True)):
nodes.append(n)
return nodes
@classmethod
def previousSibling(self, node):
nodes = []
for c, n in enumerate(node.itersiblings(preceding=True)):
nodes.append(n)
if c == 0:
break
return nodes[0] if nodes else None
@classmethod
def nextSibling(self, node):
nodes = []
for c, n in enumerate(node.itersiblings(preceding=False)):
nodes.append(n)
if c == 0:
break
return nodes[0] if nodes else None
@classmethod
def isTextNode(self, node):
return True if node.tag == 'text' else False
@classmethod
def getAttribute(self, node, attr=None):
if attr:
return node.attrib.get(attr, None)
return attr
@classmethod
def delAttribute(self, node, attr=None):
if attr:
_attr = node.attrib.get(attr, None)
if _attr:
del node.attrib[attr]
@classmethod
def setAttribute(self, node, attr=None, value=None):
if attr and value:
node.set(attr, value)
@classmethod
def outerHtml(self, node):
e0 = node
if e0.tail:
e0 = deepcopy(e0)
e0.tail = None
return self.nodeToString(e0)
class ParserSoup(Parser):
@classmethod
def fromstring(self, html):
html = encodeValue(html)
self.doc = soupparser.fromstring(html)
return self.doc
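# Example usage (an illustrative sketch): parsing a small HTML fragment and querying it with
# the classmethod helpers defined above.
#
#     doc = Parser.fromstring("<html><body><p id='intro'>Hello goose</p></body></html>")
#     intro = Parser.getElementById(doc, "intro")
#     print(Parser.getText(intro))                    # -> "Hello goose"
#     paragraphs = Parser.getElementsByTag(doc, tag="p")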
| 2.15625 | 2 |
src/infrastructure/database/postgres/sqlhandler.py | SoyBeansLab/daizu-online-judge-backend | 7 | 5811 | <reponame>SoyBeansLab/daizu-online-judge-backend<gh_stars>1-10
from logging import getLogger
import os
from typing import List, Union
import psycopg2
from interface.database.sqlhandler import Cursor as AbsCursor
from interface.database.sqlhandler import Result as AbsResult
from interface.database.sqlhandler import SqlHandler as AbsSqlHandler
from exceptions.waf import SqlTransactionException
logger = getLogger("daizu").getChild("infrastracture.SqlHandler")
class Result(AbsResult):
def __init__(self, rowid: int):
self.last_insertid = rowid
def lastrowid(self) -> int:
return self.last_insertid
class Cursor(AbsCursor):
def __init__(self, cursor):
self.cursor = cursor
def fetch_all(self):
return self.cursor
def fetch_one(self):
if len(self.cursor) == 0:
return []
return self.cursor[0]
class SqlHandler(AbsSqlHandler):
def __init__(self):
        # read the connection settings from the environment
self.host = os.getenv("DAIZU_DATABASE_HOST", "localhost")
self.dbname = os.getenv("DAIZU_DATABASE_NAME", "doj")
self.user = os.getenv("DAIZU_DATABASE_USERNAME", "daizu")
self.password = os.getenv("DAIZU_DATABASE_PASSWORD", "<PASSWORD>")
try:
self.connection = psycopg2.connect(
host=self.host,
dbname=self.dbname,
user=self.user,
password=self.password,
)
except psycopg2.OperationalError as err:
raise err
# self.cursor = self.connection.cursor()
def execute(self, query: str, *args) -> Result:
try:
with self.connection.cursor() as cursor:
cursor.execute(query, args)
lastrowid = cursor.lastrowid
self.connection.commit()
except psycopg2.errors.InFailedSqlTransaction as e:
logger.error(e)
self.connection.rollback()
raise SqlTransactionException()
return lastrowid
def query(self, query: str, *args) -> Cursor:
try:
with self.connection.cursor() as cursor:
cursor.execute(query, *args)
data = cursor.fetchall()
except psycopg2.errors.InFailedSqlTransaction as e:
logger.error(e)
self.connection.rollback()
raise SqlTransactionException()
return Cursor(data)
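# Example usage (an illustrative sketch; the table and column names are made up). Connection
# settings come from the DAIZU_DATABASE_* environment variables read in __init__.
#
#     handler = SqlHandler()
#     cursor = handler.query("SELECT id, name FROM users WHERE id = %s", (1,))
#     row = cursor.fetch_one()
#     last_id = handler.execute("INSERT INTO users (name) VALUES (%s)", "alice")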
| 2.390625 | 2 |
virtualisation/wrapper/parser/xmlparser.py | CityPulse/CP_Resourcemanagement | 2 | 5812 | <reponame>CityPulse/CP_Resourcemanagement
from virtualisation.clock.abstractclock import AbstractClock
__author__ = '<NAME> (<EMAIL>)'
from virtualisation.wrapper.parser.abstractparser import AbstractParser
from virtualisation.misc.jsonobject import JSONObject as JOb
import datetime as dt
class XMLParser(AbstractParser):
"""
Maps a list of values read by a CSVReader with a given naming list
"""
def __init__(self, wrapper):
super(XMLParser, self).__init__(wrapper)
self.timestampcell = -1
if self.wrapper.getSensorDescription().isTimestampedStream():
try:
self.timestampcell = -1
self.timestampformat = self.wrapper.getSensorDescription().timestamp.format
except ValueError:
self.timestampcell = -1
def parse(self, data, clock):
raise Exception("not implemented yet!")
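        # NOTE: the statements below are currently unreachable placeholder code; they sketch
        # the intended guard for empty input once the raise above is removed.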
if not data: # nothing received or nothing in the history -> nothing to parse
return None
| 2.46875 | 2 |
plaso/formatters/interface.py | jonathan-greig/plaso | 1,253 | 5813 | <gh_stars>1000+
# -*- coding: utf-8 -*-
"""This file contains the event formatters interface classes.
The l2t_csv and other formats are dependent on a message field,
referred to as description_long and description_short in l2t_csv.
Plaso no longer stores these field explicitly.
A formatter, with a format string definition, is used to convert
the event object values into a formatted string that is similar
to the description_long and description_short field.
"""
import abc
import re
from plaso.formatters import logger
class EventFormatterHelper(object):
"""Base class of helper for formatting event data."""
@abc.abstractmethod
def FormatEventValues(self, event_values):
"""Formats event values using the helper.
Args:
event_values (dict[str, object]): event values.
"""
class BooleanEventFormatterHelper(EventFormatterHelper):
"""Helper for formatting boolean event data.
Attributes:
input_attribute (str): name of the attribute that contains the boolean
input value.
output_attribute (str): name of the attribute where the boolean output
value should be stored.
value_if_false (str): output value if the boolean input value is False.
value_if_true (str): output value if the boolean input value is True.
"""
def __init__(
self, input_attribute=None, output_attribute=None, value_if_false=None,
value_if_true=None):
"""Initialized a helper for formatting boolean event data.
Args:
input_attribute (Optional[str]): name of the attribute that contains
the boolean input value.
output_attribute (Optional[str]): name of the attribute where the
boolean output value should be stored.
value_if_false (str): output value if the boolean input value is False.
value_if_true (str): output value if the boolean input value is True.
"""
super(BooleanEventFormatterHelper, self).__init__()
self.input_attribute = input_attribute
self.output_attribute = output_attribute
self.value_if_false = value_if_false
self.value_if_true = value_if_true
def FormatEventValues(self, event_values):
"""Formats event values using the helper.
Args:
event_values (dict[str, object]): event values.
"""
input_value = event_values.get(self.input_attribute, None)
if input_value:
output_value = self.value_if_true
else:
output_value = self.value_if_false
event_values[self.output_attribute] = output_value
class CustomEventFormatterHelper(EventFormatterHelper):
"""Base class for a helper for custom formatting of event data."""
DATA_TYPE = ''
IDENTIFIER = ''
@abc.abstractmethod
def FormatEventValues(self, event_values):
"""Formats event values using the helper.
Args:
event_values (dict[str, object]): event values.
"""
class EnumerationEventFormatterHelper(EventFormatterHelper):
"""Helper for formatting enumeration event data.
Attributes:
default (str): default value.
input_attribute (str): name of the attribute that contains the enumeration
input value.
output_attribute (str): name of the attribute where the enumeration output
value should be stored.
values (dict[str, str]): mapping of enumeration input and output values.
"""
def __init__(
self, default=None, input_attribute=None, output_attribute=None,
values=None):
"""Initialized a helper for formatting enumeration event data.
Args:
default (Optional[str]): default value.
input_attribute (Optional[str]): name of the attribute that contains
the enumeration input value.
output_attribute (Optional[str]): name of the attribute where the
enumeration output value should be stored.
values (Optional[dict[str, str]]): mapping of enumeration input and
output values.
"""
super(EnumerationEventFormatterHelper, self).__init__()
self.default = default
self.input_attribute = input_attribute
self.output_attribute = output_attribute
self.values = values or {}
def FormatEventValues(self, event_values):
"""Formats event values using the helper.
If default value is None and there is no corresponding enumeration value
then the original value is used.
Args:
event_values (dict[str, object]): event values.
"""
input_value = event_values.get(self.input_attribute, None)
if input_value is not None:
default_value = self.default
if default_value is None:
default_value = input_value
event_values[self.output_attribute] = self.values.get(
input_value, default_value)
class FlagsEventFormatterHelper(EventFormatterHelper):
"""Helper for formatting flags event data.
Attributes:
input_attribute (str): name of the attribute that contains the flags
input value.
output_attribute (str): name of the attribute where the flags output
value should be stored.
values (dict[str, str]): mapping of flags input and output values.
"""
def __init__(
self, input_attribute=None, output_attribute=None, values=None):
"""Initialized a helper for formatting flags event data.
Args:
input_attribute (Optional[str]): name of the attribute that contains
the flags input value.
output_attribute (Optional[str]): name of the attribute where the
flags output value should be stored.
values (Optional[dict[str, str]]): mapping of flags input and output
values.
"""
super(FlagsEventFormatterHelper, self).__init__()
self.input_attribute = input_attribute
self.output_attribute = output_attribute
self.values = values or {}
def FormatEventValues(self, event_values):
"""Formats event values using the helper.
Args:
event_values (dict[str, object]): event values.
"""
input_value = event_values.get(self.input_attribute, None)
if input_value is None:
return
output_values = []
for flag, mapped_value in self.values.items():
if flag & input_value:
output_values.append(mapped_value)
event_values[self.output_attribute] = ', '.join(output_values)
class EventFormatter(object):
"""Base class to format event values.
Attributes:
custom_helpers (list[str]): identifiers of custom event formatter helpers.
helpers (list[EventFormatterHelper]): event formatter helpers.
"""
# The format string can be defined as:
# {name}, {name:format}, {name!conversion}, {name!conversion:format}
_FORMAT_STRING_ATTRIBUTE_NAME_RE = re.compile(
'{([a-z][a-zA-Z0-9_]*)[!]?[^:}]*[:]?[^}]*}')
def __init__(self, data_type='internal'):
"""Initializes an event formatter.
Args:
data_type (Optional[str]): unique identifier for the event data supported
by the formatter.
"""
super(EventFormatter, self).__init__()
self._data_type = data_type
self._format_string_attribute_names = None
self.custom_helpers = []
self.helpers = []
@property
def data_type(self):
"""str: unique identifier for the event data supported by the formatter."""
return self._data_type.lower()
def _FormatMessage(self, format_string, event_values):
"""Determines the formatted message.
Args:
format_string (str): message format string.
event_values (dict[str, object]): event values.
Returns:
str: formatted message.
"""
try:
message_string = format_string.format(**event_values)
except KeyError as exception:
data_type = event_values.get('data_type', 'N/A')
display_name = event_values.get('display_name', 'N/A')
event_identifier = event_values.get('uuid', 'N/A')
parser_chain = event_values.get('parser', 'N/A')
error_message = (
'unable to format string: "{0:s}" missing required event '
'value: {1!s}').format(format_string, exception)
error_message = (
'Event: {0:s} data type: {1:s} display name: {2:s} '
'parser chain: {3:s} with error: {4:s}').format(
event_identifier, data_type, display_name, parser_chain,
error_message)
logger.error(error_message)
attribute_values = []
for attribute, value in event_values.items():
attribute_values.append('{0:s}: {1!s}'.format(attribute, value))
message_string = ' '.join(attribute_values)
except UnicodeDecodeError as exception:
data_type = event_values.get('data_type', 'N/A')
display_name = event_values.get('display_name', 'N/A')
event_identifier = event_values.get('uuid', 'N/A')
parser_chain = event_values.get('parser', 'N/A')
error_message = 'Unicode decode error: {0!s}'.format(exception)
error_message = (
'Event: {0:s} data type: {1:s} display name: {2:s} '
'parser chain: {3:s} with error: {4:s}').format(
event_identifier, data_type, display_name, parser_chain,
error_message)
logger.error(error_message)
message_string = ''
# Strip carriage return and linefeed form the message strings.
# Using replace function here because it is faster than re.sub() or
# string.strip().
return message_string.replace('\r', '').replace('\n', '')
def FormatEventValues(self, event_values):
"""Formats event values using the helpers.
Args:
event_values (dict[str, object]): event values.
"""
for helper in self.helpers:
helper.FormatEventValues(event_values)
@abc.abstractmethod
def GetFormatStringAttributeNames(self):
"""Retrieves the attribute names in the format string.
Returns:
set(str): attribute names.
"""
# pylint: disable=unused-argument
def AddCustomHelper(
self, identifier, input_attribute=None, output_attribute=None):
"""Adds a custom event formatter helper.
Args:
identifier (str): identifier.
input_attribute (Optional[str]): name of the attribute that contains
the input value.
output_attribute (Optional[str]): name of the attribute where the
output value should be stored.
"""
self.custom_helpers.append(identifier)
def AddHelper(self, helper):
"""Adds an event formatter helper.
Args:
helper (EventFormatterHelper): event formatter helper to add.
"""
self.helpers.append(helper)
@abc.abstractmethod
def GetMessage(self, event_values):
"""Determines the message.
Args:
event_values (dict[str, object]): event values.
Returns:
str: message.
"""
@abc.abstractmethod
def GetMessageShort(self, event_values):
"""Determines the short message.
Args:
event_values (dict[str, object]): event values.
Returns:
str: short message.
"""
class BasicEventFormatter(EventFormatter):
"""Format event values using a message format string.
Attributes:
custom_helpers (list[str]): identifiers of custom event formatter helpers.
helpers (list[EventFormatterHelper]): event formatter helpers.
"""
def __init__(
self, data_type='basic', format_string=None, format_string_short=None):
"""Initializes a basic event formatter.
The syntax of the format strings is similar to that of format() where
the place holder for a certain event object attribute is defined as
{attribute_name}.
Args:
data_type (Optional[str]): unique identifier for the event data supported
by the formatter.
format_string (Optional[str]): (long) message format string.
format_string_short (Optional[str]): short message format string.
"""
super(BasicEventFormatter, self).__init__(data_type=data_type)
self._format_string_attribute_names = None
self._format_string = format_string
self._format_string_short = format_string_short
def GetFormatStringAttributeNames(self):
"""Retrieves the attribute names in the format string.
Returns:
set(str): attribute names.
"""
if self._format_string_attribute_names is None:
self._format_string_attribute_names = (
self._FORMAT_STRING_ATTRIBUTE_NAME_RE.findall(
self._format_string))
return set(self._format_string_attribute_names)
def GetMessage(self, event_values):
"""Determines the message.
Args:
event_values (dict[str, object]): event values.
Returns:
str: message.
"""
return self._FormatMessage(self._format_string, event_values)
def GetMessageShort(self, event_values):
"""Determines the short message.
Args:
event_values (dict[str, object]): event values.
Returns:
str: short message.
"""
if self._format_string_short:
format_string = self._format_string_short
else:
format_string = self._format_string
short_message_string = self._FormatMessage(format_string, event_values)
# Truncate the short message string if necessary.
if len(short_message_string) > 80:
short_message_string = '{0:s}...'.format(short_message_string[:77])
return short_message_string
class ConditionalEventFormatter(EventFormatter):
"""Conditionally format event values using format string pieces."""
_DEFAULT_FORMAT_STRING_SEPARATOR = ' '
def __init__(
self, data_type='conditional', format_string_pieces=None,
format_string_separator=None, format_string_short_pieces=None):
"""Initializes a conditional event formatter.
The syntax of the format strings pieces is similar to of the basic event
formatter (BasicEventFormatter). Every format string piece should contain
at maximum one unique attribute name. Format string pieces without an
attribute name are supported.
Args:
data_type (Optional[str]): unique identifier for the event data supported
by the formatter.
format_string_pieces (Optional[list[str]]): (long) message format string
pieces.
format_string_separator (Optional[str]): string by which separate format
string pieces should be joined.
format_string_short_pieces (Optional[list[str]]): short message format
string pieces.
"""
if format_string_separator is None:
format_string_separator = self._DEFAULT_FORMAT_STRING_SEPARATOR
super(ConditionalEventFormatter, self).__init__(data_type=data_type)
self._format_string_pieces = format_string_pieces or []
self._format_string_pieces_map = []
self._format_string_separator = format_string_separator
self._format_string_short_pieces = format_string_short_pieces or []
self._format_string_short_pieces_map = []
def _CreateFormatStringMap(
self, format_string_pieces, format_string_pieces_map):
"""Creates a format string map.
The format string pieces map is a list containing the attribute name
per format string piece. E.g. ["Description: {description}"] would be
mapped to: [0] = "description". If the string piece does not contain
an attribute name it is treated as text that does not needs formatting.
Args:
format_string_pieces (list[str]): format string pieces.
format_string_pieces_map (list[str]): format string pieces map.
Raises:
RuntimeError: when an invalid format string piece is encountered.
"""
for format_string_piece in format_string_pieces:
attribute_names = self._FORMAT_STRING_ATTRIBUTE_NAME_RE.findall(
format_string_piece)
if len(set(attribute_names)) > 1:
raise RuntimeError((
'Invalid format string piece: [{0:s}] contains more than 1 '
'attribute name.').format(format_string_piece))
if not attribute_names:
# The text format string piece is stored as an empty map entry to keep
# the index in the map equal to the format string pieces.
attribute_name = ''
else:
attribute_name = attribute_names[0]
format_string_pieces_map.append(attribute_name)
def _CreateFormatStringMaps(self):
"""Creates the format string maps.
Maps are built of the string pieces and their corresponding attribute
name to optimize conditional string formatting.
Raises:
RuntimeError: when an invalid format string piece is encountered.
"""
self._format_string_pieces_map = []
self._CreateFormatStringMap(
self._format_string_pieces, self._format_string_pieces_map)
self._format_string_short_pieces_map = []
self._CreateFormatStringMap(
self._format_string_short_pieces, self._format_string_short_pieces_map)
def _ConditionalFormatMessage(
self, format_string_pieces, format_string_pieces_map, event_values):
"""Determines the conditional formatted message.
Args:
format_string_pieces (dict[str, str]): format string pieces.
format_string_pieces_map (list[int, str]): format string pieces map.
event_values (dict[str, object]): event values.
Returns:
str: conditional formatted message.
Raises:
RuntimeError: when an invalid format string piece is encountered.
"""
string_pieces = []
for map_index, attribute_name in enumerate(format_string_pieces_map):
if not attribute_name or event_values.get(
attribute_name, None) is not None:
string_pieces.append(format_string_pieces[map_index])
format_string = self._format_string_separator.join(string_pieces)
return self._FormatMessage(format_string, event_values)
def GetFormatStringAttributeNames(self):
"""Retrieves the attribute names in the format string.
Returns:
set(str): attribute names.
"""
if self._format_string_attribute_names is None:
self._format_string_attribute_names = []
for format_string_piece in self._format_string_pieces:
attribute_names = self._FORMAT_STRING_ATTRIBUTE_NAME_RE.findall(
format_string_piece)
if attribute_names:
self._format_string_attribute_names.extend(attribute_names)
return set(self._format_string_attribute_names)
def GetMessage(self, event_values):
"""Determines the message.
Args:
event_values (dict[str, object]): event values.
Returns:
str: message.
"""
if not self._format_string_pieces_map:
self._CreateFormatStringMaps()
return self._ConditionalFormatMessage(
self._format_string_pieces, self._format_string_pieces_map,
event_values)
def GetMessageShort(self, event_values):
"""Determines the short message.
Args:
event_values (dict[str, object]): event values.
Returns:
str: short message.
"""
if not self._format_string_pieces_map:
self._CreateFormatStringMaps()
if (self._format_string_short_pieces and
self._format_string_short_pieces != ['']):
format_string_pieces = self._format_string_short_pieces
format_string_pieces_map = self._format_string_short_pieces_map
else:
format_string_pieces = self._format_string_pieces
format_string_pieces_map = self._format_string_pieces_map
short_message_string = self._ConditionalFormatMessage(
format_string_pieces, format_string_pieces_map, event_values)
# Truncate the short message string if necessary.
if len(short_message_string) > 80:
short_message_string = '{0:s}...'.format(short_message_string[:77])
return short_message_string
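# A usage sketch for the conditional formatter above (the data type and
# attribute names are illustrative assumptions, not part of this module):
# formatter = ConditionalEventFormatter(
#     data_type='example:log:entry',
#     format_string_pieces=[
#         'Description: {description}', 'PID: {pid}', '(fixed text)'])
# message = formatter.GetMessage({'description': 'something happened'})
# Pieces whose attribute is missing from the event values (here {pid}) are
# skipped, so the result is 'Description: something happened (fixed text)'.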
| 2.703125 | 3 |
python_program/condition.py | LiuKaiqiang94/PyStudyExample | 5 | 5814 | <reponame>LiuKaiqiang94/PyStudyExample
def main():
val=int(input("input a num"))
if val<10:
print("A")
elif val<20:
print("B")
elif val<30:
print("C")
else:
print("D")
main()
| 3.890625 | 4 |
Annotated_video/test/Annotatedvideo_worm.py | Rukaume/LRCN | 1 | 5815 | <filename>Annotated_video/test/Annotatedvideo_worm.py
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 4 22:27:11 2020
@author: Miyazaki
"""
imdir = "C:/Users/Miyazaki/Desktop/hayashi_lab/20200527_lethargus_analysis/renamed_pillar_chamber-N2/chamber3"
resultdir= "C:/Users/Miyazaki/Desktop/hayashi_lab/20200527_lethargus_analysis/renamed_pillar_chamber-N2/result0918.csv"
import os, cv2, shutil
from tqdm import tqdm
import pandas as pd
os.chdir(imdir)
os.makedirs("../annotatedimages", exist_ok = True)
imlist = os.listdir("./")
imlist = [i for i in imlist if os.path.splitext(i)[1] == '.jpg' \
or os.path.splitext(i)[1] == '.png']
imlist.sort()
result = pd.read_csv(resultdir)
font = cv2.FONT_HERSHEY_SIMPLEX
for i in tqdm(range(len(imlist))):
if int(result.loc[i]) == 0:
tempim = cv2.imread(imlist[i])
tempim = cv2.putText(tempim,'quiescent',(10,500), font, 1,(255,0,0),2,cv2.LINE_AA)
cv2.imwrite('../annotatedimages/{}'.format(imlist[i]), tempim)
elif int(result.loc[i]) == 1:
tempim = cv2.imread(imlist[i])
tempim = cv2.putText(tempim,'dwell',(10,500), font, 1,(0,255,0),2,cv2.LINE_AA)
cv2.imwrite('../annotatedimages/{}'.format(imlist[i]), tempim)
elif int(result.loc[i]) == 2:
tempim = cv2.imread(imlist[i])
tempim = cv2.putText(tempim,'forward',(10,500), font, 1,(0,0,255),2,cv2.LINE_AA)
cv2.imwrite('../annotatedimages/{}'.format(imlist[i]), tempim)
elif int(result.loc[i]) == 3:
tempim = cv2.imread(imlist[i])
tempim = cv2.putText(tempim,'backward',(10,500), font, 1,(100,100,0),2,cv2.LINE_AA)
cv2.imwrite('../annotatedimages/{}'.format(imlist[i]), tempim)
else:
pass
| 2.3125 | 2 |
emilia/modules/math.py | masterisira/ELIZA_OF-master | 0 | 5816 | <gh_stars>0
from typing import List
import requests
from telegram import Message, Update, Bot, MessageEntity
from telegram.ext import CommandHandler, run_async
from emilia import dispatcher
from emilia.modules.disable import DisableAbleCommandHandler
from emilia.modules.helper_funcs.alternate import send_message
import pynewtonmath as newton
import math
@run_async
def simplify(update, context):
    expression = ' '.join(context.args)
    message = update.effective_message
    message.reply_text(newton.simplify(expression))
@run_async
def factor(update, context):
    expression = ' '.join(context.args)
    message = update.effective_message
    message.reply_text(newton.factor(expression))
@run_async
def derive(update, context):
    expression = ' '.join(context.args)
    message = update.effective_message
    message.reply_text(newton.derive(expression))
@run_async
def integrate(update, context):
    expression = ' '.join(context.args)
    message = update.effective_message
    message.reply_text(newton.integrate(expression))
@run_async
def zeroes(update, context):
    expression = ' '.join(context.args)
    message = update.effective_message
    message.reply_text(newton.zeroes(expression))
@run_async
def tangent(update, context):
    expression = ' '.join(context.args)
    message = update.effective_message
    message.reply_text(newton.tangent(expression))
@run_async
def area(update, context):
    expression = ' '.join(context.args)
    message = update.effective_message
    message.reply_text(newton.area(expression))
@run_async
def cos(update, context):
args = context.args
message = update.effective_message
message.reply_text(math.cos(int(args[0])))
@run_async
def sin(update, context):
args = context.args
message = update.effective_message
message.reply_text(math.sin(int(args[0])))
@run_async
def tan(update, context):
args = context.args
message = update.effective_message
message.reply_text(math.tan(int(args[0])))
@run_async
def arccos(update, context):
args = context.args
message = update.effective_message
message.reply_text(math.acos(int(args[0])))
@run_async
def arcsin(update, context):
args = context.args
message = update.effective_message
message.reply_text(math.asin(int(args[0])))
@run_async
def arctan(update, context):
args = context.args
message = update.effective_message
message.reply_text(math.atan(int(args[0])))
@run_async
def abs(update, context):
args = context.args
message = update.effective_message
message.reply_text(math.fabs(int(args[0])))
@run_async
def log(update, context):
args = context.args
message = update.effective_message
message.reply_text(math.log(int(args[0])))
__help__ = """
Under development... more features soon
- /cos: Cosine `/cos pi`
- /sin: Sine `/sin 0`
- /tan: Tangent `/tan 0`
- /arccos: Inverse Cosine `/arccos 1`
- /arcsin: Inverse Sine `/arcsin 0`
- /arctan: Inverse Tangent `/arctan 0`
- /abs: Absolute Value `/abs -1`
- /log: Logarithm `/log 8`
__Keep in mind__: To find the tangent line of a function at a certain x value, send the request as c|f(x), where c is the given x value and f(x) is the function expression; the separator is a vertical bar '|'. For example, `/tangent 2|x^3` finds the tangent line to x^3 at x = 2.
To find the area under a function, send the request as c:d|f(x) where c is the starting x value, d is the ending x value, and f(x) is the function under which you want the curve between the two x values.
To compute fractions, enter expressions as numerator(over)denominator. For example, to process 2/4 you must send in your expression as 2(over)4. The result expression will be in standard math notation (1/2, 3/4).
"""
SIMPLIFY_HANDLER = DisableAbleCommandHandler("math", simplify, pass_args=True)
FACTOR_HANDLER = DisableAbleCommandHandler("factor", factor, pass_args=True)
DERIVE_HANDLER = DisableAbleCommandHandler("derive", derive, pass_args=True)
INTEGRATE_HANDLER = DisableAbleCommandHandler("integrate", integrate, pass_args=True)
ZEROES_HANDLER = DisableAbleCommandHandler("zeroes", zeroes, pass_args=True)
TANGENT_HANDLER = DisableAbleCommandHandler("tangent", tangent, pass_args=True)
AREA_HANDLER = DisableAbleCommandHandler("area", area, pass_args=True)
COS_HANDLER = DisableAbleCommandHandler("cos", cos, pass_args=True)
SIN_HANDLER = DisableAbleCommandHandler("sin", sin, pass_args=True)
TAN_HANDLER = DisableAbleCommandHandler("tan", tan, pass_args=True)
ARCCOS_HANDLER = DisableAbleCommandHandler("arccos", arccos, pass_args=True)
ARCSIN_HANDLER = DisableAbleCommandHandler("arcsin", arcsin, pass_args=True)
ARCTAN_HANDLER = DisableAbleCommandHandler("arctan", arctan, pass_args=True)
ABS_HANDLER = DisableAbleCommandHandler("abs", abs, pass_args=True)
LOG_HANDLER = DisableAbleCommandHandler("log", log, pass_args=True)
dispatcher.add_handler(SIMPLIFY_HANDLER)
dispatcher.add_handler(FACTOR_HANDLER)
dispatcher.add_handler(DERIVE_HANDLER)
dispatcher.add_handler(INTEGRATE_HANDLER)
dispatcher.add_handler(ZEROES_HANDLER)
dispatcher.add_handler(TANGENT_HANDLER)
dispatcher.add_handler(AREA_HANDLER)
dispatcher.add_handler(COS_HANDLER)
dispatcher.add_handler(SIN_HANDLER)
dispatcher.add_handler(TAN_HANDLER)
dispatcher.add_handler(ARCCOS_HANDLER)
dispatcher.add_handler(ARCSIN_HANDLER)
dispatcher.add_handler(ARCTAN_HANDLER)
dispatcher.add_handler(ABS_HANDLER)
dispatcher.add_handler(LOG_HANDLER)
__mod_name__ = "Math"
__command_list__ = ["math","factor","derive","integrate","zeroes","tangent","area","cos","sin","tan","arccos","arcsin","arctan","abs","log"]
__handlers__ = [
SIMPLIFY_HANDLER,FACTOR_HANDLER,DERIVE_HANDLER,INTEGRATE_HANDLER,TANGENT_HANDLER,ZEROES_HANDLER,AREA_HANDLER,COS_HANDLER,SIN_HANDLER,TAN_HANDLER,ARCCOS_HANDLER,ARCSIN_HANDLER,ARCTAN_HANDLER,ABS_HANDLER,LOG_HANDLER
]
| 2.21875 | 2 |
services/IAm.py | matteobjornsson/serverless-rock-paper-scissors | 0 | 5817 | #
# Created on Thu Apr 22 2021
# <NAME>
#
import boto3
from botocore.exceptions import ClientError
import logging
logging.basicConfig(filename="rps.log", level=logging.INFO)
iam_resource = boto3.resource("iam")
sts_client = boto3.client("sts")
def create_role(
iam_role_name: str, assume_role_policy_json: str, policy_arns: list
) -> iam_resource.Role:
"""
Create an IAM role with a given policy.
:param assume_role_policy_json: A json string that represents the assume
role policy defining what resources are allowed to assume the role.
:param policy_arns: a list of strings representing existing policy arns to
also attach to the role
:return: IAM role object
This method was adapted from the create_iam_role_for_lambda() method found here:
https://docs.aws.amazon.com/code-samples/latest/catalog/python-lambda-boto_client_examples-lambda_basics.py.html
"""
try:
role = iam_resource.create_role(
RoleName=iam_role_name,
AssumeRolePolicyDocument=assume_role_policy_json,
)
# wait for the creation to complete
iam_resource.meta.client.get_waiter("role_exists").wait(RoleName=iam_role_name)
# attach the additional supplied policies
for arn in policy_arns:
role.attach_policy(PolicyArn=arn)
except ClientError as error:
if error.response["Error"]["Code"] == "EntityAlreadyExists":
role = iam_resource.Role(iam_role_name)
logging.warning("The role %s already exists. Using it.", iam_role_name)
return role
else:
logging.error(error.response["Error"]["Message"])
logging.exception(
"Couldn't create role %s or attach policy %s.",
iam_role_name,
str(policy_arns),
)
raise
else:
logging.info("Created IAM role %s.", role.name)
logging.info("Attached policies %s to role %s.", policy_arns, role.name)
return role
def create_policy(policy_name: str, policy_json: str) -> iam_resource.Policy:
"""
Create an IAM policy of given name and json description.
Policies define permissions in AWS and can be associated with IAM roles.
    :param policy_json: must be a valid policy JSON string
:return: IAM Policy object
"""
try:
policy = iam_resource.create_policy(
PolicyName=policy_name, PolicyDocument=policy_json
)
except ClientError as error:
if error.response["Error"]["Code"] == "EntityAlreadyExists":
policy = get_policy_by_name(policy_name)
logging.warning("The policy %s already exists. Using it.", policy.arn)
return policy
else:
logging.error(error.response["Error"]["Message"])
logging.exception("Couldn't create policy %s", policy_name)
raise
else:
logging.info("Created Policy '%s'", policy_name)
return policy
def get_policy_by_name(policy_name: str) -> iam_resource.Policy:
"""
Get an existing policy by name.
:return: IAM Policy object
"""
# sts provides the account number of the current credentials
account_id = sts_client.get_caller_identity()["Account"]
# policy arns consist of an account id and policy name
policy_arn = f"arn:aws:iam::{account_id}:policy/{policy_name}"
# policies are created in the Python SDK via their arn
policy = iam_resource.Policy(policy_arn)
return policy
def delete_role(iam_role) -> dict:
"""
Delete a role.
:param iam_role: this parameter is an IAM role object, such as returned
by create_role()
"""
try:
# remove all policies before deleting role
for policy in iam_role.attached_policies.all():
policy.detach_role(RoleName=iam_role.name)
response = iam_role.delete()
except ClientError as error:
logging.error(error.response["Error"]["Message"])
logging.error("Couldn't delete role %s", iam_role.name)
else:
logging.info("Deleted role '%s'", iam_role.name)
return response
def delete_policy(iam_policy) -> dict:
"""
    Delete a policy.
:param iam_policy: this parameter is an IAM policy object, such as returned
by create_policy()
"""
try:
response = iam_policy.delete()
except ClientError as error:
logging.error(error.response["Error"]["Message"])
logging.error("Couldn't delete policy %s", iam_policy.arn)
else:
logging.info("Deleted policy '%s'", iam_policy.arn)
return response
if __name__ == "__main__":
# brief functionality test with delete() cleanup at end
policy_json_file = "./policy/lambda_policy.json"
with open(policy_json_file) as file:
policy_json = file.read()
policy_name = "test_policy"
policy = create_policy(policy_name, policy_json)
print("new policy arn: ", policy.arn)
policy.delete()
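    # A hedged sketch of role creation with the helpers above, left commented
    # out to avoid creating extra AWS resources during this brief test; the
    # trust-policy path and managed-policy ARN below are assumptions used only
    # for illustration.
    # with open("./policy/lambda_assume_role.json") as file:
    #     assume_role_json = file.read()
    # role = create_role(
    #     "test_role",
    #     assume_role_json,
    #     ["arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole"],
    # )
    # delete_role(role)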
| 2.65625 | 3 |
stograde/common/run_status.py | babatana/stograde | 0 | 5818 | <reponame>babatana/stograde
from enum import auto, Enum
class RunStatus(Enum):
SUCCESS = auto()
CALLED_PROCESS_ERROR = auto()
FILE_NOT_FOUND = auto()
PROCESS_LOOKUP_ERROR = auto()
TIMEOUT_EXPIRED = auto()
| 2.25 | 2 |
recsys/__init__.py | shenghuiliuu/recsys | 50 | 5819 | <reponame>shenghuiliuu/recsys
__all__ = ['cross_validation',
'metrics',
'datasets',
'recommender']
| 1.015625 | 1 |
audiomate/annotations/label_list.py | CostanzoPablo/audiomate | 133 | 5820 | import collections
import copy
import intervaltree
from .label import Label
class LabelList:
"""
Represents a list of labels which describe an utterance.
An utterance can have multiple label-lists.
Args:
idx (str): An unique identifier for the label-list
within a corpus for one utterance.
labels (list): The list containing the
:py:class:`audiomate.annotations.Label`.
Attributes:
utterance (Utterance): The utterance this label-list is belonging to.
label_tree (IntervalTree): The interval-tree storing the labels.
Example:
>>> label_list = LabelList(idx='transcription', labels=[
>>> Label('this', 0, 2),
>>> Label('is', 2, 4),
>>> Label('timmy', 4, 8)
>>> ])
"""
__slots__ = ['idx', 'label_tree', 'utterance']
def __init__(self, idx='default', labels=None):
self.idx = idx
self.utterance = None
self.label_tree = intervaltree.IntervalTree()
if labels is not None:
self.update(labels)
def __eq__(self, other):
data_this = (self.idx, self.label_tree)
data_other = (other.idx, other.label_tree)
return data_this == data_other
def __iter__(self):
for interval in self.label_tree:
yield interval.data
def __len__(self):
return self.label_tree.__len__()
def __copy__(self):
# utterance is ignored intentionally,
# since it is kind of a weak ref
return LabelList(
idx=self.idx,
labels=[iv.data for iv in self.label_tree]
)
def __deepcopy__(self, memo):
# utterance is ignored intentionally,
# since it is kind of a weak ref
return LabelList(
idx=self.idx,
labels=copy.deepcopy([iv.data for iv in self.label_tree], memo)
)
@property
def labels(self):
""" Return list of labels. """
return list(self)
@property
def start(self):
""" Return start of the earliest starting label (lower bound). """
return self.label_tree.begin()
@property
def end(self):
""" Return end of the lastly ending label (upper bound). """
return self.label_tree.end()
@property
def total_length(self):
"""
Return the cumulative length of all labels
(Number of characters).
"""
return sum(label.length for label in self.labels)
#
# Alteration
#
def add(self, label):
"""
Add a label to the end of the list.
Args:
label (Label): The label to add.
"""
label.label_list = self
self.label_tree.addi(label.start, label.end, label)
def addl(self, value, start=0.0, end=float('inf')):
""" Shortcut for ``add(Label(value, start, end))``. """
self.add(Label(value, start=start, end=end))
def update(self, labels):
"""
Add a list of labels to the end of the list.
Args:
labels (list): Labels to add.
"""
ivs = []
for label in labels:
label.label_list = self
ivs.append(intervaltree.Interval(label.start, label.end, label))
self.label_tree.update(ivs)
def apply(self, fn):
"""
Apply the given function `fn` to every label in this label list.
`fn` is a function of one argument that receives the current label
which can then be edited in place.
Args:
fn (func): Function to apply to every label
Example:
>>> ll = LabelList(labels=[
... Label('a_label', 1.0, 2.0),
... Label('another_label', 2.0, 3.0)
... ])
>>> def shift_labels(label):
... label.start += 1.0
... label.end += 1.0
...
>>> ll.apply(shift_labels)
>>> ll.labels
[Label(a_label, 2.0, 3.0), Label(another_label, 3.0, 4.0)]
"""
for label in self.labels:
fn(label)
def merge_overlaps(self, threshold=0.0):
"""
Merge overlapping labels with the same value.
Two labels are considered overlapping,
if ``l2.start - l1.end < threshold``.
Args:
threshold (float): Maximal distance between two labels
to be considered as overlapping.
(default: 0.0)
Example:
>>> ll = LabelList(labels=[
... Label('a_label', 1.0, 2.0),
... Label('a_label', 1.5, 2.7),
... Label('b_label', 1.0, 2.0),
... ])
            >>> ll.merge_overlaps()
>>> ll.labels
[
Label('a_label', 1.0, 2.7),
Label('b_label', 1.0, 2.0),
]
"""
updated_labels = []
all_intervals = self.label_tree.copy()
# recursivly find a group of overlapping labels with the same value
def recursive_overlaps(interval):
range_start = interval.begin - threshold
range_end = interval.end + threshold
direct_overlaps = all_intervals.overlap(range_start, range_end)
all_overlaps = [interval]
all_intervals.discard(interval)
for overlap in direct_overlaps:
if overlap.data.value == interval.data.value:
all_overlaps.extend(recursive_overlaps(overlap))
return all_overlaps
# For every remaining interval
# - Find overlapping intervals recursively
# - Remove them
# - Create a concatenated new label
while not all_intervals.is_empty():
next_interval = list(all_intervals)[0]
overlapping = recursive_overlaps(next_interval)
ov_start = float('inf')
ov_end = 0.0
ov_value = next_interval.data.value
for overlap in overlapping:
ov_start = min(ov_start, overlap.begin)
ov_end = max(ov_end, overlap.end)
all_intervals.discard(overlap)
updated_labels.append(Label(
ov_value,
ov_start,
ov_end
))
# Replace the old labels with the updated ones
self.label_tree.clear()
self.update(updated_labels)
#
# Statistics
#
def label_total_duration(self):
"""
Return for each distinct label value the total duration of
all occurrences.
Returns:
dict: A dictionary containing for every label-value (key)
the total duration in seconds (value).
Example:
>>> ll = LabelList(labels=[
>>> Label('a', 3, 5),
>>> Label('b', 5, 8),
>>> Label('a', 8, 10),
>>> Label('b', 10, 14),
>>> Label('a', 15, 18.5)
>>> ])
>>> ll.label_total_duration()
{'a': 7.5 'b': 7.0}
"""
durations = collections.defaultdict(float)
for label in self:
durations[label.value] += label.duration
return durations
def label_values(self):
"""
        Return a list of all occurring label values.
Returns:
list: Lexicographically sorted list (str) of label values.
Example:
>>> ll = LabelList(labels=[
>>> Label('a', 3.2, 4.5),
>>> Label('b', 5.1, 8.9),
>>> Label('c', 7.2, 10.5),
>>> Label('d', 10.5, 14),
>>> Label('d', 15, 18)
>>> ])
>>> ll.label_values()
['a', 'b', 'c', 'd']
"""
all_labels = {l.value for l in self}
return sorted(all_labels)
def label_count(self):
"""
Return for each label the number of occurrences within the list.
Returns:
dict: A dictionary containing for every label-value (key)
the number of occurrences (value).
Example:
>>> ll = LabelList(labels=[
>>> Label('a', 3.2, 4.5),
>>> Label('b', 5.1, 8.9),
>>> Label('a', 7.2, 10.5),
>>> Label('b', 10.5, 14),
>>> Label('a', 15, 18)
>>> ])
>>> ll.label_count()
{'a': 3 'b': 2}
"""
occurrences = collections.defaultdict(int)
for label in self:
occurrences[label.value] += 1
return occurrences
def all_tokens(self, delimiter=' '):
"""
        Return a set of all tokens occurring in the label-list.
Args:
delimiter (str): The delimiter used to split labels into tokens.
See :meth:`audiomate.annotations.Label.tokenized`
Returns:
:class:`set`: A set of distinct tokens.
"""
tokens = set()
for label in self:
tokens = tokens.union(set(label.tokenized(delimiter=delimiter)))
return tokens
#
# Query Label Values
#
def join(self, delimiter=' ', overlap_threshold=0.1):
"""
Return a string with all labels concatenated together.
The order of the labels is defined by the start of the label.
If the overlapping between two labels is greater than
``overlap_threshold``, an Exception is thrown.
Args:
delimiter (str): A string to join two consecutive labels.
overlap_threshold (float): Maximum overlap between two
consecutive labels.
Returns:
str: A string with all labels concatenated together.
Example:
>>> ll = LabelList(idx='some', labels=[
>>> Label('a', start=0, end=4),
>>> Label('b', start=3.95, end=6.0),
>>> Label('c', start=7.0, end=10.2),
>>> Label('d', start=10.3, end=14.0)
>>> ])
>>> ll.join(' - ')
'a - b - c - d'
"""
sorted_by_start = sorted(self.labels)
concat_values = []
last_label_end = None
for label in sorted_by_start:
if last_label_end is None or (last_label_end - label.start < overlap_threshold and last_label_end > 0):
concat_values.append(label.value)
last_label_end = label.end
else:
raise ValueError('Labels overlap, not able to define the correct order')
return delimiter.join(concat_values)
def tokenized(self, delimiter=' ', overlap_threshold=0.1):
"""
Return a ordered list of tokens based on all labels.
Joins all token from all labels (``label.tokenized()```).
If the overlapping between two labels is greater than
``overlap_threshold``, an Exception is thrown.
Args:
delimiter (str): The delimiter used to split labels into tokens.
(default: space)
overlap_threshold (float): Maximum overlap between two
consecutive labels.
Returns:
str: A list containing tokens of all labels ordered according
to the label order.
Example:
>>> ll = LabelList(idx='some', labels=[
>>> Label('a d q', start=0, end=4),
>>> Label('b', start=3.95, end=6.0),
>>> Label('c a', start=7.0, end=10.2),
>>> Label('f g', start=10.3, end=14.0)
>>> ])
>>> ll.tokenized(delimiter=' ', overlap_threshold=0.1)
['a', 'd', 'q', 'b', 'c', 'a', 'f', 'g']
"""
sorted_by_start = sorted(self.labels)
tokens = []
last_label_end = None
for label in sorted_by_start:
if last_label_end is None or (last_label_end - label.start < overlap_threshold and last_label_end > 0):
tokens.extend(label.tokenized(delimiter=delimiter))
last_label_end = label.end
else:
raise ValueError('Labels overlap, not able to define the correct order')
return tokens
#
# Restructuring
#
def separated(self):
"""
Create a separate Label-List for every distinct label-value.
Returns:
dict: A dictionary with distinct label-values as keys. Every value
is a LabelList containing only labels with the same value.
Example:
>>> ll = LabelList(idx='some', labels=[
>>> Label('a', start=0, end=4),
>>> Label('b', start=3.95, end=6.0),
>>> Label('a', start=7.0, end=10.2),
>>> Label('b', start=10.3, end=14.0)
>>> ])
            >>> s = ll.separated()
>>> s['a'].labels
[Label('a', start=0, end=4), Label('a', start=7.0, end=10.2)]
>>> s['b'].labels
[Label('b', start=3.95, end=6.0), Label('b', start=10.3, end=14.0)]
"""
separated_lls = collections.defaultdict(LabelList)
for label in self.labels:
separated_lls[label.value].add(label)
for ll in separated_lls.values():
ll.idx = self.idx
return separated_lls
def labels_in_range(self, start, end, fully_included=False):
"""
        Return a list of labels that are within the given range.
        Labels that only partially overlap the range are also included.
Args:
start(float): Start-time in seconds.
end(float): End-time in seconds.
fully_included(bool): If ``True``, only labels fully included
in the range are returned. Otherwise
also overlapping ones are returned.
(default ``False``)
Returns:
list: List of labels in the range.
Example:
>>> ll = LabelList(labels=[
>>> Label('a', 3.2, 4.5),
>>> Label('b', 5.1, 8.9),
>>> Label('c', 7.2, 10.5),
>>> Label('d', 10.5, 14)
>>>])
>>> ll.labels_in_range(6.2, 10.1)
[Label('b', 5.1, 8.9), Label('c', 7.2, 10.5)]
"""
if fully_included:
intervals = self.label_tree.envelop(start, end)
else:
intervals = self.label_tree.overlap(start, end)
return [iv.data for iv in intervals]
def ranges(self, yield_ranges_without_labels=False, include_labels=None):
"""
Generate all ranges of the label-list. A range is defined
as a part of the label-list for which the same labels are defined.
Args:
yield_ranges_without_labels(bool): If True also yields ranges for
which no labels are defined.
include_labels(list): If not empty, only the label values in
the list will be considered.
Returns:
generator: A generator which yields one range
(tuple start/end/list-of-labels) at a time.
Example:
>>> ll = LabelList(labels=[
>>> Label('a', 3.2, 4.5),
>>> Label('b', 5.1, 8.9),
>>> Label('c', 7.2, 10.5),
>>> Label('d', 10.5, 14)
>>>])
>>> ranges = ll.ranges()
>>> next(ranges)
            (3.2, 4.5, [<audiomate.annotations.Label at 0x1090527c8>])
>>> next(ranges)
(4.5, 5.1, [])
>>> next(ranges)
            (5.1, 7.2, [<audiomate.annotations.label.Label at 0x1090484c8>])
"""
tree_copy = self.label_tree.copy()
# Remove labels not included
if include_labels is not None:
for iv in list(tree_copy):
if iv.data.value not in include_labels:
tree_copy.remove(iv)
def reduce(x, y):
x.append(y)
return x
# Split labels when overlapping and merge equal ranges to a list of labels
tree_copy.split_overlaps()
tree_copy.merge_equals(data_reducer=reduce, data_initializer=[])
intervals = sorted(tree_copy)
last_end = intervals[0].begin
# yield range by range
for iv in intervals:
# yield an empty range if necessary
if yield_ranges_without_labels and iv.begin > last_end:
yield (last_end, iv.begin, [])
yield (iv.begin, iv.end, iv.data)
last_end = iv.end
def split(self, cutting_points, shift_times=False, overlap=0.0):
"""
Split the label-list into x parts and return them as new label-lists.
x is defined by the number of cutting-points
(``x == len(cutting_points) + 1``).
The result is a list of label-lists corresponding to each part.
Label-list 0 contains labels between ``0`` and ``cutting_points[0]``.
Label-list 1 contains labels between ``cutting_points[0]`` and
``cutting_points[1]``. And so on.
Args:
cutting_points(list): List of floats defining the points in seconds,
                                  where the label-list is split.
shift_times(bool): If True, start and end-time are shifted in
                               split label-lists. So the start is relative
to the cutting point and not to the beginning
of the original label-list.
overlap(float): Amount of overlap in seconds. This amount is
subtracted from a start-cutting-point, and added
to a end-cutting-point.
Returns:
            list: A list of :class:`audiomate.annotations.LabelList`.
Example:
>>> ll = LabelList(labels=[
>>> Label('a', 0, 5),
>>> Label('b', 5, 10),
>>> Label('c', 11, 15),
>>>])
>>>
>>> res = ll.split([4.1, 8.9, 12.0])
>>> len(res)
4
>>> res[0].labels
[Label('a', 0.0, 4.1)]
>>> res[1].labels
[
Label('a', 4.1, 5.0),
Label('b', 5.0, 8.9)
]
>>> res[2].labels
[
Label('b', 8.9, 10.0),
Label('c', 11.0, 12.0)
]
>>> res[3].labels
[Label('c', 12.0, 15.0)]
If ``shift_times = True``, the times are adjusted to be relative
to the cutting-points for every label-list but the first.
>>> ll = LabelList(labels=[
>>> Label('a', 0, 5),
>>> Label('b', 5, 10),
>>>])
>>>
>>> res = ll.split([4.6])
>>> len(res)
            2
>>> res[0].labels
[Label('a', 0.0, 4.6)]
>>> res[1].labels
[
Label('a', 0.0, 0.4),
Label('b', 0.4, 5.4)
]
"""
if len(cutting_points) == 0:
raise ValueError('At least one cutting-point is needed!')
# we have to loop in sorted order
cutting_points = sorted(cutting_points)
splits = []
iv_start = 0.0
for i in range(len(cutting_points) + 1):
if i < len(cutting_points):
iv_end = cutting_points[i]
else:
iv_end = float('inf')
# get all intervals intersecting range
intervals = self.label_tree.overlap(
iv_start - overlap,
iv_end + overlap
)
cp_splits = LabelList(idx=self.idx)
# Extract labels from intervals with updated times
for iv in intervals:
label = copy.deepcopy(iv.data)
label.start = max(0, iv_start - overlap, label.start)
label.end = min(iv_end + overlap, label.end)
if shift_times:
orig_start = max(0, iv_start - overlap)
label.start -= orig_start
label.end -= orig_start
cp_splits.add(label)
splits.append(cp_splits)
iv_start = iv_end
return splits
#
# Convenience Constructors
#
@classmethod
def create_single(cls, value, idx='default'):
"""
Create a label-list with a single label
containing the given value.
"""
return LabelList(idx=idx, labels=[
Label(value=value)
])
@classmethod
def with_label_values(cls, values, idx='default'):
"""
Create a new label-list containing labels with the given values.
All labels will have default start/end values of 0 and ``inf``.
Args:
values(list): List of values(str) that should be created and
appended to the label-list.
idx(str): The idx of the label-list.
Returns:
(LabelList): New label-list.
Example:
>>> ll = LabelList.with_label_values(['a', 'x', 'z'], idx='letters')
>>> ll.idx
'letters'
>>> ll.labels
[
Label('a', 0, inf),
Label('x', 0, inf),
Label('z', 0, inf),
]
"""
ll = LabelList(idx=idx)
for label_value in values:
ll.add(Label(label_value))
return ll
| 3.15625 | 3 |
src/views/age_results_widget.py | RubyMarsden/Crayfish | 0 | 5821 | <filename>src/views/age_results_widget.py
import matplotlib
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QHBoxLayout, QDialog, QPushButton, QWidget, QVBoxLayout, QLabel
matplotlib.use('QT5Agg')
import matplotlib.pyplot as plt
from models.data_key import DataKey
from utils import ui_utils
class AgeResultsWidget(QWidget):
def __init__(self, results_dialog):
QWidget.__init__(self)
self.results_dialog = results_dialog
layout = QHBoxLayout()
layout.addLayout(self._create_widget())
self.setLayout(layout)
results_dialog.sample_tree.tree.currentItemChanged.connect(lambda i, j: self.replot_graph())
results_dialog.configuration_changed.connect(self.replot_graph)
def _create_widget(self):
layout = QVBoxLayout()
layout.addWidget(QLabel("Sample and spot name"))
layout.addWidget(self._create_age_graph_and_point_selection())
return layout
def _create_age_graph_and_point_selection(self):
graph_and_points = QWidget()
layout = QVBoxLayout()
fig = plt.figure()
self.axes = plt.axes()
graph_widget, self.canvas = ui_utils.create_figure_widget(fig, self)
layout.addWidget(graph_widget)
graph_and_points.setLayout(layout)
return graph_and_points
###############
### Actions ###
###############
def replot_graph(self):
current_spot = self.results_dialog.sample_tree.current_spot()
config = self.results_dialog.configuration_widget.current_config
if config and current_spot:
self.plot_cps_graph(current_spot, config)
def plot_cps_graph(self, spot, config):
axis = self.axes
axis.clear()
if spot is None:
return
axis.spines['top'].set_visible(False)
axis.spines['right'].set_visible(False)
xs = []
ys = []
errors = []
if DataKey.AGES not in spot.data[config]:
# TODO plot words on graph
return
ages = spot.data[config][DataKey.AGES]
if len(ages) != 0:
for i, age in enumerate(ages):
if isinstance(age, str):
continue
x = i + 1
y, dy = age
xs.append(x)
if y is None:
ys.append(0)
errors.append(0)
else:
ys.append(y)
errors.append(dy)
else:
# TODO plot some text
return
weighted_age, age_st_dev = spot.data[config][DataKey.WEIGHTED_AGE]
if isinstance(weighted_age, str):
string = "No weighted age"
else:
string = f"Weighted age: {weighted_age:.0f}, 1σ: {age_st_dev:.0f}"
axis.errorbar(xs, ys, yerr=errors, linestyle="none", marker='o')
axis.text(0.5, 1, string, transform=axis.transAxes, horizontalalignment="center")
axis.set_xlabel("Scan number")
axis.set_ylabel("Age (ka)")
self.canvas.draw()
| 2.484375 | 2 |
examples/single_run/ocaes_single_run.py | EnergyModels/OCAES | 0 | 5822 | import pandas as pd
from OCAES import ocaes
# ----------------------
# create and run model
# ----------------------
data = pd.read_csv('timeseries_inputs_2019.csv')
inputs = ocaes.get_default_inputs()
# inputs['C_well'] = 5000.0
# inputs['X_well'] = 50.0
# inputs['L_well'] = 50.0
# inputs['X_cmp'] = 0
# inputs['X_exp'] = 0
model = ocaes(data, inputs)
df, s = model.get_full_results()
revenue, LCOE, COVE, avoided_emissions = model.post_process(s)
s['revenue'] = revenue
s['LCOE'] = LCOE
s['COVE'] = COVE
s['avoided_emissions'] = avoided_emissions
df.to_csv('results_timeseries.csv')
s.to_csv('results_values.csv')
print(model.calculate_LCOE(s))
# ----------------------
# create plots using built-in functions
# ----------------------
model.plot_overview()
model.plot_power_energy()
| 2.765625 | 3 |
tests/transformations/local_storage_test.py | am-ivanov/dace | 1 | 5823 | import unittest
import dace
import numpy as np
from dace.transformation.dataflow import MapTiling, OutLocalStorage
N = dace.symbol('N')
@dace.program
def arange():
out = np.ndarray([N], np.int32)
for i in dace.map[0:N]:
with dace.tasklet:
o >> out[i]
o = i
return out
class LocalStorageTests(unittest.TestCase):
def test_even(self):
sdfg = arange.to_sdfg()
sdfg.apply_transformations([MapTiling, OutLocalStorage],
options=[{
'tile_sizes': [8]
}, {}])
self.assertTrue(
np.array_equal(sdfg(N=16), np.arange(16, dtype=np.int32)))
def test_uneven(self):
# For testing uneven decomposition, use longer buffer and ensure
# it's not filled over
output = np.ones(20, np.int32)
sdfg = arange.to_sdfg()
sdfg.apply_transformations([MapTiling, OutLocalStorage],
options=[{
'tile_sizes': [5]
}, {}])
dace.propagate_memlets_sdfg(sdfg)
sdfg(N=16, __return=output)
self.assertTrue(
np.array_equal(output[:16], np.arange(16, dtype=np.int32)))
self.assertTrue(np.array_equal(output[16:], np.ones(4, np.int32)))
if __name__ == '__main__':
unittest.main()
| 2.453125 | 2 |
astropy/io/fits/hdu/streaming.py | jayvdb/astropy | 445 | 5824 | <gh_stars>100-1000
# Licensed under a 3-clause BSD style license - see PYFITS.rst
import gzip
import os
from .base import _BaseHDU, BITPIX2DTYPE
from .hdulist import HDUList
from .image import PrimaryHDU
from astropy.io.fits.file import _File
from astropy.io.fits.header import _pad_length
from astropy.io.fits.util import fileobj_name
class StreamingHDU:
"""
A class that provides the capability to stream data to a FITS file
instead of requiring data to all be written at once.
The following pseudocode illustrates its use::
header = astropy.io.fits.Header()
for all the cards you need in the header:
header[key] = (value, comment)
shdu = astropy.io.fits.StreamingHDU('filename.fits', header)
for each piece of data:
shdu.write(data)
shdu.close()
"""
def __init__(self, name, header):
"""
Construct a `StreamingHDU` object given a file name and a header.
Parameters
----------
name : file path, file object, or file like object
The file to which the header and data will be streamed. If opened,
the file object must be opened in a writeable binary mode such as
'wb' or 'ab+'.
header : `Header` instance
The header object associated with the data to be written
to the file.
Notes
-----
The file will be opened and the header appended to the end of
the file. If the file does not already exist, it will be
created, and if the header represents a Primary header, it
will be written to the beginning of the file. If the file
does not exist and the provided header is not a Primary
header, a default Primary HDU will be inserted at the
beginning of the file and the provided header will be added as
the first extension. If the file does already exist, but the
provided header represents a Primary header, the header will
be modified to an image extension header and appended to the
end of the file.
"""
if isinstance(name, gzip.GzipFile):
raise TypeError('StreamingHDU not supported for GzipFile objects.')
self._header = header.copy()
# handle a file object instead of a file name
filename = fileobj_name(name) or ''
# Check if the file already exists. If it does not, check to see
# if we were provided with a Primary Header. If not we will need
# to prepend a default PrimaryHDU to the file before writing the
# given header.
newfile = False
if filename:
if not os.path.exists(filename) or os.path.getsize(filename) == 0:
newfile = True
elif (hasattr(name, 'len') and name.len == 0):
newfile = True
if newfile:
if 'SIMPLE' not in self._header:
hdulist = HDUList([PrimaryHDU()])
hdulist.writeto(name, 'exception')
else:
# This will not be the first extension in the file so we
# must change the Primary header provided into an image
# extension header.
if 'SIMPLE' in self._header:
self._header.set('XTENSION', 'IMAGE', 'Image extension',
after='SIMPLE')
del self._header['SIMPLE']
if 'PCOUNT' not in self._header:
dim = self._header['NAXIS']
if dim == 0:
dim = ''
else:
dim = str(dim)
self._header.set('PCOUNT', 0, 'number of parameters',
after='NAXIS' + dim)
if 'GCOUNT' not in self._header:
self._header.set('GCOUNT', 1, 'number of groups',
after='PCOUNT')
self._ffo = _File(name, 'append')
# TODO : Fix this once the HDU writing API is cleaned up
tmp_hdu = _BaseHDU()
# Passing self._header as an argument to _BaseHDU() will cause its
# values to be modified in undesired ways...need to have a better way
# of doing this
tmp_hdu._header = self._header
self._header_offset = tmp_hdu._writeheader(self._ffo)[0]
self._data_offset = self._ffo.tell()
self._size = self.size
if self._size != 0:
self.writecomplete = False
else:
self.writecomplete = True
# Support the 'with' statement
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def write(self, data):
"""
Write the given data to the stream.
Parameters
----------
data : ndarray
Data to stream to the file.
Returns
-------
writecomplete : int
Flag that when `True` indicates that all of the required
data has been written to the stream.
Notes
-----
Only the amount of data specified in the header provided to the class
constructor may be written to the stream. If the provided data would
cause the stream to overflow, an `OSError` exception is
raised and the data is not written. Once sufficient data has been
written to the stream to satisfy the amount specified in the header,
the stream is padded to fill a complete FITS block and no more data
will be accepted. An attempt to write more data after the stream has
been filled will raise an `OSError` exception. If the
dtype of the input data does not match what is expected by the header,
a `TypeError` exception is raised.
"""
size = self._ffo.tell() - self._data_offset
if self.writecomplete or size + data.nbytes > self._size:
raise OSError('Attempt to write more data to the stream than the '
'header specified.')
if BITPIX2DTYPE[self._header['BITPIX']] != data.dtype.name:
raise TypeError('Supplied data does not match the type specified '
'in the header.')
if data.dtype.str[0] != '>':
# byteswap little endian arrays before writing
output = data.byteswap()
else:
output = data
self._ffo.writearray(output)
if self._ffo.tell() - self._data_offset == self._size:
# the stream is full so pad the data to the next FITS block
self._ffo.write(_pad_length(self._size) * '\0')
self.writecomplete = True
self._ffo.flush()
return self.writecomplete
@property
def size(self):
"""
Return the size (in bytes) of the data portion of the HDU.
"""
size = 0
naxis = self._header.get('NAXIS', 0)
if naxis > 0:
simple = self._header.get('SIMPLE', 'F')
random_groups = self._header.get('GROUPS', 'F')
if simple == 'T' and random_groups == 'T':
groups = 1
else:
groups = 0
size = 1
for idx in range(groups, naxis):
size = size * self._header['NAXIS' + str(idx + 1)]
bitpix = self._header['BITPIX']
gcount = self._header.get('GCOUNT', 1)
pcount = self._header.get('PCOUNT', 0)
size = abs(bitpix) * gcount * (pcount + size) // 8
return size
def close(self):
"""
Close the physical FITS file.
"""
self._ffo.close()
| 2.703125 | 3 |
geoprisma/tests/test_templatetags.py | groupe-conseil-nutshimit-nippour/django-geoprisma | 0 | 5825 | import django
from django.test import TestCase
from django.template import Template, Context
class genericObj(object):
"""
A generic object for testing templatetags
"""
def __init__(self):
self.name = "test"
self.status = "ready"
def getOption(self, optionName):
if optionName == "name":
return self.name
elif optionName == "status":
return self.status
def getName(self):
return self.name
def render(template_string, context_dict=None):
"""
A shortcut for testing template output.
"""
if context_dict is None:
context_dict = {}
c = Context(context_dict)
t = Template(template_string)
return t.render(c).strip()
class object_extrasTests(TestCase):
def test_callMethod(self):
genObj = genericObj()
template = """
{% load object_extras %}
{{ obj|args:"name"|call:"getOption" }}
"""
context = {
'obj': genObj
}
self.assertEqual(render(template, context), "test")
template = """
{% load object_extras %}
{{ obj|call:"getName" }}
"""
context = {
'obj': genObj
}
self.assertEqual(render(template, context), "test")
def test_check_type(self):
genObj = genericObj()
template = """
{% load object_extras %}
{{ obj|obj_type:"genericObj" }}
"""
context = {
'obj': genObj
}
self.assertEqual(render(template, context), "True")
template = """
{% load object_extras %}
{{ obj|obj_type:"notexist" }}
"""
context = {
'obj': genObj
}
self.assertEqual(render(template, context), "False")
class static_extrasTests(TestCase):
def setUp(self):
self.widgetTypeSetJs = set()
self.widgetTypeSetJs.add('queryonclick')
self.widgetTypeSetCss = set()
self.widgetTypeSetCss.add('geoexttoolbar')
def test_getJsStatics(self):
template = """
{% load staticfiles %}
{% load static_extras %}
{% getJsStatics widgetTypeSet as widget_js %}
{% for static_path in widget_js %}
<script src="{% static static_path %}" type="text/javascript"></script>
{% endfor %}
"""
context = {
'widgetTypeSet': self.widgetTypeSetJs
}
out = '<script src="/static/geoprisma/widgets/queryonclick/js/QueryOnClick.js" type="text/javascript"></script>'
self.assertEqual(render(template, context), out)
def test_getCssStatics(self):
template = """
{% load staticfiles %}
{% load static_extras %}
{% getCssStatics widgetTypeSet as widget_css %}
{% for static_path in widget_css %}
<link rel="stylesheet" type="text/css" href="{% static static_path %}" />
{% endfor %}
"""
context = {
'widgetTypeSet': self.widgetTypeSetCss
}
out = '<link rel="stylesheet" type="text/css" href="/static/geoprisma/widgets/geoexttoolbar/css/GeoExtToolbar.css" />'
self.assertEqual(render(template, context), out)
def test_template_exist(self):
template = """
{% load static_extras %}
{{ "geoprisma/widgets/queryonclick/queryonclick.html"|template_exists }}
"""
self.assertEqual(render(template), "True")
template = """
{% load static_extras %}
{{ "geoprisma/widgets/queryonclick/queryonclicknotexist.html"|template_exists }}
"""
self.assertEqual(render(template), "False")
| 2.40625 | 2 |
src/ggrc_workflows/models/task_group.py | acidburn0zzz/ggrc-core | 1 | 5826 | <reponame>acidburn0zzz/ggrc-core
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""A module containing the workflow TaskGroup model."""
from sqlalchemy import or_
from ggrc import db
from ggrc.login import get_current_user
from ggrc.models.associationproxy import association_proxy
from ggrc.models.mixins import (
Titled, Slugged, Described, Timeboxed, WithContact
)
from ggrc.models.reflection import AttributeInfo
from ggrc.models.reflection import PublishOnly
from ggrc.models import all_models
from ggrc_workflows.models.task_group_object import TaskGroupObject
class TaskGroup(
WithContact, Timeboxed, Described, Titled, Slugged, db.Model):
"""Workflow TaskGroup model."""
__tablename__ = 'task_groups'
_title_uniqueness = False
workflow_id = db.Column(
db.Integer,
db.ForeignKey('workflows.id', ondelete="CASCADE"),
nullable=False,
)
lock_task_order = db.Column(db.Boolean(), nullable=True)
task_group_objects = db.relationship(
'TaskGroupObject', backref='task_group', cascade='all, delete-orphan')
objects = association_proxy(
'task_group_objects', 'object', 'TaskGroupObject')
task_group_tasks = db.relationship(
'TaskGroupTask', backref='task_group', cascade='all, delete-orphan')
cycle_task_groups = db.relationship(
'CycleTaskGroup', backref='task_group')
sort_index = db.Column(
db.String(length=250), default="", nullable=False)
_publish_attrs = [
'workflow',
'task_group_objects',
PublishOnly('objects'),
'task_group_tasks',
'lock_task_order',
'sort_index',
# Intentionally do not include `cycle_task_groups`
# 'cycle_task_groups',
]
_aliases = {
"title": "Summary",
"description": "Details",
"contact": {
"display_name": "Assignee",
"mandatory": True,
"filter_by": "_filter_by_contact",
},
"secondary_contact": None,
"start_date": None,
"end_date": None,
"workflow": {
"display_name": "Workflow",
"mandatory": True,
"filter_by": "_filter_by_workflow",
},
"task_group_objects": {
"display_name": "Objects",
"type": AttributeInfo.Type.SPECIAL_MAPPING,
"filter_by": "_filter_by_objects",
},
}
def copy(self, _other=None, **kwargs):
columns = [
'title', 'description', 'workflow', 'sort_index', 'modified_by',
'context'
]
if kwargs.get('clone_people', False) and getattr(self, "contact"):
columns.append("contact")
else:
kwargs["contact"] = get_current_user()
target = self.copy_into(_other, columns, **kwargs)
if kwargs.get('clone_objects', False):
self.copy_objects(target, **kwargs)
if kwargs.get('clone_tasks', False):
self.copy_tasks(target, **kwargs)
return target
def copy_objects(self, target, **kwargs):
# pylint: disable=unused-argument
for task_group_object in self.task_group_objects:
target.task_group_objects.append(task_group_object.copy(
task_group=target,
context=target.context,
))
return target
def copy_tasks(self, target, **kwargs):
for task_group_task in self.task_group_tasks:
target.task_group_tasks.append(task_group_task.copy(
None,
task_group=target,
context=target.context,
clone_people=kwargs.get("clone_people", False),
))
return target
@classmethod
def _filter_by_workflow(cls, predicate):
from ggrc_workflows.models import Workflow
return Workflow.query.filter(
(Workflow.id == cls.workflow_id) &
(predicate(Workflow.slug) | predicate(Workflow.title))
).exists()
@classmethod
def _filter_by_objects(cls, predicate):
parts = []
for model_name in all_models.__all__:
model = getattr(all_models, model_name)
query = getattr(model, "query", None)
field = getattr(model, "slug", getattr(model, "email", None))
if query is None or field is None or not hasattr(model, "id"):
continue
parts.append(query.filter(
(TaskGroupObject.object_type == model_name) &
(model.id == TaskGroupObject.object_id) &
predicate(field)
).exists())
return TaskGroupObject.query.filter(
(TaskGroupObject.task_group_id == cls.id) &
or_(*parts)
).exists()
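# A usage sketch of TaskGroup.copy (assumes an existing instance loaded through
# the ggrc SQLAlchemy session; the variable names are illustrative only):
# duplicate = task_group.copy(clone_objects=True, clone_tasks=True)
# db.session.add(duplicate)
# db.session.commit()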
| 1.867188 | 2 |
src/tests/app_functions/menu/test_change_auto_login.py | DanielNoord/DuolingoPomodoro | 0 | 5827 | import pytest
import rumps
from src.app_functions.menu.change_auto_login import change_auto_login
@pytest.fixture(name="basic_app")
def create_app():
"""Creates a basic app object with some variables to pass to functions
Returns:
rumps.App: Basic app
"""
app = rumps.App("TestApp")
app.settings = {}
return app
def test_setting_is_true(mocker, basic_app):
"""Check if setting is changed correctly if True"""
basic_app.settings["auto_login"] = True
mock_function = mocker.patch("src.app_functions.menu.change_auto_login.update_menu")
mocker.patch("src.app_functions.menu.change_auto_login.save_settings")
change_auto_login(basic_app)
assert basic_app.settings["auto_login"] is False
mock_function.assert_called_once_with(basic_app)
def test_setting_is_false(mocker, basic_app):
"""Check if setting is changed correctly if false"""
basic_app.settings["auto_login"] = False
mock_function = mocker.patch("src.app_functions.menu.change_auto_login.update_menu")
mocker.patch("src.app_functions.menu.change_auto_login.save_settings")
change_auto_login(basic_app)
assert basic_app.settings["auto_login"] is True
mock_function.assert_called_once_with(basic_app)
| 2.515625 | 3 |
deepobs/tensorflow/testproblems/cifar100_vgg19.py | H0merJayS1mpson/deepobscustom | 0 | 5828 | # -*- coding: utf-8 -*-
"""VGG 19 architecture for CIFAR-100."""
import tensorflow as tf
from ._vgg import _vgg
from ..datasets.cifar100 import cifar100
from .testproblem import TestProblem
class cifar100_vgg19(TestProblem):
"""DeepOBS test problem class for the VGG 19 network on Cifar-100.
The CIFAR-100 images are resized to ``224`` by ``224`` to fit the input
dimension of the original VGG network, which was designed for ImageNet.
Details about the architecture can be found in the `original paper`_.
    VGG 19 consists of 19 weight layers, mostly convolutions. The model uses
    cross-entropy loss. A weight decay is used on the weights (but not the biases)
which defaults to ``5e-4``.
.. _original paper: https://arxiv.org/abs/1409.1556
Args:
batch_size (int): Batch size to use.
weight_decay (float): Weight decay factor. Weight decay (L2-regularization)
is used on the weights but not the biases.
Defaults to ``5e-4``.
Attributes:
dataset: The DeepOBS data set class for Cifar-100.
train_init_op: A tensorflow operation initializing the test problem for the
training phase.
train_eval_init_op: A tensorflow operation initializing the test problem for
evaluating on training data.
test_init_op: A tensorflow operation initializing the test problem for
evaluating on test data.
losses: A tf.Tensor of shape (batch_size, ) containing the per-example loss
values.
regularizer: A scalar tf.Tensor containing a regularization term.
accuracy: A scalar tf.Tensor containing the mini-batch mean accuracy.
"""
def __init__(self, batch_size, weight_decay=5e-4):
"""Create a new VGG 19 test problem instance on Cifar-100.
Args:
batch_size (int): Batch size to use.
weight_decay (float): Weight decay factor. Weight decay (L2-regularization)
is used on the weights but not the biases.
Defaults to ``5e-4``.
"""
super(cifar100_vgg19, self).__init__(batch_size, weight_decay)
def set_up(self):
"""Set up the VGG 19 test problem on Cifar-100."""
self.dataset = cifar100(self._batch_size)
self.train_init_op = self.dataset.train_init_op
self.train_eval_init_op = self.dataset.train_eval_init_op
self.valid_init_op = self.dataset.valid_init_op
self.test_init_op = self.dataset.test_init_op
training = tf.equal(self.dataset.phase, "train")
x, y = self.dataset.batch
linear_outputs = _vgg(
x,
training,
variant=19,
num_outputs=100,
weight_decay=self._weight_decay,
)
self.losses = tf.nn.softmax_cross_entropy_with_logits_v2(
labels=y, logits=linear_outputs
)
y_pred = tf.argmax(linear_outputs, 1)
y_correct = tf.argmax(y, 1)
correct_prediction = tf.equal(y_pred, y_correct)
self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
self.regularizer = tf.losses.get_regularization_loss()
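# A minimal usage sketch (illustrative only; in DeepOBS these test problems are
# normally driven by a runner rather than instantiated directly):
# tp = cifar100_vgg19(batch_size=128)
# tp.set_up()
# with tf.Session() as sess:
#     sess.run(tf.global_variables_initializer())
#     sess.run(tp.train_init_op)
#     loss = tf.reduce_mean(tp.losses) + tp.regularizer
#     print(sess.run([loss, tp.accuracy]))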
| 2.796875 | 3 |
write-a-function.py | TheHumanGoogle/Hackerrank-python-solution | 1 | 5829 | def is_leap(year):
leap=False
if year%400==0:
leap=True
elif year%4==0 and year%100!=0:
leap=True
else:
leap=False
return leap
year = int(input())
print(is_leap(year))
| 4.09375 | 4 |
shortio/utils.py | byshyk/shortio | 0 | 5830 | <reponame>byshyk/shortio
"""Contains utility functions."""
BIN_MODE_ARGS = {'mode', 'buffering', }
TEXT_MODE_ARGS = {'mode', 'buffering', 'encoding', 'errors', 'newline'}
def split_args(args):
"""Splits args into two groups: open args and other args.
    Open args are used by the ``open`` function. Other args are used by the
    ``load``/``dump`` functions.
Args:
args: Keyword args to split.
Returns:
open_args: Arguments for ``open``.
other_args: Arguments for ``load``/``dump``.
"""
mode_args = BIN_MODE_ARGS if 'b' in args['mode'] else TEXT_MODE_ARGS
open_args = {}
other_args = {}
for arg, value in args.items():
if arg in mode_args:
open_args[arg] = value
else:
other_args[arg] = value
return open_args, other_args
def read_wrapper(load, **base_kwargs):
"""Wraps ``load`` function to avoid context manager boilerplate.
Args:
load: Function that takes the return of ``open``.
**base_kwargs: Base arguments that ``open``/``load`` take.
Returns:
Wrapper for ``load``.
"""
def wrapped(file, **kwargs):
open_args, load_args = split_args({**base_kwargs, **kwargs})
with open(file, **open_args) as f:
return load(f, **load_args)
return wrapped
def write_wrapper(dump, **base_kwargs):
"""Wraps ``dump`` function to avoid context manager boilerplate.
Args:
dump: Function that takes the return of ``open`` and data to dump.
**base_kwargs: Base arguments that ``open``/``dump`` take.
Returns:
Wrapper for ``dump``.
"""
def wrapped(file, obj, **kwargs):
open_args, dump_args = split_args({**base_kwargs, **kwargs})
with open(file, **open_args) as f:
dump(obj, f, **dump_args)
return wrapped
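if __name__ == '__main__':
    # A minimal demonstration of the wrappers above, using the standard-library
    # json module (an assumption made only for this example).
    import json
    read_json = read_wrapper(json.load, mode='r', encoding='utf-8')
    write_json = write_wrapper(json.dump, mode='w', encoding='utf-8')
    write_json('example.json', {'answer': 42}, indent=2)
    print(read_json('example.json'))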
| 3.34375 | 3 |
paasta_tools/async_utils.py | sobolevn/paasta | 1,711 | 5831 | import asyncio
import functools
import time
import weakref
from collections import defaultdict
from typing import AsyncIterable
from typing import Awaitable
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import TypeVar
T = TypeVar("T")
# NOTE: this method is not thread-safe due to lack of locking while checking
# and updating the cache
def async_ttl_cache(
ttl: Optional[float] = 300,
cleanup_self: bool = False,
*,
cache: Optional[Dict] = None,
) -> Callable[
[Callable[..., Awaitable[T]]], Callable[..., Awaitable[T]] # wrapped # inner
]:
async def call_or_get_from_cache(cache, async_func, args_for_key, args, kwargs):
# Please note that anything which is put into `key` will be in the
# cache forever, potentially causing memory leaks. The most common
# case is the `self` arg pointing to a huge object. To mitigate that
# we're using `args_for_key`, which is supposed not contain any huge
# objects.
key = functools._make_key(args_for_key, kwargs, typed=False)
try:
future, last_update = cache[key]
if ttl is not None and time.time() - last_update > ttl:
raise KeyError
except KeyError:
future = asyncio.ensure_future(async_func(*args, **kwargs))
# set the timestamp to +infinity so that we always wait on the in-flight request.
cache[key] = (future, float("Inf"))
try:
value = await future
except Exception:
# Only update the cache if it's the same future we awaited and
# it hasn't already been updated by another coroutine
# Note also that we use get() in case the key was deleted from the
# cache by another coroutine
if cache.get(key) == (future, float("Inf")):
del cache[key]
raise
else:
if cache.get(key) == (future, float("Inf")):
cache[key] = (future, time.time())
return value
if cleanup_self:
instance_caches: Dict = cache if cache is not None else defaultdict(dict)
def on_delete(w):
del instance_caches[w]
def outer(wrapped):
@functools.wraps(wrapped)
async def inner(self, *args, **kwargs):
w = weakref.ref(self, on_delete)
self_cache = instance_caches[w]
return await call_or_get_from_cache(
self_cache, wrapped, args, (self,) + args, kwargs
)
return inner
else:
cache2: Dict = cache if cache is not None else {} # Should be Dict[Any, T] but that doesn't work.
def outer(wrapped):
@functools.wraps(wrapped)
async def inner(*args, **kwargs):
return await call_or_get_from_cache(cache2, wrapped, args, args, kwargs)
return inner
return outer
async def aiter_to_list(aiter: AsyncIterable[T],) -> List[T]:
return [x async for x in aiter]
def async_timeout(
seconds: int = 10,
) -> Callable[
[Callable[..., Awaitable[T]]], Callable[..., Awaitable[T]] # wrapped # inner
]:
def outer(wrapped):
@functools.wraps(wrapped)
async def inner(*args, **kwargs):
return await asyncio.wait_for(wrapped(*args, **kwargs), timeout=seconds)
return inner
return outer
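if __name__ == "__main__":
    # A minimal demonstration of the decorators above; the coroutine is a
    # hypothetical stand-in and not part of paasta itself.
    @async_timeout(seconds=5)
    @async_ttl_cache(ttl=60)
    async def slow_double(x: int) -> int:
        await asyncio.sleep(0.1)  # stand-in for an expensive lookup
        return x * 2
    async def demo() -> List[int]:
        first = await slow_double(21)
        second = await slow_double(21)  # served from the TTL cache
        return [first, second]
    print(asyncio.run(demo()))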
| 2.4375 | 2 |
util/dataset.py | MTI830PyTraders/pytrade | 3 | 5832 | #!/usr/bin/python
''' generate dataset '''
import csv
import argparse
import numpy as np
import sklearn.metrics
import theanets
from sklearn.metrics import accuracy_score
import logging
from trendStrategy import OptTrendStrategy, TrendStrategy
from util import visu
def compare(stock, field='orders', strategy="TrendStrategy_predicted", best=OptTrendStrategy.__name__):
best_fname="{0}_{1}_{2}.csv".format(stock, best, field)
predicted_fname="{0}_{1}_{2}.csv".format(stock, strategy, field)
print "comparing",best_fname,predicted_fname
best_data = np.loadtxt(best_fname, usecols=[1], delimiter=',')
predicted_data = np.loadtxt(predicted_fname, usecols=[1], delimiter=',')
min_size = min(len(best_data), len(predicted_data))
title = "%s vs %s" %(best, strategy)
visu.compare(best_data[-min_size:], predicted_data[-min_size:], title)
def load_dataset(stock, ratio=0.8, name=OptTrendStrategy.__name__):
''' return train, valid (x,y) '''
orders = np.loadtxt("{0}_{1}_orders.csv".format(stock, name), usecols=[1], delimiter=',')
orders[orders==-1]=0
features = np.loadtxt("{0}_input.csv".format(stock), delimiter=',')
if len(orders)!=len(features):
logging.error("len(orders)!=len(features) -> %s!=%s" %(len(orders),len(features)))
features = features.astype('f')
orders = orders.astype('i')
    pos = int(round(len(features) * ratio))  # int(): round() can return a float, which is not a valid slice index
train = (features[:pos], orders[:pos])
valid = (features[pos:], orders[pos:])
return train, valid
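# Note on expected inputs (added comment): for stock="TSLA" and the default
# strategy name, load_dataset() reads "TSLA_OptTrendStrategy_orders.csv" and
# "TSLA_input.csv" from the working directory, maps the -1 (sell) labels to 0,
# and returns ((X_train, y_train), (X_valid, y_valid)) numpy tuples split at
# `ratio`.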
def evaluate(exp, dataset):
y_true = dataset[1]
y_pred = exp.network.predict(dataset[0])
print(sklearn.metrics.confusion_matrix(y_true, y_pred))
print('accuracy:',accuracy_score(y_true, y_pred))
def train_strategy(stock, ratio=0.8, min_improvement=0.001):
train, valid = load_dataset(stock)
n, n_input = train[0].shape
exp = theanets.Experiment(
theanets.Classifier,
layers=(n_input, n_input*2, 2),
)
exp.train(train, valid, min_improvement=min_improvement,
algo='sgd',
learning_rate=0.01,
momentum=0.5,
hidden_l1=0.001,
weight_l2=0.001,
num_updates=100
)
print('training:')
evaluate(exp, train)
print('validation:')
evaluate(exp, valid)
exp.save('%s.nn' %stock)
return exp
def load_strategy(name, verbose=False):
print("loading %s trained strategy" %name)
train, valid = load_dataset(name)
n, n_input = train[0].shape
exp = theanets.Experiment(
theanets.Classifier,
layers=(n_input, n_input*2, 2),
)
exp.load('%s.nn' %name)
if verbose:
print('training:')
evaluate(exp, train)
print('validation:')
evaluate(exp, valid)
return exp
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--stock', '-s', default="TSLA", help='stock')
    parser.add_argument('--ratio', '-r', default=0.8, type=float, help='train/valid ratio')
    parser.add_argument('--min', '-m', default=0.001, type=float, help='min improvement (stop learning)')
parser.add_argument('--field', default='orders', help='compare field')
args = parser.parse_args()
if args.field:
compare(args.stock, args.field)
train, valid = load_dataset(args.stock)
exp = train_strategy(args.stock, args.ratio, args.min)
exp = load_strategy(args.stock, True)
| 2.8125 | 3 |
examples/scripts/sc/bpdn.py | manvhah/sporco | 0 | 5833 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of the SPORCO package. Details of the copyright
# and user license can be found in the 'LICENSE.txt' file distributed
# with the package.
"""
Basis Pursuit DeNoising
=======================
This example demonstrates the use of class :class:`.admm.bpdn.BPDN` to solve the Basis Pursuit DeNoising (BPDN) problem :cite:`chen-1998-atomic`
$$\mathrm{argmin}_\mathbf{x} \; (1/2) \| D \mathbf{x} - \mathbf{s} \|_2^2 + \lambda \| \mathbf{x} \|_1 \;,$$
where $D$ is the dictionary, $\mathbf{x}$ is the sparse representation, and $\mathbf{s}$ is the signal to be represented. In this example the BPDN problem is used to estimate the reference sparse representation that generated a signal from a noisy version of the signal.
"""
from __future__ import print_function
from builtins import input
import numpy as np
from sporco.admm import bpdn
from sporco import util
from sporco import plot
"""
Configure problem size, sparsity, and noise level.
"""
N = 512 # Signal size
M = 4*N # Dictionary size
L = 32 # Number of non-zero coefficients in generator
sigma = 0.5 # Noise level
"""
Construct random dictionary, reference random sparse representation, and test signal consisting of the synthesis of the reference sparse representation with additive Gaussian noise.
"""
# Construct random dictionary and random sparse coefficients
np.random.seed(12345)
D = np.random.randn(N, M)
x0 = np.zeros((M, 1))
si = np.random.permutation(list(range(0, M-1)))
x0[si[0:L]] = np.random.randn(L, 1)
# Construct reference and noisy signal
s0 = D.dot(x0)
s = s0 + sigma*np.random.randn(N,1)
"""
Set BPDN solver class options.
"""
opt = bpdn.BPDN.Options({'Verbose': False, 'MaxMainIter': 500,
'RelStopTol': 1e-3, 'AutoRho': {'RsdlTarget': 1.0}})
"""
Select regularization parameter $\lambda$ by evaluating the error in recovering the sparse representation over a logarithmically spaced grid. (The reference representation is assumed to be known, which is not realistic in a real application.) A function is defined that evaluates the BPDN recovery error for a specified $\lambda$, and this function is evaluated in parallel by :func:`sporco.util.grid_search`.
"""
# Function computing reconstruction error at lmbda
def evalerr(prm):
lmbda = prm[0]
b = bpdn.BPDN(D, s, lmbda, opt)
x = b.solve()
return np.sum(np.abs(x-x0))
# Parallel evalution of error function on lmbda grid
lrng = np.logspace(1, 2, 20)
sprm, sfvl, fvmx, sidx = util.grid_search(evalerr, (lrng,))
lmbda = sprm[0]
print('Minimum ℓ1 error: %5.2f at 𝜆 = %.2e' % (sfvl, lmbda))
"""
Once the best $\lambda$ has been determined, run BPDN with verbose display of ADMM iteration statistics.
"""
# Initialise and run BPDN object for best lmbda
opt['Verbose'] = True
b = bpdn.BPDN(D, s, lmbda, opt)
x = b.solve()
print("BPDN solve time: %.2fs" % b.timer.elapsed('solve'))
"""
Plot comparison of reference and recovered representations.
"""
plot.plot(np.hstack((x0, x)), title='Sparse representation',
lgnd=['Reference', 'Reconstructed'])
"""
Plot lmbda error curve, functional value, residuals, and rho
"""
its = b.getitstat()
fig = plot.figure(figsize=(15, 10))
plot.subplot(2, 2, 1)
plot.plot(fvmx, x=lrng, ptyp='semilogx', xlbl='$\lambda$',
ylbl='Error', fig=fig)
plot.subplot(2, 2, 2)
plot.plot(its.ObjFun, xlbl='Iterations', ylbl='Functional', fig=fig)
plot.subplot(2, 2, 3)
plot.plot(np.vstack((its.PrimalRsdl, its.DualRsdl)).T,
ptyp='semilogy', xlbl='Iterations', ylbl='Residual',
lgnd=['Primal', 'Dual'], fig=fig)
plot.subplot(2, 2, 4)
plot.plot(its.Rho, xlbl='Iterations', ylbl='Penalty Parameter', fig=fig)
fig.show()
# Wait for enter on keyboard
input()
| 2.9375 | 3 |
saleor-env/lib/python3.7/site-packages/snowballstemmer/nepali_stemmer.py | tadartefactorist/mask | 0 | 5834 | <filename>saleor-env/lib/python3.7/site-packages/snowballstemmer/nepali_stemmer.py
# This file was generated automatically by the Snowball to Python compiler
# http://snowballstem.org/
from .basestemmer import BaseStemmer
from .among import Among
class NepaliStemmer(BaseStemmer):
'''
This class was automatically generated by a Snowball to Python compiler
It implements the stemming algorithm defined by a snowball script.
'''
a_0 = [
Among(u"\u0932\u093E\u0907", -1, 1),
Among(u"\u0932\u093E\u0908", -1, 1),
Among(u"\u0938\u0901\u0917", -1, 1),
Among(u"\u0938\u0902\u0917", -1, 1),
Among(u"\u092E\u093E\u0930\u094D\u092B\u0924", -1, 1),
Among(u"\u0930\u0924", -1, 1),
Among(u"\u0915\u093E", -1, 2),
Among(u"\u092E\u093E", -1, 1),
Among(u"\u0926\u094D\u0935\u093E\u0930\u093E", -1, 1),
Among(u"\u0915\u093F", -1, 2),
Among(u"\u092A\u091B\u093F", -1, 1),
Among(u"\u0915\u0940", -1, 2),
Among(u"\u0932\u0947", -1, 1),
Among(u"\u0915\u0948", -1, 2),
Among(u"\u0938\u0901\u0917\u0948", -1, 1),
Among(u"\u092E\u0948", -1, 1),
Among(u"\u0915\u094B", -1, 2)
]
a_1 = [
Among(u"\u0901", -1, -1),
Among(u"\u0902", -1, -1),
Among(u"\u0948", -1, -1)
]
a_2 = [
Among(u"\u0901", -1, 1),
Among(u"\u0902", -1, 1),
Among(u"\u0948", -1, 2)
]
a_3 = [
Among(u"\u0925\u093F\u090F", -1, 1),
Among(u"\u091B", -1, 1),
Among(u"\u0907\u091B", 1, 1),
Among(u"\u090F\u091B", 1, 1),
Among(u"\u093F\u091B", 1, 1),
Among(u"\u0947\u091B", 1, 1),
Among(u"\u0928\u0947\u091B", 5, 1),
Among(u"\u0939\u0941\u0928\u0947\u091B", 6, 1),
Among(u"\u0907\u0928\u094D\u091B", 1, 1),
Among(u"\u093F\u0928\u094D\u091B", 1, 1),
Among(u"\u0939\u0941\u0928\u094D\u091B", 1, 1),
Among(u"\u090F\u0915\u093E", -1, 1),
Among(u"\u0907\u090F\u0915\u093E", 11, 1),
Among(u"\u093F\u090F\u0915\u093E", 11, 1),
Among(u"\u0947\u0915\u093E", -1, 1),
Among(u"\u0928\u0947\u0915\u093E", 14, 1),
Among(u"\u0926\u093E", -1, 1),
Among(u"\u0907\u0926\u093E", 16, 1),
Among(u"\u093F\u0926\u093E", 16, 1),
Among(u"\u0926\u0947\u0916\u093F", -1, 1),
Among(u"\u092E\u093E\u0925\u093F", -1, 1),
Among(u"\u090F\u0915\u0940", -1, 1),
Among(u"\u0907\u090F\u0915\u0940", 21, 1),
Among(u"\u093F\u090F\u0915\u0940", 21, 1),
Among(u"\u0947\u0915\u0940", -1, 1),
Among(u"\u0926\u0947\u0916\u0940", -1, 1),
Among(u"\u0925\u0940", -1, 1),
Among(u"\u0926\u0940", -1, 1),
Among(u"\u091B\u0941", -1, 1),
Among(u"\u090F\u091B\u0941", 28, 1),
Among(u"\u0947\u091B\u0941", 28, 1),
Among(u"\u0928\u0947\u091B\u0941", 30, 1),
Among(u"\u0928\u0941", -1, 1),
Among(u"\u0939\u0930\u0941", -1, 1),
Among(u"\u0939\u0930\u0942", -1, 1),
Among(u"\u091B\u0947", -1, 1),
Among(u"\u0925\u0947", -1, 1),
Among(u"\u0928\u0947", -1, 1),
Among(u"\u090F\u0915\u0948", -1, 1),
Among(u"\u0947\u0915\u0948", -1, 1),
Among(u"\u0928\u0947\u0915\u0948", 39, 1),
Among(u"\u0926\u0948", -1, 1),
Among(u"\u0907\u0926\u0948", 41, 1),
Among(u"\u093F\u0926\u0948", 41, 1),
Among(u"\u090F\u0915\u094B", -1, 1),
Among(u"\u0907\u090F\u0915\u094B", 44, 1),
Among(u"\u093F\u090F\u0915\u094B", 44, 1),
Among(u"\u0947\u0915\u094B", -1, 1),
Among(u"\u0928\u0947\u0915\u094B", 47, 1),
Among(u"\u0926\u094B", -1, 1),
Among(u"\u0907\u0926\u094B", 49, 1),
Among(u"\u093F\u0926\u094B", 49, 1),
Among(u"\u092F\u094B", -1, 1),
Among(u"\u0907\u092F\u094B", 52, 1),
Among(u"\u092D\u092F\u094B", 52, 1),
Among(u"\u093F\u092F\u094B", 52, 1),
Among(u"\u0925\u093F\u092F\u094B", 55, 1),
Among(u"\u0926\u093F\u092F\u094B", 55, 1),
Among(u"\u0925\u094D\u092F\u094B", 52, 1),
Among(u"\u091B\u094C", -1, 1),
Among(u"\u0907\u091B\u094C", 59, 1),
Among(u"\u090F\u091B\u094C", 59, 1),
Among(u"\u093F\u091B\u094C", 59, 1),
Among(u"\u0947\u091B\u094C", 59, 1),
Among(u"\u0928\u0947\u091B\u094C", 63, 1),
Among(u"\u092F\u094C", -1, 1),
Among(u"\u0925\u093F\u092F\u094C", 65, 1),
Among(u"\u091B\u094D\u092F\u094C", 65, 1),
Among(u"\u0925\u094D\u092F\u094C", 65, 1),
Among(u"\u091B\u0928\u094D", -1, 1),
Among(u"\u0907\u091B\u0928\u094D", 69, 1),
Among(u"\u090F\u091B\u0928\u094D", 69, 1),
Among(u"\u093F\u091B\u0928\u094D", 69, 1),
Among(u"\u0947\u091B\u0928\u094D", 69, 1),
Among(u"\u0928\u0947\u091B\u0928\u094D", 73, 1),
Among(u"\u0932\u093E\u0928\u094D", -1, 1),
Among(u"\u091B\u093F\u0928\u094D", -1, 1),
Among(u"\u0925\u093F\u0928\u094D", -1, 1),
Among(u"\u092A\u0930\u094D", -1, 1),
Among(u"\u0907\u0938\u094D", -1, 1),
Among(u"\u0925\u093F\u0907\u0938\u094D", 79, 1),
Among(u"\u091B\u0938\u094D", -1, 1),
Among(u"\u0907\u091B\u0938\u094D", 81, 1),
Among(u"\u090F\u091B\u0938\u094D", 81, 1),
Among(u"\u093F\u091B\u0938\u094D", 81, 1),
Among(u"\u0947\u091B\u0938\u094D", 81, 1),
Among(u"\u0928\u0947\u091B\u0938\u094D", 85, 1),
Among(u"\u093F\u0938\u094D", -1, 1),
Among(u"\u0925\u093F\u0938\u094D", 87, 1),
Among(u"\u091B\u0947\u0938\u094D", -1, 1),
Among(u"\u0939\u094B\u0938\u094D", -1, 1)
]
def __r_remove_category_1(self):
# (, line 53
# [, line 54
self.ket = self.cursor
# substring, line 54
among_var = self.find_among_b(NepaliStemmer.a_0)
if among_var == 0:
return False
# ], line 54
self.bra = self.cursor
if among_var == 1:
# (, line 58
# delete, line 58
if not self.slice_del():
return False
elif among_var == 2:
# (, line 59
# or, line 59
try:
v_1 = self.limit - self.cursor
try:
# (, line 59
# or, line 59
try:
v_2 = self.limit - self.cursor
try:
# literal, line 59
if not self.eq_s_b(u"\u090F"):
raise lab3()
raise lab2()
except lab3: pass
self.cursor = self.limit - v_2
# literal, line 59
if not self.eq_s_b(u"\u0947"):
raise lab1()
except lab2: pass
# (, line 59
raise lab0()
except lab1: pass
self.cursor = self.limit - v_1
# delete, line 59
if not self.slice_del():
return False
except lab0: pass
return True
def __r_check_category_2(self):
# (, line 63
# [, line 64
self.ket = self.cursor
# substring, line 64
if self.find_among_b(NepaliStemmer.a_1) == 0:
return False
# ], line 64
self.bra = self.cursor
return True
def __r_remove_category_2(self):
# (, line 69
# [, line 70
self.ket = self.cursor
# substring, line 70
among_var = self.find_among_b(NepaliStemmer.a_2)
if among_var == 0:
return False
# ], line 70
self.bra = self.cursor
if among_var == 1:
# (, line 71
# or, line 71
try:
v_1 = self.limit - self.cursor
try:
# literal, line 71
if not self.eq_s_b(u"\u092F\u094C"):
raise lab1()
raise lab0()
except lab1: pass
self.cursor = self.limit - v_1
try:
# literal, line 71
if not self.eq_s_b(u"\u091B\u094C"):
raise lab2()
raise lab0()
except lab2: pass
self.cursor = self.limit - v_1
try:
# literal, line 71
if not self.eq_s_b(u"\u0928\u094C"):
raise lab3()
raise lab0()
except lab3: pass
self.cursor = self.limit - v_1
# literal, line 71
if not self.eq_s_b(u"\u0925\u0947"):
return False
except lab0: pass
# delete, line 71
if not self.slice_del():
return False
elif among_var == 2:
# (, line 72
# literal, line 72
if not self.eq_s_b(u"\u0924\u094D\u0930"):
return False
# delete, line 72
if not self.slice_del():
return False
return True
def __r_remove_category_3(self):
# (, line 76
# [, line 77
self.ket = self.cursor
# substring, line 77
if self.find_among_b(NepaliStemmer.a_3) == 0:
return False
# ], line 77
self.bra = self.cursor
# (, line 79
# delete, line 79
if not self.slice_del():
return False
return True
def _stem(self):
# (, line 85
# backwards, line 86
self.limit_backward = self.cursor
self.cursor = self.limit
# (, line 86
# do, line 87
v_1 = self.limit - self.cursor
try:
# call remove_category_1, line 87
if not self.__r_remove_category_1():
raise lab0()
except lab0: pass
self.cursor = self.limit - v_1
# do, line 88
v_2 = self.limit - self.cursor
try:
# (, line 88
# repeat, line 89
try:
while True:
try:
v_3 = self.limit - self.cursor
try:
# (, line 89
# do, line 89
v_4 = self.limit - self.cursor
try:
# (, line 89
# and, line 89
v_5 = self.limit - self.cursor
# call check_category_2, line 89
if not self.__r_check_category_2():
raise lab5()
self.cursor = self.limit - v_5
# call remove_category_2, line 89
if not self.__r_remove_category_2():
raise lab5()
except lab5: pass
self.cursor = self.limit - v_4
# call remove_category_3, line 89
if not self.__r_remove_category_3():
raise lab4()
raise lab3()
except lab4: pass
self.cursor = self.limit - v_3
raise lab2()
except lab3: pass
except lab2: pass
except lab1: pass
self.cursor = self.limit - v_2
self.cursor = self.limit_backward
return True
class lab0(BaseException): pass
class lab1(BaseException): pass
class lab2(BaseException): pass
class lab3(BaseException): pass
class lab4(BaseException): pass
class lab5(BaseException): pass
| 2.421875 | 2 |
tests/auto_test_class_creation_spec.py | MountainField/uspec | 2 | 5835 | <reponame>MountainField/uspec
# -*- coding: utf-8 -*-
# =================================================================
# uspec
#
# Copyright (c) 2020 <NAME>
#
# This software is released under the MIT License.
# http://opensource.org/licenses/mit-license.php
# =================================================================
from __future__ import unicode_literals, print_function, division
import unittest
import uspec
from uspec import describe, context, it
###################################
class TestGame(unittest.TestCase): pass
with describe("Game", test_class=TestGame):
assert test_class is TestGame
@it("hoge")
def _(self):
self.assertTrue(True)
assert TestGame is not None
##################################
TEST_CLASS_NAME_GAME2 = None
with describe("Game2"):
TEST_CLASS_NAME_GAME2 = test_class.__name__
@it("hoge")
def _(self):
self.assertTrue(True)
assert TEST_CLASS_NAME_GAME2 in globals()
##################################
def wrap():
global TEST_CLASS_NAME_GAME3
with describe("Game3"):
TEST_CLASS_NAME_GAME3 = locals()["test_class"].__name__
@it("hoge")
def _(self):
self.assertTrue(True)
wrap()
assert TEST_CLASS_NAME_GAME3 in globals()
if __name__ == '__main__':
import unittest
unittest.main(verbosity=2)
| 2.84375 | 3 |
main.py | Matthewk01/Snake-AI | 0 | 5836 | import pygame
from game.game_logic.game import Game
import matplotlib.pyplot as plt
def main():
scores_history = []
GAME_COUNT = 2
for i in range(GAME_COUNT):
game = Game(400, "Snake AI")
score = game.start()
scores_history.append(score)
print("Game:", i)
plt.ylim(0, 36)
plt.plot(range(len(scores_history)), scores_history)
plt.ylabel('Snake length')
plt.xlabel('Game count')
plt.show()
if __name__ == "__main__":
main()
| 3.890625 | 4 |
closed/Intel/code/resnet50/openvino-cpu/src/tools/create_image_list.py | ctuning/inference_results_v1.1 | 19 | 5837 | import os
import sys
from glob import glob
def create_list(images_dir, output_file, img_ext=".jpg"):
ImgList = os.listdir(images_dir)
val_list = []
for img in ImgList:
        img, ext = os.path.splitext(img)  # splitext handles file names that contain extra dots
val_list.append(img)
with open(os.path.join(images_dir, output_file),'w') as fid:
for line in val_list[:-1]:
fid.write(line + "\n")
fid.write(val_list[-1])
def main():
if len(sys.argv) < 2:
print("Requires images directory")
sys.exit(1)
elif len(sys.argv) < 3:
images_dir = sys.argv[1]
output_file = "image_list.txt"
else:
images_dir = sys.argv[1]
output_file = sys.argv[2]
create_list(images_dir, output_file)
if __name__=="__main__":
main() | 3.3125 | 3 |
AI/others/churn/churn_2.py | honchardev/Fun | 0 | 5838 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
# src: http://datareview.info/article/prognozirovanie-ottoka-klientov-so-scikit-learn/
# In[ ]:
# Customer churn rate is a business term describing how intensively customers
# leave a company or stop paying for its goods or services.
# It is a key metric for many companies, because acquiring new customers is
# often much more expensive than retaining existing ones (in some cases 5 to
# 20 times more expensive).
# Example use cases:
# 1. mobile carriers, cable TV operators and companies that process
# credit card payments
# 2. casinos use predictive models to determine the ideal floor conditions
# that keep players at the blackjack table.
# 3. airlines can offer customers who have complaints an upgrade of their
# ticket to first class.
# Effective customer retention boils down to a task in which, using the
# available data, we must distinguish customers who are about to leave
# from those who are not.
# In[ ]:
# datset src: https://raw.githubusercontent.com/michaelulin/churn/master/work/churn_model/data/churn.csv
# In[88]:
# Load libraries
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score, confusion_matrix, precision_recall_fscore_support
from sklearn.model_selection import KFold, train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from IPython.display import display  # display() is called below; the explicit import keeps the exported script runnable outside a notebook
# In[3]:
# Load dataset
raw_churn_df = pd.read_csv('churn.csv')
# In[17]:
display(raw_churn_df.shape)
display(raw_churn_df.head(), raw_churn_df.tail())
display(raw_churn_df.columns.values)
display(raw_churn_df.dtypes)
display(raw_churn_df.isnull().sum())
# In[78]:
# Isolate target data
y = raw_churn_df['Churn?']
X = raw_churn_df.drop('Churn?', axis=1)
# In[79]:
# Drop irrelevant features
features_to_drop = ['State', 'Area Code', 'Phone']
X = X.drop(features_to_drop, axis=1)
# In[80]:
# Encode yes/no with 1/0 values
X["Int'l Plan"] = X["Int'l Plan"].map({'no': 0, 'yes': 1})
X["VMail Plan"] = X["VMail Plan"].map({'no': 0, 'yes': 1})
# In[81]:
# Scale everything
std_scaler = StandardScaler(with_mean=True)
X = std_scaler.fit_transform(X)
display(X.shape)
# In[90]:
# Perform CV for SVM, random forest and kNN
def try_clf(X, y, clf_nofit):
X_tr, X_val, y_tr, y_val = train_test_split(X, y, random_state=42)
clf = clf_nofit.fit(X_tr, y_tr)
y_pred = clf.predict(X_val)
display(clf_nofit.__class__.__name__)
display(accuracy_score(y_val, y_pred))
display(confusion_matrix(y_val, y_pred))
display("prec, rec, f1, support", precision_recall_fscore_support(y_val, y_pred))
try_clf(X, y, SVC(gamma='scale'))
try_clf(X, y, RandomForestClassifier(n_estimators=100, n_jobs=-1))
try_clf(X, y, KNeighborsClassifier())
# std scaler with_mean=False accuracies:
# 0.9256594724220624
# 0.9484412470023981
# 0.8896882494004796
# std scaler with_mean=True accuracies:
# 0.9256594724220624
# 0.9496402877697842
# 0.8896882494004796
# In[86]:
# Recall
# What fraction of the actual churns did we predict correctly,
# i.e. correctly predicted churns / all actual churns?
# Precision
# What fraction of the predicted churns were correct,
# i.e. correctly predicted churns / all predicted churns?
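# In confusion-matrix terms (added note):
#   recall    = TP / (TP + FN)
#   precision = TP / (TP + FP)
# precision_recall_fscore_support() used above reports both, per class.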
# In[101]:
# # Predict probabilities
# def try_probab(X, y, clf_nofit):
# X_tr, X_val, y_tr, y_val = train_test_split(X, y, random_state=42)
# clf = clf_nofit.fit(X_tr, y_tr)
# y_prob = clf.predict_proba(X_val)
# # for i in range(len(X)):
# # display("y_true={0}, Predicted={1}".format(y[i], y_prob[i]))
# display(pd.value_counts(y_prob[:, 1]))
# try_probab(X, y, SVC(gamma='scale', probability=True))
# # try_probab(X, y, RandomForestClassifier(n_estimators=100, n_jobs=-1))
# # try_probab(X, y, KNeighborsClassifier())
# # for i in range(len(Xnew)):
# # print("X=%s, Predicted=%s" % (Xnew[i], ynew[i]))
# In[ ]:
# todo: calibration and discrimination
# https://github.com/ghuiber/churn/blob/master/churn_measurements.py
# from churn_measurements import calibration, discrimination
| 2.78125 | 3 |
airbyte-integrations/connectors/source-google-sheets/google_sheets_source/models/spreadsheet.py | rajatariya21/airbyte | 0 | 5839 | <reponame>rajatariya21/airbyte
# MIT License
#
# Copyright (c) 2020 Airbyte
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import annotations
from typing import List, Optional
from pydantic import BaseModel, Extra, Field
class SpreadsheetProperties(BaseModel):
class Config:
extra = Extra.allow
title: Optional[str] = None
class SheetProperties(BaseModel):
class Config:
extra = Extra.allow
title: Optional[str] = None
class CellData(BaseModel):
class Config:
extra = Extra.allow
formattedValue: Optional[str] = None
class RowData(BaseModel):
class Config:
extra = Extra.allow
values: Optional[List[CellData]] = None
class GridData(BaseModel):
class Config:
extra = Extra.allow
rowData: Optional[List[RowData]] = None
class Sheet(BaseModel):
class Config:
extra = Extra.allow
data: Optional[List[GridData]] = None
properties: Optional[SheetProperties] = None
class Spreadsheet(BaseModel):
class Config:
extra = Extra.allow
spreadsheetId: str
sheets: List[Sheet]
properties: Optional[SpreadsheetProperties] = None
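# Illustrative usage (added example; the field values are made up): these models
# mirror the Google Sheets API "spreadsheets.get" payload, so a raw response
# dict can be validated with pydantic, e.g.
#
#     spreadsheet = Spreadsheet.parse_obj({
#         "spreadsheetId": "abc123",
#         "sheets": [{"properties": {"title": "Sheet1"},
#                     "data": [{"rowData": [{"values": [{"formattedValue": "x"}]}]}]}],
#     })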
| 1.90625 | 2 |
pytrivia/trivia.py | Dnewman9/Python-Trivia-API | 6 | 5840 | <filename>pytrivia/trivia.py
"""
A simple python api wrapper for https://opentdb.com/
"""
from aiohttp import ClientSession
from requests import get
from pytrivia.__helpers import decode_dict, get_token, make_request
from pytrivia.enums import *
class Trivia:
def __init__(self, with_token: bool):
"""
Initialize an instance of the Trivia class
:param with_token: If True then the instance will uses a session token
"""
self.token = get_token() if with_token else None
def request(self, num_questions: int, category: Category = None,
diffculty: Diffculty = None, type_: Type = None) -> dict:
"""
Send an api request to https://opentdb.com/
Limitations:
Only 1 Category can be requested per API Call.
To get questions from any category, don't specify a category.
A Maximum of 50 Questions can be retrieved per call.
:param num_questions: the number of questions,
must be between 1 and 50 (inclusive)
:param category: the category of the question. None for any category
        :param diffculty: the difficulty of the question. None for any difficulty
:param type_: the type of the question. None for any type
:return: the api call response
:rtype: dict
:raises: ValueError when the num_questions parameter is less than 1
or greater than 50
"""
result = get(
self.__url(num_questions, category, diffculty, type_)).json()
if result['response_code'] in (3, 4):
self.token = get_token()
return self.request(num_questions, category, diffculty, type_)
else:
return decode_dict(result)
async def request_async(self, session: ClientSession, close_session: bool,
num_questions: int, category: Category = None,
diffculty: Diffculty = None,
type_: Type = None) -> dict:
"""
Send an api request to https://opentdb.com/
Limitations:
Only 1 Category can be requested per API Call.
To get questions from any category, don't specify a category.
A Maximum of 50 Questions can be retrieved per call.
:param session: an Aiohttp client session.
:param close_session: True to close the session after the request.
:param num_questions: the number of questions,
must be between 1 and 50 (inclusive)
:param category: the category of the question. None for any category
        :param diffculty: the difficulty of the question. None for any difficulty
:param type_: the type of the question. None for any type
:return: the api call response
:rtype: dict
:raises: ValueError when the num_questions parameter is less than 1
or greater than 50
:raises ClientResponseError if the HTTP response code isn't 200
"""
try:
return await self.__request(
session, num_questions, category, diffculty, type_)
finally:
if close_session:
session.close()
async def __request(self, session: ClientSession, num_questions: int,
category: Category = None, diffculty: Diffculty = None,
type_: Type = None) -> dict:
"""
Helper method for the async request.
"""
resp = await make_request(
session, self.__url(num_questions, category, diffculty, type_))
result = await resp.json()
if result['response_code'] in (3, 4):
self.token = get_token()
return await self.__request(
session, num_questions, category, diffculty, type_)
else:
return decode_dict(result)
def __url(self, num_questions, category, diffculty, type_):
"""
Helper method to generate request url.
"""
if num_questions < 1 or num_questions > 50:
raise ValueError
url = 'https://opentdb.com/api.php?amount={}&encode=base64'.format(
num_questions)
if category is not None:
url += '&category={}'.format(category.value)
if diffculty is not None:
url += '&difficulty={}'.format(diffculty.value)
if type_ is not None:
url += '&type={}'.format(type_.value)
if self.token is not None:
url += '&token={}'.format(self.token)
return url
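# Illustrative usage (added example; the enum member names are assumptions about
# pytrivia.enums and may differ):
#
#     trivia = Trivia(with_token=True)
#     payload = trivia.request(10, category=Category.Books, type_=Type.Multiple_Choice)
#     for question in payload["results"]:
#         print(question["question"])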
| 3.71875 | 4 |
utils.py | py-ranoid/practical-nlp | 0 | 5841 | import requests
import tarfile
import os
def download_file(url, directory):
local_filename = os.path.join(directory, url.split('/')[-1])
print ("Downloading %s --> %s"%(url, local_filename))
with requests.get(url, stream=True) as r:
r.raise_for_status()
with open(local_filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=8192):
f.write(chunk)
return local_filename
def extract_tar(fpath):
fname_dir, fname = os.path.split(fpath)
dest_path = os.path.join(fname_dir,fname.split('.')[0])
print ("Extracting %s --> %s"%(fpath, dest_path))
if fname.endswith("tar.gz"):
tar = tarfile.open(fpath, "r:gz")
tar.extractall(path=fname_dir)
tar.close()
elif fname.endswith("tar"):
        tar = tarfile.open(fpath, "r:")  # use fpath (full path); fname is only the basename
tar.extractall(path=fname_dir)
tar.close()
return dest_path
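# Example (added sketch; the URL and directory are hypothetical):
#
#     archive = download_file("http://example.com/data/corpus.tar.gz", "/tmp")
#     corpus_dir = extract_tar(archive)   # -> "/tmp/corpus"
#     list_files(corpus_dir)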
def list_files(startpath):
for root, dirs, files in os.walk(startpath):
level = root.replace(startpath, '').count(os.sep)
indent = ' ' * 4 * (level)
print('{}{}/'.format(indent, os.path.basename(root)))
subindent = ' ' * 4 * (level + 1)
for f in files:
print('{}{}'.format(subindent, f)) | 3.15625 | 3 |
spritecss/config.py | yostudios/Spritemapper | 49 | 5842 | import shlex
from os import path
from itertools import imap, ifilter
from urlparse import urljoin
from .css import CSSParser, iter_events
def parse_config_stmt(line, prefix="spritemapper."):
line = line.strip()
if line.startswith(prefix) and "=" in line:
(key, value) = line.split("=", 1)
return (key[len(prefix):].strip(), value.strip())
def iter_config_stmts(data):
return ifilter(None, imap(parse_config_stmt, data.splitlines()))
def iter_css_config(parser):
for ev in iter_events(parser, lexemes=("comment",)):
for v in iter_config_stmts(ev.comment):
yield v
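# Example of the comment syntax parsed above (added note): a CSS comment such as
#
#     /* spritemapper.output_image = sprites/all.png */
#
# yields the key/value pair ("output_image", "sprites/all.png").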
class CSSConfig(object):
def __init__(self, parser=None, base=None, root=None, fname=None):
if fname and root is None:
root = path.dirname(fname)
self.root = root
self._data = dict(base) if base else {}
if parser is not None:
self._data.update(iter_css_config(parser))
def __iter__(self):
# this is mostly so you can go CSSConfig(base=CSSConfig(..))
return self._data.iteritems()
@classmethod
def from_file(cls, fname):
with open(fname, "rb") as fp:
return cls(CSSParser.from_file(fp), fname=fname)
def normpath(self, p):
"""Normalize a possibly relative path *p* to the root."""
return path.normpath(path.join(self.root, p))
def absurl(self, p):
"""Make an absolute reference to *p* from any configured base URL."""
base = self.base_url
if base:
p = urljoin(base, p)
return p
@property
def base_url(self):
return self._data.get("base_url")
@property
def sprite_dirs(self):
if "sprite_dirs" not in self._data:
return
elif self._data.get("output_image"):
raise RuntimeError("cannot have sprite_dirs "
"when output_image is set")
sdirs = shlex.split(self._data["sprite_dirs"])
return map(self.normpath, sdirs)
@property
def output_image(self):
if "output_image" in self._data:
return self.normpath(self._data["output_image"])
@property
def is_mapping_recursive(self):
rv = self._data.get("recursive")
if rv and self._data.get("output_image"):
raise RuntimeError("cannot have recursive spritemapping "
"when output_image is set")
elif rv is None:
return not self._data.get("output_image")
else:
return bool(rv)
@property
def padding(self):
return self._data.get("padding", (1, 1))
@property
def anneal_steps(self):
return int(self._data.get("anneal_steps", 9200))
def get_spritemap_out(self, dn):
"Get output image filename for spritemap directory *dn*."
if "output_image" in self._data:
return self.output_image
return dn + ".png"
def get_spritemap_url(self, fname):
"Get output image URL for spritemap *fname*."
return self.absurl(path.relpath(fname, self.root))
def get_css_out(self, fname):
"Get output image filename for spritemap directory *fname*."
(dirn, base) = path.split(fname)
if "output_css" in self._data:
(base, ext) = path.splitext(base)
names = dict(filename=fname, dirname=dirn,
basename=base, extension=ext)
return self.normpath(self._data["output_css"].format(**names))
else:
return path.join(dirn, "sm_" + base)
def print_config(fname):
from pprint import pprint
from .css import CSSParser
with open(fname, "rb") as fp:
print "%s\n%s\n" % (fname, "=" * len(fname))
pprint(dict(iter_css_config(CSSParser.read_file(fp))))
print
def main():
import sys
for fn in sys.argv[1:]:
print_config(fn)
if __name__ == "__main__":
main()
| 2.65625 | 3 |
plotting/make_bar_graph.py | DanielTakeshi/debridement-code | 3 | 5843 | """ A bar graph.
(c) September 2017 by <NAME>
"""
import argparse
from collections import defaultdict
from keras.models import Sequential
from keras.layers import Dense, Activation
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import sys
np.set_printoptions(suppress=True, linewidth=200)
# Some matplotlib settings.
plt.style.use('seaborn-darkgrid')
titlesize = 21
labelsize = 17
legendsize = 15
ticksize = 15
bar_width = 0.80
opacity = 1.0
error_config = {'ecolor': '0.0', 'linewidth':3.0}
def deprecated():
"""
This is a deprecated method, only to show how to possibly combine these into
one plot. However, I find this unwieldly.
"""
fig, ax = plt.subplots()
bar_width = 0.80
opacity = 0.5
error_config = {'ecolor': '0.3'}
rects1 = plt.bar(np.array([0,1]), means_lin, bar_width,
alpha=opacity,
color='b',
yerr=std_lin,
error_kw=error_config,
label='Lin')
rects2 = plt.bar(np.array([3,4,5,6,7]), means_rfs, bar_width,
alpha=opacity,
color='r',
yerr=std_rfs,
error_kw=error_config,
label='RF')
rects3 = plt.bar(np.array([9,10]), means_dnn, bar_width,
alpha=opacity,
color='y',
yerr=std_dnn,
error_kw=error_config,
label='DNN')
plt.xticks(np.arange(11) + bar_width / 2,
('A','B','','D','E','F','G','','','J','K'))
plt.xlabel('Group')
plt.ylabel('Scores')
plt.title('Scores by group and gender')
plt.tight_layout()
plt.legend()
plt.savefig('figures/validation_set_results.png')
def plot(results, vv):
lin_mean = []
lin_std = []
lin_keys = []
rfs_mean = []
rfs_std = []
rfs_keys = []
dnn_mean = []
dnn_std = []
dnn_keys = []
sorted_keys = sorted(results.keys())
for key in sorted_keys:
info = [ss['loss'] for ss in results[key]]
if 'Lin' in key:
lin_mean.append(np.mean(info))
lin_std.append(np.std(info))
lin_keys.append(key)
elif 'RFs' in key:
rfs_mean.append(np.mean(info))
rfs_std.append(np.std(info))
rfs_keys.append(key)
elif 'DNN' in key:
dnn_mean.append(np.mean(info))
dnn_std.append(np.std(info))
dnn_keys.append(key)
print("\nlin_mean: {}".format(lin_mean))
print("lin_std: {}".format(lin_std))
print("lin_keys: {}".format(lin_keys))
print("\nrfs_mean: {}".format(rfs_mean))
print("rfs_std: {}".format(rfs_std))
print("rfs_keys: {}".format(rfs_keys))
print("\nDNN results:")
for (mean,std,key) in zip(dnn_mean,dnn_std,dnn_keys):
print("{:.2f}\t{:.2f}\t{}".format(mean,std,key))
# sys.exit()
# Use this to determine which DNN models should be here.
dnn_threshold = 3.0
real_index = 0
for ii,(mean,std,key) in enumerate(zip(dnn_mean,dnn_std,dnn_keys)):
if mean > dnn_threshold:
continue
real_index += 1
# Gah! Now I can finally make the bar chart. I think it's easiest to have it
# split across three different subplots, one per algorithm category.
width_ratio = [len(lin_keys),len(rfs_keys),real_index]
fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(16,5),
gridspec_kw={'width_ratios':width_ratio})
for ii,(mean,std,key) in enumerate(zip(lin_mean,lin_std,lin_keys)):
ax[0].bar(np.array([ii]), mean, bar_width,
alpha=opacity,
yerr=std,
error_kw=error_config,
label=key[4:])
for ii,(mean,std,key) in enumerate(zip(rfs_mean,rfs_std,rfs_keys)):
ax[1].bar(np.array([ii]), mean, bar_width,
alpha=opacity,
yerr=std,
error_kw=error_config,
label=key[4:])
real_index = 0
for ii,(mean,std,key) in enumerate(zip(dnn_mean,dnn_std,dnn_keys)):
if mean > dnn_threshold:
continue
ax[2].bar(np.array([real_index]), mean, bar_width,
alpha=opacity,
yerr=std,
error_kw=error_config,
label=key[4:])
real_index += 1
# Some rather tedious but necessary stuff to make it publication-quality.
ax[0].set_title('Linear', fontsize=titlesize)
ax[1].set_title('Random Forests', fontsize=titlesize)
ax[2].set_title('Deep Neural Networks', fontsize=titlesize)
ax[0].set_ylabel('Average Squared $L_2$, 10-Fold CV', fontsize=labelsize)
for i in range(3):
ax[i].set_xlabel('Algorithm', fontsize=labelsize)
ax[i].set_ylim([0.0,9.0])
ax[i].tick_params(axis='y', labelsize=ticksize)
ax[i].set_xticklabels([])
ax[0].legend(loc="best", ncol=1, prop={'size':legendsize})
ax[1].legend(loc="best", ncol=2, prop={'size':legendsize})
ax[2].legend(loc="best", ncol=3, prop={'size':legendsize})
plt.tight_layout()
plt.savefig('figures/validation_set_results_v'+vv+'.png')
if __name__ == "__main__":
pp = argparse.ArgumentParser()
pp.add_argument('--version', type=int)
pp.add_argument('--kfolds', type=int, default=10)
args = pp.parse_args()
assert args.version is not None
VERSION = str(args.version).zfill(2)
file_name = 'results/results_kfolds10_v'+VERSION+'.npy'
results = np.load(file_name)[()]
print("results has keys: {}".format(results.keys()))
plot(results, VERSION)
| 2.796875 | 3 |
setup.py | tzengerink/groceries-api | 0 | 5844 | <filename>setup.py
#!/usr/bin/env python
from setuptools import find_packages, setup
import os
import re
ROOT = os.path.dirname(__file__)
VERSION_RE = re.compile(r'''__version__ = \'([0-9.]+)\'''')
def get_version():
init = open(os.path.join(ROOT, 'application', '__init__.py')).read()
return VERSION_RE.search(init).group(1)
setup(
name='groceries-api',
version=get_version(),
license='MIT',
packages=find_packages(),
include_package_data=True,
install_requires=[
'alembic==0.7.5.post2',
'APScheduler==3.1.0',
'Flask==0.10.1',
'Flask-Cors==2.0.0',
'Flask-SQLAlchemy==2.0',
'gunicorn==19.3.0',
'psycopg2==2.6.1',
'PyJWT==1.1.0',
'requests==2.8.1',
'six==1.9.0',
],
extras_require={
'dev': {
'coverage==3.7.1',
'coveralls==0.5',
'flake8==2.4.0',
'mock==1.0.1',
'pytest==2.7.0',
'tox==2.1.1',
},
},
)
| 1.734375 | 2 |
toontown/suit/DistributedLawbotBoss.py | SuperM0use24/TT-CL-Edition | 0 | 5845 | from direct.showbase.ShowBase import *
from direct.interval.IntervalGlobal import *
from toontown.battle.BattleProps import *
from direct.distributed.ClockDelta import *
from direct.showbase.PythonUtil import Functor
from direct.showbase.PythonUtil import StackTrace
from direct.gui.DirectGui import *
from panda3d.core import *
from libotp import *
from direct.fsm import FSM
from direct.fsm import ClassicFSM
from direct.fsm import State
from direct.directnotify import DirectNotifyGlobal
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import ToontownBattleGlobals
import DistributedBossCog
from toontown.toonbase import TTLocalizer
import SuitDNA
from toontown.toon import Toon
from toontown.battle import BattleBase
from direct.directutil import Mopath
from direct.showutil import Rope
from toontown.distributed import DelayDelete
from toontown.battle import MovieToonVictory
from toontown.building import ElevatorUtils
from toontown.battle import RewardPanel
from toontown.toon import NPCToons
from direct.task import Task
import random
import math
from toontown.coghq import CogDisguiseGlobals
from toontown.building import ElevatorConstants
from toontown.toonbase import ToontownTimer
OneBossCog = None
class DistributedLawbotBoss(DistributedBossCog.DistributedBossCog, FSM.FSM):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedLawbotBoss')
debugPositions = False
def __init__(self, cr):
self.notify.debug('----- __init___')
DistributedBossCog.DistributedBossCog.__init__(self, cr)
FSM.FSM.__init__(self, 'DistributedLawbotBoss')
self.lawyers = []
self.lawyerRequest = None
self.bossDamage = 0
self.attackCode = None
self.attackAvId = 0
self.recoverRate = 0
self.recoverStartTime = 0
self.bossDamageMovie = None
self.everThrownPie = 0
self.battleThreeMusicTime = 0
self.insidesANodePath = None
self.insidesBNodePath = None
self.strafeInterval = None
self.onscreenMessage = None
self.bossMaxDamage = ToontownGlobals.LawbotBossMaxDamage
self.elevatorType = ElevatorConstants.ELEVATOR_CJ
self.gavels = {}
self.chairs = {}
self.cannons = {}
self.useCannons = 1
self.juryBoxIval = None
self.juryTimer = None
self.witnessToon = None
self.witnessToonOnstage = False
self.numToonJurorsSeated = 0
self.mainDoor = None
self.reflectedMainDoor = None
self.panFlashInterval = None
self.panDamage = ToontownGlobals.LawbotBossDefensePanDamage
if base.config.GetBool('lawbot-boss-cheat', 0):
self.panDamage = 25
self.evidenceHitSfx = None
self.toonUpSfx = None
self.bonusTimer = None
self.warningSfx = None
self.juryMovesSfx = None
self.baseColStashed = False
self.battleDifficulty = 0
self.bonusWeight = 0
self.numJurorsLocalToonSeated = 0
self.cannonIndex = -1
return
def announceGenerate(self):
global OneBossCog
self.notify.debug('----- announceGenerate')
DistributedBossCog.DistributedBossCog.announceGenerate(self)
self.setName(TTLocalizer.LawbotBossName)
nameInfo = TTLocalizer.BossCogNameWithDept % {'name': self._name,
'dept': SuitDNA.getDeptFullname(self.style.dept)}
self.setDisplayName(nameInfo)
self.piesRestockSfx = loader.loadSfx('phase_5/audio/sfx/LB_receive_evidence.ogg')
self.rampSlideSfx = loader.loadSfx('phase_9/audio/sfx/CHQ_VP_ramp_slide.ogg')
self.evidenceHitSfx = loader.loadSfx('phase_11/audio/sfx/LB_evidence_hit.ogg')
self.warningSfx = loader.loadSfx('phase_9/audio/sfx/CHQ_GOON_tractor_beam_alarmed.ogg')
self.juryMovesSfx = loader.loadSfx('phase_11/audio/sfx/LB_jury_moves.ogg')
self.toonUpSfx = loader.loadSfx('phase_11/audio/sfx/LB_toonup.ogg')
self.strafeSfx = []
for i in xrange(10):
self.strafeSfx.append(loader.loadSfx('phase_3.5/audio/sfx/SA_shred.ogg'))
render.setTag('pieCode', str(ToontownGlobals.PieCodeNotBossCog))
insidesA = CollisionPolygon(Point3(4.0, -2.0, 5.0), Point3(-4.0, -2.0, 5.0), Point3(-4.0, -2.0, 0.5), Point3(4.0, -2.0, 0.5))
insidesANode = CollisionNode('BossZap')
insidesANode.addSolid(insidesA)
insidesANode.setCollideMask(ToontownGlobals.PieBitmask | ToontownGlobals.WallBitmask)
self.insidesANodePath = self.axle.attachNewNode(insidesANode)
self.insidesANodePath.setTag('pieCode', str(ToontownGlobals.PieCodeBossInsides))
self.insidesANodePath.stash()
insidesB = CollisionPolygon(Point3(-4.0, 2.0, 5.0), Point3(4.0, 2.0, 5.0), Point3(4.0, 2.0, 0.5), Point3(-4.0, 2.0, 0.5))
insidesBNode = CollisionNode('BossZap')
insidesBNode.addSolid(insidesB)
insidesBNode.setCollideMask(ToontownGlobals.PieBitmask | ToontownGlobals.WallBitmask)
self.insidesBNodePath = self.axle.attachNewNode(insidesBNode)
self.insidesBNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeBossInsides))
self.insidesBNodePath.stash()
target = CollisionTube(0, -1, 4, 0, -1, 9, 3.5)
targetNode = CollisionNode('BossZap')
targetNode.addSolid(target)
targetNode.setCollideMask(ToontownGlobals.PieBitmask)
self.targetNodePath = self.pelvis.attachNewNode(targetNode)
self.targetNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeBossCog))
shield = CollisionTube(0, 1, 4, 0, 1, 7, 3.5)
shieldNode = CollisionNode('BossZap')
shieldNode.addSolid(shield)
shieldNode.setCollideMask(ToontownGlobals.PieBitmask | ToontownGlobals.CameraBitmask)
shieldNodePath = self.pelvis.attachNewNode(shieldNode)
disk = loader.loadModel('phase_9/models/char/bossCog-gearCollide')
disk.find('**/+CollisionNode').setName('BossZap')
disk.reparentTo(self.pelvis)
disk.setZ(0.8)
self.loadEnvironment()
self.__makeWitnessToon()
self.__loadMopaths()
localAvatar.chatMgr.chatInputSpeedChat.addCJMenu()
if OneBossCog != None:
self.notify.warning('Multiple BossCogs visible.')
OneBossCog = self
return
def disable(self):
global OneBossCog
self.notify.debug('----- disable')
DistributedBossCog.DistributedBossCog.disable(self)
self.request('Off')
self.unloadEnvironment()
self.__cleanupWitnessToon()
self.__unloadMopaths()
self.__clearOnscreenMessage()
taskMgr.remove(self.uniqueName('PieAdvice'))
self.__cleanupStrafe()
self.__cleanupJuryBox()
render.clearTag('pieCode')
self.targetNodePath.detachNode()
self.cr.relatedObjectMgr.abortRequest(self.lawyerRequest)
self.lawyerRequest = None
self.betweenBattleMusic.stop()
self.promotionMusic.stop()
self.stingMusic.stop()
self.battleTwoMusic.stop()
self.battleThreeMusic.stop()
self.epilogueMusic.stop()
if self.juryTimer:
self.juryTimer.destroy()
del self.juryTimer
if self.bonusTimer:
self.bonusTimer.destroy()
del self.bonusTimer
localAvatar.chatMgr.chatInputSpeedChat.removeCJMenu()
if OneBossCog == self:
OneBossCog = None
return
def delete(self):
self.notify.debug('----- delete')
DistributedBossCog.DistributedBossCog.delete(self)
def d_hitBoss(self, bossDamage):
self.notify.debug('----- d_hitBoss')
self.sendUpdate('hitBoss', [bossDamage])
def d_healBoss(self, bossHeal):
self.notify.debug('----- d_bossHeal')
self.sendUpdate('healBoss', [bossHeal])
def d_hitBossInsides(self):
self.notify.debug('----- d_hitBossInsides')
self.sendUpdate('hitBossInsides', [])
def d_hitDefensePan(self):
self.notify.debug('----- d_hitDefensePan')
self.sendUpdate('hitDefensePan', [])
def d_hitProsecutionPan(self):
self.notify.debug('----- d_hitProsecutionPan')
self.sendUpdate('hitProsecutionPan', [])
def d_hitToon(self, toonId):
self.notify.debug('----- d_hitToon')
self.sendUpdate('hitToon', [toonId])
def gotToon(self, toon):
stateName = self.state
if stateName == 'Elevator':
self.placeToonInElevator(toon)
def setLawyerIds(self, lawyerIds):
self.lawyers = []
self.cr.relatedObjectMgr.abortRequest(self.lawyerRequest)
self.lawyerRequest = self.cr.relatedObjectMgr.requestObjects(lawyerIds, allCallback=self.__gotLawyers)
def __gotLawyers(self, lawyers):
self.lawyerRequest = None
self.lawyers = lawyers
for i in xrange(len(self.lawyers)):
suit = self.lawyers[i]
suit.fsm.request('neutral')
suit.loop('neutral')
suit.setBossCogId(self.doId)
return
def setBossDamage(self, bossDamage, recoverRate, timestamp):
recoverStartTime = globalClockDelta.networkToLocalTime(timestamp)
self.bossDamage = bossDamage
self.recoverRate = recoverRate
self.recoverStartTime = recoverStartTime
taskName = 'RecoverBossDamage'
taskMgr.remove(taskName)
if self.bossDamageMovie:
if self.bossDamage >= self.bossMaxDamage:
self.notify.debug('finish the movie then transition to NearVictory')
self.bossDamageMovie.resumeUntil(self.bossDamageMovie.getDuration())
else:
self.bossDamageMovie.resumeUntil(self.bossDamage * self.bossDamageToMovie)
if self.recoverRate:
taskMgr.add(self.__recoverBossDamage, taskName)
self.makeScaleReflectDamage()
self.bossHealthBar.update(self.bossMaxDamage - bossDamage, self.bossMaxDamage)
def getBossDamage(self):
self.notify.debug('----- getBossDamage')
now = globalClock.getFrameTime()
elapsed = now - self.recoverStartTime
return max(self.bossDamage - self.recoverRate * elapsed / 60.0, 0)
def __recoverBossDamage(self, task):
self.notify.debug('----- __recoverBossDamage')
if self.bossDamageMovie:
self.bossDamageMovie.setT(self.getBossDamage() * self.bossDamageToMovie)
return Task.cont
def __walkToonToPromotion(self, toonId, delay, mopath, track, delayDeletes):
self.notify.debug('----- __walkToonToPromotion')
toon = base.cr.doId2do.get(toonId)
if toon:
destPos = toon.getPos()
self.placeToonInElevator(toon)
toon.wrtReparentTo(render)
ival = Sequence(Wait(delay), Func(toon.suit.setPlayRate, 1, 'walk'), Func(toon.suit.loop, 'walk'), toon.posInterval(1, Point3(0, 90, 20)), ParallelEndTogether(MopathInterval(mopath, toon), toon.posInterval(2, destPos, blendType='noBlend')), Func(toon.suit.loop, 'neutral'))
track.append(ival)
delayDeletes.append(DelayDelete.DelayDelete(toon, 'LawbotBoss.__walkToonToPromotion'))
def __walkSuitToPoint(self, node, fromPos, toPos):
self.notify.debug('----- __walkSuitToPoint')
vector = Vec3(toPos - fromPos)
distance = vector.length()
time = distance / (ToontownGlobals.SuitWalkSpeed * 1.8)
return Sequence(Func(node.setPos, fromPos), Func(node.headsUp, toPos), node.posInterval(time, toPos))
def __makeRollToBattleTwoMovie(self):
startPos = Point3(ToontownGlobals.LawbotBossBattleOnePosHpr[0], ToontownGlobals.LawbotBossBattleOnePosHpr[1], ToontownGlobals.LawbotBossBattleOnePosHpr[2])
if self.arenaSide:
topRampPos = Point3(*ToontownGlobals.LawbotBossTopRampPosB)
topRampTurnPos = Point3(*ToontownGlobals.LawbotBossTopRampTurnPosB)
p3Pos = Point3(*ToontownGlobals.LawbotBossP3PosB)
else:
topRampPos = Point3(*ToontownGlobals.LawbotBossTopRampPosA)
topRampTurnPos = Point3(*ToontownGlobals.LawbotBossTopRampTurnPosA)
p3Pos = Point3(*ToontownGlobals.LawbotBossP3PosA)
battlePos = Point3(ToontownGlobals.LawbotBossBattleTwoPosHpr[0], ToontownGlobals.LawbotBossBattleTwoPosHpr[1], ToontownGlobals.LawbotBossBattleTwoPosHpr[2])
battleHpr = VBase3(ToontownGlobals.LawbotBossBattleTwoPosHpr[3], ToontownGlobals.LawbotBossBattleTwoPosHpr[4], ToontownGlobals.LawbotBossBattleTwoPosHpr[5])
bossTrack = Sequence()
self.notify.debug('calling setPosHpr')
myInterval = camera.posHprInterval(8, Point3(-22, -100, 35), Point3(-10, -13, 0), startPos=Point3(-22, -90, 35), startHpr=Point3(-10, -13, 0), blendType='easeInOut')
chatTrack = Sequence(Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempJury1, CFSpeech), Func(camera.reparentTo, localAvatar), Func(camera.setPos, localAvatar.getOldCameraPos()), Func(camera.setHpr, 0, 0, 0), Func(self.releaseToons, 1))
bossTrack.append(Func(self.getGeomNode().setH, 180))
track, hpr = self.rollBossToPoint(startPos, None, battlePos, None, 0)
bossTrack.append(track)
track, hpr = self.rollBossToPoint(battlePos, hpr, battlePos, battleHpr, 0)
self.makeToonsWait()
finalPodiumPos = Point3(self.podium.getX(), self.podium.getY(), self.podium.getZ() + ToontownGlobals.LawbotBossBattleTwoPosHpr[2])
finalReflectedPodiumPos = Point3(self.reflectedPodium.getX(), self.reflectedPodium.getY(), self.reflectedPodium.getZ() + ToontownGlobals.LawbotBossBattleTwoPosHpr[2])
return Sequence(chatTrack, bossTrack, Func(self.getGeomNode().setH, 0), Parallel(self.podium.posInterval(5.0, finalPodiumPos), self.reflectedPodium.posInterval(5.0, finalReflectedPodiumPos), Func(self.stashBoss), self.posInterval(5.0, battlePos), Func(taskMgr.doMethodLater, 0.01, self.unstashBoss, 'unstashBoss')), name=self.uniqueName('BattleTwoMovie'))
def __makeRollToBattleThreeMovie(self):
startPos = Point3(ToontownGlobals.LawbotBossBattleTwoPosHpr[0], ToontownGlobals.LawbotBossBattleTwoPosHpr[1], ToontownGlobals.LawbotBossBattleTwoPosHpr[2])
battlePos = Point3(ToontownGlobals.LawbotBossBattleThreePosHpr[0], ToontownGlobals.LawbotBossBattleThreePosHpr[1], ToontownGlobals.LawbotBossBattleThreePosHpr[2])
battleHpr = VBase3(ToontownGlobals.LawbotBossBattleThreePosHpr[3], ToontownGlobals.LawbotBossBattleThreePosHpr[4], ToontownGlobals.LawbotBossBattleThreePosHpr[5])
bossTrack = Sequence()
myInterval = camera.posHprInterval(8, Point3(-22, -100, 35), Point3(-10, -13, 0), startPos=Point3(-22, -90, 35), startHpr=Point3(-10, -13, 0), blendType='easeInOut')
chatTrack = Sequence(Func(self.setChatAbsolute, TTLocalizer.LawbotBossTrialChat1, CFSpeech), Func(camera.reparentTo, localAvatar), Func(camera.setPos, localAvatar.getOldCameraPos()), Func(camera.setHpr, 0, 0, 0), Func(self.releaseToons, 1))
bossTrack.append(Func(self.getGeomNode().setH, 180))
bossTrack.append(Func(self.loop, 'Ff_neutral'))
track, hpr = self.rollBossToPoint(startPos, None, battlePos, None, 0)
bossTrack.append(track)
track, hpr = self.rollBossToPoint(battlePos, hpr, battlePos, battleHpr, 0)
self.makeToonsWait()
return Sequence(chatTrack, bossTrack, Func(self.getGeomNode().setH, 0), name=self.uniqueName('BattleTwoMovie'))
def toNeutralMode(self):
if self.cr:
place = self.cr.playGame.getPlace()
if place and hasattr(place, 'fsm'):
place.setState('waitForBattle')
def makeToonsWait(self):
self.notify.debug('makeToonsWait')
for toonId in self.involvedToons:
toon = self.cr.doId2do.get(toonId)
if toon:
toon.stopLookAround()
toon.stopSmooth()
if self.hasLocalToon():
self.toMovieMode()
for toonId in self.involvedToons:
toon = self.cr.doId2do.get(toonId)
if toon:
toon.loop('neutral')
def makeEndOfBattleMovie(self, hasLocalToon):
name = self.uniqueName('Drop')
seq = Sequence(name=name)
seq += [Wait(0.0)]
if hasLocalToon:
seq += [Func(self.show),
Func(camera.reparentTo, localAvatar),
Func(camera.setPos, localAvatar.getOldCameraPos()),
Func(camera.setHpr, 0, 0, 0)]
seq.append(Func(self.setChatAbsolute, TTLocalizer.LawbotBossPassExam, CFSpeech))
seq.append(Wait(5.0))
seq.append(Func(self.clearChat))
return seq
def __makeBossDamageMovie(self):
self.notify.debug('---- __makeBossDamageMovie')
startPos = Point3(ToontownGlobals.LawbotBossBattleThreePosHpr[0], ToontownGlobals.LawbotBossBattleThreePosHpr[1], ToontownGlobals.LawbotBossBattleThreePosHpr[2])
startHpr = Point3(*ToontownGlobals.LawbotBossBattleThreeHpr)
bottomPos = Point3(*ToontownGlobals.LawbotBossBottomPos)
deathPos = Point3(*ToontownGlobals.LawbotBossDeathPos)
self.setPosHpr(startPos, startHpr)
bossTrack = Sequence()
bossTrack.append(Func(self.loop, 'Ff_neutral'))
track, hpr = self.rollBossToPoint(startPos, startHpr, bottomPos, None, 1)
bossTrack.append(track)
track, hpr = self.rollBossToPoint(bottomPos, startHpr, deathPos, None, 1)
bossTrack.append(track)
duration = bossTrack.getDuration()
return bossTrack
def __showOnscreenMessage(self, text):
self.notify.debug('----- __showOnscreenmessage')
if self.onscreenMessage:
self.onscreenMessage.destroy()
self.onscreenMessage = None
self.onscreenMessage = DirectLabel(text=text, text_fg=VBase4(1, 1, 1, 1), text_align=TextNode.ACenter, relief=None, pos=(0, 0, 0.35), scale=0.1)
return
def __clearOnscreenMessage(self):
if self.onscreenMessage:
self.onscreenMessage.destroy()
self.onscreenMessage = None
return
def __showWaitingMessage(self, task):
self.notify.debug('----- __showWaitingMessage')
self.__showOnscreenMessage(TTLocalizer.BuildingWaitingForVictors)
def loadEnvironment(self):
self.notify.debug('----- loadEnvironment')
DistributedBossCog.DistributedBossCog.loadEnvironment(self)
self.geom = loader.loadModel('phase_11/models/lawbotHQ/LawbotCourtroom3')
self.geom.setPos(0, 0, -71.601)
self.geom.setScale(1)
self.elevatorEntrance = self.geom.find('**/elevator_origin')
self.elevatorEntrance.getChildren().detach()
self.elevatorEntrance.setScale(1)
elevatorModel = loader.loadModel('phase_11/models/lawbotHQ/LB_Elevator')
elevatorModel.reparentTo(self.elevatorEntrance)
self.setupElevator(elevatorModel)
self.promotionMusic = base.loader.loadMusic('phase_7/audio/bgm/encntr_suit_winning_indoor.ogg')
self.betweenBattleMusic = base.loader.loadMusic('phase_9/audio/bgm/encntr_toon_winning.ogg')
self.battleTwoMusic = base.loader.loadMusic('phase_11/audio/bgm/LB_juryBG.ogg')
floor = self.geom.find('**/MidVaultFloor1')
if floor.isEmpty():
floor = self.geom.find('**/CR3_Floor')
self.evFloor = self.replaceCollisionPolysWithPlanes(floor)
self.evFloor.reparentTo(self.geom)
self.evFloor.setName('floor')
plane = CollisionPlane(Plane(Vec3(0, 0, 1), Point3(0, 0, -50)))
planeNode = CollisionNode('dropPlane')
planeNode.addSolid(plane)
planeNode.setCollideMask(ToontownGlobals.PieBitmask)
self.geom.attachNewNode(planeNode)
self.door3 = self.geom.find('**/SlidingDoor1/')
if self.door3.isEmpty():
self.door3 = self.geom.find('**/interior/CR3_Door')
self.mainDoor = self.geom.find('**/Door_1')
if not self.mainDoor.isEmpty():
itemsToHide = ['interior/Door_1']
for str in itemsToHide:
stuffToHide = self.geom.find('**/%s' % str)
if not stuffToHide.isEmpty():
self.notify.debug('found %s' % stuffToHide)
stuffToHide.wrtReparentTo(self.mainDoor)
else:
self.notify.debug('not found %s' % stuffToHide)
self.reflectedMainDoor = self.geom.find('**/interiorrefl/CR3_Door')
if not self.reflectedMainDoor.isEmpty():
itemsToHide = ['Reflections/Door_1']
for str in itemsToHide:
stuffToHide = self.geom.find('**/%s' % str)
if not stuffToHide.isEmpty():
self.notify.debug('found %s' % stuffToHide)
stuffToHide.wrtReparentTo(self.reflectedMainDoor)
else:
self.notify.debug('not found %s' % stuffToHide)
self.geom.reparentTo(render)
self.loadWitnessStand()
self.loadScale()
self.scaleNodePath.stash()
self.loadJuryBox()
self.loadPodium()
ug = self.geom.find('**/Reflections')
ug.setBin('ground', -10)
def loadJuryBox(self):
self.juryBox = self.geom.find('**/JuryBox')
juryBoxPos = self.juryBox.getPos()
newPos = juryBoxPos - Point3(*ToontownGlobals.LawbotBossJuryBoxRelativeEndPos)
if not self.debugPositions:
self.juryBox.setPos(newPos)
self.reflectedJuryBox = self.geom.find('**/JuryBox_Geo_Reflect')
reflectedJuryBoxPos = self.reflectedJuryBox.getPos()
newReflectedPos = reflectedJuryBoxPos - Point3(*ToontownGlobals.LawbotBossJuryBoxRelativeEndPos)
if not self.debugPositions:
self.reflectedJuryBox.setPos(newReflectedPos)
if not self.reflectedJuryBox.isEmpty():
if self.debugPositions:
self.reflectedJuryBox.show()
self.reflectedJuryBox.setZ(self.reflectedJuryBox.getZ() + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[2])
def loadPodium(self):
self.podium = self.geom.find('**/Podium')
newZ = self.podium.getZ() - ToontownGlobals.LawbotBossBattleTwoPosHpr[2]
if not self.debugPositions:
self.podium.setZ(newZ)
self.reflectedPodium = self.geom.find('**/Podium_Geo1_Refl')
reflectedZ = self.reflectedPodium.getZ()
if not self.debugPositions:
self.reflectedPodium.setZ(reflectedZ)
if not self.reflectedPodium.isEmpty():
if self.debugPositions:
self.reflectedPodium.show()
def loadCannons(self):
pass
def loadWitnessStand(self):
self.realWitnessStand = self.geom.find('**/WitnessStand')
if not self.realWitnessStand.isEmpty():
pass
self.reflectedWitnessStand = self.geom.find('**/Witnessstand_Geo_Reflect')
if not self.reflectedWitnessStand.isEmpty():
pass
colNode = self.realWitnessStand.find('**/witnessStandCollisions/Witnessstand_Collision')
colNode.setName('WitnessStand')
def loadScale(self):
self.useProgrammerScale = base.config.GetBool('want-injustice-scale-debug', 0)
if self.useProgrammerScale:
self.loadScaleOld()
else:
self.loadScaleNew()
def __debugScale(self):
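# Debug helper: dump the positions of the scale pans, beam, and locators in
# several coordinate frames (local, scale-relative, render) to the debug log.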
prosecutionPanPos = self.prosecutionPanNodePath.getPos()
origin = Point3(0, 0, 0)
prosecutionPanRelPos = self.scaleNodePath.getRelativePoint(self.prosecutionPanNodePath, origin)
panRenderPos = render.getRelativePoint(self.prosecutionPanNodePath, origin)
self.notify.debug('prosecutionPanPos = %s' % prosecutionPanPos)
self.notify.debug('prosecutionPanRelPos = %s' % prosecutionPanRelPos)
self.notify.debug('panRenderPos = %s' % panRenderPos)
prosecutionLocatorPos = self.prosecutionLocator.getPos()
prosecutionLocatorRelPos = self.scaleNodePath.getRelativePoint(self.prosecutionLocator, origin)
locatorRenderPos = render.getRelativePoint(self.prosecutionLocator, origin)
self.notify.debug('prosecutionLocatorPos = %s ' % prosecutionLocatorPos)
self.notify.debug('prosecutionLocatorRelPos = %s ' % prosecutionLocatorRelPos)
self.notify.debug('locatorRenderPos = %s' % locatorRenderPos)
beamPos = self.beamNodePath.getPos()
beamRelPos = self.scaleNodePath.getRelativePoint(self.beamNodePath, origin)
beamRenderPos = render.getRelativePoint(self.beamNodePath, origin)
self.notify.debug('beamPos = %s' % beamPos)
self.notify.debug('beamRelPos = %s' % beamRelPos)
self.notify.debug('beamRenderPos = %s' % beamRenderPos)
beamBoundsCenter = self.beamNodePath.getBounds().getCenter()
self.notify.debug('beamBoundsCenter = %s' % beamBoundsCenter)
beamLocatorBounds = self.beamLocator.getBounds()
beamLocatorPos = beamLocatorBounds.getCenter()
self.notify.debug('beamLocatorPos = %s' % beamLocatorPos)
def loadScaleNew(self):
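# Load the injustice-scale model, tag the defense/prosecution pans with their
# pie codes, and reparent each pan to the beam at its locator position.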
self.scaleNodePath = loader.loadModel('phase_11/models/lawbotHQ/scale')
self.beamNodePath = self.scaleNodePath.find('**/scaleBeam')
self.defensePanNodePath = self.scaleNodePath.find('**/defensePan')
self.prosecutionPanNodePath = self.scaleNodePath.find('**/prosecutionPan')
self.defenseColNodePath = self.scaleNodePath.find('**/DefenseCol')
self.defenseColNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeDefensePan))
self.prosecutionColNodePath = self.scaleNodePath.find('**/ProsecutionCol')
self.prosecutionColNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeProsecutionPan))
self.standNodePath = self.scaleNodePath.find('**/scaleStand')
self.scaleNodePath.setPosHpr(*ToontownGlobals.LawbotBossInjusticePosHpr)
self.defenseLocator = self.scaleNodePath.find('**/DefenseLocator')
defenseLocBounds = self.defenseLocator.getBounds()
defenseLocPos = defenseLocBounds.getCenter()
self.notify.debug('defenseLocatorPos = %s' % defenseLocPos)
self.defensePanNodePath.setPos(defenseLocPos)
self.defensePanNodePath.reparentTo(self.beamNodePath)
self.notify.debug('defensePanNodePath.getPos()=%s' % self.defensePanNodePath.getPos())
self.prosecutionLocator = self.scaleNodePath.find('**/ProsecutionLocator')
prosecutionLocBounds = self.prosecutionLocator.getBounds()
prosecutionLocPos = prosecutionLocBounds.getCenter()
self.notify.debug('prosecutionLocatorPos = %s' % prosecutionLocPos)
self.prosecutionPanNodePath.setPos(prosecutionLocPos)
self.prosecutionPanNodePath.reparentTo(self.beamNodePath)
self.beamLocator = self.scaleNodePath.find('**/StandLocator1')
beamLocatorBounds = self.beamLocator.getBounds()
beamLocatorPos = beamLocatorBounds.getCenter()
negBeamLocatorPos = -beamLocatorPos
self.notify.debug('beamLocatorPos = %s' % beamLocatorPos)
self.notify.debug('negBeamLocatorPos = %s' % negBeamLocatorPos)
self.beamNodePath.setPos(beamLocatorPos)
self.scaleNodePath.setScale(*ToontownGlobals.LawbotBossInjusticeScale)
self.scaleNodePath.wrtReparentTo(self.geom)
self.baseHighCol = self.scaleNodePath.find('**/BaseHighCol')
oldBitMask = self.baseHighCol.getCollideMask()
newBitMask = oldBitMask & ~ToontownGlobals.PieBitmask
newBitMask = newBitMask & ~ToontownGlobals.CameraBitmask
self.baseHighCol.setCollideMask(newBitMask)
self.defenseHighCol = self.scaleNodePath.find('**/DefenseHighCol')
self.defenseHighCol.stash()
self.defenseHighCol.setCollideMask(newBitMask)
self.baseTopCol = self.scaleNodePath.find('**/Scale_base_top_collision')
self.baseSideCol = self.scaleNodePath.find('**/Scale_base_side_col')
self.defenseLocator.hide()
self.prosecutionLocator.hide()
self.beamLocator.hide()
def loadScaleOld(self):
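# Programmer/debug variant (want-injustice-scale-debug): build the scale from
# simple colored blocks and collision tubes instead of loading the model.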
startingTilt = 0
self.scaleNodePath = NodePath('injusticeScale')
beamGeom = self.createBlock(0.25, 2, 0.125, -0.25, -2, -0.125, 0, 1.0, 0, 1.0)
self.beamNodePath = NodePath('scaleBeam')
self.beamNodePath.attachNewNode(beamGeom)
self.beamNodePath.setPos(0, 0, 3)
self.beamNodePath.reparentTo(self.scaleNodePath)
defensePanGeom = self.createBlock(0.5, 0.5, 0, -0.5, -0.5, -2, 0, 0, 1.0, 0.25)
self.defensePanNodePath = NodePath('defensePan')
self.defensePanNodePath.attachNewNode(defensePanGeom)
self.defensePanNodePath.setPos(0, -2, 0)
self.defensePanNodePath.reparentTo(self.beamNodePath)
defenseTube = CollisionTube(0, 0, -0.5, 0, 0, -1.5, 0.6)
defenseTube.setTangible(1)
defenseCollNode = CollisionNode('DefenseCol')
defenseCollNode.addSolid(defenseTube)
self.defenseColNodePath = self.defensePanNodePath.attachNewNode(defenseCollNode)
self.defenseColNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeDefensePan))
prosecutionPanGeom = self.createBlock(0.5, 0.5, 0, -0.5, -0.5, -2, 1.0, 0, 0, 1.0)
self.prosecutionPanNodePath = NodePath('prosecutionPan')
self.prosecutionPanNodePath.attachNewNode(prosecutionPanGeom)
self.prosecutionPanNodePath.setPos(0, 2, 0)
self.prosecutionPanNodePath.reparentTo(self.beamNodePath)
prosecutionTube = CollisionTube(0, 0, -0.5, 0, 0, -1.5, 0.6)
prosecutionTube.setTangible(1)
prosecutionCollNode = CollisionNode(self.uniqueName('ProsecutionCol'))
prosecutionCollNode.addSolid(prosecutionTube)
self.prosecutionColNodePath = self.prosecutionPanNodePath.attachNewNode(prosecutionCollNode)
self.prosecutionColNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeProsecutionPan))
standGeom = self.createBlock(0.25, 0.25, 0, -0.25, -0.25, 3)
self.standNodePath = NodePath('scaleStand')
self.standNodePath.attachNewNode(standGeom)
self.standNodePath.reparentTo(self.scaleNodePath)
self.scaleNodePath.setPosHpr(*ToontownGlobals.LawbotBossInjusticePosHpr)
self.scaleNodePath.setScale(5.0)
self.scaleNodePath.wrtReparentTo(self.geom)
self.setScaleTilt(startingTilt)
def setScaleTilt(self, tilt):
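# Pitch the beam by 'tilt' and counter-pitch both pans so they stay level.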
self.beamNodePath.setP(tilt)
if self.useProgrammerScale:
self.defensePanNodePath.setP(-tilt)
self.prosecutionPanNodePath.setP(-tilt)
else:
self.defensePanNodePath.setP(-tilt)
self.prosecutionPanNodePath.setP(-tilt)
def stashBaseCol(self):
if not self.baseColStashed:
self.notify.debug('stashBaseCol')
self.baseTopCol.stash()
self.baseSideCol.stash()
self.baseColStashed = True
def unstashBaseCol(self):
if self.baseColStashed:
self.notify.debug('unstashBaseCol')
self.baseTopCol.unstash()
self.baseSideCol.unstash()
self.baseColStashed = False
def makeScaleReflectDamage(self):
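# Tilt the scale in proportion to how far boss damage sits above or below the
# initial value; once damage reaches 85% of max, stash the base collision.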
diffDamage = self.bossDamage - ToontownGlobals.LawbotBossInitialDamage
diffDamage *= 1.0
if diffDamage >= 0:
percentDamaged = diffDamage / (ToontownGlobals.LawbotBossMaxDamage - ToontownGlobals.LawbotBossInitialDamage)
tilt = percentDamaged * ToontownGlobals.LawbotBossWinningTilt
else:
percentDamaged = diffDamage / (ToontownGlobals.LawbotBossInitialDamage - 0)
tilt = percentDamaged * ToontownGlobals.LawbotBossWinningTilt
self.setScaleTilt(tilt)
if self.bossDamage < ToontownGlobals.LawbotBossMaxDamage * 0.85:
self.unstashBaseCol()
else:
self.stashBaseCol()
def unloadEnvironment(self):
self.notify.debug('----- unloadEnvironment')
DistributedBossCog.DistributedBossCog.unloadEnvironment(self)
self.geom.removeNode()
del self.geom
def __loadMopaths(self):
self.notify.debug('----- __loadMopaths')
self.toonsEnterA = Mopath.Mopath()
self.toonsEnterA.loadFile('phase_9/paths/bossBattle-toonsEnterA')
self.toonsEnterA.fFaceForward = 1
self.toonsEnterA.timeScale = 35
self.toonsEnterB = Mopath.Mopath()
self.toonsEnterB.loadFile('phase_9/paths/bossBattle-toonsEnterB')
self.toonsEnterB.fFaceForward = 1
self.toonsEnterB.timeScale = 35
def __unloadMopaths(self):
self.notify.debug('----- __unloadMopaths')
self.toonsEnterA.reset()
self.toonsEnterB.reset()
def enterOff(self):
self.notify.debug('----- enterOff')
DistributedBossCog.DistributedBossCog.enterOff(self)
if self.witnessToon:
self.witnessToon.clearChat()
def enterWaitForToons(self):
self.notify.debug('----- enterWaitForToons')
DistributedBossCog.DistributedBossCog.enterWaitForToons(self)
self.geom.hide()
self.witnessToon.removeActive()
def exitWaitForToons(self):
self.notify.debug('----- exitWaitForToons')
DistributedBossCog.DistributedBossCog.exitWaitForToons(self)
self.geom.show()
self.witnessToon.addActive()
def enterElevator(self):
self.notify.debug('----- enterElevator')
DistributedBossCog.DistributedBossCog.enterElevator(self)
self.witnessToon.removeActive()
self.reparentTo(render)
self.setPosHpr(*ToontownGlobals.LawbotBossBattleOnePosHpr)
self.happy = 1
self.raised = 1
self.forward = 1
self.doAnimate()
self.__hideWitnessToon()
if not self.mainDoor.isEmpty():
self.mainDoor.stash()
if not self.reflectedMainDoor.isEmpty():
self.reflectedMainDoor.stash()
camera.reparentTo(self.elevatorModel)
camera.setPosHpr(0, 30, 8, 180, 0, 0)
def exitElevator(self):
self.notify.debug('----- exitElevator')
DistributedBossCog.DistributedBossCog.exitElevator(self)
self.witnessToon.removeActive()
def enterIntroduction(self):
self.notify.debug('----- enterIntroduction')
self.reparentTo(render)
self.setPosHpr(*ToontownGlobals.LawbotBossBattleOnePosHpr)
self.stopAnimate()
self.__hideWitnessToon()
DistributedBossCog.DistributedBossCog.enterIntroduction(self)
base.playMusic(self.promotionMusic, looping=1, volume=0.9)
if not self.mainDoor.isEmpty():
self.mainDoor.stash()
if not self.reflectedMainDoor.isEmpty():
self.reflectedMainDoor.stash()
def exitIntroduction(self):
self.notify.debug('----- exitIntroduction')
DistributedBossCog.DistributedBossCog.exitIntroduction(self)
self.promotionMusic.stop()
if not self.mainDoor.isEmpty():
pass
if not self.reflectedMainDoor.isEmpty():
self.reflectedMainDoor.unstash()
if not self.elevatorEntrance.isEmpty():
pass
def enterBattleOne(self):
self.notify.debug('----- LawbotBoss.enterBattleOne ')
DistributedBossCog.DistributedBossCog.enterBattleOne(self)
self.reparentTo(render)
self.setPosHpr(*ToontownGlobals.LawbotBossBattleOnePosHpr)
self.clearChat()
self.loop('Ff_neutral')
self.notify.debug('self.battleANode = %s' % self.battleANode)
self.__hideWitnessToon()
if self.battleA == None or self.battleB == None:
pass
return
def exitBattleOne(self):
self.notify.debug('----- exitBattleOne')
DistributedBossCog.DistributedBossCog.exitBattleOne(self)
def stashBoss(self):
self.stash()
def unstashBoss(self, task):
self.unstash()
self.reparentTo(render)
def enterRollToBattleTwo(self):
self.notify.debug('----- enterRollToBattleTwo')
self.releaseToons(finalBattle=1)
self.stashBoss()
self.toonsToBattlePosition(self.involvedToons, self.battleANode)
self.stickBossToFloor()
intervalName = 'RollToBattleTwo'
seq = Sequence(self.__makeRollToBattleTwoMovie(), Func(self.__onToPrepareBattleTwo), name=intervalName)
seq.start()
self.storeInterval(seq, intervalName)
base.playMusic(self.betweenBattleMusic, looping=1, volume=0.9)
taskMgr.doMethodLater(0.01, self.unstashBoss, 'unstashBoss')
def __onToPrepareBattleTwo(self):
self.notify.debug('----- __onToPrepareBattleTwo')
self.unstickBoss()
self.setPosHpr(*ToontownGlobals.LawbotBossBattleTwoPosHpr)
self.doneBarrier('RollToBattleTwo')
def exitRollToBattleTwo(self):
self.notify.debug('----- exitRollToBattleTwo')
self.unstickBoss()
intervalName = 'RollToBattleTwo'
self.clearInterval(intervalName)
self.betweenBattleMusic.stop()
def enterPrepareBattleTwo(self):
self.notify.debug('----- enterPrepareBattleTwo')
self.cleanupIntervals()
self.controlToons()
self.setToonsToNeutral(self.involvedToons)
self.clearChat()
self.reparentTo(render)
self.__showWitnessToon()
prepareBattleTwoMovie = self.__makePrepareBattleTwoMovie()
intervalName = 'prepareBattleTwo'
seq = Sequence(prepareBattleTwoMovie, name=intervalName)
seq.start()
self.storeInterval(seq, intervalName)
self.acceptOnce('doneChatPage', self.__showCannonsAppearing)
base.playMusic(self.stingMusic, looping=0, volume=1.0)
def __showCannonsAppearing(self, elapsedTime = 0):
allCannonsAppear = Sequence(Func(self.__positionToonsInFrontOfCannons), Func(camera.reparentTo, localAvatar), Func(camera.setPos, localAvatar.getOldCameraPosTwo()), Func(camera.lookAt, localAvatar))
multiCannons = Parallel()
index = 0
self.involvedToons.sort()
for toonId in self.involvedToons:
toon = self.cr.doId2do.get(toonId)
if toon:
if index in self.cannons:
cannon = self.cannons[index]
cannonSeq = cannon.generateCannonAppearTrack(toon)
multiCannons.append(cannonSeq)
index += 1
else:
self.notify.warning('No cannon %d but we have a toon =%d' % (index, toonId))
allCannonsAppear.append(multiCannons)
intervalName = 'prepareBattleTwoCannonsAppear'
seq = Sequence(allCannonsAppear, Func(self.__onToBattleTwo), name=intervalName)
seq.start()
self.storeInterval(seq, intervalName)
def __onToBattleTwo(self, elapsedTime = 0):
self.notify.debug('----- __onToBattleTwo')
self.doneBarrier('PrepareBattleTwo')
taskMgr.doMethodLater(1, self.__showWaitingMessage, self.uniqueName('WaitingMessage'))
def exitPrepareBattleTwo(self):
self.notify.debug('----- exitPrepareBattleTwo')
self.show()
taskMgr.remove(self.uniqueName('WaitingMessage'))
self.ignore('doneChatPage')
self.__clearOnscreenMessage()
self.stingMusic.stop()
def enterBattleTwo(self):
self.notify.debug('----- enterBattleTwo')
self.cleanupIntervals()
mult = ToontownBattleGlobals.getBossBattleCreditMultiplier(2)
localAvatar.inventory.setBattleCreditMultiplier(mult)
self.reparentTo(render)
self.setPosHpr(*ToontownGlobals.LawbotBossBattleTwoPosHpr)
self.clearChat()
self.witnessToon.clearChat()
self.releaseToons(finalBattle=1)
self.__showWitnessToon()
if not self.useCannons:
self.toonsToBattlePosition(self.toonsA, self.battleANode)
self.toonsToBattlePosition(self.toonsB, self.battleBNode)
base.playMusic(self.battleTwoMusic, looping=1, volume=0.9)
self.startJuryBoxMoving()
for index in xrange(len(self.cannons)):
cannon = self.cannons[index]
cannon.cannon.show()
def getChairParent(self):
return self.juryBox
def startJuryBoxMoving(self):
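# Slide the jury box and its reflection to their battle-two positions over
# LawbotBossJuryBoxMoveTime, with a looping sound and an on-screen countdown.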
if self.juryBoxIval:
self.juryBoxIval.finish()
self.juryBoxIval = None
self.juryBox.setPos(-30, 0, -12.645)
self.reflectedJuryBox.setPos(-30, 0, 0)
curPos = self.juryBox.getPos()
endingAbsPos = Point3(curPos[0] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[0], curPos[1] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[1], curPos[2] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[2])
curReflectedPos = self.reflectedJuryBox.getPos()
reflectedEndingAbsPos = Point3(curReflectedPos[0] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[0], curReflectedPos[1] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[1], curReflectedPos[2] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[2])
self.juryBoxIval = Parallel(self.juryBox.posInterval(ToontownGlobals.LawbotBossJuryBoxMoveTime, endingAbsPos), self.reflectedJuryBox.posInterval(ToontownGlobals.LawbotBossJuryBoxMoveTime, reflectedEndingAbsPos), SoundInterval(self.juryMovesSfx, node=self.chairs[2].nodePath, duration=ToontownGlobals.LawbotBossJuryBoxMoveTime, loop=1, volume=1.0))
self.juryBoxIval.start()
self.juryTimer = ToontownTimer.ToontownTimer()
self.juryTimer.posInTopRightCorner()
self.juryTimer.countdown(ToontownGlobals.LawbotBossJuryBoxMoveTime)
def exitBattleTwo(self):
self.notify.debug('----- exitBattleTwo')
intervalName = self.uniqueName('Drop')
self.clearInterval(intervalName)
self.cleanupBattles()
self.battleTwoMusic.stop()
localAvatar.inventory.setBattleCreditMultiplier(1)
if self.juryTimer:
self.juryTimer.destroy()
del self.juryTimer
self.juryTimer = None
for chair in self.chairs.values():
chair.stopCogsFlying()
return
def enterRollToBattleThree(self):
self.notify.debug('----- enterRollToBattleThree')
self.reparentTo(render)
self.stickBossToFloor()
intervalName = 'RollToBattleThree'
seq = Sequence(self.__makeRollToBattleThreeMovie(), Func(self.__onToPrepareBattleThree), name=intervalName)
seq.start()
self.storeInterval(seq, intervalName)
base.playMusic(self.betweenBattleMusic, looping=1, volume=0.9)
def __onToPrepareBattleThree(self):
self.notify.debug('----- __onToPrepareBattleThree')
self.unstickBoss()
self.setPosHpr(*ToontownGlobals.LawbotBossBattleThreePosHpr)
self.doneBarrier('RollToBattleThree')
def exitRollToBattleThree(self):
self.notify.debug('----- exitRollToBattleThree')
self.unstickBoss()
intervalName = 'RollToBattleThree'
self.clearInterval(intervalName)
self.betweenBattleMusic.stop()
def enterPrepareBattleThree(self):
self.notify.debug('----- enterPrepareBattleThree')
self.cleanupIntervals()
self.controlToons()
self.setToonsToNeutral(self.involvedToons)
self.clearChat()
self.reparentTo(render)
base.playMusic(self.betweenBattleMusic, looping=1, volume=0.9)
self.__showWitnessToon()
prepareBattleThreeMovie = self.__makePrepareBattleThreeMovie()
self.acceptOnce('doneChatPage', self.__onToBattleThree)
intervalName = 'prepareBattleThree'
seq = Sequence(prepareBattleThreeMovie, name=intervalName)
seq.start()
self.storeInterval(seq, intervalName)
def __onToBattleThree(self, elapsed):
self.notify.debug('----- __onToBattleThree')
self.doneBarrier('PrepareBattleThree')
taskMgr.doMethodLater(1, self.__showWaitingMessage, self.uniqueName('WaitingMessage'))
def exitPrepareBattleThree(self):
self.notify.debug('----- exitPrepareBattleThree')
self.show()
taskMgr.remove(self.uniqueName('WaitingMessage'))
self.ignore('doneChatPage')
intervalName = 'PrepareBattleThree'
self.clearInterval(intervalName)
self.__clearOnscreenMessage()
self.betweenBattleMusic.stop()
def enterBattleThree(self):
DistributedBossCog.DistributedBossCog.enterBattleThree(self)
self.scaleNodePath.unstash()
localAvatar.setPos(-3, 0, 0)
base.localAvatar.orbitalCamera.start()
self.clearChat()
self.witnessToon.clearChat()
self.reparentTo(render)
self.happy = 1
self.raised = 1
self.forward = 1
self.doAnimate()
self.accept('enterWitnessStand', self.__touchedWitnessStand)
self.accept('pieSplat', self.__pieSplat)
self.accept('localPieSplat', self.__localPieSplat)
self.accept('outOfPies', self.__outOfPies)
self.accept('begin-pie', self.__foundPieButton)
self.accept('enterDefenseCol', self.__enterDefenseCol)
self.accept('enterProsecutionCol', self.__enterProsecutionCol)
localAvatar.setCameraFov(ToontownGlobals.BossBattleCameraFov)
taskMgr.doMethodLater(30, self.__howToGetPies, self.uniqueName('PieAdvice'))
self.stickBossToFloor()
self.setPosHpr(*ToontownGlobals.LawbotBossBattleThreePosHpr)
self.bossMaxDamage = ToontownGlobals.LawbotBossMaxDamage
base.playMusic(self.battleThreeMusic, looping=1, volume=0.9)
self.__showWitnessToon()
diffSettings = ToontownGlobals.LawbotBossDifficultySettings[self.battleDifficulty]
self.bossHealthBar.initialize(self.bossMaxDamage - self.bossDamage, self.bossMaxDamage)
if diffSettings[4]:
localAvatar.chatMgr.chatInputSpeedChat.removeCJMenu()
localAvatar.chatMgr.chatInputSpeedChat.addCJMenu(self.bonusWeight)
def __doneBattleThree(self):
self.notify.debug('----- __doneBattleThree')
self.setState('NearVictory')
self.unstickBoss()
def exitBattleThree(self):
self.notify.debug('----- exitBattleThree')
DistributedBossCog.DistributedBossCog.exitBattleThree(self)
NametagGlobals.setMasterArrowsOn(1)
bossDoneEventName = self.uniqueName('DestroyedBoss')
self.ignore(bossDoneEventName)
taskMgr.remove(self.uniqueName('StandUp'))
self.ignore('enterWitnessStand')
self.ignore('pieSplat')
self.ignore('localPieSplat')
self.ignore('outOfPies')
self.ignore('begin-pie')
self.ignore('enterDefenseCol')
self.ignore('enterProsecutionCol')
self.__clearOnscreenMessage()
taskMgr.remove(self.uniqueName('PieAdvice'))
localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov)
if self.bossDamageMovie:
self.bossDamageMovie.finish()
self.bossDamageMovie = None
self.unstickBoss()
taskName = 'RecoverBossDamage'
taskMgr.remove(taskName)
self.battleThreeMusicTime = self.battleThreeMusic.getTime()
self.battleThreeMusic.stop()
return
def enterNearVictory(self):
self.cleanupIntervals()
self.reparentTo(render)
self.setPos(*ToontownGlobals.LawbotBossDeathPos)
self.setHpr(*ToontownGlobals.LawbotBossBattleThreeHpr)
self.clearChat()
self.releaseToons(finalBattle=1)
self.accept('pieSplat', self.__finalPieSplat)
self.accept('localPieSplat', self.__localPieSplat)
self.accept('outOfPies', self.__outOfPies)
localAvatar.setCameraFov(ToontownGlobals.BossBattleCameraFov)
self.happy = 0
self.raised = 0
self.forward = 1
self.doAnimate()
self.setDizzy(1)
base.playMusic(self.battleThreeMusic, looping=1, volume=0.9, time=self.battleThreeMusicTime)
def exitNearVictory(self):
self.notify.debug('----- exitNearVictory')
self.ignore('pieSplat')
self.ignore('localPieSplat')
self.ignore('outOfPies')
self.__clearOnscreenMessage()
taskMgr.remove(self.uniqueName('PieAdvice'))
localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov)
self.setDizzy(0)
self.battleThreeMusicTime = self.battleThreeMusic.getTime()
self.battleThreeMusic.stop()
def enterVictory(self):
self.notify.debug('----- enterVictory')
self.cleanupIntervals()
self.reparentTo(render)
self.setPosHpr(*ToontownGlobals.LawbotBossBattleThreePosHpr)
self.loop('neutral')
localAvatar.setCameraFov(ToontownGlobals.BossBattleCameraFov)
self.clearChat()
self.witnessToon.clearChat()
self.controlToons()
self.setToonsToNeutral(self.involvedToons)
self.happy = 1
self.raised = 1
self.forward = 1
intervalName = 'VictoryMovie'
seq = Sequence(self.makeVictoryMovie(), Func(self.__continueVictory), name=intervalName)
seq.start()
self.storeInterval(seq, intervalName)
self.bossHealthBar.deinitialize()
base.playMusic(self.battleThreeMusic, looping=1, volume=0.9, time=self.battleThreeMusicTime)
def __continueVictory(self):
self.notify.debug('----- __continueVictory')
self.stopAnimate()
self.doneBarrier('Victory')
def exitVictory(self):
self.notify.debug('----- exitVictory')
self.stopAnimate()
self.unstash()
localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov)
self.battleThreeMusicTime = self.battleThreeMusic.getTime()
self.battleThreeMusic.stop()
def enterDefeat(self):
self.notify.debug('----- enterDefeat')
self.cleanupIntervals()
localAvatar.setCameraFov(ToontownGlobals.BossBattleCameraFov)
self.reparentTo(render)
self.clearChat()
self.releaseToons(finalBattle=1)
self.happy = 0
self.raised = 0
self.forward = 1
intervalName = 'DefeatMovie'
seq = Sequence(self.makeDefeatMovie(), Func(self.__continueDefeat), name=intervalName)
seq.start()
self.storeInterval(seq, intervalName)
base.playMusic(self.battleThreeMusic, looping=1, volume=0.9, time=self.battleThreeMusicTime)
def __continueDefeat(self):
self.notify.debug('----- __continueDefeat')
self.stopAnimate()
self.doneBarrier('Defeat')
def exitDefeat(self):
self.notify.debug('----- exitDefeat')
self.stopAnimate()
self.unstash()
localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov)
self.battleThreeMusicTime = self.battleThreeMusic.getTime()
self.battleThreeMusic.stop()
def enterReward(self):
self.cleanupIntervals()
self.clearChat()
self.witnessToon.clearChat()
self.stash()
self.stopAnimate()
self.controlToons()
panelName = self.uniqueName('reward')
self.rewardPanel = RewardPanel.RewardPanel(panelName)
victory, camVictory, skipper = MovieToonVictory.doToonVictory(1, self.involvedToons, self.toonRewardIds, self.toonRewardDicts, self.deathList, self.rewardPanel, allowGroupShot=0, uberList=self.uberList, noSkip=True)
ival = Sequence(Parallel(victory, camVictory), Func(self.__doneReward))
intervalName = 'RewardMovie'
delayDeletes = []
for toonId in self.involvedToons:
toon = self.cr.doId2do.get(toonId)
if toon:
delayDeletes.append(DelayDelete.DelayDelete(toon, 'LawbotBoss.enterReward'))
ival.delayDeletes = delayDeletes
ival.start()
self.storeInterval(ival, intervalName)
base.playMusic(self.battleThreeMusic, looping=1, volume=0.9, time=self.battleThreeMusicTime)
def __doneReward(self):
self.notify.debug('----- __doneReward')
self.doneBarrier('Reward')
self.toWalkMode()
def exitReward(self):
self.notify.debug('----- exitReward')
intervalName = 'RewardMovie'
self.clearInterval(intervalName)
self.unstash()
self.rewardPanel.destroy()
del self.rewardPanel
self.battleThreeMusicTime = 0
self.battleThreeMusic.stop()
def enterEpilogue(self):
self.cleanupIntervals()
self.clearChat()
self.witnessToon.clearChat()
self.stash()
self.stopAnimate()
self.controlToons()
self.__showWitnessToon()
self.witnessToon.reparentTo(render)
self.witnessToon.setPosHpr(*ToontownGlobals.LawbotBossWitnessEpiloguePosHpr)
self.witnessToon.loop('Sit')
self.__arrangeToonsAroundWitnessToon()
camera.reparentTo(render)
camera.setPos(self.witnessToon, -9, 12, 6)
camera.lookAt(self.witnessToon, 0, 0, 3)
intervalName = 'EpilogueMovie'
seq = Sequence(self.makeEpilogueMovie(), name=intervalName)
seq.start()
self.storeInterval(seq, intervalName)
self.accept('doneChatPage', self.__doneEpilogue)
base.playMusic(self.epilogueMusic, looping=1, volume=0.9)
def __doneEpilogue(self, elapsedTime = 0):
self.notify.debug('----- __doneEpilogue')
intervalName = 'EpilogueMovieToonAnim'
self.clearInterval(intervalName)
track = Parallel(Sequence(Wait(0.5), Func(self.localToonToSafeZone)))
self.storeInterval(track, intervalName)
track.start()
def exitEpilogue(self):
self.notify.debug('----- exitEpilogue')
self.clearInterval('EpilogueMovieToonAnim')
self.unstash()
self.epilogueMusic.stop()
def enterFrolic(self):
self.notify.debug('----- enterFrolic')
self.setPosHpr(*ToontownGlobals.LawbotBossBattleOnePosHpr)
DistributedBossCog.DistributedBossCog.enterFrolic(self)
self.show()
def doorACallback(self, isOpen):
if self.insidesANodePath:
if isOpen:
self.insidesANodePath.unstash()
else:
self.insidesANodePath.stash()
def doorBCallback(self, isOpen):
if self.insidesBNodePath:
if isOpen:
self.insidesBNodePath.unstash()
else:
self.insidesBNodePath.stash()
def __toonsToPromotionPosition(self, toonIds, battleNode):
self.notify.debug('----- __toonsToPromotionPosition')
points = BattleBase.BattleBase.toonPoints[len(toonIds) - 1]
for i in xrange(len(toonIds)):
toon = base.cr.doId2do.get(toonIds[i])
if toon:
toon.reparentTo(render)
pos, h = points[i]
toon.setPosHpr(battleNode, pos[0], pos[1] + 10, pos[2], h, 0, 0)
def __outOfPies(self):
self.notify.debug('----- outOfPies')
self.__showOnscreenMessage(TTLocalizer.LawbotBossNeedMoreEvidence)
taskMgr.doMethodLater(20, self.__howToGetPies, self.uniqueName('PieAdvice'))
def __howToGetPies(self, task):
self.notify.debug('----- __howToGetPies')
self.__showOnscreenMessage(TTLocalizer.LawbotBossHowToGetEvidence)
def __howToThrowPies(self, task):
self.notify.debug('----- __howToThrowPies')
self.__showOnscreenMessage(TTLocalizer.LawbotBossHowToThrowPies)
def __foundPieButton(self):
self.everThrownPie = 1
self.__clearOnscreenMessage()
taskMgr.remove(self.uniqueName('PieAdvice'))
def __touchedWitnessStand(self, entry):
self.sendUpdate('touchWitnessStand', [])
self.__clearOnscreenMessage()
taskMgr.remove(self.uniqueName('PieAdvice'))
base.playSfx(self.piesRestockSfx)
if not self.everThrownPie:
taskMgr.doMethodLater(30, self.__howToThrowPies, self.uniqueName('PieAdvice'))
def __pieSplat(self, toon, pieCode):
if pieCode == ToontownGlobals.PieCodeBossInsides:
if toon == localAvatar:
self.d_hitBossInsides()
self.flashRed()
elif pieCode == ToontownGlobals.PieCodeBossCog:
if toon == localAvatar:
self.d_hitBoss(1)
if self.dizzy:
self.flashRed()
self.doAnimate('hit', now=1)
elif pieCode == ToontownGlobals.PieCodeDefensePan:
self.flashRed()
self.flashPanBlue()
base.playSfx(self.evidenceHitSfx, node=self.defensePanNodePath, volume=0.25)
if toon == localAvatar:
self.d_hitBoss(self.panDamage)
elif pieCode == ToontownGlobals.PieCodeProsecutionPan:
self.flashGreen()
if toon == localAvatar:
pass
elif pieCode == ToontownGlobals.PieCodeLawyer:
pass
def __localPieSplat(self, pieCode, entry):
if pieCode == ToontownGlobals.PieCodeLawyer:
self.__lawyerGotHit(entry)
if pieCode != ToontownGlobals.PieCodeToon:
return
avatarDoId = entry.getIntoNodePath().getNetTag('avatarDoId')
if avatarDoId == '':
self.notify.warning('Toon %s has no avatarDoId tag.' % repr(entry.getIntoNodePath()))
return
doId = int(avatarDoId)
if doId != localAvatar.doId:
self.d_hitToon(doId)
def __lawyerGotHit(self, entry):
lawyerCol = entry.getIntoNodePath()
names = lawyerCol.getName().split('-')
lawyerDoId = int(names[1])
for lawyer in self.lawyers:
if lawyerDoId == lawyer.doId:
lawyer.sendUpdate('hitByToon', [])
def __finalPieSplat(self, toon, pieCode):
if pieCode != ToontownGlobals.PieCodeDefensePan:
return
self.sendUpdate('finalPieSplat', [])
self.ignore('pieSplat')
def cleanupAttacks(self):
self.notify.debug('----- cleanupAttacks')
self.__cleanupStrafe()
def __cleanupStrafe(self):
self.notify.debug('----- __cleanupStrafe')
if self.strafeInterval:
self.strafeInterval.finish()
self.strafeInterval = None
return
def __cleanupJuryBox(self):
self.notify.debug('----- __cleanupJuryBox')
if self.juryBoxIval:
self.juryBoxIval.finish()
self.juryBoxIval = None
if self.juryBox:
self.juryBox.removeNode()
return
def doStrafe(self, side, direction):
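# Gear-strafe attack: open door A (side 0) or door B, then throw a fan of gear
# instances whose count and speed scale with current boss damage; 'direction'
# flips the sweep direction.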
gearRoot = self.rotateNode.attachNewNode('gearRoot')
if side == 0:
gearRoot.setPos(0, -7, 3)
gearRoot.setHpr(180, 0, 0)
door = self.doorA
else:
gearRoot.setPos(0, 7, 3)
door = self.doorB
gearRoot.setTag('attackCode', str(ToontownGlobals.BossCogStrafeAttack))
gearModel = self.getGearFrisbee()
gearModel.setScale(0.1)
t = self.getBossDamage() / 100.0
gearTrack = Parallel()
numGears = int(4 + 6 * t + 0.5)
time = 5.0 - 4.0 * t
spread = 60 * math.pi / 180.0
if direction == 1:
spread = -spread
dist = 50
rate = time / numGears
for i in xrange(numGears):
node = gearRoot.attachNewNode(str(i))
node.hide()
node.setPos(0, 0, 0)
gear = gearModel.instanceTo(node)
angle = (float(i) / (numGears - 1) - 0.5) * spread
x = dist * math.sin(angle)
y = dist * math.cos(angle)
h = random.uniform(-720, 720)
gearTrack.append(Sequence(Wait(i * rate), Func(node.show), Parallel(node.posInterval(1, Point3(x, y, 0), fluid=1), node.hprInterval(1, VBase3(h, 0, 0), fluid=1), Sequence(SoundInterval(self.strafeSfx[i], volume=0.2, node=self), duration=0)), Func(node.detachNode)))
seq = Sequence(Func(door.request, 'open'), Wait(0.7), gearTrack, Func(door.request, 'close'))
self.__cleanupStrafe()
self.strafeInterval = seq
seq.start()
def replaceCollisionPolysWithPlanes(self, model):
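# Collapse every CollisionPolygon under 'model' into a deduplicated set of
# CollisionPlanes (planes compared within a tolerance) and return them on a
# new NodePath; used above to replace the courtroom floor collisions.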
newCollisionNode = CollisionNode('collisions')
newCollideMask = BitMask32(0)
planes = []
collList = model.findAllMatches('**/+CollisionNode')
if not collList:
collList = [model]
for cnp in collList:
cn = cnp.node()
if not isinstance(cn, CollisionNode):
self.notify.warning('Not a collision node: %s' % repr(cnp))
break
newCollideMask = newCollideMask | cn.getIntoCollideMask()
for i in xrange(cn.getNumSolids()):
solid = cn.getSolid(i)
if isinstance(solid, CollisionPolygon):
plane = Plane(solid.getPlane())
planes.append(plane)
else:
self.notify.warning('Unexpected collision solid: %s' % repr(solid))
newCollisionNode.addSolid(plane)
newCollisionNode.setIntoCollideMask(newCollideMask)
threshold = 0.1
planes.sort(lambda p1, p2: p1.compareTo(p2, threshold))
lastPlane = None
for plane in planes:
if lastPlane == None or plane.compareTo(lastPlane, threshold) != 0:
cp = CollisionPlane(plane)
newCollisionNode.addSolid(cp)
lastPlane = plane
return NodePath(newCollisionNode)
def makeIntroductionMovie(self, delayDeletes):
self.notify.debug('----- makeIntroductionMovie')
for toonId in self.involvedToons:
toon = self.cr.doId2do.get(toonId)
if toon:
delayDeletes.append(DelayDelete.DelayDelete(toon, 'LawbotBoss.makeIntroductionMovie'))
track = Parallel()
bossAnimTrack = Sequence(
ActorInterval(self, 'Ff_speech', startTime=2, duration=10, loop=1),
ActorInterval(self, 'Ff_lookRt', duration=3),
ActorInterval(self, 'Ff_lookRt', duration=3, startTime=3, endTime=0),
ActorInterval(self, 'Ff_neutral', duration=2),
ActorInterval(self, 'Ff_speech', duration=7, loop=1))
track.append(bossAnimTrack)
attackToons = TTLocalizer.BossCogAttackToons
dialogTrack = Track(
(0, Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempIntro0, CFSpeech)),
(5.6, Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempIntro1, CFSpeech)),
(12, Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempIntro2, CFSpeech)),
(18, Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempIntro3, CFSpeech)),
(22, Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempIntro4, CFSpeech)),
(24, Sequence(
Func(self.clearChat),
self.loseCogSuits(self.toonsA + self.toonsB, render, (-2.798, -70, 10, 180, 0, 0)))),
(27, Sequence(
self.toonNormalEyes(self.involvedToons),
Func(self.loop, 'Ff_neutral'),
Func(self.setChatAbsolute, attackToons, CFSpeech))))
track.append(dialogTrack)
return Sequence(
Func(self.stickToonsToFloor),
track,
Func(self.unstickToons), name=self.uniqueName('Introduction'))
def walkToonsToBattlePosition(self, toonIds, battleNode):
self.notify.debug('walkToonsToBattlePosition-----------------------------------------------')
self.notify.debug('toonIds=%s battleNode=%s' % (toonIds, battleNode))
ival = Parallel()
points = BattleBase.BattleBase.toonPoints[len(toonIds) - 1]
self.notify.debug('walkToonsToBattlePosition: points = %s' % points[0][0])
for i in xrange(len(toonIds)):
toon = base.cr.doId2do.get(toonIds[i])
if toon:
pos, h = points[i]
origPos = pos
self.notify.debug('origPos = %s' % origPos)
self.notify.debug('battleNode.getTransform = %s render.getTransform=%s' % (battleNode.getTransform(), render.getTransform()))
self.notify.debug('render.getScale()=%s battleNode.getScale()=%s' % (render.getScale(), battleNode.getScale()))
myCurPos = self.getPos()
self.notify.debug('myCurPos = %s' % self.getPos())
self.notify.debug('battleNode.parent() = %s' % battleNode.getParent())
self.notify.debug('battleNode.parent().getPos() = %s' % battleNode.getParent().getPos())
bnParent = battleNode.getParent()
battleNode.wrtReparentTo(render)
bnWorldPos = battleNode.getPos()
battleNode.wrtReparentTo(bnParent)
self.notify.debug('battle node world pos = %s' % bnWorldPos)
pos = render.getRelativePoint(battleNode, pos)
self.notify.debug('walkToonsToBattlePosition: render.getRelativePoint result = %s' % pos)
self.notify.debug('walkToonsToBattlePosition: final pos = %s' % pos)
ival.append(Sequence(Func(toon.setPlayRate, 0.8, 'walk'), Func(toon.loop, 'walk'), toon.posInterval(3, pos), Func(toon.setPlayRate, 1, 'walk'), Func(toon.loop, 'neutral')))
return ival
def toonsToBattlePosition(self, toonIds, battleNode):
self.notify.debug('DistributedLawbotBoss.toonsToBattlePosition----------------------------------------')
self.notify.debug('toonIds=%s battleNode=%s' % (toonIds, battleNode))
if len(toonIds) < 5:
points = BattleBase.BattleBase.toonPoints[len(toonIds) - 1]
else:
points = list(BattleBase.BattleBase.toonPoints[3])
points.extend(BattleBase.BattleBase.toonPoints[len(toonIds) - 5])
self.notify.debug('toonsToBattlePosition: points = %s' % points[0][0])
for i in xrange(len(toonIds)):
toon = base.cr.doId2do.get(toonIds[i])
if toon:
toon.wrtReparentTo(render)
pos, h = points[i]
if i > 3:
pos.setY(pos.getY() + 2.0)
bnParent = battleNode.getParent()
battleNode.wrtReparentTo(render)
bnWorldPos = battleNode.getPos()
battleNode.wrtReparentTo(bnParent)
toon.setPosHpr(battleNode, pos[0], pos[1], pos[2], h, 0, 0)
self.notify.debug('new toon pos %s ' % toon.getPos())
def touchedGavel(self, gavel, entry):
self.notify.debug('touchedGavel')
attackCodeStr = entry.getIntoNodePath().getNetTag('attackCode')
if attackCodeStr == '':
self.notify.warning('Node %s has no attackCode tag.' % repr(entry.getIntoNodePath()))
return
attackCode = int(attackCodeStr)
into = entry.getIntoNodePath()
self.zapLocalToon(attackCode, into)
def touchedGavelHandle(self, gavel, entry):
attackCodeStr = entry.getIntoNodePath().getNetTag('attackCode')
if attackCodeStr == '':
self.notify.warning('Node %s has no attackCode tag.' % repr(entry.getIntoNodePath()))
return
attackCode = int(attackCodeStr)
into = entry.getIntoNodePath()
self.zapLocalToon(attackCode, into)
def createBlock(self, x1, y1, z1, x2, y2, z2, r = 1.0, g = 1.0, b = 1.0, a = 1.0):
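# Build an axis-aligned box GeomNode spanning (x1, y1, z1)-(x2, y2, z2) with a
# flat RGBA color; used by loadScaleOld for the debug scale geometry.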
gFormat = GeomVertexFormat.getV3n3cpt2()
myVertexData = GeomVertexData('holds my vertices', gFormat, Geom.UHDynamic)
vertexWriter = GeomVertexWriter(myVertexData, 'vertex')
normalWriter = GeomVertexWriter(myVertexData, 'normal')
colorWriter = GeomVertexWriter(myVertexData, 'color')
texWriter = GeomVertexWriter(myVertexData, 'texcoord')
vertexWriter.addData3f(x1, y1, z1)
vertexWriter.addData3f(x2, y1, z1)
vertexWriter.addData3f(x1, y2, z1)
vertexWriter.addData3f(x2, y2, z1)
vertexWriter.addData3f(x1, y1, z2)
vertexWriter.addData3f(x2, y1, z2)
vertexWriter.addData3f(x1, y2, z2)
vertexWriter.addData3f(x2, y2, z2)
for index in xrange(8):
normalWriter.addData3f(1.0, 1.0, 1.0)
colorWriter.addData4f(r, g, b, a)
texWriter.addData2f(1.0, 1.0)
tris = GeomTriangles(Geom.UHDynamic)
tris.addVertex(0)
tris.addVertex(1)
tris.addVertex(2)
tris.closePrimitive()
tris.addVertex(1)
tris.addVertex(3)
tris.addVertex(2)
tris.closePrimitive()
tris.addVertex(2)
tris.addVertex(3)
tris.addVertex(6)
tris.closePrimitive()
tris.addVertex(3)
tris.addVertex(7)
tris.addVertex(6)
tris.closePrimitive()
tris.addVertex(0)
tris.addVertex(2)
tris.addVertex(4)
tris.closePrimitive()
tris.addVertex(2)
tris.addVertex(6)
tris.addVertex(4)
tris.closePrimitive()
tris.addVertex(1)
tris.addVertex(5)
tris.addVertex(3)
tris.closePrimitive()
tris.addVertex(3)
tris.addVertex(5)
tris.addVertex(7)
tris.closePrimitive()
tris.addVertex(0)
tris.addVertex(4)
tris.addVertex(5)
tris.closePrimitive()
tris.addVertex(1)
tris.addVertex(0)
tris.addVertex(5)
tris.closePrimitive()
tris.addVertex(4)
tris.addVertex(6)
tris.addVertex(7)
tris.closePrimitive()
tris.addVertex(7)
tris.addVertex(5)
tris.addVertex(4)
tris.closePrimitive()
cubeGeom = Geom(myVertexData)
cubeGeom.addPrimitive(tris)
cubeGN = GeomNode('cube')
cubeGN.addGeom(cubeGeom)
return cubeGN
def __enterDefenseCol(self, entry):
self.notify.debug('__enterDefenseCol')
def __enterProsecutionCol(self, entry):
self.notify.debug('__enterProsecutionCol')
def makeVictoryMovie(self):
myFromPos = Point3(ToontownGlobals.LawbotBossBattleThreePosHpr[0], ToontownGlobals.LawbotBossBattleThreePosHpr[1], ToontownGlobals.LawbotBossBattleThreePosHpr[2])
myToPos = Point3(myFromPos[0], myFromPos[1] + 30, myFromPos[2])
rollThroughDoor = self.rollBossToPoint(fromPos=myFromPos, fromHpr=None, toPos=myToPos, toHpr=None, reverse=0)
rollTrack = Sequence(
Func(self.getGeomNode().setH, 180),
rollThroughDoor[0],
Func(self.getGeomNode().setH, 0))
rollTrackDuration = rollTrack.getDuration()
self.notify.debug('rollTrackDuration = %f' % rollTrackDuration)
doorStartPos = self.door3.getPos()
doorEndPos = Point3(doorStartPos[0], doorStartPos[1], doorStartPos[2] + 25)
bossTrack = Track(
(0.5, Sequence(
Func(self.clearChat),
Func(camera.reparentTo, render),
Func(camera.setPos, -3, 45, 25),
Func(camera.setHpr, 0, 10, 0))),
(1.0, Func(self.setChatAbsolute, TTLocalizer.LawbotBossDefenseWins1, CFSpeech)),
(5.5, Func(self.setChatAbsolute, TTLocalizer.LawbotBossDefenseWins2, CFSpeech)),
(9.5, Sequence(Func(camera.wrtReparentTo, render))),
(9.6, Parallel(
rollTrack,
Func(self.setChatAbsolute, TTLocalizer.LawbotBossDefenseWins3, CFSpeech),
self.door3.posInterval(2, doorEndPos, startPos=doorStartPos))),
(13.1, Sequence(self.door3.posInterval(1, doorStartPos))))
retTrack = Parallel(bossTrack, ActorInterval(self, 'Ff_speech', loop=1))
return bossTrack
def makeEpilogueMovie(self):
epSpeech = TTLocalizer.WitnessToonCongratulations
epSpeech = self.__talkAboutPromotion(epSpeech)
bossTrack = Sequence(Func(self.witnessToon.animFSM.request, 'neutral'), Func(self.witnessToon.setLocalPageChat, epSpeech, 0))
return bossTrack
def makeDefeatMovie(self):
bossTrack = Track((0.0, Sequence(Func(self.clearChat), Func(self.reverseHead), ActorInterval(self, 'Ff_speech'))), (1.0, Func(self.setChatAbsolute, TTLocalizer.LawbotBossProsecutionWins, CFSpeech)))
return bossTrack
def __makeWitnessToon(self):
dnaNetString = 't\x1b\x00\x01\x01\x00\x03\x00\x03\x01\x10\x13\x00\x13\x13'
npc = Toon.Toon()
npc.setDNAString(dnaNetString)
npc.setName(TTLocalizer.WitnessToonName)
npc.setPickable(0)
npc.setPlayerType(NametagGroup.CCNonPlayer)
npc.animFSM.request('Sit')
self.witnessToon = npc
self.witnessToon.setPosHpr(*ToontownGlobals.LawbotBossWitnessStandPosHpr)
def __cleanupWitnessToon(self):
self.__hideWitnessToon()
if self.witnessToon:
self.witnessToon.removeActive()
self.witnessToon.delete()
self.witnessToon = None
return
def __showWitnessToon(self):
if not self.witnessToonOnstage:
self.witnessToon.addActive()
self.witnessToon.reparentTo(self.geom)
seatCenter = self.realWitnessStand.find('**/witnessStandSeatEdge')
center = seatCenter.getPos()
self.notify.debug('center = %s' % center)
self.witnessToon.setPos(center)
self.witnessToon.setH(180)
self.witnessToon.setZ(self.witnessToon.getZ() - 1.5)
self.witnessToon.setY(self.witnessToon.getY() - 1.15)
self.witnessToonOnstage = 1
def __hideWitnessToon(self):
if self.witnessToonOnstage:
self.witnessToon.removeActive()
self.witnessToon.detachNode()
self.witnessToonOnstage = 0
def __hideToons(self):
for toonId in self.involvedToons:
toon = self.cr.doId2do.get(toonId)
if toon:
toon.hide()
def __showToons(self):
for toonId in self.involvedToons:
toon = self.cr.doId2do.get(toonId)
if toon:
toon.show()
def __arrangeToonsAroundWitnessToon(self):
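# Fan the involved toons out in a 15-degree arc of radius 7 in front of the
# witness toon for the epilogue shot.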
radius = 7
numToons = len(self.involvedToons)
center = (numToons - 1) / 2.0
for i in xrange(numToons):
toon = self.cr.doId2do.get(self.involvedToons[i])
if toon:
angle = 90 - 15 * (i - center)
radians = angle * math.pi / 180.0
x = math.cos(radians) * radius
y = math.sin(radians) * radius
toon.setPos(self.witnessToon, x, y, 0)
toon.headsUp(self.witnessToon)
toon.loop('neutral')
toon.show()
def __talkAboutPromotion(self, speech):
if self.prevCogSuitLevel < ToontownGlobals.MaxCogSuitLevel:
newCogSuitLevel = localAvatar.getCogLevels()[CogDisguiseGlobals.dept2deptIndex(self.style.dept)]
if newCogSuitLevel == ToontownGlobals.MaxCogSuitLevel:
speech += TTLocalizer.WitnessToonLastPromotion % (ToontownGlobals.MaxCogSuitLevel + 1)
if newCogSuitLevel in ToontownGlobals.CogSuitHPLevels:
speech += TTLocalizer.WitnessToonHPBoost
else:
speech += TTLocalizer.WitnessToonMaxed % (ToontownGlobals.MaxCogSuitLevel + 1)
return speech
def __positionToonsInFrontOfCannons(self):
self.notify.debug('__positionToonsInFrontOfCannons')
index = 0
self.involvedToons.sort()
for toonId in self.involvedToons:
if index in self.cannons:
cannon = self.cannons[index]
toon = self.cr.doId2do.get(toonId)
self.notify.debug('cannonId = %d' % cannon.doId)
cannonPos = cannon.nodePath.getPos(render)
self.notify.debug('cannonPos = %s' % cannonPos)
if toon:
self.notify.debug('toon = %s' % toon.getName())
toon.reparentTo(cannon.nodePath)
toon.setPos(0, 8, 0)
toon.setH(180)
renderPos = toon.getPos(render)
self.notify.debug('renderPos =%s' % renderPos)
index += 1
self.notify.debug('done with positionToons')
def __makePrepareBattleTwoMovie(self):
chatString = TTLocalizer.WitnessToonPrepareBattleTwo % ToontownGlobals.LawbotBossJurorsForBalancedScale
movie = Sequence(Func(camera.reparentTo, self.witnessToon), Func(camera.setPos, 0, 8, 2), Func(camera.setHpr, 180, 10, 0), Func(self.witnessToon.setLocalPageChat, chatString, 0))
return movie
def __doWitnessPrepareBattleThreeChat(self):
self.notify.debug('__doWitnessPrepareBattleThreeChat: original self.numToonJurorsSeated = %d' % self.numToonJurorsSeated)
self.countToonJurors()
self.notify.debug('after calling self.countToonJurors, numToonJurorsSeated=%d' % self.numToonJurorsSeated)
if self.numToonJurorsSeated == 0:
juryResult = TTLocalizer.WitnessToonNoJuror
elif self.numToonJurorsSeated == 1:
juryResult = TTLocalizer.WitnessToonOneJuror
elif self.numToonJurorsSeated == 12:
juryResult = TTLocalizer.WitnessToonAllJurors
else:
juryResult = TTLocalizer.WitnessToonSomeJurors % self.numToonJurorsSeated
juryResult += '\x07'
trialSpeech = juryResult
trialSpeech += TTLocalizer.WitnessToonPrepareBattleThree
diffSettings = ToontownGlobals.LawbotBossDifficultySettings[self.battleDifficulty]
if diffSettings[4]:
newWeight, self.bonusWeight, self.numJurorsLocalToonSeated = self.calculateWeightOfToon(base.localAvatar.doId)
if self.bonusWeight > 0:
if self.bonusWeight == 1:
juryWeightBonus = TTLocalizer.WitnessToonJuryWeightBonusSingular.get(self.battleDifficulty)
else:
juryWeightBonus = TTLocalizer.WitnessToonJuryWeightBonusPlural.get(self.battleDifficulty)
if juryWeightBonus:
weightBonusText = juryWeightBonus % (self.numJurorsLocalToonSeated, self.bonusWeight)
trialSpeech += '\x07'
trialSpeech += weightBonusText
self.witnessToon.setLocalPageChat(trialSpeech, 0)
def __makePrepareBattleThreeMovie(self):
movie = Sequence(Func(camera.reparentTo, render), Func(camera.setPos, -15, 15, 20), Func(camera.setHpr, -90, 0, 0), Wait(3), Func(camera.reparentTo, self.witnessToon), Func(camera.setPos, 0, 8, 2), Func(camera.setHpr, 180, 10, 0), Func(self.__doWitnessPrepareBattleThreeChat))
return movie
def countToonJurors(self):
self.numToonJurorsSeated = 0
for key in self.chairs.keys():
chair = self.chairs[key]
if chair.state == 'ToonJuror' or chair.state == None and chair.newState == 'ToonJuror':
self.numToonJurorsSeated += 1
self.notify.debug('self.numToonJurorsSeated = %d' % self.numToonJurorsSeated)
return
def cleanupPanFlash(self):
if self.panFlashInterval:
self.panFlashInterval.finish()
self.panFlashInterval = None
return
def flashPanBlue(self):
self.cleanupPanFlash()
intervalName = 'FlashPanBlue'
self.defensePanNodePath.setColorScale(1, 1, 1, 1)
seq = Sequence(self.defensePanNodePath.colorScaleInterval(0.1, colorScale=VBase4(0, 0, 1, 1)), self.defensePanNodePath.colorScaleInterval(0.3, colorScale=VBase4(1, 1, 1, 1)), name=intervalName)
self.panFlashInterval = seq
seq.start()
self.storeInterval(seq, intervalName)
def saySomething(self, chatString):
intervalName = 'ChiefJusticeTaunt'
seq = Sequence(name=intervalName)
seq.append(Func(self.setChatAbsolute, chatString, CFSpeech))
seq.append(Wait(4.0))
seq.append(Func(self.clearChat))
oldSeq = self.activeIntervals.get(intervalName)
if oldSeq:
oldSeq.finish()
seq.start()
self.storeInterval(seq, intervalName)
def setTaunt(self, tauntIndex, extraInfo):
gotError = False
if not hasattr(self, 'state'):
self.notify.warning('returning from setTaunt, no attr state')
gotError = True
elif not self.state == 'BattleThree':
self.notify.warning('returning from setTaunt, not in battle three state, state=%s', self.state)
gotError = True
if not hasattr(self, 'nametag'):
self.notify.warning('returning from setTaunt, no attr nametag')
gotError = True
if gotError:
st = StackTrace()
print st
return
chatString = TTLocalizer.LawbotBossTaunts[1]
if tauntIndex == 0:
if extraInfo < len(self.involvedToons):
toonId = self.involvedToons[extraInfo]
toon = base.cr.doId2do.get(toonId)
if toon:
chatString = TTLocalizer.LawbotBossTaunts[tauntIndex] % toon.getName()
else:
chatString = TTLocalizer.LawbotBossTaunts[tauntIndex]
self.saySomething(chatString)
def toonGotHealed(self, toonId):
toon = base.cr.doId2do.get(toonId)
if toon:
base.playSfx(self.toonUpSfx, node=toon)
def hideBonusTimer(self):
if self.bonusTimer:
self.bonusTimer.hide()
def enteredBonusState(self):
self.witnessToon.clearChat()
text = TTLocalizer.WitnessToonBonus % (ToontownGlobals.LawbotBossBonusWeightMultiplier, ToontownGlobals.LawbotBossBonusDuration)
self.witnessToon.setChatAbsolute(text, CFSpeech | CFTimeout)
base.playSfx(self.toonUpSfx)
if not self.bonusTimer:
self.bonusTimer = ToontownTimer.ToontownTimer()
self.bonusTimer.posInTopRightCorner()
self.bonusTimer.show()
self.bonusTimer.countdown(ToontownGlobals.LawbotBossBonusDuration, self.hideBonusTimer)
def setAttackCode(self, attackCode, avId = 0):
DistributedBossCog.DistributedBossCog.setAttackCode(self, attackCode, avId)
if attackCode == ToontownGlobals.BossCogAreaAttack:
self.saySomething(TTLocalizer.LawbotBossAreaAttackTaunt)
base.playSfx(self.warningSfx)
def setBattleDifficulty(self, diff):
self.notify.debug('battleDifficulty = %d' % diff)
self.battleDifficulty = diff
def toonEnteredCannon(self, toonId, cannonIndex):
if base.localAvatar.doId == toonId:
self.cannonIndex = cannonIndex
def numJurorsSeatedByCannon(self, cannonIndex):
retVal = 0
for chair in self.chairs.values():
if chair.state == 'ToonJuror':
if chair.toonJurorIndex == cannonIndex:
retVal += 1
return retVal
def calculateWeightOfToon(self, toonId):
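# Pie weight for a toon: 1 plus a bonus equal to the number of jurors seated
# from their cannon beyond the difficulty threshold (bonus never negative).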
defaultWeight = 1
bonusWeight = 0
newWeight = 1
cannonIndex = self.cannonIndex
numJurors = 0
if not cannonIndex == None and cannonIndex >= 0:
diffSettings = ToontownGlobals.LawbotBossDifficultySettings[self.battleDifficulty]
if diffSettings[4]:
numJurors = self.numJurorsSeatedByCannon(cannonIndex)
bonusWeight = numJurors - diffSettings[5]
if bonusWeight < 0:
bonusWeight = 0
newWeight = defaultWeight + bonusWeight
self.notify.debug('toon %d has weight of %d' % (toonId, newWeight))
return (newWeight, bonusWeight, numJurors)
| 1.625 | 2 |
tests/test_custom_rnncell.py | lightmatter-ai/tensorflow-onnx | 0 | 5846 | # SPDX-License-Identifier: Apache-2.0
"""Unit Tests for custom rnns."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import init_ops
from backend_test_base import Tf2OnnxBackendTestBase
from common import * # pylint: disable=wildcard-import, unused-wildcard-import
from tf2onnx.tf_loader import is_tf2
# pylint: disable=missing-docstring,invalid-name,unused-argument,using-constant-test
# pylint: disable=abstract-method,arguments-differ
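# Alias the TF1-style RNN cell and dynamic_rnn names so the tests below run
# unchanged under both TF1 and TF2 (via tf.compat.v1).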
if is_tf2():
BasicLSTMCell = tf.compat.v1.nn.rnn_cell.BasicLSTMCell
LSTMCell = tf.compat.v1.nn.rnn_cell.LSTMCell
GRUCell = tf.compat.v1.nn.rnn_cell.GRUCell
RNNCell = tf.compat.v1.nn.rnn_cell.RNNCell
MultiRNNCell = tf.compat.v1.nn.rnn_cell.MultiRNNCell
dynamic_rnn = tf.compat.v1.nn.dynamic_rnn
bidirectional_dynamic_rnn = tf.compat.v1.nn.bidirectional_dynamic_rnn
else:
LSTMBlockCell = tf.contrib.rnn.LSTMBlockCell
LSTMCell = tf.nn.rnn_cell.LSTMCell
GRUCell = tf.nn.rnn_cell.GRUCell
RNNCell = tf.nn.rnn_cell.RNNCell
MultiRNNCell = tf.contrib.rnn.MultiRNNCell
dynamic_rnn = tf.nn.dynamic_rnn
bidirectional_dynamic_rnn = tf.nn.bidirectional_dynamic_rnn
class CustomRnnCellTests(Tf2OnnxBackendTestBase):
@check_opset_min_version(8, "Scan")
@skip_tf2()
def test_single_dynamic_custom_rnn(self):
size = 5 # size of each model layer.
batch_size = 1
cell = GatedGRUCell(size)
x_val = np.array([[1., 1.], [2., 2.], [3., 3.]], dtype=np.float32)
x_val = np.stack([x_val] * batch_size)
def func(x):
xs, s = dynamic_rnn(cell=cell, dtype=tf.float32, inputs=x, time_major=False)
return tf.identity(xs, name="output"), tf.identity(s, name="final_state")
feed_dict = {"input_1:0": x_val}
input_names_with_port = ["input_1:0"]
output_names_with_port = ["output:0", "final_state:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.1)
@check_opset_min_version(8, "Scan")
@skip_tf2()
def test_single_dynamic_custom_rnn_time_major(self):
size = 5 # size of each model layer.
batch_size = 1
x_val = np.array([[1., 1.], [2., 2.], [3., 3.]], dtype=np.float32)
x_val = np.stack([x_val] * batch_size)
def func(x):
cell = GatedGRUCell(size)
xs, s = dynamic_rnn(cell=cell, dtype=tf.float32, inputs=x, time_major=True)
return tf.identity(xs, name="output"), tf.identity(s, name="final_state")
feed_dict = {"input_1:0": x_val}
input_names_with_port = ["input_1:0"]
output_names_with_port = ["output:0", "final_state:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.1)
@check_opset_min_version(8, "Scan")
@skip_tf2()
def test_single_dynamic_custom_rnn_with_seq_length(self):
units = 5
batch_size = 6
x_val = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.], [5., 5.]], dtype=np.float32)
x_val = np.stack([x_val] * batch_size)
def func(x):
# no scope
cell = GatedGRUCell(units)
outputs, cell_state = dynamic_rnn(
cell,
x,
dtype=tf.float32,
sequence_length=[4, 3, 4, 5, 2, 1])
return tf.identity(outputs, name="output"), tf.identity(cell_state, name="cell_state")
feed_dict = {"input_1:0": x_val}
input_names_with_port = ["input_1:0"]
output_names_with_port = ["output:0", "cell_state:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-06)
@check_opset_min_version(8, "Scan")
@skip_tf2()
def test_single_dynamic_custom_rnn_with_non_const_seq_length(self):
units = 5
batch_size = 6
x_val = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.], [5., 5.]], dtype=np.float32)
x_val = np.stack([x_val] * batch_size)
y_val = np.array([4, 3, 4, 5, 2, 1], dtype=np.int32)
def func(x, seq_length):
# no scope
cell = GatedGRUCell(units)
outputs, cell_state = dynamic_rnn(
cell,
x,
dtype=tf.float32,
sequence_length=tf.identity(seq_length))
return tf.identity(outputs, name="output"), tf.identity(cell_state, name="cell_state")
feed_dict = {"input_1:0": x_val, "input_2:0": y_val}
input_names_with_port = ["input_1:0", "input_2:0"]
output_names_with_port = ["output:0", "cell_state:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-06)
@check_opset_min_version(8, "Scan")
@check_tf_min_version("1.8")
@skip_tf2()
def test_attention_wrapper_const_encoder(self):
size = 5
time_step = 3
input_size = 4
attn_size = size
batch_size = 9
# shape [batch size, time step, size]
# attention_state: usually the output of an RNN encoder.
# This tensor should be shaped `[batch_size, max_time, ...]`.
decoder_time_step = 6
x_val = np.random.randn(decoder_time_step, input_size).astype('f')
x_val = np.stack([x_val] * batch_size)
attention_states = np.random.randn(batch_size, time_step, attn_size).astype('f')
def func(x):
attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(attn_size, attention_states)
match_input_fn = lambda curr_input, state: tf.concat([curr_input, state], axis=-1)
cell = LSTMCell(size)
match_cell_fw = tf.contrib.seq2seq.AttentionWrapper(cell,
attention_mechanism,
attention_layer_size=attn_size,
cell_input_fn=match_input_fn,
output_attention=False)
output, attr_state = dynamic_rnn(match_cell_fw, x, dtype=tf.float32)
return tf.identity(output, name="output"), tf.identity(attr_state.cell_state, name="final_state")
feed_dict = {"input_1:0": x_val}
input_names_with_port = ["input_1:0"]
output_names_with_port = ["output:0"]
output_names_with_port = ["output:0", "final_state:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.1)
@check_opset_min_version(8, "Scan")
@check_tf_min_version("1.8")
@skip_tf2()
def test_attention_wrapper_lstm_encoder(self):
size = 5
time_step = 3
input_size = 4
attn_size = size
batch_size = 9
# shape [batch size, time step, size]
# attention_state: usually the output of an RNN encoder.
# This tensor should be shaped `[batch_size, max_time, ...]`
encoder_time_step = time_step
encoder_x_val = np.random.randn(encoder_time_step, input_size).astype('f')
encoder_x_val = np.stack([encoder_x_val] * batch_size)
decoder_time_step = 6
decoder_x_val = np.random.randn(decoder_time_step, input_size).astype('f')
decoder_x_val = np.stack([decoder_x_val] * batch_size)
def func(encoder_x, decoder_x):
encoder_cell = LSTMCell(size)
output, attr_state = dynamic_rnn(encoder_cell, encoder_x, dtype=tf.float32)
output_0 = tf.identity(output, name="output_0")
attention_states = output
attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(attn_size,
attention_states)
match_input_fn = lambda curr_input, state: tf.concat([curr_input, state], axis=-1)
cell = LSTMCell(size)
match_cell_fw = tf.contrib.seq2seq.AttentionWrapper(cell,
attention_mechanism,
attention_layer_size=attn_size,
cell_input_fn=match_input_fn,
output_attention=False)
output, attr_state = dynamic_rnn(match_cell_fw, decoder_x, dtype=tf.float32)
return output_0, tf.identity(output, name="output"), tf.identity(attr_state.cell_state, name="final_state")
feed_dict = {"input_1:0": encoder_x_val, "input_2:0": decoder_x_val}
input_names_with_port = ["input_1:0", "input_2:0"]
output_names_with_port = ["output_0:0", "output:0", "final_state:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.1)
@check_opset_min_version(8, "Scan")
@check_tf_min_version("1.8")
@skip_tf2()
def test_attention_wrapper_gru_encoder(self):
size = 5
time_step = 3
input_size = 4
attn_size = size
batch_size = 9
# shape [batch size, time step, size]
# attention_state: usually the output of an RNN encoder.
# This tensor should be shaped `[batch_size, max_time, ...]`
encoder_time_step = time_step
encoder_x_val = np.random.randn(encoder_time_step, input_size).astype('f')
encoder_x_val = np.stack([encoder_x_val] * batch_size)
decoder_time_step = 6
decoder_x_val = np.random.randn(decoder_time_step, input_size).astype('f')
decoder_x_val = np.stack([decoder_x_val] * batch_size)
def func(encoder_x, decoder_x):
encoder_cell = GRUCell(size)
output, attr_state = dynamic_rnn(encoder_cell, encoder_x, dtype=tf.float32)
_ = tf.identity(output, name="output_0")
attention_states = output
attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(attn_size,
attention_states)
match_input_fn = lambda curr_input, state: tf.concat([curr_input, state], axis=-1)
cell = GRUCell(size)
match_cell_fw = tf.contrib.seq2seq.AttentionWrapper(cell,
attention_mechanism,
attention_layer_size=attn_size,
cell_input_fn=match_input_fn,
output_attention=False)
output, attr_state = dynamic_rnn(match_cell_fw, decoder_x, dtype=tf.float32)
return tf.identity(output, name="output"), tf.identity(attr_state.cell_state, name="final_state")
feed_dict = {"input_1:0": encoder_x_val, "input_2:0": decoder_x_val}
input_names_with_port = ["input_1:0", "input_2:0"]
output_names_with_port = ["output_0:0", "output:0", "final_state:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.1)
@check_opset_min_version(8, "Scan")
@check_tf_min_version("1.8")
@skip_tf2()
def test_attention_wrapper_lstm_encoder_input_has_none_dim(self):
size = 5
time_step = 3
input_size = 4
attn_size = size
batch_size = 9
# shape [batch size, time step, size]
# attention_state: usually the output of an RNN encoder.
# This tensor should be shaped `[batch_size, max_time, ...]`
encoder_time_step = time_step
encoder_x_val = np.random.randn(encoder_time_step, input_size).astype('f')
encoder_x_val = np.stack([encoder_x_val] * batch_size)
decoder_time_step = 6
decoder_x_val = np.random.randn(decoder_time_step, input_size).astype('f')
decoder_x_val = np.stack([decoder_x_val] * batch_size)
def func(encoder_x, decoder_x):
encoder_cell = LSTMCell(size)
output, attr_state = dynamic_rnn(encoder_cell, encoder_x, dtype=tf.float32)
_ = tf.identity(output, name="output_0")
attention_states = output
attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(attn_size,
attention_states)
match_input_fn = lambda curr_input, state: tf.concat([curr_input, state], axis=-1)
cell = LSTMCell(size)
match_cell_fw = tf.contrib.seq2seq.AttentionWrapper(cell,
attention_mechanism,
attention_layer_size=attn_size,
cell_input_fn=match_input_fn,
output_attention=False)
output, attr_state = dynamic_rnn(match_cell_fw, decoder_x, dtype=tf.float32)
return tf.identity(output, name="output"), tf.identity(attr_state.cell_state, name="final_state")
feed_dict = {"input_1:0": encoder_x_val, "input_2:0": decoder_x_val}
input_names_with_port = ["input_1:0", "input_2:0"]
output_names_with_port = ["output_0:0", "output:0", "final_state:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.1)
@check_opset_min_version(8, "Scan")
@skip_tf2()
def test_multi_rnn_lstm(self, state_is_tuple=True):
units = 5
batch_size = 6
x_val = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]], dtype=np.float32)
x_val = np.stack([x_val] * batch_size)
def func(x):
initializer = init_ops.constant_initializer(0.5)
cell_0 = LSTMCell(units,
initializer=initializer,
state_is_tuple=state_is_tuple)
cell_1 = LSTMCell(units,
initializer=initializer,
state_is_tuple=state_is_tuple)
cell_2 = LSTMCell(units,
initializer=initializer,
state_is_tuple=state_is_tuple)
cells = MultiRNNCell([cell_0, cell_1, cell_2], state_is_tuple=state_is_tuple)
outputs, cell_state = dynamic_rnn(cells, x, dtype=tf.float32)
return tf.identity(outputs, name="output"), tf.identity(cell_state, name="cell_state")
input_names_with_port = ["input_1:0"]
feed_dict = {"input_1:0": x_val}
output_names_with_port = ["output:0", "cell_state:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-06)
@check_opset_min_version(8, "Scan")
@check_tf_min_version("1.8")
@skip_opset(9, "ReverseSequence")
@skip_tf2()
@allow_missing_shapes("Missing RNN shape")
def test_bidrectional_attention_wrapper_lstm_encoder(self):
size = 30
time_step = 3
input_size = 4
attn_size = size
batch_size = 9
# shape [batch size, time step, size]
# attention_state: usually the output of an RNN encoder.
# This tensor should be shaped `[batch_size, max_time, ...]`
encoder_time_step = time_step
encoder_x_val = np.random.randn(encoder_time_step, input_size).astype('f')
encoder_x_val = np.stack([encoder_x_val] * batch_size)
decoder_time_step = 6
decoder_x_val = np.random.randn(decoder_time_step, batch_size, input_size).astype('f')
def func(encoder_x, decoder_x, seq_length):
encoder_cell = LSTMCell(size)
attention_states, _ = dynamic_rnn(encoder_cell, encoder_x, dtype=tf.float32)
# [9, 3, 30], [9, 30]
attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(attn_size,
attention_states)
match_input_fn = lambda curr_input, state: tf.concat([curr_input, state], axis=-1)
cell = LSTMCell(size)
match_cell_fw = tf.contrib.seq2seq.AttentionWrapper(cell,
attention_mechanism,
attention_layer_size=attn_size,
cell_input_fn=match_input_fn,
output_attention=False)
match_cell_bk = tf.contrib.seq2seq.AttentionWrapper(cell,
attention_mechanism,
attention_layer_size=attn_size,
cell_input_fn=match_input_fn,
output_attention=False)
(match_output_fw, match_output_bk), (match_state_fw, match_state_bk) = \
bidirectional_dynamic_rnn(cell_fw=match_cell_fw,
cell_bw=match_cell_bk,
inputs=decoder_x,
sequence_length=tf.identity(seq_length),
dtype=tf.float32,
time_major=True)
matched_output = tf.concat([match_output_fw, match_output_bk], axis=-1)
matched_state = tf.concat([match_state_fw.cell_state, match_state_bk.cell_state], -1)
return tf.identity(matched_output, name="output_0"), tf.identity(matched_state, name="final_state")
feed_dict = {"input_1:0": encoder_x_val, "input_2:0": decoder_x_val,
"input_3:0": np.array([6, 5, 4, 3, 2, 1, 2, 3, 6], dtype=np.int32)}
input_names_with_port = ["input_1:0", "input_2:0", "input_3:0"]
output_names_with_port = ["output_0:0", "final_state:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.1)
class GatedGRUCell(RNNCell):
def __init__(self, hidden_dim, reuse=None):
        super().__init__(_reuse=reuse)
self._num_units = hidden_dim
self._activation = tf.tanh
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
def call(self, inputs, state):
# inputs shape: [batch size, time step, input size] = [1, 3, 2]
# num_units: 5
# W shape: [2, 3 * 5] = [2, 15]
# U shape: [5, 3 * 5] = [5, 15]
# b shape: [1, 3 * 5] = [1, 15]
# state shape: [batch size, state size] = [1, 5]
input_dim = inputs.get_shape()[-1]
assert input_dim is not None, "input dimension must be defined"
# W = tf.get_variable(name="W", shape=[input_dim, 3 * self._num_units], dtype=tf.float32)
W = np.arange(30.0, dtype=np.float32).reshape((2, 15))
# U = tf.get_variable(name='U', shape=[self._num_units, 3 * self._num_units], dtype=tf.float32)
U = np.arange(75.0, dtype=np.float32).reshape((5, 15))
# b = tf.get_variable(name='b', shape=[1, 3 * self._num_units], dtype=tf.float32)
b = np.arange(15.0, dtype=np.float32).reshape((1, 15))
xw = tf.split(tf.matmul(inputs, W) + b, 3, 1)
hu = tf.split(tf.matmul(state, U), 3, 1)
r = tf.sigmoid(xw[0] + hu[0])
z = tf.sigmoid(xw[1] + hu[1])
h1 = self._activation(xw[2] + r * hu[2])
next_h = h1 * (1 - z) + state * z
return next_h, next_h
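# A minimal usage sketch (an assumption for illustration, not part of the test
# suite above): GatedGRUCell follows the RNNCell contract, so it can be driven
# by dynamic_rnn like the stock cells used in the tests. Shapes follow the
# comments in call():
#
#     cell = GatedGRUCell(hidden_dim=5)
#     x = tf.placeholder(tf.float32, shape=[1, 3, 2])   # [batch, time, input]
#     outputs, state = dynamic_rnn(cell, x, dtype=tf.float32)
#     # outputs: [1, 3, 5], state: [1, 5]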
if __name__ == '__main__':
unittest_main()
| 1.953125 | 2 |
cookie-cutter/src/templates/template.py | noname34/CHARM_Project_Hazard_Perception_I | 0 | 5847 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Author: <NAME>
# @Email: <EMAIL>
# @Date: 04.2020
# Context: CHARM PROJECT - Hazard perception
"""
Module documentation.
"""
# Imports
import sys
#import os
# Global variables
# Class declarations
# Function declarations
def main():
args = sys.argv[1:]
if not args:
print('usage: [--flags options] [inputs] ')
sys.exit(1)
# Main body
if __name__ == '__main__':
main() | 1.8125 | 2 |
utils/gridpeak.py | siwill22/magSA | 0 | 5848 | <filename>utils/gridpeak.py
import numpy
def gridpeak(t, X=None):
# GP = GRIDPEAK(...)
# gp = gridpeak(t) return gridpeaks based on Blakely
# and Simpson method
# gp = gridpeak(t,X) optionally remove peak values scoring less than X,
# where X can be between 1 and 4.
    print('shape ', t.shape)
m, n = t.shape
p = 1
gp = numpy.zeros((m, n))
for i in numpy.arange(p, m - p):
for j in numpy.arange(p, n - p):
data = numpy.zeros(4)
data[0] = t[i - p, j] < t[i, j] and t[i, j] > t[i + p, j]
data[1] = t[i, j - p] < t[i, j] and t[i, j] > t[i, j + p]
data[2] = t[i + p, j - p] < t[i, j] and t[i, j] > t[i - p, j + p]
data[3] = t[i - p, j - p] < t[i, j] and t[i, j] > t[i + p, j + p]
gp[i, j] = numpy.sum(data)
if X:
gp[gp < X] = numpy.nan
gp = gp / gp
return gp
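# Hedged usage sketch (illustrative only, not part of the original module): given
# a 2-D grid, gridpeak scores each cell 0-4 by the four-direction local-maximum
# test above, and the optional X threshold blanks out weakly supported peaks.
#
#     import numpy
#     t = numpy.random.rand(50, 50)
#     gp_all = gridpeak(t)          # every candidate peak, scores 1-4
#     gp_strong = gridpeak(t, X=3)  # keep only peaks supported in >= 3 directions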
| 2.640625 | 3 |
Chapter 10/trackbackLog.py | Miillky/automate_the_boring_stuff_with_python | 0 | 5849 | <reponame>Miillky/automate_the_boring_stuff_with_python<gh_stars>0
import traceback
try:
raise Exception('This is the error message.')
except:
errorFile = open('./Chapter 10/errorInfo.txt', 'w')
errorFile.write(traceback.format_exc())
errorFile.close()
print('The traceback info was written to errorInfo.txt') | 3 | 3 |
Module_III/PySparkNetworkSimilarityClass.py | wuchiehhan/KDD2019-HandsOn-Tutorial | 0 | 5850 | # Databricks notebook source
from pyspark.sql.types import *
from pyspark.sql import functions as F
import base64
import array
# COMMAND ----------
# s is a base64 encoded float[] with first element being the magnitude
def Base64ToFloatArray(s):
arr = array.array('f', base64.b64decode(s))
return (arr[0], arr[1:])
def cosineSimilarity(s1, s2):
(m1, v1) = Base64ToFloatArray(s1)
(m2, v2) = Base64ToFloatArray(s2)
if (m1 == 0) or (m2 == 0):
return 0
else :
return sum(x*y for x,y in zip(v1, v2))/(m1 * m2)
# Register udf functions so that it could be used in dataframe
#
# Perform same computation as cosineSimilarity()
#
@F.udf("float")
def udfCosineSimilarity(s1, s2):
return cosineSimilarity(s1, s2)
# COMMAND ----------
# MAGIC %md **NetworkSimilarity** class to compute Network Similarity
# COMMAND ----------
# Parameters:
# resource: resource stream path
# container: container name in Azure Storage (AS) account
# account: Azure Storage (AS) account
# sas: complete 'Blob service SAS URL' of the shared access signature (sas) for the container
# key: access key for the container, if sas is specified, key is ignored
#
# Note:
# resource does not have header
# you need to provide value for either sas or key
#
class NetworkSimilarity(AzureStorageAccess):
# constructor
def __init__(self, resource, container, account, sas='', key=''):
AzureStorageAccess.__init__(self, container, account, sas, key)
schema = StructType()
schema.add(StructField('EntityId', LongType(), False))
schema.add(StructField('EntityType', StringType(), False))
schema.add(StructField('Data', StringType(), False))
self.df = spark.read.format('csv').options(header='false', delimiter='\t').schema(schema).load(self.getFullpath(resource))
def getDataframe(self):
return self.df
def raiseErrorIfNotFound(self, row, e):
if row is None:
raise KeyError('entity ' + str(e) + ' not found')
def getSimilarity(self, e1, e2):
df = self.df
row1 = df.where(df.EntityId == e1).first()
self.raiseErrorIfNotFound(row1, e1)
row2 = df.where(df.EntityId == e2).first()
self.raiseErrorIfNotFound(row2, e2)
return cosineSimilarity(row1.Data, row2.Data)
def getTopEntities(self, e, targetType = '', maxCount = 20, minScore = 0.0):
df1 = self.df
row1 = df1.where(df1.EntityId == e).first()
self.raiseErrorIfNotFound(row1, e)
if targetType == '':
df2 = df1.where(df1.EntityId != e)
else :
df2 = df1.where((df1.EntityId != e) & (df1.EntityType == targetType))
df3 = df2.select(df2.EntityId, df2.EntityType, udfCosineSimilarity(F.lit(row1.Data), df2.Data).alias('Score'))
return df3.where(df3.Score >= minScore).orderBy(df3.Score.desc()).limit(maxCount)
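# COMMAND ----------

# Hedged usage sketch: the resource/container/account values below are
# placeholders (assumptions for illustration), and AzureStorageAccess plus the
# `spark` session are expected to come from earlier notebook cells.
#
#     ns = NetworkSimilarity(resource='entity-embeddings.tsv',
#                            container='my-container',
#                            account='mystorageaccount',
#                            key='<access-key>')
#     ns.getSimilarity(e1=123, e2=456)            # cosine similarity of two entities
#     ns.getTopEntities(123, maxCount=10).show()  # nearest neighbours of entity 123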
| 2.8125 | 3 |
fizzbuzz.py | vagnes/fizzbuzzgame | 0 | 5851 | print("Press q to quit")
quit = False
while not quit:
in_val = input("Please enter a positive integer.\n > ")
    if in_val == 'q':
quit = True
elif int(in_val) % 3 == 0 and int(in_val) % 5 == 0:
print("FizzBuzz")
elif int(in_val) % 5 == 0:
print("Buzz")
elif int(in_val) % 3 == 0:
print("Fizz")
else:
pass
| 4.09375 | 4 |
lesson10019_projects/pen/data/transition.py | muzudho/py-state-machine-practice | 0 | 5852 | from lesson14_projects.pen.data.const import (
A,
E_A,
E_AN,
E_IS,
E_OVER,
E_PEN,
E_PIN,
E_THAT,
E_THIS,
E_WAS,
INIT,
IS,
PEN,
THIS,
)
pen_transition_doc_v19 = {
"title": "This is a pen",
"entry_state": INIT,
"data": {
INIT: {
E_OVER: [INIT],
E_THAT: [INIT],
E_THIS: [INIT, THIS],
THIS: {
E_OVER: [INIT],
E_WAS: [INIT],
E_IS: [INIT, THIS, IS],
IS: {
E_OVER: [INIT],
E_AN: [INIT],
E_A: [INIT, THIS, IS, A],
A: {
E_OVER: [INIT],
E_PIN: [INIT],
E_PEN: [PEN],
},
},
},
},
PEN: {
E_OVER: None,
},
},
}
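# Hedged reading sketch (illustrative, not part of the lesson data): the nested
# dict above maps state -> event -> next-state path, so a driver can walk it by
# following event keys from the entry state:
#
#     pen_transition_doc_v19["data"][INIT][E_THIS]   # -> [INIT, THIS]
#     pen_transition_doc_v19["data"][INIT][THIS]     # nested table for the THIS state
#     pen_transition_doc_v19["data"][PEN][E_OVER]    # None marks a terminal state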
| 1.648438 | 2 |
Animation/Main.py | olesmith/SmtC | 0 | 5853 | <filename>Animation/Main.py
import gd,os,time
from Html import Animation_Html
from Iteration import Animation_Iteration
from Write import Animation_Write
from Base import *
from Canvas2 import *
from Canvas2 import Canvas2
from Image import Image
from HTML import HTML
__Canvas__=None
class Animation(
Animation_Html,
Animation_Iteration,
Animation_Write,
Base,HTML
):
Convert_Bin="/usr/bin/convert"
HTML_Root="http://127.0.0.1/Graphics"
CGI_Root="http://127.0.0.1/cgi-bin/Graphics/Display.py"
__Switches__={
"v": {
"Attr": "Verbose",
"Text": "Verbosity level. Augment to see more numbers...",
"Type": None,
},
"-clean": {
"Attr": "Clean",
"Text": "Remove PNGs generated",
"Type": "int",
},
"-rewrite": {
"Attr": "Images_Rewrite",
"Text": "Rewrite image file between iterations",
"Type": None,
},
"l": {
"Attr": "Loop",
"Text": "Animated GIF no of loops (passed to convert)",
"Type": None,
},
"d": {
"Attr": "Delay",
"Text": "Animated GIF delay (passed to convert)",
"Type": None,
},
"W": {
"Attr": "W",
"Text": "White background",
"Type": "bool",
},
}
__Args__=[]
Indent=" "
W=False
Verbose=1
Delay="5"
Loop="0"
Path="curves"
Curve_Parms_Path=""
FileName="Curve"
Name="Curve"
Parameters=["a","b","c"]
Parameter_Names=["a","b","c"]
    Clean=0 # Clean up afterwards
Iteration_Files=[]
Images_Rewrite=1
def __init__(self,pmin,pmax,vals={}):
self.Hash2Obj(vals)
self.__Canvas__=Canvas2(vals,[ pmin,pmax ])
self.Canvas([ pmin,pmax ]).CLI2Obj()
##!
    ##! Override __str__ to print some useful info.
##!
def __str__(self):
text="Animation, Path: "+self.Path
text+="\n\tFileName: "+self.FileName
text+="\n\tParms: "+self.Curve_Parms_Path
text+="\n\tLoop: "+self.Loop
text+="\n\tDelay: "+self.Delay
text+="\n\tClean: "+str(self.Clean)
text+="\n"+str(self.Canvas())
return text
##!
##! Returns Canvas object, stored in self.__Canvas__
##!
def Canvas(self,pexts=[]):
global __Canvas__ # Needed to modify global copy of __Canvas__
if (not __Canvas__):
parms={
}
__Canvas__=Canvas2(parms,pexts)
return __Canvas__
def BackGround_Color(self):
if (self.W):
return "White"
else:
return "Black"
def Initialize(self):
self.Canvas().Resolution=self.Resolution
self.Canvas().Image_Rewrite()
| 2.984375 | 3 |
pytorch_metric_learning/miners/distance_weighted_miner.py | junjungoal/pytorch_metric_learning | 1 | 5854 | #! /usr/bin/env python3
from .base_miner import BasePostGradientMiner
import torch
from ..utils import loss_and_miner_utils as lmu
# adapted from
# https://github.com/chaoyuaw/incubator-mxnet/blob/master/example/gluon/
# /embedding_learning/model.py
class DistanceWeightedMiner(BasePostGradientMiner):
def __init__(self, cutoff, nonzero_loss_cutoff, **kwargs):
super().__init__(**kwargs)
self.cutoff = cutoff
self.nonzero_loss_cutoff = nonzero_loss_cutoff
def mine(self, embeddings, labels):
label_set = torch.unique(labels)
n, d = embeddings.size()
dist_mat = lmu.dist_mat(embeddings)
dist_mat = dist_mat + torch.eye(dist_mat.size(0)).to(embeddings.device)
# so that we don't get log(0). We mask the diagonal out later anyway
# Cut off to avoid high variance.
dist_mat = torch.max(dist_mat, torch.tensor(self.cutoff).to(dist_mat.device))
# Subtract max(log(distance)) for stability.
# See the first equation from Section 4 of the paper
log_weights = (2.0 - float(d)) * torch.log(dist_mat) - (
float(d - 3) / 2
) * torch.log(1.0 - 0.25 * (dist_mat ** 2.0))
weights = torch.exp(log_weights - torch.max(log_weights))
# Sample only negative examples by setting weights of
# the same-class examples to 0.
mask = torch.ones(weights.size()).to(embeddings.device)
for i in label_set:
idx = (labels == i).nonzero()
mask[torch.meshgrid(idx.squeeze(1), idx.squeeze(1))] = 0
weights = weights * mask * ((dist_mat < self.nonzero_loss_cutoff).float())
weights = weights / torch.sum(weights, dim=1, keepdim=True)
np_weights = weights.cpu().numpy()
return lmu.get_random_triplet_indices(labels, weights=np_weights)
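# Hedged usage sketch (the loss wiring is an assumption for illustration, not
# part of this module): the base miner's __call__ is expected to dispatch to
# mine(), so the inverse-density weighted negative sampling above yields triplet
# indices that a triplet-style loss can consume.
#
#     miner = DistanceWeightedMiner(cutoff=0.5, nonzero_loss_cutoff=1.4)
#     indices_tuple = miner(embeddings, labels)   # (anchor, positive, negative) indices
#     loss = loss_func(embeddings, labels, indices_tuple)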
| 2.4375 | 2 |
Keywords/__init__.py | cassie01/PumpLibrary | 0 | 5855 | <reponame>cassie01/PumpLibrary
# -*- coding: utf-8 -*-
from .Alarm.alarm import Alarm
from .DeliveryView.bolus import Bolus
from .DeliveryView.info import Info
from .DeliveryView.infusion import Infusion
from .DeliveryView.infusion_parameter import InfusionParameter
from .DeliveryView.priming import Priming
from .HardwareControl.motor import Motor
from .MenuSettings.device_report import DeviceReport
from .MenuSettings.history_log import HistoryLog
from .MenuSettings.infusion_setting import InfusionSetting
from .MenuSettings.maintenance import Maintenance
from .MenuSettings.safety_setting import SafetySetting
from .MenuSettings.system_setting import SystemSetting
from .SensorControl.sensor import Sensor
__all__ = ["Alarm",
"Bolus",
"Info",
"Infusion",
"InfusionParameter",
"Priming",
"Motor",
"DeviceReport",
"HistoryLog",
"InfusionSetting",
"Maintenance",
"SafetySetting",
"SystemSetting",
"Sensor",
]
| 1.171875 | 1 |
src/responsibleai/rai_analyse/constants.py | Azure/automl-devplat2-preview | 7 | 5856 | # ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
class DashboardInfo:
MODEL_ID_KEY = "id" # To match Model schema
MODEL_INFO_FILENAME = "model_info.json"
RAI_INSIGHTS_MODEL_ID_KEY = "model_id"
RAI_INSIGHTS_RUN_ID_KEY = "rai_insights_parent_run_id"
RAI_INSIGHTS_PARENT_FILENAME = "rai_insights.json"
class PropertyKeyValues:
# The property to indicate the type of Run
RAI_INSIGHTS_TYPE_KEY = "_azureml.responsibleai.rai_insights.type"
RAI_INSIGHTS_TYPE_CONSTRUCT = "construction"
RAI_INSIGHTS_TYPE_CAUSAL = "causal"
RAI_INSIGHTS_TYPE_COUNTERFACTUAL = "counterfactual"
RAI_INSIGHTS_TYPE_EXPLANATION = "explanation"
RAI_INSIGHTS_TYPE_ERROR_ANALYSIS = "error_analysis"
RAI_INSIGHTS_TYPE_GATHER = "gather"
# Property to point at the model under examination
RAI_INSIGHTS_MODEL_ID_KEY = "_azureml.responsibleai.rai_insights.model_id"
# Property for tool runs to point at their constructor run
RAI_INSIGHTS_CONSTRUCTOR_RUN_ID_KEY = (
"_azureml.responsibleai.rai_insights.constructor_run"
)
# Property to record responsibleai version
RAI_INSIGHTS_RESPONSIBLEAI_VERSION_KEY = (
"_azureml.responsibleai.rai_insights.responsibleai_version"
)
# Property format to indicate presence of a tool
RAI_INSIGHTS_TOOL_KEY_FORMAT = "_azureml.responsibleai.rai_insights.has_{0}"
class RAIToolType:
CAUSAL = "causal"
COUNTERFACTUAL = "counterfactual"
ERROR_ANALYSIS = "error_analysis"
EXPLANATION = "explanation"
| 1.710938 | 2 |
pulsar/apps/data/redis/store.py | goodboy/pulsar | 1 | 5857 | from functools import partial
from pulsar import Connection, Pool, get_actor
from pulsar.utils.pep import to_string
from pulsar.apps.data import RemoteStore
from pulsar.apps.ds import redis_parser
from .client import RedisClient, Pipeline, Consumer, ResponseError
from .pubsub import RedisPubSub, RedisChannels
class RedisStoreConnection(Connection):
def __init__(self, *args, **kw):
super().__init__(*args, **kw)
self.parser = self._producer._parser_class()
async def execute(self, *args, **options):
consumer = self.current_consumer()
await consumer.start((args, options))
result = await consumer.on_finished
if isinstance(result, ResponseError):
raise result.exception
return result
async def execute_pipeline(self, commands, raise_on_error=True):
consumer = self.current_consumer()
consumer.start((commands, raise_on_error, []))
result = await consumer.on_finished
if isinstance(result, ResponseError):
raise result.exception
return result
class RedisStore(RemoteStore):
'''Redis :class:`.Store` implementation.
'''
protocol_factory = partial(RedisStoreConnection, Consumer)
supported_queries = frozenset(('filter', 'exclude'))
def _init(self, namespace=None, parser_class=None, pool_size=50,
decode_responses=False, **kwargs):
self._decode_responses = decode_responses
if not parser_class:
actor = get_actor()
pyparser = actor.cfg.redis_py_parser if actor else False
parser_class = redis_parser(pyparser)
self._parser_class = parser_class
if namespace:
self._urlparams['namespace'] = namespace
self._pool = Pool(self.connect, pool_size=pool_size, loop=self._loop)
if self._database is None:
self._database = 0
self._database = int(self._database)
self.loaded_scripts = set()
@property
def pool(self):
return self._pool
@property
def namespace(self):
'''The prefix namespace to append to all transaction on keys
'''
n = self._urlparams.get('namespace')
return '%s:' % n if n else ''
def key(self):
return (self._dns, self._encoding)
def client(self):
'''Get a :class:`.RedisClient` for the Store'''
return RedisClient(self)
def pipeline(self):
'''Get a :class:`.Pipeline` for the Store'''
return Pipeline(self)
def pubsub(self, protocol=None):
return RedisPubSub(self, protocol=protocol)
def channels(self, protocol=None, **kw):
return RedisChannels(self.pubsub(protocol=protocol), **kw)
def ping(self):
return self.client().ping()
async def execute(self, *args, **options):
connection = await self._pool.connect()
with connection:
result = await connection.execute(*args, **options)
return result
async def execute_pipeline(self, commands, raise_on_error=True):
conn = await self._pool.connect()
with conn:
result = await conn.execute_pipeline(commands, raise_on_error)
return result
async def connect(self, protocol_factory=None):
protocol_factory = protocol_factory or self.create_protocol
if isinstance(self._host, tuple):
host, port = self._host
transport, connection = await self._loop.create_connection(
protocol_factory, host, port)
else:
raise NotImplementedError('Could not connect to %s' %
str(self._host))
if self._password:
await connection.execute('AUTH', self._password)
if self._database:
await connection.execute('SELECT', self._database)
return connection
def flush(self):
return self.execute('flushdb')
def close(self):
'''Close all open connections.'''
return self._pool.close()
def has_query(self, query_type):
return query_type in self.supported_queries
def basekey(self, meta, *args):
key = '%s%s' % (self.namespace, meta.table_name)
postfix = ':'.join((to_string(p) for p in args if p is not None))
return '%s:%s' % (key, postfix) if postfix else key
def meta(self, meta):
'''Extract model metadata for lua script stdnet/lib/lua/odm.lua'''
# indices = dict(((idx.attname, idx.unique) for idx in meta.indices))
data = meta.as_dict()
data['namespace'] = self.basekey(meta)
return data
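# Hedged usage sketch (the DSN is a placeholder; create_store is assumed to be
# the usual pulsar entry point for resolving a store from a connection string):
#
#     from pulsar.apps.data import create_store
#     store = create_store('redis://127.0.0.1:6379/7?namespace=test')
#     client = store.client()
#     pong = await client.ping()   # inside a coroutine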
class CompiledQuery:
def __init__(self, pipe, query):
self.pipe = pipe
| 2.3125 | 2 |
tasks/migrations/0005_auto_20200616_0123.py | tschelbs18/fruitful | 0 | 5858 | # Generated by Django 3.0.7 on 2020-06-16 05:23
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('tasks', '0004_auto_20200616_0116'),
]
operations = [
migrations.AddField(
model_name='userreward',
name='created_dt',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='userreward',
name='last_updated_dt',
field=models.DateTimeField(auto_now=True),
),
migrations.AddField(
model_name='usertask',
name='created_dt',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='usertask',
name='last_updated_dt',
field=models.DateTimeField(auto_now=True),
),
]
| 1.835938 | 2 |
pcg_libraries/src/pcg_gazebo/parsers/types/vector.py | boschresearch/pcg_gazebo_pkgs | 42 | 5859 | <filename>pcg_libraries/src/pcg_gazebo/parsers/types/vector.py
# Copyright (c) 2019 - The Procedural Generation for Gazebo authors
# For information on the respective copyright owner see the NOTICE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import XMLBase
import collections
class XMLVector(XMLBase):
_NAME = ''
def __init__(self, size=None):
XMLBase.__init__(self)
assert size is not None, 'Vector size cannot be None'
assert isinstance(size, int), \
'[{}] Vector size input must be an integer, received={}'.format(
self.xml_element_name, size)
assert size > 0, '[{}] Size must be greater than zero'.format(
self.xml_element_name)
self._size = size
self._value = [0 for _ in range(self._size)]
def _set_value(self, value):
        assert isinstance(value, collections.abc.Iterable), \
'Input must be iterable, element={}, received={}, type={}'.format(
self._NAME, value, type(value))
assert len(list(value)) == self._size, \
'Input vector has the wrong size, element={}, received={}, ' \
'size of received={}, expected length={}'.format(
self._NAME, value, len(list(value)), self._size)
for item in value:
assert isinstance(item, float) or isinstance(item, int)
self._value = list(value)
def reset(self):
self._value = [0 for _ in range(self._size)]
XMLBase.reset(self)
def is_valid(self):
if not isinstance(self._value, list):
print('Vector object must have a list as value')
return False
if len(self._value) != self._size:
print('Normal value must be a list with 3 elements')
return False
for item in self._value:
if not isinstance(item, float) and not isinstance(item, int):
print('Each vector element must be a float or integer')
return False
return True
def get_formatted_value_as_str(self):
assert self.is_valid(), 'Invalid vector'
output_str = ' '.join(['{}'] * self._size)
return output_str.format(*[format(x, 'n') for x in self._value])
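# Hedged subclass sketch (XMLNormal is a hypothetical name, and the `value`
# setter is assumed to be exposed by XMLBase and routed through _set_value):
# concrete vector elements fix the size in their constructor and inherit the
# validation and formatting defined above.
#
#     class XMLNormal(XMLVector):
#         _NAME = 'normal'
#
#         def __init__(self):
#             XMLVector.__init__(self, size=3)
#
#     n = XMLNormal()
#     n.value = [0, 0, 1]
#     n.get_formatted_value_as_str()   # -> '0 0 1'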
| 2.609375 | 3 |
tests/main/helpers/test_buyers_helpers.py | uk-gov-mirror/alphagov.digitalmarketplace-briefs-frontend | 1 | 5860 | <filename>tests/main/helpers/test_buyers_helpers.py
import mock
import pytest
from werkzeug.exceptions import NotFound
import app.main.helpers as helpers
from dmcontent.content_loader import ContentLoader
from dmtestutils.api_model_stubs import BriefStub, FrameworkStub, LotStub
content_loader = ContentLoader('tests/fixtures/content')
content_loader.load_manifest('dos', 'data', 'edit_brief')
questions_builder = content_loader.get_manifest('dos', 'edit_brief')
class TestBuyersHelpers(object):
def test_get_framework_and_lot(self):
provided_lot = LotStub(slug='digital-specialists', allows_brief=True).response()
data_api_client = mock.Mock()
data_api_client.get_framework.return_value = FrameworkStub(
slug='digital-outcomes-and-specialists-4',
status='live',
lots=[provided_lot],
).single_result_response()
framework, lot = helpers.buyers_helpers.get_framework_and_lot('digital-outcomes-and-specialists-4',
'digital-specialists',
data_api_client)
assert framework['status'] == "live"
assert framework['name'] == 'Digital Outcomes and Specialists 4'
assert framework['slug'] == 'digital-outcomes-and-specialists-4'
assert framework['clarificationQuestionsOpen'] is True
assert lot == provided_lot
def test_get_framework_and_lot_404s_for_wrong_framework_status(self):
data_api_client = mock.Mock()
data_api_client.get_framework.return_value = FrameworkStub(
slug='digital-outcomes-and-specialists-4',
status='open',
lots=[
LotStub(slug='digital-specialists', allows_brief=True).response()
]
).single_result_response()
with pytest.raises(NotFound):
helpers.buyers_helpers.get_framework_and_lot(
'digital-outcomes-and-specialists-4',
'digital-specialists',
data_api_client,
allowed_statuses=['live'],
)
def test_get_framework_and_lot_404s_if_allows_brief_required(self):
data_api_client = mock.Mock()
data_api_client.get_framework.return_value = FrameworkStub(
slug='digital-outcomes-and-specialists-4',
status='live',
lots=[
LotStub(slug='digital-specialists', allows_brief=False).response()
]
).single_result_response()
with pytest.raises(NotFound):
helpers.buyers_helpers.get_framework_and_lot(
'digital-outcomes-and-specialists-4',
'digital-specialists',
data_api_client,
must_allow_brief=True,
)
@pytest.mark.parametrize(
['framework', 'lot', 'user', 'result'],
[
('digital-outcomes-and-specialists-4', 'digital-specialists', 123, True),
('not-digital-outcomes-and-specialists', 'digital-specialists', 123, False),
('digital-outcomes-and-specialists-4', 'not-digital-specialists', 123, False),
('digital-outcomes-and-specialists-4', 'digital-specialists', 124, False),
]
)
def test_is_brief_correct(self, framework, lot, user, result):
brief = BriefStub(framework_slug='digital-outcomes-and-specialists-4', user_id=123, status='live').response()
assert helpers.buyers_helpers.is_brief_correct(brief, framework, lot, user) is result
@pytest.mark.parametrize(
['status', 'allow_withdrawn', 'result'],
[
('withdrawn', True, True),
('withdrawn', False, False),
('live', True, True),
('live', False, True),
]
)
def test_if_brief_correct_allow_withdrawn(self, status, allow_withdrawn, result):
brief = BriefStub(framework_slug='digital-outcomes-and-specialists-4', user_id=123, status=status).response()
assert helpers.buyers_helpers.is_brief_correct(
brief, 'digital-outcomes-and-specialists-4', 'digital-specialists', 123, allow_withdrawn=allow_withdrawn
) is result
@pytest.mark.parametrize(
'allowed_statuses, result', [
(['live', 'closed'], True),
(['closed'], False)
]
)
def test_is_brief_correct_allowed_statuses(self, allowed_statuses, result):
brief = BriefStub(framework_slug='digital-outcomes-and-specialists-4', user_id=123, status='live').response()
assert helpers.buyers_helpers.is_brief_correct(
brief, 'digital-outcomes-and-specialists-4', 'digital-specialists', 123, allowed_statuses=allowed_statuses
) is result
def test_is_brief_associated_with_user(self):
brief = BriefStub(user_id=123).response()
assert helpers.buyers_helpers.is_brief_associated_with_user(brief, 123) is True
assert helpers.buyers_helpers.is_brief_associated_with_user(brief, 234) is False
def test_brief_can_be_edited(self):
assert helpers.buyers_helpers.brief_can_be_edited(BriefStub(status='draft').response()) is True
assert helpers.buyers_helpers.brief_can_be_edited(BriefStub(status='live').response()) is False
def test_brief_is_withdrawn(self):
assert helpers.buyers_helpers.brief_is_withdrawn(BriefStub(status='withdrawn').response()) is True
assert helpers.buyers_helpers.brief_is_withdrawn(BriefStub(status='live').response()) is False
def test_section_has_at_least_one_required_question(self):
content = content_loader.get_manifest('dos', 'edit_brief').filter(
{'lot': 'digital-specialists'}
)
sections_with_required_questions = {
'section-1': True,
'section-2': True,
'section-4': False,
'section-5': True
}
for section in content.sections:
assert helpers.buyers_helpers.section_has_at_least_one_required_question(section) \
== sections_with_required_questions[section.slug]
def test_count_unanswered_questions(self):
brief = {
'status': 'draft',
'frameworkSlug': 'dos',
'lotSlug': 'digital-specialists',
'required1': True
}
content = content_loader.get_manifest('dos', 'edit_brief').filter(
{'lot': 'digital-specialists'}
)
sections = content.summary(brief)
unanswered_required, unanswered_optional = helpers.buyers_helpers.count_unanswered_questions(sections)
assert unanswered_required == 2
assert unanswered_optional == 2
def test_add_unanswered_counts_to_briefs(self):
briefs = [{
'status': 'draft',
'frameworkSlug': 'dos',
'lotSlug': 'digital-specialists',
'required1': True
}]
assert helpers.buyers_helpers.add_unanswered_counts_to_briefs(briefs, content_loader) == [{
'status': 'draft',
'frameworkSlug': 'dos',
'lotSlug': 'digital-specialists',
'required1': True,
'unanswered_required': 2,
'unanswered_optional': 2
}]
def test_get_sorted_responses_for_brief(self):
data_api_client = mock.Mock()
data_api_client.find_brief_responses.return_value = {
"briefResponses": [
{"id": "five", "niceToHaveRequirements": [True, True, True, True, True]},
{"id": "zero", "niceToHaveRequirements": [False, False, False, False, False]},
{"id": "three", "niceToHaveRequirements": [True, True, False, False, True]},
{"id": "five", "niceToHaveRequirements": [True, True, True, True, True]},
{"id": "four", "niceToHaveRequirements": [True, True, True, True, False]},
{"id": "one", "niceToHaveRequirements": [False, False, False, True, False]},
{"id": "four", "niceToHaveRequirements": [True, True, True, True, False]},
]
}
brief = {"id": 1, "niceToHaveRequirements": ["Nice", "to", "have", "yes", "please"]}
assert helpers.buyers_helpers.get_sorted_responses_for_brief(brief, data_api_client) == [
{'id': 'five', 'niceToHaveRequirements': [True, True, True, True, True]},
{'id': 'five', 'niceToHaveRequirements': [True, True, True, True, True]},
{'id': 'four', 'niceToHaveRequirements': [True, True, True, True, False]},
{'id': 'four', 'niceToHaveRequirements': [True, True, True, True, False]},
{'id': 'three', 'niceToHaveRequirements': [True, True, False, False, True]},
{"id": "one", "niceToHaveRequirements": [False, False, False, True, False]},
{'id': 'zero', 'niceToHaveRequirements': [False, False, False, False, False]}
]
def test_get_sorted_responses_does_not_sort_if_no_nice_to_haves(self):
data_api_client = mock.Mock()
data_api_client.find_brief_responses.return_value = {
"briefResponses": [
{"id": "five"},
{"id": "zero"},
{"id": "three"},
{"id": "five"}
]
}
brief = {"id": 1, "niceToHaveRequirements": []}
assert helpers.buyers_helpers.get_sorted_responses_for_brief(brief, data_api_client) == [
{"id": "five"},
{"id": "zero"},
{"id": "three"},
{"id": "five"}
]
| 2.171875 | 2 |
Plot/src/test/java/io/deephaven/db/plot/example_plots/PlottingPQ.py | devinrsmith/deephaven-core | 0 | 5861 | import deephaven.TableTools as tt
import deephaven.Plot as plt
t = tt.emptyTable(50)\
.update("X = i + 5", "XLow = X -1", "XHigh = X + 1", "Y = Math.random() * 5", "YLow = Y - 1", "YHigh = Y + 1", "USym = i % 2 == 0 ? `AAPL` : `MSFT`")
p = plt.plot("S1", t, "X", "Y").lineColor("black").show()
p2 = plt.plot("S1", t, "X", "Y").plotStyle("bar").gradientVisible(True).show()
p3 = plt.plot("S1", t, "X", "Y").plotStyle("scatter").pointColor("black").pointSize(2).show()
p4 = plt.plot("S1", t, "X", "Y").plotStyle("area").seriesColor("red").show()
p4 = plt.plot3d("S1", t, "X", "X", "Y").show()
pBy = plt.plotBy("S1", t, "X", "Y", "USym").show()
pBy = plt.plot3dBy("S1", t, "X", "X", "Y", "USym").show()
cp = plt.catPlot("S1", t, "X", "Y").lineColor("black").show()
cp2 = plt.catPlot("S1", t, "X", "Y").plotStyle("bar").gradientVisible(True).show()
cp3 = plt.catPlot("S1", t, "X", "Y").plotStyle("scatter").pointColor("black").pointSize(2).show()
cp4 = plt.catPlot("S1", t, "X", "Y").plotStyle("area").seriesColor("red").show()
cp = plt.catPlot3d("S1", t, "X", "X", "Y").show()
cpBy = plt.catPlotBy("S1", t, "X", "Y", "USym").show()
cpBy = plt.catPlot3dBy("S1", t, "X", "X", "Y", "USym").show()
pp = plt.piePlot("S1", t, "X", "Y")
chp = plt.catHistPlot("S1", t, "X").show()
hp = plt.histPlot("S1", t, "X", 5).show()
hp = plt.histPlot("S1", t, "X", 0, 10, 5).show()
ep = plt.errorBarXY("S1", t, "X", "XLow", "XHigh", "Y", "YLow", "YHigh").show()
epBy = plt.errorBarXYBy("S1", t, "X", "XLow", "XHigh", "Y", "YLow", "YHigh", "USym").show()
ep2 = plt.errorBarX("S1", t, "X", "XLow", "XHigh", "Y").show()
epBy2 = plt.errorBarXBy("S1", t, "X", "XLow", "XHigh", "Y", "USym").show()
ep3 = plt.errorBarY("S1", t, "X", "Y", "YLow", "YHigh").show()
epBy3 = plt.errorBarYBy("S1", t, "X", "Y", "YLow", "YHigh", "USym").show()
doubles = [3, 4, 3, 5, 4, 5]
time = 1491946585000000000
t = tt.newTable(tt.col("USym", ["A", "B", "A", "B", "A", "B"]),
tt.doubleCol("Open", doubles), tt.doubleCol("High", doubles),
tt.doubleCol("Low", doubles), tt.doubleCol("Close", doubles))
t = t.updateView("Time = new DBDateTime(time + (MINUTE * i))")
ohlc = plt.ohlcPlot("Test1", t, "Time", "Open", "High", "Low", "Close")
ohlcPlotBy = plt.figure().newChart(0)\
.chartTitle("Chart Title")\
.newAxes()\
.xLabel("X")\
.yLabel("Y")\
.ohlcPlotBy("Test1", t, "Time", "Open", "High", "Low", "Close", "USym")
categories = ["Samsung", "Others", "Nokia", "Apple", "MSFT"]
valuesD = [27.8, 55.3, 16.8, 17.1, 23.1]
valuesI = [27, 55, 16, 17, 15]
ap = plt.plot("S1", valuesD, valuesI).show()
ap = plt.plot3d("S1", valuesI, valuesI, valuesI).show()
acp = plt.catPlot("S1", categories, valuesI).show()
acp2 = plt.catPlot3d("S1", categories, categories, valuesD).show()
achp = plt.catHistPlot("S1", categories).show()
app = plt.figure().xLabel("X").yLabel("Y").piePlot("S1", categories, valuesI).pointLabelFormat("{0}").show()
aep = plt.errorBarXY("S1", valuesD, valuesD, valuesD, valuesD, valuesD, valuesD).show()
aep2 = plt.errorBarX("S1", valuesD, valuesD, valuesD, valuesD).show()
aep3 = plt.errorBarY("S1", valuesD, valuesD, valuesD, valuesD).show()
hp = plt.histPlot("S1", valuesD, 5).show()
hp = plt.histPlot("S1", valuesD, 0, 10, 5).show()
hp = plt.histPlot("S1", valuesI, 5).show()
| 2.25 | 2 |
rhoci/test/routes.py | ahmedmagdyawaad/redhat-ci-dashboard | 8 | 5862 | # Copyright 2019 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from flask import current_app as app
from flask import render_template
from flask import url_for
import logging
LOG = logging.getLogger(__name__)
from rhoci.test import bp # noqa
@bp.route('/index')
@bp.route('/')
def index():
"""All tests."""
jenkins_url = app.config['custom']['jenkins']['url']
uf = url_for('api.all_tests')
return render_template('tests/index.html',
jenkins_url=jenkins_url,
uf=uf)
@bp.route('/class/<class_name>/name/<name>')
def test(class_name, name):
"""Specific test summary."""
uf = url_for('api.test_to_jobs', class_name=class_name, test_name=name)
return render_template('tests/test_to_jobs.html', uf=uf)
| 2.015625 | 2 |
mitmproxy/net/http/http1/__init__.py | aarnaut/mitmproxy | 0 | 5863 | from .read import (
read_request_head,
read_response_head,
connection_close,
expected_http_body_size,
validate_headers,
)
from .assemble import (
assemble_request, assemble_request_head,
assemble_response, assemble_response_head,
assemble_body,
)
__all__ = [
"read_request_head",
"read_response_head",
"connection_close",
"expected_http_body_size",
"validate_headers",
"assemble_request", "assemble_request_head",
"assemble_response", "assemble_response_head",
"assemble_body",
]
| 1.28125 | 1 |
request_token/migrations/0009_requesttokenerror.py | alex-hutton/django-request-token | 0 | 5864 | <filename>request_token/migrations/0009_requesttokenerror.py
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-05-21 19:33
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('request_token', '0008_convert_token_data_to_jsonfield'),
]
operations = [
migrations.CreateModel(
name='RequestTokenErrorLog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('error_type', models.CharField(help_text='The underlying type of error raised.', max_length=50)),
('error_message', models.CharField(help_text='The error message supplied.', max_length=200)),
('log', models.OneToOneField(help_text='The token use against which the error occurred.', on_delete=django.db.models.deletion.CASCADE, related_name='error', to='request_token.RequestTokenLog')),
('token', models.ForeignKey(help_text='The RequestToken that was used.', on_delete=django.db.models.deletion.CASCADE, related_name='errors', to='request_token.RequestToken')),
],
),
]
| 1.84375 | 2 |
01-basic-programs/04-lines.py | ncodeitgithub1/python-get-hands-dirty-programs | 0 | 5865 | <filename>01-basic-programs/04-lines.py
#4 lines: Fibonacci, tuple assignment
parents, babies = (1, 1)
while babies < 100:
print ('This generation has {0} babies'.format(babies))
parents, babies = (babies, parents + babies) | 3.734375 | 4 |
winter/controller.py | EvgenySmekalin/winter | 1 | 5866 | import typing
from .core import Component
_Controller = typing.TypeVar('_Controller')
_ControllerType = typing.Type[_Controller]
ControllerFactory = typing.NewType('ControllerFactory', typing.Callable[[typing.Type], object])
_controller_factory: typing.Optional[ControllerFactory] = None
def controller(controller_class: _ControllerType) -> _ControllerType:
Component.register(controller_class)
return controller_class
def set_controller_factory(controller_factory: ControllerFactory) -> None:
global _controller_factory
_controller_factory = controller_factory
def build_controller(controller_class: _ControllerType) -> _Controller:
if _controller_factory is None:
return controller_class()
return _controller_factory(controller_class)
def get_component(controller_class: _ControllerType) -> Component:
return Component.get_by_cls(controller_class)
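# Hedged usage sketch (MyController and the factory lambda are assumptions for
# illustration): classes are registered with @controller, an optional factory
# lets a DI container build them, and build_controller falls back to a plain
# constructor call when no factory is set.
#
#     @controller
#     class MyController:
#         ...
#
#     set_controller_factory(ControllerFactory(lambda cls: cls()))
#     instance = build_controller(MyController)
#     component = get_component(MyController)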
| 2.390625 | 2 |
go/def.bzl | bobg/rules_go | 0 | 5867 | # Copyright 2014 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Public definitions for Go rules.
All public Go rules, providers, and other definitions are imported and
re-exported in this file. This allows the real location of definitions
to change for easier maintenance.
Definitions outside this file are private unless otherwise noted, and
may change without notice.
"""
load(
"//go/private:context.bzl",
_go_context = "go_context",
)
load(
"//go/private:providers.bzl",
_GoArchive = "GoArchive",
_GoArchiveData = "GoArchiveData",
_GoLibrary = "GoLibrary",
_GoPath = "GoPath",
_GoSDK = "GoSDK",
_GoSource = "GoSource",
)
load(
"//go/private/rules:sdk.bzl",
_go_sdk = "go_sdk",
)
load(
"//go/private:go_toolchain.bzl",
_declare_toolchains = "declare_toolchains",
_go_toolchain = "go_toolchain",
)
load(
"//go/private/rules:wrappers.bzl",
_go_binary_macro = "go_binary_macro",
_go_library_macro = "go_library_macro",
_go_test_macro = "go_test_macro",
)
load(
"//go/private/rules:source.bzl",
_go_source = "go_source",
)
load(
"//extras:embed_data.bzl",
_go_embed_data = "go_embed_data",
)
load(
"//go/private/tools:path.bzl",
_go_path = "go_path",
)
load(
"//go/private/rules:library.bzl",
_go_tool_library = "go_tool_library",
)
load(
"//go/private/rules:nogo.bzl",
_nogo = "nogo_wrapper",
)
# TOOLS_NOGO is a list of all analysis passes in
# golang.org/x/tools/go/analysis/passes.
# This is not backward compatible, so use caution when depending on this --
# new analyses may discover issues in existing builds.
TOOLS_NOGO = [
"@org_golang_x_tools//go/analysis/passes/asmdecl:go_default_library",
"@org_golang_x_tools//go/analysis/passes/assign:go_default_library",
"@org_golang_x_tools//go/analysis/passes/atomic:go_default_library",
"@org_golang_x_tools//go/analysis/passes/atomicalign:go_default_library",
"@org_golang_x_tools//go/analysis/passes/bools:go_default_library",
"@org_golang_x_tools//go/analysis/passes/buildssa:go_default_library",
"@org_golang_x_tools//go/analysis/passes/buildtag:go_default_library",
# TODO(#2396): pass raw cgo sources to cgocall and re-enable.
# "@org_golang_x_tools//go/analysis/passes/cgocall:go_default_library",
"@org_golang_x_tools//go/analysis/passes/composite:go_default_library",
"@org_golang_x_tools//go/analysis/passes/copylock:go_default_library",
"@org_golang_x_tools//go/analysis/passes/ctrlflow:go_default_library",
"@org_golang_x_tools//go/analysis/passes/deepequalerrors:go_default_library",
"@org_golang_x_tools//go/analysis/passes/errorsas:go_default_library",
"@org_golang_x_tools//go/analysis/passes/findcall:go_default_library",
"@org_golang_x_tools//go/analysis/passes/httpresponse:go_default_library",
"@org_golang_x_tools//go/analysis/passes/ifaceassert:go_default_library",
"@org_golang_x_tools//go/analysis/passes/inspect:go_default_library",
"@org_golang_x_tools//go/analysis/passes/loopclosure:go_default_library",
"@org_golang_x_tools//go/analysis/passes/lostcancel:go_default_library",
"@org_golang_x_tools//go/analysis/passes/nilfunc:go_default_library",
"@org_golang_x_tools//go/analysis/passes/nilness:go_default_library",
"@org_golang_x_tools//go/analysis/passes/pkgfact:go_default_library",
"@org_golang_x_tools//go/analysis/passes/printf:go_default_library",
"@org_golang_x_tools//go/analysis/passes/shadow:go_default_library",
"@org_golang_x_tools//go/analysis/passes/shift:go_default_library",
"@org_golang_x_tools//go/analysis/passes/sortslice:go_default_library",
"@org_golang_x_tools//go/analysis/passes/stdmethods:go_default_library",
"@org_golang_x_tools//go/analysis/passes/stringintconv:go_default_library",
"@org_golang_x_tools//go/analysis/passes/structtag:go_default_library",
"@org_golang_x_tools//go/analysis/passes/testinggoroutine:go_default_library",
"@org_golang_x_tools//go/analysis/passes/tests:go_default_library",
"@org_golang_x_tools//go/analysis/passes/unmarshal:go_default_library",
"@org_golang_x_tools//go/analysis/passes/unreachable:go_default_library",
"@org_golang_x_tools//go/analysis/passes/unsafeptr:go_default_library",
"@org_golang_x_tools//go/analysis/passes/unusedresult:go_default_library",
]
# Current version or next version to be tagged. Gazelle and other tools may
# check this to determine compatibility.
RULES_GO_VERSION = "0.30.0"
declare_toolchains = _declare_toolchains
go_context = _go_context
go_embed_data = _go_embed_data
go_sdk = _go_sdk
go_tool_library = _go_tool_library
go_toolchain = _go_toolchain
nogo = _nogo
# See go/providers.rst#GoLibrary for full documentation.
GoLibrary = _GoLibrary
# See go/providers.rst#GoSource for full documentation.
GoSource = _GoSource
# See go/providers.rst#GoPath for full documentation.
GoPath = _GoPath
# See go/providers.rst#GoArchive for full documentation.
GoArchive = _GoArchive
# See go/providers.rst#GoArchiveData for full documentation.
GoArchiveData = _GoArchiveData
# See go/providers.rst#GoSDK for full documentation.
GoSDK = _GoSDK
# See docs/go/core/rules.md#go_library for full documentation.
go_library = _go_library_macro
# See docs/go/core/rules.md#go_binary for full documentation.
go_binary = _go_binary_macro
# See docs/go/core/rules.md#go_test for full documentation.
go_test = _go_test_macro
# See docs/go/core/rules.md#go_test for full documentation.
go_source = _go_source
# See docs/go/core/rules.md#go_path for full documentation.
go_path = _go_path
def go_vet_test(*args, **kwargs):
fail("The go_vet_test rule has been removed. Please migrate to nogo instead, which supports vet tests.")
def go_rule(**kwargs):
fail("The go_rule function has been removed. Use rule directly instead. See https://github.com/bazelbuild/rules_go/blob/master/go/toolchains.rst#writing-new-go-rules")
def go_rules_dependencies():
_moved("go_rules_dependencies")
def go_register_toolchains(**kwargs):
_moved("go_register_toolchains")
def go_download_sdk(**kwargs):
_moved("go_download_sdk")
def go_host_sdk(**kwargs):
_moved("go_host_sdk")
def go_local_sdk(**kwargs):
_moved("go_local_sdk")
def go_wrap_sdk(**kwargs):
_moved("go_wrap_sdK")
def _moved(name):
fail(name + " has moved. Please load from " +
" @io_bazel_rules_go//go:deps.bzl instead of def.bzl.")
| 1.265625 | 1 |
anyway/parsers/united.py | ayalapol/anyway | 0 | 5868 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import calendar
import csv
from datetime import datetime
import os
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import and_
from ..constants import CONST
from ..models import AccidentMarker
from ..utilities import init_flask, decode_hebrew, open_utf8
from ..import importmail
from xml.dom import minidom
import math
import requests
import logging
############################################################################################
# United.py is responsible for the parsing and deployment of "united hatzala" data to the DB
############################################################################################
PROVIDER_CODE = CONST.UNITED_HATZALA_CODE
TIME_ZONE = 2
# convert IMS hours code to hours
RAIN_DURATION_CODE_TO_HOURS = {"1": 6, "2": 12, "3": 18, "4": 24, "/": 24, "5": 1, "6": 2, "7": 3, "8": 9, "9": 15}
WEATHER = {"0": 1, "1": 2, "3": 3, "4": 4, "5": 5, "7": 6, "8": 6, "9": 7, "10": 8, "11": 9,
"12": 10, "17": 11, "18": 12, "19": 13, "20": 14, "21": 15, "22": 16, "23": 17, "24": 18,
"25": 19, "26": 20, "27": 21, "28": 22, "29": 23, "30": 24, "31": 24, "32": 24, "33": 7,
"34": 7, "35": 7, "36": 25, "37": 25, "38": 25, "39": 25, "40": 26, "41": 27, "42": 28,
"43": 29, "44": 9, "45": 30, "46": 30, "47": 30, "48": 31, "49": 32, "50": 33, "51": 34,
"52": 33, "53": 35, "54": 36, "55": 37, "56": 38, "57": 39, "58": 37, "59": 37, "61": 37, "60": 36,
"62": 40, "63": 15, "64": 41, "65": 19, "66": 42, "67": 43, "68": 44, "69": 45, "70": 46, "71": 47,
"72": 48, "73": 16, "74": 50, "75": 51, "76": 52, "77": 53, "78": 54, "79": 55, "80": 56, "81": 57,
"82": 58, "83": 59, "84": 60, "85": 61, "86": 62, "87": 63, "88": 64, "89": 65, "90": 66, "91": 67,
"92": 68, "93": 69, "94": 70, "95": 71, "96": 72, "97": 73, "98": 74, "99": 75}
def retrieve_ims_xml(): # getting an xml document from the ims(israel meteorological service) website
logging.basicConfig(level=logging.DEBUG)
s = requests.session()
r = s.get('http://www.ims.gov.il/ims/PublicXML/observ.xml')
xml_doc = minidom.parseString(r.text)
collection = xml_doc.documentElement
return collection
def parse_date(created):
"""
:param created: Date & Time string from csv
:return: Python datetime object
"""
global time
global hour
DATE_FORMATS = ['%m/%d/%Y %I:%M:%S', '%Y-%m-%d %H:%M:%S', '%Y/%m/%d %I:%M:%S', '%d/%m/%Y %I:%M', '%Y/%m/%d %I:%M', '%m/%d/%Y %I:%M']
for date_format in DATE_FORMATS:
try:
if date_format == '%Y-%m-%d %H:%M:%S':
time = datetime.strptime(str(created)[:-4], date_format)
hour = time.strftime('%H')
hour = int(hour)
else:
time = datetime.strptime(str(created)[:-3], date_format)
hour = time.strftime('%H')
hour = int(hour) if str(created).endswith('AM') else int(hour) + 12
break
except ValueError:
pass
return datetime(time.year, time.month, time.day, hour, time.minute, 0)
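# Worked example (illustrative): an American-style timestamp with an AM/PM
# suffix is matched by the '%m/%d/%Y %I:%M:%S' branch, the suffix is stripped,
# and 12 hours are added for PM values:
#
#     parse_date('4/21/2016 5:30:00 PM')   # -> datetime(2016, 4, 21, 17, 30)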
def is_nth_weekday(nth, daynum, year,
month): # find if date is the nth occurrence of the daynum day of the week (ex: the forth sunday of april 2016)
# start counting the daynum from monday = 0
return calendar.Calendar(nth).monthdatescalendar(
year,
month
)[nth][daynum]
def get_parent_object_node(node):
while node.parentNode:
node = node.parentNode
if node.nodeName == "Object":
return node
def accident_time_zone_adjustment(created): # return accident time in UTC time
# pylint: disable=unexpected-keyword-arg
accident_date = parse_date(created)
daylight_saving_time = is_nth_weekday(4, 4, accident_date.year, 3)
winter_clock = is_nth_weekday(4, 6, accident_date.year, 10)
# weather is given in UTC time
# therefore in daylight_saving_time we deduct 3 hours from the local time and in winter clock 2 hours
# [
accident_date = accident_date.replace(hour=accident_date.hour - TIME_ZONE)
    # if accident happened between april and september
    if 3 < accident_date.month < 10:
        accident_date = accident_date.replace(hour=accident_date.hour - 1)
    # if accident happened before the last sunday of october at 2:00 o'clock
    elif accident_date.month == 10 and (
            winter_clock.day > accident_date.day or (
                winter_clock.day == accident_date.day and accident_date.hour < 2)):
        accident_date = accident_date.replace(hour=accident_date.hour - 1)
    # if accident happened after the last friday of march at 2:00 o'clock
    elif accident_date.month == 3 and (daylight_saving_time.day < accident_date.day or (
            daylight_saving_time.day == accident_date.day and accident_date.hour >= 2)):
        accident_date = accident_date.replace(hour=accident_date.hour - 1)
# ]
adate = ''.join(
(str(accident_date.year), str(accident_date.month), str(accident_date.day), str(accident_date.hour)))
return adate
def all_station_in_date_frame(collection, created): # return the stations data in the time of the accident
doc = minidom.Document()
base = doc.createElement('accident_date')
doc.appendChild(base)
station_data_in_date = collection.getElementsByTagName('date_selected')
station_data_in_date.sort()
accident_date = accident_time_zone_adjustment(created)
    for station in station_data_in_date:
if accident_date in str(station.childNodes[0].nodeValue):
base.appendChild(get_parent_object_node(station))
return base
def find_station_by_coordinate(collection, latitude, longitude):
station_place_in_xml = -1
min_distance = float("inf") # initialize big starting value so the distance will always be smaller than the initial
station_data = collection.getElementsByTagName('surface_station')
for i, station in enumerate(station_data):
station_lon = station.getElementsByTagName('station_lon')
assert len(station_lon) == 1
lon = float(station_lon[0].childNodes[0].nodeValue)
lon_difference = (lon - float(longitude)) ** 2
station_lat = station.getElementsByTagName('station_lat')
assert len(station_lat) == 1
lat = float(station_lat[0].childNodes[0].nodeValue)
lat_difference = (lat - float(latitude)) ** 2
temp_dis = math.sqrt(lat_difference + lon_difference)
if temp_dis < min_distance:
min_distance = temp_dis
station_place_in_xml = i
return station_place_in_xml
def convert_xml_values_to_numbers(rain):
num_conv = rain[:2] # variable to help convert from string to number
    for char in num_conv:  # in the xml, numbers use a three-digit format (4 -> 004); strip the leading zeroes before converting
if char == '0':
rain.replace(char, '')
else:
break
rain_in_millimeters = float(rain)
if rain_in_millimeters >= 990:
        # numbers higher than 990 in the xml code equal 0.(the last digit), for example 991 = 0.1
rain_in_millimeters *= 0.01
return rain_in_millimeters
def get_weather_element(station, weather_data, tag):
element = weather_data[station].getElementsByTagName(tag)
if element:
weather_element = element[0].childNodes[0].nodeValue
else:
weather_element = None
return weather_element
def process_weather_data(collection, latitude, longitude):
weather = 1 # default weather is clear sky
station = find_station_by_coordinate(collection, latitude, longitude)
weather_data = collection.getElementsByTagName('surface_observation')
wind_force = get_weather_element(station, weather_data, 'FF')
rain = get_weather_element(station, weather_data, 'RRR')
rain_duration = get_weather_element(station, weather_data,
'TR') # the duration of time in which the rain amount was measured
weather_code = get_weather_element(station, weather_data, 'WW')
if weather_code is not None:
return WEATHER[weather_code.strip()]
if wind_force is not None:
if int(wind_force) > 8:
weather = 76 # סופת רוחות
elif int(wind_force) > 5:
weather = 77 # רוחות חזקות
if rain is not None and rain_duration is not None:
rain_in_millimeters = convert_xml_values_to_numbers(rain)
rain_hours = RAIN_DURATION_CODE_TO_HOURS[str(rain_duration).strip()]
# rain amount is between 0.1 and 0.5 millimeter
if 0.0 < rain_in_millimeters <= 0.5 or (
0.0 < rain_in_millimeters / rain_hours <= 0.5):
if weather == 76:
                weather = 80 # windstorm, light rain
            elif weather == 77:
                weather = 84 # strong winds, light rain
            else:
                weather = 37 # light rain
# average rain amount per hour is between 0.5 and 4.0 millimeters
if 0.5 < rain_in_millimeters / rain_hours <= 4:
if weather == 76:
                weather = 81 # rain and windstorm
            elif weather == 77:
                weather = 85 # rain and strong winds
            else:
                weather = 15 # rain
# average rain amount per hour is between 4.0 and 8.0 millimeters
elif 4 < rain_in_millimeters / rain_hours <= 8:
            if weather == 76:
                weather = 82 # windstorm, heavy rain
            elif weather == 77:
                weather = 86 # strong winds, heavy rain
            else:
                weather = 78 # heavy rain
# average rain amount per hour is more than 8.0 millimeters
elif rain_in_millimeters / rain_hours > 8:
            if weather == 76:
                weather = 83 # windstorm, torrential rain
            elif weather == 77:
                weather = 87 # strong winds, torrential rain
            else:
                weather = 79 # torrential rain
return weather
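# Minimal worked example of the fallback logic above (hypothetical station readings,
# assuming the station reports no 'WW' weather code): a wind force of 6 gives
# weather 77 (strong winds); 2.0 mm of rain measured over a 1-hour period falls in
# the 0.5-4.0 mm/h bin, so process_weather_data() returns 85 (rain and strong winds).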
CSVMAP = [
{"id": 0, "time": 1, "lat": 2, "long": 3, "street": 4, "city": 6, "comment": 7, "type": 8, "casualties": 9},
{"id": 0, "time": 1, "type": 2, "long": 3, "lat": 4, "city": 5, "street": 6, "comment": 7, "casualties": 8},
]
def create_accidents(collection, file_location):
"""
    :param collection: parsed IMS weather XML document, used to attach weather data to each marker
    :param file_location: local location of .csv
    :return: Yields a marker object with every iteration
"""
logging.info("\tReading accidents data from '%s'..." % file_location)
with open_utf8(file_location, 'rU') as f:
reader = csv.reader(f, delimiter=',', dialect=csv.excel_tab)
for line, accident in enumerate(reader):
if line == 0: # header
format_version = 0 if "MissionID" in accident[0] else 1
continue
if not accident: # empty line
continue
if line == 1 and accident[0] == "":
logging.warn("\t\tEmpty File!")
continue
csvmap = CSVMAP[format_version]
if accident[csvmap["lat"]] == "" or accident[csvmap["long"]] == "" or \
accident[csvmap["lat"]] is None or accident[csvmap["long"]] is None or \
accident[csvmap["lat"]] == "NULL" or accident[csvmap["long"]] == "NULL":
logging.warn("\t\tMissing coordinates in line {0}. Moving on...".format(line + 1))
continue
created = parse_date(accident[csvmap["time"]])
marker = {'id': accident[csvmap["id"]], 'latitude': accident[csvmap["lat"]],
'longitude': accident[csvmap["long"]], 'created': created, 'provider_code': PROVIDER_CODE,
'title': decode_hebrew(accident[csvmap["type"]], encoding="utf-8")[:100],
'address': decode_hebrew((accident[csvmap["street"]] + ' ' + accident[csvmap["city"]]), encoding="utf-8"),
'accident_severity': 2 if u"קשה" in decode_hebrew(accident[csvmap["type"]], encoding="utf-8") else 3,
'location_accuracy': 1, 'accident_type': 21, 'type': CONST.MARKER_TYPE_ACCIDENT,
'description': decode_hebrew(accident[csvmap["comment"]], encoding="utf-8"),
'weather': process_weather_data(collection, accident[csvmap["lat"]],
accident[csvmap["long"]])}
if format_version == 0:
casualties = accident[csvmap["casualties"]]
marker['road_intactness'] = casualties if casualties.isdigit() else 0
yield marker
def import_to_db(collection, path):
"""
    :param collection: parsed IMS weather XML document
    :param path: path to a local united .csv file ('united_path' + file name in main() below)
    :return: number of new accident markers inserted into the DB
"""
app = init_flask()
db = SQLAlchemy(app)
accidents = list(create_accidents(collection, path))
if not accidents:
return 0
new_ids = [m["id"] for m in accidents
if 0 == db.session.query(AccidentMarker).filter(and_(AccidentMarker.id == m["id"],
AccidentMarker.provider_code == m["provider_code"])).count()]
if not new_ids:
logging.info("\t\tNothing loaded, all accidents already in DB")
return 0
db.session.execute(AccidentMarker.__table__.insert(), [m for m in accidents if m["id"] in new_ids])
db.session.commit()
return len(new_ids)
def update_db(collection):
"""
    Fill in missing weather data for existing united (provider_code == 2) markers and commit the changes.
"""
app = init_flask()
db = SQLAlchemy(app)
united = db.session.query(AccidentMarker).filter(AccidentMarker.provider_code == 2)
for accident in united:
if not accident.weather:
accident.weather = process_weather_data(collection, accident.latitude, accident.longitude)
db.session.commit()
    logging.info("\tFinished committing the changes")
def main(light=True, username='', password='', lastmail=False):
"""
Calls importmail.py prior to importing to DB
"""
collection = retrieve_ims_xml()
if not light:
logging.info("Importing data from mail...")
importmail.main(username, password, lastmail)
united_path = "static/data/united/"
total = 0
logging.info("Loading United accidents...")
for united_file in os.listdir(united_path):
if united_file.endswith(".csv"):
total += import_to_db(collection, united_path + united_file)
logging.info("\tImported {0} items".format(total))
update_db(collection)
| 2.09375 | 2 |
libact/query_strategies/tests/test_variance_reduction.py | joequant/libact | 1 | 5869 | import unittest
from numpy.testing import assert_array_equal
import numpy as np
from libact.base.dataset import Dataset
from libact.models import LogisticRegression
from libact.query_strategies import VarianceReduction
from .utils import run_qs
class VarianceReductionTestCase(unittest.TestCase):
    """Variance reduction test case using artificial dataset"""
def setUp(self):
self.X = [[-2, -1], [1, 1], [-1, -2], [-1, -1], [1, 2], [2, 1]]
self.y = [0, 1, 0, 1, 0, 1]
self.quota = 4
def test_variance_reduction(self):
trn_ds = Dataset(self.X,
np.concatenate([self.y[:2],
[None] * (len(self.y) - 2)]))
qs = VarianceReduction(trn_ds, model=LogisticRegression(), sigma=0.1)
qseq = run_qs(trn_ds, qs, self.y, self.quota)
assert_array_equal(qseq, np.array([4, 5, 2, 3]))
if __name__ == '__main__':
unittest.main()
| 2.53125 | 3 |
hysds/log_utils.py | fgreg/hysds | 0 | 5870 | <reponame>fgreg/hysds<gh_stars>0
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import open
from builtins import str
from future import standard_library
standard_library.install_aliases()
import os
import re
import json
import copy
import socket
import msgpack
import traceback
import types
import backoff
from datetime import datetime
from uuid import uuid4
from redis import BlockingConnectionPool, StrictRedis, RedisError
from celery.utils.log import get_task_logger
import hysds
from hysds.celery import app
from prov_es.model import get_uuid, ProvEsDocument
# logger
logger = get_task_logger(__name__)
# redis connection pools
JOB_STATUS_POOL = None
JOB_INFO_POOL = None
WORKER_STATUS_POOL = None
EVENT_STATUS_POOL = None
# job status key template
JOB_STATUS_KEY_TMPL = "hysds-job-status-%s"
# worker status key template
WORKER_STATUS_KEY_TMPL = "hysds-worker-status-%s"
# task worker key template
TASK_WORKER_KEY_TMPL = "hysds-task-worker-%s"
def backoff_max_value():
"""Return max value for backoff."""
return app.conf.BACKOFF_MAX_VALUE
def backoff_max_tries():
"""Return max tries for backoff."""
return app.conf.BACKOFF_MAX_TRIES
def hard_time_limit_gap():
"""Return minimum gap time after soft time limit."""
return app.conf.HARD_TIME_LIMIT_GAP
def ensure_hard_time_limit_gap(soft_time_limit, time_limit):
"""Ensure hard time limit gap."""
gap = hard_time_limit_gap()
if soft_time_limit is not None and (time_limit is None or
time_limit <= soft_time_limit+gap):
time_limit = soft_time_limit + gap
return soft_time_limit, time_limit
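# Worked example (the gap value is assumed here; it comes from app.conf.HARD_TIME_LIMIT_GAP):
# with a gap of 300 seconds, ensure_hard_time_limit_gap(3600, None) -> (3600, 3900),
# while a hard limit that already leaves enough room, e.g. (3600, 7200), is returned unchanged.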
def set_redis_job_status_pool():
"""Set redis connection pool for job status."""
global JOB_STATUS_POOL
if JOB_STATUS_POOL is None:
JOB_STATUS_POOL = BlockingConnectionPool.from_url(
app.conf.REDIS_JOB_STATUS_URL)
def set_redis_job_info_pool():
"""Set redis connection pool for job info metrics."""
global JOB_INFO_POOL
if JOB_INFO_POOL is None:
JOB_INFO_POOL = BlockingConnectionPool.from_url(
app.conf.REDIS_JOB_INFO_URL)
def set_redis_worker_status_pool():
"""Set redis connection pool for worker status."""
global WORKER_STATUS_POOL
if WORKER_STATUS_POOL is None:
WORKER_STATUS_POOL = BlockingConnectionPool.from_url(
app.conf.REDIS_JOB_STATUS_URL)
def set_redis_event_status_pool():
"""Set redis connection pool for event status."""
global EVENT_STATUS_POOL
if EVENT_STATUS_POOL is None:
EVENT_STATUS_POOL = BlockingConnectionPool.from_url(
app.conf.REDIS_JOB_STATUS_URL)
@backoff.on_exception(backoff.expo,
RedisError,
max_tries=backoff_max_tries,
max_value=backoff_max_value)
def log_task_worker(task_id, worker):
"""Log task worker for task ID in redis."""
set_redis_worker_status_pool()
global WORKER_STATUS_POOL
# set task worker for task ID
r = StrictRedis(connection_pool=WORKER_STATUS_POOL)
r.setex(TASK_WORKER_KEY_TMPL % task_id,
app.conf.HYSDS_JOB_STATUS_EXPIRES,
worker)
@backoff.on_exception(backoff.expo,
RedisError,
max_tries=backoff_max_tries,
max_value=backoff_max_value)
def get_task_worker(task_id):
"""Retrieve task worker by task ID from redis."""
set_redis_worker_status_pool()
global WORKER_STATUS_POOL
# retrieve task worker
r = StrictRedis(connection_pool=WORKER_STATUS_POOL)
return r.get(TASK_WORKER_KEY_TMPL % task_id)
@backoff.on_exception(backoff.expo,
RedisError,
max_tries=backoff_max_tries,
max_value=backoff_max_value)
def get_worker_status(worker):
"""Retrieve worker status by worker ID from redis."""
set_redis_worker_status_pool()
global WORKER_STATUS_POOL
# retrieve worker status
r = StrictRedis(connection_pool=WORKER_STATUS_POOL)
return r.get(WORKER_STATUS_KEY_TMPL % worker)
@backoff.on_exception(backoff.expo,
RedisError,
max_tries=backoff_max_tries,
max_value=backoff_max_value)
def get_job_status(task_id):
"""Retrieve job status by task ID from redis."""
set_redis_job_status_pool()
global JOB_STATUS_POOL
# retrieve job status
r = StrictRedis(connection_pool=JOB_STATUS_POOL)
return r.get(JOB_STATUS_KEY_TMPL % task_id)
@backoff.on_exception(backoff.expo,
RedisError,
max_tries=backoff_max_tries,
max_value=backoff_max_value)
def log_job_status(job):
"""Print job status."""
set_redis_job_status_pool()
global JOB_STATUS_POOL
job['resource'] = 'job'
job['type'] = job.get('job', {}).get('type', 'unknown')
job['@version'] = '1'
job['@timestamp'] = "%sZ" % datetime.utcnow().isoformat()
if 'tag' in job.get('job', {}):
tags = job.setdefault('tags', [])
if isinstance(tags, str):
tags = [tags]
tags.append(job['job']['tag'])
job['tags'] = tags
# send update to redis
r = StrictRedis(connection_pool=JOB_STATUS_POOL)
r.setex(JOB_STATUS_KEY_TMPL % job['uuid'],
app.conf.HYSDS_JOB_STATUS_EXPIRES,
job['status']) # for dedup
r.rpush(app.conf.REDIS_JOB_STATUS_KEY, msgpack.dumps(job)) # for ES
logger.info("job_status_json:%s" % json.dumps(job))
@backoff.on_exception(backoff.expo,
RedisError,
max_tries=backoff_max_tries,
max_value=backoff_max_value)
def log_job_info(job):
    """Log job info metrics to redis and to the task logger."""
set_redis_job_info_pool()
global JOB_INFO_POOL
filtered_info = {}
for info in ('job_info', 'job_id', 'task_id', 'delivery_info', 'tag',
'priority', 'container_image_name', 'container_image_url',
'name'):
if info in job:
filtered_info[info] = job[info]
job_info = {'type': 'job_info',
'@version': '1',
'@timestamp': "%sZ" % datetime.utcnow().isoformat(),
'job': filtered_info,
'job_type': job['type']}
# send update to redis
r = StrictRedis(connection_pool=JOB_INFO_POOL)
r.rpush(app.conf.REDIS_JOB_INFO_KEY, msgpack.dumps(job_info))
logger.info("job_info_json:%s" % json.dumps(job_info))
@backoff.on_exception(backoff.expo,
RedisError,
max_tries=backoff_max_tries,
max_value=backoff_max_value)
def log_custom_event(event_type, event_status, event, tags=[], hostname=None):
"""Log custom event."""
set_redis_event_status_pool()
global EVENT_STATUS_POOL
uuid = str(uuid4())
if hostname is None:
try:
hostname = socket.getfqdn()
except:
try:
hostname = socket.gethostbyname(socket.gethostname())
except:
hostname = ''
info = {'resource': 'event',
'type': event_type,
'status': event_status,
'@timestamp': "%sZ" % datetime.utcnow().isoformat(),
'hostname': hostname,
'uuid': uuid,
'tags': tags,
'@version': '1',
'event': event}
# send update to redis
r = StrictRedis(connection_pool=EVENT_STATUS_POOL)
r.rpush(app.conf.REDIS_JOB_STATUS_KEY, msgpack.dumps(info))
logger.info("hysds.custom_event:%s" % json.dumps(info))
return uuid
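# Minimal usage sketch (hypothetical event type and payload, not a HySDS-defined schema):
#   log_custom_event("job_timeout", "failed",
#                    {"job_id": "job-1234", "reason": "soft time limit exceeded"},
#                    tags=["pge"])
# returns the generated uuid and pushes the event onto REDIS_JOB_STATUS_KEY.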
def log_prov_es(job, prov_es_info, prov_es_file):
"""Log PROV-ES document. Create temp PROV-ES document to populate
attributes that only the worker has access to (e.g. PID)."""
    # create PROV-ES doc to generate attributes that only verdi knows
ps_id = "hysds:%s" % get_uuid(job['job_id'])
bundle_id = "hysds:%s" % get_uuid('bundle-%s' % job['job_id'])
doc = ProvEsDocument()
# get bundle
#bndl = doc.bundle(bundle_id)
bndl = None
    # create software agent
sa_label = "hysds:pge_wrapper/%s/%d/%s" % (job['job_info']['execute_node'],
job['job_info']['pid'],
datetime.utcnow().isoformat())
sa_id = "hysds:%s" % get_uuid(sa_label)
doc.softwareAgent(sa_id, str(job['job_info']['pid']),
job['job_info']['execute_node'],
role=job.get('username', None),
label=sa_label, bundle=bndl)
# create processStep
doc.processStep(ps_id, job['job_info']['cmd_start'],
job['job_info']['cmd_end'], [], sa_id,
None, [], [], bundle=bndl,
prov_type="hysds:%s" % job['type'])
# get json
pd = json.loads(doc.serialize())
# update software agent and process step
if 'bundle' in prov_es_info:
if len(prov_es_info['bundle']) == 1:
bundle_id_orig = list(prov_es_info['bundle'].keys())[0]
# update software agent
prov_es_info['bundle'][bundle_id_orig].setdefault(
'agent', {}).update(pd['bundle'][bundle_id]['agent'])
# update wasAssociatedWith
prov_es_info['bundle'][bundle_id_orig].setdefault(
'wasAssociatedWith', {}).update(pd['bundle'][bundle_id]['wasAssociatedWith'])
# update activity
if 'activity' in prov_es_info['bundle'][bundle_id_orig]:
if len(prov_es_info['bundle'][bundle_id_orig]['activity']) == 1:
ps_id_orig = list(
prov_es_info['bundle'][bundle_id_orig]['activity'].keys())[0]
prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig][
'prov:startTime'] = pd['bundle'][bundle_id]['activity'][ps_id]['prov:startTime']
prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig][
'prov:endTime'] = pd['bundle'][bundle_id]['activity'][ps_id]['prov:endTime']
prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]['hysds:job_id'] = job['job_id']
prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]['hysds:job_type'] = job['type']
prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]['hysds:job_url'] = job['job_info']['job_url']
prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]['hysds:mozart_url'] = app.conf.MOZART_URL
if 'prov:type' not in prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]:
prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig][
'prov:type'] = pd['bundle'][bundle_id]['activity'][ps_id]['prov:type']
# update wasAssociatedWith activity ids
for waw_id in prov_es_info['bundle'][bundle_id_orig]['wasAssociatedWith']:
if prov_es_info['bundle'][bundle_id_orig]['wasAssociatedWith'][waw_id]['prov:activity'] == ps_id:
prov_es_info['bundle'][bundle_id_orig]['wasAssociatedWith'][waw_id]['prov:activity'] = ps_id_orig
else:
prov_es_info['bundle'][bundle_id_orig]['activity'].update(
pd['bundle'][bundle_id]['activity'])
else:
prov_es_info['bundle'][bundle_id_orig]['activity'] = pd['bundle'][bundle_id]['activity']
else:
# update software agent
prov_es_info.setdefault('agent', {}).update(pd['agent'])
# update wasAssociatedWith
prov_es_info.setdefault('wasAssociatedWith', {}).update(
pd['wasAssociatedWith'])
# update process step
if 'activity' in prov_es_info:
if len(prov_es_info['activity']) == 1:
ps_id_orig = list(prov_es_info['activity'].keys())[0]
prov_es_info['activity'][ps_id_orig]['prov:startTime'] = pd['activity'][ps_id]['prov:startTime']
prov_es_info['activity'][ps_id_orig]['prov:endTime'] = pd['activity'][ps_id]['prov:endTime']
prov_es_info['activity'][ps_id_orig]['hysds:job_id'] = job['job_id']
prov_es_info['activity'][ps_id_orig]['hysds:job_type'] = job['type']
prov_es_info['activity'][ps_id_orig]['hysds:job_url'] = job['job_info']['job_url']
prov_es_info['activity'][ps_id_orig]['hysds:mozart_url'] = app.conf.MOZART_URL
if 'prov:type' not in prov_es_info['activity'][ps_id_orig]:
prov_es_info['activity'][ps_id_orig]['prov:type'] = pd['activity'][ps_id]['prov:type']
# update wasAssociatedWith activity ids
for waw_id in prov_es_info['wasAssociatedWith']:
if prov_es_info['wasAssociatedWith'][waw_id]['prov:activity'] == ps_id:
prov_es_info['wasAssociatedWith'][waw_id]['prov:activity'] = ps_id_orig
else:
prov_es_info['activity'].update(pd['activity'])
else:
prov_es_info['activity'] = pd['activity']
# write prov
with open(prov_es_file, 'w') as f:
json.dump(prov_es_info, f, indent=2)
def log_publish_prov_es(prov_es_info, prov_es_file, prod_path, pub_urls,
prod_metrics, objectid):
"""Log publish step in PROV-ES document."""
# create PROV-ES doc
doc = ProvEsDocument(namespaces=prov_es_info['prefix'])
# get bundle
#bndl = doc.bundle(bundle_id)
bndl = None
# add input entity
execute_node = socket.getfqdn()
prod_url = "file://%s%s" % (execute_node, prod_path)
input_id = "hysds:%s" % get_uuid(prod_url)
input_ent = doc.granule(input_id, None, [prod_url], [], None, None, None,
label=os.path.basename(prod_url), bundle=bndl)
# add output entity
output_id = "hysds:%s" % get_uuid(pub_urls[0])
output_ent = doc.product(output_id, None, [pub_urls[0]], [], None, None,
None, label=objectid, bundle=bndl)
# software and algorithm
algorithm = "eos:product_publishing"
software_version = hysds.__version__
software_title = "%s v%s" % (hysds.__description__, software_version)
software = "eos:HySDS-%s" % software_version
software_location = hysds.__url__
doc.software(software, [algorithm], software_version, label=software_title,
location=software_location, bundle=bndl)
    # create software agent
pid = os.getpid()
sa_label = "hysds:publish_dataset/%s/%d/%s" % (execute_node, pid,
prod_metrics['time_start'])
sa_id = "hysds:%s" % get_uuid(sa_label)
doc.softwareAgent(sa_id, str(pid), execute_node, role="invoked",
label=sa_label, bundle=bndl)
# create processStep
job_id = "publish_dataset-%s" % os.path.basename(prod_path)
doc.processStep("hysds:%s" % get_uuid(job_id), prod_metrics['time_start'],
prod_metrics['time_end'], [software], sa_id, None,
[input_id], [output_id], label=job_id, bundle=bndl,
prov_type="hysds:publish_dataset")
# get json
pd = json.loads(doc.serialize())
# update input entity
orig_ent = prov_es_info.get('entity', {}).get(input_id, {})
pd['entity'][input_id].update(orig_ent)
# update output entity
for attr in orig_ent:
if attr in ('prov:location', 'prov:label', 'prov:type'):
continue
pd['entity'][output_id][attr] = orig_ent[attr]
# write prov
with open(prov_es_file, 'w') as f:
json.dump(pd, f, indent=2)
| 2.109375 | 2 |
openstack_dashboard/api/rest/swift.py | CplusShen/aurora-horizon | 0 | 5871 | # Copyright 2015, Rackspace, US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API for the swift service.
"""
import os
from django import forms
from django.http import StreamingHttpResponse
from django.utils.http import urlunquote
from django.views.decorators.csrf import csrf_exempt
from django.views import generic
import six
from horizon import exceptions
from openstack_dashboard import api
from openstack_dashboard.api.rest import urls
from openstack_dashboard.api.rest import utils as rest_utils
from openstack_dashboard.api import swift
@urls.register
class Info(generic.View):
"""API for information about the Swift installation.
"""
url_regex = r'swift/info/$'
@rest_utils.ajax()
def get(self, request):
"""Get information about the Swift installation.
"""
capabilities = api.swift.swift_get_capabilities(request)
return {'info': capabilities}
@urls.register
class Containers(generic.View):
"""API for swift container listing for an account
"""
url_regex = r'swift/containers/$'
@rest_utils.ajax()
def get(self, request):
"""Get the list of containers for this account
TODO(neillc): Add pagination
"""
containers, has_more = api.swift.swift_get_containers(request)
containers = [container.to_dict() for container in containers]
return {'items': containers, 'has_more': has_more}
@urls.register
class Container(generic.View):
"""API for swift container level information
"""
url_regex = r'swift/containers/(?P<container>[^/]+)/metadata/$'
@rest_utils.ajax()
def get(self, request, container):
"""Get the container details
"""
return api.swift.swift_get_container(request, container).to_dict()
@rest_utils.ajax()
def post(self, request, container):
metadata = {}
if 'is_public' in request.DATA:
metadata['is_public'] = request.DATA['is_public']
# This will raise an exception if the container already exists
try:
api.swift.swift_create_container(request, container,
metadata=metadata)
except exceptions.AlreadyExists as e:
# 409 Conflict
return rest_utils.JSONResponse(str(e), 409)
return rest_utils.CreatedResponse(
u'/api/swift/containers/%s' % container,
)
@rest_utils.ajax()
def delete(self, request, container):
try:
api.swift.swift_delete_container(request, container)
except exceptions.Conflict as e:
# It cannot be deleted if it's not empty.
return rest_utils.JSONResponse(str(e), 409)
@rest_utils.ajax(data_required=True)
def put(self, request, container):
metadata = {'is_public': request.DATA['is_public']}
api.swift.swift_update_container(request, container, metadata=metadata)
@urls.register
class Objects(generic.View):
"""API for a list of swift objects
"""
url_regex = r'swift/containers/(?P<container>[^/]+)/objects/$'
@rest_utils.ajax()
    def get(self, request, container):
        """Get the list of objects (and pseudo-folders) in a container,
        optionally restricted to a 'path' prefix.
        :param request: the HTTP request
        :param container: the container name
        :return: a dict with an 'items' list describing each object
        """
path = request.GET.get('path')
if path is not None:
path = urlunquote(path)
objects = api.swift.swift_get_objects(
request,
container,
prefix=path
)
# filter out the folder from the listing if we're filtering for
# contents of a (pseudo) folder
contents = [{
'path': o.subdir if isinstance(o, swift.PseudoFolder) else o.name,
'name': o.name.split('/')[-1],
'bytes': o.bytes,
'is_subdir': isinstance(o, swift.PseudoFolder),
'is_object': not isinstance(o, swift.PseudoFolder),
'content_type': getattr(o, 'content_type', None)
} for o in objects[0] if o.name != path]
return {'items': contents}
class UploadObjectForm(forms.Form):
file = forms.FileField(required=False)
@urls.register
class Object(generic.View):
"""API for a single swift object or pseudo-folder
"""
url_regex = r'swift/containers/(?P<container>[^/]+)/object/' \
'(?P<object_name>.+)$'
# note: not an AJAX request - the body will be raw file content
@csrf_exempt
def post(self, request, container, object_name):
"""Create or replace an object or pseudo-folder
:param request:
:param container:
:param object_name:
        If the object_name (i.e. POST path) ends in a '/' then a folder is
created, rather than an object. Any file content passed along with
the request will be ignored in that case.
POST parameter:
:param file: the file data for the upload.
:return:
"""
form = UploadObjectForm(request.POST, request.FILES)
if not form.is_valid():
raise rest_utils.AjaxError(500, 'Invalid request')
data = form.clean()
if object_name[-1] == '/':
result = api.swift.swift_create_pseudo_folder(
request,
container,
object_name
)
else:
result = api.swift.swift_upload_object(
request,
container,
object_name,
data['file']
)
return rest_utils.CreatedResponse(
u'/api/swift/containers/%s/object/%s' % (container, result.name)
)
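    # Example request paths handled by this view (hypothetical container/object names,
    # assuming the /api prefix used in the Location headers above):
    #   POST /api/swift/containers/photos/object/2021/        -> creates pseudo-folder '2021/'
    #   POST /api/swift/containers/photos/object/2021/cat.jpg -> uploads the multipart 'file'
    #        field content and answers 201 with the new resource location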
@rest_utils.ajax()
def delete(self, request, container, object_name):
if object_name[-1] == '/':
try:
api.swift.swift_delete_folder(request, container, object_name)
except exceptions.Conflict as e:
# In case the given object is pseudo folder
# It cannot be deleted if it's not empty.
return rest_utils.JSONResponse(str(e), 409)
else:
api.swift.swift_delete_object(request, container, object_name)
def get(self, request, container, object_name):
"""Get the object contents.
"""
obj = api.swift.swift_get_object(
request,
container,
object_name
)
# Add the original file extension back on if it wasn't preserved in the
# name given to the object.
filename = object_name.rsplit(api.swift.FOLDER_DELIMITER)[-1]
if not os.path.splitext(obj.name)[1] and obj.orig_name:
name, ext = os.path.splitext(obj.orig_name)
filename = "%s%s" % (filename, ext)
response = StreamingHttpResponse(obj.data)
safe = filename.replace(",", "")
if six.PY2:
safe = safe.encode('utf-8')
response['Content-Disposition'] = 'attachment; filename="%s"' % safe
response['Content-Type'] = 'application/octet-stream'
response['Content-Length'] = obj.bytes
return response
@urls.register
class ObjectMetadata(generic.View):
"""API for a single swift object
"""
url_regex = r'swift/containers/(?P<container>[^/]+)/metadata/' \
'(?P<object_name>.+)$'
@rest_utils.ajax()
def get(self, request, container, object_name):
return api.swift.swift_get_object(
request,
container_name=container,
object_name=object_name,
with_data=False
).to_dict()
@urls.register
class ObjectCopy(generic.View):
"""API to copy a swift object
"""
url_regex = r'swift/containers/(?P<container>[^/]+)/copy/' \
'(?P<object_name>.+)$'
@rest_utils.ajax()
def post(self, request, container, object_name):
dest_container = request.DATA['dest_container']
dest_name = request.DATA['dest_name']
try:
result = api.swift.swift_copy_object(
request,
container,
object_name,
dest_container,
dest_name
)
except exceptions.AlreadyExists as e:
return rest_utils.JSONResponse(str(e), 409)
return rest_utils.CreatedResponse(
u'/api/swift/containers/%s/object/%s' % (dest_container,
result.name)
)
| 1.882813 | 2 |
datagen.py | kuangliu/pytorch-ssd | 124 | 5872 | '''Load image/class/box from an annotation file.
The annotation file is organized as:
image_name #obj xmin ymin xmax ymax class_index ..
'''
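# Example annotation line in this format (hypothetical file name and coordinates):
#   img_0001.jpg 2 48 240 195 371 11 8 12 352 498 14
# i.e. two objects, each described by xmin ymin xmax ymax class_index.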
from __future__ import print_function
import os
import sys
import os.path
import random
import numpy as np
import torch
import torch.utils.data as data
import torchvision.transforms as transforms
from encoder import DataEncoder
from PIL import Image, ImageOps
class ListDataset(data.Dataset):
img_size = 300
def __init__(self, root, list_file, train, transform):
'''
Args:
          root: (str) directory to images.
list_file: (str) path to index file.
train: (boolean) train or test.
transform: ([transforms]) image transforms.
'''
self.root = root
self.train = train
self.transform = transform
self.fnames = []
self.boxes = []
self.labels = []
self.data_encoder = DataEncoder()
with open(list_file) as f:
lines = f.readlines()
self.num_samples = len(lines)
for line in lines:
splited = line.strip().split()
self.fnames.append(splited[0])
num_objs = int(splited[1])
box = []
label = []
for i in range(num_objs):
xmin = splited[2+5*i]
ymin = splited[3+5*i]
xmax = splited[4+5*i]
ymax = splited[5+5*i]
c = splited[6+5*i]
box.append([float(xmin),float(ymin),float(xmax),float(ymax)])
label.append(int(c))
self.boxes.append(torch.Tensor(box))
self.labels.append(torch.LongTensor(label))
def __getitem__(self, idx):
'''Load a image, and encode its bbox locations and class labels.
Args:
idx: (int) image index.
Returns:
img: (tensor) image tensor.
loc_target: (tensor) location targets, sized [8732,4].
conf_target: (tensor) label targets, sized [8732,].
'''
# Load image and bbox locations.
fname = self.fnames[idx]
img = Image.open(os.path.join(self.root, fname))
boxes = self.boxes[idx].clone()
labels = self.labels[idx]
# Data augmentation while training.
if self.train:
img, boxes = self.random_flip(img, boxes)
img, boxes, labels = self.random_crop(img, boxes, labels)
        # Scale bbox locations to [0,1].
w,h = img.size
boxes /= torch.Tensor([w,h,w,h]).expand_as(boxes)
img = img.resize((self.img_size,self.img_size))
img = self.transform(img)
# Encode loc & conf targets.
loc_target, conf_target = self.data_encoder.encode(boxes, labels)
return img, loc_target, conf_target
def random_flip(self, img, boxes):
'''Randomly flip the image and adjust the bbox locations.
For bbox (xmin, ymin, xmax, ymax), the flipped bbox is:
(w-xmax, ymin, w-xmin, ymax).
Args:
img: (PIL.Image) image.
boxes: (tensor) bbox locations, sized [#obj, 4].
Returns:
img: (PIL.Image) randomly flipped image.
boxes: (tensor) randomly flipped bbox locations, sized [#obj, 4].
'''
if random.random() < 0.5:
img = img.transpose(Image.FLIP_LEFT_RIGHT)
w = img.width
xmin = w - boxes[:,2]
xmax = w - boxes[:,0]
boxes[:,0] = xmin
boxes[:,2] = xmax
return img, boxes
def random_crop(self, img, boxes, labels):
'''Randomly crop the image and adjust the bbox locations.
For more details, see 'Chapter2.2: Data augmentation' of the paper.
Args:
img: (PIL.Image) image.
boxes: (tensor) bbox locations, sized [#obj, 4].
labels: (tensor) bbox labels, sized [#obj,].
Returns:
img: (PIL.Image) cropped image.
selected_boxes: (tensor) selected bbox locations.
labels: (tensor) selected bbox labels.
'''
imw, imh = img.size
while True:
min_iou = random.choice([None, 0.1, 0.3, 0.5, 0.7, 0.9])
if min_iou is None:
return img, boxes, labels
for _ in range(100):
w = random.randrange(int(0.1*imw), imw)
h = random.randrange(int(0.1*imh), imh)
if h > 2*w or w > 2*h:
continue
x = random.randrange(imw - w)
y = random.randrange(imh - h)
roi = torch.Tensor([[x, y, x+w, y+h]])
center = (boxes[:,:2] + boxes[:,2:]) / 2 # [N,2]
roi2 = roi.expand(len(center), 4) # [N,4]
mask = (center > roi2[:,:2]) & (center < roi2[:,2:]) # [N,2]
mask = mask[:,0] & mask[:,1] #[N,]
if not mask.any():
continue
selected_boxes = boxes.index_select(0, mask.nonzero().squeeze(1))
iou = self.data_encoder.iou(selected_boxes, roi)
if iou.min() < min_iou:
continue
img = img.crop((x, y, x+w, y+h))
selected_boxes[:,0].add_(-x).clamp_(min=0, max=w)
selected_boxes[:,1].add_(-y).clamp_(min=0, max=h)
selected_boxes[:,2].add_(-x).clamp_(min=0, max=w)
selected_boxes[:,3].add_(-y).clamp_(min=0, max=h)
return img, selected_boxes, labels[mask]
def __len__(self):
return self.num_samples
| 2.9375 | 3 |
lingvo/core/builder.py | allenwang28/lingvo | 2,611 | 5873 | # Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A library to build composite layers.
WARNING:
The builder pattern is still experimental and we need to gain experience
on when to use and when not to use.
Please discuss w/ teammates before using it to build complicated
layers.
"""
import functools
from lingvo.core import activations
from lingvo.core import builder_layers
from lingvo.core import hyperparams
from lingvo.core import layers
from lingvo.core import py_utils
from lingvo.core import tshape
class Base:
"""Model builder with commonly used layers.
A method in a builder class constructs a layer param. FProp of a layer
constructed by a builder takes a tuple of tf.Tensor (one or more) and returns
a tuple of tf.Tensor (one or more). Even though certain layers support FProp
argument being None (e.g., Conv2DLayer), builder should not depend on such a
support.
The constructed layer is often a composition of multiple sub-layers connected
in certain patterns. We expect to have a few methods to facilitate building
these patterns. For example, _Seq() helps to build a sequential layer that
calls its sub-layer one after another.
TODO(zhifengc): Adds a more concrete example.
"""
@classmethod
def Params(cls):
"""The params of this layer."""
p = hyperparams.InstantiableParams(cls)
p.Define('deterministic_dropout', False,
'Used deterministic dropout or not.')
p.Define(
'fprop_dtype', None,
'Activations datatype to use. To enable bfloat16 activations for '
'layers built using model builder, set fprop_dtype to '
'tf.bfloat16, which will be propagated to layers that support '
'bfloat16 activations. Default is None, which will use float32 '
'activations.')
# SPMD partition related params.
p.Define(
'device_mesh', None,
'A numpy.ndarray specifying the topology of a device mesh to place the '
'computations onto. If device_mesh is None, it is assumed to be a '
'single device. Here are some examples: '
'np.array([0, 1, 2, 3, 4, 5, 6, 7]) which is a 1d mesh with 8 devices, '
'np.array([[0, 1, 2, 3], [4, 5, 6, 7]]) which is 2d matrix of 8 '
'devices.')
p.Define(
'weight_split_dims_mapping', None,
'Relevant only if device_mesh above is not None. If not None, it '
'specifies how weight of this layer or those of the sublayers should '
'be sharded over device mesh. ')
p.Define(
'activation_split_dims_mapping', None,
'Relevant only if device_mesh above is not None. If not None, it '
'specifies how activation of this layer or those of the sublayers '
'should be sharded over device mesh. ')
return p
@property
def params(self):
"""Returns the params upon which this layer is built."""
return self._params
def __init__(self, params):
# Sub-classes should put some options common to many layers in __init__.
self._params = params.Copy()
######################################################################
# Layers to compose multiple layers.
#
# Sub-classes are discouraged to override these composition method.
######################################################################
  def _Rep(self, name, repeat, *subs):
    r"""Connects sub-layers sequentially and repeats the sequence multiple times.
E.g., _Rep('foo', 2, sa, sb, sc) constructs a layer with 6 layers
sequentially connected: [sa1, sb1, sc1, sa2, sb2, sc2]. sa1 and sa2 have
the same structure as the given sa, but sa1 and sa2 do not share the same
weight.
Args:
name: The layer name.
repeat: Repeat \*subs this many times in the compose layer.
*subs: A list of sub-layers.
Returns:
The param for the composed layer.
"""
iterations = []
for i in range(repeat):
iterations.append(self._Seq('iter_%03d' % i, *[p.Copy() for p in subs]))
return self._Seq(name, *iterations)
def _Seq(self, name, *subs):
"""Connects sub-layers sequentially."""
return builder_layers.SequentialLayer.Params().Set(
name=name, sub=list(subs))
def _Graph(self, name, input_endpoints, output_endpoints,
*signature_sub_param_list):
"""Connects sub-layers into a data flow graph."""
return builder_layers.GraphLayer.Params().Set(
name=name,
input_endpoints=input_endpoints,
output_endpoints=output_endpoints,
sub=list(signature_sub_param_list))
def _Id(self, name):
"""Identity. (t_1, ..., t_n) -> (t1, ..., t_n)."""
return self._Seq(name)
def _Arg(self, name, index):
"""Picks index-th element. (t_1, ..., t_n) -> (t_{index},)."""
return builder_layers.ArgIndexLayer.Params().Set(name=name, idx=[index])
def _Par(self, name, *subs):
"""y = (f1, f2, ..., fn)(x).
    We feed the input tuple to all sub-layers and concatenate their output
    tuples into one tuple.
Args:
name: The layer name.
*subs: A list of sub-layers.
Returns:
The param for the composed layer.
"""
def ConcatTuples(tuples):
# tuples is a list of tuples.
return tuple(functools.reduce(lambda x, y: x + list(y), tuples, []))
def ConcatMeta(tuples):
return py_utils.NestedMap(
flops=0,
out_shapes=tuple(
functools.reduce(lambda x, y: x + list(y), tuples, [])))
return builder_layers.ParallelLayer.Params().Set(
name=name, sub=list(subs), merge=ConcatTuples, merge_meta=ConcatMeta)
def _Fn(self, name, fn, fn_out=None, fn_flops=None):
"""y = fn(x).
Applies a fn: tuple(Tensor) -> a single Tensor or tuple(Tensor) to the input
tuple. Typically, fn is a very simple python function. This layer can be
used for prototyping but we advice to implement the logic as a sub-class of
BaseLayer for all established layers as FnLayer can't be serialized.
Args:
name: The layer name.
fn: A lambda tuple(Tensor) -> tuple(Tensor).
fn_out: A lambda tuple(tshape.Shape) -> output tuple(tshape.Shape)
fn_flops: A lambda tuple(tshape.Shape) -> estimated flops of fn.
If None, we assume flops == sum of elements in the inputs.
Returns:
The param for the composed layer.
"""
def FnMeta(*shapes):
"""A lambda tuple(tshape.Shape) -> NestedMap{flops, out_shapes}."""
if fn_out:
out_shapes = fn_out(*shapes)
if isinstance(out_shapes, tshape.Shape):
out_shapes = (out_shapes,)
else:
out_shapes = shapes
if fn_flops:
flops = fn_flops(*shapes)
else:
flops = sum([s.size for s in shapes])
return py_utils.NestedMap(flops=flops, out_shapes=out_shapes)
return builder_layers.FnLayer.Params().Set(name=name, fn=fn, fn_meta=FnMeta)
def _Save(self, name):
"""Returns a layer from which the activation and gradient can be accessed."""
return layers.FetchLayer.Params().Set(name=name)
def _AddFetches(self, name, body, fetches):
"""Fetches saved activations in the body sub-layer.
E.g.:
_AddFetches('foo', _Seq( 'stack', _Layer('layer1', ...),
_Save('layer1_out', ...), _Layer('layer2', ...), _Save('layer2_out', ...),
_Output('output', ...)), ['layer1_out', 'layer2_out'])
The layer returns the stack's final output together with intermediate
activations from layer1_out and layer2_out.
Args:
name: This layer's name.
body: The sub-layer.
fetches: A list of fetch names inside the sub-layer body.
Returns:
A layer whose outputs correspond to the activations of fetch points
in the sub-layer body. [input1, input2, ..., inputN, fetch1, ..., fetchM].
"""
return builder_layers.BranchLayer.Params().Set(
name=name, body=body, fetches=fetches)
def _Rematerialize(self, name, body):
"""Forces rematerialization on FProp of the body layer."""
return builder_layers.RematerializationLayer.Params().Set(
name=name, body=body)
def _BatchParallel(self, name, sub):
"""Splits the batch and compute the forward pass on multiple devices.
Args:
name: This layer's name.
sub: The sub-layer.
Returns:
A BatchParallel layer which splits the batch and computes the forward pass
on multiple devices.
"""
return builder_layers.BatchParallelLayer.Params().Set(name=name, sub=sub)
def _PrintShape(self, name):
"""Print FProp input shape information."""
return builder_layers.PrintShapeLayer.Params().Set(name=name)
def _CreateNestedMap(self, name, keys):
"""Returns a NestedMap with keys from fprop args."""
return builder_layers.CreateNestedMapLayer.Params().Set(
name=name, keys=keys)
###########################################################################
# Basic nn layers.
#
# The following method returns a layer param, whose FProp takes a single
# Tensor and returns a single Tensor.
#
# These methods are designed to have minimal knobs. Sub-classes which needs to
# be flexible can override these methods with different options. E.g., a
# sub-class builder can override _BN() to tune the decay option.
###########################################################################
def _BN(self, name, dims):
"""Batch norm."""
return layers.BatchNormLayer.Params().Set(name=name, dim=dims, decay=0.99)
def _LN(self, name, dims, use_fused_layernorm=False):
"""Layer norm."""
return layers.LayerNorm.Params().Set(
name=name,
input_dim=dims,
use_fused_layernorm=use_fused_layernorm,
fprop_dtype=self.params.fprop_dtype)
def _Dropout(self, name, keep_prob, noise_shape_broadcast_dims=None):
"""Returns a DropoutLayer Params."""
if self.params.deterministic_dropout:
return layers.DeterministicDropoutLayer.Params().Set(
name=name,
keep_prob=keep_prob,
noise_shape_broadcast_dims=noise_shape_broadcast_dims)
return layers.DropoutLayer.Params().Set(
name=name,
keep_prob=keep_prob,
noise_shape_broadcast_dims=noise_shape_broadcast_dims,
fprop_dtype=self.params.fprop_dtype)
def _Linear(self,
name,
idims,
odims,
device_mesh=None,
weight_split_dims_mapping=None,
qdomain=None):
"""Linear layer. y = matmul([..., idims], [idims, odims])."""
p = builder_layers.LinearLayer.Params()
p.name = name
p.input_dims = idims
p.output_dims = odims
p.fprop_dtype = self.params.fprop_dtype
p.device_mesh = device_mesh
p.weight_split_dims_mapping = weight_split_dims_mapping
p.qdomain.default = qdomain
return p
def _Bias(self, name, dims, device_mesh=None, weight_split_dims_mapping=None):
"""Bias layer. The bias is added to the last dimension of the input."""
return builder_layers.BiasLayer.Params().Set(
name=name,
dims=dims,
fprop_dtype=self.params.fprop_dtype,
device_mesh=device_mesh,
weight_split_dims_mapping=weight_split_dims_mapping)
def _Activation(self, name, fn='RELU'):
"""Activation layer."""
return activations.ActivationLayer.Params().Set(activation=fn, name=name)
def _FC(self, name, idims, odims, act='RELU'):
"""Feed-forward fully connected. y = act(matmul(x, w) + b)."""
# pyformat: disable
return self._Seq(
name,
self._Linear('linear', idims, odims),
self._Bias('bias', odims),
self._Activation('act', fn=act))
def _MLP(self, name, dims, act='RELU'):
"""Multiple layers of feed-forward fully connected.
Args:
name: The layer name.
dims: A list of int. i-th layer has dims[i] as its input dimension, and
dims[i+1] as its output dimensions.
act: The activation function.
Returns:
The param for the composed layer.
"""
l = []
for n, (i, o) in enumerate(zip(dims[:-1], dims[1:])):
l += [self._FC('l%03d' % n, i, o, act)]
return self._Seq(name, *l)
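  # Minimal composition sketch (hypothetical dims, assuming `b` is an instantiated
  # Base (or sub-class) builder):
  #   p = b._Seq('tower', b._MLP('mlp', [16, 32, 8]), b._Dropout('drop', 0.9))
  # builds params for two fully connected layers (16->32 and 32->8, RELU) followed
  # by dropout with keep_prob 0.9.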
def _Conv2D(self, name, filter_shape, filter_stride):
"""Conv2D layer."""
return layers.Conv2DLayerNoPadding.Params().Set(
name=name, filter_shape=filter_shape, filter_stride=filter_stride,
fprop_dtype=self.params.fprop_dtype)
def _Reshape(self, name, shape):
"""Reshape inputs to the shape provided."""
return builder_layers.ReshapeLayer.Params().Set(name=name,
shape=shape)
| 1.960938 | 2 |
instmakelib/instmake_toolnames.py | gilramir/instmake | 0 | 5874 | <filename>instmakelib/instmake_toolnames.py
# Copyright (c) 2010 by Cisco Systems, Inc.
"""
Manage the tool plugins and use them appropriately.
"""
import os
TOOLNAME_PLUGIN_PREFIX = "toolname"
class ToolNameManager:
"""ToolName plugins have to register with this manager
the circumstances under which they wish to be called."""
def __init__(self, plugins):
toolname_plugins = plugins.LoadAllPlugins(TOOLNAME_PLUGIN_PREFIX)
self.first_arg_matches = []
self.first_arg_basename_matches = []
self.first_arg_regexes= []
self.first_arg_basename_regexes = []
self.command_line_regexes = []
for plugin in toolname_plugins:
plugin.register(self)
def RegisterFirstArgumentMatch(self, text, cb):
"""Call back parameters: first_arg, argv, cwd"""
self.first_arg_matches.append((text, cb))
def RegisterFirstArgumentRegex(self, regex, cb):
"""Call back parameters: first_arg, argv, cwd, regex_match"""
self.first_arg_regexes.append((regex, cb))
def RegisterFirstArgumentBasenameMatch(self, text, cb):
"""Call back parameters: basename, first_arg, argv, cwd"""
self.first_arg_basename_matches.append((text, cb))
def RegisterFirstArgumentBasenameRegex(self, regex, cb):
"""Call back parameters: basename, first_arg, argv, cw, regex_match"""
self.first_arg_basename_regexes.append((regex, cb))
def RegisterCommandLineRegex(self, regex, cb):
"""Call back parameters: argv, cwd, regex_match"""
self.command_line_regexes.append((regex, cb))
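    # Sketch of a toolname plugin's register() hook (hypothetical plugin, not shipped
    # with instmake): the plugin picks one of the hooks above, e.g.
    #   def register(manager):
    #       manager.RegisterFirstArgumentBasenameMatch("gcc", handle_gcc)
    # where handle_gcc(basename, first_arg, argv, cwd) returns a rewritten argv list,
    # or None to decline.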
def GetTool(self, cmdline_args, cwd):
"""Returns a single string representing the tool in this
command-line. cmdline_args is an array of strings that will
be concatenated with spaces to form a single command-line."""
# It's done this way because of the way the command-line is
# stored in the instmake log. The top-most process (which is
# the first 'make' run, i.e., the last record in the instmake log)
# has a cmdline_args with one true argv-item per item. However,
# the instmakes that were called from 'make' have their entire
# command-line existing as a single string (the first and only
# item in cmdline_args).
argv_joined = ' '.join(cmdline_args)
argv = argv_joined.split()
# Call _GetTool as many times as necessary to find
# a non-changing answer.
seen = {}
max_iterations = 100
i = 0
while 1:
seen[argv_joined] = None
new_argv = self._GetTool(argv, cwd)
new_argv_joined = ' '.join(new_argv)
if new_argv_joined == argv_joined:
return new_argv[0]
            elif new_argv_joined in seen:
return new_argv[0]
else:
i += 1
if i == max_iterations:
return new_argv[0]
argv = new_argv
argv_joined = new_argv_joined
def _GetTool(self, argv, cwd):
cmdline = ' '.join(argv)
# Check the command-line
for (regex, cb) in self.command_line_regexes:
m = regex.search(cmdline)
if m:
retval = cb(argv, cwd, m)
if retval != None:
return retval
# Get the first argument
if len(argv) >= 1:
first_arg = argv[0]
else:
return argv
# Check the first argument
for (text, cb) in self.first_arg_matches:
if first_arg == text:
retval = cb(first_arg, argv, cwd)
if retval != None:
return retval
for (regex, cb) in self.first_arg_regexes:
m = regex.search(first_arg)
if m:
retval = cb(first_arg, argv, cwd, m)
if retval != None:
return retval
# Check the basename of the first arg
basename = os.path.basename(first_arg)
for (text, cb) in self.first_arg_basename_matches:
if basename == text:
retval = cb(basename, first_arg, argv, cwd)
if retval != None:
return retval
for (regex, cb) in self.first_arg_basename_regexes:
m = regex.search(basename)
if m:
retval = cb(basename, first_arg, argv, cwd, m)
if retval != None:
return retval
# Nothing matched. Return the default value.
return argv
| 2.40625 | 2 |
raiden/tests/integration/long_running/test_stress.py | tirkarthi/raiden | 2,101 | 5875 | <gh_stars>1000+
import time
from http import HTTPStatus
from itertools import count
from typing import Sequence
import gevent
import grequests
import pytest
import structlog
from eth_utils import to_canonical_address
from flask import url_for
from raiden.api.python import RaidenAPI
from raiden.api.rest import APIServer, RestAPI
from raiden.constants import RoutingMode
from raiden.message_handler import MessageHandler
from raiden.network.transport import MatrixTransport
from raiden.raiden_event_handler import RaidenEventHandler
from raiden.raiden_service import RaidenService
from raiden.settings import RestApiConfig
from raiden.tests.integration.api.utils import wait_for_listening_port
from raiden.tests.integration.fixtures.raiden_network import RestartNode
from raiden.tests.utils.detect_failure import raise_on_failure
from raiden.tests.utils.protocol import HoldRaidenEventHandler
from raiden.tests.utils.transfer import (
assert_synced_channel_state,
wait_assert,
watch_for_unlock_failures,
)
from raiden.transfer import views
from raiden.ui.startup import RaidenBundle
from raiden.utils.formatting import to_checksum_address
from raiden.utils.typing import (
Address,
BlockNumber,
Host,
Iterator,
List,
Port,
TokenAddress,
TokenAmount,
TokenNetworkAddress,
Tuple,
)
log = structlog.get_logger(__name__)
def iwait_and_get(items: Sequence[gevent.Greenlet]) -> None:
"""Iteratively wait and get on passed greenlets.
This ensures exceptions in the greenlets are re-raised as soon as possible.
"""
for item in gevent.iwait(items):
item.get()
def _url_for(apiserver: APIServer, endpoint: str, **kwargs) -> str:
# url_for() expects binary address so we have to convert here
for key, val in kwargs.items():
if isinstance(val, str) and val.startswith("0x"):
kwargs[key] = to_canonical_address(val)
with apiserver.flask_app.app_context():
return url_for(f"v1_resources.{endpoint}", **kwargs)
def start_apiserver(raiden_app: RaidenService, rest_api_port_number: Port) -> APIServer:
raiden_api = RaidenAPI(raiden_app)
rest_api = RestAPI(raiden_api)
api_server = APIServer(
rest_api, config=RestApiConfig(host=Host("localhost"), port=rest_api_port_number)
)
# required for url_for
api_server.flask_app.config["SERVER_NAME"] = f"localhost:{rest_api_port_number}"
api_server.start()
wait_for_listening_port(rest_api_port_number)
return api_server
def start_apiserver_for_network(
raiden_network: List[RaidenService], port_generator: Iterator[Port]
) -> List[APIServer]:
return [start_apiserver(app, next(port_generator)) for app in raiden_network]
def restart_app(app: RaidenService, restart_node: RestartNode) -> RaidenService:
new_transport = MatrixTransport(
config=app.config.transport, environment=app.config.environment_type
)
raiden_event_handler = RaidenEventHandler()
hold_handler = HoldRaidenEventHandler(raiden_event_handler)
app = RaidenService(
config=app.config,
rpc_client=app.rpc_client,
proxy_manager=app.proxy_manager,
query_start_block=BlockNumber(0),
raiden_bundle=RaidenBundle(
app.default_registry,
app.default_secret_registry,
),
services_bundle=app.default_services_bundle,
transport=new_transport,
raiden_event_handler=hold_handler,
message_handler=MessageHandler(),
routing_mode=RoutingMode.PRIVATE,
)
restart_node(app)
return app
def restart_network(
raiden_network: List[RaidenService], restart_node: RestartNode
) -> List[RaidenService]:
for app in raiden_network:
app.stop()
wait_network = (gevent.spawn(restart_app, app, restart_node) for app in raiden_network)
gevent.joinall(set(wait_network), raise_error=True)
new_network = [greenlet.get() for greenlet in wait_network]
return new_network
def restart_network_and_apiservers(
raiden_network: List[RaidenService],
restart_node: RestartNode,
api_servers: List[APIServer],
port_generator: Iterator[Port],
) -> Tuple[List[RaidenService], List[APIServer]]:
    """Stop all apps and API servers, then restart them."""
for rest_api in api_servers:
rest_api.stop()
new_network = restart_network(raiden_network, restart_node)
new_servers = start_apiserver_for_network(new_network, port_generator)
return (new_network, new_servers)
def address_from_apiserver(apiserver: APIServer) -> Address:
return apiserver.rest_api.raiden_api.address
def transfer_and_assert(
server_from: APIServer,
server_to: APIServer,
token_address: TokenAddress,
identifier: int,
amount: TokenAmount,
) -> None:
url = _url_for(
server_from,
"token_target_paymentresource",
token_address=to_checksum_address(token_address),
target_address=to_checksum_address(address_from_apiserver(server_to)),
)
json = {"amount": amount, "identifier": identifier}
log.debug("PAYMENT REQUEST", url=url, json=json)
request = grequests.post(url, json=json)
start = time.monotonic()
response = request.send().response
duration = time.monotonic() - start
log.debug("PAYMENT RESPONSE", url=url, json=json, response=response, duration=duration)
assert getattr(request, "exception", None) is None
assert response is not None
assert response.status_code == HTTPStatus.OK, f"Payment failed, reason: {response.content}"
assert response.headers["Content-Type"] == "application/json"
def sequential_transfers(
server_from: APIServer,
server_to: APIServer,
number_of_transfers: int,
token_address: TokenAddress,
identifier_generator: Iterator[int],
) -> None:
for _ in range(number_of_transfers):
transfer_and_assert(
server_from=server_from,
server_to=server_to,
token_address=token_address,
identifier=next(identifier_generator),
amount=TokenAmount(1),
)
def stress_send_serial_transfers(
rest_apis: List[APIServer],
token_address: TokenAddress,
identifier_generator: Iterator[int],
deposit: TokenAmount,
) -> None:
"""Send `deposit` transfers of value `1` one at a time, without changing
the initial capacity.
"""
pairs = list(zip(rest_apis, rest_apis[1:] + [rest_apis[0]]))
# deplete the channels in one direction
for server_from, server_to in pairs:
sequential_transfers(
server_from=server_from,
server_to=server_to,
number_of_transfers=deposit,
token_address=token_address,
identifier_generator=identifier_generator,
)
# deplete the channels in the backwards direction
for server_to, server_from in pairs:
sequential_transfers(
server_from=server_from,
server_to=server_to,
number_of_transfers=deposit * 2,
token_address=token_address,
identifier_generator=identifier_generator,
)
    # reset the balances by sending the "extra" deposit forward
for server_from, server_to in pairs:
sequential_transfers(
server_from=server_from,
server_to=server_to,
number_of_transfers=deposit,
token_address=token_address,
identifier_generator=identifier_generator,
)
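# Worked example of the sequence above, using this test's parameters (deposit == 2,
# three nodes connected in a ring): every channel pair first sends 2 unit payments
# forward, then 4 backwards, then 2 forward again, so each channel ends up back at
# its initial capacity of 2 on either side.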
def stress_send_parallel_transfers(
rest_apis: List[APIServer],
token_address: TokenAddress,
identifier_generator: Iterator[int],
deposit: TokenAmount,
) -> None:
"""Send `deposit` transfers in parallel, without changing the initial capacity."""
pairs = list(zip(rest_apis, rest_apis[1:] + [rest_apis[0]]))
# deplete the channels in one direction
iwait_and_get(
[
gevent.spawn(
sequential_transfers,
server_from=server_from,
server_to=server_to,
number_of_transfers=deposit,
token_address=token_address,
identifier_generator=identifier_generator,
)
for server_from, server_to in pairs
]
)
# deplete the channels in the backwards direction
iwait_and_get(
[
gevent.spawn(
sequential_transfers,
server_from=server_from,
server_to=server_to,
number_of_transfers=deposit * 2,
token_address=token_address,
identifier_generator=identifier_generator,
)
for server_to, server_from in pairs
]
)
    # reset the balances by sending the "extra" deposit forward
iwait_and_get(
[
gevent.spawn(
sequential_transfers,
server_from=server_from,
server_to=server_to,
number_of_transfers=deposit,
token_address=token_address,
identifier_generator=identifier_generator,
)
for server_from, server_to in pairs
]
)
def stress_send_and_receive_parallel_transfers(
rest_apis: List[APIServer],
token_address: TokenAddress,
identifier_generator: Iterator[int],
deposit: TokenAmount,
) -> None:
"""Send transfers of value one in parallel"""
pairs = list(zip(rest_apis, rest_apis[1:] + [rest_apis[0]]))
forward_transfers = [
gevent.spawn(
sequential_transfers,
server_from=server_from,
server_to=server_to,
number_of_transfers=deposit,
token_address=token_address,
identifier_generator=identifier_generator,
)
for server_from, server_to in pairs
]
backwards_transfers = [
gevent.spawn(
sequential_transfers,
server_from=server_from,
server_to=server_to,
number_of_transfers=deposit,
token_address=token_address,
identifier_generator=identifier_generator,
)
for server_to, server_from in pairs
]
iwait_and_get(forward_transfers + backwards_transfers)
def assert_channels(
raiden_network: List[RaidenService],
token_network_address: TokenNetworkAddress,
deposit: TokenAmount,
) -> None:
pairs = list(zip(raiden_network, raiden_network[1:] + [raiden_network[0]]))
for first, second in pairs:
wait_assert(
assert_synced_channel_state,
token_network_address,
first,
deposit,
[],
second,
deposit,
[],
)
@pytest.mark.skip(reason="flaky, see https://github.com/raiden-network/raiden/issues/4803")
@raise_on_failure
@pytest.mark.parametrize("number_of_nodes", [3])
@pytest.mark.parametrize("number_of_tokens", [1])
@pytest.mark.parametrize("channels_per_node", [2])
@pytest.mark.parametrize("deposit", [2])
@pytest.mark.parametrize("reveal_timeout", [15])
@pytest.mark.parametrize("settle_timeout", [120])
def test_stress(
raiden_network: List[RaidenService],
restart_node: RestartNode,
deposit: TokenAmount,
token_addresses: List[TokenAddress],
port_generator: Iterator[Port],
) -> None:
token_address = token_addresses[0]
rest_apis = start_apiserver_for_network(raiden_network, port_generator)
identifier_generator = count(start=1)
token_network_address = views.get_token_network_address_by_token_address(
views.state_from_raiden(raiden_network[0]),
raiden_network[0].default_registry.address,
token_address,
)
assert token_network_address
for _ in range(2):
assert_channels(raiden_network, token_network_address, deposit)
with watch_for_unlock_failures(*raiden_network):
stress_send_serial_transfers(rest_apis, token_address, identifier_generator, deposit)
raiden_network, rest_apis = restart_network_and_apiservers(
raiden_network, restart_node, rest_apis, port_generator
)
assert_channels(raiden_network, token_network_address, deposit)
with watch_for_unlock_failures(*raiden_network):
stress_send_parallel_transfers(rest_apis, token_address, identifier_generator, deposit)
raiden_network, rest_apis = restart_network_and_apiservers(
raiden_network, restart_node, rest_apis, port_generator
)
assert_channels(raiden_network, token_network_address, deposit)
with watch_for_unlock_failures(*raiden_network):
stress_send_and_receive_parallel_transfers(
rest_apis, token_address, identifier_generator, deposit
)
raiden_network, rest_apis = restart_network_and_apiservers(
raiden_network, restart_node, rest_apis, port_generator
)
restart_network(raiden_network, restart_node)
| 1.953125 | 2 |
pyabsa/utils/preprocess.py | jackie930/PyABSA | 0 | 5876 | # -*- coding: utf-8 -*-
# file: preprocess.py
# author: jackie
# Copyright (C) 2021. All Rights Reserved.
import os
import pandas as pd
import argparse
import emoji
import re
from sklearn.model_selection import train_test_split
parser = argparse.ArgumentParser()
parser.add_argument("--inpath", type=str, required=True, default='./raw_data/data1.csv')
parser.add_argument("--folder_name", type=str, required=False, default='./custom')
parser.add_argument("--task", type=str, required=False, default='aptepc')
args = parser.parse_args()
def convert(text, labels):
# convert label to list
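    # Each annotation is expected to expose the sentiment key ('正' / '负') at index 3
    # and the (start, end) character span of the aspect term at index 4.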
try:
labels = eval(labels)
tags = ['O'] * len(text)
sentiment = ['-999'] * len(text)
for j in range(len(labels)):
label = labels[j]
sentiment_key = labels[j][3]
if sentiment_key == '正':
sentiment_value = 'Positive'
elif sentiment_key == '负':
sentiment_value = 'Negative'
else:
sentiment_value = 'Others'
tags[label[4][0]] = 'B-ASP'
sentiment[label[4][0]] = sentiment_value
k = label[4][0] + 1
while k < label[4][1]:
tags[k] = 'I-ASP'
sentiment[k] = sentiment_value
k += 1
return text, tags, sentiment
except:
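        # Malformed annotation: log it for inspection. The function then falls through
        # and implicitly returns None, so the caller's tuple unpacking will fail.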
print ("labels", labels)
print ("text", text)
def convert_tag(text, labels):
# convert label to list
try:
labels = eval(labels)
tags = ['O'] * len(text)
sentiment = ['-999'] * len(text)
for j in range(len(labels)):
label = labels[j]
sentiment_key = labels[j][3]
if sentiment_key == '正':
sentiment_value = 'Positive'
elif sentiment_key == '负':
sentiment_value = 'Negative'
else:
sentiment_value = 'Others'
tags[label[4][0]] = 'B-'+label[1]
sentiment[label[4][0]] = sentiment_value
k = label[4][0] + 1
while k < label[4][1]:
tags[k] = 'I-'+label[1]
sentiment[k] = sentiment_value
k += 1
return text, tags, sentiment
except:
print ("labels", labels)
print ("text", text)
def convert_sentiment(sentiment_key):
if sentiment_key == '正':
sentiment_value = 'Positive'
else:
sentiment_value = 'Negative'
return sentiment_value
def convert_apc(text, label):
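    # Build (sentence with the aspect masked as "$T$", aspect term, sentiment) triplets,
    # i.e. the aspect-polarity-classification (APC) text format written out by convert_to_apc.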
label_update = [(i[0], i[3], i[4]) for i in eval(label)]
label_update = list(set(label_update))
str1_list = []
str2_list = []
str3_list = []
for j in range(len(label_update)):
str1 = text[:label_update[j][2][0]] + '$T$ ' + text[label_update[j][2][1]:]
str1_list.append(str1)
str2_list.append(label_update[j][0])
str3_list.append(convert_sentiment(label_update[j][1]))
return str1_list, str2_list, str3_list
def filter_emoji(desstr, restr=''):
    # filter out emoji characters
try:
co = re.compile(u'[\U00010000-\U0010ffff]')
except re.error:
co = re.compile(u'[\uD800-\uDBFF][\uDC00-\uDFFF]')
return co.sub(restr, desstr)
def convert_to_atepc(inpath, dist_fname, flag):
    # before writing, remove the output file if it already exists
if os.path.exists(dist_fname):
os.remove(dist_fname)
f1 = open(dist_fname, 'w', encoding='utf8')
data = pd.read_csv(inpath)
data.columns = ['text', 'tag_sentiment_list']
# preprocess for emoji
data['text'] = data['text'].map(lambda x: filter_emoji(x, restr='xx'))
    # keep only reviews of at most 600 characters
data = data[data['text'].str.len() <= 600]
# train test split
x_train, x_test = train_test_split(data, test_size=0.2, random_state=42)
if flag == 'train':
data_res = x_train.iloc[:, :].reset_index()
else:
data_res = x_test.iloc[:, :].reset_index()
# print (data_res.head())
for i in range(len(data_res)):
text, label = data_res['text'][i], data_res['tag_sentiment_list'][i]
text, tags, sentiment = convert(text, label)
for word, tag, sen in zip(text, tags, sentiment):
if word not in [',', '。', ' ', '\xa0', '\u2006', '\u3000', '\u2002', '\u2003', '\u2005', '\x0c', '\u2028',
'\u2009', '\u200a']:
f1.write(word + ' ' + tag + ' ' + sen + '\n')
else:
f1.write("\n")
f1.write("\n")
f1.close()
print ("process atepc finished!")
def convert_to_atepc_tag(inpath, dist_fname, flag):
    # before writing, remove the output file if it already exists
if os.path.exists(dist_fname):
os.remove(dist_fname)
f1 = open(dist_fname, 'w', encoding='utf8')
data = pd.read_csv(inpath)
data.columns = ['text', 'tag_sentiment_list']
# preprocess for emoji
data['text'] = data['text'].map(lambda x: filter_emoji(x, restr='xx'))
# drop id list not able to process
# print (data.iloc[8832,:])
# data = data.drop([8832])
    # keep only reviews of at most 600 characters
data = data[data['text'].str.len() <= 600]
# train test split
x_train, x_test = train_test_split(data, test_size=0.2, random_state=42)
if flag == 'train':
data_res = x_train.iloc[:, :].reset_index()
else:
data_res = x_test.iloc[:, :].reset_index()
# print (data_res.head())
for i in range(len(data_res)):
text, label = data_res['text'][i], data_res['tag_sentiment_list'][i]
text, tags, sentiment = convert(text, label)
for word, tag, sen in zip(text, tags, sentiment):
if word not in [',', '。', ' ', '\xa0', '\u2006', '\u3000', '\u2002', '\u2003', '\u2005', '\x0c', '\u2028',
'\u2009', '\u200a']:
f1.write(word + ' ' + tag + ' ' + sen + '\n')
else:
f1.write("\n")
f1.write("\n")
f1.close()
print ("process atepc finished!")
def convert_to_apc(inpath, dist_fname, flag):
    # before writing, remove the output file if it already exists
if os.path.exists(dist_fname):
os.remove(dist_fname)
f1 = open(dist_fname, 'w', encoding='utf8')
data = pd.read_csv(inpath)
# train test split
x_train, x_test = train_test_split(data, test_size=0.2, random_state=42)
if flag == 'train':
data_res = x_train.iloc[:, :].reset_index()
else:
data_res = x_test.iloc[:, :].reset_index()
# print (data_res.head())
for i in range(len(data_res)):
text, label = data_res['text'][i], data_res['tag_sentiment_list'][i]
str1_list, str2_list, str3_list = convert_apc(text, label)
for x1, x2, x3 in zip(str1_list, str2_list, str3_list):
f1.write(x1 + '\n')
f1.write(x2 + '\n')
f1.write(x3 + '\n')
f1.close()
print ("process apc finished!")
def main(inpath, folder_name, task):
if not os.path.exists(folder_name):
os.makedirs(folder_name)
if task == 'aptepc':
# get folder name
print ("start process for an aptepc task")
folder_name_prefix = folder_name.split('/')[-1]
dist_train_fname = os.path.join(folder_name_prefix, folder_name_prefix + '.train.txt.atepc')
dist_test_fname = os.path.join(folder_name_prefix, folder_name_prefix + '.test.txt.atepc')
# process train
convert_to_atepc(inpath, dist_train_fname, 'train')
print ("<<< finish training data preprocess")
# process test
convert_to_atepc(inpath, dist_test_fname, 'test')
print ("<<< finish test data preprocess")
elif task == 'apc':
# get folder name
folder_name_prefix = folder_name.split('/')[-1]
dist_train_fname = os.path.join(folder_name_prefix, folder_name_prefix + '.train.txt')
dist_test_fname = os.path.join(folder_name_prefix, folder_name_prefix + '.test.txt')
# process train
convert_to_apc(inpath, dist_train_fname, 'train')
print ("<<< finish training data preprocess")
# process test
convert_to_apc(inpath, dist_test_fname, 'test')
print ("<<< finish test data preprocess")
elif task == 'aptepc-tag':
# get folder name
print ("start process for an aptepc tag task")
folder_name_prefix = folder_name.split('/')[-1]
dist_train_fname = os.path.join(folder_name_prefix, folder_name_prefix + '.train.txt.atepc')
dist_test_fname = os.path.join(folder_name_prefix, folder_name_prefix + '.test.txt.atepc')
# process train
convert_to_atepc_tag(inpath, dist_train_fname, 'train')
print ("<<< finish training data preprocess")
# process test
convert_to_atepc_tag(inpath, dist_test_fname, 'test')
print ("<<< finish test data preprocess")
main(args.inpath, args.folder_name, args.task) | 2.71875 | 3 |
apps/06_lolcat_factory/you_try/PRD/cat_service.py | dparito/10Apps-Python_w-Andy | 1 | 5877 | import os
import shutil
import requests
def get_cat(folder, name):
url = "http://consuming-python-services-api.azurewebsites.net/cats/random"
data = get_data_from_url(url)
save_image(folder, name, data)
def get_data_from_url(url):
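    # Stream the response so the raw, file-like body can be copied straight to disk.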
response = requests.get(url, stream=True)
return response.raw
def save_image(folder, name, data):
file_name = os.path.join(folder, name + '.jpg')
with open(file_name, 'wb') as fout:
shutil.copyfileobj(data, fout)
| 3.171875 | 3 |
dask/dataframe/io/hdf.py | TryTestspace/dask | 1 | 5878 | <filename>dask/dataframe/io/hdf.py
from __future__ import absolute_import, division, print_function
from fnmatch import fnmatch
from glob import glob
import os
import uuid
from warnings import warn
import pandas as pd
from toolz import merge
from .io import _link
from ...base import get_scheduler
from ..core import DataFrame, new_dd_object
from ... import config, multiprocessing
from ...base import tokenize, compute_as_if_collection
from ...bytes.utils import build_name_function
from ...compatibility import PY3
from ...delayed import Delayed, delayed
from ...utils import get_scheduler_lock
def _pd_to_hdf(pd_to_hdf, lock, args, kwargs=None):
""" A wrapper function around pd_to_hdf that enables locking"""
if lock:
lock.acquire()
try:
pd_to_hdf(*args, **kwargs)
finally:
if lock:
lock.release()
return None
def to_hdf(df, path, key, mode='a', append=False, get=None, scheduler=None,
name_function=None, compute=True, lock=None, dask_kwargs={},
**kwargs):
""" Store Dask Dataframe to Hierarchical Data Format (HDF) files
This is a parallel version of the Pandas function of the same name. Please
see the Pandas docstring for more detailed information about shared keyword
arguments.
This function differs from the Pandas version by saving the many partitions
of a Dask DataFrame in parallel, either to many files, or to many datasets
    within the same file. You may specify this parallelism with an asterisk
``*`` within the filename or datapath, and an optional ``name_function``.
    The asterisk will be replaced with an increasing sequence of integers
starting from ``0`` or with the result of calling ``name_function`` on each
of those integers.
This function only supports the Pandas ``'table'`` format, not the more
specialized ``'fixed'`` format.
Parameters
----------
path: string
Path to a target filename. May contain a ``*`` to denote many filenames
key: string
Datapath within the files. May contain a ``*`` to denote many locations
name_function: function
A function to convert the ``*`` in the above options to a string.
Should take in a number from 0 to the number of partitions and return a
string. (see examples below)
compute: bool
Whether or not to execute immediately. If False then this returns a
``dask.Delayed`` value.
lock: Lock, optional
Lock to use to prevent concurrency issues. By default a
``threading.Lock``, ``multiprocessing.Lock`` or ``SerializableLock``
will be used depending on your scheduler if a lock is required. See
dask.utils.get_scheduler_lock for more information about lock
selection.
**other:
See pandas.to_hdf for more information
Examples
--------
Save Data to a single file
>>> df.to_hdf('output.hdf', '/data') # doctest: +SKIP
Save data to multiple datapaths within the same file:
>>> df.to_hdf('output.hdf', '/data-*') # doctest: +SKIP
Save data to multiple files:
>>> df.to_hdf('output-*.hdf', '/data') # doctest: +SKIP
Save data to multiple files, using the multiprocessing scheduler:
>>> df.to_hdf('output-*.hdf', '/data', scheduler='processes') # doctest: +SKIP
Specify custom naming scheme. This writes files as
'2000-01-01.hdf', '2000-01-02.hdf', '2000-01-03.hdf', etc..
>>> from datetime import date, timedelta
>>> base = date(year=2000, month=1, day=1)
>>> def name_function(i):
... ''' Convert integer 0 to n to a string '''
... return base + timedelta(days=i)
>>> df.to_hdf('*.hdf', '/data', name_function=name_function) # doctest: +SKIP
Returns
-------
None: if compute == True
delayed value: if compute == False
See Also
--------
read_hdf:
to_parquet:
"""
name = 'to-hdf-' + uuid.uuid1().hex
pd_to_hdf = getattr(df._partition_type, 'to_hdf')
single_file = True
single_node = True
# if path is string, format using i_name
if isinstance(path, str):
if path.count('*') + key.count('*') > 1:
raise ValueError("A maximum of one asterisk is accepted in file "
"path and dataset key")
fmt_obj = lambda path, i_name: path.replace('*', i_name)
if '*' in path:
single_file = False
else:
if key.count('*') > 1:
raise ValueError("A maximum of one asterisk is accepted in "
"dataset key")
fmt_obj = lambda path, _: path
if '*' in key:
single_node = False
if 'format' in kwargs and kwargs['format'] not in ['t', 'table']:
raise ValueError("Dask only support 'table' format in hdf files.")
if mode not in ('a', 'w', 'r+'):
raise ValueError("Mode must be one of 'a', 'w' or 'r+'")
if name_function is None:
name_function = build_name_function(df.npartitions - 1)
# we guarantee partition order is preserved when its saved and read
# so we enforce name_function to maintain the order of its input.
if not (single_file and single_node):
formatted_names = [name_function(i) for i in range(df.npartitions)]
if formatted_names != sorted(formatted_names):
warn("To preserve order between partitions name_function "
"must preserve the order of its input")
# If user did not specify scheduler and write is sequential default to the
# sequential scheduler. otherwise let the _get method choose the scheduler
if (get is None and
not config.get('get', None) and
scheduler is None and
not config.get('scheduler', None) and
single_node and single_file):
scheduler = 'single-threaded'
# handle lock default based on whether we're writing to a single entity
_actual_get = get_scheduler(get=get, collections=[df], scheduler=scheduler)
if lock is None:
if not single_node:
lock = True
elif not single_file and _actual_get is not multiprocessing.get:
# if we're writing to multiple files with the multiprocessing
# scheduler we don't need to lock
lock = True
else:
lock = False
if lock:
lock = get_scheduler_lock(get, df, scheduler=scheduler)
kwargs.update({'format': 'table', 'mode': mode, 'append': append})
dsk = dict()
i_name = name_function(0)
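    # The first partition is written with the caller's mode/append settings; the
    # remaining partitions reuse kwargs2 below, which forces append mode whenever
    # a file or dataset is shared.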
dsk[(name, 0)] = (_pd_to_hdf, pd_to_hdf, lock,
[(df._name, 0), fmt_obj(path, i_name),
key.replace('*', i_name)], kwargs)
kwargs2 = kwargs.copy()
if single_file:
kwargs2['mode'] = 'a'
if single_node:
kwargs2['append'] = True
filenames = []
for i in range(0,df.npartitions):
i_name = name_function(i)
filenames.append(fmt_obj(path, i_name))
for i in range(1, df.npartitions):
i_name = name_function(i)
task = (_pd_to_hdf, pd_to_hdf, lock,
[(df._name, i), fmt_obj(path, i_name),
key.replace('*', i_name)], kwargs2)
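        # When writing into a single file, wrap the task with _link so it only runs
        # after an earlier partition has been written, keeping file writes sequential.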
if single_file:
link_dep = i - 1 if single_node else 0
task = (_link, (name, link_dep), task)
dsk[(name, i)] = task
dsk = merge(df.dask, dsk)
if single_file and single_node:
keys = [(name, df.npartitions - 1)]
else:
keys = [(name, i) for i in range(df.npartitions)]
if compute:
compute_as_if_collection(DataFrame, dsk, keys, get=get,
scheduler=scheduler, **dask_kwargs)
return filenames
else:
return delayed([Delayed(k, dsk) for k in keys])
dont_use_fixed_error_message = """
This HDFStore is not partitionable and can only be used monolithically with
pandas. In the future when creating HDFStores use the ``format='table'``
option to ensure that your dataset can be parallelized"""
read_hdf_error_msg = """
The start and stop keywords are not supported when reading from more than
one file/dataset.
The combination is ambiguous because it could be interpreted as the starting
and stopping index per file, or starting and stopping index of the global
dataset."""
def _read_single_hdf(path, key, start=0, stop=None, columns=None,
chunksize=int(1e6), sorted_index=False, lock=None,
mode='a'):
"""
Read a single hdf file into a dask.dataframe. Used for each file in
read_hdf.
"""
def get_keys_stops_divisions(path, key, stop, sorted_index, chunksize):
"""
Get the "keys" or group identifiers which match the given key, which
can contain wildcards. This uses the hdf file identified by the
given path. Also get the index of the last row of data for each matched
key.
"""
with pd.HDFStore(path, mode=mode) as hdf:
keys = [k for k in hdf.keys() if fnmatch(k, key)]
stops = []
divisions = []
for k in keys:
storer = hdf.get_storer(k)
if storer.format_type != 'table':
raise TypeError(dont_use_fixed_error_message)
if stop is None:
stops.append(storer.nrows)
elif stop > storer.nrows:
raise ValueError("Stop keyword exceeds dataset number "
"of rows ({})".format(storer.nrows))
else:
stops.append(stop)
if sorted_index:
division = [storer.read_column('index', start=start, stop=start + 1)[0]
for start in range(0, storer.nrows, chunksize)]
division_end = storer.read_column('index',
start=storer.nrows - 1,
stop=storer.nrows)[0]
division.append(division_end)
divisions.append(division)
else:
divisions.append(None)
return keys, stops, divisions
def one_path_one_key(path, key, start, stop, columns, chunksize, division, lock):
"""
Get the data frame corresponding to one path and one key (which should
not contain any wildcards).
"""
empty = pd.read_hdf(path, key, mode=mode, stop=0)
if columns is not None:
empty = empty[columns]
token = tokenize((path, os.path.getmtime(path), key, start,
stop, empty, chunksize, division))
name = 'read-hdf-' + token
if empty.ndim == 1:
base = {'name': empty.name, 'mode': mode}
else:
base = {'columns': empty.columns, 'mode': mode}
if start >= stop:
raise ValueError("Start row number ({}) is above or equal to stop "
"row number ({})".format(start, stop))
def update(s):
new = base.copy()
new.update({'start': s, 'stop': s + chunksize})
return new
dsk = dict(((name, i), (_pd_read_hdf, path, key, lock,
update(s)))
for i, s in enumerate(range(start, stop, chunksize)))
if division:
divisions = division
else:
divisions = [None] * (len(dsk) + 1)
return new_dd_object(dsk, name, empty, divisions)
keys, stops, divisions = get_keys_stops_divisions(path, key, stop, sorted_index, chunksize)
if (start != 0 or stop is not None) and len(keys) > 1:
raise NotImplementedError(read_hdf_error_msg)
from ..multi import concat
return concat([one_path_one_key(path, k, start, s, columns, chunksize, d, lock)
for k, s, d in zip(keys, stops, divisions)])
def _pd_read_hdf(path, key, lock, kwargs):
""" Read from hdf5 file with a lock """
if lock:
lock.acquire()
try:
result = pd.read_hdf(path, key, **kwargs)
finally:
if lock:
lock.release()
return result
def read_hdf(pattern, key, start=0, stop=None, columns=None,
chunksize=1000000, sorted_index=False, lock=True, mode='a'):
"""
Read HDF files into a Dask DataFrame
Read hdf files into a dask dataframe. This function is like
``pandas.read_hdf``, except it can read from a single large file, or from
multiple files, or from multiple keys from the same file.
Parameters
----------
pattern : string, list
File pattern (string), buffer to read from, or list of file
paths. Can contain wildcards.
key : group identifier in the store. Can contain wildcards
start : optional, integer (defaults to 0), row number to start at
stop : optional, integer (defaults to None, the last row), row number to
stop at
columns : list of columns, optional
A list of columns that if not None, will limit the return
columns (default is None)
chunksize : positive integer, optional
Maximal number of rows per partition (default is 1000000).
sorted_index : boolean, optional
Option to specify whether or not the input hdf files have a sorted
index (default is False).
lock : boolean, optional
Option to use a lock to prevent concurrency issues (default is True).
mode : {'a', 'r', 'r+'}, default 'a'. Mode to use when opening file(s).
'r'
Read-only; no data can be modified.
'a'
Append; an existing file is opened for reading and writing,
and if the file does not exist it is created.
'r+'
It is similar to 'a', but the file must already exist.
Returns
-------
dask.DataFrame
Examples
--------
Load single file
>>> dd.read_hdf('myfile.1.hdf5', '/x') # doctest: +SKIP
Load multiple files
>>> dd.read_hdf('myfile.*.hdf5', '/x') # doctest: +SKIP
>>> dd.read_hdf(['myfile.1.hdf5', 'myfile.2.hdf5'], '/x') # doctest: +SKIP
Load multiple datasets
>>> dd.read_hdf('myfile.1.hdf5', '/*') # doctest: +SKIP
"""
if lock is True:
lock = get_scheduler_lock()
key = key if key.startswith('/') else '/' + key
if isinstance(pattern, str):
paths = sorted(glob(pattern))
else:
paths = pattern
if (start != 0 or stop is not None) and len(paths) > 1:
raise NotImplementedError(read_hdf_error_msg)
if chunksize <= 0:
raise ValueError("Chunksize must be a positive integer")
if (start != 0 or stop is not None) and sorted_index:
raise ValueError("When assuming pre-partitioned data, data must be "
"read in its entirety using the same chunksizes")
from ..multi import concat
return concat([_read_single_hdf(path, key, start=start, stop=stop,
columns=columns, chunksize=chunksize,
sorted_index=sorted_index,
lock=lock, mode=mode)
for path in paths])
if PY3:
from ..core import _Frame
_Frame.to_hdf.__doc__ = to_hdf.__doc__
| 2.453125 | 2 |
src/charma/media_info/manager.py | mononobi/charma-server | 1 | 5879 | # -*- coding: utf-8 -*-
"""
media info manager module.
"""
from pyrin.core.mixin import HookMixin
from pyrin.core.structs import Manager
import pyrin.utils.path as path_utils
from charma.media_info import MediaInfoPackage
from charma.media_info.interface import AbstractMediaInfoProvider
from charma.media_info.exceptions import InvalidMediaInfoProviderTypeError
class MediaInfoManager(Manager, HookMixin):
"""
media info manager class.
"""
package_class = MediaInfoPackage
hook_type = AbstractMediaInfoProvider
invalid_hook_type_error = InvalidMediaInfoProviderTypeError
REQUIRED_INFO = ('runtime', 'width', 'height')
def _is_complete(self, info):
"""
gets a value indicating that given media info is complete.
:param dict info: media info to be checked.
:rtype: bool
"""
for item in self.REQUIRED_INFO:
result = info.get(item)
if result is None or result <= 0:
return False
return True
def register_provider(self, instance):
"""
registers the given instance into media info providers.
:param AbstractMediaInfoProvider instance: media info provider instance
to be registered.
:raises InvalidMediaInfoProviderTypeError: invalid media info provider type error.
"""
self.register_hook(instance)
def get_info(self, file, **options):
"""
gets a dict containing media info of given file.
:param str file: absolute path of video file.
:raises InvalidPathError: invalid path error.
:raises PathIsNotAbsoluteError: path is not absolute error.
:raises PathNotExistedError: path not existed error.
        :raises IsNotFileError: is not a file error.
:returns: dict(int runtime,
int width,
int height)
:rtype: dict
"""
path_utils.assert_is_file(file)
result = dict()
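        # Query each registered provider in turn, merging results and stopping
        # early once all required fields have been filled in.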
for provider in self._get_hooks():
current_result = provider.get_info(file, **options)
result.update(current_result)
if self._is_complete(result) is True:
break
result.setdefault('runtime', 0)
result.setdefault('width', 0)
result.setdefault('height', 0)
return result
| 2.203125 | 2 |
tests/test_parsers.py | FlorisHoogenboom/BoxRec | 5 | 5880 | import unittest
from boxrec.parsers import FightParser
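# Minimal stand-in for requests.Response, exposing only the attributes these tests use.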
class MockResponse(object):
def __init__(self, content, encoding, url):
        self.content = content
self.encoding = encoding
self.url = url
class TestFightParser(unittest.TestCase):
def setUp(self):
with open('mock_data/fights/draw.html', 'rb') as file:
self.drawn_fight = file.read()
self.parser = FightParser()
def test_parses_draw(self):
"""Test it correctly handles draws"""
mock_response = MockResponse(
self.drawn_fight,
'UTF-8',
"http://boxrec.com/en/event/115689/202488"
)
result = self.parser.parse(mock_response)
self.assertEqual(result.winner, 'drawn', "Result should equal draw.")
class TestBoxerParser(unittest.TestCase):
pass
| 3.109375 | 3 |
hyperdock/common/workqueue.py | ErikGartner/hyperdock | 8 | 5881 | <filename>hyperdock/common/workqueue.py<gh_stars>1-10
from datetime import datetime, timedelta
from bson.objectid import ObjectId
WORK_TIMEOUT = 600
class WorkQueue:
"""
A simple MongoDB priority work queue that handles the queue
    of experiments.
"""
def __init__(self, mongodb):
super().__init__()
self._mongodb = mongodb
self._collection = mongodb.workqueue
def assign_next_job(self, worker_id):
"""
Assigns the next free job to worker.
Returns the object from the mongodb.
"""
t = datetime.utcnow()
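        # Atomically claim the oldest unstarted, non-cancelled job with the highest priority.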
job = self._collection.find_and_modify(
query={"start_time": -1, "cancelled": False},
sort=[("priority", -1), ("created_on", 1)],
update={"$set": {"start_time": t, "last_update": t, "worker": worker_id}},
new=True,
)
return job
def add_job(self, parameters, data, trial_id, trial_name, priority=0):
"""
Adds new work to the workqueue.
"""
id = self._collection.insert(
{
"start_time": -1,
"end_time": -1,
"last_update": -1,
"created_on": datetime.utcnow(),
"priority": priority,
"parameters": parameters,
"data": data,
"worker": None,
"result": {},
"trial": trial_id,
"trial_name": trial_name,
"_id": str(ObjectId()),
"cancelled": False,
"orphaned": False,
}
)
return id
def update_job(self, _id, update=None):
"""
Marks the job as alive and post an update from the job.
"""
t = datetime.utcnow()
self._collection.update(
{"_id": _id}, {"$set": {"last_update": t, "update": update}}
)
def is_job_cancelled(self, _id):
"""
        Checks if a certain job has been cancelled or removed altogether.
"""
return self._collection.find_one({"_id": _id, "cancelled": False}) is None
def finish_job(self, _id, result):
"""
Marks the job as finished and attach the result.
"""
t = datetime.utcnow()
self._collection.update_one(
{"_id": _id}, {"$set": {"end_time": t, "last_update": t, "result": result}}
)
def purge_dead_jobs(self):
"""
Returns jobs that have timed out due to worker death and cancel them.
"""
now = datetime.utcnow()
deadline = now - timedelta(seconds=WORK_TIMEOUT)
jobs = []
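        # Claim and cancel timed-out jobs one at a time until no more match.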
while True:
job = self._collection.find_and_modify(
query={
"start_time": {"$ne": -1},
"end_time": -1,
"last_update": {"$lt": deadline},
},
sort=[("priority", -1), ("last_update", 1)],
update={
"$set": {
"cancelled": True,
"orphaned": True,
"end_time": now,
"result": {"state": "fail", "msg": "Timed out!"},
}
},
new=True,
)
if job is not None:
jobs.append(job)
else:
return jobs
def check_for_orphans(self, id_list):
"""
Checks if a list of Docker container ids are marked as orphans.
Returns a list of (Docker id, experiment id) tuples.
"""
jobs = self._collection.find(
{"orphaned": True, "update.container.long_id": {"$in": id_list}}
)
return [(j["update"]["container"]["long_id"], j["_id"]) for j in list(jobs)]
def not_orphaned(self, _id):
"""
Marks a job as not orphaned.
"""
job = self._collection.find_and_modify(
query={"_id": _id}, update={"$set": {"orphaned": False}}, new=True
)
return job is not None
def cancel_invalid_jobs(self, trial_list):
"""
Takes a list of all active (not finished, cancelled or removed) trial ids.
Work that is not associated with any of these are cancelled.
"""
now = datetime.utcnow()
jobs = []
while True:
job = self._collection.find_and_modify(
query={"trial": {"$nin": trial_list}, "end_time": -1},
update={
"$set": {
"cancelled": True,
"end_time": now,
"result": {"state": "fail", "msg": "Abandoned"},
}
},
new=True,
)
if job is not None:
jobs.append(job)
else:
return jobs
| 2.5625 | 3 |
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/apache_libcloud-0.15.1-py2.7.egg/libcloud/test/test_connection.py | poojavade/Genomics_Docker | 1 | 5882 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import ssl
from mock import Mock, call
from libcloud.test import unittest
from libcloud.common.base import Connection
from libcloud.common.base import LoggingConnection
class ConnectionClassTestCase(unittest.TestCase):
def setUp(self):
self.originalConnect = Connection.connect
self.originalResponseCls = Connection.responseCls
Connection.connect = Mock()
Connection.responseCls = Mock()
Connection.allow_insecure = True
def tearDown(self):
Connection.connect = self.originalConnect
        Connection.responseCls = self.originalResponseCls
Connection.allow_insecure = True
def test_dont_allow_insecure(self):
Connection.allow_insecure = True
Connection(secure=False)
Connection.allow_insecure = False
expected_msg = (r'Non https connections are not allowed \(use '
'secure=True\)')
self.assertRaisesRegexp(ValueError, expected_msg, Connection,
secure=False)
def test_content_length(self):
con = Connection()
con.connection = Mock()
# GET method
# No data, no content length should be present
con.request('/test', method='GET', data=None)
call_kwargs = con.connection.request.call_args[1]
self.assertTrue('Content-Length' not in call_kwargs['headers'])
# '' as data, no content length should be present
con.request('/test', method='GET', data='')
call_kwargs = con.connection.request.call_args[1]
self.assertTrue('Content-Length' not in call_kwargs['headers'])
# 'a' as data, content length should be present (data in GET is not
# correct, but anyways)
con.request('/test', method='GET', data='a')
call_kwargs = con.connection.request.call_args[1]
self.assertEqual(call_kwargs['headers']['Content-Length'], '1')
# POST, PUT method
# No data, content length should be present
for method in ['POST', 'PUT', 'post', 'put']:
con.request('/test', method=method, data=None)
call_kwargs = con.connection.request.call_args[1]
self.assertEqual(call_kwargs['headers']['Content-Length'], '0')
# '' as data, content length should be present
for method in ['POST', 'PUT', 'post', 'put']:
con.request('/test', method=method, data='')
call_kwargs = con.connection.request.call_args[1]
self.assertEqual(call_kwargs['headers']['Content-Length'], '0')
# No data, raw request, do not touch Content-Length if present
for method in ['POST', 'PUT', 'post', 'put']:
con.request('/test', method=method, data=None,
headers={'Content-Length': '42'}, raw=True)
putheader_call_list = con.connection.putheader.call_args_list
self.assertIn(call('Content-Length', '42'), putheader_call_list)
# '' as data, raw request, do not touch Content-Length if present
for method in ['POST', 'PUT', 'post', 'put']:
con.request('/test', method=method, data=None,
headers={'Content-Length': '42'}, raw=True)
putheader_call_list = con.connection.putheader.call_args_list
self.assertIn(call('Content-Length', '42'), putheader_call_list)
# 'a' as data, content length should be present
for method in ['POST', 'PUT', 'post', 'put']:
con.request('/test', method=method, data='a')
call_kwargs = con.connection.request.call_args[1]
self.assertEqual(call_kwargs['headers']['Content-Length'], '1')
def test_cache_busting(self):
params1 = {'foo1': 'bar1', 'foo2': 'bar2'}
params2 = [('foo1', 'bar1'), ('foo2', 'bar2')]
con = Connection()
con.connection = Mock()
con.pre_connect_hook = Mock()
con.pre_connect_hook.return_value = {}, {}
con.cache_busting = False
con.request(action='/path', params=params1)
args, kwargs = con.pre_connect_hook.call_args
self.assertFalse('cache-busting' in args[0])
self.assertEqual(args[0], params1)
con.request(action='/path', params=params2)
args, kwargs = con.pre_connect_hook.call_args
self.assertFalse('cache-busting' in args[0])
self.assertEqual(args[0], params2)
con.cache_busting = True
con.request(action='/path', params=params1)
args, kwargs = con.pre_connect_hook.call_args
self.assertTrue('cache-busting' in args[0])
con.request(action='/path', params=params2)
args, kwargs = con.pre_connect_hook.call_args
self.assertTrue('cache-busting' in args[0][len(params2)])
def test_context_is_reset_after_request_has_finished(self):
context = {'foo': 'bar'}
def responseCls(connection, response):
connection.called = True
self.assertEqual(connection.context, context)
con = Connection()
con.called = False
con.connection = Mock()
con.responseCls = responseCls
con.set_context(context)
self.assertEqual(con.context, context)
con.request('/')
# Context should have been reset
self.assertTrue(con.called)
self.assertEqual(con.context, {})
# Context should also be reset if a method inside request throws
con = Connection()
con.connection = Mock()
con.set_context(context)
self.assertEqual(con.context, context)
con.connection.request = Mock(side_effect=ssl.SSLError())
try:
con.request('/')
except ssl.SSLError:
pass
self.assertEqual(con.context, {})
con.connection = Mock()
con.set_context(context)
self.assertEqual(con.context, context)
con.responseCls = Mock(side_effect=ValueError())
try:
con.request('/')
except ValueError:
pass
self.assertEqual(con.context, {})
def test_log_curl(self):
url = '/test/path'
body = None
headers = {}
con = LoggingConnection()
con.protocol = 'http'
con.host = 'example.com'
con.port = 80
for method in ['GET', 'POST', 'PUT', 'DELETE']:
cmd = con._log_curl(method=method, url=url, body=body,
headers=headers)
self.assertEqual(cmd, 'curl -i -X %s --compress http://example.com:80/test/path' %
(method))
# Should use --head for head requests
cmd = con._log_curl(method='HEAD', url=url, body=body, headers=headers)
self.assertEqual(cmd, 'curl -i --head --compress http://example.com:80/test/path')
if __name__ == '__main__':
sys.exit(unittest.main())
| 2.125 | 2 |
igibson/utils/data_utils/ext_object/scripts/step_1_visual_mesh.py | mamadbiabon/iGibson | 360 | 5883 | <reponame>mamadbiabon/iGibson<filename>igibson/utils/data_utils/ext_object/scripts/step_1_visual_mesh.py
import os
import sys
import bpy
script_dir = os.path.dirname(os.path.abspath(__file__))
utils_dir = os.path.join(script_dir, "../../blender_utils")
sys.path.append(utils_dir)
from utils import bake_model, clean_unused, export_ig_object, import_obj_folder
#############################################
# Parse command line arguments
#############################################
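# Example invocation (assumed): run through Blender's Python, with script arguments after "--", e.g.
#   blender -b --python step_1_visual_mesh.py -- --source_dir <obj_dir> --dest_dir <out_dir> --bake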
def get_arg(argv, flag, default=None):
if flag in argv:
return argv[argv.index(flag) + 1]
return default
should_bake = "--bake" in sys.argv
axis = ["X", "Y", "Z", "-X", "-Y", "-Z"]
import_axis_up = get_arg(sys.argv, "--up", default="Z")
if import_axis_up not in axis:
raise ValueError("Axis up not supported: {} (should be among X,Y,Z,-X,-Y,-Z)".format(import_axis_up))
import_axis_forward = get_arg(sys.argv, "--forward", default="X")
if import_axis_forward not in axis:
raise ValueError("Axis forward not supported: {} (should be among X,Y,Z,-X,-Y,-Z)".format(import_axis_forward))
source_dir = get_arg(sys.argv, "--source_dir")
if source_dir is None:
raise ValueError("Source directory not specified.")
dest_dir = get_arg(sys.argv, "--dest_dir")
if dest_dir is None:
raise ValueError("Destination directory not specified.")
os.makedirs(dest_dir, exist_ok=True)
model_id = os.path.basename(source_dir)
#############################################
# Importing obj files from source dir
#############################################
for on in bpy.context.scene.objects.keys():
obj = bpy.context.scene.objects[on]
bpy.data.objects.remove(obj)
clean_unused()
import_obj_folder(model_id, source_dir, up=import_axis_up, forward=import_axis_forward)
#############################################
# Optional UV Unwrapping
# This only needed if baking will be performed
#############################################
if should_bake:
uv_unwrapped = True
for o in bpy.context.scene.objects:
if not o.data.uv_layers:
uv_unwrapped = False
if not uv_unwrapped:
bpy.ops.object.mode_set(mode="OBJECT")
vl = bpy.context.view_layer
bpy.ops.object.select_all(action="DESELECT")
for on in bpy.context.scene.objects.keys():
obj = bpy.context.scene.objects[on]
new_uv = bpy.context.scene.objects[on].data.uv_layers.new(name="obj_uv")
vl.objects.active = obj
obj.select_set(True)
bpy.ops.object.editmode_toggle()
bpy.ops.mesh.select_all(action="SELECT")
bpy.ops.uv.smart_project(angle_limit=66, island_margin=0.02)
bpy.context.tool_settings.mesh_select_mode = (False, False, True)
bpy.ops.object.mode_set(mode="OBJECT")
#############################################
# Export models
#############################################
export_ig_object(dest_dir, save_material=not should_bake)
#############################################
# Optional Texture Baking
#############################################
if should_bake:
mat_dir = os.path.join(dest_dir, "material")
os.makedirs(mat_dir, exist_ok=True)
# bpy.ops.wm.open_mainfile(filepath=blend_path)
# import_ig_object(model_root, import_mat=True)
for obj in bpy.context.scene.objects:
obj.select_set(True)
bpy.context.view_layer.objects.active = obj
bpy.ops.object.select_all(action="SELECT")
bpy.ops.object.join()
channels = {
"DIFFUSE": (2048, 32),
"ROUGHNESS": (1024, 16),
"METALLIC": (1024, 16),
"NORMAL": (1024, 16),
}
bake_model(mat_dir, channels, overwrite=True)
bpy.ops.wm.quit_blender()
| 1.898438 | 2 |
ceilometerclient/common/base.py | mail2nsrajesh/python-ceilometerclient | 0 | 5884 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Base utilities to build API operation managers and objects on top of.
"""
import copy
from ceilometerclient.apiclient import base
from ceilometerclient.apiclient import exceptions
from ceilometerclient import exc
def getid(obj):
"""Extracts object ID.
Abstracts the common pattern of allowing both an object or an
object's ID (UUID) as a parameter when dealing with relationships.
"""
try:
return obj.id
except AttributeError:
return obj
class Manager(object):
"""Managers interact with a particular type of API.
    It works with samples, meters, alarms, etc. and provides CRUD operations for
them.
"""
resource_class = None
def __init__(self, api):
self.api = api
@property
def client(self):
"""Compatible with latest oslo-incubator.apiclient code."""
return self.api
def _create(self, url, body):
body = self.api.post(url, json=body).json()
if body:
return self.resource_class(self, body)
def _list(self, url, response_key=None, obj_class=None, body=None,
expect_single=False):
try:
resp = self.api.get(url)
except exceptions.NotFound:
raise exc.HTTPNotFound
if not resp.content:
raise exc.HTTPNotFound
body = resp.json()
if obj_class is None:
obj_class = self.resource_class
if response_key:
try:
data = body[response_key]
except KeyError:
return []
else:
data = body
if expect_single:
data = [data]
return [obj_class(self, res, loaded=True) for res in data if res]
def _update(self, url, body, response_key=None):
body = self.api.put(url, json=body).json()
# PUT requests may not return a body
if body:
return self.resource_class(self, body)
def _delete(self, url):
self.api.delete(url)
class Resource(base.Resource):
"""A resource represents a particular instance of an object.
Resource might be tenant, user, etc.
This is pretty much just a bag for attributes.
:param manager: Manager object
:param info: dictionary representing resource attributes
:param loaded: prevent lazy-loading if set to True
"""
def to_dict(self):
return copy.deepcopy(self._info)
| 2.0625 | 2 |
lib/charms/layer/azure.py | freyes/charm-azure-integrator | 0 | 5885 | <reponame>freyes/charm-azure-integrator
import json
import os
import re
import subprocess
from base64 import b64decode
from enum import Enum
from math import ceil, floor
from pathlib import Path
from urllib.error import HTTPError
from urllib.request import urlopen
import yaml
from charmhelpers.core import hookenv
from charmhelpers.core.unitdata import kv
from charms.layer import status
ENTITY_PREFIX = 'charm.azure'
MODEL_UUID = os.environ['JUJU_MODEL_UUID']
MAX_ROLE_NAME_LEN = 64
MAX_POLICY_NAME_LEN = 128
class StandardRole(Enum):
NETWORK_MANAGER = '4d97b98b-1d4f-4787-a291-c67834d212e7'
SECURITY_MANAGER = 'e3d13bf0-dd5a-482e-ba6b-9b8433878d10'
DNS_MANAGER = 'befefa01-2a29-4197-83a8-272ff33ce314'
OBJECT_STORE_READER = '2a2b9908-6ea1-4ae2-8e65-a410df84e7d1'
OBJECT_STORE_MANAGER = 'ba92f5b4-2d11-453d-a403-e96b0029c9fe'
# When debugging hooks, for some reason HOME is set to /home/ubuntu, whereas
# during normal hook execution, it's /root. Set it here to be consistent.
os.environ['HOME'] = '/root'
def log(msg, *args):
hookenv.log(msg.format(*args), hookenv.INFO)
def log_err(msg, *args):
hookenv.log(msg.format(*args), hookenv.ERROR)
def get_credentials():
"""
Get the credentials from either the config or the hook tool.
Prefers the config so that it can be overridden.
"""
no_creds_msg = 'missing credentials; set credentials config'
config = hookenv.config()
# try to use Juju's trust feature
try:
result = subprocess.run(['credential-get'],
check=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
creds = yaml.load(result.stdout.decode('utf8'))
creds_data = creds['credential']['attributes']
login_cli(creds_data)
return True
except FileNotFoundError:
pass # juju trust not available
except subprocess.CalledProcessError as e:
if 'permission denied' not in e.stderr.decode('utf8'):
raise
no_creds_msg = 'missing credentials access; grant with: juju trust'
# try credentials config
if config['credentials']:
try:
creds_data = b64decode(config['credentials']).decode('utf8')
login_cli(creds_data)
return True
except Exception:
status.blocked('invalid value for credentials config')
return False
# no creds provided
status.blocked(no_creds_msg)
return False
def login_cli(creds_data):
"""
Use the credentials to authenticate the Azure CLI.
"""
app_id = creds_data['application-id']
app_pass = <PASSWORD>_<PASSWORD>['<PASSWORD>']
sub_id = creds_data['subscription-id']
tenant_id = _get_tenant_id(sub_id)
try:
log('Forcing logout of Azure CLI')
_azure('logout')
except AzureError:
pass
try:
log('Logging in to Azure CLI')
_azure('login',
'--service-principal',
'-u', app_id,
'-p', app_pass,
'-t', tenant_id)
# cache the subscription ID for use in roles
kv().set('charm.azure.sub-id', sub_id)
except AzureError as e:
# redact the credential info from the exception message
stderr = re.sub(app_id, '<app-id>', e.args[0])
stderr = re.sub(app_pass, '<app-pass>', stderr)
stderr = re.sub(tenant_id, '<tenant-id>', stderr)
# from None suppresses the previous exception from the stack trace
raise AzureError(stderr) from None
def ensure_msi(request):
msi = _get_msi(request.vm_id)
if not msi:
log('Enabling Managed Service Identity')
result = _azure('vm', 'identity', 'assign',
'--name', request.vm_name,
'--resource-group', request.resource_group)
vm_identities = kv().get('charm.azure.vm-identities', {})
msi = vm_identities[request.vm_id] = result['systemAssignedIdentity']
kv().set('charm.azure.vm-identities', vm_identities)
log('Instance MSI is: {}', msi)
def send_additional_metadata(request):
"""
Get additional info about the requesting instance via the API that isn't
available from the metadata server.
"""
res_grp = _azure('group', 'show', '--name', request.resource_group)
# hard-code most of these because with Juju, they're always the same
# and the queries required to look them up are a PITA
request.send_additional_metadata(
resource_group_location=res_grp['location'],
vnet_name='juju-internal-network',
vnet_resource_group=request.resource_group,
subnet_name='juju-internal-subnet',
security_group_name='juju-internal-nsg',
)
def tag_instance(request):
"""
Tag the given instance with the given tags.
"""
log('Tagging instance with: {}', request.instance_tags)
_azure('vm', 'update',
'--name', request.vm_name,
'--resource-group', request.resource_group,
'--set', *['tags.{}={}'.format(tag, value)
for tag, value in request.instance_tags.items()])
def enable_instance_inspection(request):
"""
Enable instance inspection access for the given application.
"""
log('Enabling instance inspection')
_assign_role(request, _get_role('vm-reader'))
def enable_network_management(request):
"""
Enable network management for the given application.
"""
log('Enabling network management')
_assign_role(request, StandardRole.NETWORK_MANAGER)
def enable_security_management(request):
"""
Enable security management for the given application.
"""
log('Enabling security management')
_assign_role(request, StandardRole.SECURITY_MANAGER)
def enable_block_storage_management(request):
"""
Enable block storage (disk) management for the given application.
"""
log('Enabling block storage management')
_assign_role(request, _get_role('disk-manager'))
def enable_dns_management(request):
"""
Enable DNS management for the given application.
"""
log('Enabling DNS management')
_assign_role(request, StandardRole.DNS_MANAGER)
def enable_object_storage_access(request):
"""
Enable object storage read-only access for the given application.
"""
log('Enabling object storage read')
_assign_role(request, StandardRole.OBJECT_STORE_READER)
def enable_object_storage_management(request):
"""
Enable object storage management for the given application.
"""
log('Enabling object store management')
_assign_role(request, StandardRole.OBJECT_STORE_MANAGER)
def cleanup():
"""
Perform cleanup.
"""
pass
# Internal helpers
class AzureError(Exception):
"""
Exception class representing an error returned from the azure-cli tool.
"""
@classmethod
def get(cls, message):
"""
Factory method to create either an instance of this class or a
meta-subclass for certain `message`s.
"""
if 'already exists' in message:
return AlreadyExistsAzureError(message)
return AzureError(message)
class AlreadyExistsAzureError(AzureError):
"""
Meta-error subclass of AzureError representing something already existing.
"""
pass
def _elide(s, max_len, ellipsis='...'):
"""
Elide s in the middle to ensure it is under max_len.
That is, shorten the string, inserting an ellipsis where the removed
characters were to show that they've been removed.
"""
if len(s) > max_len:
hl = (max_len - len(ellipsis)) / 2
headl, taill = floor(hl), ceil(hl)
s = s[:headl] + ellipsis + s[-taill:]
return s
def _get_tenant_id(subscription_id):
"""
Translate the subscription ID into a tenant ID by making an unauthorized
request to the API and extracting the tenant ID from the WWW-Authenticate
header in the error response.
"""
url = ('https://management.azure.com/subscriptions/'
'{}?api-version=2018-03-01-01.6.1'.format(subscription_id))
try:
urlopen(url)
log_err('Error getting tenant ID: did not get "unauthorized" response')
return None
except HTTPError as e:
if 'WWW-Authenticate' not in e.headers:
log_err('Error getting tenant ID: missing WWW-Authenticate header')
return None
www_auth = e.headers['WWW-Authenticate']
match = re.search(r'authorization_uri="[^"]*/([^/"]*)"', www_auth)
if not match:
log_err('Error getting tenant ID: unable to find in {}', www_auth)
return None
return match.group(1)
def _azure(cmd, *args, return_stderr=False):
"""
Call the azure-cli tool.
"""
cmd = ['az', cmd]
cmd.extend(args)
result = subprocess.run(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout = result.stdout.decode('utf8').strip()
stderr = result.stderr.decode('utf8').strip()
if result.returncode != 0:
raise AzureError.get(stderr)
if return_stderr:
return stderr
if stdout:
stdout = json.loads(stdout)
return stdout
def _get_msi(vm_id):
"""
Get the Managed System Identity for the VM.
"""
vm_identities = kv().get('charm.azure.vm-identities', {})
return vm_identities.get(vm_id)
def _get_role(role_name):
"""
Translate short role name into a full role name and ensure that the
custom role is loaded.
The custom roles have to be applied to a specific subscription ID, but
the subscription ID applies to the entire credential, so will almost
certainly be reused, so there's not much danger in hitting the 2k
custom role limit.
"""
known_roles = kv().get('charm.azure.roles', {})
if role_name in known_roles:
return known_roles[role_name]
sub_id = kv().get('charm.azure.sub-id')
role_file = Path('files/roles/{}.json'.format(role_name))
role_data = json.loads(role_file.read_text())
role_fullname = role_data['Name'].format(sub_id)
scope = role_data['AssignableScopes'][0].format(sub_id)
role_data['Name'] = role_fullname
role_data['AssignableScopes'][0] = scope
try:
log('Ensuring role {}', role_fullname)
_azure('role', 'definition', 'create',
'--role-definition', json.dumps(role_data))
except AzureError as e:
if 'already exists' not in e.args[0]:
raise
known_roles[role_name] = role_fullname
return role_fullname
def _assign_role(request, role):
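    # Accepts either a StandardRole member or a custom role name; "already exists"
    # errors are swallowed below, so repeated hook runs are harmless.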
if isinstance(role, StandardRole):
role = role.value
msi = _get_msi(request.vm_id)
try:
_azure('role', 'assignment', 'create',
'--assignee-object-id', msi,
'--resource-group', request.resource_group,
'--role', role)
except AlreadyExistsAzureError:
pass
| 1.742188 | 2 |
Assignment-1/Code/server3.py | pankajk22/Computer-Networks-Assignments | 0 | 5886 | import socket
import csv
import traceback
import threading
s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
usrpass={}
def openfile():
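    # Load username/password pairs from the CSV into usrpass and drop the header row.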
filename="login_credentials.csv"
with open(filename,'r')as csvfile:
csv_file = csv.reader(csvfile, delimiter=",")
for col in csv_file:
usrpass[col[0]]=col[1]
usrpass.pop("Username")
#print(usrpass)
ihost=socket.gethostname()
host=socket.gethostbyname(ihost)
iport=[]
hostfile="host.csv"
with open(hostfile,'r')as host_file:
csv_hfile = csv.reader(host_file, delimiter=",")
for row in csv_hfile:
iport.append(row[1])
port=int(iport[4])
def socketbind():
try:
s.bind(('',port))
print("Bind with host at port number : "+str(port))
s.listen(10)
print("Socket is listening!!")
except socket.error as msg:
print("Error in Binding: "+ str(msg)+"\n Retrying....")
socketbind()
def socketaccept():
conn,add=s.accept()
print("connection is established with IP : "+str(add[0])+" and Port Number : "+str(add[1]))
conn.send(bytes("1","utf-8"))
conversation(conn)
conn.close()
def conversation(conn):
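    # Repeatedly receive a username and password from the client and reply with
    # "1" on a match or "-1" otherwise.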
while True:
username=str(conn.recv(1024),"utf-8")
password=str(conn.recv(1024),"utf-8")
res=checkpass(username,password)
if res==1:
print("Valid Password!")
conn.send(bytes("1","utf-8"))
conn.send(bytes("1","utf-8"))
else:
conn.send(bytes("-1","utf-8"))
conn.send(bytes("-1","utf-8"))
# def checkusr(username):
# if username in usrpass:
# return 1
# else:
# print("Invalid Username")
# return -1
def checkpass(username,password):
if usrpass[username]==password:
return 1
else:
print("Invalid Password")
return -1
def main():
openfile()
socketbind()
socketaccept()
# count=0
# while (count<6):
# new_thread=threading.Thread(target =socketaccept)
# new_thread.start()
# count=count+1
main() | 2.953125 | 3 |
research/utils/_check_pipelines.py | joaopfonseca/research | 1 | 5887 | from itertools import product
from sklearn.base import clone
from sklearn.preprocessing import FunctionTransformer
from sklearn.model_selection import ParameterGrid
from imblearn.pipeline import Pipeline
from rlearn.utils import check_random_states
def check_pipelines(objects_list, random_state, n_runs):
"""Extract estimators and parameters grids."""
# Create random states
random_states = check_random_states(random_state, n_runs)
pipelines = []
param_grid = []
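    # Enumerate every combination of the supplied objects crossed with one random
    # state per run; duplicate pipeline names and duplicate parameter grids are skipped.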
for comb, rs in product(product(*objects_list), random_states):
name = "|".join([i[0] for i in comb])
# name, object, sub grid
comb = [
(nm, ob, ParameterGrid(sg))
if ob is not None
else (nm, FunctionTransformer(), ParameterGrid(sg))
for nm, ob, sg in comb
]
# Create estimator
if name not in [n[0] for n in pipelines]:
est = Pipeline([(nm, ob) for nm, ob, _ in comb])
pipelines.append((name, est))
# Create intermediate parameter grids
sub_grids = [
[{f"{nm}__{k}": v for k, v in param_def.items()} for param_def in sg]
for nm, obj, sg in comb
]
# Create parameter grids
for sub_grid in product(*sub_grids):
param_prefix = "" if len(comb) == 1 else f"{name}__"
grid = {"est_name": [name]}
grid.update(
{f"{param_prefix}{k}": [v] for d in sub_grid for k, v in d.items()}
)
random_states = {
f"{param_prefix}{param}": [rs]
for param in est.get_params()
if "random_state" in param
}
grid.update(random_states)
# Avoid multiple runs over pipelines without random state
if grid not in param_grid:
param_grid.append(grid)
return pipelines, param_grid
def check_pipelines_wrapper(
objects_list, wrapper, random_state, n_runs, wrapped_only=False
):
wrapper_label = wrapper[0]
wrapper_obj = wrapper[1]
wrapper_grid = wrapper[2]
estimators, param_grids = check_pipelines(objects_list, random_state, n_runs)
wrapped_estimators = [
(
f"{wrapper_label}|{name}",
clone(wrapper_obj).set_params(**{"classifier": pipeline}),
)
for name, pipeline in estimators
]
wrapped_param_grids = [
{
"est_name": [f'{wrapper_label}|{d["est_name"][0]}'],
**{
f'{wrapper_label}|{d["est_name"][0]}__classifier__{k}': v
for k, v in d.items()
if k != "est_name"
},
**{
f'{wrapper_label}|{d["est_name"][0]}__{k}': v
for k, v in wrapper_grid.items()
},
}
for d in param_grids
]
if wrapped_only:
return wrapped_estimators, wrapped_param_grids
else:
return (estimators + wrapped_estimators, param_grids + wrapped_param_grids)
| 2.359375 | 2 |
mushroom_rl/utils/plots/common_plots.py | PuzeLiu/mushroom-rl | 344 | 5888 | from mushroom_rl.utils.plots import PlotItemBuffer, DataBuffer
from mushroom_rl.utils.plots.plot_item_buffer import PlotItemBufferLimited
class RewardPerStep(PlotItemBuffer):
"""
Class that represents a plot for the reward at every step.
"""
def __init__(self, plot_buffer):
"""
Constructor.
Args:
plot_buffer (DataBuffer): data buffer to be used.
"""
title = "Step_Reward"
curves_params = [dict(data_buffer=plot_buffer)]
super().__init__(title, curves_params)
class RewardPerEpisode(PlotItemBuffer):
"""
Class that represents a plot for the accumulated reward per episode.
"""
def __init__(self, plot_buffer):
"""
Constructor.
Args:
plot_buffer (DataBuffer): data buffer to be used.
"""
title = "Episode_Reward"
curves_params = [dict(data_buffer=plot_buffer)]
super().__init__(title, curves_params)
class Actions(PlotItemBufferLimited):
"""
Class that represents a plot for the actions.
"""
def __init__(self, plot_buffers, maxs=None, mins=None):
"""
Constructor.
Args:
            plot_buffers (list): list of data buffers to be used;
maxs(list, None): list of max values of each data buffer plotted.
If an element is None, no max line is drawn;
mins(list, None): list of min values of each data buffer plotted.
If an element is None, no min line is drawn.
"""
title = "Actions"
super().__init__(title, plot_buffers, maxs=maxs, mins=mins)
class Observations(PlotItemBufferLimited):
"""
Class that represents a plot for the observations.
"""
def __init__(self, plot_buffers, maxs=None, mins=None, dotted_limits=None):
"""
Constructor.
Args:
            plot_buffers (list): list of data buffers to be used;
maxs(list, None): list of max values of each data buffer plotted.
If an element is None, no max line is drawn;
mins(list, None): list of min values of each data buffer plotted.
If an element is None, no min line is drawn.
dotted_limits (list, None): list of booleans. If True, the
corresponding limit is dotted; otherwise, it is printed as a
solid line.
"""
title = "Observations"
super().__init__(title, plot_buffers, maxs=maxs, mins=mins,
dotted_limits=dotted_limits)
class LenOfEpisodeTraining(PlotItemBuffer):
"""
Class that represents a plot for the length of the episode.
"""
def __init__(self, plot_buffer):
"""
Constructor.
Args:
plot_buffer (DataBuffer): data buffer to be used;
"""
title = "Len of Episode"
plot_params = [dict(data_buffer=plot_buffer)]
super().__init__(title, plot_params)
| 2.9375 | 3 |
libs/python-daemon-2.2.0/test/test_metadata.py | helion-security/helion | 1 | 5889 | <reponame>helion-security/helion
# -*- coding: utf-8 -*-
#
# test/test_metadata.py
# Part of ‘python-daemon’, an implementation of PEP 3143.
#
# This is free software, and you are welcome to redistribute it under
# certain conditions; see the end of this file for copyright
# information, grant of license, and disclaimer of warranty.
""" Unit test for ‘_metadata’ private module.
"""
from __future__ import (absolute_import, unicode_literals)
import collections
import errno
import functools
import json
import re
try:
# Python 3 standard library.
import urllib.parse as urlparse
except ImportError:
# Python 2 standard library.
import urlparse
import mock
import pkg_resources
import testtools.helpers
import testtools.matchers
from . import scaffold
from .scaffold import unicode
import daemon._metadata as metadata
class HasAttribute(testtools.matchers.Matcher):
""" A matcher to assert an object has a named attribute. """
def __init__(self, name):
self.attribute_name = name
def match(self, instance):
""" Assert the object `instance` has an attribute named `name`. """
result = None
if not testtools.helpers.safe_hasattr(instance, self.attribute_name):
result = AttributeNotFoundMismatch(instance, self.attribute_name)
return result
class AttributeNotFoundMismatch(testtools.matchers.Mismatch):
""" The specified instance does not have the named attribute. """
def __init__(self, instance, name):
self.instance = instance
self.attribute_name = name
def describe(self):
""" Emit a text description of this mismatch. """
text = (
"{instance!r}"
" has no attribute named {name!r}").format(
instance=self.instance, name=self.attribute_name)
return text
class metadata_value_TestCase(scaffold.TestCaseWithScenarios):
""" Test cases for metadata module values. """
expected_str_attributes = set([
'version_installed',
'author',
'copyright',
'license',
'url',
])
scenarios = [
(name, {'attribute_name': name})
for name in expected_str_attributes]
for (name, params) in scenarios:
if name == 'version_installed':
# No duck typing, this attribute might be None.
params['ducktype_attribute_name'] = NotImplemented
continue
# Expect an attribute of ‘str’ to test this value.
params['ducktype_attribute_name'] = 'isdigit'
def test_module_has_attribute(self):
""" Metadata should have expected value as a module attribute. """
self.assertThat(
metadata, HasAttribute(self.attribute_name))
def test_module_attribute_has_duck_type(self):
""" Metadata value should have expected duck-typing attribute. """
if self.ducktype_attribute_name == NotImplemented:
self.skipTest("Can't assert this attribute's type")
instance = getattr(metadata, self.attribute_name)
self.assertThat(
instance, HasAttribute(self.ducktype_attribute_name))
class YearRange_TestCase(scaffold.TestCaseWithScenarios):
""" Test cases for ‘YearRange’ class. """
scenarios = [
('simple', {
'begin_year': 1970,
'end_year': 1979,
'expected_text': "1970–1979",
}),
('same year', {
'begin_year': 1970,
'end_year': 1970,
'expected_text': "1970",
}),
('no end year', {
'begin_year': 1970,
'end_year': None,
'expected_text': "1970",
}),
]
def setUp(self):
""" Set up test fixtures. """
super(YearRange_TestCase, self).setUp()
self.test_instance = metadata.YearRange(
self.begin_year, self.end_year)
def test_text_representation_as_expected(self):
""" Text representation should be as expected. """
result = unicode(self.test_instance)
self.assertEqual(result, self.expected_text)
FakeYearRange = collections.namedtuple('FakeYearRange', ['begin', 'end'])
@mock.patch.object(metadata, 'YearRange', new=FakeYearRange)
class make_year_range_TestCase(scaffold.TestCaseWithScenarios):
""" Test cases for ‘make_year_range’ function. """
scenarios = [
('simple', {
'begin_year': "1970",
'end_date': "1979-01-01",
'expected_range': FakeYearRange(begin=1970, end=1979),
}),
('same year', {
'begin_year': "1970",
'end_date': "1970-01-01",
'expected_range': FakeYearRange(begin=1970, end=1970),
}),
('no end year', {
'begin_year': "1970",
'end_date': None,
'expected_range': FakeYearRange(begin=1970, end=None),
}),
('end date UNKNOWN token', {
'begin_year': "1970",
'end_date': "UNKNOWN",
'expected_range': FakeYearRange(begin=1970, end=None),
}),
('end date FUTURE token', {
'begin_year': "1970",
'end_date': "FUTURE",
'expected_range': FakeYearRange(begin=1970, end=None),
}),
]
def test_result_matches_expected_range(self):
""" Result should match expected YearRange. """
result = metadata.make_year_range(self.begin_year, self.end_date)
self.assertEqual(result, self.expected_range)
class metadata_content_TestCase(scaffold.TestCase):
""" Test cases for content of metadata. """
def test_copyright_formatted_correctly(self):
""" Copyright statement should be formatted correctly. """
regex_pattern = (
"Copyright © "
"\d{4}" # Four-digit year.
"(?:–\d{4})?" # Optional range dash and four-digit year.
)
regex_flags = re.UNICODE
self.assertThat(
metadata.copyright,
testtools.matchers.MatchesRegex(regex_pattern, regex_flags))
def test_author_formatted_correctly(self):
""" Author information should be formatted correctly. """
regex_pattern = (
".+ " # Name.
"<[^>]+>" # Email address, in angle brackets.
)
regex_flags = re.UNICODE
self.assertThat(
metadata.author,
testtools.matchers.MatchesRegex(regex_pattern, regex_flags))
def test_copyright_contains_author(self):
""" Copyright information should contain author information. """
self.assertThat(
metadata.copyright,
testtools.matchers.Contains(metadata.author))
def test_url_parses_correctly(self):
""" Homepage URL should parse correctly. """
result = urlparse.urlparse(metadata.url)
self.assertIsInstance(
result, urlparse.ParseResult,
"URL value {url!r} did not parse correctly".format(
url=metadata.url))
try:
FileNotFoundError
except NameError:
# Python 2 uses IOError.
FileNotFoundError = functools.partial(IOError, errno.ENOENT)
version_info_filename = "version_info.json"
def fake_func_has_metadata(testcase, resource_name):
""" Fake the behaviour of ‘pkg_resources.Distribution.has_metadata’. """
if (
resource_name != testcase.version_info_filename
or not hasattr(testcase, 'test_version_info')):
return False
return True
def fake_func_get_metadata(testcase, resource_name):
""" Fake the behaviour of ‘pkg_resources.Distribution.get_metadata’. """
if not fake_func_has_metadata(testcase, resource_name):
error = FileNotFoundError(resource_name)
raise error
content = testcase.test_version_info
return content
def fake_func_get_distribution(testcase, distribution_name):
""" Fake the behaviour of ‘pkg_resources.get_distribution’. """
if distribution_name != metadata.distribution_name:
raise pkg_resources.DistributionNotFound
if hasattr(testcase, 'get_distribution_error'):
raise testcase.get_distribution_error
mock_distribution = testcase.mock_distribution
mock_distribution.has_metadata.side_effect = functools.partial(
fake_func_has_metadata, testcase)
mock_distribution.get_metadata.side_effect = functools.partial(
fake_func_get_metadata, testcase)
return mock_distribution
@mock.patch.object(metadata, 'distribution_name', new="mock-dist")
class get_distribution_version_info_TestCase(scaffold.TestCaseWithScenarios):
""" Test cases for ‘get_distribution_version_info’ function. """
default_version_info = {
'release_date': "UNKNOWN",
'version': "UNKNOWN",
'maintainer': "UNKNOWN",
}
scenarios = [
('version 0.0', {
'test_version_info': json.dumps({
'version': "0.0",
}),
'expected_version_info': {'version': "0.0"},
}),
('version 1.0', {
'test_version_info': json.dumps({
'version': "1.0",
}),
'expected_version_info': {'version': "1.0"},
}),
('file lorem_ipsum.json', {
'test_filename': "lorem_ipsum.json",
'version_info_filename': "lorem_ipsum.json",
'test_version_info': json.dumps({
'version': "1.0",
}),
'expected_resource_name': "lorem_ipsum.json",
'expected_version_info': {'version': "1.0"},
}),
('not installed', {
'get_distribution_error': pkg_resources.DistributionNotFound(),
'expected_version_info': default_version_info,
}),
('no version_info', {
'expected_version_info': default_version_info,
}),
('wrong filename', {
'test_filename': "lorem_ipsum.json",
'test_version_info': json.dumps({
'version': "1.0",
}),
'expected_resource_name': "lorem_ipsum.json",
'expected_version_info': default_version_info,
}),
]
def setUp(self):
""" Set up test fixtures. """
super(get_distribution_version_info_TestCase, self).setUp()
self.test_args = {}
if hasattr(self, 'test_filename'):
self.test_args['filename'] = self.test_filename
if not hasattr(self, 'version_info_filename'):
self.version_info_filename = version_info_filename
if not hasattr(self, 'expected_resource_name'):
self.expected_resource_name = version_info_filename
self.mock_distribution = mock.MagicMock()
func_patcher_get_distribution = mock.patch.object(
pkg_resources, 'get_distribution')
func_patcher_get_distribution.start()
self.addCleanup(func_patcher_get_distribution.stop)
pkg_resources.get_distribution.side_effect = functools.partial(
fake_func_get_distribution, self)
def test_requests_installed_distribution(self):
""" The package distribution should be retrieved. """
expected_distribution_name = metadata.distribution_name
metadata.get_distribution_version_info(**self.test_args)
pkg_resources.get_distribution.assert_called_with(
expected_distribution_name)
def test_requests_specified_filename(self):
""" The specified metadata resource name should be requested. """
if hasattr(self, 'get_distribution_error'):
self.skipTest("No access to distribution")
metadata.get_distribution_version_info(**self.test_args)
self.mock_distribution.has_metadata.assert_called_with(
self.expected_resource_name)
def test_result_matches_expected_items(self):
""" The result should match the expected items. """
version_info = metadata.get_distribution_version_info(**self.test_args)
self.assertEqual(self.expected_version_info, version_info)
# Copyright © 2008–2018 <NAME> <<EMAIL>>
#
# This is free software: you may copy, modify, and/or distribute this work
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; version 3 of that license or any later version.
# No warranty expressed or implied. See the file ‘LICENSE.GPL-3’ for details.
# Local variables:
# coding: utf-8
# mode: python
# End:
# vim: fileencoding=utf-8 filetype=python :
| 2.421875 | 2 |
objectModel/Python/cdm/persistence/cdmfolder/types/purpose_reference.py | wheatdog/CDM | 0 | 5890 | <reponame>wheatdog/CDM
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
from typing import Union, List
from .purpose import *
from .trait_reference import TraitReference
from cdm.utilities import JObject
class PurposeReference(JObject):
def __init__(self):
super().__init__()
self.purposeReference = None # type: Union[str, Purpose]
self.appliedTraits = [] # type: List[Union[str, TraitReference]]
| 1.984375 | 2 |
text_preprocessing/normalizer.py | cyberpunk317/inverted_index | 9 | 5891 | <filename>text_preprocessing/normalizer.py
import re
from typing import Union, List
import nltk
from bs4 import BeautifulSoup
class Normalizer:
def __init__(self):
self.lemmatizer = nltk.stem.WordNetLemmatizer()
def normalize(self, x: Union[list, str]) -> List[str]:
"""
Accepts text (possibly tokenized) and makes it suitable for machine processing
"""
x = self._remove_stop_words(x)
x = self._denoise(x)
x = self._lemmatize(x)
return x
def _remove_stop_words(self, x: Union[list, str]) -> List[str]:
"""
Removes stop words from text in english
"""
if isinstance(x, str):
x = x.split(' ')
stop_words = set(nltk.corpus.stopwords.words('english'))
return [w for w in x if not w in stop_words]
def _lemmatize(self, x: Union[list, str]) -> List[str]:
"""
Lemmatizes the text, reducing words to their base form (removes inflectional endings)
"""
if isinstance(x, list):
x = ' '.join(x)
x = self.lemmatizer.lemmatize(x)
return x
def _denoise(self, x: Union[list, str]) -> str:
if isinstance(x, list):
x = ' '.join(x)
def strip_html(x):
soup = BeautifulSoup(x, "html.parser")
x = soup.get_text()
return x
def remove_between_square_brackets(x):
x = re.sub('\[[^]]*\]', '', x)
x = re.sub(r'http\S+', '', x)
return x
def remove_rating(x):
return re.sub('\W\d/\d+\S*', '', x)
x = x.lower()
x = re.sub(',|\.|!|\?', '', x)
x = strip_html(x)
x = remove_between_square_brackets(x)
x = remove_rating(x)
return x | 3.078125 | 3 |
env/lib/python3.7/site-packages/prompt_toolkit/filters/cli.py | MarcoMancha/BreastCancerDetector | 2 | 5892 | """
For backwards-compatibility, keep this file.
(Many people are going to have key bindings that rely on this file.)
"""
from __future__ import unicode_literals
from .app import *
__all__ = [
# Old names.
'HasArg',
'HasCompletions',
'HasFocus',
'HasSelection',
'HasValidationError',
'IsDone',
'IsReadOnly',
'IsMultiline',
'RendererHeightIsKnown',
'InEditingMode',
'InPasteMode',
'ViMode',
'ViNavigationMode',
'ViInsertMode',
'ViInsertMultipleMode',
'ViReplaceMode',
'ViSelectionMode',
'ViWaitingForTextObjectMode',
'ViDigraphMode',
'EmacsMode',
'EmacsInsertMode',
'EmacsSelectionMode',
'IsSearching',
'HasSearch',
'ControlIsSearchable',
]
# Keep the original classnames for backwards compatibility.
HasValidationError = lambda: has_validation_error
HasArg = lambda: has_arg
IsDone = lambda: is_done
RendererHeightIsKnown = lambda: renderer_height_is_known
ViNavigationMode = lambda: vi_navigation_mode
InPasteMode = lambda: in_paste_mode
EmacsMode = lambda: emacs_mode
EmacsInsertMode = lambda: emacs_insert_mode
ViMode = lambda: vi_mode
IsSearching = lambda: is_searching
HasSearch = lambda: is_searching
ControlIsSearchable = lambda: control_is_searchable
EmacsSelectionMode = lambda: emacs_selection_mode
ViDigraphMode = lambda: vi_digraph_mode
ViWaitingForTextObjectMode = lambda: vi_waiting_for_text_object_mode
ViSelectionMode = lambda: vi_selection_mode
ViReplaceMode = lambda: vi_replace_mode
ViInsertMultipleMode = lambda: vi_insert_multiple_mode
ViInsertMode = lambda: vi_insert_mode
HasSelection = lambda: has_selection
HasCompletions = lambda: has_completions
IsReadOnly = lambda: is_read_only
IsMultiline = lambda: is_multiline
HasFocus = has_focus # No lambda here! (Has_focus is callable that returns a callable.)
InEditingMode = in_editing_mode
| 1.320313 | 1 |
genetic/spaces.py | shilpasayura/bk | 4 | 5893 | <filename>genetic/spaces.py<gh_stars>1-10
#spaces.py
'''
AlgoHack Genetic Algorithm for University Semaster Planning
Version 0.03 2018
<NAME> Sh<EMAIL>
'''
import xdb
def crt_spaces_table(cursor,drop=False):
if (drop):
sql="DROP TABLE IF EXISTS spaces;"
success, count=xdb.runSQL(cursor, sql)
sql='''CREATE TABLE IF NOT EXISTS spaces (
spid INTEGER PRIMARY KEY AUTOINCREMENT,
name varchar(30),
sptype INTEGER,
fitness INTEGER,
gid INTEGER DEFAULT 0,
semid INTEGER DEFAULT 0)
'''
success, count=xdb.runSQL(cursor, sql)
return success
def insert_spaces(cursor,nlect,nlabs,gid,semid, delay):
# nlabs is number of labs
# nlecs is number of lecture halls
# if gid =0 common for all groups else dedicated
# if semid=0 common for all semesters else dedicated
sql="SELECT * FROM spaces LIMIT 1";
success, count=xdb.runSQL(cursor, sql)
if (count > 0):
print("spaces table: Records exist")
return False, 0
sqls=""
fitness=1
for i in range (nlect):
name="Lect Hall " + str(i+1)
sptype=1
sqls=sqls +'INSERT INTO spaces (name,sptype,fitness,gid,semid) VALUES ('+ '"{}",{}, {},{},{}'.format(name, sptype,fitness,gid,semid) +');'
for i in range (nlabs):
name="Lab " + str(i+1)
sptype=2
sqls=sqls +'INSERT INTO spaces (name,sptype,fitness,gid,semid) VALUES ('+ '"{}",{}, {},{},{}'.format(name, sptype,fitness,gid,semid) +');'
success, count=xdb.runSQL_stmts(cursor, sqls,delay)
return success, count
if __name__ == "__main__":
delay=0.05
conn=xdb.opendb('genetic56.db')
cursor =conn.cursor() # create a cursor object
success=crt_spaces_table(cursor, True) # create spaces table
#dedicated lecture hall, lab for group and semaster
success, count =insert_spaces(cursor,1,1,1,1,delay) # generate records
xdb.commit(conn)
xdb.closedb(conn)
| 2.640625 | 3 |
threaded_remote_pi_camera.py | hyansuper/flask-video-streaming | 7 | 5894 | <reponame>hyansuper/flask-video-streaming
import urllib.request
import cv2
import numpy as np
import time
import threading
class ThreadedRemotePiCamera:
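# Descriptive note (added for clarity): this class opens the remote Flask '/video_feed'
# MJPEG endpoint in a background thread and extracts frames by locating the JPEG
# start/end markers in the byte stream (see get_frame below).
# Illustrative usage (host and resolution are assumptions):
#   with ThreadedRemotePiCamera('192.168.1.10', resolution=(320, 240), framerate=10) as cam:
#       for frame in cam:
#           pass  # 'frame' is a decoded BGR numpy array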
def __init__(self, pi_address, resolution=(320,240), framerate=10, hflip=False, vflip=False):
if hflip and vflip:
self.flip = -1
elif hflip:
self.flip = 0
elif vflip:
self.flip = 1
else:
self.flip = None
self.stream = urllib.request.urlopen('http://%s:5000/video_feed?w=%d&h=%d&fps=%d' % ((pi_address,)+resolution+(framerate,)))
self.total_bytes = b''
self.ev = threading.Event()
self.th = threading.Thread(target=self.run, daemon=True)
self.running = True
self.frame = None
self.th.start()
def run(self):
while self.running:
self.frame = self.get_frame()
self.ev.set()
self.stream.close()
def read(self):
'''
while self.frame is None:
time.sleep(.1)
f = self.frame
self.frame = None
return f
'''
self.ev.wait()
self.ev.clear()
return self.frame
def get_frame(self):
while True:
self.total_bytes += self.stream.read(1024)
end = self.total_bytes.find(b'\xff\xd9') # JPEG end
if not end == -1:
start = self.total_bytes.find(b'\xff\xd8') # JPEG start
jpg = cv2.imdecode(np.frombuffer(self.total_bytes[start: end+2], dtype=np.uint8), cv2.IMREAD_COLOR)  # frombuffer replaces the deprecated fromstring
if self.flip is not None:
jpg = cv2.flip(jpg, self.flip)
self.total_bytes = self.total_bytes[end+2:]
return jpg
def release(self):
self.running = False
self.th.join()
def frames(self):
while True:
yield self.read()
def __iter__(self):
return self.frames()
def __enter__(self):
return self
def __exit__(self, *args):
self.release()
def __del__(self):
self.release()
| 2.796875 | 3 |
scheduler/misc/Ec2SpotCustomScheduler_jan19.py | jalawala/custom-kubernetes-scheduler | 4 | 5895 | #! /usr/bin/python3
import time
import random
import json
import os
from pprint import pprint
from kubernetes.client.rest import ApiException
from pint import UnitRegistry
from collections import defaultdict
from kubernetes import client, config, watch
from timeloop import Timeloop
from datetime import timedelta
config.load_kube_config()
#config.load_incluster_config()
# doing this computation within a k8s cluster
#k8s.config.load_incluster_config()
core_api = client.CoreV1Api()
apis_api = client.AppsV1Api()
#sdclient = SdcClient(<Your Sysdig API token>)
sysdig_metric = "net.http.request.time"
metrics = [{ "id": sysdig_metric, "aggregations": { "time": "timeAvg", "group": "avg" } }]
#scheduler_name = "Ec2SpotK8sScheduler"
CustomSchedulerName ='K8SCustomScheduler'
ureg = UnitRegistry()
ureg.load_definitions('kubernetes_units.txt')
pendingPodsList = []
failedPodsList = []
runningPodsList =[]
nodesListPerNodeLabel = {}
Q_ = ureg.Quantity
def scheduler(name, node, namespace):
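# Core of the custom scheduler: bind an already-created (pending) pod to the chosen node
# by POSTing a V1Binding for it. _preload_content=False is presumably used to avoid
# response-deserialization issues in the Python client (an assumption, not documented in
# the original source).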
target=client.V1ObjectReference(api_version='v1', kind="Node", name=node)
meta=client.V1ObjectMeta()
meta.name=name
body=client.V1Binding(metadata=meta, target=target)
return core_api.create_namespaced_binding(namespace, body, _preload_content=False)
#tl = Timeloop()
#@tl.job(interval=timedelta(seconds=10))
def RunEc2SpotCustomScheduler():
#global pendingPodsList
#global failedPodsList
CustomKubeSchedulingClusterDeploymentData = get_custom_deployments()
pprint("CustomKubeSchedulingClusterDeploymentData={}".format(CustomKubeSchedulingClusterDeploymentData))
for namespace, deploymentCustomSchedulingData in CustomKubeSchedulingClusterDeploymentData.items():
print("namespace={} deploymentCustomSchedulingData={}".format(namespace, deploymentCustomSchedulingData))
if deploymentCustomSchedulingData != {}:
CustomSchedulePerNamespace(namespace, deploymentCustomSchedulingData)
def CustomSchedulePerNamespace(namespace, deploymentCustomSchedulingData):
global runningPodsList
global pendingPodsList
global failedPodsList
global nodesListPerNodeLabel
print("namespace={} deploymentCustomSchedulingData={}".format(namespace, deploymentCustomSchedulingData))
#exit(0)
#namespace = 'default'
#lifecycleList = ['OnDemand', 'Ec2Spot']
for deploymentName, CustomSchedulingData in deploymentCustomSchedulingData.items():
print("deploymentName={} CustomSchedulingData={}".format(deploymentName, CustomSchedulingData))
#exit(0)
#podsList = getPodsListForDeployment(namespace, deploymentName)
runningPodsList = []
pendingPodsList = []
failedPodsList =[]
getPodsListForDeployment(namespace, deploymentName)
NumOfPodsRunning = len (runningPodsList)
NumOfPodsPending = len (pendingPodsList)
NumOfPodsFailed = len (failedPodsList)
#print("NumOfPodsRunning={} runningPodsList={}".format(NumOfPodsRunning, runningPodsList))
#print("NumOfPodsPending={} pendingPodsList={}".format(NumOfPodsPending, pendingPodsList))
#print("NumOfPodsFailed={} failedPodsList={}".format(NumOfPodsFailed, failedPodsList))
get_node_available_nodes_list(CustomSchedulingData)
for i, p in enumerate (runningPodsList):
    pprint("i={} running pod_name={} node_name={}".format(i, p['name'], p['node_name']))
for i, p in enumerate (pendingPodsList):
    pprint("i={} pending pod_name={}".format(i, p['name']))
for i, p in enumerate (failedPodsList):
    pprint("i={} failed pod_name={}".format(i, p['name']))
#print("nodeLabel={} NumOfAlreadyRunningPods={}".format(nodeLabel, NumOfAlreadyRunningPods))
print("lifecycle={} NumOfNodes={}".format(lifecycle, len(NodesList)))
for nodeLabel, in NodesList.keys():
pprint("node_name={}".format(n))
#exit(0)
#runningPodsList = podsList['runningPodsList']
#pendingPodsList = podsList['pendingPodsList']
#failedPodsList = podsList['failedPodsList']
for nodeLabel, numOfReplicas in CustomSchedulingData.items():
print("Scheduling numOfReplicas={} on nodeLabel={}".format(numOfReplicas, nodeLabel))
#pprint(podsList)
#lifecycle = 'OnDemand'
#NodesList = get_node_available_nodes_list(lifecycle)
#pprint(NodesList)
NumOfPodsRunningAlready = 0
podsAlreadyRunningOnNodeLabelList = []
for podRunning in runningPodsList:
if podRunning['node_name'] in nodesListPerNodeLabel[nodeLabel].keys():
podsAlreadyRunningOnNodeLabelList.append(podRunning)
NumOfAlreadyRunningPods = len (podsAlreadyRunningOnNodeLabelList)
for i, p in enumerate (podsAlreadyRunningOnNodeLabelList):
pprint("running pod i={} nodeLabel={} node_name={} name={}".format(i,nodeLabel, p['node_name'], p['name']))
if NumOfAlreadyRunningPods == numOfReplicas:
    print("NumOfAlreadyRunningPods == numOfReplicas = {}. So no need to Schedule".format(NumOfAlreadyRunningPods))
elif NumOfAlreadyRunningPods < numOfReplicas:
    NumOfPodsToBeScheduled = numOfReplicas - NumOfAlreadyRunningPods
    try:
        schedulePods(NumOfPodsToBeScheduled, nodesListPerNodeLabel[nodeLabel])
    except Exception as e:
        pprint(e)
elif NumOfAlreadyRunningPods > numOfReplicas:
    NumOfPodsToDeleted = NumOfAlreadyRunningPods - numOfReplicas
try:
deletePods(NumOfPodsToDeleted, podsAlreadyRunningOnNodeLabelList)
except Exception as e:
pprint(e)
pendingPodsList = []
NumOfPodsFailed = []
#pprint(podsList)
#lifecycle = 'OnDemand'
#lifecycle = 'Ec2Spot'
#get_node_available_nodes_list(lifecycle)
def deletePods(NumOfPodsToDeleted, podsAlreadyRunningOnNodeLabelList):
namespace = 'default'
for i in range(0, NumOfPodsToDeleted):
pod = podsAlreadyRunningOnNodeLabelList[i]
grace_period_seconds = 30
body = client.V1DeleteOptions()
#body = {}
pprint("deletePods i={} pod={} NumOfPodsToDeleted={}".format(i, pod['name'], NumOfPodsToDeleted ))
response = core_api.delete_namespaced_pod(name=pod['name'], namespace=namespace, grace_period_seconds=grace_period_seconds, body=body)
pprint(response)
def schedulePods(NumOfPodsToBeScheduled, NodesList):
global pendingPodsList
global failedPodsList
namespace = 'default'
if NumOfPodsToBeScheduled > len(pendingPodsList):
pprint("schedulePods NumOfPodsToBeScheduled={} is greater than number of pending pods={}. So skipping schedulePods".format(NumOfPodsToBeScheduled, len(pendingPodsList)))
return
for i in range(NumOfPodsToBeScheduled):
pod = pendingPodsList[0]
print("schedulePods Trying to schedule i={} NumOfPodsToBeScheduled={} pod={} with cpu_req={} mem_req={}".format(i, NumOfPodsToBeScheduled, pod['name'], pod['cpu_req'], pod['mem_req']))
for node, stats in NodesList.items():
print("schedulePods Checking for free resources on node={} with cpu_free={} mem_free={}".format(node, stats['cpu_free'], stats['mem_free']))
#pprint(node)
if pod['cpu_req'] <= stats['cpu_free'] and pod['mem_req'] <= stats['mem_free']:
print("schedulePods scheduling pod={} onto the node={}".format(pod['name'], node))
res = scheduler(pod['name'], node, namespace)
pprint(res)
stats['cpu_free'] = stats['cpu_free'] - pod['cpu_req']
stats['mem_free'] = stats['mem_free'] - pod['mem_req']
pendingPodsList.remove(pod)
break
def getPodsListForDeployment(namespace, deploymentName):
#global pendingPodsList
#runningPodsList =[]
#failedPodsList =[]
#podsList = {}
#namespace='default'
#name='Ec2SpotK8sScheduler'
#field_selector = ("spec.scheduler_name=" + CustomSchedulerName)
field_selector = ("spec.schedulerName=" + CustomSchedulerName)
pods = core_api.list_namespaced_pod(namespace=namespace, field_selector=field_selector).to_dict()
#pods = core_api.list_namespaced_pod(namespace=namespace).to_dict()
#print("pods={}".format(pods))
for pod in pods['items']:
#pprint(pod)
#print("node_name={}".format(pod['spec']['node_name']))
#return ""
stats = {}
cpureqs,cpulmts,memreqs,memlmts = [], [], [], []
if deploymentName in pod['metadata']['name'] and pod['spec']['scheduler_name'] == CustomSchedulerName:
for container in pod['spec']['containers']:
res = container['resources']
reqs = defaultdict(lambda: 0, res['requests'] or {})
lmts = defaultdict(lambda: 0, res['limits'] or {})
cpureqs.append(Q_(reqs["cpu"]))
memreqs.append(Q_(reqs["memory"]))
cpulmts.append(Q_(lmts["cpu"]))
memlmts.append(Q_(lmts["memory"]))
stats["cpu_req"] = sum(cpureqs)
stats["cpu_lmt"] = sum(cpulmts)
stats["mem_req"] = sum(memreqs)
stats["mem_lmt"] = sum(memlmts)
stats["name"] = pod['metadata']['name']
stats["status"] = pod['status']['phase']
if stats["status"] == 'Pending':
pendingPodsList.append(stats)
elif stats["status"] == 'Running':
stats["node_name"] = pod['spec']['node_name']
runningPodsList.append(stats)
elif stats["status"] == 'Failed':
failedPodsList.append(stats)
#podsList['pendingPodsList'] = pendingPodsList
#podsList['runningPodsList'] = runningPodsList
#podsList['failedPodsList'] = failedPodsList
#pprint(podsList)
#pprint("pendingPodsList={} runningPodsList={} failedPodsList={}".format(runningPodsList, runningPodsList, failedPodsList )
#return pendingPodsList,runningPodsList,failedPodsList
#return podsList
def get_custom_deployments():
CustomKubeSchedulingClusterDeploymentData = {}
#namespaceList =[]
namespacedataList = core_api.list_namespace().to_dict()['items']
for namespaceData in namespacedataList:
namespace = namespaceData['metadata']['name']
CustomKubeSchedulingClusterDeploymentData[namespace] = get_custom_deployments_per_namespace(namespace)
#namespaceList.append(name)
print("CustomKubeSchedulingClusterDeploymentData={}".format(CustomKubeSchedulingClusterDeploymentData))
return CustomKubeSchedulingClusterDeploymentData
def get_custom_deployments_per_namespace(namespace):
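# Descriptive note (inferred from the checks below, not from upstream docs): a Deployment
# opts in to this scheduler via metadata annotations, e.g. (values are illustrative):
#   annotations:
#     UseCustomKubeScheduler: "true"
#     CustomPodScheduleStrategy: "nodesize=small,base=2,weight=1:nodesize=large,weight=3"
# and its pod template must set schedulerName to 'K8SCustomScheduler' so the pods stay
# Pending for this script to bind (see getPodsListForDeployment above).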
#CustomKubeSchedulingDeploymentData = []
CustomKubeSchedulingDeploymentData = {}
#namespace='default'
#name = 'nginx'
name = '1'
#field_selector = ("metadata.name=" + name)
field_selector = ("metadata.annotations.OnDemandBase=" + name)
# get deployment by namespace
#resp = apis_api.list_namespaced_deployment(namespace=namespace, field_selector=field_selector)
resp = apis_api.list_namespaced_deployment(namespace=namespace)
for deployment in resp.items:
#pprint(deployment.metadata.annotations)
#pprint(deployment)
deploymentData = {}
CustomPodScheduleStrategy = {}
annotations = deployment.metadata.annotations
if 'UseCustomKubeScheduler' in annotations.keys():
if annotations['UseCustomKubeScheduler'] == 'true':
deploymentName = deployment.metadata.name
numOfReplicas = deployment.spec.replicas
#deploymentData[deploymentName] = deployment.metadata.name
Strategy = annotations['CustomPodScheduleStrategy']
#deploymentData['pod_replicas'] = deployment.spec.replicas
#deploymentData['CustomPodScheduleStrategy'] = get_pods_custom_pod_schedule_strategy(Strategy, deployment.spec.replicas)
CustomKubeSchedulingDeploymentData[deploymentName] = get_pods_custom_pod_schedule_strategy(Strategy, numOfReplicas)
#deploymentData['NumOfOnDemandPodsToBeRunning'] = int (deploymentData['OnDemandBase'] + (deploymentData['pod_replicas'] - deploymentData['OnDemandBase']) * deploymentData['OnDemandAbovePercentage'] / 100)
#deploymentData['NumOfSpotPodsToBeRunning'] = deploymentData['pod_replicas'] - deploymentData['NumOfOnDemandPodsToBeRunning']
#CustomKubeSchedulingDeploymentData.append(deploymentData)
return CustomKubeSchedulingDeploymentData
#print("OnDemandBase={}, OnDemandAbovePercentage={} SpotASGName={} OnDemandASGName={} pod_replicas={} NumOfOnDemandPods={} NumOfSpotPods={}".format(OnDemandBase, OnDemandAbovePercentage, SpotASGName, OnDemandASGName, pod_replicas, NumOfOnDemandPods, NumOfSpotPods))
def get_pods_custom_pod_schedule_strategy(Strategy, numOfReplicas):
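# Descriptive note (inferred from the parsing below, not from any upstream documentation):
# 'Strategy' is expected to be a ':'-separated list of per-node-label entries, each of
# which is a ','-separated list containing an optional 'base=<int>' (allowed on at most
# one entry), a 'weight=<int>', and one '<labelKey>=<labelValue>' pair identifying the
# node group, e.g. "nodesize=small,base=2,weight=1:nodesize=large,weight=3" (label name
# assumed for illustration).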
print("Strategy={} numOfReplicas={}".format(Strategy, numOfReplicas))
CustomPodScheduleStrategy = {}
nodeLabelToReplicas = {}
nodeLabelToWights = {}
totalWeight = 0
StrategyList = Strategy.split(':')
print("StrategyList={}".format(StrategyList))
numOfBaseValues = 0
for nodeStrategy in StrategyList:
print("nodeStrategy: {}".format(nodeStrategy))
nodeStrategyPartsList = nodeStrategy.split(',')
base = 0
weight = 0
nodeLabel = ''
for nodeStrategyPart in nodeStrategyPartsList:
nodeStrategySubPartList = nodeStrategyPart.split('=')
if nodeStrategySubPartList[0] == 'base':
if numOfBaseValues != 0:
print("base value cannot be non-zero for more than node strategy")
exit(1)
else:
numOfBaseValues += 1
base = int(nodeStrategySubPartList[1])
if base <= numOfReplicas:
numOfReplicas -= base
else:
base = numOfReplicas
numOfReplicas = 0
print("base={}".format(nodeStrategySubPartList[1]))
elif nodeStrategySubPartList[0] == 'weight':
weight = int(nodeStrategySubPartList[1])
totalWeight += weight
print("weight={}".format(weight))
else:
nodeLabel = nodeStrategyPart
print("label key={} value={}".format(nodeStrategySubPartList[0], nodeStrategySubPartList[1]))
#nodeLabelToReplicas [nodeLabel] = base
nodeLabelToWights [nodeLabel] = weight
CustomPodScheduleStrategy [nodeLabel] = base
print("nodeLabelToReplicas={} nodeLabelToWights={}".format(nodeLabelToReplicas, nodeLabelToWights))
print("numOfBaseValues = {} totalWeight={} numOfReplicas={}".format(numOfBaseValues, totalWeight, numOfReplicas))
print("CustomPodScheduleStrategy = {}".format(CustomPodScheduleStrategy))
totalNumOfLables = len (CustomPodScheduleStrategy)
labelNum = 0
for key, replicas in CustomPodScheduleStrategy.items():
weight = nodeLabelToWights[key]
print("key: {} replicas={} weight={}, totalWeight={}".format(key, replicas, weight, totalWeight))
if labelNum == totalNumOfLables - 1:
weightReplicas = numOfReplicas
replicas = replicas + weightReplicas
else:
weightReplicas = int (numOfReplicas * (weight/totalWeight))
replicas = replicas + weightReplicas
labelNum += 1
numOfReplicas -= weightReplicas
print("weightReplicas: {} replicas={} labelNum={}, numOfReplicas={}".format(weightReplicas, replicas, labelNum, numOfReplicas))
CustomPodScheduleStrategy[key] = replicas
print("CustomPodScheduleStrategy = {}".format(CustomPodScheduleStrategy))
print("numOfBaseValues = {} totalWeight={} numOfReplicas={}".format(numOfBaseValues, totalWeight, numOfReplicas))
return CustomPodScheduleStrategy
__all__ = ["get_node_available_nodes_list"]
def get_node_available_nodes_list(CustomSchedulingData):
global nodesListPerNodeLabel
#data = []
#data = {}
for nodeLabel in CustomSchedulingData.keys():
nodesListPerNodeLabel[nodeLabel] = {}
nodeLabelParts = nodeLabel.split('=')
nodeLabelKey = nodeLabelParts[0]
nodeLabelValue = nodeLabelParts[1]
#selector = "metadata.labels."+nodeLabelParts[0]+"="+nodeLabelParts[1]
#selector = "metadata.labels.nodesize="+nodeLabelParts[1]
#print("selector={}".format(selector))
#name = 'ip-192-168-73-104.ec2.internal'
#selector = "metadata.name"+"="+name
#print("selector={}".format(selector))
#field_selector = (selector)
#resp = core_api.list_node(field_selector=field_selector).to_dict()['items']
#pprint("resp={}".format(resp))
#exit(0)
availableNodesData = {}
for node in core_api.list_node().to_dict()['items']:
#pprint(node)
node_labels = node['metadata']['labels']
if nodeLabelKey in node_labels.keys():
if node_labels[nodeLabelKey] == nodeLabelValue:
stats = {}
node_name = node['metadata']['name']
allocatable = node['status']['allocatable']
max_pods = int(int(allocatable["pods"]) * 1.5)
field_selector = ("status.phase!=Succeeded,status.phase!=Failed," +
"spec.nodeName=" + node_name)
stats["cpu_alloc"] = Q_(allocatable["cpu"])
stats["mem_alloc"] = Q_(allocatable["memory"])
#stats["lifecycle"] = lifecycle
pods = core_api.list_pod_for_all_namespaces(limit=max_pods,
field_selector=field_selector).to_dict()['items']
# compute the allocated resources
cpureqs,cpulmts,memreqs,memlmts = [], [], [], []
for pod in pods:
#pprint(pod)
for container in pod['spec']['containers']:
res = container['resources']
reqs = defaultdict(lambda: 0, res['requests'] or {})
lmts = defaultdict(lambda: 0, res['limits'] or {})
cpureqs.append(Q_(reqs["cpu"]))
memreqs.append(Q_(reqs["memory"]))
cpulmts.append(Q_(lmts["cpu"]))
memlmts.append(Q_(lmts["memory"]))
stats["cpu_req"] = sum(cpureqs)
stats["cpu_lmt"] = sum(cpulmts)
stats["cpu_req_per"] = (stats["cpu_req"] / stats["cpu_alloc"] * 100)
stats["cpu_lmt_per"] = (stats["cpu_lmt"] / stats["cpu_alloc"] * 100)
stats["mem_req"] = sum(memreqs)
stats["mem_lmt"] = sum(memlmts)
stats["mem_req_per"] = (stats["mem_req"] / stats["mem_alloc"] * 100)
stats["mem_lmt_per"] = (stats["mem_lmt"] / stats["mem_alloc"] * 100)
stats["cpu_free"] = stats["cpu_alloc"] - stats["cpu_req"]
stats["mem_free"] = stats["mem_alloc"] - stats["mem_req"]
#stats["name"] = node['metadata']['name']
#data.append(stats)
availableNodesData[node_name] = stats
nodesListPerNodeLabel[nodeLabel] = availableNodesData
#print(nodesListPerNodeLabel)
#for nodeLabel, availableNodesData in nodesListPerNodeLabel.items():
#print("nodeLabel={} availableNodesData={}".format(nodeLabel, availableNodesData))
#exit(0)
#pprint(data)
return nodesListPerNodeLabel
if __name__ == '__main__':
#ready_nodes = nodes_available()
#pprint(ready_nodes)
#name='review-v1-787d8fbfbb-ltdzt'
node='ip-10-0-3-253.ec2.internal'
#namespace='ecommerce'
#ret=scheduler(name, node, namespace)
#pprint(ret)
#main()
#test()
#testpod()
#check_node_resources(node)
#RunEc2SpotCustomScheduler()
#getPodsListForDeployment(' ')
#lifecycle = 'OnDemand'
#lifecycle = 'Ec2Spot'
#get_node_available_nodes_list(lifecycle)
#RunEc2SpotCustomScheduler()
#NumOfPodsToDeleted = 1
#podsAlreadyRunningOnNodeLabelList = []
#d ={'name':'nginx-66cb875766-vx6bp'}
#podsAlreadyRunningOnNodeLabelList.append(d)
#deletePods(NumOfPodsToDeleted, podsAlreadyRunningOnNodeLabelList)
#deploymentName='nginx'
#deploymentName = 'kube-ops-view'
#getPodsListForDeployment(deploymentName)
#testlist()
#tl.start(block=True)
while True:
RunEc2SpotCustomScheduler()
time.sleep(10)
| 2.046875 | 2 |
local/utils/validate_label_locale.py | DewiBrynJones/docker-deepspeech-cy | 3 | 5896 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from clean_transcript import clean_transcript
ALPHABET_FILE_PATH = "/DeepSpeech/bin/bangor_welsh/alphabet.txt"
def validate_label(label):
clean = clean_transcript(ALPHABET_FILE_PATH)
cleaned, transcript = clean.clean(label)
if cleaned:
return transcript.lower()
return None
| 3.015625 | 3 |
src/models/nn/adaptive_softmax.py | dumpmemory/state-spaces | 513 | 5897 | # Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional
import functools
import torch
import torch.nn as nn
import torch.nn.functional as F
class OptionalParameterList(nn.ParameterList):
def extra_repr(self):
child_lines = []
for k, p in self._parameters.items():
if p is not None:
size_str = 'x'.join(str(size) for size in p.size())
device_str = '' if not p.is_cuda else ' (GPU {})'.format(p.get_device())
parastr = 'Parameter containing: [{} of size {}{}]'.format(
torch.typename(p), size_str, device_str)
child_lines.append(' (' + str(k) + '): ' + parastr)
tmpstr = '\n'.join(child_lines)
return tmpstr
class ProjectedAdaptiveLogSoftmax(nn.Module):
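# Descriptive note (added for clarity; based on the code below, not on external docs):
# `cutoffs` split the vocabulary into a head "shortlist" plus tail clusters, `div_val`
# shrinks the embedding width of each successive tail cluster by that factor, and
# `tie_projs` marks which cluster projections are shared with the input embedding
# projections (`out_projs`). The forward pass returns the mean negative log-likelihood.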
def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1,
tie_projs=None, out_layers_weights=None, out_projs=None,
keep_order=False,
bias_scale=0.0,
dropout=0.0,
):
super().__init__()
self.n_token = n_token
self.d_embed = d_embed
self.d_proj = d_proj
self.cutoffs = list(cutoffs) + [n_token]
self.cutoff_ends = [0] + self.cutoffs
self.div_val = div_val
self.shortlist_size = self.cutoffs[0]
self.n_clusters = len(self.cutoffs) - 1
self.head_size = self.shortlist_size + self.n_clusters
# [21-09-15 AG]: bake the first False into the definition, just as [0] is built into the cutoffs
if tie_projs is None: tie_projs = []
elif isinstance(tie_projs, bool): tie_projs = [tie_projs] * len(cutoffs)
else: tie_projs = list(tie_projs)
tie_projs = [False] + tie_projs
self.tie_projs = tie_projs
if self.n_clusters > 0:
self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))
if not out_layers_weights:
self.out_layers_weights = nn.ParameterList()
else:
self.out_layers_weights = out_layers_weights
self.out_layers_biases = nn.ParameterList()
self.shared_out_projs = out_projs
self.out_projs = OptionalParameterList()
self.dropout = dropout
self.drop = nn.Dropout(dropout)
if div_val == 1:
if d_proj != d_embed:
for i in range(len(self.cutoffs)):
if tie_projs[i]:
self.out_projs.append(None)
else:
self.out_projs.append(
nn.Parameter(torch.zeros(d_proj, d_embed))
)
else:
# self.out_projs = [None] * len(self.cutoffs)
self.out_projs.append(None)
self.out_layers_biases.append(
nn.Parameter(torch.zeros(n_token))
)
if not out_layers_weights:
self.out_layers_weights.append(
nn.Parameter(torch.zeros(n_token, d_embed))
)
else:
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i+1]
d_emb_i = d_embed // (div_val ** i)
if tie_projs[i]:
self.out_projs.append(None)
else:
self.out_projs.append(
nn.Parameter(torch.zeros(d_proj, d_emb_i))
)
self.out_layers_biases.append(
nn.Parameter(torch.zeros(r_idx - l_idx))
)
if not out_layers_weights:
self.out_layers_weights.append(
nn.Parameter(torch.zeros(r_idx - l_idx, d_emb_i))
)
for bias in self.out_layers_biases:
bound = bias_scale * d_proj ** -.5
nn.init.uniform_(bias, -bound, bound)
self.keep_order = keep_order
def _compute_logit(self, hidden, weight, bias, proj):
if proj is None:
logit = F.linear(hidden, weight, bias=bias)
else:
if self.dropout > 0.0:
logit = hidden @ proj
logit = self.drop(logit)
logit = logit @ weight.t()
else:
logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
if bias is not None:
logit = logit + bias
return logit
def get_out_proj(self, i):
if self.tie_projs[i]:
if len(self.shared_out_projs) == 0:
return None
elif len(self.shared_out_projs) == 1:
return self.shared_out_projs[0]
else:
return self.shared_out_projs[i]
else:
return self.out_projs[i]
def forward(self, hidden, target, keep_order=False, key_padding_mask=None, *args, **kwargs):
# [21-09-15 AG]: TODO may need to handle key_padding_mask
'''
hidden :: [len*bsz x d_proj]
target :: [len*bsz]
'''
hidden = hidden.reshape(-1, hidden.size(-1))
target = target.reshape(-1)
if hidden.size(0) != target.size(0):
print(hidden.shape, target.shape)
raise RuntimeError('Input and target should have the same size '
'in the batch dimension.')
if self.n_clusters == 0:
logit = self._compute_logit(hidden, self.out_layers_weights[0],
self.out_layers_biases[0], self.get_out_proj(0))
nll = -F.log_softmax(logit, dim=-1) \
.gather(1, target.unsqueeze(1)).squeeze(1)
else:
# construct weights and biases
weights, biases = [], []
for i in range(len(self.cutoffs)):
if self.div_val == 1:
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
weight_i = self.out_layers_weights[0][l_idx:r_idx]
bias_i = self.out_layers_biases[0][l_idx:r_idx]
else:
weight_i = self.out_layers_weights[i]
bias_i = self.out_layers_biases[i]
if i == 0:
weight_i = torch.cat(
[weight_i, self.cluster_weight], dim=0)
bias_i = torch.cat(
[bias_i, self.cluster_bias], dim=0)
weights.append(weight_i)
biases.append(bias_i)
head_weight, head_bias, head_proj = weights[0], biases[0], self.get_out_proj(0)
head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
head_logprob = F.log_softmax(head_logit, dim=1)
nll = torch.zeros_like(target, dtype=hidden.dtype, device=hidden.device)
offset = 0
cutoff_values = [0] + self.cutoffs
for i in range(len(cutoff_values) - 1):
l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
mask_i = (target >= l_idx) & (target < r_idx)
indices_i = mask_i.nonzero(as_tuple=False).squeeze()
if indices_i.numel() == 0:
continue
target_i = target.index_select(0, indices_i) - l_idx
head_logprob_i = head_logprob.index_select(0, indices_i)
if i == 0:
logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
else:
weight_i, bias_i, proj_i = weights[i], biases[i], self.get_out_proj(i)
hidden_i = hidden.index_select(0, indices_i)
tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
tail_logprob_i = F.log_softmax(tail_logit_i, dim=1)
logprob_i = head_logprob_i[:, -i] \
+ tail_logprob_i.gather(1, target_i[:, None]).squeeze(1)
if self.keep_order or keep_order:
nll.index_copy_(0, indices_i, -logprob_i)
else:
nll[offset:offset+logprob_i.size(0)].copy_(-logprob_i)
offset += logprob_i.size(0)
return nll.mean() # TODO maybe cases for length or padding_mask
class AdaptiveEmbedding(nn.Module):
""" Copy of transformers.AdaptiveEmbedding that works with fp16 by replacing the index_put_ operation
Initialization has been fixed for the case when d_proj = d_embed
"""
def __init__(self, n_token, d_embed, d_proj, cutoffs : List[int], div_val=1, init_scale=1.0, sample_softmax=False, dropout=0.0):
super().__init__()
self.n_token = n_token
self.d_embed = d_embed
self.cutoffs = list(cutoffs) + [n_token]
self.div_val = div_val
self.d_proj = d_proj
self.drop = nn.Dropout(dropout) if dropout > 0.0 else nn.Identity()
self.emb_scale = d_proj ** 0.5
self.cutoff_ends = [0] + self.cutoffs
self.emb_layers = nn.ModuleList()
self.emb_projs = nn.ParameterList()
if div_val == 1:
self.emb_layers.append(nn.Embedding(n_token, d_embed, sparse=sample_softmax > 0))
_init_embed(self.emb_layers[-1].weight, d_embed, init_scale)
# torch.nn.init.normal_(self.emb_layers[-1].weight, mean=0, std=init_scale * d_embed ** -.5)
if d_proj != d_embed: # TODO
# self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
# torch.nn.init.normal_(self.emb_projs[-1], mean=0, std=init_scale * 1./self.emb_scale)
_init_proj(self.emb_projs[-1], d_proj, init_scale)
else:
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
d_emb_i = d_embed // (div_val ** i)
self.emb_layers.append(nn.Embedding(r_idx - l_idx, d_emb_i))
# torch.nn.init.normal_(self.emb_layers[-1].weight, mean=0, std=init_scale * d_emb_i ** -.5)
_init_embed(self.emb_layers[-1].weight, d_emb_i, init_scale)
self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
# torch.nn.init.normal_(self.emb_projs[-1], mean=0, std=init_scale * 1./self.emb_scale)
_init_proj(self.emb_projs[-1], d_proj, init_scale)
def forward(self, inp, *args, **kwargs):
if self.div_val == 1:
embed = self.emb_layers[0](inp)
embed = self.drop(embed)
if self.d_proj != self.d_embed:
embed = F.linear(embed, self.emb_projs[0])
else:
param = next(self.parameters())
inp_flat = inp.view(-1)
# Changes
# emb_flat = torch.zeros([inp_flat.size(0), self.d_proj], dtype=param.dtype, device=param.device)
embeddings = []
indices = torch.zeros_like(inp_flat) # empty should work as long as cutoffs[-1] > max token
_total_tokens = 0
# emb_flat = inp.new_zeros(inp_flat.size(0), self.d_proj)
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
mask_i = (inp_flat >= l_idx) & (inp_flat < r_idx)
indices_i = mask_i.nonzero().squeeze(-1) # shape (_tokens,)
_tokens = indices_i.numel()
if _tokens == 0:
continue
inp_i = inp_flat.index_select(0, indices_i) - l_idx
emb_i = self.emb_layers[i](inp_i)
emb_i = self.drop(emb_i)
emb_i = F.linear(emb_i, self.emb_projs[i])
# Changes
embeddings.append(emb_i)
indices.index_put_(
(indices_i,),
torch.arange(_tokens, device=inp.device) + _total_tokens
)
_total_tokens += _tokens
# emb_flat.index_copy_(0, indices_i, emb_i)
embeddings = torch.cat(embeddings, dim=0)
emb_flat = embeddings[indices]
embed_shape = inp.size() + (self.d_proj,)
embed = emb_flat.view(embed_shape)
embed.mul_(self.emb_scale)
# embed.div_(self.emb_scale)
return embed
def _init_weight(weight, d : int, init_scale : Optional[float], default=None):
assert init_scale or default
if init_scale is None:
std = default
else:
std = init_scale * (d ** -0.5)
nn.init.normal_(weight, mean=0, std=std)
_init_embed = functools.partial(_init_weight, default=0.02)
_init_proj = functools.partial(_init_weight, default=0.01)
### Just for this codebase, we need to squeeze the last dimension because inputs are always given as (B, L, D) instead of (B, L)
import src.models.nn.utils as U
# AdaptiveEmbedding = U.Squeeze(AdaptiveEmbedding)
| 1.726563 | 2 |
the_el/cli.py | CityOfPhiladelphia/the-el | 11 | 5898 | import json
import csv
import sys
import os
import re
import codecs
import logging
from logging.config import dictConfig
import click
import yaml
from sqlalchemy import create_engine
from jsontableschema_sql import Storage
from smart_open import smart_open
from . import postgres
from . import carto
csv.field_size_limit(sys.maxsize)
def get_logger(logging_config):
try:
with open(logging_config) as file:
config = yaml.load(file)
dictConfig(config)
except:
FORMAT = '[%(asctime)-15s] %(levelname)s [%(name)s] %(message)s'
logging.basicConfig(format=FORMAT, level=logging.INFO, stream=sys.stderr)
logger = logging.getLogger('the_el')
def exception_handler(type, value, tb):
logger.exception("Uncaught exception: {}".format(str(value)), exc_info=(type, value, tb))
sys.excepthook = exception_handler
return logger
@click.group()
def main():
pass
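# Illustrative CLI usage (connection string and file names are assumptions; the console
# script name and exact command spelling depend on how the package and click are installed):
#   the_el describe_table my_table --connection-string postgresql://user:pass@host/db -o schema.json
#   the_el write my_table --table-schema-path schema.json -f data.csv --skip-headers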
def get_connection_string(connection_string):
connection_string = os.getenv('CONNECTION_STRING', connection_string)
if connection_string == None:
raise Exception('`CONNECTION_STRING` environment variable or `--connection-string` option required')
return connection_string
def create_storage_adaptor(connection_string, db_schema, geometry_support, from_srid=None, to_srid=None):
engine = create_engine(connection_string)
storage = Storage(engine, dbschema=db_schema, geometry_support=geometry_support, from_srid=from_srid, to_srid=to_srid, views=True)
return engine, storage
def fopen(file, mode='r'):
if file == None:
if mode == 'r':
return sys.stdin
elif mode == 'w':
return sys.stdout
else:
return smart_open(file, mode=mode)
def get_table_schema(table_schema_path):
with fopen(table_schema_path) as file:
contents = file.read()
if not isinstance(contents, str):
contents = contents.decode('utf-8')
return json.loads(contents)
@main.command()
@click.argument('table_name')
@click.option('--connection-string')
@click.option('-o','--output-file')
@click.option('--db-schema')
@click.option('--geometry-support')
def describe_table(table_name, connection_string, output_file, db_schema, geometry_support):
connection_string = get_connection_string(connection_string)
engine, storage = create_storage_adaptor(connection_string, db_schema, geometry_support)
descriptor = storage.describe(table_name)
with fopen(output_file, mode='w') as file:
json.dump(descriptor, file)
@main.command()
@click.argument('table_name')
@click.argument('table_schema_path')
@click.option('--connection-string')
@click.option('--db-schema')
@click.option('--indexes-fields')
@click.option('--geometry-support')
@click.option('--if-not-exists', is_flag=True, default=False)
@click.option('--logging-config', default='logging_config.conf')
def create_table(table_name,
table_schema_path,
connection_string,
db_schema,
indexes_fields,
geometry_support,
if_not_exists,
logging_config):
logger = get_logger(logging_config)
table_schema = get_table_schema(table_schema_path)
if indexes_fields != None:
indexes_fields = indexes_fields.split(',')
if re.match(carto.carto_connection_string_regex, connection_string) != None:
load_postgis = geometry_support == 'postgis'
logger.info('{} - Creating table using Carto'.format(table_name))
return carto.create_table(logger, table_name, load_postgis, table_schema, if_not_exists, indexes_fields, connection_string)
connection_string = get_connection_string(connection_string)
engine, storage = create_storage_adaptor(connection_string, db_schema, geometry_support)
logger.info('{} - Creating table using SQLAlchemy'.format(table_name))
storage.create(table_name, table_schema, indexes_fields=indexes_fields)
@main.command()
@click.argument('table_name')
@click.option('--table-schema-path')
@click.option('--connection-string')
@click.option('-f','--input-file')
@click.option('--db-schema')
@click.option('--geometry-support')
@click.option('--from-srid')
@click.option('--skip-headers', is_flag=True)
@click.option('--indexes-fields')
@click.option('--upsert', is_flag=True)
@click.option('--truncate/--no-truncate', is_flag=True, default=False)
@click.option('--logging-config', default='logging_config.conf')
def write(table_name,
table_schema_path,
connection_string,
input_file,
db_schema,
geometry_support,
from_srid,
skip_headers,
indexes_fields,
upsert,
truncate,
logging_config):
logger = get_logger(logging_config)
table_schema = get_table_schema(table_schema_path) if table_schema_path is not None else None
## TODO: csv settings? use Frictionless Data csv standard?
## TODO: support line delimited json?
with fopen(input_file) as file:
rows = csv.reader(file)
if skip_headers:
next(rows)
if re.match(carto.carto_connection_string_regex, connection_string) != None:
load_postgis = geometry_support == 'postgis'
if indexes_fields != None:
indexes_fields = indexes_fields.split(',')
logger.info('{} - Writing to table using Carto'.format(table_name))
carto.load(logger,
db_schema,
table_name,
load_postgis,
table_schema,
connection_string,
rows,
indexes_fields,
truncate)
else:
connection_string = get_connection_string(connection_string)
engine, storage = create_storage_adaptor(connection_string, db_schema, geometry_support, from_srid=from_srid)
## TODO: truncate? carto does. Makes this idempotent
logger.info('{} - Writing to table using SQLAlchemy'.format(table_name))
if table_schema_path != None:
table_schema = get_table_schema(table_schema_path)
storage.describe(table_name, descriptor=table_schema)
else:
storage.describe(table_name)
if upsert:
postgres.upsert(engine, db_schema, table_name, table_schema, rows)
elif geometry_support == None and engine.dialect.driver == 'psycopg2':
postgres.copy_from(engine, table_name, table_schema, rows)
else:
storage.write(table_name, rows)
@main.command()
@click.argument('table_name')
@click.option('--connection-string')
@click.option('-o','--output-file')
@click.option('--db-schema')
@click.option('--geometry-support')
@click.option('--from-srid')
@click.option('--to-srid')
@click.option('--logging-config', default='logging_config.conf')
def read(table_name, connection_string, output_file, db_schema, geometry_support, from_srid, to_srid, logging_config):
logger = get_logger(logging_config)
connection_string = get_connection_string(connection_string)
engine, storage = create_storage_adaptor(connection_string, db_schema, geometry_support, from_srid=from_srid, to_srid=to_srid)
## TODO: csv settings? use Frictionless Data csv standard?
## TODO: support line delimited json?
with fopen(output_file, mode='w') as file:
writer = csv.writer(file)
descriptor = storage.describe(table_name)
fields = map(lambda x: x['name'], descriptor['fields'])
writer.writerow(fields)
if geometry_support == None and engine.dialect.driver == 'psycopg2':
postgres.copy_to(engine, table_name, file)
else:
for row in storage.iter(table_name):
row_out = []
for field in row:
if isinstance(field, dict) or isinstance(field, list):
field = json.dumps(field)
row_out.append(field)
writer.writerow(row_out)
@main.command()
@click.argument('new_table_name')
@click.argument('old_table_name')
@click.option('--connection-string')
@click.option('--db-schema')
@click.option('--select-users', help='Users to grant SELECT on updated table')
@click.option('--logging-config', default='logging_config.conf')
def swap_table(new_table_name, old_table_name, connection_string, db_schema, select_users, logging_config):
logger = get_logger(logging_config)
if re.match(carto.carto_connection_string_regex, connection_string) != None:
if select_users != None:
select_users = select_users.split(',')
else:
select_users = []
logger.info('Swapping tables using Carto: {} - {}'.format(new_table_name, old_table_name))
return carto.swap_table(logger, db_schema, new_table_name, old_table_name, select_users, connection_string)
connection_string = get_connection_string(connection_string)
engine = create_engine(connection_string)
if engine.dialect.driver == 'psycopg2':
logger.info('Swapping tables using psycopg2: {} - {}'.format(new_table_name, old_table_name))
conn = engine.raw_connection()
try:
with conn.cursor() as cur:
sql = 'ALTER TABLE "{}" RENAME TO "{}_old";'.format(old_table_name, old_table_name) +\
'ALTER TABLE "{}" RENAME TO "{}";'.format(new_table_name, old_table_name) +\
'DROP TABLE "{}_old";'.format(old_table_name)
cur.execute(sql)
conn.commit()
except:
conn.rollback()
raise
conn.close()
elif engine.dialect.driver == 'cx_oracle':
logger.info('Swapping tables using cx_Oracle: {} - {}'.format(new_table_name, old_table_name))
conn = engine.connect()
if select_users != None:
select_users = select_users.split(',')
else:
select_users = []
grants_sql = []
for user in select_users:
grants_sql.append('GRANT SELECT ON {} TO {}'.format(old_table_name, user.strip()))
# Oracle does not allow table modification within a transaction, so make individual transactions:
sql1 = 'ALTER TABLE {} RENAME TO {}_old'.format(old_table_name, old_table_name)
sql2 = 'ALTER TABLE {} RENAME TO {}'.format(new_table_name, old_table_name)
sql3 = 'DROP TABLE {}_old'.format(old_table_name)
try:
conn.execute(sql1)
except:
logger.error("Could not rename {} table. Does it exist?".format(old_table_name))
raise
try:
conn.execute(sql2)
except:
logger.error("Could not rename {} table. Does it exist?".format(new_table_name))
rb_sql = 'ALTER TABLE {}_old RENAME TO {}'.format(old_table_name, old_table_name)
conn.execute(rb_sql)
raise
try:
conn.execute(sql3)
except:
logger.error("Could not drop {}_old table. Do you have permission?".format(old_table_name))
rb_sql1 = 'DROP TABLE {}'.format(old_table_name)
conn.execute(rb_sql1)
rb_sql2 = 'ALTER TABLE {}_old RENAME TO {}'.format(old_table_name, old_table_name)
conn.execute(rb_sql2)
raise
try:
for sql in grants_sql:
conn.execute(sql)
except:
logger.error("Could not grant all permissions to {}.".format(old_table_name))
raise
else:
raise Exception('`{}` not supported by swap_table'.format(engine.dialect.driver))
| 2.296875 | 2 |
examples/asr/experimental/speech_to_text_sclite.py | vadam5/NeMo | 2 | 5899 | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script is based on speech_to_text_infer.py and allows you to score the hypotheses
with sclite. A local installation from https://github.com/usnistgov/SCTK is required.
Hypotheses and references are first saved in trn format and are scored after applying a glm
file (if provided).
"""
import errno
import json
import os
import subprocess
from argparse import ArgumentParser
import torch
from nemo.collections.asr.metrics.wer import WER
from nemo.collections.asr.models import EncDecCTCModel
from nemo.utils import logging
try:
from torch.cuda.amp import autocast
except ImportError:
from contextlib import contextmanager
@contextmanager
def autocast(enabled=None):
yield
def score_with_sctk(sctk_dir, ref_fname, hyp_fname, out_dir, glm=""):
sclite_path = os.path.join(sctk_dir, "bin", "sclite")
if not os.path.exists(sclite_path):
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), sclite_path)
# apply glm
if os.path.exists(glm):
rfilter_path = os.path.join(sctk_dir, "bin", "rfilter1")
if not os.path.exists(rfilter_path):
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), rfilter_path)
hypglm = os.path.join(out_dir, os.path.basename(hyp_fname)) + ".glm"
rfilt_cmd = [rfilter_path] + [glm]
with open(hypglm, "w") as hypf, open(hyp_fname, "r") as hyp_in:
subprocess.run(rfilt_cmd, stdin=hyp_in, stdout=hypf)
refglm = os.path.join(out_dir, os.path.basename(ref_fname)) + ".glm"
with open(refglm, "w") as reff, open(ref_fname, "r") as ref_in:
subprocess.run(rfilt_cmd, stdin=ref_in, stdout=reff)
else:
refglm = ref_fname
hypglm = hyp_fname
_ = subprocess.check_output(f"{sclite_path} -h {hypglm} -r {refglm} -i wsj -o all", shell=True)
can_gpu = torch.cuda.is_available()
def get_utt_info(manifest_path):
info_list = []
with open(manifest_path, "r") as utt_f:
for line in utt_f:
utt = json.loads(line)
info_list.append(utt)
return info_list
def main():
parser = ArgumentParser()
parser.add_argument(
"--asr_model", type=str, default="QuartzNet15x5Base-En", required=False, help="Pass: 'QuartzNet15x5Base-En'",
)
parser.add_argument("--dataset", type=str, required=True, help="path to evaluation data")
parser.add_argument("--batch_size", type=int, default=4)
parser.add_argument(
"--dont_normalize_text",
default=False,
action='store_true',
help="Turn off trasnscript normalization. Recommended for non-English.",
)
parser.add_argument("--out_dir", type=str, required=True, help="Destination dir for output files")
parser.add_argument("--sctk_dir", type=str, required=False, default="", help="Path to sctk root dir")
parser.add_argument("--glm", type=str, required=False, default="", help="Path to glm file")
args = parser.parse_args()
torch.set_grad_enabled(False)
if not os.path.exists(args.out_dir):
os.makedirs(args.out_dir)
use_sctk = os.path.exists(args.sctk_dir)
if args.asr_model.endswith('.nemo'):
logging.info(f"Using local ASR model from {args.asr_model}")
asr_model = EncDecCTCModel.restore_from(restore_path=args.asr_model)
else:
logging.info(f"Using NGC cloud ASR model {args.asr_model}")
asr_model = EncDecCTCModel.from_pretrained(model_name=args.asr_model)
asr_model.setup_test_data(
test_data_config={
'sample_rate': 16000,
'manifest_filepath': args.dataset,
'labels': asr_model.decoder.vocabulary,
'batch_size': args.batch_size,
'normalize_transcripts': not args.dont_normalize_text,
}
)
if can_gpu:
asr_model = asr_model.cuda()
asr_model.eval()
labels_map = dict([(i, asr_model.decoder.vocabulary[i]) for i in range(len(asr_model.decoder.vocabulary))])
wer = WER(vocabulary=asr_model.decoder.vocabulary)
hypotheses = []
references = []
all_log_probs = []
for test_batch in asr_model.test_dataloader():
if can_gpu:
test_batch = [x.cuda() for x in test_batch]
with autocast():
log_probs, encoded_len, greedy_predictions = asr_model(
input_signal=test_batch[0], input_signal_length=test_batch[1]
)
for r in log_probs.cpu().numpy():
all_log_probs.append(r)
hypotheses += wer.ctc_decoder_predictions_tensor(greedy_predictions)
for batch_ind in range(greedy_predictions.shape[0]):
reference = ''.join([labels_map[c] for c in test_batch[2][batch_ind].cpu().detach().numpy()])
references.append(reference)
del test_batch
info_list = get_utt_info(args.dataset)
hypfile = os.path.join(args.out_dir, "hyp.trn")
reffile = os.path.join(args.out_dir, "ref.trn")
with open(hypfile, "w") as hyp_f, open(reffile, "w") as ref_f:
for i in range(len(hypotheses)):
utt_id = os.path.splitext(os.path.basename(info_list[i]['audio_filepath']))[0]
# rfilter in sctk likes each transcript to have a space at the beginning
hyp_f.write(" " + hypotheses[i] + " (" + utt_id + ")" + "\n")
ref_f.write(" " + references[i] + " (" + utt_id + ")" + "\n")
if use_sctk:
score_with_sctk(args.sctk_dir, reffile, hypfile, args.out_dir, glm=args.glm)
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
| 1.84375 | 2 |