max_stars_repo_path (string, 3–269 chars) | max_stars_repo_name (string, 4–119 chars) | max_stars_count (int64, 0–191k) | id (string, 1–7 chars) | content (string, 6–1.05M chars) | score (float64, 0.23–5.13) | int_score (int64, 0–5)
---|---|---|---|---|---|---
sandbox/pdp2/arbitrary_data/zip_files.py | projectpai/paipass | 3 | 5200 | import zipfile
import random

RAND_INT_RANGE = (1, 100)


def wrf(fname):
    with open(fname, 'w') as f:
        for i in range(100):
            f.write(str(random.randint(*RAND_INT_RANGE)))


fnames = []
for i in range(10):
    fname = 'file' + str(i) + '.txt'
    wrf(fname)
    fnames.append(fname)

dirpaths = set()
with zipfile.ZipFile('myzip.zip', 'w', compression=zipfile.ZIP_DEFLATED) as zf:
    for fname in fnames:
        dirpath = '/dirpath' + str(random.randint(*RAND_INT_RANGE))
        # let's not have duplicate dirpaths.
        while dirpath in dirpaths:
            dirpath = '/dirpath' + str(random.randint(*RAND_INT_RANGE))
        zf.write(fname, arcname=dirpath + '/' + fname)
        dirpaths.add(dirpath)

print('dirpaths', dirpaths)
print('fnames', fnames)
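A quick way to sanity-check the archive written above is to reopen it and list its members. This is a minimal sketch, assuming `myzip.zip` was produced by the script in the current working directory:

```python
import zipfile

# Reopen the archive and confirm every file landed under its own directory.
with zipfile.ZipFile('myzip.zip') as zf:
    names = zf.namelist()
    dirs = {name.rsplit('/', 1)[0] for name in names}
    print(len(names), 'members across', len(dirs), 'distinct directories')
    # Reading a member back returns the bytes that were written.
    print(zf.read(names[0])[:20])
```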
| 3.25 | 3 |
tests/testproject/testproject/tests/test_middleware.py | mwesterhof/wagtail_managed404 | 1 | 5201 | import unittest

from django.test import Client
from wagtail.core.models import Page

from wagtail_managed404.models import PageNotFoundEntry


class TestMiddleware(unittest.TestCase):
    """Tests for the `wagtail_managed404` middleware."""

    def setUp(self):
        self.client = Client()
        self.invalid_url = '/definitely_not_an_actual_url/'
        self.redirect_to_url = '/much_better_url/'
        self.redirect_to_page = Page.objects.get(depth=2)

    def test_redirect_to_url(self):
        PageNotFoundEntry.objects.all().delete()
        entry = self._trigger_404()
        entry.redirect_to_url = self.redirect_to_url
        entry.save()
        self._validate_redirect(self.invalid_url, self.redirect_to_url)

    def test_redirect_to_page(self):
        PageNotFoundEntry.objects.all().delete()
        entry = self._trigger_404()
        entry.redirect_to_page = self.redirect_to_page
        entry.save()
        self._validate_redirect(self.invalid_url, self.redirect_to_page.url)

    def _trigger_404(self):
        response = self.client.get(self.invalid_url)
        self.assertEqual(response.status_code, 404)
        entries = PageNotFoundEntry.objects.filter(url=self.invalid_url)
        self.assertEqual(entries.count(), 1)
        return entries.first()

    def _validate_redirect(self, source_url, target_url):
        response = self.client.get(source_url)
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.url, target_url)
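The redirect assertions above could also be expressed with the test client's `follow=True` option, which resolves the redirect chain in one call. A small sketch of what that would look like inside one of the test methods (not part of the original suite):

```python
# Follow the redirect instead of asserting on response.url directly.
response = self.client.get(self.invalid_url, follow=True)
# redirect_chain holds (url, status_code) pairs for each hop taken.
assert response.redirect_chain[0][1] == 302
```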
| 2.46875 | 2 |
src/reversion/version.py | maraujop/django-reversion | 0 | 5202 | __version__ = (1, 8, 5)
| 1.148438 | 1 |
observations/r/bomsoi.py | hajime9652/observations | 199 | 5203 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def bomsoi(path):
"""Southern Oscillation Index Data
The Southern Oscillation Index (SOI) is the difference in barometric
pressure at sea level between Tahiti and Darwin. Annual SOI and
Australian rainfall data, for the years 1900-2001, are given.
Australia's annual mean rainfall is an area-weighted average of the
total annual precipitation at approximately 370 rainfall stations around
the country.
This data frame contains the following columns:
Year
a numeric vector
Jan
average January SOI values for each year
Feb
average February SOI values for each year
Mar
average March SOI values for each year
Apr
average April SOI values for each year
May
average May SOI values for each year
Jun
average June SOI values for each year
Jul
average July SOI values for each year
Aug
average August SOI values for each year
Sep
average September SOI values for each year
Oct
average October SOI values for each year
Nov
average November SOI values for each year
Dec
average December SOI values for each year
SOI
a numeric vector consisting of average annual SOI values
avrain
a numeric vector consisting of a weighted average annual rainfall at
a large number of Australian sites
NTrain
Northern Territory rain
northRain
north rain
seRain
southeast rain
eastRain
east rain
southRain
south rain
swRain
southwest rain
Australian Bureau of Meteorology web pages:
http://www.bom.gov.au/climate/change/rain02.txt and
http://www.bom.gov.au/climate/current/soihtm1.shtml
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `bomsoi.csv`.
Returns:
Tuple of np.ndarray `x_train` with 106 rows and 21 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'bomsoi.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/DAAG/bomsoi.csv'
maybe_download_and_extract(path, url,
save_file_name='bomsoi.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata
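A typical call looks like the following; the import path and the one-time download are assumptions based on the module layout and the URL above:

```python
from observations.r.bomsoi import bomsoi

x_train, metadata = bomsoi('~/data')
print(x_train.shape)                    # (106, 21) per the docstring
print(list(metadata['columns'])[:5])    # first few column names
```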
| 2.515625 | 3 |
openpype/hosts/houdini/plugins/publish/validate_bypass.py | dangerstudios/OpenPype | 0 | 5204 | import pyblish.api

import openpype.api


class ValidateBypassed(pyblish.api.InstancePlugin):
    """Validate that the instance's ROP node is not set to bypass.

    A bypassed ROP produces no output when rendered, so publishing is
    stopped before extraction if the primary ROP of the instance is
    bypassed.
    """

    order = openpype.api.ValidateContentsOrder - 0.1
    families = ["*"]
    hosts = ["houdini"]
    label = "Validate ROP Bypass"

    def process(self, instance):
        invalid = self.get_invalid(instance)
        if invalid:
            rop = invalid[0]
            raise RuntimeError(
                "ROP node %s is set to bypass, publishing cannot continue.." %
                rop.path()
            )

    @classmethod
    def get_invalid(cls, instance):
        rop = instance[0]
        if rop.isBypassed():
            return [rop]
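Outside of Houdini the validation logic can be exercised with a stand-in object that provides only the two ROP methods the plugin calls. This is purely illustrative and not part of the plugin:

```python
class FakeRop:
    """Stand-in for a Houdini ROP node, exposing only what the validator uses."""
    def isBypassed(self):
        return True

    def path(self):
        return '/out/fake_rop'


instance = [FakeRop()]
invalid = ValidateBypassed.get_invalid(instance)
print(invalid)  # [<FakeRop object ...>] -- a bypassed ROP is reported as invalid
```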
| 2.203125 | 2 |
gavPrj/dataset_core.py | GavinK-ai/cv | 1 | 5205 |
import os
import cv2 as cv
import matplotlib.pyplot as plt
import numpy as np
#srcPaths = ('dataset/Screenshot1','dataset/Screenshot2','dataset/Screenshot3', 'dataset/Screenshot4')
#srcPaths = ('all_dataset/s1',
# 'all_dataset/s10',
# 'all_dataset/s11',
# 'all_dataset/s12',
# 'all_dataset/s13',
# 'all_dataset/s14',
# 'all_dataset/s15',
# 'all_dataset/s16',
# 'all_dataset/s17',
# 'all_dataset/s18',
# 'all_dataset/s19',
# 'all_dataset/s2',
# 'all_dataset/s20',
# 'all_dataset/s21',
# 'all_dataset/s22',
# 'all_dataset/s23',
# 'all_dataset/s24',
# 'all_dataset/s25',
# 'all_dataset/s26',
# 'all_dataset/s27',
# 'all_dataset/s28',
# 'all_dataset/s29',
# 'all_dataset/s3',
# 'all_dataset/s30',
# 'all_dataset/s31',
# 'all_dataset/s32',
# 'all_dataset/s33',
# 'all_dataset/s34',
# 'all_dataset/s35',
# 'all_dataset/s36',
# 'all_dataset/s37',
# 'all_dataset/s38',
# 'all_dataset/s39',
# 'all_dataset/s4',
# 'all_dataset/s40',
# 'all_dataset/s41',
# 'all_dataset/s42',
# 'all_dataset/s43',
# 'all_dataset/s44',
# 'all_dataset/s45',
# 'all_dataset/s46',
# 'all_dataset/s47',
# 'all_dataset/s48',
# 'all_dataset/s49',
# 'all_dataset/s5',
# 'all_dataset/s50',
# 'all_dataset/s51',
# 'all_dataset/s52',
# 'all_dataset/s53',
# 'all_dataset/s54',
# 'all_dataset/s55',
# 'all_dataset/s56',
# 'all_dataset/s57',
# 'all_dataset/s58',
# 'all_dataset/s59',
# 'all_dataset/s6',
# 'all_dataset/s60',
# 'all_dataset/s61',
# 'all_dataset/s62',
# 'all_dataset/s63',
# 'all_dataset/s7',
# 'all_dataset/s8',
# 'all_dataset/s9')
srcPaths = ('testdataset/t1','testdataset/t2')
datasetfilename = 'testdataset1.npz'
def create_dataset(datasetfilename, srcPaths, classNames):

    imgList = []
    labelList = []
    labelNameList = []

    for srcPath in srcPaths:
        # append all files in srcPath dir into imgList and labelList
        for fname in os.listdir(srcPath):
            filePath = os.path.join(srcPath, fname)
            img = cv.imread(filePath)
            # split the file name (without extension) to use as the label
            fname_no_ext = os.path.splitext(fname)[0]
            # label = fname_no_ext[-1]
            label = fname_no_ext
            imgList.append(img)
            labelList.append(classNames[label])
            labelNameList.append(label)

    # convert imgList to numpy
    images = np.array(imgList, dtype='object')
    labels = np.array(labelList, dtype='object')
    labelnames = np.array(labelNameList)

    # save converted images and labels into compressed numpy zip file
    np.savez_compressed(datasetfilename, images=images, labels=labels, labelnames=labelnames)

    return True


def displayImg():
    # for fname in os.listdir(srcPath):
    pass


if __name__ == '__main__':

    # save a dataset in numpy compressed format
    # datasetfilename = 'tiredataset.npz'
    classNames = {'afiq': 0, 'azureen': 1, 'gavin': 2, 'goke': 3, 'inamul': 4, 'jincheng': 5, 'mahmuda': 6, 'numan': 7, 'saseendran': 8}

    if create_dataset(datasetfilename, srcPaths, classNames):

        data = np.load(datasetfilename, allow_pickle=True)
        imgList = data['images']
        labelList = data['labels']
        labelNameList = data['labelnames']

        img = imgList[0]
        label = labelList[0]
        labelNameList = data['labelnames']

        imgRGB = img[:, :, ::-1]

        plt.imshow(imgRGB)
        plt.title(label)
        plt.show()

        print(imgList.shape)
        print(labelList.shape)
# imgList, labelList = create_dataset()
# img = imgList[0]
# label = labelList[0]
# imgRGB = img[:, :, ::-1]
# plt.imshow(imgRGB)
# plt.title(label)
# plt.show()
# img = imgList[1]
# label = labelList[1]
# imgRGB = img[:, :, ::-1]
# plt.imshow(imgRGB)
# plt.title(label)
# plt.show()
# img = imgList[3]
# label = labelList[3]
# imgRGB = img[:, :, ::-1]
# plt.imshow(imgRGB)
# plt.title(label)
# plt.show()
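Once the compressed archive exists it can be reloaded without the original image folders. A minimal sketch, assuming `testdataset1.npz` was produced by `create_dataset` above:

```python
import numpy as np

data = np.load('testdataset1.npz', allow_pickle=True)
images, labels, labelnames = data['images'], data['labels'], data['labelnames']

# Each entry pairs an OpenCV BGR image with its numeric class and file stem.
for img, label, name in zip(images[:3], labels[:3], labelnames[:3]):
    print(name, label, img.shape)
```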
| 2.015625 | 2 |
kronos/kronos.py | jinified/kronos | 0 | 5206 | """
Kronos: A simple scheduler for graduate training programme
Entities: User, Schedule, Rotation
"""
from operator import itemgetter
from datetime import datetime, timedelta
def getRotationCapacity(rotationId, startDate, endDate, assignments):
    """ Calculate number of users assigned to a particular rotation during the specified duration
    """
    start = datetime.strptime(startDate, "%d%m%Y")
    end = datetime.strptime(endDate, "%d%m%Y")
    duration = int((end - start).days / 7.0)
    # Weeks involved during the rotation
    weeks = [(start + timedelta(weeks=x)).strftime("%W%Y") for x in range(0, duration)]
    capacity = sum(itemgetter(*weeks)(assignments[rotationId][0][0]))
    return capacity
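The `%W%Y` keys used above concatenate the zero-padded week number with the year, so any lookup into `assignments` has to build its keys the same way. A short check of the format (dates chosen purely for illustration):

```python
from datetime import datetime, timedelta

start = datetime.strptime("01012018", "%d%m%Y")
print(start.strftime("%W%Y"))                          # '012018'
print((start + timedelta(weeks=3)).strftime("%W%Y"))   # '042018'
```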
def score_assignment(
assignments,
solution,
earliestAvailableDate,
core_rotations=["PMO", "PE", "SE", "PM"],
rotation_duration={
"PMO": 12,
"PE": 12,
"SE": 12,
"PM": 12,
"SYS": 12,
"ARC": 12,
"ANA": 12,
},
):
""" Calculate loss function for suggested solution (negative = better)
Parameters:
assignments (dict): global assignment object by rotation
solution (dict): rotation assignment for a user
earliestAvailableDate (date): earliest date where a user can be assigned a rotation
core_rotations (list): rotation that should be completed first
rotation_duration (dict): duration of each rotation
"""
print(solution)
# SOFT CONSTRAINT 1 - Core rotations should be completed in the first 4 rotations if possible
core_first_loss = sum(
[
-3 if x[0] in core_rotations else 0
for x in solution
if int(x[1]) <= len(core_rotations)
]
)
# SOFT CONSTRAINT 2 - External Assignment must be assigned last
external_assignment_loss = (
99 if "EXT" in [x[0] for x in solution] and solution[-1][0] != "EXT" else 0
)
# Calculate timing of each rotation from solution
solution = [
(
x[0],
rotation_duration[x[0]]
+ (sum([rotation_duration[x[0]] for x in solution[:i]]) if i != 0 else 0),
)
for i, x in enumerate(solution)
]
startDate = earliestAvailableDate
schedule = []
for x in solution:
endDate = startDate + timedelta(weeks=x[1]) - timedelta(days=1)
# Make sure the date falls on weekday
if endDate.weekday() >= 5:
endDate -= timedelta(endDate.weekday() - 4)
schedule.append(
(x[0], startDate.strftime("%d%m%Y"), endDate.strftime("%d%m%Y"))
)
startDate += timedelta(weeks=x[1])
spread_first_loss = sum(
[getRotationCapacity(x[0], x[1], x[2], assignments) for x in schedule]
)
loss = core_first_loss + external_assignment_loss + spread_first_loss
return loss
def schedule2assignments(schedule):
""" Convert schedule object to assignment object
"""
rotations = {}
for userId, userSchedule in schedule.items():
for rotation in userSchedule:
id = rotation["rotationId"]
if id not in rotations:
rotations[id] = [[{}], []]
print(rotations[id][0][0])
startDate, endDate = itemgetter("startDate", "endDate")(rotation)
start = datetime.strptime(startDate, "%d%m%Y")
end = datetime.strptime(endDate, "%d%m%Y")
duration = int((end - start).days / 7.0)
for i in range(duration):
date = (start + timedelta(weeks=i)).strftime("%W%Y")
if date not in rotations[id][0][0]:
rotations[id][0][0][date] = 0
rotations[id][0][0][date] += 1
rotations[id][1].append((userId, startDate, endDate))
sortedDate = sorted(list(rotations[id][0][0].keys()))
if len(rotations[id][0]) < 2:
rotations[id][0].append(sortedDate[0])
rotations[id][0].append(sortedDate[-1])
elif sortedDate[0] < rotations[id][0][1]:
rotations[id][0][1] = sortedDate[0]
elif len(rotations[id][0]) > 2 and sortedDate[-1] > rotations[id][0][2]:
rotations[id][0][2] = sortedDate[-1]
print(rotations)
return rotations
def assignments2schedule(assignments):
""" Convert assignment object to overall schedule
"""
users = {}
for rotationId, rotationInfo in assignments.items():
for userId, userAssignment in rotationInfo[1].items():
if userId not in users:
users[userId] = []
users[userId].append(
{
"rotationId": rotationId,
"startDate": userAssignment[0],
"endDate": userAssignment[1],
}
)
print(users)
return users
def generateUserSchedule(user, assignments, scoring_function):
""" Generate most optimal user schedule
Parameters:
user (object): User
assignments (dict): Time-bounded assignments
scoring_function (function): scoring function to rank possible assignments
Returns:
schedule (list): list of rotations
"""
return [{"rotationId": "PMO", "startDate": "012018"}]
def getOverallSchedule(users):
""" Generate overall schedule from individual user's schedule
Parameters:
users (list): list of Users
Returns:
schedule (dict): overall assignments
"""
return {}
def getConflictingAssignments(schedule):
""" Get list of assignments which exceeded rotation capacity
Parameters:
schedule (dict): overall assignments
Returns:
confictingAssignmentsByRotation (dict): overall schedule with conflicting assignments
"""
return {}
if __name__ == "__main__":
pass
| 3.03125 | 3 |
personal_env/lib/python3.8/site-packages/pylint/lint/utils.py | jestinmwilson/personal-website | 0 | 5207 | # Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
import contextlib
import sys
from pylint.utils import utils
class ArgumentPreprocessingError(Exception):
"""Raised if an error occurs during argument preprocessing."""
def preprocess_options(args, search_for):
"""look for some options (keys of <search_for>) which have to be processed
before others
values of <search_for> are callback functions to call when the option is
found
"""
i = 0
while i < len(args):
arg = args[i]
if arg.startswith("--"):
try:
option, val = arg[2:].split("=", 1)
except ValueError:
option, val = arg[2:], None
try:
cb, takearg = search_for[option]
except KeyError:
i += 1
else:
del args[i]
if takearg and val is None:
if i >= len(args) or args[i].startswith("-"):
msg = "Option %s expects a value" % option
raise ArgumentPreprocessingError(msg)
val = args[i]
del args[i]
elif not takearg and val is not None:
msg = "Option %s doesn't expects a value" % option
raise ArgumentPreprocessingError(msg)
cb(option, val)
else:
i += 1
def _patch_sys_path(args):
original = list(sys.path)
changes = []
seen = set()
for arg in args:
path = utils.get_python_path(arg)
if path not in seen:
changes.append(path)
seen.add(path)
sys.path[:] = changes + sys.path
return original
@contextlib.contextmanager
def fix_import_path(args):
"""Prepare sys.path for running the linter checks.
Within this context, each of the given arguments is importable.
Paths are added to sys.path in corresponding order to the arguments.
We avoid adding duplicate directories to sys.path.
`sys.path` is reset to its original value upon exiting this context.
"""
original = _patch_sys_path(args)
try:
yield
finally:
sys.path[:] = original
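Both helpers are easiest to see in a short driver. The option name and callback below are made up for illustration; only the `(callback, takes_argument)` contract comes from the code above:

```python
collected = {}

def on_output(option, value):
    # Called by preprocess_options when the option is found.
    collected[option] = value

args = ["--output=report.txt", "mypackage"]
preprocess_options(args, {"output": (on_output, True)})
print(collected)   # {'output': 'report.txt'}
print(args)        # ['mypackage'] -- the handled option was consumed

# fix_import_path temporarily prepends the arguments' python paths to sys.path.
with fix_import_path(["."]):
    pass  # modules under the current directory are importable here
```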
| 2.34375 | 2 |
mol_dqn/experimental/multi_obj.py | deepneuralmachine/google-research | 23,901 | 5208 |
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Generates molecules that satisfy two targets.
Target1: SAS
Target2: QED
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import os
from absl import app
from absl import flags
from rdkit import Chem
from rdkit.Chem import QED
from rdkit.Contrib import SA_Score
from tensorflow.compat.v1 import gfile
from mol_dqn.chemgraph.mcts import deep_q_networks
from mol_dqn.chemgraph.mcts import molecules as molecules_mdp
from mol_dqn.chemgraph.mcts import run_dqn
from mol_dqn.chemgraph.tensorflow import core
flags.DEFINE_float('target_sas', 1, 'The target SAS of the molecule.')
flags.DEFINE_float('target_qed', 0.5, 'The target QED of the molecule.')
flags.DEFINE_float('gamma', 0.999, 'discount')
FLAGS = flags.FLAGS
class MultiObjectiveRewardMolecule(molecules_mdp.Molecule):
"""Defines the subclass of generating a molecule with a specific reward.
The reward is defined as a 1-D vector with 2 entries: similarity and QED
reward = (similarity_score, qed_score)
"""
def _reward(self):
"""Calculates the reward of the current state.
The reward is defined as a tuple of the similarity and QED value.
Returns:
A tuple of the similarity and qed value
"""
# calculate similarity.
# if the current molecule does not contain the scaffold of the target,
# similarity is zero.
if self._state is None:
return 0.0, 0.0
mol = Chem.MolFromSmiles(self._state)
if mol is None:
return 0.0, 0.0
qed_value = QED.qed(mol)
sas = SA_Score.sascorer.calculateScore(mol)
return -abs(sas - FLAGS.target_sas), -abs(qed_value - FLAGS.target_qed)
def soft_cst(v, l, r):
  if l <= v <= r:
    return 1
  return -min(abs(l - v), abs(r - v))
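`soft_cst` returns 1 inside the `[l, r]` window and the negative distance to the nearest bound outside it, so the reward decays linearly with constraint violation. A few spot checks:

```python
assert soft_cst(5, 1, 10) == 1     # inside the window
assert soft_cst(12, 1, 10) == -2   # 2 above the upper bound
assert soft_cst(-4, 1, 10) == -5   # 5 below the lower bound
```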
class Molecule(molecules_mdp.Molecule):
"""SAS and QED reward molecule."""
def _reward(self):
"""Calculates the reward of the current state.
The reward is defined as a tuple of the similarity and QED value.
Returns:
A tuple of the similarity and qed value
"""
# calculate similarity.
# if the current molecule does not contain the scaffold of the target,
# similarity is zero.
if self._state is None:
return 0.0, 0.0
mol = Chem.MolFromSmiles(self._state)
if mol is None:
return 0.0, 0.0
qed_value = QED.qed(mol)
sas = SA_Score.sascorer.calculateScore(mol)
# c1 = soft_cst(sas, FLAGS.target_sas - 0.2, FLAGS.target_sas + 0.2)
# c2 = soft_cst(qed_value, FLAGS.target_qed - 0.1, FLAGS.target_qed + 0.1)
# # if c1 < 0 and c2 < 0:
# # return - c1 * c2
# # else:
# # return c1 * c2
return (soft_cst(sas, FLAGS.target_sas - 0.2, FLAGS.target_sas + 0.2) +
soft_cst(qed_value, FLAGS.target_qed - 0.1,
FLAGS.target_qed + 0.1)) * FLAGS.gamma**(
self.max_steps - self._counter)
def main(argv):
del argv
if FLAGS.hparams is not None:
with gfile.Open(FLAGS.hparams, 'r') as f:
hparams = deep_q_networks.get_hparams(**json.load(f))
else:
hparams = deep_q_networks.get_hparams()
hparams.add_hparam('target_qed', FLAGS.target_qed)
hparams.add_hparam('target_sas', FLAGS.target_sas)
environment = Molecule(
atom_types=set(hparams.atom_types),
init_mol='CCc1c(C)[nH]c2CCC(CN3CCOCC3)C(=O)c12',
allow_removal=hparams.allow_removal,
allow_no_modification=hparams.allow_no_modification,
allow_bonds_between_rings=False,
allowed_ring_sizes={3, 4, 5, 6},
max_steps=hparams.max_steps_per_episode)
dqn = deep_q_networks.DeepQNetwork(
input_shape=(hparams.batch_size, hparams.fingerprint_length + 1),
q_fn=functools.partial(
deep_q_networks.multi_layer_model, hparams=hparams),
optimizer=hparams.optimizer,
grad_clipping=hparams.grad_clipping,
num_bootstrap_heads=hparams.num_bootstrap_heads,
gamma=hparams.gamma,
epsilon=1.0)
run_dqn.run_training(
hparams=hparams,
environment=environment,
dqn=dqn,
)
core.write_hparams(hparams, os.path.join(FLAGS.model_dir, 'config.json'))
if __name__ == '__main__':
app.run(main)
| 1.890625 | 2 |
myuw/test/views/test_rest_search.py | uw-it-aca/myuw | 18 | 5209 | # Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
# -*- coding: utf-8 -*-
from django.test.utils import override_settings
from django.urls import reverse
from myuw.test.api import MyuwApiTest
@override_settings(
RESTCLIENTS_ADMIN_AUTH_MODULE='rc_django.tests.can_proxy_restclient')
class RestSearchViewTest(MyuwApiTest):
def test_post(self):
self.set_user('javerage')
# hfs
url = reverse("myuw_rest_search", args=["hfs", "accounts"])
response = self.client.post(url, {"uwnetid": "javerage"})
self.assertEqual(response.status_code, 302)
self.assertEqual(
response.url, "/restclients/view/hfs/myuw/v1/javerage")
# bookstore
url = reverse("myuw_rest_search", args=["book", "index"])
response = self.client.post(url, {
"sln1": "123", "quarter": "spring", "returnlink": "t"})
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, (
"/restclients/view/book/uw/json_utf8_202007.ubs%3F"
"quarter=spring&sln1=123&returnlink=t"))
# myplan
url = reverse("myuw_rest_search", args=["myplan", "index"])
response = self.client.post(url, {
"uwregid": "ABC", "year": "2013", "quarter": "spring"})
self.assertEqual(response.status_code, 302)
self.assertEqual(
response.url,
"/restclients/view/myplan/student/api/plan/v1/2013,spring,1,ABC")
# libraries
url = reverse("myuw_rest_search", args=["libraries", "accounts"])
response = self.client.post(url, {"id": "javerage"})
self.assertEqual(response.status_code, 302)
self.assertEqual(
response.url,
"/restclients/view/libraries/mylibinfo/v1/?id=javerage")
# iasystem
url = reverse("myuw_rest_search", args=[
"iasystem_uw", "uw/api/v1/evaluation"])
response = self.client.post(url, {"student_id": "123456"})
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, (
"/restclients/view/iasystem_uw/api/" +
"v1/evaluation?student_id=123456"))
# uwnetid
url = reverse("myuw_rest_search", args=["uwnetid", "password"])
response = self.client.post(url, {"uwnetid": "javerage"})
self.assertEqual(response.status_code, 302)
self.assertEqual(
response.url,
"/restclients/view/uwnetid/nws/v1/uwnetid/javerage/password")
url = reverse("myuw_rest_search", args=["uwnetid", "subscription"])
response = self.client.post(url, {"uwnetid": "javerage"})
self.assertEqual(response.status_code, 302)
self.assertEqual(
response.url,
"/restclients/view/uwnetid/nws/v1/uwnetid/" +
"javerage/subscription/60,64,105")
# grad
url = reverse("myuw_rest_search", args=[
"grad", "services/students/v1/api/committee"])
response = self.client.post(url, {
"id": "12345", "csrfmiddlewaretoken": "<PASSWORD>"})
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, (
"/restclients/view/grad/services/" +
"students/v1/api/committee?id=12345"))
# notices
url = reverse("myuw_rest_search", args=["sws", "notices"])
response = self.client.post(url, {
"uwregid": "12345678123456781234567812345678",
"csrfmiddlewaretoken": "<PASSWORD>"})
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, (
"/restclients/view/sws/student/v5/notice/" +
"12345678123456781234567812345678.json"))
# upass
url = reverse("myuw_rest_search", args=["upass", "index"])
response = self.client.post(url, {
"uwnetid": "bill",
"csrfmiddlewaretoken": "<PASSWORD>"})
self.assertEqual(response.status_code, 302)
self.assertEqual(
response.url,
"/restclients/view/upass/MyUWUpass/MyUWUpass.aspx%3Fid=bill")
| 2.28125 | 2 |
examples/cli-solver/cli_solver.py | danagle/boggled | 0 | 5210 |
# cli_solver.py
import argparse
import os

from boggled import BoggleBoard, BoggleSolver, BoggleWords


def solve_board(board, words):
    solver = BoggleSolver(board, words)
    solver.solve()
    return solver


def display_board_details(board):
    print("Board details:")
    print("Columns: ", board.columns)
    print("Rows: ", board.rows)
    s = '\n'
    for pos in board.tiles:
        # pad single-character tiles with an extra space so columns line up
        s += ' ' if len(board.tiles[pos]) == 2 else '  '
        s += board.tiles[pos]
        if (pos % board.columns) == 0:
            s += '\n'
    print(s)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("letters", type=str,
                        help="Board letters")
    parser.add_argument("dictionary", type=str,
                        help="The text file containing the dictionary word list.")
    parser.add_argument("-m", "--min", type=int,
                        help="The minimum word size.")
    parser.add_argument("-p", "--paths", action="store_true",
                        help="Include the path followed for each word found.")
    args = parser.parse_args()

    if os.path.isfile(args.dictionary):
        if isinstance(args.min, int):
            words = BoggleWords(args.min)
        else:
            words = BoggleWords()
        words.loadFromFile(args.dictionary)

        board = BoggleBoard(args.letters)
        display_board_details(board)

        solved_board = solve_board(board, words)

        print('Found:', len(solved_board.found))
        if args.paths:
            for word in solved_board.found:
                print('{} : {}'.format(word, solved_board.found[word]))
        else:
            print(solved_board.foundWords)
    else:
        print("Error: Unable to find the dictionary.")
| 3.21875 | 3 |
src/wepy/orchestration/orchestrator.py | gitter-badger/wepy-1 | 35 | 5211 | from copy import copy, deepcopy
import sqlite3
from hashlib import md5
import time
import os
import os.path as osp
from base64 import b64encode, b64decode
from zlib import compress, decompress
import itertools as it
import logging
# instead of pickle we use dill, so we can save dynamically defined
# classes
import dill
from wepy.sim_manager import Manager
from wepy.orchestration.configuration import Configuration
from wepy.orchestration.snapshot import SimApparatus, SimSnapshot
from wepy.util.kv import KV, SQLITE3_INMEMORY_URI, gen_uri
class OrchestratorError(Exception):
""" """
pass
class Orchestrator():
""" """
# we freeze the pickle protocol for making hashes, because we care
# more about stability than efficiency of newer versions
HASH_PICKLE_PROTOCOL = 3
DEFAULT_WORKDIR = Configuration.DEFAULT_WORKDIR
DEFAULT_CONFIG_NAME = Configuration.DEFAULT_CONFIG_NAME
DEFAULT_NARRATION = Configuration.DEFAULT_NARRATION
DEFAULT_MODE = Configuration.DEFAULT_MODE
DEFAULT_CHECKPOINT_FILENAME = "checkpoint.orch.sqlite"
ORCH_FILENAME_TEMPLATE = "{config}{narration}.orch.sqlite"
# the default way to oepn up the whole parent database
DEFAULT_ORCHESTRATION_MODE = 'x'
# mode to open the individual kv stores on the parent database
KV_MODE = 'r+'
# default timeout for connecting to a database
SQLITE3_DEFAULT_TIMEOUT = 5
# the fields to return (and their order) as a record for a run
# query
RUN_SELECT_FIELDS = ('last_cycle_idx', 'config_hash')
def __init__(self, orch_path=None,
mode='x',
append_only=False,
):
self._mode = mode
self._append_only = append_only
# handle the path and convert to a proper URI for the database
# given the path and the mode
self._db_uri = gen_uri(orch_path, mode)
# run table: start_hash, end_hash, num_cycles, configuration_id
# get a raw connection to the database
self._db = sqlite3.connect(self.db_uri, uri=True,
timeout=self.SQLITE3_DEFAULT_TIMEOUT)
self._closed = False
# set isolation level to autocommit
self._db.isolation_level = None
# we can use read_uncommited only in append_only mode (no
# updates) because you never have to worry about dirty reads
# since you can't update
if self.append_only:
self._db.execute("PRAGMA read_uncommited=1")
# we make a table for the run data, if it doesn't already
# exist
c = self._db.cursor().execute(self.create_run_table_query)
# initialize or open each of the separate KV-stores (tables in
# the same SQLite3 database)
# change the mode for the KV stores since we already created the database
# metadata: default init walkers, default apparatus, default
# configuration
self.metadata_kv = KV(db_url=self.db_uri,
table='meta',
mode='a',
value_types=None,
append_only=self.append_only)
# snapshots
self.snapshot_kv = KV(db_url=self.db_uri,
table='snapshots',
primary_key='snaphash',
value_name='snapshot',
mode='a',
append_only=self.append_only)
# configurations
self.configuration_kv = KV(db_url=self.db_uri,
table='configurations',
primary_key='config_hash',
value_name='config',
mode='a',
append_only=self.append_only)
@property
def mode(self):
return self._mode
@property
def append_only(self):
return self._append_only
def close(self):
if self._closed == True:
raise IOError("The database connection is already closed")
else:
# close all the connections
self.metadata_kv.close()
self.configuration_kv.close()
self.snapshot_kv.close()
self._db.close()
self._closed = True
@property
def db_uri(self):
return self._db_uri
@property
def orch_path(self):
# if it is not an in-memory database we parse off the path and
# return that
if self.db_uri == SQLITE3_INMEMORY_URI:
return None
else:
# URIs have the following form: protocol:url?query
# destructure the URI
_, tail = self.db_uri.split(':')
if len(tail.split('?')) > 1:
url, _ = tail.split('?')
else:
url = tail
return url
@classmethod
def serialize(cls, snapshot):
"""Serialize a snapshot to a compressed, encoded, pickle string
representation.
Currently uses the dill module for pickling because the base
pickle module is inadequate. However, it is mostly compatible
and can be read natively with pickle but this usage is
officially not supported. Instead use the deserialize_snapshot.
Also compresses with default zlib compression and is encoded
in base64.
The object will always have a deepcopy performed on it so that
all of the extraneous references to it are avoided since there
is no (AFAIK) way to make sure all references to an object are
deleted.
NOTE: Perhaps there is a way and that should be done (and
tested) to see if it provides stable pickles (i.e. pickles
that always hash to the same value). To avoid the overhead of
copying large objects.
Parameters
----------
snapshot : SimSnapshot object
The snapshot of the simulation you want to serialize.
Returns
-------
serial_str : str
Serialized string of the snapshot object
"""
serial_str = b64encode(
compress(
dill.dumps(
deepcopy(snapshot),
protocol=cls.HASH_PICKLE_PROTOCOL,
recurse=True)
)
)
return serial_str
# core methods for serializing python objects, used for snapshots,
# apparatuses, configurations, and the initial walker list
@classmethod
def deserialize(cls, serial_str):
"""Deserialize an unencoded string snapshot to an object.
Parameters
----------
serial_str : str
Serialized string of the snapshot object
Returns
-------
snapshot : SimSnapshot object
Simulation snapshot object
"""
return dill.loads(decompress(b64decode(serial_str)))
# defaults getters and setters
def set_default_sim_apparatus(self, sim_apparatus):
# serialize the apparatus and then set it
serial_app = self.serialize(sim_apparatus)
self.metadata_kv['default_sim_apparatus'] = serial_app
def set_default_init_walkers(self, init_walkers):
# serialize the apparatus and then set it
serial_walkers = self.serialize(init_walkers)
self.metadata_kv['default_init_walkers'] = serial_walkers
def set_default_configuration(self, configuration):
# serialize the apparatus and then set it
serial_config = self.serialize(configuration)
config_hash = self.hash_snapshot(serial_config)
self.metadata_kv['default_configuration_hash'] = config_hash
self.configuration_kv[config_hash] = serial_config
def set_default_snapshot(self, snapshot):
snaphash = self.add_snapshot(snapshot)
# then save the hash in the metadata
self.metadata_kv['default_snapshot_hash'] = snaphash
return snaphash
def gen_default_snapshot(self):
# generate the snapshot
sim_start_hash = self.gen_start_snapshot(self.get_default_init_walkers())
# then save the hash in the metadata
self.metadata_kv['default_snapshot_hash'] = sim_start_hash
return sim_start_hash
def get_default_sim_apparatus(self):
return self.deserialize(self.metadata_kv['default_sim_apparatus'])
def get_default_init_walkers(self):
return self.deserialize(self.metadata_kv['default_init_walkers'])
def get_default_configuration(self):
config_hash = self.metadata_kv['default_configuration_hash']
return self.get_configuration(config_hash)
def get_default_configuration_hash(self):
return self.metadata_kv['default_configuration_hash']
def get_default_snapshot(self):
start_hash = self.metadata_kv['default_snapshot_hash']
return self.get_snapshot(start_hash)
def get_default_snapshot_hash(self):
return self.metadata_kv['default_snapshot_hash']
@classmethod
def hash_snapshot(cls, serial_str):
"""
Parameters
----------
serial_str :
Returns
-------
"""
return md5(serial_str).hexdigest()
def get_snapshot(self, snapshot_hash):
"""Returns a copy of a snapshot.
Parameters
----------
snapshot_hash :
Returns
-------
"""
return self.deserialize(self.snapshot_kv[snapshot_hash])
def get_configuration(self, config_hash):
"""Returns a copy of a snapshot.
Parameters
----------
config_hash :
Returns
-------
"""
return self.deserialize(self.configuration_kv[config_hash])
@property
def snapshot_hashes(self):
""" """
# iterate over the snapshot kv
return list(self.snapshot_kv.keys())
@property
def configuration_hashes(self):
""" """
# iterate over the snapshot kv
return list(self.configuration_kv.keys())
def add_snapshot(self, snapshot):
"""
Parameters
----------
snapshot :
Returns
-------
"""
# serialize the snapshot using the protocol for doing so
serialized_snapshot = self.serialize(snapshot)
# get the hash of the snapshot
snaphash = self.hash_snapshot(serialized_snapshot)
# check that the hash is not already in the snapshots
if any([True if snaphash == md5 else False for md5 in self.snapshot_hashes]):
# just skip the rest of the function and return the hash
return snaphash
# save the snapshot in the KV store
self.snapshot_kv[snaphash] = serialized_snapshot
return snaphash
def add_serial_snapshot(self, serial_snapshot):
# get the hash of the snapshot
snaphash = self.hash_snapshot(serial_snapshot)
# check that the hash is not already in the snapshots
if any([True if snaphash == md5 else False for md5 in self.snapshot_hashes]):
# just skip the rest of the function and return the hash
return snaphash
# save the snapshot in the KV store
self.snapshot_kv[snaphash] = serial_snapshot
return snaphash
def gen_start_snapshot(self, init_walkers):
"""
Parameters
----------
init_walkers :
Returns
-------
"""
# make a SimSnapshot object using the initial walkers and
start_snapshot = SimSnapshot(init_walkers, self.get_default_sim_apparatus())
# save the snapshot, and generate its hash
sim_start_md5 = self.add_snapshot(start_snapshot)
return sim_start_md5
@property
def default_snapshot_hash(self):
""" """
return self.metadata_kv['default_snapshot_hash']
@property
def default_snapshot(self):
""" """
return self.get_snapshot(self.default_snapshot_hash)
def snapshot_registered(self, snapshot):
"""Check whether a snapshot is already in the database, based on the
hash of it.
This serializes the snapshot so may be slow.
Parameters
----------
snapshot : SimSnapshot object
The snapshot object you want to query for.
Returns
-------
"""
# serialize and hash the snapshot
snaphash = self.hash_snapshot(self.serialize(snapshot))
# then check it
return self.snapshot_hash_registered(snaphash)
def snapshot_hash_registered(self, snapshot_hash):
"""Check whether a snapshot hash is already in the database.
Parameters
----------
snapshot_hash : str
The string hash of the snapshot.
Returns
-------
"""
if any([True if snapshot_hash == h else False for h in self.snapshot_hashes]):
return True
else:
return False
def configuration_hash_registered(self, config_hash):
"""Check whether a snapshot hash is already in the database.
Parameters
----------
snapshot_hash : str
The string hash of the snapshot.
Returns
-------
"""
if any([True if config_hash == h else False for h in self.configuration_hashes]):
return True
else:
return False
### run methods
def add_configuration(self, configuration):
serialized_config = self.serialize(configuration)
config_hash = self.hash_snapshot(serialized_config)
# check that the hash is not already in the snapshots
if any([True if config_hash == md5 else False for md5 in self.configuration_hashes]):
# just skip the rest of the function and return the hash
return config_hash
# save the snapshot in the KV store
self.configuration_kv[config_hash] = serialized_config
return config_hash
def add_serial_configuration(self, serial_configuration):
# get the hash of the configuration
snaphash = self.hash_snapshot(serial_configuration)
# check that the hash is not already in the configurations
if any([True if snaphash == md5 else False for md5 in self.configuration_hashes]):
# just skip the rest of the function and return the hash
return snaphash
# save the configuration in the KV store
self.configuration_kv[snaphash] = serial_configuration
return snaphash
@property
def create_run_table_query(self):
create_run_table_query = """
CREATE TABLE IF NOT EXISTS runs
(start_hash TEXT NOT NULL,
end_hash TEXT NOT NULL,
config_hash NOT NULL,
last_cycle_idx INTEGER NOT NULL,
PRIMARY KEY (start_hash, end_hash))
"""
return create_run_table_query
@property
def add_run_record_query(self):
add_run_row_query = """
INSERT INTO runs (start_hash, end_hash, config_hash, last_cycle_idx)
VALUES (?, ?, ?, ?)
"""
return add_run_row_query
@property
def update_run_record_query(self):
q = """
UPDATE runs
SET config_hash = ?,
last_cycle_idx = ?
WHERE start_hash=? AND end_hash=?
"""
return q
@property
def delete_run_record_query(self):
q = """
DELETE FROM runs
WHERE start_hash=? AND end_hash=?
"""
return q
def _add_run_record(self, start_hash, end_hash, configuration_hash, cycle_idx):
params = (start_hash, end_hash, configuration_hash, cycle_idx)
# do it as a transaction
c = self._db.cursor()
# run the insert
c.execute(self.add_run_record_query, params)
def _delete_run_record(self, start_hash, end_hash):
params = (start_hash, end_hash)
cursor = self._db.cursor()
cursor.execute(self.delete_run_record_query, params)
def _update_run_record(self, start_hash, end_hash, new_config_hash, new_last_cycle_idx):
params = (new_config_hash, new_last_cycle_idx, start_hash, end_hash)
# do it as a transaction
c = self._db.cursor()
# run the update
c.execute(self.update_run_record_query, params)
def register_run(self, start_hash, end_hash, config_hash, cycle_idx):
"""
Parameters
----------
start_hash :
end_hash :
config_hash :
cycle_idx : int
The cycle of the simulation run the checkpoint was generated for.
Returns
-------
"""
# check that the hashes are for snapshots in the orchestrator
# if one is not registered raise an error
if not self.snapshot_hash_registered(start_hash):
raise OrchestratorError(
"snapshot start_hash {} is not registered with the orchestrator".format(
start_hash))
if not self.snapshot_hash_registered(end_hash):
raise OrchestratorError(
"snapshot end_hash {} is not registered with the orchestrator".format(
end_hash))
if not self.configuration_hash_registered(config_hash):
raise OrchestratorError(
"config hash {} is not registered with the orchestrator".format(
config_hash))
# save the configuration and get it's id
self._add_run_record(start_hash, end_hash, config_hash, cycle_idx)
def get_run_records(self):
get_run_record_query = """
SELECT *
FROM runs
""".format(fields=', '.join(self.RUN_SELECT_FIELDS))
cursor = self._db.cursor()
cursor.execute(get_run_record_query)
records = cursor.fetchall()
return records
def get_run_record(self, start_hash, end_hash):
get_run_record_query = """
SELECT {fields}
FROM runs
WHERE start_hash=? AND end_hash=?
""".format(fields=', '.join(self.RUN_SELECT_FIELDS))
params = (start_hash, end_hash)
cursor = self._db.cursor()
cursor.execute(get_run_record_query, params)
record = cursor.fetchone()
return record
def run_last_cycle_idx(self, start_hash, end_hash):
record = self.get_run_record(start_hash, end_hash)
last_cycle_idx = record[self.RUN_SELECT_FIELDS.index('last_cycle_idx')]
return last_cycle_idx
def run_configuration(self, start_hash, end_hash):
record = self.get_run_record(start_hash, end_hash)
config_hash = record[self.RUN_SELECT_FIELDS.index('config_hash')]
# get the configuration object and deserialize it
return self.deserialize(self.configuration_kv[config_hash])
def run_configuration_hash(self, start_hash, end_hash):
record = self.get_run_record(start_hash, end_hash)
config_hash = record[self.RUN_SELECT_FIELDS.index('config_hash')]
return config_hash
def run_hashes(self):
return [(rec[0], rec[1]) for rec in self.get_run_records()]
def run_continues(self, start_hash, end_hash):
"""Given a start hash and end hash for a run, find the run that this
continues.
Parameters
----------
start_hash :
end_hash :
Returns
-------
run_id
"""
# loop through the runs in this orchestrator until we find one
# where the start_hash matches the end hash
runs = self.run_hashes()
run_idx = 0
while True:
run_start_hash, run_end_hash = runs[run_idx]
# if the start hash of the queried run is the same as the
# end hash for this run we have found it
if start_hash == run_end_hash:
return (run_start_hash, run_end_hash)
run_idx += 1
# if the index is over the number of runs we quit and
# return None as no match
if run_idx >= len(runs):
return None
def _init_checkpoint_db(self, start_hash, configuration, checkpoint_dir, mode='x'):
logging.debug("Initializing checkpoint orch database")
# make the checkpoint with the default filename at the checkpoint directory
checkpoint_path = osp.join(checkpoint_dir, self.DEFAULT_CHECKPOINT_FILENAME)
# create a new database in the mode specified
logging.debug("Creating checkpoint database")
checkpoint_orch = Orchestrator(checkpoint_path, mode=mode)
# add the starting snapshot, bypassing the serialization stuff
logging.debug("Setting the starting snapshot")
checkpoint_orch.snapshot_kv[start_hash] = self.snapshot_kv[start_hash]
# if we have a new configuration at runtime serialize and
# hash it
serialized_config = self.serialize(configuration)
config_hash = self.hash_snapshot(serialized_config)
# save the configuration as well
checkpoint_orch.configuration_kv[config_hash] = serialized_config
checkpoint_orch.close()
logging.debug("closing connection to checkpoint database")
return checkpoint_path, config_hash
def _save_checkpoint(self, checkpoint_snapshot, config_hash,
checkpoint_db_path, cycle_idx,
):
"""
Parameters
----------
checkpoint_snapshot :
config_hash :
checkpoint_db_path :
mode :
(Default value = 'wb')
Returns
-------
"""
# orchestrator wrapper to the db
logging.debug("Opening the checkpoint orch database")
checkpoint_orch = Orchestrator(checkpoint_db_path, mode='r+')
# connection to the db
cursor = checkpoint_orch._db.cursor()
# we replicate the code for adding the snapshot here because
# we want it to occur transactionally the delete and add
# serialize the snapshot using the protocol for doing so
serialized_snapshot = self.serialize(checkpoint_snapshot)
# get the hash of the snapshot
snaphash = self.hash_snapshot(serialized_snapshot)
# the queries for deleting and inserting the new run record
delete_query = """
DELETE FROM runs
WHERE start_hash=?
AND end_hash=?
"""
insert_query = """
INSERT INTO runs (start_hash, end_hash, config_hash, last_cycle_idx)
VALUES (?, ?, ?, ?)
"""
# if there are any runs in the checkpoint orch remove the
# final snapshot
delete_params = None
if len(checkpoint_orch.run_hashes()) > 0:
start_hash, old_checkpoint_hash = checkpoint_orch.run_hashes()[0]
delete_params = (start_hash, old_checkpoint_hash)
else:
start_hash = list(checkpoint_orch.snapshot_kv.keys())[0]
# the config should already be in the orchestrator db
insert_params = (start_hash, snaphash, config_hash, cycle_idx)
# start this whole process as a transaction so we don't get
# something weird in between
logging.debug("Starting transaction for updating run table in checkpoint")
cursor.execute("BEGIN TRANSACTION")
# add the new one, using a special method for setting inside
# of a transaction
logging.debug("setting the new checkpoint snapshot into the KV")
cursor = checkpoint_orch.snapshot_kv.set_in_tx(cursor, snaphash, serialized_snapshot)
logging.debug("finished")
# if we need to delete the old end of the run snapshot and the
# run record for it
if delete_params is not None:
logging.debug("Old run record needs to be removed")
# remove the old run from the run table
logging.debug("Deleting the old run record")
cursor.execute(delete_query, delete_params)
logging.debug("finished")
# register the new run in the run table
logging.debug("Inserting the new run record")
cursor.execute(insert_query, insert_params)
logging.debug("finished")
# end the transaction
logging.debug("Finishing transaction")
cursor.execute("COMMIT")
logging.debug("Transaction committed")
# we do the removal of the old snapshot outside of the
# transaction since it is slow and can cause timeouts to
# occur. Furthermore, it is okay if it is in the checkpoint as
# the run record is what matters as long as the new checkpoint
# is there.
# delete the old snapshot if we need to
if delete_params is not None:
# WARN: occasionally and for unknown reasons we have found
# that the final checkpoint hash is the same as the one
# before. (The case where the last snapshot is on the same
# cycle as a backup is already covered). So as a last
# resort, we check that they don't have the same hash. If
# they do we don't delete it!
if snaphash != old_checkpoint_hash:
logging.debug("Deleting the old snapshot")
del checkpoint_orch.snapshot_kv[old_checkpoint_hash]
logging.debug("finished")
else:
logging.warning("Final snapshot has same hash as the previous checkpoint. Not deleting the previous one.")
checkpoint_orch.close()
logging.debug("closed the checkpoint orch connection")
@staticmethod
def gen_sim_manager(start_snapshot, configuration):
"""
Parameters
----------
start_snapshot :
configuration :
Returns
-------
"""
# construct the sim manager, in a wepy specific way
sim_manager = Manager(start_snapshot.walkers,
runner=start_snapshot.apparatus.filters[0],
boundary_conditions=start_snapshot.apparatus.filters[1],
resampler=start_snapshot.apparatus.filters[2],
# configuration options
work_mapper=configuration.work_mapper,
reporters=configuration.reporters,
sim_monitor=configuration.monitor,
)
return sim_manager
def run_snapshot_by_time(self, start_hash, run_time, n_steps,
checkpoint_freq=None,
checkpoint_dir=None,
configuration=None,
configuration_hash=None,
checkpoint_mode='x'):
"""For a finished run continue it but resetting all the state of the
resampler and boundary conditions
Parameters
----------
start_hash :
run_time :
n_steps :
checkpoint_freq :
(Default value = None)
checkpoint_dir :
(Default value = None)
configuration :
(Default value = None)
configuration_hash :
(Default value = None)
checkpoint_mode :
(Default value = None)
Returns
-------
"""
# you must have a checkpoint dir if you ask for a checkpoint
# frequency
if checkpoint_freq is not None and checkpoint_dir is None:
raise ValueError("Must provide a directory for the checkpoint file "
"is a frequency is specified")
if configuration_hash is not None and configuration is not None:
raise ValueError("Cannot specify both a hash of an existing configuration"
"and provide a runtime configuration")
# if no configuration was specified we use the default one, oth
elif (configuration is None) and (configuration_hash is None):
configuration = self.get_default_configuration()
# if a configuration hash was given only then we retrieve that
# configuration since we must pass configurations to the
# checkpoint DB initialization
elif configuration_hash is not None:
configuration = self.configuration_kv[configuration_hash]
# check that the directory for checkpoints exists, and create
# it if it doesn't and isn't already created
if checkpoint_dir is not None:
checkpoint_dir = osp.realpath(checkpoint_dir)
os.makedirs(checkpoint_dir, exist_ok=True)
# if the checkpoint dir is not specified don't create a
# checkpoint db orch
checkpoint_db_path = None
if checkpoint_dir is not None:
logging.debug("Initialization of checkpoint database is requested")
checkpoint_db_path, configuration_hash = self._init_checkpoint_db(start_hash,
configuration,
checkpoint_dir,
mode=checkpoint_mode)
logging.debug("finished initializing checkpoint database")
# get the snapshot and the configuration to use for the sim_manager
start_snapshot = self.get_snapshot(start_hash)
# generate the simulation manager given the snapshot and the
# configuration
sim_manager = self.gen_sim_manager(start_snapshot, configuration)
# handle and process the optional arguments for running simulation
if 'runner' in configuration.apparatus_opts:
runner_opts = configuration.apparatus_opts['runner']
else:
runner_opts = None
# run the init subroutine for the simulation manager
logging.debug("Running sim_manager.init")
sim_manager.init()
# run each cycle manually creating checkpoints when necessary
logging.debug("Starting run loop")
walkers = sim_manager.init_walkers
cycle_idx = 0
start_time = time.time()
while time.time() - start_time < run_time:
logging.debug("Running cycle {}".format(cycle_idx))
# run the cycle
walkers, filters = sim_manager.run_cycle(
walkers,
n_steps,
cycle_idx,
runner_opts=runner_opts,
)
# check to see if a checkpoint is necessary
if (checkpoint_freq is not None):
if (cycle_idx % checkpoint_freq == 0):
logging.debug("Checkpoint is required for this cycle")
# make the checkpoint snapshot
logging.debug("Generating the simulation snapshot")
checkpoint_snapshot = SimSnapshot(walkers, SimApparatus(filters))
# save the checkpoint (however that is implemented)
logging.debug("saving the checkpoint to the database")
self._save_checkpoint(checkpoint_snapshot,
configuration_hash,
checkpoint_db_path,
cycle_idx)
logging.debug("finished saving the checkpoint to the database")
# increase the cycle index for the next cycle
cycle_idx += 1
logging.debug("Finished the run cycle")
# the cycle index was set for the next cycle which didn't run
# so we decrement it
last_cycle_idx = cycle_idx - 1
logging.debug("Running sim_manager.cleanup")
# run the cleanup subroutine
sim_manager.cleanup()
# run the segment given the sim manager and run parameters
end_snapshot = SimSnapshot(walkers, SimApparatus(filters))
logging.debug("Run finished")
# return the things necessary for saving to the checkpoint if
# that is what is wanted later on
return end_snapshot, configuration_hash, checkpoint_db_path, last_cycle_idx
def orchestrate_snapshot_run_by_time(self, snapshot_hash, run_time, n_steps,
checkpoint_freq=None,
checkpoint_dir=None,
orchestrator_path=None,
configuration=None,
# these can reparametrize the paths
# for both the orchestrator produced
# files as well as the configuration
work_dir=None,
config_name=None,
narration=None,
mode=None,
# extra kwargs will be passed to the
# configuration.reparametrize method
**kwargs):
"""
Parameters
----------
snapshot_hash :
run_time :
n_steps :
checkpoint_freq :
(Default value = None)
checkpoint_dir :
(Default value = None)
orchestrator_path :
(Default value = None)
configuration :
(Default value = None)
# these can reparametrize the paths# for both the orchestrator produced# files as well as the configurationwork_dir :
(Default value = None)
config_name :
(Default value = None)
narration :
(Default value = None)
mode :
(Default value = None)
# extra kwargs will be passed to the# configuration.reparametrize method**kwargs :
Returns
-------
"""
# for writing the orchestration files we set the default mode
# if mode is not given
if mode is None:
# the orchestrator mode is used for pickling the
# orchestrator and so must be in bytes mode
orch_mode = self.DEFAULT_ORCHESTRATION_MODE
# there are two possible uses for the path reparametrizations:
# the configuration and the orchestrator file paths. If both
# of those are explicitly specified by passing in the whole
# configuration object or both of checkpoint_dir,
# orchestrator_path then those reparametrization kwargs will
# not be used. As this is likely not the intention of the user
# we will raise an error. If there is even one use for them no
# error will be raised.
# first check if any reparametrizations were even requested
parametrizations_requested = (True if work_dir is not None else False,
True if config_name is not None else False,
True if narration is not None else False,
True if mode is not None else False,)
# check if there are any available targets for reparametrization
reparametrization_targets = (True if configuration is None else False,
True if checkpoint_dir is None else False,
True if orchestrator_path is None else False)
# if paramatrizations were requested and there are no targets
# we need to raise an error
if any(parametrizations_requested) and not any(reparametrization_targets):
raise OrchestratorError("Reparametrizations were requested but none are possible,"
" due to all possible targets being already explicitly given")
# if any paths were not given and no defaults for path
# parameters we want to fill in the defaults for them. This
# will also fill in any missing parametrizations with defaults
# we do this by just setting the path parameters if they
# aren't set, then later the parametrization targets will be
# tested for if they have been set or not, and if they haven't
# then these will be used to generate paths for them.
if work_dir is None:
work_dir = self.DEFAULT_WORKDIR
if config_name is None:
config_name = self.DEFAULT_CONFIG_NAME
if narration is None:
narration = self.DEFAULT_NARRATION
if mode is None:
mode = self.DEFAULT_MODE
# if no configuration was specified use the default one
if configuration is None:
configuration = self.get_default_configuration()
# reparametrize the configuration with the given path
# parameters and anything else in kwargs. If they are none
# this will have no effect anyhow
logging.debug("Reparametrizing the configuration")
configuration = configuration.reparametrize(work_dir=work_dir,
config_name=config_name,
narration=narration,
mode=mode,
**kwargs)
# make parametric paths for the checkpoint directory and the
# orchestrator pickle to be made, unless they are explicitly given
if checkpoint_dir is None:
# the checkpoint directory will be in the work dir
logging.debug("checkpoint directory defaulted to the work_dir")
checkpoint_dir = work_dir
logging.debug("In the orchestrate run, calling to run_snapshot by time")
# then actually run the simulation with checkpointing. This
# returns the end snapshot and doesn't write out anything to
# orchestrators other than the checkpointing
(end_snapshot, configuration_hash, checkpoint_db_path, last_cycle_idx) =\
self.run_snapshot_by_time(snapshot_hash, run_time, n_steps,
checkpoint_freq=checkpoint_freq,
checkpoint_dir=checkpoint_dir,
configuration=configuration,
checkpoint_mode=orch_mode)
logging.debug("Finished running snapshot by time")
# if the last cycle in the run was a checkpoint skip this step
# of saving a checkpoint
do_final_checkpoint = True
# make sure the checkpoint_freq is defined before testing it
if checkpoint_freq is not None:
if last_cycle_idx % checkpoint_freq == 0:
logging.debug("Last cycle saved a checkpoint, no need to save one")
do_final_checkpoint = False
if do_final_checkpoint:
logging.debug("Saving a final checkpoint for the end of the run")
# now that it is finished we save the final snapshot to the
# checkpoint file. This is done transactionally using the
# SQLite transaction functionality (either succeeds or doesn't
# happen) that way we don't have worry about data integrity
# loss. Here we also don't have to worry about other processes
# interacting with the checkpoint which makes it isolated.
self._save_checkpoint(end_snapshot, configuration_hash,
checkpoint_db_path, last_cycle_idx)
logging.debug("Finished saving the final checkpoint for the run")
# then return the final orchestrator
logging.debug("Getting a connection to that orch to retun")
checkpoint_orch = Orchestrator(checkpoint_db_path,
mode='r+',
append_only=True)
return checkpoint_orch
def reconcile_orchestrators(host_path, *orchestrator_paths):
"""
Parameters
----------
host_path :
*orchestrator_paths :
Returns
-------
"""
if not osp.exists(host_path):
assert len(orchestrator_paths) > 1, \
"If the host path is a new orchestrator, must give at least 2 orchestrators to merge."
# open the host orchestrator at the location which will have all
# of the new things put into it from the other orchestrators. If
# it doesn't already exist it will be created otherwise open
# read-write.
new_orch = Orchestrator(orch_path=host_path,
mode='a',
append_only=True)
# TODO deprecate, if there is no defaults we can't set them since
# the mode is append only, we don't really care about these so
# don't set them, otherwise do some mode logic to figure this out
# and open in write mode and set defaults, then change to append
# only
# # if this is an existing orchestrator copy the default
# # sim_apparatus and init_walkers
# try:
# default_app = new_orch.get_default_sim_apparatus()
# except KeyError:
# # no default apparatus, that is okay
# pass
# else:
# # set it
# new_orch.set_default_sim_apparatus(default_app)
# # same for the initial walkers
# try:
# default_walkers = new_orch.get_default_init_walkers()
# except KeyError:
# # no default apparatus, that is okay
# pass
# else:
# # set it
# new_orch.set_default_sim_apparatus(default_walkers)
for orch_path in orchestrator_paths:
# open it in read-write fail if doesn't exist
orch = Orchestrator(orch_path=orch_path,
mode='r+',
append_only=True)
# add in all snapshots from each orchestrator, by the hash not the
# snapshots themselves, we trust they are correct
for snaphash in orch.snapshot_hashes:
# check that the hash is not already in the snapshots
            if snaphash in new_orch.snapshot_hashes:
# skip it and move on
continue
# if it is not copy it over without deserializing
new_orch.snapshot_kv[snaphash] = orch.snapshot_kv[snaphash]
# add in the configurations for the runs from each
# orchestrator, by the hash not the snapshots themselves, we
# trust they are correct
for run_id in orch.run_hashes():
config_hash = orch.run_configuration_hash(*run_id)
# check that the hash is not already in the snapshots
            if config_hash in new_orch.configuration_hashes:
# skip it and move on
continue
# if it is not set it
new_orch.configuration_kv[config_hash] = orch.configuration_kv[config_hash]
# concatenate the run table with an SQL union from an attached
# database
attached_table_name = "other"
# query to attach the foreign database
attach_query = """
ATTACH '{}' AS {}
""".format(orch_path, attached_table_name)
# query to update the runs tabel with new unique runs
union_query = """
INSERT INTO runs
SELECT * FROM (
SELECT * FROM {}.runs
EXCEPT
SELECT * FROM runs
)
""".format(attached_table_name)
# query to detach the table
detach_query = """
DETACH {}
""".format(attached_table_name)
# then run the queries
cursor = new_orch._db.cursor()
try:
cursor.execute('BEGIN TRANSACTION')
cursor.execute(attach_query)
cursor.execute(union_query)
cursor.execute('COMMIT')
cursor.execute(detach_query)
        except:
            # undo the partial merge and surface the error instead of
            # dropping into a debugger
            cursor.execute('ROLLBACK')
            raise
return new_orch
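# Minimal usage sketch (hypothetical file names, not part of the original
# module): merge two checkpoint databases produced by separate runs into a
# single host orchestrator file.
if __name__ == "__main__":
    merged_orch = reconcile_orchestrators("all_runs.orch.sqlite",
                                          "run0_checkpoint.orch.sqlite",
                                          "run1_checkpoint.orch.sqlite")
    print("Merged snapshot hashes:", list(merged_orch.snapshot_hashes))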
| 1.992188 | 2 |
src/generate_class_specific_samples.py | HesterLim/pytorch-cnn-visualizations | 6,725 | 5212 | """
Created on Thu Oct 26 14:19:44 2017
@author: <NAME> - github.com/utkuozbulak
"""
import os
import numpy as np
import torch
from torch.optim import SGD
from torchvision import models
from misc_functions import preprocess_image, recreate_image, save_image
class ClassSpecificImageGeneration():
"""
Produces an image that maximizes a certain class with gradient ascent
"""
def __init__(self, model, target_class):
self.mean = [-0.485, -0.456, -0.406]
self.std = [1/0.229, 1/0.224, 1/0.225]
self.model = model
self.model.eval()
self.target_class = target_class
# Generate a random image
self.created_image = np.uint8(np.random.uniform(0, 255, (224, 224, 3)))
# Create the folder to export images if not exists
if not os.path.exists('../generated/class_'+str(self.target_class)):
os.makedirs('../generated/class_'+str(self.target_class))
def generate(self, iterations=150):
"""Generates class specific image
Keyword Arguments:
iterations {int} -- Total iterations for gradient ascent (default: {150})
Returns:
np.ndarray -- Final maximally activated class image
"""
initial_learning_rate = 6
for i in range(1, iterations):
# Process image and return variable
self.processed_image = preprocess_image(self.created_image, False)
# Define optimizer for the image
optimizer = SGD([self.processed_image], lr=initial_learning_rate)
# Forward
output = self.model(self.processed_image)
# Target specific class
class_loss = -output[0, self.target_class]
if i % 10 == 0 or i == iterations-1:
print('Iteration:', str(i), 'Loss',
"{0:.2f}".format(class_loss.data.numpy()))
# Zero grads
self.model.zero_grad()
# Backward
class_loss.backward()
# Update image
optimizer.step()
# Recreate image
self.created_image = recreate_image(self.processed_image)
if i % 10 == 0 or i == iterations-1:
# Save image
im_path = '../generated/class_'+str(self.target_class)+'/c_'+str(self.target_class)+'_'+'iter_'+str(i)+'.png'
save_image(self.created_image, im_path)
return self.processed_image
if __name__ == '__main__':
target_class = 130 # Flamingo
pretrained_model = models.alexnet(pretrained=True)
csig = ClassSpecificImageGeneration(pretrained_model, target_class)
csig.generate()
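    # Hypothetical batch extension (not part of the original script): the same
    # generator can be looped over several ImageNet class indices; the indices
    # and iteration count below are assumptions chosen only for illustration.
    for extra_target in (52, 285, 950):
        ClassSpecificImageGeneration(pretrained_model, extra_target).generate(iterations=50)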
| 2.765625 | 3 |
sumo/tools/net/visum_mapDistricts.py | iltempe/osmosi | 0 | 5213 | #!/usr/bin/env python
"""
@file visum_mapDistricts.py
@author <NAME>
@author <NAME>
@date 2007-10-25
@version $Id$
This script reads two SUMO networks, matches their nodes and districts,
and writes the mapped districts together with the corresponding nodes,
edges and connections to XML files.
SUMO, Simulation of Urban MObility; see http://sumo.dlr.de/
Copyright (C) 2008-2017 DLR (http://www.dlr.de/) and contributors
This file is part of SUMO.
SUMO is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import math
from optparse import OptionParser
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import sumolib.net
import netshiftadaptor
def computeDistance(n1, n2):
xd = n1._coord[0] - n2._coord[0]
yd = n1._coord[1] - n2._coord[1]
return math.sqrt(xd * xd + yd * yd)
def relAngle(angle1, angle2):
angle2 -= angle1
if angle2 > 180:
angle2 = (360. - angle2) * -1.
while angle2 < -180:
angle2 = 360 + angle2
return angle2
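# Illustration of the two helpers above (values are examples only):
# computeDistance returns the Euclidean distance between two node coordinates,
# and relAngle folds the difference between two headings into (-180, 180],
# e.g. relAngle(350., 10.) -> 20.0 and relAngle(10., 350.) -> -20.0.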
# initialise
optParser = OptionParser()
optParser.add_option("-v", "--verbose", action="store_true", dest="verbose",
default=False, help="tell me what you are doing")
# i/o
optParser.add_option("-1", "--net1", dest="net1",
help="SUMO network to use (mandatory)", metavar="FILE")
optParser.add_option("-2", "--net2", dest="net2",
help="SUMO network to use (mandatory)", metavar="FILE")
optParser.add_option("-a", "--nodes1", dest="nodes1",
help="The first matching nodes", metavar="NODELIST")
optParser.add_option("-b", "--nodes2", dest="nodes2",
help="The second matching nodes", metavar="NODELIST")
# parse options
(options, args) = optParser.parse_args()
# read networks
if options.verbose:
print("Reading net#1...")
net1 = sumolib.net.readNet(options.net1)
if options.verbose:
print("Reading net#2...")
net2 = sumolib.net.readNet(options.net2)
# reproject the visum net onto the navteq net
adaptor = netshiftadaptor.NetShiftAdaptor(
net1, net2, options.nodes1.split(","), options.nodes2.split(","))
adaptor.reproject(options.verbose)
# build a speed-up grid
xmin = 100000
xmax = -100000
ymin = 100000
ymax = -100000
for n in net1._nodes:
xmin = min(xmin, n._coord[0])
xmax = max(xmax, n._coord[0])
ymin = min(ymin, n._coord[1])
ymax = max(ymax, n._coord[1])
for n in net2._nodes:
xmin = min(xmin, n._coord[0])
xmax = max(xmax, n._coord[0])
ymin = min(ymin, n._coord[1])
ymax = max(ymax, n._coord[1])
xmin = xmin - .1
xmax = xmax + .1
ymin = ymin - .1
ymax = ymax + .1
CELLSIZE = 100
arr1 = []
arr2 = []
for y in range(0, CELLSIZE):
arr1.append([])
arr2.append([])
for x in range(0, CELLSIZE):
arr1[-1].append([])
arr2[-1].append([])
cw = (xmax - xmin) / float(CELLSIZE)
ch = (ymax - ymin) / float(CELLSIZE)
for n in net2._nodes:
cx = (n._coord[0] - xmin) / cw
cy = (n._coord[1] - ymin) / ch
arr1[int(cy)][int(cx)].append(n)
for n in net1._nodes:
cx = (n._coord[0] - xmin) / cw
cy = (n._coord[1] - ymin) / ch
arr2[int(cy)][int(cx)].append(n)
# map
nmap1to2 = {}
nmap2to1 = {}
nodes1 = net2._nodes
nodes2 = net1._nodes
highwayNodes2 = set()
highwaySinks2 = set()
highwaySources2 = set()
urbanNodes2 = set()
for n2 in nodes2:
noIncoming = 0
noOutgoing = 0
for e in n2._outgoing:
if e.getSpeed() > 80. / 3.6 and e.getSpeed() < 99:
highwayNodes2.add(n2)
if e.getSpeed() < 99:
noOutgoing = noOutgoing + 1
for e in n2._incoming:
if e.getSpeed() > 80. / 3.6 and e.getSpeed() < 99:
highwayNodes2.add(n2)
if e.getSpeed() < 99:
noIncoming = noIncoming + 1
if n2 in highwayNodes2:
if noOutgoing == 0:
highwaySinks2.add(n2)
if noIncoming == 0:
highwaySources2.add(n2)
else:
urbanNodes2.add(n2)
print("Found " + str(len(highwaySinks2)) + " highway sinks in net2")
cont = ""
for n in highwaySinks2:
cont = cont + n._id + ", "
print(cont)
cont = ""
print("Found " + str(len(highwaySources2)) + " highway sources in net2")
for n in highwaySources2:
cont = cont + n._id + ", "
print(cont)
fdd = open("dconns.con.xml", "w")
fdd.write("<connections>\n")
highwaySinks1 = set()
highwaySources1 = set()
origDistrictNodes = {}
nnn = {}
for n1 in nodes1:
if n1._id.find('-', 1) < 0:
continue
# if n1._id.find("38208387")<0:
# continue
un1 = None
for e in n1._outgoing:
un1 = e._to
for e in n1._incoming:
un1 = e._from
d = n1._id[:n1._id.find('-', 1)]
if d[0] == '-':
d = d[1:]
if d not in origDistrictNodes:
origDistrictNodes[d] = []
if options.verbose:
print("District: " + d)
isHighwayNode = False
isHighwaySink = False
isHighwaySource = False
noIncoming = 0
noOutgoing = 0
noInConns = 0
noOutConns = 0
for e in un1._outgoing:
if e.getSpeed() > 80. / 3.6 and e.getSpeed() < 99:
isHighwayNode = True
if e.getSpeed() < 99:
noOutgoing = noOutgoing + 1
if e.getSpeed() > 99:
noOutConns = noOutConns + 1
for e in un1._incoming:
if e.getSpeed() > 80. / 3.6 and e.getSpeed() < 99:
isHighwayNode = True
if e.getSpeed() < 99:
noIncoming = noIncoming + 1
if e.getSpeed() > 99:
noInConns = noInConns + 1
if options.verbose:
print("Check", un1._id, noOutgoing, noIncoming)
if isHighwayNode:
if noOutgoing == 0:
highwaySinks1.add(n1)
isHighwaySink = True
if noIncoming == 0:
highwaySources1.add(n1)
isHighwaySource = True
# the next is a hack for bad visum-networks
if noIncoming == 1 and noOutgoing == 1 and noInConns == 1 and noOutConns == 1:
highwaySinks1.add(n1)
isHighwaySink = True
highwaySources1.add(n1)
isHighwaySource = True
best = None
bestDist = -1
check = urbanNodes2
if n1 in highwaySinks1:
check = highwaySinks2
elif n1 in highwaySources1:
check = highwaySources2
elif isHighwayNode:
check = highwayNodes2
for n2 in check:
dist = computeDistance(un1, n2)
if bestDist == -1 or bestDist > dist:
best = n2
bestDist = dist
if best:
nnn[best] = n1
if d not in nmap1to2:
nmap1to2[d] = []
if best not in nmap1to2[d]:
nmap1to2[d].append(best)
if best not in nmap2to1:
nmap2to1[best] = []
if n1 not in nmap2to1[best]:
nmap2to1[best].append(n1)
if options.verbose:
print("a: " + d + "<->" + best._id)
if best not in origDistrictNodes[d]:
origDistrictNodes[d].append(best)
preBest = best
best = None
bestDist = -1
check = []
if n1 in highwaySinks1 or preBest in highwaySinks2:
check = highwaySources2
elif n1 in highwaySources1 or preBest in highwaySources2:
check = highwaySinks2
elif isHighwayNode:
check = highwayNodes2
for n2 in check:
dist = computeDistance(un1, n2)
if (bestDist == -1 or bestDist > dist) and n2 != preBest:
best = n2
bestDist = dist
if best:
nnn[best] = n1
if d not in nmap1to2:
nmap1to2[d] = []
if best not in nmap1to2[d]:
nmap1to2[d].append(best)
if best not in nmap2to1:
nmap2to1[best] = []
if n1 not in nmap2to1[best]:
nmap2to1[best].append(n1)
print("b: " + d + "<->" + best._id)
if best not in origDistrictNodes[d]:
origDistrictNodes[d].append(best)
if options.verbose:
print("Found " + str(len(highwaySinks1)) + " highway sinks in net1")
for n in highwaySinks1:
print(n._id)
print("Found " + str(len(highwaySources1)) + " highway sources in net1")
for n in highwaySources1:
print(n._id)
connectedNodesConnections = {}
for d in nmap1to2:
for n2 in nmap1to2[d]:
if n2 in connectedNodesConnections:
continue
n1i = net1.addNode("i" + n2._id, nnn[n2]._coord)
n1o = net1.addNode("o" + n2._id, nnn[n2]._coord)
haveIncoming = False
incomingLaneNo = 0
for e in n2._incoming:
if e._id[0] != "i" and e._id[0] != "o":
haveIncoming = True
incomingLaneNo = incomingLaneNo + e.getLaneNumber()
haveOutgoing = False
outgoingLaneNo = 0
for e in n2._outgoing:
if e._id[0] != "i" and e._id[0] != "o":
haveOutgoing = True
outgoingLaneNo = outgoingLaneNo + e.getLaneNumber()
if haveIncoming:
e1 = net1.addEdge("o" + n2._id, n2._id, n1o._id, -2)
if haveOutgoing:
net1.addLane(e1, 20, 100.)
else:
for i in range(0, incomingLaneNo):
net1.addLane(e1, 20, 100.)
if len(n2._incoming) == 1:
fdd.write(' <connection from="' + n2._incoming[
0]._id + '" to="' + e1._id + '" lane="' + str(i) + ':' + str(i) + '"/>\n')
if haveOutgoing:
if options.verbose:
print("has outgoing")
e2 = net1.addEdge("i" + n2._id, n1i._id, n2._id, -2)
if haveIncoming:
net1.addLane(e2, 20, 100.)
else:
for i in range(0, outgoingLaneNo):
net1.addLane(e2, 20, 100.)
if len(n2._outgoing) == 1:
fdd.write(' <connection from="' + e2._id + '" to="' +
n2._outgoing[0]._id + '" lane="' + str(i) + ':' + str(i) + '"/>\n')
connectedNodesConnections[n2] = [n1i, n1o]
newDistricts = {}
districtSources = {}
districtSinks = {}
mappedDistrictNodes = {}
connNodes = {}
dRemap = {}
for d in nmap1to2:
newDistricts[d] = []
if len(nmap1to2[d]) == 1:
n = nmap1to2[d][0]
if n in dRemap:
districtSources[d] = districtSources[dRemap[n]]
districtSinks[d] = districtSinks[dRemap[n]]
newDistricts[d] = []
newDistricts[d].append(n._id)
continue
else:
dRemap[n] = d
[ni, no] = connectedNodesConnections[n]
if len(ni._outgoing) > 0:
districtSources[d] = ni._outgoing[0]._id
if len(no._incoming) > 0:
districtSinks[d] = no._incoming[0]._id
fdd.write(' <connection from="' + no._incoming[0]._id + '"/>\n')
else:
incomingLaneNoG = 0
outgoingLaneNoG = 0
for n in nmap1to2[d]:
for e in n._incoming:
if e._id[0] != "i" and e._id[0] != "o":
incomingLaneNoG = incomingLaneNoG + e.getLaneNumber()
for e in n._outgoing:
if e._id[0] != "i" and e._id[0] != "o":
outgoingLaneNoG = outgoingLaneNoG + e.getLaneNumber()
p1 = [0, 0]
p11 = [0, 0]
p12 = [0, 0]
p2 = [0, 0]
for n in nmap1to2[d]:
p1[0] = p1[0] + n._coord[0]
p1[1] = p1[1] + n._coord[1]
p2[0] = p2[0] + nnn[n]._coord[0]
p2[1] = p2[1] + nnn[n]._coord[1]
p2[0] = (p1[0] + p2[0]) / float(len(origDistrictNodes[d]) * 2)
p2[1] = (p1[1] + p2[1]) / float(len(origDistrictNodes[d]) * 2)
dn2i = net1.addNode("cci" + d, p2)
dn2o = net1.addNode("cci" + d, p2)
p11[0] = p1[0] / float(len(origDistrictNodes[d]))
p11[1] = p1[1] / float(len(origDistrictNodes[d]))
dn1o = net1.addNode("co" + d, p11)
e1 = net1.addEdge("co" + d, dn1o._id, dn2o._id, -2)
for i in range(0, incomingLaneNoG):
net1.addLane(e1, 22, 100.)
districtSinks[d] = e1._id
p12[0] = p1[0] / float(len(origDistrictNodes[d]))
p12[1] = p1[1] / float(len(origDistrictNodes[d]))
dn1i = net1.addNode("ci" + d, p12)
e2 = net1.addEdge("ci" + d, dn2i._id, dn1i._id, -2)
for i in range(0, outgoingLaneNoG):
net1.addLane(e2, 21, 100.)
districtSources[d] = e2._id
runningOutLaneNumber = 0
runningInLaneNumber = 0
for n2 in nmap1to2[d]:
[ni, no] = connectedNodesConnections[n2]
print("In: " + ni._id + " " + str(len(ni._incoming)) +
" " + str(len(ni._outgoing)))
print("Out: " + no._id + " " + str(len(no._incoming)) +
" " + str(len(no._outgoing)))
if len(no._incoming) > 0:
incomingLaneNo = 0
for e in n2._incoming:
if e._id[0] != "i" and e._id[0] != "o":
incomingLaneNo = incomingLaneNo + e.getLaneNumber()
e1 = net1.addEdge("o" + d + "#" + n2._id, no._id, dn1o._id, -2)
for i in range(0, incomingLaneNo):
net1.addLane(e1, 19, 100.)
fdd.write(' <connection from="' + "o" + d + "#" + n2._id + '" to="' + dn1o._outgoing[
0]._id + '" lane="' + str(i) + ':' + str(runningOutLaneNumber) + '"/>\n')
runningOutLaneNumber = runningOutLaneNumber + 1
fdd.write(
' <connection from="' + dn1o._outgoing[0]._id + '"/>\n')
if incomingLaneNo == 0:
net1.addLane(e1, 19, 100.)
runningOutLaneNumber = runningOutLaneNumber + 1
if len(ni._outgoing) > 0:
outgoingLaneNo = 0
for e in n2._outgoing:
if e._id[0] != "i" and e._id[0] != "o":
outgoingLaneNo = outgoingLaneNo + e.getLaneNumber()
e2 = net1.addEdge("i" + d + "#" + n2._id, dn1i._id, ni._id, -2)
for i in range(0, outgoingLaneNo):
net1.addLane(e2, 18, 100.)
fdd.write(' <connection from="' + dn1i._incoming[
0]._id + '" to="' + "i" + d + "#" + n2._id + '" lane="' + str(runningInLaneNumber) + ':' + str(i) + '"/>\n')
runningInLaneNumber = runningInLaneNumber + 1
if outgoingLaneNo == 0:
net1.addLane(e2, 18, 100.)
runningInLaneNumber = runningInLaneNumber + 1
fd = open("districts.xml", "w")
fd.write("<tazs>\n")
for d in newDistricts:
fd.write(' <taz id="' + d + '">\n')
if d in districtSources:
fd.write(
' <tazSource id="' + districtSources[d] + '" weight="1"/>\n')
if d in districtSinks:
fd.write(
' <tazSink id="' + districtSinks[d] + '" weight="1"/>\n')
fd.write(' </taz>\n')
fd.write("</tazs>\n")
fd.close()
def writeNode(fd, node):
fd.write(" <node id=\"" + node._id + "\" x=\"" +
str(node._coord[0]) + "\" y=\"" + str(node._coord[1]) + "\"/>\n")
def writeEdge(fd, edge, withGeom=True):
fd.write(" <edge id=\"" + edge._id + "\" fromNode=\"" +
edge._from._id + "\" toNode=\"" + edge._to._id)
fd.write("\" speed=\"" + str(edge._speed))
fd.write("\" priority=\"" + str(edge._priority))
if withGeom:
fd.write("\" spreadType=\"center")
fd.write("\" numLanes=\"" + str(len(edge._lanes)) + "\"")
shape = edge.getShape()
if withGeom:
fd.write(" shape=\"")
for i, c in enumerate(shape):
if i != 0:
fd.write(" ")
fd.write(str(c[0]) + "," + str(c[1]))
fd.write("\"")
fd.write("/>\n")
def writeNodes(net):
fd = open("nodes.xml", "w")
fd.write("<nodes>\n")
for node in net._nodes:
writeNode(fd, node)
fd.write("</nodes>\n")
fd.close()
def writeEdges(net):
fd = open("edges.xml", "w")
fd.write("<edges>\n")
for edge in net._edges:
if edge._id.find("#") > 0 or edge._id.find("c") >= 0 or edge._id.find("i") >= 0:
writeEdge(fd, edge, False)
else:
writeEdge(fd, edge)
fd.write("</edges>\n")
fd.close()
fdd.write("</connections>\n")
writeNodes(net1)
writeEdges(net1)
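# Example invocation (file names and node ids are placeholders):
#   python visum_mapDistricts.py -1 real.net.xml -2 visum.net.xml \
#       -a 101,102,103 -b 201,202,203
# The script then writes nodes.xml, edges.xml, districts.xml and
# dconns.con.xml into the current working directory.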
| 3.265625 | 3 |
BKPMediaDetector.py | bkpifc/BKPMediaDetector | 5 | 5214 | #!/usr/bin/env python3
######
# General Detector
# 06.12.2018 / Last Update: 20.05.2021
# LRB
######
import numpy as np
import os
import sys
import tensorflow as tf
import hashlib
import cv2
import magic
import PySimpleGUI as sg
import csv
import imagehash
import face_recognition
import subprocess
from itertools import groupby
from distutils.version import StrictVersion
from PIL import Image
from datetime import datetime
from time import strftime
from time import gmtime
from multiprocessing import Pool
from Models.Face import detect_face
from pathlib import Path
from openvino.inference_engine import IENetwork, IECore
from AudioAnalysis import audioAnalysis
######
# Worker function to check the input provided via the GUI
#######
def validateInput(gui_input):
error = False
#Validate input
# for element in gui_input[1][0:7]:
# if element == '' or []:
# error = True
if gui_input[0] == "Cancel" or len(gui_input[1][8]) == 0:
error = True
if bool(gui_input[1][5]) == True and gui_input[1][12] == "":
error = True
if error == True:
sg.Popup('You have not populated all required fields. Aborting!', title='Error', button_color=('black', 'red'), background_color=('grey'))
exit()
######
# Worker function to update the progress bar
######
def updateProgressMeter(step, customText):
if sg.OneLineProgressMeter('BKP Media Detector', step, 12, 'key', customText, orientation='h', size=(50, 25)) == False:
exit()
######
# Worker function to prepare and reshape the input images into a Numpy array
# and to calculate the MD5 hashes of them.
######
def load_image_into_numpy_array(image_path):
try:
image_path = str(image_path)
# Open, measure and convert image to RGB channels
image = Image.open(image_path)
(im_width, im_height) = image.size
if int(im_width) < 34 or int(im_height) < 34:
logfile.write("Insufficient file dimensions: " + str(image_path) + "\n")
return None
if int(im_width) > 4512 or int(im_height) > 3008:
maxheight = int(3008)
maxwidth = int(4512)
resize_ratio = min(maxwidth/im_width, maxheight/im_height)
im_width = int(im_width * resize_ratio)
im_height = int(im_height * resize_ratio)
image = image.resize((im_width, im_height))
image = image.convert('RGB')
np_array = np.array(image.getdata()).reshape(
(im_height, im_width, 3)).astype(np.uint8)
image.close()
# Hash the image in byte-chunks of 4096
hash_md5 = hashlib.md5()
with open(image_path, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
f.close()
hashvalue = hash_md5.hexdigest()
return image_path, hashvalue, np_array
#Throw errors to stdout
except IOError or OSError:
magictype = str(magic.from_file((image_path), mime=True))
# If image file cannot be read, check if it is a video
if magictype[:5] == 'video': #or magictype[12:17] == 'octet':
# If so, return a video flag instead of numpy array
flag = "VIDEO"
elif magictype[:5] == 'audio':
flag = "AUDIO"
elif magictype[12:17] == 'octet':
flag = "OCTET"
else:
image_path = "Could not open file: " + str(image_path) + " (" + str(magictype) + ")\n"
flag = "ERROR"
return image_path, flag
except:
magictype = str(magic.from_file((image_path), mime=True))
logfile.write("General error with file: " + str(image_path) + " (" + str(magictype) + ")\n")
def check_video_orientation(image_path):
# Function to check video rotation with ffprobe and return corresponding CV2 rotation code
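    # The returned integer doubles as an OpenCV rotation flag:
    # 0 -> cv2.ROTATE_90_CLOCKWISE, 1 -> cv2.ROTATE_180,
    # 2 -> cv2.ROTATE_90_COUNTERCLOCKWISE, and 3 means "no rotation needed"
    # (the video loader skips cv2.rotate for that value).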
try:
cmnd = ['ffprobe', '-loglevel', 'error', '-select_streams', 'v:0', '-show_entries', 'stream_tags=rotate', '-of',
'default=nw=1:nk=1', image_path]
p = subprocess.Popen(cmnd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
orientation = out.decode('utf-8')
if orientation == '':
rotation = 3
elif int(orientation) == 180:
rotation = 1
elif int(orientation) == 90:
rotation = 0
else:
rotation = 2
return rotation
except:
logfile.write("Cannot determine video rotation: " + str(image_path) + "\n")
######
# Worker function to prepare and reshape the input videos to a Numpy array
# and to calculate their MD5 hashes.
# The function analyzes as many frames per second of video as indicated by the variable "frames_per_second" (default = 0.5)
######
def load_video_into_numpy_array(image_path):
videoframes = []
old_hash = None
# Loading the video via the OpenCV framework
try:
rotation = check_video_orientation(image_path)
vidcap = cv2.VideoCapture(image_path)
im_width = int(vidcap.get(cv2.CAP_PROP_FRAME_WIDTH))
im_height = int(vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT))
# Switch height/width if video is to be rotated 90/270 degrees
if rotation == 0 or rotation == 2:
im_width_new = im_height
im_height_new = im_width
im_width = im_width_new
im_height = im_height_new
# Calculating frames per second, total frame count and analyze rate
fps = int(vidcap.get(cv2.CAP_PROP_FPS))
framecount = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
analyze_rate = int(framecount / fps * frames_per_second)
if 0 < analyze_rate < max_frames_per_video:
            analyze_rate = int(analyze_rate)
elif analyze_rate >= int(max_frames_per_video):
analyze_rate = int(max_frames_per_video) #Limiting maximum frames per video
else:
videoerror = 'Unable to extract frames from video: ' + str(image_path) + '\n'
return videoerror
# Hashing the video once
hash_md5 = hashlib.md5()
with open(image_path, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
hashvalue = hash_md5.hexdigest()
# Extracting the frames from the video
for percentile in range(0, analyze_rate):
vidcap.set(cv2.CAP_PROP_POS_FRAMES, (framecount / analyze_rate) * percentile)
success, extracted_frame = vidcap.read()
if rotation != 3:
extracted_frame = cv2.rotate(extracted_frame, rotation)
extracted_frame = cv2.cvtColor(extracted_frame, cv2.COLOR_BGR2RGB)
timecode = ((framecount / analyze_rate) * percentile) / fps
timecode = str(strftime("%H:%M:%S", gmtime(timecode)))
# And reshape them into a numpy array
np_array = np.array(extracted_frame).reshape(
(im_height, im_width, 3)).astype(np.uint8)
if video_sensitivity > 0:
# Compare the frame with the previous one for similarity, and drop if similar
frame_to_check = Image.fromarray(np_array)
new_hash = imagehash.phash(frame_to_check)
if old_hash is None or (new_hash - old_hash > video_sensitivity):
cluster = str(image_path + ";" + str(timecode)), hashvalue, np_array
videoframes.append(cluster)
old_hash = new_hash
else:
cluster = str(image_path + ";" + str(timecode)), hashvalue, np_array
videoframes.append(cluster)
vidcap.release()
return videoframes
except cv2.error:
videoerror = 'Could not process video: ' + str(image_path) + '\n'
return videoerror
except:
videoerror = 'General error processing video: ' + str(image_path) + '\n'
return videoerror
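# Note: on success the video loader returns a list of
# ("path;timecode", md5, frame array) tuples -- one per analyzed frame --
# whereas any failure returns a single error string, which is why the caller
# type-checks each result before extending the frame list.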
######
# Detection within loaded images with Tensorflow framework
# Creation of output file with hashes, detection scores and class
######
def run_inference_for_multiple_images(image_paths, images, hashvalues):
# Open the results file again
detectionresults_path = PATH_TO_RESULTS / 'Detection_Results.csv'
detectionresults = open(str(detectionresults_path), 'a')
for y in range(0, len(graphlist)):
# Create TF Session with loaded graph
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
logfile.write("*" + str(datetime.now()) + ": \tStarting detection with model " + str(y + 1) + " of " + str(len(graphlist)) + "*\n")
# Update progress indicator
updateProgressMeter(7 + y, 'Detecting with model {}'.format(graphlist[y]))
# Load the respective detetion graph from file
with tf.gfile.GFile(graphlist[y], 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
# Create TF session
with tf.Session() as sess:
# Get handles to input and output tensors
ops = tf.get_default_graph().get_operations()
all_tensor_names = {output.name for op in ops for output in op.outputs}
tensor_dict = {}
for key in [
'num_detections', 'detection_scores', 'detection_classes'
]:
tensor_name = key + ':0'
if tensor_name in all_tensor_names:
tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(
tensor_name)
image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')
# Setting the detection limit of the different models.
if "ISLogo" not in graphlist[y]:
detectionlimit = 0.5
else:
detectionlimit = 0.90
# Loading the label map of the corresponding graph
category_index = indexlist[y]
# Conduct actual detection within single image
for index, image in enumerate(images):
updateProgressMeter(7 + y, str(graphlist[y]) + '\nFile ' + str(index) + ' of ' + str(len(images)))
try:
output_dict = sess.run(tensor_dict,
feed_dict={image_tensor: np.expand_dims(image, 0)})
# all outputs are float32 numpy arrays, so convert types as appropriate
output_dict['num_detections'] = int(output_dict['num_detections'][0])
output_dict['detection_scores'] = output_dict['detection_scores'][0]
detectionhit = output_dict['num_detections']
output_dict['detection_classes'] = output_dict['detection_classes'][0]
hashvalue = hashvalues[index]
image_path = image_paths[index]
# Validate against detection limit (default: 65%) and write hash/score if above
for j in range(detectionhit):
score = output_dict['detection_scores'][j]
category = category_index[output_dict['detection_classes'][j]]
# Validate against the preconfigured minimum detection assurance and write to result file
if (score >= detectionlimit):
scorestring = str(score)
if REPORT_FORMAT[0] == 'Nuix':
line = ",".join([category['name'], "md5:" + hashvalue])
else:
line = ",".join([Path(image_path).name, hashvalue, scorestring, category['name']])
detectionresults.write(line + "\n")
except tf.errors.InvalidArgumentError:
logfile.write("Unable to process file dimensions of file with hash: \t" + str(hashvalue) + "\n")
logfile.write("*" + str(datetime.now()) + ": \tFinished detection with model " + str(y + 1) + "*\n")
detectionresults.flush()
detectionresults.close()
######
# Detect and count faces in loaded images
# Prepare and call age/gender detection once done
######
def faceDetection(image_paths, images, hashvalues):
detectionresults_path = PATH_TO_RESULTS / 'Detection_Results.csv'
detectionresults = open(str(detectionresults_path), 'a')
# Updating progress bar and logfile
updateProgressMeter(10, 'Detecting with Face/Age/Gender Detector')
logfile.write("*" + str(datetime.now()) + ": \tStarting detection with face/age/gender detection model*\n")
# Applying constants as defined in Facenet
minsize = 20
threshold = [0.6, 0.7, 0.7]
factor = 0.709
# Creating different TF Session
with tf.Session() as sess:
# read pnet, rnet, onet models from Models/Face directory
facemodel_path = Path('Models/Face')
pnet, rnet, onet = detect_face.create_mtcnn(sess, str(facemodel_path))
# Helperlists for age/gender detection
facelist = []
imagelist = []
# Inference for all images
for index, image in enumerate(images):
updateProgressMeter(10, 'Detecting with Face/Age/Gender Detector' + '\nFile ' + str(index) + ' of ' + str(len(images)))
try:
bounding_boxes, _ = detect_face.detect_face(image, minsize, pnet, rnet, onet, threshold, factor)
nrof_faces = bounding_boxes.shape[0]
# If a face was detected, go on
if nrof_faces > 0:
detectedFaces = bounding_boxes[:, 0:4]
detectedFacesArray = []
img_size = np.asarray(image.shape)[0:2]
if nrof_faces > 1:
for single_face in range(nrof_faces):
detectedFacesArray.append(np.squeeze(detectedFaces[single_face]))
else:
detectedFacesArray.append(np.squeeze(detectedFaces))
# Crop the detected face and add it to the list to conduct age/gender identification
for x, detectedFaces in enumerate(detectedFacesArray):
detectedFaces = np.squeeze(detectedFaces)
bb = np.zeros(4, dtype=np.int32)
bb[0] = np.maximum(detectedFaces[0], 0)
bb[1] = np.maximum(detectedFaces[1], 0)
bb[2] = np.minimum(detectedFaces[2], img_size[1])
bb[3] = np.minimum(detectedFaces[3], img_size[0])
cropped_Face = image[bb[1]:bb[3], bb[0]:bb[2], :]
facelist.append(cropped_Face)
imagelist.append(index)
# Write the results of the face detection into the resultsfile
if not len(bounding_boxes) == 0:
hashvalue = hashvalues[index]
number_of_faces = len(bounding_boxes)
if REPORT_FORMAT[0] == 'Nuix':
line = "Face,md5:" + hashvalue
else:
line = str(Path(image_paths[index]).name) + "," + str(hashvalue) + ",FACES," + str(
number_of_faces) + "Faces"
detectionresults.write(line + "\n")
except tf.errors.InvalidArgumentError:
                logfile.write("Unable to detect faces in file: \t" + str(image_paths[index]) + "\n")
# Conduct age/gender recognition based on the list of detected & cropped faces
if len(facelist) != 0:
age_gender_detection(imagelist, facelist, hashvalues, image_paths)
logfile.write("*" + str(datetime.now()) + ": \tFinished detection with face/age/gender detection model*\n")
detectionresults.flush()
detectionresults.close()
######
# Detection with the OPEN VINO Framework
# Evaluate Age & Gender based on input faces
######
def age_gender_detection(imagelist, facelist, hashvalues, image_paths):
# Acquire the age-gender detection model
model_path = Path('Models/OpenVINO/age-gender')
model_xml = str(model_path / 'model.xml')
model_bin = str(model_path / 'model.bin')
# Reopen the results file
detectionresults_path = PATH_TO_RESULTS / 'Detection_Results.csv'
detectionresults = open(str(detectionresults_path), 'a')
# Plugin initialization for specified device and load extensions library if specified
ie = IECore()
# Read IR
net = IENetwork(model=model_xml, weights=model_bin)
input_blob = next(iter(net.inputs))
net.batch_size = len(facelist)
# Read and pre-process input images
n, c, h, w = net.inputs[input_blob].shape
images = np.ndarray(shape=(n, c, h, w))
# Loading model to the plugin
exec_net = ie.load_network(network=net, device_name='CPU')
# Resize and reshape input faces
for i in range(n):
image = facelist[i]
if image.shape[:-1] != (62, 62):
h, w = image.shape[:2]
# interpolation method
if h > 62 or w > 62: # shrinking image
interp = cv2.INTER_AREA
else: # stretching image
interp = cv2.INTER_CUBIC
# aspect ratio of image
aspect = w / h
# compute scaling and pad sizing
if aspect > 1: # horizontal image
new_w = 62
new_h = np.round(new_w / aspect).astype(int)
pad_vert = (62 - new_h) / 2
pad_top, pad_bot = np.floor(pad_vert).astype(int), np.ceil(pad_vert).astype(int)
pad_left, pad_right = 0, 0
elif aspect < 1: # vertical image
new_h = 62
new_w = np.round(new_h * aspect).astype(int)
pad_horz = (62 - new_w) / 2
pad_left, pad_right = np.floor(pad_horz).astype(int), np.ceil(pad_horz).astype(int)
pad_top, pad_bot = 0, 0
else: # square image
new_h, new_w = 62, 62
pad_left, pad_right, pad_top, pad_bot = 0, 0, 0, 0
# set pad color
padColor = 0
            if len(image.shape) == 3 and not isinstance(padColor, (
list, tuple, np.ndarray)): # color image but only one color provided
padColor = [padColor] * 3
# scale and pad
scaled_img = cv2.resize(image, (new_w, new_h), interpolation=interp)
scaled_img = cv2.cvtColor(scaled_img, cv2.COLOR_BGR2RGB)
scaled_img = cv2.copyMakeBorder(scaled_img, pad_top, pad_bot, pad_left, pad_right,
borderType=cv2.BORDER_CONSTANT, value=padColor)
image = scaled_img.transpose((2, 0, 1)) # Change data layout from HWC to CHW
images[i] = image
# Conduct inference
res = exec_net.infer(inputs={input_blob: images})
# Process inference results
for y in range(len(facelist)):
probable_age = int(np.squeeze(res['age_conv3'][y]) * 100)
if np.squeeze(res['prob'][y][0]) > 0.5:
gender = "Female"
else:
gender = "Male"
age_gender_combo = str(probable_age) + str(gender)
# Write inference results to resultsfile
hashvalue = hashvalues[imagelist[y]]
if REPORT_FORMAT[0] == 'Nuix':
line = str(age_gender_combo) + ",md5:" + hashvalue
else:
line = str(Path(image_paths[imagelist[y]]).name) + "," + str(hashvalue) + ",AGE-GENDER," + str(
age_gender_combo)
detectionresults.write(line + "\n")
######
# Detection with the OPEN VINO Framework
# Creation of output file with hashes, detection scores and class
######
def run_inference_openvino(image_paths, images, hashvalue):
# Update progress meter and reopen results file
updateProgressMeter(6, 'Detecting with OpenVINO Object Detector')
logfile.write("*" + str(datetime.now()) + ": \tStarting detection with OpenVINO object detection model*\n")
detectionresults_path = PATH_TO_RESULTS / 'Detection_Results.csv'
detectionresults = open(str(detectionresults_path), 'a')
# Fetch paths for openvino model
model_path = Path('Models/OpenVINO/vgg19')
model_xml = str(model_path / 'model.xml')
model_bin = str(model_path / 'model.bin')
model_labels = str(model_path / 'model.labels')
temp_bilder = images
# Plugin initialization for specified device and load extensions library if specified
ie = IECore()
# Read IR
net = IENetwork(model=model_xml, weights=model_bin)
input_blob = next(iter(net.inputs))
out_blob = next(iter(net.outputs))
net.batch_size = 4000
# Read and pre-process input images
n, c, h, w = net.inputs[input_blob].shape
images = np.ndarray(shape=(n, c, h, w))
# Loading model to the plugin
exec_net = ie.load_network(network=net, device_name='CPU')
# Create batches to prevent RAM overload
batches = tuple(temp_bilder[x:x + net.batch_size] for x in range(0, len(temp_bilder), net.batch_size))
# Start sync inference
for batch in batches:
for index, temp_pic in enumerate(batch):
temp_pic = cv2.resize(temp_pic, (w, h))
temp_pic = temp_pic.transpose((2, 0, 1))
images[index] = temp_pic
res = exec_net.infer(inputs={input_blob: images})
# Processing output blob
res = res[out_blob]
# Prepare label file
with open(model_labels, 'r') as f:
labels_map = [x.split(sep=' ', maxsplit=1)[-1].strip() for x in f]
# Clean inference results and write them to resultsfile
for i, probs in enumerate(res):
probs = np.squeeze(probs)
top_ind = np.argsort(probs)[-3:][::-1]
for id in top_ind:
if probs[id] >= 0.3:
# det_label = labels_map[id] if labels_map else "{}".format(id)
det_label = labels_map[id].split(sep=' ', maxsplit=1)[1]
if REPORT_FORMAT[0] == 'Nuix':
line = ",".join([det_label, "md5:" + hashvalue])
else:
line = ",".join([Path(image_paths[i]).name, hashvalue[i], str(probs[id]), str(det_label)])
detectionresults.write(line + "\n")
logfile.write("*" + str(datetime.now()) + ": \tFinished detection with OpenVINO object detection model*\n")
######
# Worker function to load and encode known faces and to compare them against
# the provided input material
######
def faceRecognition(known_faces_path, image_paths, images, hashvalues):
# Update progress bar
updateProgressMeter(5, 'Conducting Face Recognition')
known_face_counter = 0
# Open the results file
detectionresults_path = PATH_TO_RESULTS / 'Detection_Results.csv'
detectionresults = open(str(detectionresults_path), 'a')
OutputPictureFolder = PATH_TO_RESULTS / 'DetectedFaces'
if not OutputPictureFolder.exists(): os.mkdir(str(OutputPictureFolder))
# Initiate array to store known faces
known_face_encodings = []
known_face_names = []
known_faces = Path.iterdir(Path(known_faces_path))
# Create encodings and store them with names
for known_face in known_faces:
known_person_image = face_recognition.load_image_file(known_face)
known_face_encodings.extend(face_recognition.face_encodings(known_person_image))
known_face_names.append(Path(known_face).stem)
logfile.write("*" + str(datetime.now()) + ": \tStarting face recognition with " + str(len(known_face_names)) + " known faces*\n")
# Load images, detect faces, encode and compare them to the known faces
for index, image_to_detect in enumerate(images):
hashvalue = hashvalues[index]
image_path = image_paths[index]
updateProgressMeter(5, 'Face Reco Image ' + str(index) + ' of ' + str(len(images)))
# Use GPU based model to detect & encode
face_locations = face_recognition.face_locations(image_to_detect, model="cnn")
face_encodings = face_recognition.face_encodings(image_to_detect, face_locations)
# Loop through each face in this frame of video
for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):
# See if the face is a match for the known face(s)
matches = face_recognition.compare_faces(known_face_encodings, face_encoding, tolerance=facereq_tolerance)
name = "Unknown"
# Check the face distance and get best match
face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
best_match_index = np.argmin(face_distances)
if matches[best_match_index]:
name = known_face_names[best_match_index]
# If there is a match, write it to the output file
if name != "Unknown":
known_face_counter += 1
if REPORT_FORMAT[0] == 'Nuix':
line = ",".join([name, "md5:" + hashvalue])
else:
line = ",".join([Path(image_path).name, hashvalue, "FACE-Match", name])
detectionresults.write(line + "\n")
if output_detFaces:
# Export detected face with bounding box
cv2.rectangle(image_to_detect, (left, top), (right, bottom), (0, 0, 255), 2)
# Draw a label with a name below the face
cv2.rectangle(image_to_detect, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
font = cv2.FONT_HERSHEY_DUPLEX
cv2.putText(image_to_detect, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
savePath = str(OutputPictureFolder / str(Path(image_path).name)) + '.jpg'
detectedFace = Image.fromarray(image_to_detect)
detectedFace.save(savePath)
logfile.write("*" + str(datetime.now()) + ": \tFace Recognition completed.*\n")
detectionresults.flush()
detectionresults.close()
# Return amount of detected known faces
return known_face_counter
######
# Worker function to conduct speech detection in audio files
# for all audio files detected
######
def audioSpeechDetection(audiolist):
logfile.write("*" + str(datetime.now()) + ": \tStarting audio speech detection*\n")
updateProgressMeter(11, 'Processing Audio Files')
audiocounter = 0
# Open the results file
detectionresults_path = PATH_TO_RESULTS / 'Detection_Results.csv'
detectionresults = open(str(detectionresults_path), 'a')
pool = Pool(maxtasksperchild=100)
result = pool.map(audioAnalysis.segmentSpeechDetection, audiolist, chunksize=10)
pool.close()
# Synchronize after completion
pool.join()
pool.terminate()
result = [x for x in result if x != None]
for processedAudio in result:
speechPercentage, audiopath = processedAudio
        # Skip entries where speech detection returned a flag instead of a float
        if not isinstance(speechPercentage, float):
            logfile.write("Unsupported audio file: " + str(audiopath) + "\n")
        else:
            # Hashing the audio file once
hash_md5 = hashlib.md5()
with open(audiopath, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
hashvalue = hash_md5.hexdigest()
audiocounter += 1
if REPORT_FORMAT[0] == 'Nuix':
if speechPercentage != 0.0:
line = ",".join(["AUDIO-SPEECH", "md5:" + hashvalue])
else:
line = ",".join([Path(audiopath).name, hashvalue, str(speechPercentage), "AUDIO-SPEECH"])
detectionresults.write(line + "\n")
logfile.write("*" + str(datetime.now()) + ": \tAudio speech detection completed.*\n")
detectionresults.flush()
detectionresults.close()
return audiocounter
######
# Split the report file to allow seamless integration into XWays Hash Database per category
######
def createXWaysReport():
detectionresults_path = str(PATH_TO_RESULTS / 'Detection_Results.csv')
xways_folder = PATH_TO_RESULTS / 'XWaysOutput'
if not xways_folder.exists(): os.mkdir(str(xways_folder))
for key, rows in groupby(csv.reader(open(detectionresults_path)),
lambda row: row[3]):
# Replace special characters in categories
if str(key) != 'category':
key = str(key).replace("/","-")
key = str(key).replace(".", "")
key = str(key).replace("(", "")
key = str(key).replace(")", "")
key = key + '.txt'
detectionresults_single_path = xways_folder / key
with open(str(detectionresults_single_path), 'a') as rf:
for row in rows:
rf.write(row[1] + "\n")
rf.flush()
# Get a list of all files in results directory
resultsfiles = os.listdir(str(xways_folder))
# Prepend them with MD5 for seamless import into XWays
for file in resultsfiles:
line = "md5"
if file[-3:] == 'txt' and file != 'Logfile.txt':
with open(str(xways_folder / file), 'r+') as ff:
content = ff.read()
ff.seek(0,0)
ff.write(line.rstrip('\r\n') + '\n' + content)
######
#
# Main program function
# First initiates required parameters and variables, then loads the GUI
# After which the image and video load functions are triggered based on the input parameters
# Finally, the detection is executed and results written to the place requested
#
######
# Prevent execution when externally called
if __name__ == '__main__':
######
# Collecting parameters via GUI
######
sg.ChangeLookAndFeel('Dark')
layout = [[sg.Text('General Settings', font=("Helvetica", 13), text_color='sea green')],
[sg.Text('Please specify the folder holding the media data:')],
[sg.Input(), sg.FolderBrowse('Browse', initial_folder='/home/b/Desktop/TestBilder', button_color=('black', 'grey'))], #Path.home() = Initial folder
[sg.Text('Where shall I place the results?')],
[sg.Input(), sg.FolderBrowse('Browse', initial_folder='/home/b/Desktop/TestResults', button_color=('black', 'grey'))], #Path.home()
[sg.Text('TENSORFLOW DETECTORS')],
[sg.Checkbox('Objects/Persons', size=(15, 2)),
sg.Checkbox('Actions'),
sg.Checkbox('IS Logos'),
sg.Checkbox("Face Recognition")],
[sg.Text('OPEN VINO DETECTORS')],
[sg.Checkbox('Objects-fast', size=(15, 2)),
sg.Checkbox('Faces/Age/Gender')],
[sg.Text('Output Format:'), sg.Listbox(values=('Nuix', 'XWays', 'csv'), size=(29, 3))],
[sg.Text('Video Settings', font=("Helvetica", 13), text_color='sea green')],
[sg.Text('# of frames to be analyzed per Minute:', size=(36, 0))],
[sg.Slider(range=(1, 120), orientation='h', size=(29, 20), default_value=30)],
[sg.Text('Max. # of frames to be analyzed per Video:', size=(36, 0))],
[sg.Slider(range=(1, 500), orientation='h', size=(29, 20), default_value=100)],
[sg.Text('Check for & discard similar frames?'),
sg.InputCombo(('Yes', 'No'), default_value='No', size=(10, 2))],
[sg.Text('Face Recognition', font=("Helvetica", 13), text_color='sea green')],
[sg.Text('Specify folder with known faces (if FaceReq selected): ')],
[sg.Input(), sg.FolderBrowse('Browse', initial_folder='/home/b/Desktop/known', button_color=('black', 'grey'))],
[sg.Text('Specify face recognition tolerance (Default: 60%):', size=(48, 0))],
[sg.Slider(range=(0, 100), orientation='h', size=(29, 20), default_value=60)],
[sg.Checkbox('Output detected faces as jpg', size=(25, 2))],
[sg.Text('Audio Settings', font=("Helvetica", 13), text_color='sea green')],
[sg.Text('AUDIO PROCESSING')],
[sg.Checkbox('Speech Detection', size=(15, 2))],
[sg.OK(button_color=('black', 'sea green')), sg.Cancel(button_color=('black', 'grey'))]]
layout_progress = [[sg.Text('Detection in progress')],
[sg.ProgressBar(12, orientation='h', size=(20, 20), key='progressbar')],
[sg.Cancel()]]
# Render the GUI
gui_input = sg.Window('BKP Media Detector').Layout(layout).Read()
error = False
# Validate input
validateInput(gui_input)
# Initiating progress meter
updateProgressMeter(1, 'Initializing variables & parameters...')
startTime = datetime.now()
# Variable to determine minimum GPU Processor requirement & to disable TF log output
# os.environ['TF_MIN_GPU_MULTIPROCESSOR_COUNT'] = '5'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# Validating TF version
if StrictVersion(tf.__version__) < StrictVersion('1.9.0'):
raise ImportError('Please upgrade your TensorFlow installation to v1.9.* or later!')
# Defining multiple needed variables based on GUI input & adding TF/OpenVINO directory to path
PATH_TO_INPUT = Path(gui_input[1][0])
TEST_IMAGE_PATHS = Path.iterdir(PATH_TO_INPUT)
number_of_input = 0
for elements in Path.iterdir(PATH_TO_INPUT):
number_of_input += 1
PATH_TO_RESULTS = Path(gui_input[1][1])
PATH_TO_OBJECT_DETECTION_DIR = '/home/b/Programs/tensorflow/models/research' # PLACEHOLDER-tobereplacedWithPathtoDirectory
sys.path.append(PATH_TO_OBJECT_DETECTION_DIR)
REPORT_FORMAT = gui_input[1][8]
frames_per_second = gui_input[1][9] / 60
max_frames_per_video = gui_input[1][10]
video_sensitivity_text = gui_input[1][11]
KNOWN_FACES_PATH = gui_input[1][12]
facereq_tolerance = int(gui_input[1][13])/100
output_detFaces = gui_input[1][14]
if video_sensitivity_text == "Yes":
video_sensitivity = 20
else:
video_sensitivity = 0
# Check which models to apply and load their corresponding label maps
from object_detection.utils import label_map_util
graphlist = []
indexlist = []
MODEL1 = bool(gui_input[1][2])
if MODEL1:
OPEN_IMAGES_GRAPH = str(Path('Models/OpenImages/openimages.pb'))
OPEN_IMAGES_LABELS = str(OPEN_IMAGES_GRAPH)[:-3] + '.pbtxt'
OPEN_IMAGES_INDEX = label_map_util.create_category_index_from_labelmap(OPEN_IMAGES_LABELS)
graphlist.append(OPEN_IMAGES_GRAPH)
indexlist.append(OPEN_IMAGES_INDEX)
MODEL2 = bool(gui_input[1][3])
if MODEL2:
AVA_GRAPH = str(Path('Models/AVA/ava.pb'))
AVA_LABELS = str(AVA_GRAPH)[:-3] + '.pbtxt'
AVA_INDEX = label_map_util.create_category_index_from_labelmap(AVA_LABELS)
graphlist.append(AVA_GRAPH)
indexlist.append(AVA_INDEX)
MODEL3 = bool(gui_input[1][4])
if MODEL3:
SPECIAL_DETECTOR_GRAPH = str(Path('Models/ISLogos/islogos.pb'))
SPECIAL_DETECTOR_LABELS = str(SPECIAL_DETECTOR_GRAPH)[:-3] + '.pbtxt'
SPECIAL_DETECTOR_INDEX = label_map_util.create_category_index_from_labelmap(SPECIAL_DETECTOR_LABELS)
graphlist.append(SPECIAL_DETECTOR_GRAPH)
indexlist.append(SPECIAL_DETECTOR_INDEX)
FACE_RECOGNITION = bool(gui_input[1][5])
OPEN_VINO_vgg19 = bool(gui_input[1][6])
FACE_MODEL = bool(gui_input[1][7])
AUDIO_SPEECH_DETECTION = bool(gui_input[1][15])
# Update the progress indicator
updateProgressMeter(2, 'Process started. Loading ' + str(number_of_input) + ' media files...')
# Create logfile
logfile = open(str(PATH_TO_RESULTS / 'Logfile.txt'), 'w')
logfile.write('***DETECTION LOG***\n')
logfile.write("*" + str(datetime.now()) + ': \tProcess started. Loading images...*\n')
# Create resultsfile
detectionresults_path = PATH_TO_RESULTS / 'Detection_Results.csv'
detectionresults = open(str(detectionresults_path), 'w')
if REPORT_FORMAT[0] == 'Nuix':
detectionresults.write("tag,searchterm\n")
else:
detectionresults.write("name,hash,score,category\n")
detectionresults.flush()
detectionresults.close()
# Initiate needed variables
vidlist = []
audiolist = []
final_images = []
errors = []
# Multiprocess the image load function on all CPU cores available
pool = Pool(maxtasksperchild=100)
processed_images = pool.map(load_image_into_numpy_array, TEST_IMAGE_PATHS, chunksize=10)
pool.close()
# Synchronize after completion
pool.join()
pool.terminate()
# Clean the result for None types (where image conversion failed)
processed_images = [x for x in processed_images if x != None]
# Check for the different flags set by mimetype
for processed_image in processed_images:
if str(processed_image[1]) == "VIDEO":
# If present, populate the video list
vidlist.append(processed_image[0])
elif str(processed_image[1]) == "AUDIO":
audiolist.append(processed_image[0])
elif str(processed_image[1]) == "OCTET":
if processed_image[0][-3:] in ["mp4", "mov", "mpg", "avi", "exo", "mkv", "m4v", "ebm"]:
vidlist.append(processed_image[0])
else:
audiolist.append(processed_image[0])
elif str(processed_image[1]) == "ERROR":
errors.append(processed_image[0])
else:
# If not, put it to the final images list
final_images.append(processed_image)
for error in errors:
logfile.write(error)
logfile.flush()
# Count the number of images before adding the videoframes
number_of_images = len(final_images)
# Update the progress indicator
updateProgressMeter(3, 'Loading ' + str(len(vidlist)) + ' Videos...')
# Multiprocess the video load function on all CPU cores available
pool = Pool(maxtasksperchild=10)
videoframes = pool.map(load_video_into_numpy_array, vidlist, chunksize=2)
pool.close()
# Synchronize after completion
pool.join()
pool.terminate()
number_of_videos = 0
# Clean the result for None types (where video conversion failed)
for video in videoframes:
if type(video) is str:
errors.append(video)
if type(video) is list:
final_images.extend(video)
number_of_videos += 1
for error in errors:
logfile.write(error)
logfile.flush()
# Split the result from the loading function into hashes and image arrays
if len(final_images) != 0:
image_path, hashvalues, image_nps = zip(*final_images)
# Update the progress indicator & logfile
updateProgressMeter(4, 'Starting detection of ' + str(len(final_images)) + ' media files')
logfile.write("*" + str(datetime.now()) + ": \tLoading completed. Detecting...*\n")
# Conduct Face Recognition if needed
if FACE_RECOGNITION:
known_face_counter = faceRecognition(KNOWN_FACES_PATH, image_path, image_nps, hashvalues)
# Conduct OpenVino VGG19 Model if needed
if OPEN_VINO_vgg19:
run_inference_openvino(image_path, image_nps, hashvalues)
# Execute all other detection models
if len(final_images) != 0:
run_inference_for_multiple_images(image_path, image_nps, hashvalues)
# Conduct face/age/gender detection
if FACE_MODEL:
faceDetection(image_path, image_nps, hashvalues)
if AUDIO_SPEECH_DETECTION:
audiofiles_processed = audioSpeechDetection(audiolist)
else:
audiofiles_processed = 0
# Check whether an Xways report needs to be created
if REPORT_FORMAT[0] == 'XWays':
createXWaysReport()
# Write process statistics to logfile
logfile.write("*Results:\t\t\t" + str(PATH_TO_RESULTS / 'Detection_Results.csv*\n'))
logfile.write("*Total Amount of Files:\t\t" + str(number_of_input) + " (of which " + str(number_of_images + number_of_videos + audiofiles_processed) + " were processed.)*\n")
logfile.write("*Processed Images:\t\t" + str(number_of_images) + "*\n")
logfile.write("*Processed Videos: \t\t" + str(number_of_videos) + " (analyzed " + str(frames_per_second * 60) + " frames per minute, up to max. 500) with the check for content-based duplicates set to " + video_sensitivity_text + "\n")
logfile.write("*Processed Audio Files:\t\t" + str(audiofiles_processed) + "*\n")
logfile.write("*Applied models:\n")
for y in range(0, len(graphlist)): logfile.write("\t\t\t\t" + graphlist[y] + "\n")
if OPEN_VINO_vgg19: logfile.write("\t\t\t\tOpenVINO Object Detector\n")
if FACE_MODEL: logfile.write("\t\t\t\tFace-Age-Gender Detector\n")
if FACE_RECOGNITION: logfile.write("\t\t\t\tFace Recognition (Known faces detected: " + str(known_face_counter) + ")\n")
logfile.write("*Processing time:\t\t" + str(datetime.now() - startTime) + "*\n")
logfile.write("*Time per processed file:\t" + str((datetime.now() - startTime) / (number_of_images + number_of_videos + audiofiles_processed)) + "*\n")
logfile.flush()
logfile.close()
# Update progress indicator
sg.OneLineProgressMeter('BKP Media Detector', 12, 12, 'key', 'Detection finished',orientation='h',size=(100, 10))
# Deliver final success pop up to user
sg.Popup('The detection was successful',
'The results are placed here:',
'Path: "{}"'.format(str(PATH_TO_RESULTS)))
| 1.992188 | 2 |
src/BruteForce.py | stevenwalton/Retro-Learner | 0 | 5215 | <filename>src/BruteForce.py
import time
import retro
import FrameSkip
import TimeLimit
import Brute
class BruteForce():
def __init__(self,
game='Airstriker-Genesis',
max_episode_steps=4500,
timestep_limit=100_000_000,
state=retro.State.DEFAULT,
scenario=None,
save=False,
savename="best.bk2",
fs_skip=4,
render=False,
time=False,
):
self.game = game
self.max_episode_steps = max_episode_steps
self.timestep_limit = timestep_limit
self.state = state
self.scenario = scenario
self.save=save
self.savename = savename
self.fs_skip=fs_skip
self.render=render
self.time=time
if ".bk2" not in self.savename[-4:]:
self.savename += ".bk2"
self.timesteps = 0
self.best_reward = float('-inf')
self.env = retro.make(game=game,
state=state,
use_restricted_actions=retro.Actions.DISCRETE,
scenario=scenario)
self.env = FrameSkip.Frameskip(self.env, skip=self.fs_skip)
self.env = TimeLimit.TimeLimit(self.env, max_episode_steps=self.max_episode_steps)
def start(self):
brute = Brute.Brute(self.env, max_episode_steps=self.max_episode_steps,render=self.render)
if self.time:
startTime = time.time()
while True:
acts, reward = brute.run()
self.timesteps += len(acts)
if reward > self.best_reward:
print(f"New best reward {reward} from {self.best_reward}")
if self.time:
print(f"Elapsed time {time.time() - startTime}")
self.best_reward = reward
if (self.save):
self.env.unwrapped.record_movie(self.savename)
self.env.reset()
for act in acts:
self.env.step(act)
self.env.unwrapped.stop_record()
if self.timesteps > self.timestep_limit:
print("Timed out")
break
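# Minimal usage sketch (assumes gym-retro and its bundled Airstriker-Genesis
# ROM are available, plus the local FrameSkip/TimeLimit/Brute modules):
if __name__ == "__main__":
    runner = BruteForce(game='Airstriker-Genesis', save=True,
                        savename="best.bk2", render=False, time=True)
    runner.start()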
| 2.859375 | 3 |
tutorials/04-advanced/03-super-resolution-onnx/main.py | yakhyo/PyTorch-Tutorials | 7 | 5216 | import io
import numpy as np
import torch.utils.model_zoo as model_zoo
import torch.onnx
import torch.nn as nn
import torch.nn.init as init
# ================================================================ #
# Building the Model #
# ================================================================ #
class SuperResolutionNet(nn.Module):
def __init__(self, upscale_factor, inplace=False):
super(SuperResolutionNet, self).__init__()
self.relu = nn.ReLU(inplace=inplace)
self.conv1 = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=5, padding=2)
self.conv2 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, padding=1)
self.conv3 = nn.Conv2d(in_channels=64, out_channels=32, kernel_size=3, padding=1)
self.conv4 = nn.Conv2d(in_channels=32, out_channels=upscale_factor ** 2, kernel_size=3, padding=1)
self.pixel_shuffle = nn.PixelShuffle(upscale_factor)
self._initialize_weights()
def forward(self, x):
x = self.relu(self.conv1(x))
x = self.relu(self.conv2(x))
x = self.relu(self.conv3(x))
x = self.pixel_shuffle(self.conv4(x))
return x
def _initialize_weights(self):
init.orthogonal_(self.conv1.weight, init.calculate_gain('relu'))
init.orthogonal_(self.conv2.weight, init.calculate_gain('relu'))
init.orthogonal_(self.conv3.weight, init.calculate_gain('relu'))
init.orthogonal_(self.conv4.weight)
# Creating an instance from SuperResolutionNet
net = SuperResolutionNet(upscale_factor=3)
# ================================================================ #
# Downloading Pretrained Weights #
# ================================================================ #
model_url = 'https://s3.amazonaws.com/pytorch/test_data/export/superres_epoch100-44c6958e.pth'
# Initialize model with the pretrained weights
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
net.load_state_dict(model_zoo.load_url(model_url, map_location=device))
net.eval() # Changing to eval mode to save it onnx format
# onnx input shape: x.shape : (batch_size=1, channel=1, H, W)
# The model expects the Y component of the YCbCr of an image as an input so it has one channel
x = torch.randn(1, 1, 224, 224, requires_grad=True)
torch_out = net(x)
# Export the model to ONNX format
torch.onnx.export(net,                    # model being run
x, # model input (or a tuple for multiple inputs)
"super_resolution.onnx", # where to save the model
export_params=True, # store the trained parameter weights inside the model file
opset_version=10, # the ONNX version to export the model to
do_constant_folding=True, # whether to execute constant folding for optimization
input_names=['input'], # the model's input names
output_names=['output'], # the model's output names
dynamic_axes={'input': {0: 'batch_size'}, # variable length axes
'output': {0: 'batch_size'}})
# ================================================================ #
# Loading ONNX model #
# ================================================================ #
import onnx
import onnxruntime
onnx_model = onnx.load("super_resolution.onnx")
onnx.checker.check_model(onnx_model)
ort_session = onnxruntime.InferenceSession("super_resolution.onnx")
def to_numpy(tensor):
return tensor.detach().cpu().numpy() if tensor.requires_grad else tensor.cpu().numpy()
# compute ONNX Runtime output prediction
ort_inputs = {ort_session.get_inputs()[0].name: to_numpy(x)}
ort_outs = ort_session.run(None, ort_inputs)
# compare ONNX Runtime and PyTorch results
np.testing.assert_allclose(to_numpy(torch_out), ort_outs[0], rtol=1e-03, atol=1e-05)
print("Exported model has been tested with ONNXRuntime, and the result looks good!")
# ================================================================ #
# Reading Original Image and Feed it to Model #
# ================================================================ #
from PIL import Image
import torchvision.transforms as transforms
img = Image.open("../../../cat_224x224.jpg")
resize = transforms.Resize([224, 224])
img = resize(img)
# The model expects the Y component of the YCbCr of an image as an input
img_ycbcr = img.convert('YCbCr')
img_y, img_cb, img_cr = img_ycbcr.split()
to_tensor = transforms.ToTensor()
img_y = to_tensor(img_y)
img_y.unsqueeze_(0)
ort_inputs = {ort_session.get_inputs()[0].name: to_numpy(img_y)}
ort_outs = ort_session.run(None, ort_inputs)
img_out_y = ort_outs[0]
img_out_y = Image.fromarray(np.uint8((img_out_y[0] * 255.0).clip(0, 255)[0]), mode='L')
# get the output image follow post-processing step from PyTorch implementation
output = Image.merge(
"YCbCr",
[img_out_y, img_cb.resize(img_out_y.size, Image.BICUBIC), img_cr.resize(img_out_y.size, Image.BICUBIC), ]
).convert("RGB")
# Save the image, we will compare this with the output image from mobile device
output.save("../../../cat_superres_with_ort.jpg")
| 2.46875 | 2 |
features/steps/section.py | revvsales/python-docx-1 | 3,031 | 5217 | <reponame>revvsales/python-docx-1<filename>features/steps/section.py
# encoding: utf-8
"""
Step implementations for section-related features
"""
from __future__ import absolute_import, print_function, unicode_literals
from behave import given, then, when
from docx import Document
from docx.enum.section import WD_ORIENT, WD_SECTION
from docx.section import Section
from docx.shared import Inches
from helpers import test_docx
# given ====================================================
@given("a Section object as section")
def given_a_Section_object_as_section(context):
context.section = Document(test_docx("sct-section-props")).sections[-1]
@given("a Section object {with_or_without} a distinct first-page header as section")
def given_a_Section_object_with_or_without_first_page_header(context, with_or_without):
section_idx = {"with": 1, "without": 0}[with_or_without]
context.section = Document(test_docx("sct-first-page-hdrftr")).sections[section_idx]
@given('a section collection containing 3 sections')
def given_a_section_collection_containing_3_sections(context):
document = Document(test_docx('doc-access-sections'))
context.sections = document.sections
@given('a section having known page dimension')
def given_a_section_having_known_page_dimension(context):
document = Document(test_docx('sct-section-props'))
context.section = document.sections[-1]
@given('a section having known page margins')
def given_a_section_having_known_page_margins(context):
document = Document(test_docx('sct-section-props'))
context.section = document.sections[0]
@given('a section having start type {start_type}')
def given_a_section_having_start_type(context, start_type):
section_idx = {
'CONTINUOUS': 0,
'NEW_PAGE': 1,
'ODD_PAGE': 2,
'EVEN_PAGE': 3,
'NEW_COLUMN': 4,
}[start_type]
document = Document(test_docx('sct-section-props'))
context.section = document.sections[section_idx]
@given('a section known to have {orientation} orientation')
def given_a_section_having_known_orientation(context, orientation):
section_idx = {
'landscape': 0,
'portrait': 1
}[orientation]
document = Document(test_docx('sct-section-props'))
context.section = document.sections[section_idx]
# when =====================================================
@when("I assign {bool_val} to section.different_first_page_header_footer")
def when_I_assign_value_to_section_different_first_page_hdrftr(context, bool_val):
context.section.different_first_page_header_footer = eval(bool_val)
@when('I set the {margin_side} margin to {inches} inches')
def when_I_set_the_margin_side_length(context, margin_side, inches):
prop_name = {
'left': 'left_margin',
'right': 'right_margin',
'top': 'top_margin',
'bottom': 'bottom_margin',
'gutter': 'gutter',
'header': 'header_distance',
'footer': 'footer_distance',
}[margin_side]
new_value = Inches(float(inches))
setattr(context.section, prop_name, new_value)
@when('I set the section orientation to {orientation}')
def when_I_set_the_section_orientation(context, orientation):
new_orientation = {
'WD_ORIENT.PORTRAIT': WD_ORIENT.PORTRAIT,
'WD_ORIENT.LANDSCAPE': WD_ORIENT.LANDSCAPE,
'None': None,
}[orientation]
context.section.orientation = new_orientation
@when('I set the section page height to {y} inches')
def when_I_set_the_section_page_height_to_y_inches(context, y):
context.section.page_height = Inches(float(y))
@when('I set the section page width to {x} inches')
def when_I_set_the_section_page_width_to_x_inches(context, x):
context.section.page_width = Inches(float(x))
@when('I set the section start type to {start_type}')
def when_I_set_the_section_start_type_to_start_type(context, start_type):
new_start_type = {
'None': None,
'CONTINUOUS': WD_SECTION.CONTINUOUS,
'EVEN_PAGE': WD_SECTION.EVEN_PAGE,
'NEW_COLUMN': WD_SECTION.NEW_COLUMN,
'NEW_PAGE': WD_SECTION.NEW_PAGE,
'ODD_PAGE': WD_SECTION.ODD_PAGE,
}[start_type]
context.section.start_type = new_start_type
# then =====================================================
@then('I can access a section by index')
def then_I_can_access_a_section_by_index(context):
sections = context.sections
for idx in range(3):
section = sections[idx]
assert isinstance(section, Section)
@then('I can iterate over the sections')
def then_I_can_iterate_over_the_sections(context):
sections = context.sections
actual_count = 0
for section in sections:
actual_count += 1
assert isinstance(section, Section)
assert actual_count == 3
@then('len(sections) is 3')
def then_len_sections_is_3(context):
sections = context.sections
assert len(sections) == 3, (
'expected len(sections) of 3, got %s' % len(sections)
)
@then("section.different_first_page_header_footer is {bool_val}")
def then_section_different_first_page_header_footer_is(context, bool_val):
actual = context.section.different_first_page_header_footer
expected = eval(bool_val)
assert actual == expected, (
"section.different_first_page_header_footer is %s" % actual
)
@then("section.even_page_footer is a _Footer object")
def then_section_even_page_footer_is_a_Footer_object(context):
actual = type(context.section.even_page_footer).__name__
expected = "_Footer"
assert actual == expected, "section.even_page_footer is a %s object" % actual
@then("section.even_page_header is a _Header object")
def then_section_even_page_header_is_a_Header_object(context):
actual = type(context.section.even_page_header).__name__
expected = "_Header"
assert actual == expected, "section.even_page_header is a %s object" % actual
@then("section.first_page_footer is a _Footer object")
def then_section_first_page_footer_is_a_Footer_object(context):
actual = type(context.section.first_page_footer).__name__
expected = "_Footer"
assert actual == expected, "section.first_page_footer is a %s object" % actual
@then("section.first_page_header is a _Header object")
def then_section_first_page_header_is_a_Header_object(context):
actual = type(context.section.first_page_header).__name__
expected = "_Header"
assert actual == expected, "section.first_page_header is a %s object" % actual
@then("section.footer is a _Footer object")
def then_section_footer_is_a_Footer_object(context):
actual = type(context.section.footer).__name__
expected = "_Footer"
assert actual == expected, "section.footer is a %s object" % actual
@then("section.header is a _Header object")
def then_section_header_is_a_Header_object(context):
actual = type(context.section.header).__name__
expected = "_Header"
assert actual == expected, "section.header is a %s object" % actual
@then("section.{propname}.is_linked_to_previous is True")
def then_section_hdrftr_prop_is_linked_to_previous_is_True(context, propname):
actual = getattr(context.section, propname).is_linked_to_previous
expected = True
assert actual == expected, (
"section.%s.is_linked_to_previous is %s" % (propname, actual)
)
@then('the reported {margin_side} margin is {inches} inches')
def then_the_reported_margin_is_inches(context, margin_side, inches):
prop_name = {
'left': 'left_margin',
'right': 'right_margin',
'top': 'top_margin',
'bottom': 'bottom_margin',
'gutter': 'gutter',
'header': 'header_distance',
'footer': 'footer_distance',
}[margin_side]
expected_value = Inches(float(inches))
actual_value = getattr(context.section, prop_name)
assert actual_value == expected_value
@then('the reported page orientation is {orientation}')
def then_the_reported_page_orientation_is_orientation(context, orientation):
expected_value = {
'WD_ORIENT.LANDSCAPE': WD_ORIENT.LANDSCAPE,
'WD_ORIENT.PORTRAIT': WD_ORIENT.PORTRAIT,
}[orientation]
assert context.section.orientation == expected_value
@then('the reported page width is {x} inches')
def then_the_reported_page_width_is_width(context, x):
assert context.section.page_width == Inches(float(x))
@then('the reported page height is {y} inches')
def then_the_reported_page_height_is_11_inches(context, y):
assert context.section.page_height == Inches(float(y))
@then('the reported section start type is {start_type}')
def then_the_reported_section_start_type_is_type(context, start_type):
expected_start_type = {
'CONTINUOUS': WD_SECTION.CONTINUOUS,
'EVEN_PAGE': WD_SECTION.EVEN_PAGE,
'NEW_COLUMN': WD_SECTION.NEW_COLUMN,
'NEW_PAGE': WD_SECTION.NEW_PAGE,
'ODD_PAGE': WD_SECTION.ODD_PAGE,
}[start_type]
assert context.section.start_type == expected_start_type
| 2.296875 | 2 |
scipy/sparse/csgraph/_laplacian.py | seberg/scipy | 1 | 5218 | """
Laplacian of a compressed-sparse graph
"""
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# License: BSD
import numpy as np
from scipy.sparse import isspmatrix, coo_matrix
###############################################################################
# Graph laplacian
def laplacian(csgraph, normed=False, return_diag=False):
""" Return the Laplacian matrix of a directed graph.
For non-symmetric graphs the out-degree is used in the computation.
Parameters
----------
csgraph : array_like or sparse matrix, 2 dimensions
compressed-sparse graph, with shape (N, N).
normed : bool, optional
If True, then compute normalized Laplacian.
return_diag : bool, optional
If True, then return diagonal as well as laplacian.
Returns
-------
lap : ndarray
The N x N laplacian matrix of graph.
diag : ndarray
The length-N diagonal of the laplacian matrix.
diag is returned only if return_diag is True.
Notes
-----
The Laplacian matrix of a graph is sometimes referred to as the
"Kirchoff matrix" or the "admittance matrix", and is useful in many
parts of spectral graph theory. In particular, the eigen-decomposition
of the laplacian matrix can give insight into many properties of the graph.
For non-symmetric directed graphs, the laplacian is computed using the
out-degree of each node.
Examples
--------
>>> from scipy.sparse import csgraph
>>> G = np.arange(5) * np.arange(5)[:, np.newaxis]
>>> G
array([[ 0, 0, 0, 0, 0],
[ 0, 1, 2, 3, 4],
[ 0, 2, 4, 6, 8],
[ 0, 3, 6, 9, 12],
[ 0, 4, 8, 12, 16]])
>>> csgraph.laplacian(G, normed=False)
array([[ 0, 0, 0, 0, 0],
[ 0, 9, -2, -3, -4],
[ 0, -2, 16, -6, -8],
[ 0, -3, -6, 21, -12],
[ 0, -4, -8, -12, 24]])
"""
if csgraph.ndim != 2 or csgraph.shape[0] != csgraph.shape[1]:
raise ValueError('csgraph must be a square matrix or array')
    if normed and (np.issubdtype(csgraph.dtype, np.signedinteger)
                   or np.issubdtype(csgraph.dtype, np.unsignedinteger)):
        csgraph = csgraph.astype(np.float64)
if isspmatrix(csgraph):
return _laplacian_sparse(csgraph, normed=normed,
return_diag=return_diag)
else:
return _laplacian_dense(csgraph, normed=normed,
return_diag=return_diag)
def _laplacian_sparse(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
if not graph.format == 'coo':
lap = (-graph).tocoo()
else:
lap = -graph.copy()
diag_mask = (lap.row == lap.col)
if not diag_mask.sum() == n_nodes:
# The sparsity pattern of the matrix has holes on the diagonal,
# we need to fix that
diag_idx = lap.row[diag_mask]
diagonal_holes = list(set(range(n_nodes)).difference(
diag_idx))
new_data = np.concatenate([lap.data, np.ones(len(diagonal_holes))])
new_row = np.concatenate([lap.row, diagonal_holes])
new_col = np.concatenate([lap.col, diagonal_holes])
lap = coo_matrix((new_data, (new_row, new_col)), shape=lap.shape)
diag_mask = (lap.row == lap.col)
lap.data[diag_mask] = 0
w = -np.asarray(lap.sum(axis=1)).squeeze()
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap.data /= w[lap.row]
lap.data /= w[lap.col]
lap.data[diag_mask] = (1 - w_zeros[lap.row[diag_mask]]).astype(lap.data.dtype)
else:
lap.data[diag_mask] = w[lap.row[diag_mask]]
if return_diag:
return lap, w
return lap
def _laplacian_dense(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
lap = -np.asarray(graph) # minus sign leads to a copy
# set diagonal to zero
lap.flat[::n_nodes + 1] = 0
w = -lap.sum(axis=0)
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap /= w
lap /= w[:, np.newaxis]
lap.flat[::n_nodes + 1] = 1 - w_zeros
else:
lap.flat[::n_nodes + 1] = w
if return_diag:
return lap, w
return lap
| 3.453125 | 3 |
zilean/system/zilean_migrator.py | A-Hilaly/zilean | 0 | 5219 | <reponame>A-Hilaly/zilean<gh_stars>0
from .utils.migrations import (migrate_database_from,
migrate_machine_from,
zilean_rollback_database_backup,
zilean_rollback_machine_backup)
class ZileanMigrator(object):
pass
| 1.195313 | 1 |
coltran/run.py | DionysisChristopoulos/google-research | 23,901 | 5220 | <filename>coltran/run.py
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ColTran: Training and Continuous Evaluation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import os
import time
from absl import app
from absl import flags
from absl import logging
from ml_collections import config_flags
import tensorflow as tf
import tensorflow_datasets as tfds
from coltran import datasets
from coltran.models import colorizer
from coltran.models import upsampler
from coltran.utils import train_utils
# pylint: disable=g-direct-tensorflow-import
# pylint: disable=missing-docstring
# pylint: disable=not-callable
# pylint: disable=g-long-lambda
flags.DEFINE_enum('mode', 'train', [
'train', 'eval_train', 'eval_valid', 'eval_test'], 'Operation mode.')
flags.DEFINE_string('logdir', '/tmp/svt', 'Main directory for logs.')
flags.DEFINE_string('master', 'local',
'BNS name of the TensorFlow master to use.')
flags.DEFINE_enum('accelerator_type', 'GPU', ['CPU', 'GPU', 'TPU'],
'Hardware type.')
flags.DEFINE_enum('dataset', 'imagenet', ['imagenet', 'custom'], 'Dataset')
flags.DEFINE_string('data_dir', None, 'Data directory for custom images.')
flags.DEFINE_string('tpu_worker_name', 'tpu_worker', 'Name of the TPU worker.')
flags.DEFINE_string(
'pretrain_dir', None, 'Finetune from a pretrained checkpoint.')
flags.DEFINE_string('summaries_log_dir', 'summaries', 'Summaries parent.')
flags.DEFINE_integer('steps_per_summaries', 100, 'Steps per summaries.')
flags.DEFINE_integer('devices_per_worker', 1, 'Number of devices per worker.')
flags.DEFINE_integer('num_workers', 1, 'Number of workers.')
config_flags.DEFINE_config_file(
'config',
default='test_configs/colorizer.py',
help_string='Training configuration file.')
FLAGS = flags.FLAGS
def restore_checkpoint(model, ema, strategy, latest_ckpt=None, optimizer=None):
if optimizer is None:
ckpt_func = functools.partial(
train_utils.create_checkpoint, models=model, ema=ema)
else:
ckpt_func = functools.partial(
train_utils.create_checkpoint, models=model, ema=ema,
optimizer=optimizer)
checkpoint = train_utils.with_strategy(ckpt_func, strategy)
if latest_ckpt:
logging.info('Restoring from pretrained directory: %s', latest_ckpt)
train_utils.with_strategy(lambda: checkpoint.restore(latest_ckpt), strategy)
return checkpoint
def is_tpu():
return FLAGS.accelerator_type == 'TPU'
def loss_on_batch(inputs, model, config, training=False):
"""Loss on a batch of inputs."""
logits, aux_output = model.get_logits(
inputs_dict=inputs, train_config=config, training=training)
loss, aux_loss_dict = model.loss(
targets=inputs, logits=logits, train_config=config, training=training,
aux_output=aux_output)
loss_factor = config.get('loss_factor', 1.0)
loss_dict = collections.OrderedDict()
loss_dict['loss'] = loss
total_loss = loss_factor * loss
for aux_key, aux_loss in aux_loss_dict.items():
aux_loss_factor = config.get(f'{aux_key}_loss_factor', 1.0)
loss_dict[aux_key] = aux_loss
total_loss += aux_loss_factor * aux_loss
loss_dict['total_loss'] = total_loss
extra_info = collections.OrderedDict([
('scalar', loss_dict),
])
return total_loss, extra_info
def train_step(config,
model,
optimizer,
metrics,
ema=None,
strategy=None):
"""Training StepFn."""
def step_fn(inputs):
"""Per-Replica StepFn."""
with tf.GradientTape() as tape:
loss, extra = loss_on_batch(inputs, model, config, training=True)
scaled_loss = loss
if strategy:
scaled_loss /= float(strategy.num_replicas_in_sync)
grads = tape.gradient(scaled_loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
for metric_key, metric in metrics.items():
metric.update_state(extra['scalar'][metric_key])
if ema is not None:
ema.apply(model.trainable_variables)
return loss
return train_utils.step_with_strategy(step_fn, strategy)
def build(config, batch_size, is_train=False):
optimizer = train_utils.build_optimizer(config)
ema_vars = []
downsample = config.get('downsample', False)
downsample_res = config.get('downsample_res', 64)
h, w = config.resolution
if config.model.name == 'coltran_core':
if downsample:
h, w = downsample_res, downsample_res
zero = tf.zeros((batch_size, h, w, 3), dtype=tf.int32)
model = colorizer.ColTranCore(config.model)
model(zero, training=is_train)
c = 1 if is_train else 3
if config.model.name == 'color_upsampler':
if downsample:
h, w = downsample_res, downsample_res
zero_slice = tf.zeros((batch_size, h, w, c), dtype=tf.int32)
zero = tf.zeros((batch_size, h, w, 3), dtype=tf.int32)
model = upsampler.ColorUpsampler(config.model)
model(zero, inputs_slice=zero_slice, training=is_train)
elif config.model.name == 'spatial_upsampler':
zero_slice = tf.zeros((batch_size, h, w, c), dtype=tf.int32)
zero = tf.zeros((batch_size, h, w, 3), dtype=tf.int32)
model = upsampler.SpatialUpsampler(config.model)
model(zero, inputs_slice=zero_slice, training=is_train)
ema_vars = model.trainable_variables
ema = train_utils.build_ema(config, ema_vars)
return model, optimizer, ema
###############################################################################
## Train.
###############################################################################
def train(logdir):
config = FLAGS.config
steps_per_write = FLAGS.steps_per_summaries
train_utils.write_config(config, logdir)
strategy, batch_size = train_utils.setup_strategy(
config, FLAGS.master,
FLAGS.devices_per_worker, FLAGS.mode, FLAGS.accelerator_type)
def input_fn(input_context=None):
read_config = None
if input_context is not None:
read_config = tfds.ReadConfig(input_context=input_context)
dataset = datasets.get_dataset(
name=FLAGS.dataset,
config=config,
batch_size=config.batch_size,
subset='train',
read_config=read_config,
data_dir=FLAGS.data_dir)
return dataset
# DATASET CREATION.
logging.info('Building dataset.')
train_dataset = train_utils.dataset_with_strategy(input_fn, strategy)
data_iterator = iter(train_dataset)
# MODEL BUILDING
logging.info('Building model.')
model, optimizer, ema = train_utils.with_strategy(
lambda: build(config, batch_size, True), strategy)
model.summary(120, print_fn=logging.info)
# METRIC CREATION.
metrics = {}
metric_keys = ['loss', 'total_loss']
metric_keys += model.metric_keys
for metric_key in metric_keys:
func = functools.partial(tf.keras.metrics.Mean, metric_key)
curr_metric = train_utils.with_strategy(func, strategy)
metrics[metric_key] = curr_metric
# CHECKPOINTING LOGIC.
if FLAGS.pretrain_dir is not None:
pretrain_ckpt = tf.train.latest_checkpoint(FLAGS.pretrain_dir)
assert pretrain_ckpt
# Load the entire model without the optimizer from the checkpoints.
restore_checkpoint(model, ema, strategy, pretrain_ckpt, optimizer=None)
# New tf.train.Checkpoint instance with a reset optimizer.
checkpoint = restore_checkpoint(
model, ema, strategy, latest_ckpt=None, optimizer=optimizer)
else:
latest_ckpt = tf.train.latest_checkpoint(logdir)
checkpoint = restore_checkpoint(
model, ema, strategy, latest_ckpt, optimizer=optimizer)
checkpoint = tf.train.CheckpointManager(
checkpoint, directory=logdir, checkpoint_name='model', max_to_keep=10)
if optimizer.iterations.numpy() == 0:
checkpoint_name = checkpoint.save()
logging.info('Saved checkpoint to %s', checkpoint_name)
train_summary_dir = os.path.join(logdir, 'train_summaries')
writer = tf.summary.create_file_writer(train_summary_dir)
start_time = time.time()
logging.info('Start Training.')
# This hack of wrapping up multiple train steps with a tf.function call
# speeds up training significantly.
# See: https://www.tensorflow.org/guide/tpu#improving_performance_by_multiple_steps_within_tffunction # pylint: disable=line-too-long
@tf.function
def train_multiple_steps(iterator, steps_per_epoch):
train_step_f = train_step(config, model, optimizer, metrics, ema,
strategy)
for _ in range(steps_per_epoch):
train_step_f(iterator)
while optimizer.iterations.numpy() < config.get('max_train_steps', 1000000):
num_train_steps = optimizer.iterations
for metric_key in metric_keys:
metrics[metric_key].reset_states()
start_run = time.time()
train_multiple_steps(data_iterator, tf.convert_to_tensor(steps_per_write))
steps_per_sec = steps_per_write / (time.time() - start_run)
with writer.as_default():
for metric_key, metric in metrics.items():
metric_np = metric.result().numpy()
tf.summary.scalar(metric_key, metric_np, step=num_train_steps)
if metric_key == 'total_loss':
logging.info('Loss: %.3f bits/dim, Speed: %.3f steps/second',
metric_np, steps_per_sec)
if time.time() - start_time > config.save_checkpoint_secs:
checkpoint_name = checkpoint.save()
logging.info('Saved checkpoint to %s', checkpoint_name)
start_time = time.time()
###############################################################################
## Evaluating.
###############################################################################
def evaluate(logdir, subset):
"""Executes the evaluation loop."""
config = FLAGS.config
strategy, batch_size = train_utils.setup_strategy(
config, FLAGS.master,
FLAGS.devices_per_worker, FLAGS.mode, FLAGS.accelerator_type)
def input_fn(_=None):
return datasets.get_dataset(
name=config.dataset,
config=config,
batch_size=config.eval_batch_size,
subset=subset)
model, optimizer, ema = train_utils.with_strategy(
lambda: build(config, batch_size, False), strategy)
metric_keys = ['loss', 'total_loss']
# metric_keys += model.metric_keys
metrics = {}
for metric_key in metric_keys:
func = functools.partial(tf.keras.metrics.Mean, metric_key)
curr_metric = train_utils.with_strategy(func, strategy)
metrics[metric_key] = curr_metric
checkpoints = train_utils.with_strategy(
lambda: train_utils.create_checkpoint(model, optimizer, ema),
strategy)
dataset = train_utils.dataset_with_strategy(input_fn, strategy)
def step_fn(batch):
_, extra = loss_on_batch(batch, model, config, training=False)
for metric_key in metric_keys:
curr_metric = metrics[metric_key]
curr_scalar = extra['scalar'][metric_key]
curr_metric.update_state(curr_scalar)
num_examples = config.eval_num_examples
eval_step = train_utils.step_with_strategy(step_fn, strategy)
ckpt_path = None
wait_max = config.get(
'eval_checkpoint_wait_secs', config.save_checkpoint_secs * 100)
is_ema = True if ema else False
eval_summary_dir = os.path.join(
logdir, 'eval_{}_summaries_pyk_{}'.format(subset, is_ema))
writer = tf.summary.create_file_writer(eval_summary_dir)
while True:
ckpt_path = train_utils.wait_for_checkpoint(logdir, ckpt_path, wait_max)
logging.info(ckpt_path)
if ckpt_path is None:
logging.info('Timed out waiting for checkpoint.')
break
train_utils.with_strategy(
lambda: train_utils.restore(model, checkpoints, logdir, ema),
strategy)
data_iterator = iter(dataset)
num_steps = num_examples // batch_size
for metric_key, metric in metrics.items():
metric.reset_states()
logging.info('Starting evaluation.')
done = False
for i in range(0, num_steps, FLAGS.steps_per_summaries):
start_run = time.time()
for k in range(min(num_steps - i, FLAGS.steps_per_summaries)):
try:
if k % 10 == 0:
logging.info('Step: %d', (i + k + 1))
eval_step(data_iterator)
except (StopIteration, tf.errors.OutOfRangeError):
done = True
break
if done:
break
bits_per_dim = metrics['loss'].result()
logging.info('Bits/Dim: %.3f, Speed: %.3f seconds/step, Step: %d/%d',
bits_per_dim,
(time.time() - start_run) / FLAGS.steps_per_summaries,
i + k + 1, num_steps)
# logging.info('Final Bits/Dim: %.3f', bits_per_dim)
with writer.as_default():
for metric_key, metric in metrics.items():
curr_scalar = metric.result().numpy()
tf.summary.scalar(metric_key, curr_scalar, step=optimizer.iterations)
def main(_):
logging.info('Logging to %s.', FLAGS.logdir)
if FLAGS.mode == 'train':
logging.info('[main] I am the trainer.')
try:
train(FLAGS.logdir)
# During TPU Preemeption, the coordinator hangs with the error below.
# the exception forces the coordinator to fail, and it will be restarted.
except (tf.errors.UnavailableError, tf.errors.CancelledError):
os._exit(os.EX_TEMPFAIL) # pylint: disable=protected-access
elif FLAGS.mode.startswith('train'):
logging.info('[main] I am the trainer.')
train(os.path.join(FLAGS.logdir, FLAGS.mode))
elif FLAGS.mode == 'eval_train':
logging.info('[main] I am the training set evaluator.')
evaluate(FLAGS.logdir, subset='train')
elif FLAGS.mode == 'eval_valid':
logging.info('[main] I am the validation set evaluator.')
evaluate(FLAGS.logdir, subset='valid')
elif FLAGS.mode == 'eval_test':
logging.info('[main] I am the test set evaluator.')
evaluate(FLAGS.logdir, subset='test')
else:
raise ValueError(
'Unknown mode {}. '
'Must be one of [train, eval_train, eval_valid, eval_test]'.format(
FLAGS.mode))
if __name__ == '__main__':
app.run(main)
| 1.851563 | 2 |
train_multi_human.py | wenliangdai/sunets-reproduce | 2 | 5221 | <gh_stars>1-10
import argparse
import math
import os
import pickle
import random
import sys
import numpy as np
import torch
import torch.backends.cudnn as cudnn
from torch import nn
from torch.optim import lr_scheduler
from torch.utils import data
import torchvision.transforms as transforms
import transforms as extended_transforms
from loss import prediction_stat
from main import get_data_path
from main.loader import get_loader
from main.models import get_model
from utils import dotdict, float2str
# paths
ROOT = '/home/wenlidai/sunets-reproduce/'
RESULT = 'results'
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def main(args):
print('='*10, 'Starting', '='*10, '\n')
print(device)
# Set the seed for reproducing the results
random.seed(args.manual_seed)
np.random.seed(args.manual_seed)
torch.manual_seed(args.manual_seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(args.manual_seed)
cudnn.benchmark = True
# Set up results folder
if not os.path.exists(os.path.join(ROOT, RESULT, 'saved_val_images')):
os.makedirs(os.path.join(ROOT, RESULT, 'saved_val_images'))
if not os.path.exists(os.path.join(ROOT, RESULT, 'saved_train_images')):
os.makedirs(os.path.join(ROOT, RESULT, 'saved_train_images'))
# Setup Dataloader
data_loader = get_loader(args.dataset)
input_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
target_transform = extended_transforms.MaskToTensor()
traindata = data_loader('train', n_classes=args.n_classes, transform=input_transform, target_transform=target_transform, do_transform=True)
trainloader = data.DataLoader(traindata, batch_size=args.batch_size, num_workers=2, shuffle=True)
valdata = data_loader('val', n_classes=args.n_classes, transform=input_transform, target_transform=target_transform)
valloader = data.DataLoader(valdata, batch_size=args.batch_size, num_workers=2, shuffle=False)
n_classes = traindata.n_classes
n_trainsamples = len(traindata)
n_iters_per_epoch = np.ceil(n_trainsamples / float(args.batch_size * args.iter_size))
# Setup Model
model = get_model(
name=args.arch,
n_classes=n_classes,
ignore_index=traindata.ignore_index,
output_stride=args.output_stride,
pretrained=args.pretrained,
momentum_bn=args.momentum_bn,
dprob=args.dprob
).to(device)
epochs_done=0
X=[]
Y1=[]
Y1_test=[]
Y2=[]
Y2_test=[]
avg_pixel_acc = 0
mean_class_acc = 0
mIoU = 0
avg_pixel_acc_test = 0
mean_class_acc_test = 0
mIoU_test = 0
best_mIoU = 0
best_epoch = 0
if args.model_path:
model_name = args.model_path.split('.')
checkpoint_name = model_name[0] + '_optimizer.pkl'
checkpoint = torch.load(os.path.join(ROOT, RESULT, checkpoint_name))
optm = checkpoint['optimizer']
model.load_state_dict(checkpoint['state_dict'])
split_str = model_name[0].split('_')
epochs_done = int(split_str[-1])
saved_loss = pickle.load( open(os.path.join(ROOT, RESULT, "saved_loss.p"), "rb") )
saved_accuracy = pickle.load( open(os.path.join(ROOT, RESULT, "saved_accuracy.p"), "rb") )
X=saved_loss["X"][:epochs_done]
Y=saved_loss["Y"][:epochs_done]
Y_test=saved_loss["Y_test"][:epochs_done]
avg_pixel_acc = saved_accuracy["P"][:epochs_done,:]
mean_class_acc = saved_accuracy["M"][:epochs_done,:]
mIoU = saved_accuracy["I"][:epochs_done,:]
avg_pixel_acc_test = saved_accuracy["P_test"][:epochs_done,:]
mean_class_acc_test = saved_accuracy["M_test"][:epochs_done,:]
mIoU_test = saved_accuracy["I_test"][:epochs_done,:]
if args.best_model_path:
best_model_name = args.best_model_path.split('_')
best_mIoU = float(best_model_name[-2])
best_epoch = int(best_model_name[-3])
# Learning rates: For new layers (such as final layer), we set lr to be 10x the learning rate of layers already trained
bias_10x_params = filter(lambda x: ('bias' in x[0]) and ('final' in x[0]) and ('conv' in x[0]),
model.named_parameters())
bias_10x_params = list(map(lambda x: x[1], bias_10x_params))
bias_params = filter(lambda x: ('bias' in x[0]) and ('final' not in x[0]),
model.named_parameters())
bias_params = list(map(lambda x: x[1], bias_params))
nonbias_10x_params = filter(lambda x: (('bias' not in x[0]) or ('bn' in x[0])) and ('final' in x[0]),
model.named_parameters())
nonbias_10x_params = list(map(lambda x: x[1], nonbias_10x_params))
nonbias_params = filter(lambda x: ('bias' not in x[0]) and ('final' not in x[0]),
model.named_parameters())
nonbias_params = list(map(lambda x: x[1], nonbias_params))
optimizer = torch.optim.SGD([{'params': bias_params, 'lr': args.lr},
{'params': bias_10x_params, 'lr': 20 * args.lr if args.pretrained else args.lr},
{'params': nonbias_10x_params, 'lr': 10 * args.lr if args.pretrained else args.lr},
{'params': nonbias_params, 'lr': args.lr},],
lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay,
nesterov=(args.optim == 'Nesterov'))
num_param_groups = 4
# optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
# Setting up scheduler
if args.model_path and args.restore:
# Here we restore all states of optimizer
optimizer.load_state_dict(optm)
total_iters = n_iters_per_epoch * args.epochs
lambda1 = lambda step: 0.5 + 0.5 * math.cos(np.pi * step / total_iters)
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=[lambda1]*num_param_groups, last_epoch=epochs_done*n_iters_per_epoch)
# scheduler = lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.1, last_epoch=epochs_done)
else:
# scheduler = lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.1)
# Here we simply restart the training
# if args.T0:
# total_iters = args.T0 * n_iters_per_epoch
# else:
total_iters = ((args.epochs - epochs_done) * n_iters_per_epoch)
lambda1 = lambda step: 0.5 + 0.5 * math.cos(np.pi * step / total_iters)
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=[lambda1]*num_param_groups)
global l_avg, totalclasswise_pixel_acc, totalclasswise_gtpixels, totalclasswise_predpixels
global l_avg_test, totalclasswise_pixel_acc_test, totalclasswise_gtpixels_test, totalclasswise_predpixels_test
global steps, steps_test
criterion_sbd = nn.CrossEntropyLoss(size_average=False, ignore_index=traindata.ignore_index)
criterion_lip = nn.CrossEntropyLoss(size_average=False, ignore_index=traindata.ignore_index)
criterions = [criterion_sbd, criterion_lip]
for epoch in range(epochs_done, args.epochs):
print('='*10, 'Epoch %d' % (epoch + 1), '='*10)
l_avg = [0, 0]
totalclasswise_pixel_acc = [0, 0]
totalclasswise_gtpixels = [0, 0]
totalclasswise_predpixels = [0, 0]
l_avg_test = [0, 0]
totalclasswise_pixel_acc_test = [0, 0]
totalclasswise_gtpixels_test = [0, 0]
totalclasswise_predpixels_test = [0, 0]
steps = [0, 0]
steps_test = [0, 0]
# scheduler.step()
train(model, optimizer, criterions, trainloader, epoch, scheduler, traindata)
val(model, criterions, valloader, epoch, valdata)
# save the model every 5 epochs
if (epoch + 1) % 5 == 0 or epoch == args.epochs - 1:
if (epoch + 1) > 5:
os.remove(os.path.join(ROOT, RESULT, "{}_{}_{}.pkl".format(args.arch, args.dataset, epoch - 4)))
os.remove(os.path.join(ROOT, RESULT, "{}_{}_{}_optimizer.pkl".format(args.arch, args.dataset, epoch - 4)))
torch.save(model, os.path.join(ROOT, RESULT, "{}_{}_{}.pkl".format(args.arch, args.dataset, epoch + 1)))
torch.save({'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict()},
os.path.join(ROOT, RESULT, "{}_{}_{}_optimizer.pkl".format(args.arch, args.dataset, epoch + 1)))
# remove old loss & accuracy files
if os.path.isfile(os.path.join(ROOT, RESULT, "saved_loss.p")):
os.remove(os.path.join(ROOT, RESULT, "saved_loss.p"))
if os.path.isfile(os.path.join(ROOT, RESULT, "saved_accuracy.p")):
os.remove(os.path.join(ROOT, RESULT, "saved_accuracy.p"))
# save train and validation loss
X.append(epoch + 1)
Y1.append(l_avg[0] / steps[0])
Y1_test.append(l_avg_test[0] / steps_test[0])
Y2.append(l_avg[1] / steps[1])
Y2_test.append(l_avg_test[1] / steps_test[1])
saved_loss={"X": X, "Y1": Y1, "Y2": Y2, "Y1_test": Y1_test, "Y2_test": Y2_test}
pickle.dump(saved_loss, open(os.path.join(ROOT, RESULT, "saved_loss.p"), "wb"))
# pixel accuracy
totalclasswise_pixel_acc[0] = totalclasswise_pixel_acc[0].reshape((-1, n_classes[0])).astype(np.float32)
totalclasswise_gtpixels[0] = totalclasswise_gtpixels[0].reshape((-1, n_classes[0]))
totalclasswise_predpixels[0] = totalclasswise_predpixels[0].reshape((-1, n_classes[0]))
totalclasswise_pixel_acc_test[0] = totalclasswise_pixel_acc_test[0].reshape((-1, n_classes[0])).astype(np.float32)
totalclasswise_gtpixels_test[0] = totalclasswise_gtpixels_test[0].reshape((-1, n_classes[0]))
totalclasswise_predpixels_test[0] = totalclasswise_predpixels_test[0].reshape((-1, n_classes[0]))
totalclasswise_pixel_acc[1] = totalclasswise_pixel_acc[1].reshape((-1, n_classes[1])).astype(np.float32)
totalclasswise_gtpixels[1] = totalclasswise_gtpixels[1].reshape((-1, n_classes[1]))
totalclasswise_predpixels[1] = totalclasswise_predpixels[1].reshape((-1, n_classes[1]))
totalclasswise_pixel_acc_test[1] = totalclasswise_pixel_acc_test[1].reshape((-1, n_classes[1])).astype(np.float32)
totalclasswise_gtpixels_test[1] = totalclasswise_gtpixels_test[1].reshape((-1, n_classes[1]))
totalclasswise_predpixels_test[1] = totalclasswise_predpixels_test[1].reshape((-1, n_classes[1]))
if isinstance(avg_pixel_acc, list):
avg_pixel_acc[0] = np.vstack((avg_pixel_acc[0], np.sum(totalclasswise_pixel_acc[0], axis=1) / np.sum(totalclasswise_gtpixels[0], axis=1)))
mean_class_acc[0] = np.vstack((mean_class_acc[0], np.mean(totalclasswise_pixel_acc[0] / totalclasswise_gtpixels[0], axis=1)))
mIoU[0] = np.vstack((mIoU[0], np.mean(totalclasswise_pixel_acc[0] / (totalclasswise_gtpixels[0] + totalclasswise_predpixels[0] - totalclasswise_pixel_acc[0]), axis=1)))
avg_pixel_acc[1] = np.vstack((avg_pixel_acc[1], np.sum(totalclasswise_pixel_acc[1], axis=1) / np.sum(totalclasswise_gtpixels[1], axis=1)))
mean_class_acc[1] = np.vstack((mean_class_acc[1], np.mean(totalclasswise_pixel_acc[1] / totalclasswise_gtpixels[1], axis=1)))
mIoU[1] = np.vstack((mIoU[1], np.mean(totalclasswise_pixel_acc[1] / (totalclasswise_gtpixels[1] + totalclasswise_predpixels[1] - totalclasswise_pixel_acc[1]), axis=1)))
avg_pixel_acc_test[0] = np.vstack((avg_pixel_acc_test[0], np.sum(totalclasswise_pixel_acc_test[0],axis=1) / np.sum(totalclasswise_gtpixels_test[0], axis=1)))
mean_class_acc_test[0] = np.vstack((mean_class_acc_test[0], np.mean(totalclasswise_pixel_acc_test[0] / totalclasswise_gtpixels_test[0], axis=1)))
mIoU_test[0] = np.vstack((mIoU_test[0], np.mean(totalclasswise_pixel_acc_test[0] / (totalclasswise_gtpixels_test[0] + totalclasswise_predpixels_test[0] - totalclasswise_pixel_acc_test[0]), axis=1)))
avg_pixel_acc_test[1] = np.vstack((avg_pixel_acc_test[1], np.sum(totalclasswise_pixel_acc_test[1],axis=1) / np.sum(totalclasswise_gtpixels_test[1], axis=1)))
mean_class_acc_test[1] = np.vstack((mean_class_acc_test[1], np.mean(totalclasswise_pixel_acc_test[1] / totalclasswise_gtpixels_test[1], axis=1)))
mIoU_test[1] = np.vstack((mIoU_test[1], np.mean(totalclasswise_pixel_acc_test[1] / (totalclasswise_gtpixels_test[1] + totalclasswise_predpixels_test[1] - totalclasswise_pixel_acc_test[1]), axis=1)))
else:
avg_pixel_acc = []
mean_class_acc = []
mIoU = []
avg_pixel_acc.append( np.sum(totalclasswise_pixel_acc[0], axis=1) / np.sum(totalclasswise_gtpixels[0], axis=1) )
mean_class_acc.append( np.mean(totalclasswise_pixel_acc[0] / totalclasswise_gtpixels[0], axis=1) )
mIoU.append( np.mean(totalclasswise_pixel_acc[0] / (totalclasswise_gtpixels[0] + totalclasswise_predpixels[0] - totalclasswise_pixel_acc[0]), axis=1) )
avg_pixel_acc.append( np.sum(totalclasswise_pixel_acc[1], axis=1) / np.sum(totalclasswise_gtpixels[1], axis=1) )
mean_class_acc.append( np.mean(totalclasswise_pixel_acc[1] / totalclasswise_gtpixels[1], axis=1) )
mIoU.append( np.mean(totalclasswise_pixel_acc[1] / (totalclasswise_gtpixels[1] + totalclasswise_predpixels[1] - totalclasswise_pixel_acc[1]), axis=1) )
avg_pixel_acc_test = []
mean_class_acc_test = []
mIoU_test = []
avg_pixel_acc_test.append( np.sum(totalclasswise_pixel_acc_test[0], axis=1) / np.sum(totalclasswise_gtpixels_test[0], axis=1) )
mean_class_acc_test.append( np.mean(totalclasswise_pixel_acc_test[0] / totalclasswise_gtpixels_test[0], axis=1) )
mIoU_test.append( np.mean(totalclasswise_pixel_acc_test[0] / (totalclasswise_gtpixels_test[0] + totalclasswise_predpixels_test[0] - totalclasswise_pixel_acc_test[0]), axis=1) )
avg_pixel_acc_test.append( np.sum(totalclasswise_pixel_acc_test[1], axis=1) / np.sum(totalclasswise_gtpixels_test[1], axis=1) )
mean_class_acc_test.append( np.mean(totalclasswise_pixel_acc_test[1] / totalclasswise_gtpixels_test[1], axis=1) )
mIoU_test.append( np.mean(totalclasswise_pixel_acc_test[1] / (totalclasswise_gtpixels_test[1] + totalclasswise_predpixels_test[1] - totalclasswise_pixel_acc_test[1]), axis=1) )
saved_accuracy = {
"X": X,
"P1": avg_pixel_acc[0], "P2": avg_pixel_acc[1],
"M1": mean_class_acc[0], "M2": mean_class_acc[1],
"I1": mIoU[0], "I2": mIoU[1],
"P1_test": avg_pixel_acc_test[0], "P2_test": avg_pixel_acc_test[1],
"M1_test": mean_class_acc_test[0], "M2_test": mean_class_acc_test[1],
"I1_test": mIoU_test[0], "I2_test": mIoU_test[1]
}
pickle.dump(saved_accuracy, open(os.path.join(ROOT, RESULT, "saved_accuracy.p"), "wb"))
# print validation mIoU of both tasks
this_mIoU1 = np.mean(totalclasswise_pixel_acc_test[0] / (totalclasswise_gtpixels_test[0] + totalclasswise_predpixels_test[0] - totalclasswise_pixel_acc_test[0]), axis=1)[0]
this_mIoU2 = np.mean(totalclasswise_pixel_acc_test[1] / (totalclasswise_gtpixels_test[1] + totalclasswise_predpixels_test[1] - totalclasswise_pixel_acc_test[1]), axis=1)[0]
print('Val: mIoU_sbd = {}, mIoU_lip = {}'.format(this_mIoU1, this_mIoU2))
def train(model, optimizer, criterions, trainloader, epoch, scheduler, data):
global l_avg, totalclasswise_pixel_acc, totalclasswise_gtpixels, totalclasswise_predpixels
global steps
model.train()
for i, (images, sbd_labels, lip_labels) in enumerate(trainloader):
sbd_valid_pixel = float( (sbd_labels.data != criterions[0].ignore_index).long().sum() )
lip_valid_pixel = float( (lip_labels.data != criterions[1].ignore_index).long().sum() )
images = images.to(device)
sbd_labels = sbd_labels.to(device)
lip_labels = lip_labels.to(device)
sbd_outputs, lip_outputs = model(images, task=2)
sbd_loss = criterions[0](sbd_outputs, sbd_labels)
classwise_pixel_acc, classwise_gtpixels, classwise_predpixels = prediction_stat([sbd_outputs], sbd_labels, data.n_classes[0])
classwise_pixel_acc = torch.FloatTensor([classwise_pixel_acc])
classwise_gtpixels = torch.FloatTensor([classwise_gtpixels])
classwise_predpixels = torch.FloatTensor([classwise_predpixels])
totalclasswise_pixel_acc[0] += classwise_pixel_acc.sum(0).data.numpy()
totalclasswise_gtpixels[0] += classwise_gtpixels.sum(0).data.numpy()
totalclasswise_predpixels[0] += classwise_predpixels.sum(0).data.numpy()
sbd_total_loss = sbd_loss.sum()
sbd_total_loss = sbd_total_loss / float(sbd_valid_pixel)
sbd_total_loss.backward(retain_graph=True)
lip_loss = criterions[1](lip_outputs, lip_labels)
classwise_pixel_acc, classwise_gtpixels, classwise_predpixels = prediction_stat([lip_outputs], lip_labels, data.n_classes[1])
classwise_pixel_acc = torch.FloatTensor([classwise_pixel_acc])
classwise_gtpixels = torch.FloatTensor([classwise_gtpixels])
classwise_predpixels = torch.FloatTensor([classwise_predpixels])
totalclasswise_pixel_acc[1] += classwise_pixel_acc.sum(0).data.numpy()
totalclasswise_gtpixels[1] += classwise_gtpixels.sum(0).data.numpy()
totalclasswise_predpixels[1] += classwise_predpixels.sum(0).data.numpy()
lip_total_loss = lip_loss.sum()
lip_total_loss = lip_total_loss / float(lip_valid_pixel)
lip_total_loss.backward()
l_avg[0] += sbd_loss.sum().data.cpu().numpy()
steps[0] += sbd_valid_pixel
l_avg[1] += lip_loss.sum().data.cpu().numpy()
steps[1] += lip_valid_pixel
optimizer.step()
optimizer.zero_grad()
scheduler.step()
# if (i + 1) % args.log_size == 0:
# pickle.dump(images[0].cpu().numpy(),
# open(os.path.join(ROOT, RESULT, "saved_train_images/" + str(epoch) + "_" + str(i) + "_input.p"), "wb"))
# pickle.dump(np.transpose(data.decode_segmap(outputs[0].data.cpu().numpy().argmax(0)), [2, 0, 1]),
# open(os.path.join(ROOT, RESULT, "saved_train_images/" + str(epoch) + "_" + str(i) + "_output.p"), "wb"))
# pickle.dump(np.transpose(data.decode_segmap(labels[0].cpu().numpy()), [2, 0, 1]),
# open(os.path.join(ROOT, RESULT, "saved_train_images/" + str(epoch) + "_" + str(i) + "_target.p"), "wb"))
def val(model, criterions, valloader, epoch, data):
global l_avg_test, totalclasswise_pixel_acc_test, totalclasswise_gtpixels_test, totalclasswise_predpixels_test
global steps_test
model.eval()
for i, (images, sbd_labels, lip_labels) in enumerate(valloader):
sbd_valid_pixel = float( (sbd_labels.data != criterions[0].ignore_index).long().sum() )
lip_valid_pixel = float( (lip_labels.data != criterions[1].ignore_index).long().sum() )
images = images.to(device)
sbd_labels = sbd_labels.to(device)
lip_labels = lip_labels.to(device)
with torch.no_grad():
sbd_outputs, lip_outputs = model(images, task=2)
sbd_loss = criterions[0](sbd_outputs, sbd_labels)
lip_loss = criterions[1](lip_outputs, lip_labels)
classwise_pixel_acc, classwise_gtpixels, classwise_predpixels = prediction_stat([sbd_outputs], sbd_labels, data.n_classes[0])
classwise_pixel_acc = torch.FloatTensor([classwise_pixel_acc])
classwise_gtpixels = torch.FloatTensor([classwise_gtpixels])
classwise_predpixels = torch.FloatTensor([classwise_predpixels])
totalclasswise_pixel_acc_test[0] += classwise_pixel_acc.sum(0).data.numpy()
totalclasswise_gtpixels_test[0] += classwise_gtpixels.sum(0).data.numpy()
totalclasswise_predpixels_test[0] += classwise_predpixels.sum(0).data.numpy()
classwise_pixel_acc, classwise_gtpixels, classwise_predpixels = prediction_stat([lip_outputs], lip_labels, data.n_classes[1])
classwise_pixel_acc = torch.FloatTensor([classwise_pixel_acc])
classwise_gtpixels = torch.FloatTensor([classwise_gtpixels])
classwise_predpixels = torch.FloatTensor([classwise_predpixels])
totalclasswise_pixel_acc_test[1] += classwise_pixel_acc.sum(0).data.numpy()
totalclasswise_gtpixels_test[1] += classwise_gtpixels.sum(0).data.numpy()
totalclasswise_predpixels_test[1] += classwise_predpixels.sum(0).data.numpy()
l_avg_test[0] += sbd_loss.sum().data.cpu().numpy()
steps_test[0] += sbd_valid_pixel
l_avg_test[1] += lip_loss.sum().data.cpu().numpy()
steps_test[1] += lip_valid_pixel
# if (i + 1) % 800 == 0:
# pickle.dump(images[0].cpu().numpy(),
# open(os.path.join(ROOT, RESULT, "saved_val_images/" + str(epoch) + "_" + str(i) + "_input.p"), "wb"))
# pickle.dump(np.transpose(data.decode_segmap(outputs[0].data.cpu().numpy().argmax(0)), [2, 0, 1]),
# open(os.path.join(ROOT, RESULT, "saved_val_images/" + str(epoch) + "_" + str(i) + "_output.p"), "wb"))
# pickle.dump(np.transpose(data.decode_segmap(labels[0].cpu().numpy()), [2, 0, 1]),
# open(os.path.join(ROOT, RESULT, "saved_val_images/" + str(epoch) + "_" + str(i) + "_target.p"), "wb"))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Hyperparams')
parser.add_argument('--arch', nargs='?', type=str, default='sunet64_multi',
help='Architecture to use [\'sunet64, sunet128, sunet7128 etc\']')
parser.add_argument('--model_path', help='Path to the saved model', type=str)
parser.add_argument('--best_model_path', help='Path to the saved best model', type=str)
parser.add_argument('--dataset', nargs='?', type=str, default='human',
help='Dataset to use [\'sbd, coco, cityscapes etc\']')
parser.add_argument('--img_rows', nargs='?', type=int, default=512,
help='Height of the input image')
parser.add_argument('--img_cols', nargs='?', type=int, default=512,
help='Width of the input image')
parser.add_argument('--epochs', nargs='?', type=int, default=90,
help='# of the epochs')
parser.add_argument('--batch_size', nargs='?', type=int, default=10,
help='Batch Size')
parser.add_argument('--lr', nargs='?', type=float, default=0.0005,
help='Learning Rate')
parser.add_argument('--manual_seed', default=0, type=int,
help='manual seed')
parser.add_argument('--iter_size', type=int, default=1,
help='number of batches per weight updates')
parser.add_argument('--log_size', type=int, default=400,
help='iteration period of logging segmented images')
parser.add_argument('--dprob', nargs='?', type=float, default=1e-7,
help='Dropout probability')
parser.add_argument('--momentum', nargs='?', type=float, default=0.95,
help='Momentum for SGD')
parser.add_argument('--momentum_bn', nargs='?', type=float, default=0.01,
help='Momentum for BN')
parser.add_argument('--weight_decay', nargs='?', type=float, default=1e-4,
help='Weight decay')
parser.add_argument('--output_stride', nargs='?', type=str, default='16',
help='Output stride to use [\'32, 16, 8 etc\']')
parser.add_argument('--freeze', action='store_true',
help='Freeze BN params')
parser.add_argument('--restore', action='store_true',
help='Restore Optimizer params')
parser.add_argument('--epoch_log_size', nargs='?', type=str, default=20,
help='Every [epoch_log_size] iterations to print loss in each epoch')
parser.add_argument('--pretrained', action='store_true',
help='Use pretrained ImageNet initialization or not')
parser.add_argument('--n_classes', nargs='?', type=int, action='append',
help='number of classes of the labels')
parser.add_argument('--optim', nargs='?', type=str, default='SGD',
help='Optimizer to use [\'SGD, Nesterov etc\']')
global args
args = parser.parse_args()
RESULT = '{}_{}_{}'.format(RESULT, args.arch, args.dataset)
if args.pretrained:
RESULT = RESULT + '_pretrained'
main(args)
| 2.078125 | 2 |
exemplos/exemplo-aula-14-01.py | quitaiskiluisf/TI4F-2021-LogicaProgramacao | 0 | 5222 | # Introduction
print('Program to add 8 values using vectors/lists')
print()
# Declare the vector/list
valores = [0, 0, 0, 0, 0, 0, 0, 0]
# Ask the user for the values
for i in range(len(valores)):
    valores[i] = int(input('Enter a value: '))
# Compute the sum
soma = 0
for i in range(len(valores)):
    soma += valores[i]
# Show the result
print(f'The sum of the values is {soma}')
| 4.125 | 4 |
day3/p1.py | pwicks86/adventofcode2015 | 0 | 5223 | from collections import defaultdict
f = open("input.txt")
d = f.read()
houses = defaultdict(int,{(0,0):1})
cur = [0,0]
for c in d:
if c == "<":
cur[0] -= 1
if c == ">":
cur[0] += 1
if c == "v":
cur[1] += 1
if c == "^":
cur[1] -= 1
houses[tuple(cur)]+=1
print(len(houses.keys()))
| 3.046875 | 3 |
pbr/config/blend_config.py | NUbots/NUpbr | 1 | 5224 | <gh_stars>1-10
# Blender-specific Configuration Settings
from math import pi
render = {
"render_engine": "CYCLES",
"render": {"cycles_device": "GPU"},
"dimensions": {"resolution": [1280, 1024], "percentage": 100.0},
"sampling": {"cycles_samples": 256, "cycles_preview_samples": 16},
"light_paths": {
"transparency": {"max_bounces": 1, "min_bounces": 1},
"bounces": {"max_bounces": 1, "min_bounces": 1},
"diffuse": 1,
"glossy": 1,
"transmission": 1,
"volume": 0,
"reflective_caustics": False,
"refractive_caustics": False,
},
"performance": {
"render_tile": [512, 512],
"threads": {"mode": "FIXED", "num_threads": 8},
},
"layers": {"use_hair": False},
}
scene = {"units": {"length_units": "METRIC", "rotation_units": "DEGREES"}}
layers = {"denoising": {"use_denoising": False}}
field = {
"material": {
"mapping": {
"translation": (0.0, 0.05, 0.0),
"rotation": (0.0, -pi / 2.0, 0.0),
"scale": (1.0, 0.6, 1.0),
},
"mix_lower_grass": {
"inp1": (0.000, 0.012, 0.00076, 1.0),
"inp2": (0.020, 0.011, 0.0, 1.0),
},
"mix_upper_grass": {
"inp1": (0.247, 0.549, 0.0, 1),
"inp2": (0.257, 0.272, 0.0, 1),
},
"noise": {"inp": [5.0, 2.0, 0.0]},
"hsv": {"inp": [0.0, 0.0, 1.9, 1.0]},
"mix_up_grass_hsv": {"inp0": 0.455},
"mix_low_grass_field_lines": {"inp0": 0.4},
"mix_grass": {"inp0": 0.391},
"principled": {"specular": 0.225, "roughness": 0.625},
},
"lower_plane": {
"colour": (0.003, 0.04, 0.0, 1.0),
"principled": {"specular": 0.225, "roughness": 1.0},
"mapping": {"scale": (0.1, 0.1, 1.0)},
},
}
ball = {
"initial_cond": {"segments": 16, "ring_count": 10, "calc_uvs": True},
"material": {"metallic": 0.0, "roughness": 0.35},
"subsurf_mod": {"levels": 1, "rend_levels": 4},
}
goal = {
"initial_cond": {"vertices": 32, "calc_uvs": True},
"corner_curve": {"fill": "FULL"},
"material": {"metallic": 0.0, "roughness": 0.35, "colour": (0.8, 0.8, 0.8, 1.0)},
"subsurf_mod": {"levels": 1, "rend_levels": 4},
}
robot = {"material": {"specular": 0.742, "metallic": 0.0, "roughness": 0.9}}
| 1.640625 | 2 |
simglucose/controller/basal_bolus_ctrller.py | mia-jingyi/simglucose | 0 | 5225 | from .base import Controller
from .base import Action
import numpy as np
import pandas as pd
import logging
from collections import namedtuple
from tqdm import tqdm
logger = logging.getLogger(__name__)
CONTROL_QUEST = 'simglucose/params/Quest.csv'
PATIENT_PARA_FILE = 'simglucose/params/vpatient_params.csv'
ParamTup = namedtuple('ParamTup', ['basal', 'cf', 'cr'])
class BBController(Controller):
"""
    This is a Basal-Bolus Controller that mimics the dosing strategy typically
    practiced by a Type-1 Diabetes patient. Its performance can serve as a
    baseline when developing a more advanced controller.
"""
def __init__(self, target=140):
self.quest = pd.read_csv(CONTROL_QUEST)
self.patient_params = pd.read_csv(PATIENT_PARA_FILE)
self.target = target
def policy(self, observation, reward, done, **kwargs):
sample_time = kwargs.get('sample_time', 1)
pname = kwargs.get('patient_name')
meal = kwargs.get('meal') # unit: g/min
action = self._bb_policy(pname, meal, observation.CGM, sample_time)
return action
def _bb_policy(self, name, meal, glucose, env_sample_time):
"""
Helper function to compute the basal and bolus amount.
The basal insulin is based on the insulin amount to keep the blood
glucose in the steady state when there is no (meal) disturbance.
basal = u2ss (pmol/(L*kg)) * body_weight (kg) / 6000 (U/min)
The bolus amount is computed based on the current glucose level, the
target glucose level, the patient's correction factor and the patient's
carbohydrate ratio.
bolus = ((carbohydrate / carbohydrate_ratio) +
(current_glucose - target_glucose) / correction_factor)
/ sample_time
NOTE the bolus computed from the above formula is in unit U. The
simulator only accepts insulin rate. Hence the bolus is converted to
insulin rate.
"""
if any(self.quest.Name.str.match(name)):
quest = self.quest[self.quest.Name.str.match(name)]
params = self.patient_params[self.patient_params.Name.str.match(
name)]
u2ss = params.u2ss.values.item() # unit: pmol/(L*kg)
BW = params.BW.values.item() # unit: kg
else:
quest = pd.DataFrame([['Average', 13.5, 23.52, 50, 30]],
columns=['Name', 'CR', 'CF', 'TDI', 'Age'])
u2ss = 1.43 # unit: pmol/(L*kg)
BW = 57.0 # unit: kg
basal = u2ss * BW / 6000 # unit: U/min
if meal > 0:
logger.info('Calculating bolus ...')
logger.info(f'Meal = {meal} g/min')
logger.info(f'glucose = {glucose}')
bolus = (
(meal * env_sample_time) / quest.CR.values + (glucose > 150) *
(glucose - self.target) / quest.CF.values).item() # unit: U
else:
bolus = 0 # unit: U
# This is to convert bolus in total amount (U) to insulin rate (U/min).
# The simulation environment does not treat basal and bolus
# differently. The unit of Action.basal and Action.bolus are the same
# (U/min).
bolus = bolus / env_sample_time # unit: U/min
return Action(basal=basal, bolus=bolus)
def reset(self):
pass
class ManualBBController(Controller):
def __init__(self, target, cr, cf, basal, sample_rate=5, use_cf=True, use_bol=True, cooldown=0,
corrected=True, use_low_lim=False, low_lim=70):
super().__init__(self)
self.target = target
self.orig_cr = self.cr = cr
self.orig_cf = self.cf = cf
self.orig_basal = self.basal = basal
self.sample_rate = sample_rate
self.use_cf = use_cf
self.use_bol = use_bol
self.cooldown = cooldown
self.last_cf = np.inf
self.corrected = corrected
self.use_low_lim = use_low_lim
self.low_lim = low_lim
def increment(self, cr_incr=0, cf_incr=0, basal_incr=0):
self.cr += cr_incr
self.cf += cf_incr
self.basal += basal_incr
def policy(self, observation, reward, done, **kwargs):
carbs = kwargs.get('carbs')
glucose = kwargs.get('glucose')
action = self.manual_bb_policy(carbs, glucose)
return action
def manual_bb_policy(self, carbs, glucose, log=False):
if carbs > 0:
if self.corrected:
carb_correct = carbs / self.cr
else:
# assuming carbs are already multiplied by sampling rate
carb_correct = (carbs/self.sample_rate) / self.cr
hyper_correct = (glucose > self.target) * (glucose - self.target) / self.cf
hypo_correct = (glucose < self.low_lim) * (self.low_lim - glucose) / self.cf
bolus = 0
if self.use_low_lim:
bolus -= hypo_correct
if self.use_cf:
if self.last_cf > self.cooldown and hyper_correct > 0:
bolus += hyper_correct
self.last_cf = 0
if self.use_bol:
bolus += carb_correct
bolus = bolus / self.sample_rate
else:
bolus = 0
carb_correct = 0
hyper_correct = 0
hypo_correct = 0
self.last_cf += self.sample_rate
if log:
return Action(basal=self.basal, bolus=bolus), hyper_correct, hypo_correct, carb_correct
else:
return Action(basal=self.basal, bolus=bolus)
def get_params(self):
return ParamTup(basal=self.basal, cf=self.cf, cr=self.cr)
def adjust(self, basal_adj, cr_adj):
        self.basal = self.orig_basal + basal_adj  # assignment (not '+='), mirroring how cr is reset from orig_cr below
self.cr = self.orig_cr * cr_adj
def reset(self):
self.cr = self.orig_cr
self.cf = self.orig_cf
self.basal = self.orig_basal
self.last_cf = np.inf
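# Minimal usage sketch for ManualBBController (parameter values are illustrative
# only): with cr=10, cf=40, basal=0.02 U/min, sample_rate=5 and glucose=180,
# a 45 g meal gives carb_correct = 4.5 U and hyper_correct = 1 U, so
# bolus = (4.5 + 1) / 5 = 1.1 U/min on top of the 0.02 U/min basal:
#
#   bbc = ManualBBController(target=140, cr=10.0, cf=40.0, basal=0.02)
#   action = bbc.manual_bb_policy(carbs=45, glucose=180)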
def bb_test(bbc, env, n_days, seed, full_save=False):
env.seeds['sensor'] = seed
env.seeds['scenario'] = seed
env.seeds['patient'] = seed
env.reset()
full_patient_state = []
carb_error_mean = 0
carb_error_std = 0.2
carb_miss_prob = 0.05
action = bbc.manual_bb_policy(carbs=0, glucose=140)
for _ in tqdm(range(n_days*288)):
obs, reward, done, info = env.step(action=action.basal+action.bolus)
bg = env.env.CGM_hist[-1]
carbs = info['meal']
if np.random.uniform() < carb_miss_prob:
carbs = 0
err = np.random.normal(carb_error_mean, carb_error_std)
carbs = carbs + carbs * err
action = bbc.manual_bb_policy(carbs=carbs, glucose=bg)
full_patient_state.append(info['patient_state'])
full_patient_state = np.stack(full_patient_state)
if full_save:
return env.env.show_history(), full_patient_state
else:
return {'hist': env.env.show_history()[288:]} | 2.8125 | 3 |
ceilometer/event/trait_plugins.py | redhat-openstack/ceilometer | 1 | 5226 | #
# Copyright 2013 Rackspace Hosting.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class TraitPluginBase(object):
"""Base class for plugins.
It converts notification fields to Trait values.
"""
def __init__(self, **kw):
"""Setup the trait plugin.
        For each Trait definition that a plugin is used on in a conversion
        definition, a new instance of the plugin will be created, and
initialized with the parameters (if any) specified in the
config file.
:param kw: the parameters specified in the event definitions file.
"""
super(TraitPluginBase, self).__init__()
@abc.abstractmethod
def trait_value(self, match_list):
"""Convert a set of fields to a Trait value.
This method is called each time a trait is attempted to be extracted
from a notification. It will be called *even if* no matching fields
are found in the notification (in that case, the match_list will be
empty). If this method returns None, the trait *will not* be added to
the event. Any other value returned by this method will be used as
the value for the trait. Values returned will be coerced to the
appropriate type for the trait.
:param match_list: A list (may be empty if no matches) of *tuples*.
Each tuple is (field_path, value) where field_path is the jsonpath
for that specific field.
Example::
trait's fields definition: ['payload.foobar',
'payload.baz',
'payload.thing.*']
notification body:
{
'message_id': '12345',
'publisher': 'someservice.host',
'payload': {
'foobar': 'test',
'thing': {
'bar': 12,
'boing': 13,
}
}
}
match_list will be: [('payload.foobar','test'),
('payload.thing.bar',12),
('payload.thing.boing',13)]
Here is a plugin that emulates the default (no plugin) behavior:
.. code-block:: python
class DefaultPlugin(TraitPluginBase):
"Plugin that returns the first field value."
def __init__(self, **kw):
super(DefaultPlugin, self).__init__()
def trait_value(self, match_list):
if not match_list:
return None
return match_list[0][1]
"""
class SplitterTraitPlugin(TraitPluginBase):
"""Plugin that splits a piece off of a string value."""
    def __init__(self, separator=".", segment=0, max_split=None, **kw):
        """Set up how to split the field.
:param separator: String to split on. default "."
:param segment: Which segment to return. (int) default 0
:param max_split: Limit number of splits. Default: None (no limit)
"""
self.separator = separator
self.segment = segment
self.max_split = max_split
super(SplitterTraitPlugin, self).__init__(**kw)
def trait_value(self, match_list):
if not match_list:
return None
value = six.text_type(match_list[0][1])
if self.max_split is not None:
values = value.split(self.separator, self.max_split)
else:
values = value.split(self.separator)
try:
return values[self.segment]
except IndexError:
return None
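# Illustrative example for SplitterTraitPlugin (the field path and value are
# hypothetical):
#   plugin = SplitterTraitPlugin(separator='.', segment=1)
#   plugin.trait_value([('payload.host', 'compute.node-7.example.org')])
#   -> 'node-7'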
class BitfieldTraitPlugin(TraitPluginBase):
"""Plugin to set flags on a bitfield."""
def __init__(self, initial_bitfield=0, flags=None, **kw):
"""Setup bitfield trait.
:param initial_bitfield: (int) initial value for the bitfield
Flags that are set will be OR'ed with this.
:param flags: List of dictionaries defining bitflags to set depending
on data in the notification. Each one has the following
keys:
path: jsonpath of field to match.
bit: (int) number of bit to set (lsb is bit 0)
value: set bit if corresponding field's value
matches this. If value is not provided,
bit will be set if the field exists (and
                              is non-null), regardless of its value.
"""
self.initial_bitfield = initial_bitfield
if flags is None:
flags = []
self.flags = flags
super(BitfieldTraitPlugin, self).__init__(**kw)
def trait_value(self, match_list):
matches = dict(match_list)
bitfield = self.initial_bitfield
for flagdef in self.flags:
path = flagdef['path']
bit = 2 ** int(flagdef['bit'])
if path in matches:
if 'value' in flagdef:
if matches[path] == flagdef['value']:
bitfield |= bit
else:
bitfield |= bit
return bitfield
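# Illustrative example for BitfieldTraitPlugin (flag definitions are hypothetical):
#   plugin = BitfieldTraitPlugin(flags=[
#       {'path': 'payload.error', 'bit': 0},
#       {'path': 'payload.state', 'bit': 1, 'value': 'active'},
#   ])
#   plugin.trait_value([('payload.error', 'boom'), ('payload.state', 'active')])
#   -> 3  (bit 0 set because the field exists, bit 1 set because the value matches)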
| 2.0625 | 2 |
web13/jsonapi.py | gongjunhuang/web | 0 | 5227 | <reponame>gongjunhuang/web
from flask import Flask, redirect, url_for, jsonify, request
app = Flask(__name__)
users = []
'''
JSON API
The request sends JSON in the form and JSON is returned.
Benefits:
1. A unified communication format, so there is little constraint on the client language.
2. Easy to expose as an open API.
3. Heavy client-side rendering.
RESTful API
Dr. Fielding
URLs are organized around resources (nouns):
GET    /players          fetch all players
GET    /player/id        fetch the data of the player with that id
PUT    /players          full update
PATCH  /players          partial update
DELETE /player/id        delete one player
GET    /player/id/level
'''
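# Example exchange against the routes defined below (values are illustrative):
#   POST /add  with form field author=alice  -> 302 redirect back to /
#   GET  /json                               -> [{"author": "alice"}]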
@app.route("/", methods=["GET"])
def index():
return'''<form method=post action='/add'>
<input type=text name=author>
    <button>Submit</button>
</form>
'''
@app.route("/add", methods=["POST"])
def add():
form = request.form
users.append(dict(author=form.get("author", "")))
return redirect(url_for(".index"))
@app.route("/json")
def json():
return jsonify(users)
app.run() | 3.265625 | 3 |
cards/migrations/0012_auto_20180331_1348.py | mhndlsz/memodrop | 18 | 5228 | <reponame>mhndlsz/memodrop<filename>cards/migrations/0012_auto_20180331_1348.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-03-31 13:48
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cards', '0011_auto_20180319_1112'),
]
operations = [
migrations.AlterField(
model_name='card',
name='answer',
field=models.TextField(verbose_name='Answer'),
),
migrations.AlterField(
model_name='card',
name='hint',
field=models.TextField(blank=True, verbose_name='Hint'),
),
migrations.AlterField(
model_name='card',
name='question',
field=models.TextField(verbose_name='Question'),
),
]
| 1.640625 | 2 |
MoMMI/Modules/ss14_nudges.py | T6751/MoMMI | 18 | 5229 | import logging
from typing import Match, Any, Dict
import aiohttp
from discord import Message
from MoMMI import comm_event, command, MChannel, always_command
logger = logging.getLogger(__name__)
@comm_event("ss14")
async def ss14_nudge(channel: MChannel, message: Any, meta: str) -> None:
try:
config: Dict[str, Any] = channel.module_config(f"ss14.servers.{meta}")
except ValueError:
return
expect_password = config["password"]
if expect_password != message.get("password"):
return
if "type" not in message or "contents" not in message:
return
contents = message["contents"]
type = message["type"]
if type == "ooc":
final_message = f"\u200B**OOC**: `{contents['sender']}`: {contents['contents']}"
else:
return
await channel.send(final_message)
@always_command("ss14_relay", unsafe=True)
async def ss14_relay(channel: MChannel, match: Match, message: Message) -> None:
if not channel.internal_name:
return
content = message.content
content = content.strip()
if not content or content[0] == "\u200B":
return
server = None
config: Any
for config in channel.server_config("modules.ss14", []):
if config["discord_channel"] != channel.internal_name:
continue
server = config["server"]
if not server:
return
config = channel.module_config(f"ss14.servers.{server}")
password = config["password"]
url = config["api_url"] + "/ooc"
async with aiohttp.ClientSession() as session:
async with session.post(url, json={"password": password, "sender": message.author.name, "contents": content}) as resp:
r = await resp.text()
logger.error(f"{resp.status}")
| 2.265625 | 2 |
oxe-api/test/resource/company/test_get_company_taxonomy.py | CybersecurityLuxembourg/openxeco | 0 | 5230 | <reponame>CybersecurityLuxembourg/openxeco<gh_stars>0
from test.BaseCase import BaseCase
class TestGetCompanyTaxonomy(BaseCase):
@BaseCase.login
def test_ok(self, token):
self.db.insert({"id": 1, "name": "My Company"}, self.db.tables["Company"])
self.db.insert({"id": 2, "name": "My Company 2"}, self.db.tables["Company"])
self.db.insert({"name": "CAT1"}, self.db.tables["TaxonomyCategory"])
self.db.insert({"name": "CAT2"}, self.db.tables["TaxonomyCategory"])
self.db.insert({"id": 1, "name": "VAL1", "category": "CAT1"}, self.db.tables["TaxonomyValue"])
self.db.insert({"id": 2, "name": "VAL2", "category": "CAT2"}, self.db.tables["TaxonomyValue"])
self.db.insert({"company": 1, "taxonomy_value": 1}, self.db.tables["TaxonomyAssignment"])
self.db.insert({"company": 1, "taxonomy_value": 2}, self.db.tables["TaxonomyAssignment"])
self.db.insert({"company": 2, "taxonomy_value": 2}, self.db.tables["TaxonomyAssignment"])
response = self.application.get('/company/get_company_taxonomy/2',
headers=self.get_standard_header(token))
self.assertEqual([{'company': 2, 'taxonomy_value': 2}], response.json)
self.assertEqual(200, response.status_code)
@BaseCase.login
def test_ok_empty(self, token):
self.db.insert({"id": 2, "name": "My Company"}, self.db.tables["Company"])
response = self.application.get('/company/get_company_taxonomy/2',
headers=self.get_standard_header(token))
self.assertEqual(response.json, [])
self.assertEqual(200, response.status_code)
| 2.359375 | 2 |
spoon/models/groupmembership.py | mikeboers/Spoon | 4 | 5231 | <reponame>mikeboers/Spoon
import sqlalchemy as sa
from ..core import db
class GroupMembership(db.Model):
__tablename__ = 'group_memberships'
__table_args__ = dict(
autoload=True,
extend_existing=True,
)
user = db.relationship('Account',
foreign_keys='GroupMembership.user_id',
backref=db.backref('groups', cascade="all, delete-orphan"),
)
group = db.relationship('Account',
foreign_keys='GroupMembership.group_id',
backref=db.backref('members', cascade="all, delete-orphan"),
)
| 2.640625 | 3 |
nonlinear/aorta/nonlinearCasesCreation_aorta.py | HaolinCMU/Soft_tissue_tracking | 3 | 5232 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 25 13:08:16 2020
@author: haolinl
"""
import copy
import os
import time
import numpy as np
import random
import scipy.io # For extracting data from .mat file
class inputFileGenerator(object):
"""
Generate input file for Abaqus.
Unit system:
Length: m
Force: N
Pressure: Pa
"""
def __init__(self, data_file_name, write_path, material_type, fix_indices_list, node_variable_name, elem_variable_name, user_prescribed_force_field=[]):
"""
Initialize parameters.
Parameters:
----------
data_file_name: String.
The file path of information of node, element, etc.
write_path: String.
The path to write the inp file.
material_type: String.
The type of material.
Used to indicate whether to consider material nonlinearity.
fix_indices_list: List of ints.
The node indices to be fixed.
node_variable_name: String.
The variable name of the nodes matrix in the data file.
elem_variable_name: String.
The variable name of the elements matrix in the data file.
user_prescribed_force_field (optional): List of floats.
The user-prescribed vector of laplacian force field.
Size: nSurfI x 3.
Default: [].
"""
# Data & Variables.
self.data_file_name = data_file_name
self.data_mat = scipy.io.loadmat(self.data_file_name)
self._surface_mat = self.data_mat["FaceI"]
self._surface_nodes = self.data_mat["idxSurfI"]
self._surface_nodes_num = self.data_mat["nSurfI"][0,0]
self._outer_surface_regionNum = 22 # Int. The region number of outer surface.
self._outer_surface_nodes_list = self._extractOuterSurfaceNodes(self.data_mat["faces"], self._outer_surface_regionNum) # List of sorted ints. The indices of outer surface nodes. Indexed from 1.
self._outer_surface_nodes_num = len(self._outer_surface_nodes_list)
self._triangle_nodes_list = []
self._coupled_list = []
self._node_variable_name = node_variable_name
self._elem_variable_name = elem_variable_name
self._inputFile_lines_total = []
self.writePath = write_path
self._modulus = 1e7 # Young's modulus. Unit: Pa. Default: 1e7.
self._poisson_ratio = 0.48 # Poisson's ratio. Linear elastic default: 0.3; neo-Hookean default: 0.48.
self._isCoupleOn = False # Boolean. True: use coupling constraint; False: do not use coupling constraint. Must not turn on if applying Laplacian smoothing.
self._coupling_type = "Kinematic" # String. "Kinematic" / "Distributing".
self._coupling_neighbor_layers = 1 # How deep does the neighborhood searching go. Default: 1.
self._isLaplacianSmoothingOn = True # Boolean. True: use laplacian smoothing. False: do not use laplacian smoothing.
self._laplacian_variable_name = "laplacianMatrixI3"
self._massMatrix_variable_name = "massMatrixI3"
self._laplacian_iter_num = 20 # Default: 3.
self._smoothing_rate = 0.1 # Default: 0.1 (Previous: 1e-4).
self.loads_num = 3 # For initial testing.
self._load_sampling_style = "gaussian" # String. Indicating the type of random sampling for force components. "uniform" / "gaussian".
self._load_scale = (0.0, 10.0) # Absolute range of the force for uniform sampling. Case and BC specific. (min, max). Unit: N.
self._gaussian_params = (4.0, 0.8) # Mean and deviation of the force for Gaussian sampling. Case and BC specific. (mean, deviation). Unit: N.
self._load_params_tuple = None
self._initial_force_component_vector = [] # List of floats. Default: []. Example: [5., 5., 5.].
self.autoIncrementNum = 5000 # Int. The maximum increment number of the AutoSolver.
self.initIncrem = 0.001 # Float. The initial length of the increment (for fixed-step, this is also the length per increm).
        self.minIncrem = 1e-20 # Float. The minimum increment length for the AutoSolver (useless for the StaticSolver).
        self.maxIncrem = 1.0 # Float. The maximum increment length for the AutoSolver (useless for the StaticSolver).
self.totalTime = 1.0 # Float. The total time for one simulation step.
self.frameNum = 1 # Int. The number of frames intending to extract from the nodal file.
# ================== Load sampling variables ================== #
if self._isCoupleOn: self._couple_region_num = self.loads_num
else: self._couple_region_num = 0
if self._load_sampling_style == "gaussian": self._load_params_tuple = self._gaussian_params
elif self._load_sampling_style == "uniform": self._load_params_tuple = self._load_scale
else:
self._load_sampling_style = "uniform"
self._load_params_tuple = self._load_scale
# ============================================================= #
# Header.
self._header = ["*Heading"]
# Part definition.
self._part_name = "part-1"
self._material_name = "tissue"
self._part_initial = ["*Part, name={}".format(self._part_name)] # Total list of Part definition.
self._node = ["*Node"]
self._elem = ["*Element, type=C3D10"] # Nonlinear tetrahedron. http://web.mit.edu/calculix_v2.7/CalculiX/ccx_2.7/doc/ccx/node33.html#tennode.
self._nset_all = []
self._elset_all = []
self._section = ["*Solid Section, elset=allElems, material={}".format(self._material_name),
","]
self._part_end = ["*End Part"]
self._new_node_list = []
self._new_node_dict = {}
self._node_num = None
self._orig_node_num = None
self._elem_num = None
self._part = self.generatePart()
# Load settings.
self._loads_nset_name_list = []
self._rf_name_list = []
self._rf_nset_name_list = []
self._rf_nsets = []
self._load_nsets = [] # Nset definition of loads.
self._load = self.generateLoadSetting()
# Assembly definition.
self._assembly_name = "assembly-1"
self._instance_name = "instance-1"
self._assembly_initial = ["*Assembly, name={}".format(self._assembly_name)] # Total list of Assembly definition.
self._instance = ["*Instance, name={}, part={}".format(self._instance_name, self._part_name),
"*End Instance"]
self._ref_nodes_list = []
self._fix_nset_name = "fix"
self._fix_indices_list = fix_indices_list
self._fix_nset = self.generateNset(self._fix_indices_list, self._fix_nset_name, self._instance_name) # Nset definition of fix BC.
self._loads_posi_indices_list = self._generateLoadPositions(self.loads_num, self._fix_indices_list) # Generate load positions. Randomly. For fixed mode: style="fix", input_posi_indices_list=[415, 470, 107].
self._laplacian_initial_loads_posi = None # List. Containing the original position of concentrated forces.
self._laplacian_force_field = None # 2D Array of floats. Size: nSurfI * 3. The force field on the outer surface.
self._user_prescribed_force_field = user_prescribed_force_field # List of floats. Size: nSurfI * 3. The prescribed force field on the outer surface. Default: [].
self._surface_list = []
self._coupling_list = []
self._nset_boundary = [] # All nsets definitions in assembly. Boundary conditions
self._assembly_end = ["*End Assembly"]
self._assembly = self.generateAssembly()
# Material.
self.material_type = material_type # String. Indicate material type. "linear"/"neo_hookean_fitting"/"neo_hookean_solid".
self._material_def_file_name = "" # Default: "". If there is a file of stress strain definition, please specify here (must not be "").
self._material = self.generateMaterial(self.material_type)
# Boundary condition.
self._boundary_initial = ["*Boundary"]
self._boundary = self.generateBoundaryCondition_fixAll()
# Step settings.
self.freq = int(self.autoIncrementNum / self.frameNum) # Int. The data frame extraction frequency (also refers to the number of increments. Extract one frame per "self.freq" increments). Especially for StaticSolver case.
self._step = ["*Step, name=step-1, nlgeom=YES, inc={}".format(self.autoIncrementNum),
"*Static",
"{}, {}, {}, {}".format(self.initIncrem, self.totalTime,
self.minIncrem, self.maxIncrem)] # Auto solver.
self._step_end = ["*End Step"]
        # Restart, output and result-file settings.
self._restart = ["*Restart, write, frequency=0"]
self._output = ["*Output, field, variable=PRESELECT",
"*Output, history, variable=PRESELECT"]
self._fil = ["*FILE FORMAT, ASCII",
"*node file, frequency={}".format(self.freq),
"U, COORD",
"*El file, frequency={}".format(self.freq),
"S, COORD"]
self._resSettings = self._restart + self._output + self._fil
def readFile(self, read_path):
"""
Read files from specific path.
Parameters:
----------
read_path: String.
Path of the original inp file.
Return:
----------
lines: List of strings.
The list of lines from the file.
"""
with open(read_path, "rt") as f: lines = f.read().splitlines()
return lines
def writeFile(self, write_status):
"""
Write 'self.write_lines' into a new inp file.
Parameters:
----------
write_status: String.
"Normal" / "Fast".
"Normal": generate all definitions;
"Fast": generate nodes and elements definition only.
"""
if write_status == "Normal":
self._inputFile_lines_total = (self._header + self._part + self._assembly +
self._material + self._boundary + self._step +
self._load + self._resSettings + self._step_end)
content = '\n'.join(self._inputFile_lines_total)
with open(self.writePath, 'w') as f: f.write(content)
elif write_status == "Fast":
self._inputFile_lines_total = self._header + self._part
content = '\n'.join(self._inputFile_lines_total)
with open(self.writePath, 'w') as f: f.write(content)
else:
self.writeFile("Normal")
def generatePart(self):
"""
Generate part definition.
Returns:
----------
The list collection of all sub-definition lists, including:
part_initial: header part of "Part definition".
node: Node definition.
elem: Element definition.
elset_all: The elset containing all elements. For material definition specifically.
section: Section definition.
part_end: The endline of "Part definition".
"""
self.generateNodes(self.data_mat[self._node_variable_name], self._node)
self.generateElements(self.data_mat[self._elem_variable_name], self._elem)
self.nonlinearization()
# Generate all element elset.
allElem_list, allElem_list_name = [], "allElems"
for i in range(len(self._elem[1:])): allElem_list.append(str(i+1))
self._elset_all = self.generateElset(allElem_list, allElem_list_name)
# Generate Section.
self._section = self.generateSection(allElem_list_name, self._material_name)
# Collection.
return (self._part_initial + self._node + self._elem + self._elset_all +
self._section + self._part_end)
def generateNodes(self, node_mat, target_node_list, specified_indices_list=[]):
"""
Generate nodes information.
Parameters:
----------
node_mat: 2D Array of ints.
The matrix containing the coordinates of the nodes to-be-defined under "*Node".
        target_node_list: List of strings.
The definition of node list.
specified_indices_list (optional): List of ints.
List the indices of the input node list, following the exact order of the node_mat.
Default: [].
"""
for i in range(node_mat.shape[0]):
if specified_indices_list == []: node_list_temp = ["{}".format(i+1)]
else: node_list_temp = ["{}".format(specified_indices_list[i])]
node_list_temp += [str(coord) for coord in list(node_mat[i,:])]
target_node_list.append(', '.join(node_list_temp))
def _extractOuterSurfaceNodes(self, faces_def_matrix, outer_surface_regionNum):
"""
Extract the nodes on the outer surface of the geometry (for force application in next step).
Parameters:
----------
faces_def_matrix: 2D Array of ints.
The definition of all faces, including the information of surface region number.
outer_surface_regionNum: Int.
The region number of outer surface of the geometry.
Returns:
----------
outer_surface_nodes_list: List of ints.
The indices of nodes on the outer surface. Indexed from 1. Sorted.
"""
outer_surface_nodes_list = []
for i in range(faces_def_matrix.shape[0]):
if faces_def_matrix[i,0] == outer_surface_regionNum: # The region number of outer surface.
outer_surface_nodes_list += [int(ind) for ind in faces_def_matrix[i,1:]] # Indexed from 1.
outer_surface_nodes_list = list(set(outer_surface_nodes_list))
outer_surface_nodes_list.sort()
return outer_surface_nodes_list
def generateElements(self, elem_mat, target_elem_list, specified_indices_list=[]):
"""
Generate elements information.
Parameters:
----------
elem_mat: 2D Array of ints.
The matrix containing the indices of each element to-be-defined under "*Element".
        target_elem_list: List of strings.
The definition of element list.
specified_indices_list (optional): List of ints.
List the indices of the input element list, following the exact order of the elem_mat.
Default: [].
"""
for i in range(elem_mat.shape[0]):
if specified_indices_list == []: elem_list_temp = ["{}".format(i+1)]
else: elem_list_temp = ["{}".format(specified_indices_list[i])]
elem_line_temp = [str(ind) for ind in list(elem_mat[i,:])]
# Make sure the order of nodes for tetrahedron definition is counter-clockwise, otherwise resulting in negative volume.
ind_temp = elem_line_temp[1]
elem_line_temp[1] = elem_line_temp[2]
elem_line_temp[2] = ind_temp
elem_list_temp += elem_line_temp
target_elem_list.append(', '.join(elem_list_temp))
def generateNset(self, node_list, nset_name, instance_name=None):
"""
Generate node set information.
Parameters:
----------
node_list: List of ints.
The list of nodes to be contained in the node list.
nset_name: String.
The name of the to-be-defined node list.
instance_name (optional): String.
The name of specified instance.
Only use in assembly definition.
Default: None. (Part cases)
Returns:
----------
nset: List of strings.
The definition of a specific nset.
"""
if instance_name == None: nset = ["*Nset, nset={}".format(nset_name)]
else: nset = ["*Nset, nset={}, instance={}".format(nset_name, instance_name)]
nset_line_temp, nset_string_temp = [], None
for i, ind in enumerate(node_list):
nset_line_temp.append(str(ind))
if (i+1) % 10 == 0:
nset_string_temp = ', '.join(nset_line_temp)
nset.append(copy.deepcopy(nset_string_temp))
nset_line_temp, nset_string_temp = [], None
nset_string_temp = ', '.join(nset_line_temp)
nset.append(copy.deepcopy(nset_string_temp))
return nset
def generateElset(self, elem_list, elset_name, instance_name=None):
"""
Generate element set information.
Parameters:
----------
elem_list: List of ints.
The list of elements to be contained in the element list.
elset_name: String.
The name of the to-be-defined element list.
instance_name (optional): String.
The name of specified instance.
Only use in assembly definition.
Default: None. (Part cases)
Returns:
----------
elset: List of strings.
The definition of a specific elset.
"""
if instance_name == None: elset = ["*Elset, elset={}".format(elset_name)]
else: elset = ["*Elset, elset={}, instance={}".format(elset_name, instance_name)]
elset_line_temp, elset_string_temp = [], None
for i, ind in enumerate(elem_list):
elset_line_temp.append(str(ind))
if (i+1) % 10 == 0:
elset_string_temp = ', '.join(elset_line_temp)
elset.append(copy.deepcopy(elset_string_temp))
elset_line_temp, elset_string_temp = [], None
elset_string_temp = ', '.join(elset_line_temp)
elset.append(copy.deepcopy(elset_string_temp))
return elset
def generateSection(self, elset_name, material_name):
"""
Generate section information.
Parameters:
----------
elset_name: String.
The name of the elset to be assigned a section.
material_name: String.
The name of defined material.
Returns:
----------
section: List of strings.
The definition of section.
"""
section = ["*Solid Section, elset={}, material={}".format(elset_name, material_name),
","]
return section
def generateMaterial(self, material_type):
"""
Generate lines for material definition.
Parameters:
----------
material_type: String.
Indicate what type of material is used.
Returns:
----------
material_lines: List of lines.
The lines of material definition.
"""
material_lines = ["*Material, name={}".format(self._material_name)]
if material_type == "neo_hookean_fitting":
stress_strain_lines = self._generateNeoHookeanFitting(self._modulus, (-0.3, 0.3), file_name=self._material_def_file_name)
material_lines += ["*Hyperelastic, neo hooke, test data input, poisson={}".format(self._poisson_ratio),
"*Uniaxial Test Data"]
material_lines += stress_strain_lines
elif material_type == "neo_hookean_solid":
c10 = self._modulus / (4 * (1 + self._poisson_ratio))
d1 = 6 * (1 - 2 * self._poisson_ratio) / self._modulus
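            # Worked example with this file's defaults (modulus = 1e7 Pa,
            # Poisson's ratio = 0.48):
            #   c10 = 1e7 / (4 * 1.48)      ~ 1.69e6 Pa
            #   d1  = 6 * (1 - 0.96) / 1e7  = 2.4e-8 1/Pa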
material_lines += ["*Hyperelastic, neo hooke",
"{}, {}".format(c10, d1)]
elif material_type == "linear":
material_lines += ["*Elastic",
"{}, {}".format(self._modulus, self._poisson_ratio)]
else: material_lines = self.generateMaterial("linear")
return material_lines
def _generateNeoHookeanFitting(self, modulus, strain_range, file_name=""):
"""
Import/Generate stress strain data for neo-Hookean material fitting.
Parameters:
----------
modulus: Float.
The elastic modulus of material.
strain_range: Tuple of floats.
Range for strain interpolation.
file_name (optional): String.
The name of stress strain data definition file.
Default: "".
Returns:
----------
stress_strain_lines: List of strings.
The lines of stress strain data.
"""
if file_name != "": return self.readFile(file_name)
else:
"""
Assumptions of neo-Hookean formulation:
Incompressible (Poisson's ratio = ~0.5, small deformation).
Undergoing uniaxial loading.
Formulation: sigma = 2*C*(stretch - 1/(stretch^2)).
E = 6*C.
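            Consistency check (values illustrative): with C = E/6 the stress
            becomes sigma = (E/3)*(stretch - 1/(stretch^2)), which is the
            coefficient used in the code below. For E = 1e7 Pa and stretch = 1.1:
            sigma = (1e7/3) * (1.1 - 1/1.21) ~ 9.1e5 Pa.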
"""
strain_data = np.linspace(strain_range[0], strain_range[1], 100)
stretch_data = strain_data + 1.0
stress_data = (self._modulus / 3.0) * (stretch_data - 1.0 / stretch_data**2) # Formulation.
stress_strain_lines = []
for i in range(len(stress_data)):
stress_strain_lines.append("%.6f, %.6f" % (stress_data[i], strain_data[i]))
return stress_strain_lines
def _generateLoadPositions(self, loads_num, fix_indices_list, style="random", input_posi_indices_list=[]):
"""
Randomly generate positions of the load.
Parameters:
----------
loads_num: Int.
Number of loads.
fix_indices_list: List of ints.
Indices of fixed nodes.
style (optional): String.
Indicate how to generate initial load positions.
"random" / "fix":
"random": Randomly generate load positions.
"fix": Use the user input of initial load position indices.
Default: "random".
input_posi_indices_list (optional): List of ints.
User input of initial load positions indices list.
Indexed from 1.
Default: [].
Returns:
----------
loads_posi_indices_list: List of ints.
Picked indices for load application positions.
"""
if style == "random":
loads_posi_indices_list = []
for i in range(loads_num):
while(True):
                    load_posi_index_temp = random.choice(self._outer_surface_nodes_list) # Randomly choose an outer surface node to apply load F(x, y, z). Indexed from 1.
if load_posi_index_temp not in fix_indices_list: break # The randomly generated index cannot be one of the fixed nodes.
loads_posi_indices_list.append(load_posi_index_temp)
return loads_posi_indices_list
elif style == "fix": return input_posi_indices_list
else: return self._generateLoadPositions(loads_num, fix_indices_list)
def _generateLoadValues(self, output_dimension, load_scale, sampling_style="uniform"):
"""
Randomly generate force values for load component definition.
Using function: numpy.random.rand().
Parameters:
----------
output_dimension: Tuple of ints.
The shape of output random array.
Size: 2*1. (dim1, dim2).
load_scale: Tuple of floats.
            Size: 2*1. (min_load, max_load) / (mean, deviation).
sampling_style (optional): String.
Indicating the type of sampling.
"uniform": uniform distribution.
"gaussian": Gaussian distribution.
Default: "uniform".
Returns:
----------
load_result: Array of floats.
Size: output_dimension.
"""
if sampling_style == "uniform":
load_result = (np.random.rand(output_dimension[0], output_dimension[1]) * 2 - 1) * abs(load_scale[1] - load_scale[0])
load_result = load_result.reshape(-1,1)
for index, load_value_temp in enumerate(load_result):
if load_value_temp < 0: load_result[index] -= self._load_scale[0]
else: load_result[index] += self._load_scale[0]
load_result = load_result.reshape(output_dimension[0], output_dimension[1])
elif sampling_style == "gaussian":
mean, deviation = load_scale[0], load_scale[1]
load_result = np.random.normal(mean, deviation, size=output_dimension)
load_result = load_result.reshape(-1,1)
for index, load_value_temp in enumerate(load_result):
if np.random.rand() <= 0.5: load_result[index] *= -1
load_result = load_result.reshape(output_dimension[0], output_dimension[1])
else: load_result = self._generateLoadValues(output_dimension, load_scale)
return load_result
def generateAssembly(self):
"""
Generate assembly definition.
Returns:
----------
The list collection of all sub-definition lists, including:
assenbly_initial: Header of the assembly definition.
instance: The instance definition.
nset_boundary: The definition of BC related node set.
asssenbly_end: The endline of assembly definition.
"""
# Generate "self.loads_num" nsets, each of which has 1 node.
if self._isCoupleOn:
for i, load_posi_index_temp in enumerate(self._loads_posi_indices_list):
ref_name_temp = "rf-{}".format(i+1)
ref_nset_name_temp = "rf-{}-nset".format(i+1)
self._rf_name_list.append(ref_name_temp)
self._rf_nset_name_list.append(ref_nset_name_temp)
# Generate assembly node definitions for reference points.
ref_node_list_temp = ["*Node"]
ref_pt_coord_list_temp = [float(item) for item in self._node[load_posi_index_temp].split(',')[1:]]
self.generateNodes(np.array(ref_pt_coord_list_temp).astype(float).reshape(1,-1), ref_node_list_temp,
specified_indices_list=[i+1])
self._ref_nodes_list += copy.deepcopy(ref_node_list_temp)
rf_nset_list_temp = self._findCouplingNodes(load_posi_index_temp, self._coupling_neighbor_layers)
# Generate reference point node sets.
self._load_nsets += self.generateNset([i+1], ref_name_temp)
# Generate coupling constraint node sets.
self._rf_nsets += self.generateNset(rf_nset_list_temp, ref_nset_name_temp,
self._instance_name)
self.generateCoupling()
else:
if self._isLaplacianSmoothingOn:
force_vector_temp = np.zeros(shape=(3*self._surface_nodes_num, 1))
self._laplacian_initial_loads_posi = copy.deepcopy(self._loads_posi_indices_list)
if self._initial_force_component_vector == []:
for load_posi_index_temp in self._loads_posi_indices_list:
force_vector_temp[(load_posi_index_temp-1)*3:load_posi_index_temp*3,:] = self._generateLoadValues((3,1), self._load_params_tuple,
sampling_style=self._load_sampling_style)
else:
for load_posi_index_temp in self._loads_posi_indices_list:
force_vector_temp[(load_posi_index_temp-1)*3:load_posi_index_temp*3,:] = np.array(self._initial_force_component_vector).astype(float).reshape(3,1)
laplacian_matrix, mass_matrix = self.data_mat[self._laplacian_variable_name], self.data_mat[self._massMatrix_variable_name]
laplacian_matrix = self._laplacianMatrixShrink(laplacian_matrix, self._surface_nodes, self.data_mat["faces"], self._outer_surface_regionNum)
force_vector_new = self._laplacianSmoothing(force_vector_temp, laplacian_matrix, mass_matrix, iter_num=self._laplacian_iter_num,
smoothing_rate=self._smoothing_rate, laplacian_force_field=self._user_prescribed_force_field) # Size: (nSurfI x 3)*1. Fix force value: initial_BC_state="fix" (not recommended).
self._laplacian_force_field = force_vector_new.reshape(-1,3)
self._loads_posi_indices_list = copy.deepcopy([(list(force_vector_new).index(item)//3)+1 for item in list(force_vector_new) if item != 0]) # Indexed from 1.
self._loads_posi_indices_list = list(set(self._loads_posi_indices_list))
self._loads_posi_indices_list.sort()
for i, load_posi_index_temp in enumerate(self._loads_posi_indices_list):
load_nset_name_temp = "Load-{}".format(i+1)
self._loads_nset_name_list.append(load_nset_name_temp)
self._load_nsets += self.generateNset([load_posi_index_temp], load_nset_name_temp, self._instance_name)
self._load_nsets += self.generateNset(self._laplacian_initial_loads_posi, "Orig_loads_posi", self._instance_name)
self._load = self.generateLoadSetting(force_list=list(force_vector_new.reshape(-1,1)))
else:
for i, load_posi_index_temp in enumerate(self._loads_posi_indices_list):
load_nset_name_temp = "Load-{}".format(i+1)
self._loads_nset_name_list.append(load_nset_name_temp)
self._load_nsets += self.generateNset([load_posi_index_temp], load_nset_name_temp, self._instance_name)
# Concatenate assembly subparts.
self._nset_boundary = self._nset_boundary + self._load_nsets + self._rf_nsets + self._fix_nset + self._surface_list + self._coupling_list
return (self._assembly_initial + self._instance + self._ref_nodes_list + self._nset_boundary + self._assembly_end)
def generateCoupling(self):
"""
Generate coupling constriants for concentrated forces application.
"""
for index, rf_name in enumerate(self._rf_nset_name_list):
self._surface_list += ["*Surface, type=NODE, name={}_CNS_, internal".format(rf_name),
"{}, 1.".format(rf_name)]
self._coupling_list += ["*Coupling, constraint name={}, ref node={}, surface={}_CNS_".format(self._rf_name_list[index],
self._rf_name_list[index],
rf_name),
"*{}".format(self._coupling_type)]
def _findCouplingNodes(self, rf_node_ind, neighbor_layers):
"""
Find the immediate neighbors of each specified node index.
Parameters:
----------
rf_node_ind: Int.
The index of target node.
Returns:
----------
rf_nset_list: List of ints (duplicated items removed).
"rf_node_ind"'s corresponding immediate neighbor nodes set.
"""
rf_nset_list, new_nodes_list, searched_nodes_list = [rf_node_ind], [rf_node_ind], []
for j in range(neighbor_layers):
for ind_temp in new_nodes_list:
for i in range(len(self._triangle_nodes_list)):
if ind_temp in self._triangle_nodes_list[i]:
rf_nset_list += copy.deepcopy(self._triangle_nodes_list[i])
else: continue
searched_nodes_list += copy.deepcopy(new_nodes_list)
rf_nset_list = list(set(copy.deepcopy(rf_nset_list)))
new_nodes_list = [ind for ind in rf_nset_list if ind not in searched_nodes_list]
# Avoid assigning same nodes to different coupled node sets.
for ind in rf_nset_list:
if ind in self._coupled_list: rf_nset_list.remove(ind)
else: self._coupled_list.append(ind)
return rf_nset_list
def generateBoundaryCondition_fixAll(self):
"""
Generate fix boundary condition.
Returns:
----------
The list collection of all sub-definition lists, including:
boundary_initial: Header of boundary condition definition.
BC_list_temp: The detailed BC definition of boundary conditions.
"""
BC_list_temp = []
for i in range(6): # 6: 6 DOFs (disp. + rot.); 3: 3 DOFs (disp.).
BC_list_temp.append("{}, {}, {}".format(self._fix_nset_name, i+1, i+1))
return (self._boundary_initial + BC_list_temp)
def generateLoadSetting(self, force_list=[]):
"""
Generate load information.
Returns:
----------
load_list: List of strings.
Definition of concentrated forces.
force_list (optional): List of forces (floats).
Size: loads_num * 3.
Default: [].
"""
load_list = []
if force_list == []:
force_list = list(self._generateLoadValues((self.loads_num*3, 1), self._load_params_tuple, sampling_style=self._load_sampling_style))
force_list = np.array(force_list).astype(float).reshape(-1,3) # 2D Array of floats. Size: self._loads_num * 3.
if self._isCoupleOn:
for j, rf_name in enumerate(self._rf_name_list): # Length: self._loads_num
load_temp = ["*Cload, op=NEW"]
for i in range(force_list.shape[1]): # 3: Three directions.
load_temp.append("{}, {}, {}".format(rf_name, i+1, force_list[j,i]))
load_list += copy.deepcopy(load_temp)
else:
for j, load_name in enumerate(self._loads_nset_name_list): # Length: length of self._loads_nset_name_list.
load_temp = ["*Cload"]
for i in range(force_list.shape[1]): # 3: Three directions.
load_temp.append("{}, {}, {}".format(load_name, i+1, force_list[self._loads_posi_indices_list[j]-1,i]))
load_list += copy.deepcopy(load_temp)
return load_list
def _laplacianMatrixShrink(self, laplacian_matrix, surface_nodes_list, faces_def_matrix, outer_surface_regionNum):
"""
Assign zeros to the DOFs without force value applied.
Parameters:
----------
laplacian_matrix: 2D Array of floats.
The surface's Laplacian for force smoothing.
Size: nSurfI*3 x nSurfI*3.
surface_nodes_list: List of ints.
All indices of nodes on all surfaces.
faces_def_matrix: 2D Array of ints.
The definition of all faces, including the information of surface region number.
outer_surface_regionNum: Int.
The region number of outer surface of the geometry.
Returns:
----------
laplacian_matrix: 2D Array of floats.
Laplacian with zeros assigned to the nodes not on the outer surfaces.
Size: nSurfI*3 x nSurfI*3.
"""
surface_nodes_list = [ind for ind in surface_nodes_list]
outer_surface_nodes_list = self._extractOuterSurfaceNodes(faces_def_matrix, outer_surface_regionNum)
other_surface_nodes_list = [ind for ind in surface_nodes_list if ind not in outer_surface_nodes_list]
other_surface_nodes_list.sort()
for ind in other_surface_nodes_list:
laplacian_matrix[surface_nodes_list.index(ind)*3:(surface_nodes_list.index(ind)+1)*3,:] = 0.0
laplacian_matrix[:,surface_nodes_list.index(ind)*3:(surface_nodes_list.index(ind)+1)*3] = 0.0
return laplacian_matrix
def _laplacianSmoothing(self, force_vector, laplacian_matrix, mass_matrix, iter_num=3, smoothing_rate=1e-4, initial_BC_state="", laplacian_force_field=[]):
"""
Implement laplacian smoothing based on pre-calculated Laplacian matrix.
Formulation: Forward Euler.
F_(n+1) = (I + lambda*massMatrix*Laplacian) * F_n
Parameters:
----------
force_vector: 1D Array of floats.
            With concentrated force values applied at the specified nodes.
Size: (self._surface_nodes_num x 3) * 1.
laplacian_matrix: 2D Array of floats.
Size: (self._surface_nodes_num x 3) * (self._surface_nodes_num x 3).
mass_matrix: 2D Array of floats.
Diagonal matrix.
Size: (self._surface_nodes_num x 3) * (self._surface_nodes_num x 3).
iter_num (optional): Int.
The number of smoothing iterations.
Default: 3.
smoothing_rate (optional): float.
The coefficient that control the step size of smoothing.
Default: 1e-4.
initial_BC_state (optional): String.
Indicating whether to "fix" or "decay" the original concentrated force value.
Default: "". Indicating smoothing including the original forces.
laplacian_force_field (optional): List of floats.
The user-prescribed vector of laplacian force field.
Size: self._surface_nodes_num x 3.
Default: [].
Returns:
----------
force_vector_new: 1D Array of floats.
The laplacian-smoothed force vector.
Size: (self._surface_nodes_num x 3) * 1.
"""
if laplacian_force_field == []:
force_vector_new = copy.deepcopy(force_vector)
for i in range(iter_num):
force_vector_new += smoothing_rate * (laplacian_matrix @ force_vector_new) # Without mass matrix.
# force_vector_new += smoothing_rate * (mass_matrix @ laplacian_matrix @ force_vector_new) # With mass matrix (NOT recommended).
if initial_BC_state == "fix":
for j, value in enumerate(force_vector):
if value != 0:
force_vector_new[j] = value
else: force_vector_new = np.array(laplacian_force_field).astype(float).reshape(len(laplacian_force_field),1)
return force_vector_new
def _computeMidPoint(self, ind_1, ind_2):
"""
Compute the mid-point of the edge.
Parameters:
----------
ind_1: Int.
The first index of the node pair. Indexed from 1.
ind_2: Int.
The second index of the node pair. Indexed from 1.
Returns:
----------
ind_mid: Int.
The index of the self._node. Index from 1.
"""
key_string_temp_1, key_string_temp_2 = "{}_{}".format(ind_1, ind_2), "{}_{}".format(ind_2, ind_1)
if key_string_temp_1 in self._new_node_dict.keys(): return self._new_node_dict[key_string_temp_1]
elif key_string_temp_2 in self._new_node_dict.keys(): return self._new_node_dict[key_string_temp_2]
else:
coord_temp_1 = np.array(self._node[ind_1].split(',')[1:]).astype(float).reshape(1,-1)
coord_temp_2 = np.array(self._node[ind_2].split(',')[1:]).astype(float).reshape(1,-1)
coord_temp_mid = (coord_temp_1 + coord_temp_2) / 2.0
coord_mid_list = [str(item) for item in list(coord_temp_mid[0])]
self._node_num = len(self._node)
new_node_def_list_temp = copy.deepcopy([str(self._node_num)])
new_node_def_list_temp += copy.deepcopy(coord_mid_list)
self._node.append(', '.join(new_node_def_list_temp))
self._new_node_list.append(', '.join(new_node_def_list_temp))
self._new_node_dict[key_string_temp_1] = self._node_num
self._new_node_dict[key_string_temp_2] = self._node_num
return self._node_num
def insertNode(self):
"""
Insert one node (at the mid-point) of each edge.
Create C3D10 element structure.
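        Assumed Abaqus C3D10 node ordering: corner nodes 1-4 are kept from the
        original C3D4 element and mid-edge nodes 5-10 lie on edges
        1-2, 2-3, 3-1, 1-4, 2-4 and 3-4 respectively, matching the order in
        which the mid-points are generated below.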
"""
for index, elem_def_string in enumerate(self._elem[1:]):
elem_node_list_temp = [int(ind) for ind in elem_def_string.split(',')[1:]]
# Obtain the mid-point index in order. Assume tetrahedral element (C3D4).
mid_pt_ind_5 = self._computeMidPoint(elem_node_list_temp[0], elem_node_list_temp[1])
mid_pt_ind_6 = self._computeMidPoint(elem_node_list_temp[1], elem_node_list_temp[2])
mid_pt_ind_7 = self._computeMidPoint(elem_node_list_temp[0], elem_node_list_temp[2])
mid_pt_ind_8 = self._computeMidPoint(elem_node_list_temp[0], elem_node_list_temp[3])
mid_pt_ind_9 = self._computeMidPoint(elem_node_list_temp[1], elem_node_list_temp[3])
mid_pt_ind_10 = self._computeMidPoint(elem_node_list_temp[2], elem_node_list_temp[3])
elem_new_def_list_temp = [str(mid_pt_ind_5),
str(mid_pt_ind_6),
str(mid_pt_ind_7),
str(mid_pt_ind_8),
str(mid_pt_ind_9),
str(mid_pt_ind_10)]
# Redefine the new C3D10 element in order.
elem_def_list_temp = copy.deepcopy(elem_def_string.split(',')) + copy.deepcopy(elem_new_def_list_temp)
elem_def_string_temp = ', '.join(elem_def_list_temp)
self._elem[index+1] = copy.deepcopy(elem_def_string_temp)
def _triangleNodesCollection(self):
"""
Collect all the nodes on each triangle (surface).
Need to be implemented after "self.insertNode()".
"""
for i in range(self._surface_mat.shape[0]):
tri_temp = self._surface_mat[i,:]
# Assuming all triangles on the surface of geometry.
middle_pts_list_temp = [self._computeMidPoint(tri_temp[0], tri_temp[1]),
self._computeMidPoint(tri_temp[0], tri_temp[2]),
self._computeMidPoint(tri_temp[1], tri_temp[2])]
triangle_nodes_list_temp = list(copy.deepcopy(tri_temp)) + copy.deepcopy(middle_pts_list_temp)
self._triangle_nodes_list.append(copy.deepcopy(triangle_nodes_list_temp)) # List of lists of ints.
def nonlinearization(self):
"""
Nonlinearize the linear tetrahedral (CST) element to quadratic tetrahedral element.
"""
self._elem_num = len(self._elem) - 1
self._orig_node_num = len(self._node) - 1
self.insertNode()
self._triangleNodesCollection()
self._node_num = len(self._node) - 1
def saveLog(file_name_list, elapsed_time_list, write_status, data_file_name,
sample_num, fix_indices_list, loads_num, load_sampling_type, load_param_tuple,
material_type, modulus, poisson_ratio, isCoupleOn, isLaplacianSmoothingOn,
coupling_type="", coupling_neighbor_layer_num=1,
laplacian_iter_num=5, laplacian_smoothing_rate=1e-4, write_path="nonlinear_case_generation.log"):
"""
Save the nonlinear cases generation results into .log file.
Parameters:
----------
file_name_list: List of strings.
Names of generated files.
elapsed_time_list: List of floats.
Elapsed time of generation for each input file.
In exact order.
write_status: String.
Indicating the type of input file generation.
"Normal" / "Fast":
"Normal": generate all definitions;
"Fast": generate nodes and elements definition only.
data_file_name: String.
The name of modeling data file.
Format: .mat
sample_num: Int.
Number of generated input files.
fix_indices_list: List of ints.
Indices of fixed points.
Indexed from 1.
loads_num: Int.
The number of concentrated forces.
load_sampling_type: String.
The distribution type for force sampling.
"uniform" / "gaussian":
"uniform": uniform distribution with specified (min, max) range.
"gaussian": gaussian distribution with specified (mean, dev) parameters.
load_param_tuple: tuple of floats.
Parameters of load sampling.
load_sampling_type specific.
material_type: String.
        The type of material.
        "linear" / "neo_hookean_solid" / "neo_hookean_fitting":
            "linear": linear elastic material.
            "neo_hookean_solid": neo-Hookean solid following the strain energy formulation.
            "neo_hookean_fitting": neo-Hookean solid following the stress-strain curve fitted from user-input stress-strain data.
modulus: Float.
Elastic modulus of the material.
poisson_ratio: Float.
Poisson's ratio of the material.
isCoupleOn: Boolean indicator.
True: using coupling constraint for local force distribution.
False: not using coupling constraint.
isLaplacianSmoothingOn: Boolean indicator.
True: using Laplacian-Beltrami operator matrix to smooth the force distribution.
False: not using Laplacian smoothing.
coupling_type (optional): String.
The type of coupling constraint.
Default: "".
coupling_neighbor_layer_num (optional): Int.
            The number of neighbor layers to which the local force distribution extends.
Default: 1.
laplacian_iter_num (optional): Int.
The number of iteration for laplacian smoothing.
Default: 5.
laplacian_smoothing_rate (optional): Float.
The rate of Laplacian smoothing.
Default: 1e-4.
write_path (optional): String.
The path of to-be-written file.
Default: "nonlinear_case_generation.log".
"""
if isCoupleOn: isCoupleOn_status = "On"
else: isCoupleOn_status = "Off"
if isLaplacianSmoothingOn: isLaplacianSmoothingOn_status = "On"
else: isLaplacianSmoothingOn_status = "Off"
content = ["Data_file_name: {}".format(data_file_name),
"Sample_num = {}".format(sample_num),
"Fixed_indices_list (indexed from 1): {}".format(fix_indices_list),
"Material type: {}".format(material_type),
"Elastic modulus = {} Pa".format(modulus),
"Poisson's ratio = {}".format(poisson_ratio),
"Loads_num = {}".format(loads_num)]
if load_sampling_type == "uniform":
content += ["Load sampling type: {}".format(load_sampling_type),
"Load sampling range (min, max): {} N".format(load_param_tuple)]
elif load_sampling_type == "gaussian":
content += ["Load sampling type: {}".format(load_sampling_type),
"Load sampling parameters (mean, dev): {} N".format(load_param_tuple)]
else:
load_sampling_type = "uniform"
content += ["Load sampling type: {}".format(load_sampling_type),
"Load sampling range (min, max): {} N".format(load_param_tuple)]
content += ["Coupling constraint status: {}".format(isCoupleOn_status),
"Laplacian smoothing status: {}".format(isLaplacianSmoothingOn_status)]
if isCoupleOn:
content += ["Coupling type: {}".format(coupling_type),
"Coupling neighbor layer numbers: {}".format(coupling_neighbor_layer_num)]
if isLaplacianSmoothingOn:
content += ["Laplacian smoothing iteration numbers = {}".format(laplacian_iter_num),
"Laplacian smoothing rate = {}".format(laplacian_smoothing_rate)]
content += ["----------------------------------------------------------",
"Input file\t\tExport status\tGeneration status\tElapsed time/s"]
elapsed_time_total = 0
for i, file_name in enumerate(file_name_list):
data_string_temp = "{}\t\t{}\t\tCompleted\t".format(file_name, write_status) + "\t%.8f" % (elapsed_time_list[i])
content.append(data_string_temp)
elapsed_time_total += elapsed_time_list[i]
content += ["----------------------------------------------------------",
"Total elapsed time: {} s".format(elapsed_time_total)]
content = '\n'.join(content)
with open(write_path, 'w') as f: f.write(content)
def main():
abaqus_default_directory = "C:/temp" # Default working directory of Abaqus.
inp_folder = "inp_files"
sample_nums = 1500
data_file_path = "data_aorta.mat"
node_variable_name, elem_variable_name = "NodeI", "EleI"
results_folder_path_stress, results_folder_path_coor = "stress", "coor"
material_type = "neo_hookean_solid" # "linear" / "neo_hookean_fitting" / "neo_hookean_solid".
fix_indices_list = [1148, 1156, 1169] # Specify the node to fix. At least 3. Indexed from 1.
write_status = "Normal" # String. "Normal" / "Fast". "Normal": generate all definitions; "Fast": generate nodes and elements definition only.
# ================================== Force interpolation related variables ================================== #
force_field_mat_name = "force_field_data.mat"
force_interpolation_folder = "inp_interpolation"
isPrescribedForceOn = True # Boolean indicator. True: use prescribed force field; False: no specified force field. Default: False.
force_type = "random" # String. The type of prescribed force field. "interpolated": interpolated force fields; "random": weighted-summed force fields.
eigen_num_force, force_scalar = 20, 0.4 # Float. The scalar of force fields controlling the force magnitude -> deformation magnitude of the tumor in nonlinear solver. Unit: N.
# =========================================================================================================== #
if isPrescribedForceOn:
"""
The pipeline of generating interpolated force fields:
1. Run "nonlinearCasesCreation.py" with 'isPrescribedForceOn = False' firstly.
2. Run "forceInterpolation.py" in the same directory.
3. Set 'isPrescribedForceOn = True', set 'force_type = "interpolated", then run "nonlinearCasesCreation.py" again.
Get input files with "*_interpolated.inp" in the folder 'force_interpolation_folder'.
4. Set 'isPrescribedForceOn = True', set 'force_type = "random", then run "nonlinearCasesCreation.py" again.
Get input files with "*_random.inp" in the folder 'force_interpolation_folder'.
"""
force_fields = (scipy.io.loadmat(force_field_mat_name)["force_field_interpolated"] if force_type == "interpolated" else
scipy.io.loadmat(force_field_mat_name)["force_field_random"]) # Size: nSurfI*3 x sampleNum. Concatenated as xyzxyz...
sample_nums = force_fields.shape[1]
# Generate input file for Abaqus.
file_name_list, elapsed_time_list, force_field_matrix = [], [], None
for i in range(sample_nums):
start_time = time.time()
if isPrescribedForceOn:
if not os.path.isdir(force_interpolation_folder): os.mkdir(force_interpolation_folder)
file_name_temp = ("{}_interpolated.inp".format(str(i+20001)) if force_type == "interpolated" else
"{}_random.inp".format(str(i+20001)))
write_path = os.path.join(force_interpolation_folder, file_name_temp)
force_field_prescribed_list = list(force_fields[:,i])
inputFile_temp = inputFileGenerator(data_file_path, write_path, material_type,
fix_indices_list, node_variable_name, elem_variable_name,
user_prescribed_force_field=force_field_prescribed_list)
else:
if not os.path.isdir(inp_folder): os.mkdir(inp_folder)
file_name_temp = "{}.inp".format(str(i+20001))
write_path = os.path.join(inp_folder, file_name_temp)
inputFile_temp = inputFileGenerator(data_file_path, write_path, material_type,
fix_indices_list, node_variable_name, elem_variable_name)
inputFile_temp.writeFile(write_status)
end_time = time.time()
elapsed_time = end_time - start_time
file_name_list.append(file_name_temp)
elapsed_time_list.append(elapsed_time)
if i == 0: force_field_matrix = inputFile_temp._laplacian_force_field.reshape(-1,1)
else: force_field_matrix = np.hstack((force_field_matrix, inputFile_temp._laplacian_force_field.reshape(-1,1)))
# ============================ For force visualization only (sample_nums = 1) ============================ #
# print(inputFile_temp._laplacian_initial_loads_posi)
# force_field = {"force_field": inputFile_temp._laplacian_force_field}
# scipy.io.savemat("force_field.mat", force_field)
# ======================================================================================================== #
print("Input_file: ", file_name_temp, "| Status:", write_status, "| Generation: Completed | Time: %.4f s" % (elapsed_time))
saveLog(file_name_list, elapsed_time_list, write_status, data_file_path, sample_nums,
fix_indices_list, inputFile_temp.loads_num, inputFile_temp._load_sampling_style, inputFile_temp._load_params_tuple,
material_type, inputFile_temp._modulus, inputFile_temp._poisson_ratio,
inputFile_temp._isCoupleOn, inputFile_temp._isLaplacianSmoothingOn,
coupling_type=inputFile_temp._coupling_type, coupling_neighbor_layer_num=inputFile_temp._coupling_neighbor_layers,
laplacian_iter_num=inputFile_temp._laplacian_iter_num, laplacian_smoothing_rate=inputFile_temp._smoothing_rate,
write_path="nonlinear_case_generation.log")
if not isPrescribedForceOn: weight_matrix = (2.0 * np.random.rand(eigen_num_force, 3*sample_nums) - 1.0) # Distinct random weights corresponding to each laplacian-force-field.
else: weight_matrix = scipy.io.loadmat(force_field_mat_name)["weight_matrix"] # Distinct random force field for each laplacian-force-field.
mdict = {"fix_indices_list": fix_indices_list,
"orig_data_file_name": data_file_path,
"orig_config_var_name": node_variable_name,
"inp_folder": inp_folder if not isPrescribedForceOn else force_interpolation_folder, # The folder containing input files.
"current_directory": os.getcwd(),
"results_folder_path_stress": results_folder_path_stress,
"results_folder_path_coor": results_folder_path_coor,
"original_node_number": inputFile_temp._orig_node_num,
"couple_region_num": inputFile_temp._couple_region_num,
"force_field_matrix": force_field_matrix, # The force field matrix of all generated samples. Size: nSurfI*3 x sampleNum_total.
"weight_matrix": weight_matrix, "force_scalar_coeff": force_scalar, # The randomly generated matrix for force fields' reconstruction. Size: eigen_num x (3*sample_num).
"eigen_number_force": eigen_num_force, # Int. The eigenmode number of force field reconstruction. (Used only in force field interpolation)
"alpha_indexing_vector": np.zeros(shape=(sample_nums, 1)) if not isPrescribedForceOn else scipy.io.loadmat(force_field_mat_name)["alpha_indexing_vector"]
}
scipy.io.savemat("training_parameters_transfer.mat", mdict)
# np.save(os.path.join(abaqus_default_directory, "training_parameters_transfer.npy"), mdict, fix_imports=True)
# np.savez(os.path.join(abaqus_default_directory, "training_parameters_transfer.npz"),
# fix_indices_list=fix_indices_list,
# orig_data_file_name=data_file_path,
# orig_config_var_name=node_variable_name,
# inp_folder=inp_folder,
# current_directory=os.getcwd(),
# results_folder_path_stress=results_folder_path_stress,
# results_folder_path_coor=results_folder_path_coor)
if __name__ == "__main__":
main()
| 2.375 | 2 |
data/cache/test/test_cache.py | dongboyan77/quay | 1 | 5233 | <reponame>dongboyan77/quay
import pytest
from mock import patch
from data.cache import InMemoryDataModelCache, NoopDataModelCache, MemcachedModelCache
from data.cache.cache_key import CacheKey
class MockClient(object):
def __init__(self, server, **kwargs):
self.data = {}
def get(self, key, default=None):
return self.data.get(key, default)
def set(self, key, value, expire=None):
self.data[key] = value
@pytest.mark.parametrize("cache_type", [(NoopDataModelCache), (InMemoryDataModelCache),])
def test_caching(cache_type):
key = CacheKey("foo", "60m")
cache = cache_type()
# Perform two retrievals, and make sure both return.
assert cache.retrieve(key, lambda: {"a": 1234}) == {"a": 1234}
assert cache.retrieve(key, lambda: {"a": 1234}) == {"a": 1234}
def test_memcache():
key = CacheKey("foo", "60m")
with patch("data.cache.impl.Client", MockClient):
cache = MemcachedModelCache(("127.0.0.1", "-1"))
assert cache.retrieve(key, lambda: {"a": 1234}) == {"a": 1234}
assert cache.retrieve(key, lambda: {"a": 1234}) == {"a": 1234}
def test_memcache_should_cache():
key = CacheKey("foo", None)
def sc(value):
return value["a"] != 1234
with patch("data.cache.impl.Client", MockClient):
cache = MemcachedModelCache(("127.0.0.1", "-1"))
assert cache.retrieve(key, lambda: {"a": 1234}, should_cache=sc) == {"a": 1234}
# Ensure not cached since it was `1234`.
assert cache._get_client().get(key.key) is None
# Ensure cached.
assert cache.retrieve(key, lambda: {"a": 2345}, should_cache=sc) == {"a": 2345}
assert cache._get_client().get(key.key) is not None
assert cache.retrieve(key, lambda: {"a": 2345}, should_cache=sc) == {"a": 2345}
| 2.421875 | 2 |
Packs/HealthCheck/Scripts/HealthCheckIncidentsCreatedMonthly/HealthCheckIncidentsCreatedMonthly.py | mazmat-panw/content | 2 | 5234 | <gh_stars>1-10
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
ctx = demisto.context()
dataFromCtx = ctx.get("widgets")
if not dataFromCtx:
incident = demisto.incidents()[0]
accountName = incident.get('account')
accountName = f"acc_{accountName}" if accountName != "" else ""
stats = demisto.executeCommand(
"demisto-api-post",
{
"uri": f"{accountName}/statistics/widgets/query",
"body": {
"size": 13,
"dataType": "incidents",
"query": "",
"dateRange": {
"period": {
"byFrom": "months",
"fromValue": 12
}
},
"widgetType": "line",
"params": {
"groupBy": [
"occurred(m)",
"null"
],
"timeFrame": "months"
},
},
})
res = stats[0]["Contents"]["response"]
buildNumber = demisto.executeCommand("DemistoVersion", {})[0]['Contents']['DemistoVersion']['buildNumber']
buildNumber = f'{buildNumber}' if buildNumber != "REPLACE_THIS_WITH_CI_BUILD_NUM" else "618658"
if int(buildNumber) >= 618657:
# Line graph:
data = {
"Type": 17,
"ContentsFormat": "line",
"Contents": {
"stats": res,
"params": {
"timeFrame": "months"
}
}
}
else:
# Bar graph:
output = []
for entry in res:
output.append({"name": entry["name"], "data": entry["data"]})
data = {
"Type": 17,
"ContentsFormat": "bar",
"Contents": {
"stats": output,
"params": {
"layout": "horizontal"
}
}
}
demisto.results(data)
else:
data = {
"Type": 17,
"ContentsFormat": "line",
"Contents": {
"stats": dataFromCtx['IncidentsCreatedMonthly'],
"params": {
"timeFrame": "months"
}
}
}
demisto.results(data)
| 2.125 | 2 |
Bert_training.py | qzlydao/Bert_Sentiment_Analysis | 0 | 5235 | from torch.utils.data import DataLoader
from dataset.wiki_dataset import BERTDataset
from models.bert_model import *
from tqdm import tqdm
import numpy as np
import pandas as pd
import os
config = {}
config['train_corpus_path'] = './corpus/train_wiki.txt'
config['test_corpus_path'] = './corpus/test_wiki.txt'
config['word2idx_path'] = './corpus/bert_word2idx_extend.json'
config['output_path'] = './output_wiki_bert'
config['batch_size'] = 1
config['max_seq_len'] = 200
config['vocab_size'] = 32162
config['lr'] = 2e-6
config['num_workers'] = 0
class Pretrainer:
def __init__(self, bert_model,
vocab_size, max_seq_len,
batch_size, lr, with_cuda=True):
        # Vocabulary size. Note the actual character/word vocabulary = vocab_size - 20,
        # because the first 20 tokens are reserved for special purposes such as padding.
self.vocab_size = vocab_size
self.batch_size = batch_size
self.lr = lr
cuda_condition = torch.cuda.is_available() and with_cuda
self.device = torch.device('cuda:0' if cuda_condition else 'cpu')
        # Maximum length of a single sentence
self.max_seq_len = max_seq_len
        # Initialize the hyperparameter configuration
bertconfig = BertConfig(vocab_size=config['vocab_size'])
        # Initialize the BERT model
self.bert_model = bert_model(config=bertconfig)
self.bert_model.to(self.device)
        # Initialize the training dataset
train_dataset = BERTDataset(corpus_path=config['train_corpus_path'],
word2idx_path=config['word2idx_path'],
seq_len=self.max_seq_len,
hidden_dim=bertconfig.hidden_size,
on_memory=False)
        # Initialize the training dataloader
self.train_dataloader = DataLoader(train_dataset,
batch_size=config['batch_size'],
num_workers=config['num_workers'],
collate_fn=lambda x:x)
        # Initialize the test dataset
test_dataset = BERTDataset(corpus_path=config['test_corpus_path'],
word2idx_path=config['word2idx_path'],
seq_len=self.max_seq_len,
hidden_dim=bertconfig.hidden_size,
on_memory=True)
        # Initialize the test dataloader
self.test_dataloader = DataLoader(test_dataset, batch_size=self.batch_size,
num_workers=config['num_workers'],
collate_fn=lambda x: x)
        # Initialize positional_encoding, shape [max_seq_len, hidden_size]
self.positional_enc = self.init_positional_encoding(hidden_dim=bertconfig.hidden_size,
max_seq_len=self.max_seq_len)
        # Expand positional_encoding to shape [1, max_seq_len, hidden_size]
self.positional_enc = torch.unsqueeze(self.positional_enc, dim=0)
        # Collect the parameters to optimize and pass them to the optimizer
optim_parameters = list(self.bert_model.parameters())
self.optimizer = torch.optim.Adam(optim_parameters, lr=self.lr)
print('Total Parameters:', sum(p.nelement() for p in self.bert_model.parameters()))
def init_positional_encoding(self, hidden_dim, max_seq_len):
position_enc = np.array([
[pos / np.power(10000, 2 * i / hidden_dim) for i in range(hidden_dim)]
if pos != 0 else np.zeros(hidden_dim) for pos in range(max_seq_len)
])
        # dim = 2i: apply sine to even dimensions
        position_enc[1:, 0::2] = np.sin(position_enc[1:, 0::2])
        # dim = 2i+1: apply cosine to odd dimensions
        position_enc[1:, 1::2] = np.cos(position_enc[1:, 1::2])
        # todo normalization - why? Divide each row of the positional embedding by its norm
        denominator = np.sqrt(np.sum(position_enc**2, axis=1, keepdims=True))  # used as the denominator
position_enc /= (denominator + 1e-8)
position_enc = torch.from_numpy(position_enc).type(torch.FloatTensor)
return position_enc
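    # For reference: the table built above follows the standard sinusoidal scheme
    #   PE(pos, 2i)   = sin(pos / 10000^(2i / hidden_dim))
    #   PE(pos, 2i+1) = cos(pos / 10000^(2i / hidden_dim))
    # with an extra, non-standard step that L2-normalises each position's vector.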
def test(self, epoch, df_path='./output_wiki_bert/df_log.pickle'):
self.bert_model.eval()
with torch.no_grad():
return self.iteration(epoch, self.test_dataloader, train=False, df_path=df_path)
def load_model(self, model, dir_path='./output'):
        # Load the model
checkpoint_dir = self.find_most_recent_state_dict(dir_path)
checkpoint = torch.load(checkpoint_dir)
        # todo where was this key saved?
model.load_state_dict(checkpoint['model_state_dict'], strict=False)
torch.cuda.empty_cache()
model.to(self.device)
print('{} loaded for training!'.format(checkpoint_dir))
def train(self, epoch, df_path='./output_wiki_bert/df_log.pickle'):
self.bert_model.train()
self.iteration(epoch, self.train_dataloader, train=True, df_path=df_path)
def compute_loss(self, preditions, labels, num_class=2, ignore_index=None):
if ignore_index is None:
loss_func = CrossEntropyLoss()
else:
loss_func = CrossEntropyLoss(ignore_index=ignore_index)
return loss_func(preditions.view(-1, num_class), labels.view(-1))
def get_mlm_accuracy(self, predictions, labels):
# predictions [batch_size, seq_len, vocab_size]
predictions = torch.argmax(predictions, dim=-1, keepdim=False) # predictions: [batch_size, seq_len]
# labels: [batch_size, seq_len]
        mask = (labels > 0)  # only consider the tokens that were MASKed
        # number of correct predictions
pred_correct = torch.sum((predictions == labels) * mask).float()
# accuracy
mlm_accuracy = pred_correct / (torch.sum(mask).float() + 1e-8)
return mlm_accuracy.item()
def padding(self, output_dic_list):
        # todo format of output_dic_list
# [batch_size, seq_len, embed_dim]
bert_input = [i['bert_input'] for i in output_dic_list]
bert_label = [i['bert_label'] for i in output_dic_list]
segment_label = [i['segment_label'] for i in output_dic_list]
# padding
bert_input = torch.nn.utils.rnn.pad_sequence(bert_input, batch_first=True)
bert_label = torch.nn.utils.rnn.pad_sequence(bert_label, batch_first=True)
segment_label = torch.nn.utils.rnn.pad_sequence(segment_label, batch_first=True)
# [batch_size]
is_next = torch.cat([i['is_next'] for i in output_dic_list])
return {
'bert_input': bert_input,
'bert_label': bert_label,
'segment_label': segment_label,
'is_next': is_next
}
def find_most_recent_state_dict(self, dir_path):
if not os.path.exists(dir_path):
os.mkdir(dir_path)
dic_list = [i for i in os.listdir(dir_path)]
if len(dic_list) == 0:
raise FileNotFoundError('can not find any state dict in {}'.format(dir_path))
        # todo when was the model saved?
dic_list = [i for i in dic_list if 'model' in i]
dic_list = sorted(dic_list, key=lambda k: int(k.split('.')[-1]))
return dir_path + '/' + dic_list[-1]
def iteration(self, epoch, data_loader, train=True, df_path='./output_wiki_bert/df_log.pickle'):
if not os.path.isfile(df_path) and epoch != 0:
raise RuntimeError("log DataFrame path not found and can't create a new one because we're not training from scratch!")
if not os.path.isfile(df_path) and epoch == 0:
df = pd.DataFrame(columns=['epoch', 'train_next_sen_loss', 'train_mlm_loss',
'train_next_sen_acc', 'train_mlm_acc',
'test_next_sen_loss', 'test_mlm_loss',
'test_next_sen_acc', 'test_mlm_acc'])
df.to_pickle(df_path)
print('log DataFrame created!')
str_code = 'train' if train else 'test'
        # Set up the progress bar and get an iterator object
data_iter = tqdm(enumerate(data_loader),
desc='EP_%s:%d' % (str_code, epoch),
total=len(data_loader),
bar_format='{l_bar}{r_bar}')
total_next_sen_loss = 0
total_mlm_loss = 0
total_next_sen_acc = 0
total_mlm_acc = 0
total_element = 0
for i, data in data_iter:
data = self.padding(data)
# 0. batch_data will be sent into the device
data = {key: value.to(self.device) for key, value in data.items()}
            # todo dimensions of data['bert_input']
positional_enc = self.positional_enc[:, :data['bert_input'].size()[-1], :].to(self.device)
# 1. forward the next_sentence_prediction and masked_lm_model
# mlm_preds: [batch_size, seq_len, vocab_size]
# next_sen_preds: [batch_size, seq_len]
mlm_preds, next_sen_preds = self.bert_model.forward(input_ids=data['bert_input'],
positional_enc=positional_enc,
token_type_ids=data['segment_label'])
mlm_acc = self.get_mlm_accuracy(mlm_preds, data['bert_label'])
next_sen_acc = next_sen_preds.argmax(dim=-1, keepdim=False).eq(data['is_next']).sum().item()
mlm_loss = self.compute_loss(mlm_preds, data['bert_label'], self.vocab_size, ignore_index=0)
next_sen_loss = self.compute_loss(next_sen_preds, data['is_next'])
            # joint training of the two tasks
loss = mlm_loss + next_sen_loss
            # 3. Backpropagation and gradient update
if train:
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
total_next_sen_loss += next_sen_loss.item()
total_mlm_loss += mlm_loss.item()
total_next_sen_acc += next_sen_acc
total_element += data['is_next'].nelement()
total_mlm_acc += mlm_acc
if train:
log_dict = {
'epoch': epoch,
'train_next_sen_loss': total_next_sen_loss / (i + 1),
'train_mlm_loss': total_mlm_loss / (i + 1),
'train_next_sen_acc': total_next_sen_acc / total_element,
'train_mlm_acc': total_mlm_acc / (i + 1),
'test_next_sen_loss': 0, 'test_mlm_loss':0,
'test_next_sen_acc':0, 'test_mlm_acc':0
}
else:
log_dict = {
'epoch': epoch,
'test_next_sen_loss': total_next_sen_loss / (i + 1),
'test_mlm_loss': total_mlm_loss / (i + 1),
'test_next_sen_acc': total_next_sen_acc / total_element,
'test_mlm_acc': total_mlm_acc / (i + 1),
'train_next_sen_loss': 0, 'train_mlm_loss': 0,
'train_next_sen_acc': 0, 'train_mlm_acc': 0
}
if i % 10 == 0:
data_iter.write(str({k: v for k, v in log_dict.items() if v != 0 and k != 'epoch'}))
if train:
df = pd.read_pickle(df_path)
            # append the log info to the DataFrame
df = df.append([log_dict])
            # reset the index
df.reset_index(inplace=True, drop=True)
            # save to disk
df.to_pickle(df_path)
else:
log_dict = {k: v for k, v in log_dict.items() if v != 0 and k != 'epoch'}
df = pd.read_pickle(df_path)
df.reset_index(inplace=True, drop=True)
for k, v in log_dict.items():
df.at[epoch, k] = v
df.to_pickle(df_path)
return float(log_dict['test_next_sen_loss']) + float(log_dict['test_mlm_loss'])
def save_state_dict(self, model, epoch, dir_path='./output', file_path='bert.model'):
if not os.path.exists(dir_path):
os.mkdir(dir_path)
save_path = dir_path + '/' + file_path + '.epoch.{}'.format(str(epoch))
model.to('cpu')
torch.save({'model_state_dict': model.state_dict()}, save_path)
print('{} saved!'.format(save_path))
model.to(self.device)
if __name__ == '__main__':
def init_trainer(dynamic_lr, load_model=False):
trainer = Pretrainer(BertForPreTraining,
vocab_size=config['vocab_size'],
max_seq_len=config['max_seq_len'],
batch_size=config['batch_size'],
lr=dynamic_lr,
with_cuda=True)
if load_model:
trainer.load_model(trainer.bert_model, dir_path=config['output_path'])
return trainer
start_epoch = 3
train_epoches = 1
trainer = init_trainer(config['lr'], load_model=True)
all_loss = []
threshold = 0
patient = 10
best_f1 = 0
dynamic_lr = config['lr']
    # todo why does start_epoch begin at 3?
for epoch in range(start_epoch, start_epoch + train_epoches):
print('train with learning rate {}'.format(str(dynamic_lr)))
trainer.train(epoch)
trainer.save_state_dict(trainer.bert_model, epoch, dir_path=config['output_path'],
file_path='bert.model')
trainer.test(epoch)
| 2.484375 | 2 |
python/triton/language/random.py | appliedml85/triton | 1 | 5236 | <filename>python/triton/language/random.py
import triton
import triton.language as tl
# Notes
# 1. triton doesn't support uint32, so we use int32 instead and benefit from the fact that two's complement operations are equivalent to uint operations.
# 2. multiply_low_high is currently inefficient.
# 3. Even though technically philox sampling outputs int, in many places we pretends they were actualy uints e.g. uint_to_uniform_float
@triton.jit
def PHILOX_KEY_A():
# 0x9E3779B9
return -1640531527
@triton.jit
def PHILOX_KEY_B():
# <KEY>
return -1150833019
@triton.jit
def PHILOX_ROUND_A():
# 0xD2511F53
return -766435501
@triton.jit
def PHILOX_ROUND_B():
# 0xCD9E8D57
return -845247145
@triton.jit
def hacky_to_uint64(x):
return ((x >> 1).to(tl.int64) << 1) + (x & 1).to(tl.int64)
@triton.jit
def multiply_low_high(a, b):
return (
a * b,
((hacky_to_uint64(a) * hacky_to_uint64(b)) >> 32).to(tl.int32)
)
@triton.jit
def single_round(c0, c1, c2, c3, k0, k1):
A = PHILOX_ROUND_A()
B = PHILOX_ROUND_B()
lo0, hi0 = multiply_low_high(A, c0)
lo1, hi1 = multiply_low_high(B, c2)
return (
hi1 ^ c1 ^ k0,
lo1,
hi0 ^ c3 ^ k1,
lo0,
)
@triton.jit
def raise_key(k0, k1):
return (
k0 + PHILOX_KEY_A(),
k1 + PHILOX_KEY_B(),
)
@triton.jit
def philox_f(c0, c1, c2, c3, k0, k1):
c0, c1, c2, c3 = single_round(c0, c1, c2, c3, k0, k1)
k0, k1 = raise_key(k0, k1)
c0, c1, c2, c3 = single_round(c0, c1, c2, c3, k0, k1)
k0, k1 = raise_key(k0, k1)
c0, c1, c2, c3 = single_round(c0, c1, c2, c3, k0, k1)
k0, k1 = raise_key(k0, k1)
c0, c1, c2, c3 = single_round(c0, c1, c2, c3, k0, k1)
k0, k1 = raise_key(k0, k1)
c0, c1, c2, c3 = single_round(c0, c1, c2, c3, k0, k1)
k0, k1 = raise_key(k0, k1)
c0, c1, c2, c3 = single_round(c0, c1, c2, c3, k0, k1)
k0, k1 = raise_key(k0, k1)
c0, c1, c2, c3 = single_round(c0, c1, c2, c3, k0, k1)
k0, k1 = raise_key(k0, k1)
c0, c1, c2, c3 = single_round(c0, c1, c2, c3, k0, k1)
k0, k1 = raise_key(k0, k1)
c0, c1, c2, c3 = single_round(c0, c1, c2, c3, k0, k1)
k0, k1 = raise_key(k0, k1)
c0, c1, c2, c3 = single_round(c0, c1, c2, c3, k0, k1)
return c0, c1, c2, c3
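# The ten unrolled single_round/raise_key pairs above correspond to the standard
# 10-round Philox-4x32-10 schedule; the loop is written out explicitly, presumably
# to keep the jitted function as straight-line code.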
@triton.jit
def uint32_to_uniform_float(x):
"""
Numerically stable function to convert a random integer into a random float uniformly sampled in [0, 1).
This is originally designed from uint32, but it works with int32 too as long as the int32 uniformly
covers all the possible values it can take.
"""
mantissa = x & 0x7fffff
exp = 127
res = mantissa | (exp << 23)
return res.to(tl.float32, bitcast=True) - 1.0
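# Illustrative NumPy sketch of the same bit trick (not part of the kernel; assumes
# the low 23 bits of `x` are uniformly random):
#   mantissa = np.uint32(x) & np.uint32(0x7fffff)        # keep 23 mantissa bits
#   bits = mantissa | (np.uint32(127) << np.uint32(23))  # exponent 127 -> value in [1, 2)
#   u = bits.view(np.float32) - 1.0                      # shift down to [0, 1)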
@triton.jit
def pair_uniform_to_normal(u1, u2):
"""Box-Muller transform"""
u1 = tl.maximum(1.0e-7, u1)
th = 6.283185307179586 * u2
r = tl.sqrt(-2.0 * tl.log(u1))
return r * tl.cos(th), r * tl.sin(th)
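# Box-Muller recap: for independent u1, u2 ~ U(0, 1),
#   r = sqrt(-2 * ln(u1)), theta = 2*pi * u2  (6.283185307179586 above is 2*pi),
#   z0 = r * cos(theta) and z1 = r * sin(theta) are independent standard normals.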
@triton.jit
def randint4x(seed, offset):
"""
Given a :code:`seed` scalar and an :code:`offset` block, returns four
blocks of random :code:`int32`.
This is the maximally efficient entry point
to Triton's Philox pseudo-random number generator.
:param seed: The seed for generating random numbers.
:param offsets: The offsets to generate random numbers for.
"""
z = 0
return philox_f(offset, z, z, z, seed, z)
@triton.jit
def randint(seed, offset):
"""
Given a :code:`seed` scalar and an :code:`offset` block, returns a single
block of random :code:`int32`.
If you need multiple streams of random numbers,
using `randint4x` is likely to be faster than calling `randint` 4 times.
:param seed: The seed for generating random numbers.
:param offsets: The offsets to generate random numbers for.
"""
ret, _, _, _ = randint4x(seed, offset)
return ret
@triton.jit
def rand(seed, offset):
"""
Given a :code:`seed` scalar and an :code:`offset` block,
returns a block of random :code:`float32` in :math:`U(0, 1)`
:param seed: The seed for generating random numbers.
:param offsets: The offsets to generate random numbers for.
"""
source = randint(seed, offset)
return uint32_to_uniform_float(source)
@triton.jit
def randn(seed, offset):
"""
Given a :code:`seed` scalar and an :code:`offset` block,
returns a block of random :code:`float32` in :math:`\mathcal{N}(0, 1)`
:param seed: The seed for generating random numbers.
:param offsets: The offsets to generate random numbers for.
"""
i1, i2, _, _ = randint4x(seed, offset)
u1 = uint32_to_uniform_float(i1)
u2 = uint32_to_uniform_float(i2)
n1, _ = pair_uniform_to_normal(u1, u2)
return n1
@triton.jit
def rand4x(seed, offsets):
"""
Given a :code:`seed` scalar and an :code:`offsets` block,
returns a 4 blocks of random :code:`float32` in :math:`U(0, 1)`
:param seed: The seed for generating random numbers.
:param offsets: The offsets to generate random numbers for.
"""
i1, i2, i3, i4 = randint4x(seed, offsets)
u1 = uint32_to_uniform_float(i1)
u2 = uint32_to_uniform_float(i2)
u3 = uint32_to_uniform_float(i3)
u4 = uint32_to_uniform_float(i4)
return u1, u2, u3, u4
@triton.jit
def randn4x(seed, offset):
"""
Given a :code:`seed` scalar and an :code:`offset` block,
returns a 4 blocks of random :code:`float32` in :math:`\mathcal{N}(0, 1)`
:param seed: The seed for generating random numbers.
:param offsets: The offsets to generate random numbers for.
"""
u1, u2, u3, u4 = rand4x(seed, offset)
n1, n2 = pair_uniform_to_normal(u1, u2)
n3, n4 = pair_uniform_to_normal(u3, u4)
return n1, n2, n3, n4
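# Usage sketch (illustrative only; BLOCK, p_drop and seed are hypothetical names):
# a dropout-style kernel would typically call
#   offsets = tl.program_id(0) * BLOCK + tl.arange(0, BLOCK)
#   u = rand(seed, offsets)   # one float32 in [0, 1) per element
#   keep = u > p_drop
# where `seed` is a runtime scalar and `offsets` give each element its own stream.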
| 2.5625 | 3 |
pyctcdecode/__init__.py | kensho-technologies/pyctcdecode | 203 | 5237 | # Copyright 2021-present Kensho Technologies, LLC.
from .alphabet import Alphabet # noqa
from .decoder import BeamSearchDecoderCTC, build_ctcdecoder # noqa
from .language_model import LanguageModel # noqa
__package_name__ = "pyctcdecode"
__version__ = "0.3.0"
| 0.972656 | 1 |
wumpus/start_server.py | marky1991/Legend-of-Wumpus | 0 | 5238 | <filename>wumpus/start_server.py
from wumpus.server import Server
from circuits import Debugger
s = Server("0.0.0.0", 50551) + Debugger()
s.run()
import sys
sys.exit(1)
| 1.820313 | 2 |
platypush/backend/joystick/linux/__init__.py | BlackLight/platypush | 228 | 5239 | import array
import struct
import time
from fcntl import ioctl
from typing import IO
from platypush.backend import Backend
from platypush.message.event.joystick import JoystickConnectedEvent, JoystickDisconnectedEvent, \
JoystickButtonPressedEvent, JoystickButtonReleasedEvent, JoystickAxisEvent
class JoystickLinuxBackend(Backend):
"""
This backend intercepts events from joystick devices through the native Linux API implementation.
It is loosely based on https://gist.github.com/rdb/8864666, which itself uses the
`Linux kernel joystick API <https://www.kernel.org/doc/Documentation/input/joystick-api.txt>`_ to interact with
the devices.
Triggers:
* :class:`platypush.message.event.joystick.JoystickConnectedEvent` when the joystick is connected.
* :class:`platypush.message.event.joystick.JoystickDisconnectedEvent` when the joystick is disconnected.
* :class:`platypush.message.event.joystick.JoystickButtonPressedEvent` when a joystick button is pressed.
* :class:`platypush.message.event.joystick.JoystickButtonReleasedEvent` when a joystick button is released.
* :class:`platypush.message.event.joystick.JoystickAxisEvent` when an axis value of the joystick changes.
"""
# These constants were borrowed from linux/input.h
axis_names = {
0x00: 'x',
0x01: 'y',
0x02: 'z',
0x03: 'rx',
0x04: 'ry',
0x05: 'rz',
0x06: 'throttle',
0x07: 'rudder',
0x08: 'wheel',
0x09: 'gas',
0x0a: 'brake',
0x10: 'hat0x',
0x11: 'hat0y',
0x12: 'hat1x',
0x13: 'hat1y',
0x14: 'hat2x',
0x15: 'hat2y',
0x16: 'hat3x',
0x17: 'hat3y',
0x18: 'pressure',
0x19: 'distance',
0x1a: 'tilt_x',
0x1b: 'tilt_y',
0x1c: 'tool_width',
0x20: 'volume',
0x28: 'misc',
}
button_names = {
0x120: 'trigger',
0x121: 'thumb',
0x122: 'thumb2',
0x123: 'top',
0x124: 'top2',
0x125: 'pinkie',
0x126: 'base',
0x127: 'base2',
0x128: 'base3',
0x129: 'base4',
0x12a: 'base5',
0x12b: 'base6',
0x12f: 'dead',
0x130: 'a',
0x131: 'b',
0x132: 'c',
0x133: 'x',
0x134: 'y',
0x135: 'z',
0x136: 'tl',
0x137: 'tr',
0x138: 'tl2',
0x139: 'tr2',
0x13a: 'select',
0x13b: 'start',
0x13c: 'mode',
0x13d: 'thumbl',
0x13e: 'thumbr',
0x220: 'dpad_up',
0x221: 'dpad_down',
0x222: 'dpad_left',
0x223: 'dpad_right',
# XBox 360 controller uses these codes.
0x2c0: 'dpad_left',
0x2c1: 'dpad_right',
0x2c2: 'dpad_up',
0x2c3: 'dpad_down',
}
def __init__(self, device: str = '/dev/input/js0', *args, **kwargs):
"""
:param device: Joystick device to monitor (default: ``/dev/input/js0``).
"""
super().__init__(*args, **kwargs)
self.device = device
self._axis_states = {}
self._button_states = {}
self._axis_map = []
self._button_map = []
def _init_joystick(self, dev: IO):
# Get the device name.
buf = array.array('B', [0] * 64)
ioctl(dev, 0x80006a13 + (0x10000 * len(buf)), buf) # JSIOCGNAME(len)
js_name = buf.tobytes().rstrip(b'\x00').decode('utf-8')
# Get number of axes and buttons.
buf = array.array('B', [0])
ioctl(dev, 0x80016a11, buf) # JSIOCGAXES
num_axes = buf[0]
buf = array.array('B', [0])
ioctl(dev, 0x80016a12, buf) # JSIOCGBUTTONS
num_buttons = buf[0]
# Get the axis map.
buf = array.array('B', [0] * 0x40)
ioctl(dev, 0x80406a32, buf) # JSIOCGAXMAP
for axis in buf[:num_axes]:
axis_name = self.axis_names.get(axis, 'unknown(0x%02x)' % axis)
self._axis_map.append(axis_name)
self._axis_states[axis_name] = 0.0
# Get the button map.
buf = array.array('H', [0] * 200)
ioctl(dev, 0x80406a34, buf) # JSIOCGBTNMAP
for btn in buf[:num_buttons]:
btn_name = self.button_names.get(btn, 'unknown(0x%03x)' % btn)
self._button_map.append(btn_name)
self._button_states[btn_name] = 0
self.bus.post(JoystickConnectedEvent(device=self.device, name=js_name, axes=self._axis_map,
buttons=self._button_map))
def run(self):
super().run()
self.logger.info(f'Opening {self.device}...')
while not self.should_stop():
# Open the joystick device.
try:
jsdev = open(self.device, 'rb')
self._init_joystick(jsdev)
except Exception as e:
self.logger.debug(f'Joystick device on {self.device} not available: {e}')
time.sleep(5)
continue
# Joystick event loop
while not self.should_stop():
try:
evbuf = jsdev.read(8)
if evbuf:
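                        # Each event is 8 bytes: u32 timestamp (ms), s16 value, u8 type, u8 number.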
_, value, evt_type, number = struct.unpack('IhBB', evbuf)
if evt_type & 0x80: # Initial state notification
continue
if evt_type & 0x01:
button = self._button_map[number]
if button:
self._button_states[button] = value
evt_class = JoystickButtonPressedEvent if value else JoystickButtonReleasedEvent
# noinspection PyTypeChecker
self.bus.post(evt_class(device=self.device, button=button))
if evt_type & 0x02:
axis = self._axis_map[number]
if axis:
fvalue = value / 32767.0
self._axis_states[axis] = fvalue
# noinspection PyTypeChecker
self.bus.post(JoystickAxisEvent(device=self.device, axis=axis, value=fvalue))
except OSError as e:
self.logger.warning(f'Connection to {self.device} lost: {e}')
self.bus.post(JoystickDisconnectedEvent(device=self.device))
break
| 2.53125 | 3 |
src/modules/sensors/vehicle_magnetometer/mag_compensation/python/mag_compensation.py | SaxionMechatronics/Firmware | 4,224 | 5240 | <reponame>SaxionMechatronics/Firmware
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
File: mag_compensation.py
Author: <NAME>
Email: <EMAIL>
Github: https://github.com/baumanta
Description:
Computes linear coefficients for mag compensation from thrust and current
Usage:
python mag_compensation.py /path/to/log/logfile.ulg current --instance 1
Remark:
If your logfile does not contain some of the topics, e.g.battery_status/current_a
you will have to comment out the corresponding parts in the script
"""
import matplotlib.pylab as plt
from mpl_toolkits.mplot3d import Axes3D
from pyulog import ULog
from pyulog.px4 import PX4ULog
from pylab import *
import numpy as np
import textwrap as tw
import argparse
#arguments
parser = argparse.ArgumentParser(description='Calculate compensation parameters from ulog')
parser.add_argument('logfile', type=str, nargs='?', default=[],
help='full path to ulog file')
parser.add_argument('type', type=str, nargs='?', choices=['current', 'thrust'], default=[],
help='Power signal used for compensation, supported is "current" or "thrust".')
parser.add_argument('--instance', type=int, nargs='?', default=0,
help='instance of the current or thrust signal to use (0 or 1)')
args = parser.parse_args()
log_name = args.logfile
comp_type = args.type
comp_instance = args.instance
#Load the log data (produced by pyulog)
log = ULog(log_name)
pxlog = PX4ULog(log)
def get_data(topic_name, variable_name, index):
try:
dataset = log.get_dataset(topic_name, index)
return dataset.data[variable_name]
except:
return []
def ms2s_list(time_ms_list):
if len(time_ms_list) > 0:
return 1e-6 * time_ms_list
else:
return time_ms_list
# Select msgs and copy into arrays
armed = get_data('vehicle_status', 'arming_state', 0)
t_armed = ms2s_list(get_data('vehicle_status', 'timestamp', 0))
if comp_type == "thrust":
power = get_data('vehicle_rates_setpoint', 'thrust_body[2]', comp_instance)
power_t = ms2s_list(get_data('vehicle_rates_setpoint', 'timestamp', comp_instance))
comp_type_param = 1
factor = 1
unit = "[G]"
elif comp_type == "current":
power = get_data('battery_status', 'current_a', comp_instance)
power = np.true_divide(power, 1000) #kA
power_t = ms2s_list(get_data('battery_status', 'timestamp', comp_instance))
comp_type_param = 2 + comp_instance
factor = -1
unit = "[G/kA]"
else:
print("unknown compensation type {}. Supported is either 'thrust' or 'current'.".format(comp_type))
sys.exit(1)
if len(power) == 0:
print("could not retrieve power signal from log, zero data points")
sys.exit(1)
mag0X_body = get_data('sensor_mag', 'x', 0)
mag0Y_body = get_data('sensor_mag', 'y', 0)
mag0Z_body = get_data('sensor_mag', 'z', 0)
t_mag0 = ms2s_list(get_data('sensor_mag', 'timestamp', 0))
mag0_ID = get_data('sensor_mag', 'device_id', 0)
mag1X_body = get_data('sensor_mag', 'x', 1)
mag1Y_body = get_data('sensor_mag', 'y', 1)
mag1Z_body = get_data('sensor_mag', 'z', 1)
t_mag1 = ms2s_list(get_data('sensor_mag', 'timestamp', 1))
mag1_ID = get_data('sensor_mag', 'device_id', 1)
mag2X_body = get_data('sensor_mag', 'x', 2)
mag2Y_body = get_data('sensor_mag', 'y', 2)
mag2Z_body = get_data('sensor_mag', 'z', 2)
t_mag2 = ms2s_list(get_data('sensor_mag', 'timestamp', 2))
mag2_ID = get_data('sensor_mag', 'device_id', 2)
mag3X_body = get_data('sensor_mag', 'x', 3)
mag3Y_body = get_data('sensor_mag', 'y', 3)
mag3Z_body = get_data('sensor_mag', 'z', 3)
t_mag3 = ms2s_list(get_data('sensor_mag', 'timestamp', 3))
mag3_ID = get_data('sensor_mag', 'device_id', 3)
magX_body = []
magY_body = []
magZ_body = []
mag_id = []
t_mag = []
if len(mag0X_body) > 0:
magX_body.append(mag0X_body)
magY_body.append(mag0Y_body)
magZ_body.append(mag0Z_body)
t_mag.append(t_mag0)
mag_id.append(mag0_ID[0])
if len(mag1X_body) > 0:
magX_body.append(mag1X_body)
magY_body.append(mag1Y_body)
magZ_body.append(mag1Z_body)
t_mag.append(t_mag1)
mag_id.append(mag1_ID[0])
if len(mag2X_body) > 0:
magX_body.append(mag2X_body)
magY_body.append(mag2Y_body)
magZ_body.append(mag2Z_body)
t_mag.append(t_mag2)
mag_id.append(mag2_ID[0])
if len(mag3X_body) > 0:
magX_body.append(mag3X_body)
magY_body.append(mag3Y_body)
magZ_body.append(mag3Z_body)
t_mag.append(t_mag3)
mag_id.append(mag3_ID[0])
n_mag = len(magX_body)
#log index does not necessarily match mag calibration instance number
calibration_instance = []
instance_found = False
for idx in range(n_mag):
instance_found = False
for j in range(4):
if mag_id[idx] == log.initial_parameters["CAL_MAG{}_ID".format(j)]:
calibration_instance.append(j)
instance_found = True
if not instance_found:
print('Mag {} calibration instance not found, run compass calibration first.'.format(mag_id[idx]))
#get first arming sequence from data
start_time = 0
stop_time = 0
for i in range(len(armed)-1):
if armed[i] == 1 and armed[i+1] == 2:
start_time = t_armed[i+1]
if armed[i] == 2 and armed[i+1] == 1:
stop_time = t_armed[i+1]
break
#cut unarmed sequences from mag data
index_start = 0
index_stop = 0
for idx in range(n_mag):
for i in range(len(t_mag[idx])):
if t_mag[idx][i] > start_time:
index_start = i
break
for i in range(len(t_mag[idx])):
if t_mag[idx][i] > stop_time:
index_stop = i -1
break
t_mag[idx] = t_mag[idx][index_start:index_stop]
magX_body[idx] = magX_body[idx][index_start:index_stop]
magY_body[idx] = magY_body[idx][index_start:index_stop]
magZ_body[idx] = magZ_body[idx][index_start:index_stop]
#resample data
power_resampled = []
for idx in range(n_mag):
power_resampled.append(interp(t_mag[idx], power_t, power))
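# np.interp above aligns the power signal with each magnetometer's timestamps so the
# linear fit below compares samples taken at (approximately) the same time.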
#fit linear to get coefficients
px = []
py = []
pz = []
for idx in range(n_mag):
px_temp, res_x, _, _, _ = polyfit(power_resampled[idx], magX_body[idx], 1,full = True)
py_temp, res_y, _, _, _ = polyfit(power_resampled[idx], magY_body[idx], 1,full = True)
pz_temp, res_z, _, _, _ = polyfit(power_resampled[idx], magZ_body[idx], 1, full = True)
px.append(px_temp)
py.append(py_temp)
pz.append(pz_temp)
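# Per axis the fitted model is mag ~= p[0] * power + p[1]; only the slope p[0] is
# used as the compensation parameter below (sign-flipped via `factor` for the
# current-based variant).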
#print to console
for idx in range(n_mag):
print('Mag{} device ID {} (calibration instance {})'.format(idx, mag_id[idx], calibration_instance[idx]))
print('\033[91m \n{}-based compensation: \033[0m'.format(comp_type))
print('\nparam set CAL_MAG_COMP_TYP {}'.format(comp_type_param))
for idx in range(n_mag):
print('\nparam set CAL_MAG{}_XCOMP {:.3f}'.format(calibration_instance[idx], factor * px[idx][0]))
print('param set CAL_MAG{}_YCOMP {:.3f}'.format(calibration_instance[idx], factor * py[idx][0]))
print('param set CAL_MAG{}_ZCOMP {:.3f}'.format(calibration_instance[idx], factor * pz[idx][0]))
#plot data
for idx in range(n_mag):
fig = plt.figure(num=None, figsize=(25, 14), dpi=80, facecolor='w', edgecolor='k')
fig.suptitle('Compensation Parameter Fit \n{} \nmag {} ID: {} (calibration instance {})'.format(log_name, idx, mag_id[idx], calibration_instance[idx]), fontsize=14, fontweight='bold')
plt.subplot(1,3,1)
plt.plot(power_resampled[idx], magX_body[idx], 'yo', power_resampled[idx], px[idx][0]*power_resampled[idx]+px[idx][1], '--k')
plt.xlabel('current [kA]')
plt.ylabel('mag X [G]')
plt.subplot(1,3,2)
plt.plot(power_resampled[idx], magY_body[idx], 'yo', power_resampled[idx], py[idx][0]*power_resampled[idx]+py[idx][1], '--k')
plt.xlabel('current [kA]')
plt.ylabel('mag Y [G]')
plt.subplot(1,3,3)
plt.plot(power_resampled[idx], magZ_body[idx], 'yo', power_resampled[idx], pz[idx][0]*power_resampled[idx]+pz[idx][1], '--k')
plt.xlabel('current [kA]')
plt.ylabel('mag Z [G]')
# display results
plt.figtext(0.24, 0.03, 'CAL_MAG{}_XCOMP: {:.3f} {}'.format(calibration_instance[idx],factor * px[idx][0],unit), horizontalalignment='center', fontsize=12, multialignment='left', bbox=dict(boxstyle="round", facecolor='#D8D8D8', ec="0.5", pad=0.5, alpha=1), fontweight='bold')
plt.figtext(0.51, 0.03, 'CAL_MAG{}_YCOMP: {:.3f} {}'.format(calibration_instance[idx],factor * py[idx][0],unit), horizontalalignment='center', fontsize=12, multialignment='left', bbox=dict(boxstyle="round", facecolor='#D8D8D8', ec="0.5", pad=0.5, alpha=1), fontweight='bold')
plt.figtext(0.79, 0.03, 'CAL_MAG{}_ZCOMP: {:.3f} {}'.format(calibration_instance[idx],factor * pz[idx][0],unit), horizontalalignment='center', fontsize=12, multialignment='left', bbox=dict(boxstyle="round", facecolor='#D8D8D8', ec="0.5", pad=0.5, alpha=1), fontweight='bold')
#compensation comparison plots
for idx in range(n_mag):
fig = plt.figure(num=None, figsize=(25, 14), dpi=80, facecolor='w', edgecolor='k')
fig.suptitle('Original Data vs. Compensation \n{}\nmag {} ID: {} (calibration instance {})'.format(log_name, idx, mag_id[idx], calibration_instance[idx]), fontsize=14, fontweight='bold')
plt.subplot(3,1,1)
original_x, = plt.plot(t_mag[idx], magX_body[idx], label='original')
power_x, = plt.plot(t_mag[idx],magX_body[idx] - px[idx][0] * power_resampled[idx], label='compensated')
plt.legend(handles=[original_x, power_x])
plt.xlabel('Time [s]')
plt.ylabel('Mag X corrected[G]')
plt.subplot(3,1,2)
original_y, = plt.plot(t_mag[idx], magY_body[idx], label='original')
power_y, = plt.plot(t_mag[idx],magY_body[idx] - py[idx][0] * power_resampled[idx], label='compensated')
plt.legend(handles=[original_y, power_y])
plt.xlabel('Time [s]')
plt.ylabel('Mag Y corrected[G]')
plt.subplot(3,1,3)
original_z, = plt.plot(t_mag[idx], magZ_body[idx], label='original')
power_z, = plt.plot(t_mag[idx],magZ_body[idx] - pz[idx][0] * power_resampled[idx], label='compensated')
plt.legend(handles=[original_z, power_z])
plt.xlabel('Time [s]')
plt.ylabel('Mag Z corrected[G]')
plt.show()
| 2.515625 | 3 |
app.py | 19857625778/watchlist | 0 | 5241 | from flask import Flask
app = Flask(__name__)
@app.route('/')
def hello():
return 'welcome to my watchlist' | 2.109375 | 2 |
portfolio_optimization/constants.py | AI-Traiding-Team/paired_trading | 1 | 5242 | <reponame>AI-Traiding-Team/paired_trading
import os
path1 = "outputs"
path2 = "outputs/_imgs"
path3 = "outputs/max_sharpe_weights"
path4 = "outputs/opt_portfolio_trades"
try:
os.mkdir(path1)
except OSError:
print ("Директория %s уже создана" % path1)
else:
print ("Успешно создана директория %s " % path1)
try:
os.makedirs(path2)
os.makedirs(path3)
os.makedirs(path4)
except OSError:
print ("Директории уже созданы")
else:
print ("Успешно созданы нужные директории")
source_path = '../source_root/1m'
destination_path = 'outputs' | 1.984375 | 2 |
mypy/transformtype.py | silky/mypy | 1 | 5243 | <filename>mypy/transformtype.py
"""Transform classes for runtime type checking."""
from typing import Undefined, List, Set, Any, cast, Tuple, Dict
from mypy.nodes import (
TypeDef, Node, FuncDef, VarDef, Block, Var, ExpressionStmt,
TypeInfo, SuperExpr, NameExpr, CallExpr, MDEF, MemberExpr, ReturnStmt,
AssignmentStmt, TypeExpr, PassStmt, SymbolTableNode
)
from mypy import nodes
from mypy.semanal import self_type
from mypy.types import (
Callable, Instance, Type, AnyType, BOUND_VAR, Void, RuntimeTypeVar,
UnboundType
)
from mypy.checkmember import analyse_member_access
from mypy.checkexpr import type_object_type
from mypy.subtypes import map_instance_to_supertype
import mypy.transform
from mypy.transformfunc import FuncTransformer
from mypy.transutil import (
self_expr, tvar_slot_name, tvar_arg_name, prepend_arg_type
)
from mypy.rttypevars import translate_runtime_type_vars_locally
from mypy.compileslotmap import find_slot_origin
from mypy.coerce import coerce
from mypy.maptypevar import num_slots, get_tvar_access_path
from mypy import erasetype
class TypeTransformer:
"""Class for transforming type definitions for runtime type checking.
Transform a type definition by modifying it in-place.
The following transformations are performed:
* Represent generic type variables explicitly as attributes.
* Create generic wrapper classes used by coercions to different type
args.
* Create wrapper methods needed when overriding methods with different
signatures.
* Create wrapper methods for calling methods in dynamically typed code.
These perform the necessary coercions for arguments and return values
to/from 'Any'.
This is used by DyncheckTransformVisitor and is logically aggregated within
that class.
"""
# Used for common transformation operations.
tf = Undefined('mypy.transform.DyncheckTransformVisitor')
# Used for transforming methods.
func_tf = Undefined(FuncTransformer)
def __init__(self, tf: 'mypy.transform.DyncheckTransformVisitor') -> None:
self.tf = tf
self.func_tf = FuncTransformer(tf)
def transform_type_def(self, tdef: TypeDef) -> List[Node]:
"""Transform a type definition.
The result may be one or two definitions. The first is the
transformation of the original TypeDef. The second is a
wrapper type, which is generated for generic types only.
"""
defs = [] # type: List[Node]
if tdef.info.type_vars:
# This is a generic type. Insert type variable slots in
# the class definition for new type variables, i.e. type
# variables not mapped to superclass type variables.
defs.extend(self.make_tvar_representation(tdef.info))
# Iterate over definitions and transform each of them.
vars = set() # type: Set[Var]
for d in tdef.defs.body:
if isinstance(d, FuncDef):
# Implicit cast from FuncDef[] to Node[] is safe below.
defs.extend(Any(self.func_tf.transform_method(d)))
elif isinstance(d, VarDef):
defs.extend(self.transform_var_def(d))
for n in d.items:
vars.add(n)
elif isinstance(d, AssignmentStmt):
self.transform_assignment(d)
defs.append(d)
# Add accessors for implicitly defined attributes.
for node in tdef.info.names.values():
if isinstance(node.node, Var):
v = cast(Var, node.node)
if v.info == tdef.info and v not in vars:
defs.extend(self.make_accessors(v))
# For generic classes, add an implicit __init__ wrapper.
defs.extend(self.make_init_wrapper(tdef))
if tdef.is_generic() or (tdef.info.bases and
tdef.info.mro[1].is_generic()):
self.make_instance_tvar_initializer(
cast(FuncDef, tdef.info.get_method('__init__')))
if not defs:
defs.append(PassStmt())
if tdef.is_generic():
gen_wrapper = self.generic_class_wrapper(tdef)
tdef.defs = Block(defs)
dyn_wrapper = self.make_type_object_wrapper(tdef)
if not tdef.is_generic():
return [tdef, dyn_wrapper]
else:
return [tdef, dyn_wrapper, gen_wrapper]
def make_init_wrapper(self, tdef: TypeDef) -> List[Node]:
"""Make and return an implicit __init__ if class needs it.
Otherwise, return an empty list. We include an implicit
__init__ if the class is generic or if it extends a generic class
and if it does not define __init__.
The __init__ of a generic class requires one or more extra type
variable arguments. The inherited __init__ may not accept these.
For example, assume these definitions:
. class A(Generic[T]): pass
. class B(A[int]): pass
The constructor for B will be (equivalent to)
. def __init__(self: B) -> None:
. self.__tv = <int>
. super().__init__(<int>)
"""
# FIX overloading, default args / varargs, keyword args
info = tdef.info
if '__init__' not in info.names and (
tdef.is_generic() or (info.bases and
info.mro[1].is_generic())):
# Generic class with no explicit __init__ method
# (i.e. __init__ inherited from superclass). Generate a
# wrapper that initializes type variable slots and calls
# the superclass __init__ method.
base = info.mro[1]
selftype = self_type(info)
callee_type = cast(Callable, analyse_member_access(
'__init__', selftype, None, False, True, None, None,
base))
# Now the callee type may contain the type variables of a
# grandparent as bound type variables, but we want the
# type variables of the parent class. Explicitly set the
# bound type variables.
callee_type = self.fix_bound_init_tvars(callee_type,
map_instance_to_supertype(selftype, base))
super_init = cast(FuncDef, base.get_method('__init__'))
# Build argument list.
args = [Var('self')]
for i in range(1, len(super_init.args)):
args.append(Var(super_init.args[i].name()))
args[-1].type = callee_type.arg_types[i - 1]
selft = self_type(self.tf.type_context())
callee_type = prepend_arg_type(callee_type, selft)
creat = FuncDef('__init__', args,
super_init.arg_kinds, [None] * len(args),
Block([]))
creat.info = tdef.info
creat.type = callee_type
creat.is_implicit = False
tdef.info.names['__init__'] = SymbolTableNode(MDEF, creat,
typ=creat.type)
# Insert a call to superclass constructor. If the
# superclass is object, the constructor does nothing =>
# omit the call.
if base.fullname() != 'builtins.object':
creat.body.body.append(
self.make_superclass_constructor_call(tdef.info,
callee_type))
# Implicit cast from FuncDef[] to Node[] is safe below.
return Any(self.func_tf.transform_method(creat))
else:
return []
def fix_bound_init_tvars(self, callable: Callable,
typ: Instance) -> Callable:
"""Replace bound type vars of callable with args from instance type."""
a = [] # type: List[Tuple[int, Type]]
for i in range(len(typ.args)):
a.append((i + 1, typ.args[i]))
return Callable(callable.arg_types, callable.arg_kinds,
callable.arg_names, callable.ret_type,
callable.is_type_obj(), callable.name,
callable.variables, a)
def make_superclass_constructor_call(
self, info: TypeInfo, callee_type: Callable) -> ExpressionStmt:
"""Construct a statement that calls the superclass constructor.
In particular, it passes any type variables arguments as needed.
"""
callee = SuperExpr('__init__')
callee.info = info
# We do not handle generic constructors. Either pass runtime
# type variables from the current scope or perhaps require
# explicit constructor in this case.
selftype = self_type(info)
# FIX overloading
# FIX default args / varargs
# Map self type to the superclass context.
base = info.mro[1]
selftype = map_instance_to_supertype(selftype, base)
super_init = cast(FuncDef, base.get_method('__init__'))
# Add constructor arguments.
args = [] # type: List[Node]
for n in range(1, callee_type.min_args):
args.append(NameExpr(super_init.args[n].name()))
self.tf.set_type(args[-1], callee_type.arg_types[n])
# Store callee type after stripping away the 'self' type.
self.tf.set_type(callee, nodes.method_callable(callee_type))
call = CallExpr(callee, args, [nodes.ARG_POS] * len(args))
return ExpressionStmt(call)
def transform_var_def(self, o: VarDef) -> List[Node]:
"""Transform a member variable definition.
The result may be one or more definitions.
"""
res = [o] # type: List[Node]
self.tf.visit_var_def(o)
# Add $x and set$x accessor wrappers for data attributes. These let
# derived classes redefine a data attribute as a property.
for n in o.items:
res.extend(self.make_accessors(n))
return res
def transform_assignment(self, o: AssignmentStmt) -> None:
"""Transform an assignment statement in class body."""
self.tf.visit_assignment_stmt(o)
def make_accessors(self, n: Var) -> List[Node]:
if n.type:
t = n.type
else:
t = AnyType()
return [self.make_getter_wrapper(n.name(), t),
self.make_setter_wrapper(n.name(), t),
self.make_dynamic_getter_wrapper(n.name(), t),
self.make_dynamic_setter_wrapper(n.name(), t)]
def make_getter_wrapper(self, name: str, typ: Type) -> FuncDef:
"""Create a getter wrapper for a data attribute.
The getter will be of this form:
. def $name*(self: C) -> type:
. return self.name!
"""
scope = self.make_scope()
selft = self.self_type()
selfv = scope.add('self', selft)
member_expr = MemberExpr(scope.name_expr('self'), name, direct=True)
ret = ReturnStmt(member_expr)
wrapper_name = '$' + name
sig = Callable([selft], [nodes.ARG_POS], [None], typ, False)
fdef = FuncDef(wrapper_name,
[selfv],
[nodes.ARG_POS],
[None],
Block([ret]), sig)
fdef.info = self.tf.type_context()
return fdef
def make_dynamic_getter_wrapper(self, name: str, typ: Type) -> FuncDef:
"""Create a dynamically-typed getter wrapper for a data attribute.
The getter will be of this form:
. def $name*(self: C) -> Any:
. return {Any <= typ self.name!}
"""
scope = self.make_scope()
selft = self.self_type()
selfv = scope.add('self', selft)
member_expr = MemberExpr(scope.name_expr('self'), name, direct=True)
coerce_expr = coerce(member_expr, AnyType(), typ,
self.tf.type_context())
ret = ReturnStmt(coerce_expr)
wrapper_name = '$' + name + self.tf.dynamic_suffix()
sig = Callable([selft], [nodes.ARG_POS], [None], AnyType(), False)
return FuncDef(wrapper_name,
[selfv],
[nodes.ARG_POS],
[None],
Block([ret]), sig)
def make_setter_wrapper(self, name: str, typ: Type) -> FuncDef:
"""Create a setter wrapper for a data attribute.
The setter will be of this form:
. def set$name(self: C, name: typ) -> None:
. self.name! = name
"""
scope = self.make_scope()
selft = self.self_type()
selfv = scope.add('self', selft)
namev = scope.add(name, typ)
lvalue = MemberExpr(scope.name_expr('self'), name, direct=True)
rvalue = scope.name_expr(name)
ret = AssignmentStmt([lvalue], rvalue)
wrapper_name = 'set$' + name
sig = Callable([selft, typ],
[nodes.ARG_POS, nodes.ARG_POS],
[None, None],
Void(), False)
fdef = FuncDef(wrapper_name,
[selfv, namev],
[nodes.ARG_POS, nodes.ARG_POS],
[None, None],
Block([ret]), sig)
fdef.info = self.tf.type_context()
return fdef
def make_dynamic_setter_wrapper(self, name: str, typ: Type) -> FuncDef:
"""Create a dynamically-typed setter wrapper for a data attribute.
The setter will be of this form:
. def set$name*(self: C, name; Any) -> None:
. self.name! = {typ name}
"""
lvalue = MemberExpr(self_expr(), name, direct=True)
name_expr = NameExpr(name)
rvalue = coerce(name_expr, typ, AnyType(), self.tf.type_context())
ret = AssignmentStmt([lvalue], rvalue)
wrapper_name = 'set$' + name + self.tf.dynamic_suffix()
selft = self_type(self.tf.type_context())
sig = Callable([selft, AnyType()],
[nodes.ARG_POS, nodes.ARG_POS],
[None, None],
Void(), False)
return FuncDef(wrapper_name,
[Var('self'), Var(name)],
[nodes.ARG_POS, nodes.ARG_POS],
[None, None],
Block([ret]), sig)
def generic_accessor_wrappers(self, s: AssignmentStmt) -> List[Node]:
"""Construct wrapper class methods for attribute accessors."""
res = [] # type: List[Node]
assert len(s.lvalues) == 1
assert isinstance(s.lvalues[0], NameExpr)
assert s.type is not None
name = cast(NameExpr, s.lvalues[0])
for fd in [self.make_getter_wrapper(name.name, s.type),
self.make_setter_wrapper(name.name, s.type)]:
res.extend(self.func_tf.generic_method_wrappers(fd))
return res
def generic_class_wrapper(self, tdef: TypeDef) -> TypeDef:
"""Construct a wrapper class for a generic type."""
# FIX semanal meta-info for nodes + TypeInfo
defs = [] # type: List[Node]
# Does the type have a superclass, other than builtins.object?
base = tdef.info.mro[1]
has_proper_superclass = base.fullname() != 'builtins.object'
if not has_proper_superclass or self.tf.is_java:
# Generate member variables for wrapper object.
defs.extend(self.make_generic_wrapper_member_vars(tdef))
for alt in [False, BOUND_VAR]:
defs.extend(self.make_tvar_representation(tdef.info, alt))
# Generate constructor.
defs.append(self.make_generic_wrapper_init(tdef.info))
# Generate method wrappers.
for d in tdef.defs.body:
if isinstance(d, FuncDef):
if not d.is_constructor():
defs.extend(self.func_tf.generic_method_wrappers(d))
elif isinstance(d, AssignmentStmt):
defs.extend(self.generic_accessor_wrappers(d))
elif not isinstance(d, PassStmt):
raise RuntimeError(
'Definition {} at line {} not supported'.format(
type(d), d.line))
base_type = self.tf.named_type('builtins.object') # type: Type
# Inherit superclass wrapper if there is one.
if has_proper_superclass:
base = self.find_generic_base_class(tdef.info)
if base:
# TODO bind the type somewhere
base_type = UnboundType(base.defn.name +
self.tf.wrapper_class_suffix())
# Build the type definition.
wrapper = TypeDef(tdef.name + self.tf.wrapper_class_suffix(),
Block(defs),
None,
[base_type])
# FIX fullname
self.tf.add_line_mapping(tdef, wrapper)
return wrapper
def find_generic_base_class(self, info: TypeInfo) -> TypeInfo:
base = info.mro[1]
while True:
if base.type_vars != []:
return base
if len(base.mro) <= 1:
return None
base = base.mro[1]
def make_generic_wrapper_member_vars(self, tdef: TypeDef) -> List[Node]:
"""Generate member variable definition for wrapped object (__o).
This is added to a generic wrapper class.
"""
# The type is 'Any' since it should behave covariantly in subclasses.
return [VarDef([Var(self.object_member_name(tdef.info),
AnyType())], False, None)]
def object_member_name(self, info: TypeInfo) -> str:
if self.tf.is_java:
return '__o_{}'.format(info.name)
else:
return '__o'
def make_generic_wrapper_init(self, info: TypeInfo) -> FuncDef:
"""Build constructor of a generic wrapper class."""
nslots = num_slots(info)
cdefs = [] # type: List[Node]
# Build superclass constructor call.
base = info.mro[1]
if base.fullname() != 'builtins.object' and self.tf.is_java:
s = SuperExpr('__init__')
cargs = [NameExpr('__o')] # type: List[Node]
for n in range(num_slots(base)):
cargs.append(NameExpr(tvar_arg_name(n + 1)))
for n in range(num_slots(base)):
cargs.append(NameExpr(tvar_arg_name(n + 1, BOUND_VAR)))
c = CallExpr(s, cargs, [nodes.ARG_POS] * len(cargs))
cdefs.append(ExpressionStmt(c))
# Create initialization of the wrapped object.
cdefs.append(AssignmentStmt([MemberExpr(
self_expr(),
self.object_member_name(info),
direct=True)],
NameExpr('__o')))
# Build constructor arguments.
args = [Var('self'), Var('__o')]
init = [None, None] # type: List[Node]
for alt in [False, BOUND_VAR]:
for n in range(nslots):
args.append(Var(tvar_arg_name(n + 1, alt)))
init.append(None)
nargs = nslots * 2 + 2
fdef = FuncDef('__init__',
args,
[nodes.ARG_POS] * nargs,
init,
Block(cdefs),
Callable( [AnyType()] * nargs,
[nodes.ARG_POS] * nargs, [None] * nargs,
Void(),
is_type_obj=False))
fdef.info = info
self.make_wrapper_slot_initializer(fdef)
return fdef
def make_tvar_representation(self, info: TypeInfo,
is_alt: Any = False) -> List[Node]:
"""Return type variable slot member definitions.
There are of form '__tv*: Any'. Only include new slots defined in the
type.
"""
defs = [] # type: List[Node]
base_slots = num_slots(info.mro[1])
for n in range(len(info.type_vars)):
# Only include a type variable if it introduces a new slot.
slot = get_tvar_access_path(info, n + 1)[0] - 1
if slot >= base_slots:
defs.append(VarDef([Var(tvar_slot_name(slot, is_alt),
AnyType())], False, None))
return defs
def make_instance_tvar_initializer(self, creat: FuncDef) -> None:
"""Add type variable member initialization code to a constructor.
Modify the constructor body directly.
"""
for n in range(num_slots(creat.info)):
rvalue = self.make_tvar_init_expression(creat.info, n)
init = AssignmentStmt([MemberExpr(self_expr(),
tvar_slot_name(n),
direct=True)],
rvalue)
self.tf.set_type(init.lvalues[0], AnyType())
self.tf.set_type(init.rvalue, AnyType())
creat.body.body.insert(n, init)
def make_wrapper_slot_initializer(self, creat: FuncDef) -> None:
"""Add type variable member initializations to a wrapper constructor.
The function must be a constructor of a generic wrapper class. Modify
the constructor body directly.
"""
for alt in [BOUND_VAR, False]:
for n in range(num_slots(creat.info)):
rvalue = TypeExpr(
RuntimeTypeVar(NameExpr(tvar_slot_name(n, alt))))
init = AssignmentStmt(
[MemberExpr(self_expr(),
tvar_slot_name(n, alt), direct=True)],
rvalue)
self.tf.set_type(init.lvalues[0], AnyType())
self.tf.set_type(init.rvalue, AnyType())
creat.body.body.insert(n, init)
def make_tvar_init_expression(self, info: TypeInfo, slot: int) -> TypeExpr:
"""Return the initializer for the given slot in the given type.
This is the type expression that initializes the given slot
using the type arguments given to the constructor.
Examples:
- In 'class C(Generic[T]) ...', the initializer for the slot 0 is
TypeExpr(RuntimeTypeVar(NameExpr('__tv'))).
- In 'class D(C[int]) ...', the initializer for the slot 0 is
TypeExpr(<int instance>).
"""
# Figure out the superclass which defines the slot; also figure out
# the tvar index that maps to the slot.
origin, tv = find_slot_origin(info, slot)
# Map self type to the superclass -> extract tvar with target index
# (only contains subclass tvars?? PROBABLY NOT).
selftype = self_type(info)
selftype = map_instance_to_supertype(selftype, origin)
tvar = selftype.args[tv - 1]
# Map tvar to an expression; refer to local vars instead of member
# vars always.
tvar = translate_runtime_type_vars_locally(tvar)
# Build the rvalue (initializer) expression
return TypeExpr(tvar)
def make_type_object_wrapper(self, tdef: TypeDef) -> FuncDef:
"""Construct dynamically typed wrapper function for a class.
It simple calls the type object and returns the result.
"""
# TODO keyword args, default args and varargs
# TODO overloads
type_sig = cast(Callable, type_object_type(tdef.info, None))
type_sig = cast(Callable, erasetype.erase_typevars(type_sig))
init = cast(FuncDef, tdef.info.get_method('__init__'))
arg_kinds = type_sig.arg_kinds
# The wrapper function has a dynamically typed signature.
wrapper_sig = Callable( [AnyType()] * len(arg_kinds),
arg_kinds, [None] * len(arg_kinds),
AnyType(), False)
n = NameExpr(tdef.name) # TODO full name
args = self.func_tf.call_args(
init.args[1:],
type_sig,
wrapper_sig,
True, False)
call = CallExpr(n, args, arg_kinds)
ret = ReturnStmt(call)
fdef = FuncDef(tdef.name + self.tf.dynamic_suffix(),
init.args[1:],
arg_kinds, [None] * len(arg_kinds),
Block([ret]))
fdef.type = wrapper_sig
return fdef
def self_type(self) -> Instance:
return self_type(self.tf.type_context())
def make_scope(self) -> 'Scope':
return Scope(self.tf.type_map)
class Scope:
"""Maintain a temporary local scope during transformation."""
def __init__(self, type_map: Dict[Node, Type]) -> None:
self.names = {} # type: Dict[str, Var]
self.type_map = type_map
def add(self, name: str, type: Type) -> Var:
v = Var(name)
v.type = type
self.names[name] = v
return v
def name_expr(self, name: str) -> NameExpr:
nexpr = NameExpr(name)
nexpr.kind = nodes.LDEF
node = self.names[name]
nexpr.node = node
self.type_map[nexpr] = node.type
return nexpr
| 2.4375 | 2 |
jazzpos/admin.py | AhmadManzoor/jazzpos | 5 | 5244 | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from django_tablib.admin import TablibAdmin
from jazzpos.models import Customer, Patient, Store, CustomerType, StoreSettings
from jazzpos.models import UserProfile
class CustomerAdmin(TablibAdmin):
formats = ['xls', 'csv',]
class PatientAdmin(TablibAdmin):
formats = ['xls', 'csv',]
class StoreAdmin(admin.ModelAdmin):
pass
class StoreSettingsAdmin(admin.ModelAdmin):
pass
class CustomerTypeAdmin(admin.ModelAdmin):
pass
class UserProfileInline(admin.StackedInline):
model = UserProfile
UserAdmin.inlines = [UserProfileInline,]
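# Attaching the profile inline to Django's stock UserAdmin; User is then
# unregistered and re-registered below so the profile is editable from the
# built-in user change page.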
admin.site.register(Customer, CustomerAdmin)
admin.site.register(Patient, PatientAdmin)
admin.site.register(Store, StoreAdmin)
admin.site.register(StoreSettings, StoreSettingsAdmin)
admin.site.register(CustomerType, CustomerTypeAdmin)
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
| 1.867188 | 2 |
classification/imaterialist_challenge_furniture_2018/configs/train/train_inceptionresnetv2_350_ssd_like_v3.py | vfdev-5/ignite-examples | 11 | 5245 | # Basic training configuration file
from torch.optim import RMSprop
from torch.optim.lr_scheduler import MultiStepLR
from torchvision.transforms import RandomHorizontalFlip, Compose
from torchvision.transforms import RandomResizedCrop, RandomAffine, RandomApply
from torchvision.transforms import ColorJitter, ToTensor, Normalize
from common.dataset import FilesFromCsvDataset
from common.data_loaders import get_data_loader
from models.inceptionresnetv2_ssd_like import FurnitureInceptionResNetV4350SSDLike_v3
SEED = 17
DEBUG = True
DEVICE = 'cuda'
OUTPUT_PATH = "output"
size = 350
TRAIN_TRANSFORMS = Compose([
RandomApply(
[RandomAffine(degrees=10, resample=3, fillcolor=(255, 255, 255)), ],
p=0.5
),
RandomResizedCrop(size, scale=(0.7, 1.0), interpolation=3),
RandomHorizontalFlip(p=0.5),
ColorJitter(hue=0.12, brightness=0.12),
ToTensor(),
Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
])
VAL_TRANSFORMS = TRAIN_TRANSFORMS
BATCH_SIZE = 24
NUM_WORKERS = 15
dataset = FilesFromCsvDataset("output/unique_filtered_train_dataset.csv")
TRAIN_LOADER = get_data_loader(dataset,
data_transform=TRAIN_TRANSFORMS,
batch_size=BATCH_SIZE,
num_workers=NUM_WORKERS,
pin_memory='cuda' in DEVICE)
val_dataset = FilesFromCsvDataset("output/unique_filtered_val_dataset.csv")
VAL_LOADER = get_data_loader(val_dataset,
data_transform=VAL_TRANSFORMS,
batch_size=BATCH_SIZE,
num_workers=NUM_WORKERS,
pin_memory='cuda' in DEVICE)
MODEL = FurnitureInceptionResNetV4350SSDLike_v3(num_classes=128, pretrained='imagenet')
N_EPOCHS = 100
OPTIM = RMSprop(
params=[
{"params": MODEL.extractor.stem.parameters(), 'lr': 0.0001},
{"params": MODEL.extractor.low_features_a.parameters(), 'lr': 0.00045},
{"params": MODEL.extractor.low_features_b.parameters(), 'lr': 0.00045},
{"params": MODEL.extractor.mid_features.parameters(), 'lr': 0.0045},
{"params": MODEL.extractor.top_features.parameters(), 'lr': 0.0045},
{"params": MODEL.extractor.smooth_layers.parameters(), 'lr': 0.045},
{"params": MODEL.cls_layers.parameters(), 'lr': 0.045},
{"params": MODEL.boxes_to_classes.parameters(), 'lr': 0.045},
{"params": MODEL.final_classifier.parameters(), 'lr': 0.045},
],
alpha=0.9,
eps=1.0
)
LR_SCHEDULERS = [
MultiStepLR(OPTIM, milestones=[4, 5, 6, 7, 8, 10, 11, 13, 14, 15], gamma=0.5),
]
EARLY_STOPPING_KWARGS = {
'patience': 25,
# 'score_function': None
}
LOG_INTERVAL = 100
| 2.0625 | 2 |
examples/qmmm/02-mcscf.py | QuESt-Calculator/pyscf | 501 | 5246 | #!/usr/bin/env python
#
# Author: <NAME> <<EMAIL>>
#
'''
A simple example to run MCSCF with background charges.
'''
import numpy
from pyscf import gto, scf, mcscf, qmmm
mol = gto.M(atom='''
C 1.1879 -0.3829 0.0000
C 0.0000 0.5526 0.0000
O -1.1867 -0.2472 0.0000
H -1.9237 0.3850 0.0000
H 2.0985 0.2306 0.0000
H 1.1184 -1.0093 0.8869
H 1.1184 -1.0093 -0.8869
H -0.0227 1.1812 0.8852
H -0.0227 1.1812 -0.8852
''',
basis='3-21g',
verbose=4)
numpy.random.seed(1)
coords = numpy.random.random((5,3)) * 10
charges = (numpy.arange(5) + 1.) * -.1
#
# There are two ways to add background charges to MCSCF method.
# The recommended one is to initialize it in SCF calculation. The MCSCF
# calculation takes the information from SCF objects.
#
mf = qmmm.mm_charge(scf.RHF(mol), coords, charges).run()
mc = mcscf.CASSCF(mf, 6, 6)
mc.run()
mc = mcscf.CASCI(mf, 6, 6)
mc.run()
#
# The other method is to patch the MCSCF object with the background charges.
# Note: it updates the underlying SCF object inplace.
#
mo_init = mf.mo_coeff
mf = scf.RHF(mol)
mc = mcscf.CASSCF(mf, 6, 6)
mc = qmmm.mm_charge(mc, coords, charges)
mc.run(mo_init)
mf = scf.RHF(mol)
mc = mcscf.CASCI(mf, 6, 6)
mc = qmmm.mm_charge(mc, coords, charges)
mc.run(mo_init)
| 2.546875 | 3 |
mtp_send_money/apps/send_money/utils.py | uk-gov-mirror/ministryofjustice.money-to-prisoners-send-money | 0 | 5247 | import datetime
from decimal import Decimal, ROUND_DOWN, ROUND_UP
import logging
import re
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.validators import RegexValidator
from django.utils import formats
from django.utils.cache import patch_cache_control
from django.utils.dateformat import format as format_date
from django.utils.dateparse import parse_date
from django.utils.encoding import force_text
from django.utils.translation import gettext_lazy as _
from django.views.generic import TemplateView
from mtp_common.auth import api_client, urljoin
import requests
from requests.exceptions import Timeout
logger = logging.getLogger('mtp')
prisoner_number_re = re.compile(r'^[a-z]\d\d\d\d[a-z]{2}$', re.IGNORECASE)
def get_api_session():
return api_client.get_authenticated_api_session(
settings.SHARED_API_USERNAME,
settings.SHARED_API_PASSWORD,
)
def check_payment_service_available():
# service is deemed unavailable only if status is explicitly false, not if it cannot be determined
try:
response = requests.get(api_url('/service-availability/'), timeout=5)
gov_uk_status = response.json().get('gov_uk_pay', {})
return gov_uk_status.get('status', True), gov_uk_status.get('message_to_users')
except (Timeout, ValueError):
return True, None
def validate_prisoner_number(value):
if not prisoner_number_re.match(value):
raise ValidationError(_('Incorrect prisoner number format'), code='invalid')
class RejectCardNumberValidator(RegexValidator):
regex = r'\d{4}\s*\d{4}\s*\d{4}\s*\d{4}'
inverse_match = True
code = 'card_number'
message = _('Please do not enter your debit card number here')
def format_percentage(number, decimals=1, trim_zeros=True):
if not isinstance(number, Decimal):
number = Decimal(number)
percentage_text = ('{0:.%sf}' % decimals).format(number)
if decimals and trim_zeros and percentage_text.endswith('.' + ('0' * decimals)):
percentage_text = percentage_text[:-decimals - 1]
return percentage_text + '%'
def currency_format(amount, trim_empty_pence=False):
"""
Formats a number into currency format
@param amount: amount in pounds
@param trim_empty_pence: if True, strip off .00
"""
if not isinstance(amount, Decimal):
amount = unserialise_amount(amount)
text_amount = serialise_amount(amount)
if trim_empty_pence and text_amount.endswith('.00'):
text_amount = text_amount[:-3]
return '£' + text_amount
def currency_format_pence(amount, trim_empty_pence=False):
"""
Formats a number into currency format display pence only as #p
@param amount: amount in pounds
@param trim_empty_pence: if True, strip off .00
"""
if not isinstance(amount, Decimal):
amount = unserialise_amount(amount)
if amount.__abs__() < Decimal('1'):
return '%sp' % (amount * Decimal('100')).to_integral_value()
return currency_format(amount, trim_empty_pence=trim_empty_pence)
def clamp_amount(amount):
"""
Round the amount to integer pence,
rounding fractional pence up (away from zero) for any fractional pence value
that is greater than or equal to a tenth of a penny.
@param amount: Decimal amount to round
"""
tenths_of_pennies = (amount * Decimal('1000')).to_integral_value(rounding=ROUND_DOWN)
pounds = tenths_of_pennies / Decimal('1000')
return pounds.quantize(Decimal('1.00'), rounding=ROUND_UP)
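# Illustrative behaviour, assuming Decimal inputs:
#   clamp_amount(Decimal('10.001'))  -> Decimal('10.01')   # a tenth of a penny or more rounds up
#   clamp_amount(Decimal('10.0009')) -> Decimal('10.00')   # below a tenth of a penny is dropped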
def get_service_charge(amount, clamp=True):
if not isinstance(amount, Decimal):
amount = Decimal(amount)
percentage_charge = amount * settings.SERVICE_CHARGE_PERCENTAGE / Decimal('100')
service_charge = percentage_charge + settings.SERVICE_CHARGE_FIXED
if clamp:
return clamp_amount(service_charge)
return service_charge
def get_total_charge(amount, clamp=True):
if not isinstance(amount, Decimal):
amount = Decimal(amount)
charge = get_service_charge(amount, clamp=False)
result = amount + charge
if clamp:
return clamp_amount(result)
return result
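# Worked example, assuming SERVICE_CHARGE_PERCENTAGE = Decimal('2.4') and
# SERVICE_CHARGE_FIXED = Decimal('0.20') (the actual values come from settings):
#   get_service_charge(Decimal('10.00'))  -> Decimal('0.44')   # 2.4% of £10 plus 20p
#   get_total_charge(Decimal('10.00'))    -> Decimal('10.44')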
def serialise_amount(amount):
return '{0:.2f}'.format(amount)
def unserialise_amount(amount_text):
amount_text = force_text(amount_text)
return Decimal(amount_text)
def serialise_date(date):
return format_date(date, 'Y-m-d')
def unserialise_date(date_text):
date_text = force_text(date_text)
date = parse_date(date_text)
if not date:
raise ValueError('Invalid date')
return date
def lenient_unserialise_date(date_text):
date_text = force_text(date_text)
date_formats = formats.get_format('DATE_INPUT_FORMATS')
for date_format in date_formats:
try:
return datetime.datetime.strptime(date_text, date_format).date()
except (ValueError, TypeError):
continue
raise ValueError('Invalid date')
def govuk_headers():
return {
'Accept': 'application/json',
'Content-Type': 'application/json',
'Authorization': 'Bearer %s' % settings.GOVUK_PAY_AUTH_TOKEN
}
def govuk_url(path):
return urljoin(settings.GOVUK_PAY_URL, path)
def api_url(path):
return urljoin(settings.API_URL, path)
def site_url(path):
return urljoin(settings.SITE_URL, path)
def get_link_by_rel(data, rel):
if rel in data['_links']:
return data['_links'][rel]['href']
def make_response_cacheable(response):
"""
Allow response to be public and cached for an hour
"""
patch_cache_control(response, public=True, max_age=3600)
return response
class CacheableTemplateView(TemplateView):
"""
For simple pages whose content rarely changes so can be cached for an hour
"""
def get(self, request, *args, **kwargs):
response = super().get(request, *args, **kwargs)
return make_response_cacheable(response)
| 2.1875 | 2 |
src/zmbrelev/config.py | Zumbi-ML/zmbRELEV | 0 | 5248 | # -*- coding: UTF-8 -*-
import os
this_file_path = os.path.dirname(os.path.realpath(__file__))
MODELS_DIR = os.path.join(this_file_path, "models/")
| 1.601563 | 2 |
ArraysP2.py | EdgarVallejo96/pyEdureka | 0 | 5249 | <reponame>EdgarVallejo96/pyEdureka<filename>ArraysP2.py
import array as arr
a = arr.array('i', [ 1,2,3,4,5,6])
print(a)
# Accessing elements
print(a[2])
print(a[-2])
# BASIC ARRAY OPERATIONS
# Find length of array
print()
print('Length of array')
print(len(a))
# Adding elments to an array
# append() to add a single element at the end of an array
# extend() to add more than one element at the end of an array
# insert() to add an element at a specific position in an array
print()
# append
print('Append')
a.append(8)
print(a)
# extend
print()
print('Extend')
a.extend([9,8,6,5,4])
print(a)
# insert
print()
print('Insert')
a.insert(2,6) # first param is the index, second param is the value
print(a)
# Removing elements from an array
# pop() Remove an element and return it
# remove() Remove element with a specific value without returning it
print()
print(a)
# pop
print('pop')
print(a.pop()) # removes last element
print(a)
print(a.pop(2))
print(a)
print(a.pop(-1))
print(a)
# remove
print()
print('remove')
print(a.remove(8)) # remove() returns None; it deletes the first occurrence of 8 without returning it
print(a)
# Array Concatenation
print()
print('Array Concatenation')
b = arr.array('i', [1,2,3,4,5,6,7])
c = arr.array('i', [3,4,2,1,3,5,6,7,8])
d = arr.array('i')
d = b + c
print(d)
# Slicing an Array
print()
print('Slicing an Array') # This means fetching some particular values from an array
print(d)
print(d[0:5]) # Doesn't include the value on the right index
print(d[0:-2])
print(d[::-1]) # Reverse the array; this method is not preferred because it exhausts the memory
# Looping through an Array
print()
print('Looping through an Array')
print('Using for')
for x in d:
print(x, end=' ')
print()
for x in d[0:-3]:
print(x, end=' ')
print()
print('Using while')
temp = 0
while temp < d[2]:
print(d[temp], end = ' ')
temp = temp + 1 # Can use temp+=1, it's the same thing
print()
print(a)
tem = 0
while tem < len(a):
print(a[tem], end=' ')
tem += 1
print()
| 4.03125 | 4 |
vesper/mpg_ranch/nfc_detector_low_score_classifier_1_0/classifier.py | RichardLitt/Vesper | 29 | 5250 | <reponame>RichardLitt/Vesper<filename>vesper/mpg_ranch/nfc_detector_low_score_classifier_1_0/classifier.py
"""
Module containing low score classifier for MPG Ranch NFC detectors.
An instance of the `Classifier` class of this module assigns the `LowScore`
classification to a clip if the clip has no `Classification` annotation and
has a `DetectorScore` annotation whose value is less than a threshold.
This classifier is intended for use on clips created by the the
MPG Ranch Thrush Detector 1.0 and the MPG Ranch Tseep Detector 1.0.
"""
import logging
from vesper.command.annotator import Annotator
from vesper.django.app.models import AnnotationInfo, StringAnnotation
_logger = logging.getLogger()
_SCORE_THRESHOLDS = {
# For 50 percent precision on validation recordings.
'MPG Ranch Thrush Detector 1.0 40': 70,
'MPG Ranch Tseep Detector 1.0 20': 41,
# For 75 percent precision on validation recordings.
# 'MPG Ranch Thrush Detector 1.0 40': 91,
# 'MPG Ranch Tseep Detector 1.0 20': 63,
}
class Classifier(Annotator):
extension_name = 'MPG Ranch NFC Detector Low Score Classifier 1.0'
def __init__(
self, annotation_info, creating_user=None, creating_job=None,
creating_processor=None):
super().__init__(
annotation_info, creating_user, creating_job, creating_processor)
self._score_annotation_info = _get_annotation_info('Detector Score')
self._score_thresholds = _SCORE_THRESHOLDS
def annotate(self, clip):
annotated = False
classification = self._get_annotation_value(clip)
if classification is None:
# clip is unclassified
score = self._get_score(clip)
if score is not None:
# clip has a detector score
threshold = self._get_score_threshold(clip)
if threshold is not None and score < threshold:
# detector score is below threshold
self._annotate(clip, 'LowScore')
annotated = True
return annotated
def _get_score(self, clip):
try:
annotation = StringAnnotation.objects.get(
clip=clip, info=self._score_annotation_info)
except StringAnnotation.DoesNotExist:
return None
else:
return float(annotation.value)
def _get_score_threshold(self, clip):
detector = clip.creating_processor
if detector is None:
return None
else:
return self._score_thresholds.get(detector.name)
def _get_annotation_info(name):
try:
return AnnotationInfo.objects.get(name=name)
except AnnotationInfo.DoesNotExist:
raise ValueError(
'Unrecognized annotation "{}".'.format(name))
| 2.375 | 2 |
setup.py | yitzikc/athena2pd | 1 | 5251 | <filename>setup.py
from setuptools import setup, find_packages
def find_version(path):
import re
# path shall be a plain ascii tetxt file
s = open(path, 'rt').read()
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", s, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError('Version not found')
def get_requirements(filename):
with open(filename, 'r') as fh:
return [l.strip() for l in fh]
def get_long_desc(filename):
with open(filename, 'r') as fh:
return fh.read()
setup(
name='athena2pd',
packages=['athena2pd'],
version=find_version('athena2pd/__init__.py'),
    description='Helps simplify access to databases stored in Amazon Athena by using SQL and pandas DataFrames.',
long_description=get_long_desc('README.md'),
long_description_content_type='text/markdown',
author='<NAME>',
maintainer='<NAME>',
maintainer_email='<EMAIL>',
license='MIT',
install_requires=get_requirements('requirements.txt'),
zip_safe=False,
url='https://github.com/joedementri/athena2pd',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent'
],
python_requires='>=2.7,>=3.6'
) | 2.125 | 2 |
mmdet/core/ufp/__init__.py | PuAnysh/UFPMP-Det | 9 | 5252 | <reponame>PuAnysh/UFPMP-Det
from .spp import *
from .unified_foreground_packing import *
__all__ = [
'phsppog', 'UnifiedForegroundPacking'
]
| 1.070313 | 1 |
PythonBasics/ExamPreparation/FamilyTrip.py | achoraev/SoftUni | 0 | 5253 | budget = float(input())
nights = int(input())
price_night = float(input())
percent_extra = int(input())
if nights > 7:
price_night = price_night - (price_night * 0.05)
sum = nights * price_night
total_sum = sum + (budget * percent_extra / 100)
if total_sum <= budget:
print(f"Ivanovi will be left with {(budget - total_sum):.2f} leva after vacation.")
else:
print(f"{(total_sum - budget):.2f} leva needed.") | 3.703125 | 4 |
tex_live_package_manager/progress.py | csch0/SublimeText-TeX-Live-Package-Manager | 2 | 5254 | <reponame>csch0/SublimeText-TeX-Live-Package-Manager
import sublime, sublime_plugin
import threading
class ProcessQueueManager():
__shared = {}
items = []
thread = None
# Current item details
messages = None
function = None
callback = None
# Progress Bar preferences
i = 0
size = 8
add = 1
def __new__(cls, *args, **kwargs):
inst = object.__new__(cls)
inst.__dict__ = cls.__shared
return inst
def queue(self, unique_id, function, messages, callback):
print(unique_id, function, messages, callback)
self.items += [{"function": function, "messages": messages, "callback": callback}]
if not self.thread or not self.thread.is_alive():
sublime.set_timeout(lambda: self.run(), 100)
def run(self):
# If thread available and running
if self.thread and self.thread.is_alive():
# Recall run
self.progress()
sublime.set_timeout(lambda: self.run(), 100)
# Stop if thread available, not running and no item is available
elif self.thread and not self.thread.is_alive() and not self.items:
sublime.status_message(self.messages[1])
# Callback
sublime.set_timeout(self.callback, 0)
# Reset progress details
self.i = 0
self.callback = None
self.function = None
self.message = None
# If no thread availale or not running
elif not self.thread or not self.thread.is_alive():
# Check for callback of old item
if self.callback:
sublime.set_timeout(self.callback, 0)
self.callback = None
# Queue available
if self.items:
item = self.items.pop(0)
self.callback = item["callback"]
self.function = item["function"]
self.messages = item["messages"]
# Start thread for current item
self.thread = HelperThread(self.function)
self.thread.start()
# Call run to start updating progress
sublime.set_timeout(lambda: self.run(), 100)
def progress(self):
# Calculate items on the left size
before = self.i % self.size
after = self.size - (before + 1)
# Print the actual progress
sublime.status_message('%s [%s=%s]' % (self.messages[0], ' ' * before, ' ' * after))
# Invert increment if reached the end or start
if not after:
self.add = -1
elif not before:
self.add = 1
self.i += self.add
class HelperThread(threading.Thread):
def __init__(self, function):
self.function = function if isinstance(function, list) else [function]
threading.Thread.__init__(self)
def run(self):
for function in self.function:
function()
def ProgressFunction(function, messages, callback):
    t = HelperThread(function)
t.start()
Progress(t, messages[0], messages[1], callback) | 2.390625 | 2 |
moscow_routes_parser/t_mos_ru.py | rscprof/moscow_routes_parser | 0 | 5255 | <filename>moscow_routes_parser/t_mos_ru.py
import html
import json
import logging
import re
from abc import abstractmethod
from datetime import datetime, time
from typing import Optional
import requests
from moscow_routes_parser.model import Route, Timetable, Equipment, Timetable_builder
from moscow_routes_parser.model_impl import Timetable_builder_t_mos_ru
class parser_timetable:
""""Interface for parser"""
@abstractmethod
def parse(self, text: str) -> Timetable_builder:
pass
class parser_timetable_t_mos_ru(parser_timetable):
""""Parser for timetable from t.mos.ru implementation"""
def __init__(self, builder: Timetable_builder):
""""Initialize parser
:param builder: Builder for Timetable for route
"""
self.builder = lambda: builder
def parse(self, text: str) -> Timetable_builder:
"""Parse text from https://transport.mos.ru/ru/ajax/App/ScheduleController/getRoute (for format using
2022-Jan-11)
Since 12.01.2022 t.mos.ru drop data-services from results
Since 13.03.2022 added flag_has_another_direction
@param text: text for parse
@return Timetable for route
"""
result_stops = type(self.builder())()
# stops = re.finditer(r'data-stop="([^"]*?)".*?data-services="([^"]*?)".*?d-inline.*?>(.*?)<(.*?)</li>', text,
# re.M + re.S
# )
stops = re.finditer(r'data-stop="(.*?)".*?d-inline.*?>(.*?)<(.*?)</li>', text,
re.M + re.S
)
data_coords_iter = re.finditer(r'data-coords="(.*?)"', text,
re.M + re.S
)
data_coords_list = list(data_coords_iter)
if re.search(r'ic-change-a-b', text, re.M + re.S) is None:
result_stops.set_has_another_direction(False)
else:
result_stops.set_has_another_direction(True)
        # if a timetable is present
if len(data_coords_list) > 0:
data_coords = data_coords_list[0].group(1)
data_coords = html.unescape(data_coords)
data_coords = json.loads(data_coords)['features']
data_coords = iter(map(lambda feature: feature['geometry']['coordinates'], data_coords))
else:
data_coords = []
for stop in stops:
name_stop = stop.group(2)
coords_stop = next(data_coords)
description = stop.group(3)
logger = logging.getLogger(__name__)
logger.info(name_stop)
hours = re.finditer(r'dt1.*?(\d\d):(.*?)</div>\s*</div>\s*</div>', description, re.M + re.S)
timetable_stop = result_stops.add_stop()
timetable_stop.set_name(name_stop)
timetable_stop.set_coords(coords_stop)
log_timetable = ""
for hour in hours:
num_hour = int(hour.group(1))
minutes_text = hour.group(2)
log_timetable += str(num_hour) + ": "
minutes = re.finditer(r'div10([^>]*)>\s*(\d\d)', minutes_text, re.M + re.S)
for minute in minutes:
num_minute = int(minute.group(2))
color_start = minute.group(1).find('color: ')
if color_start >= 0:
quote = minute.group(1).find('"', color_start)
min_color = minute.group(1)[color_start + 7:quote]
else:
min_color = None
if not (min_color is None):
log_timetable += "{}{}".format(num_minute, min_color) + " "
pass
else:
log_timetable += str(num_minute) + " "
pass
time_flight = time(num_hour, num_minute)
timetable_stop.add_item_timetable(time_flight, min_color)
logger.info(log_timetable)
return result_stops
class Parser_routes:
@abstractmethod
def parse(self, text: str) -> [Route]:
pass
class Parser_routes_t_mos_ru(Parser_routes):
def __init__(self):
self.count = None
def parse(self, text: str) -> [Route]:
""""Parses route info from transport.mos.ru (name, id, type)
:param text: text for parsing from t.mos.ru
:return list of Route
"""
count_result = re.finditer(r'data-count-pages="(\d+)"', text, re.M + re.S)
self.count = int(list(count_result)[0].group(1))
result = re.finditer(r'<a.*?href=.*?route/(.+?)".*?<div.*?ic[ ]([a-z-]+).*?</i>\s*(\S+?)\s*</div>', text,
re.M + re.S)
list_routes = []
for route in result:
num = route.group(1)
type_route = route.group(2)
if type_route.find('-bus') >= 0:
type_route = Equipment.bus()
elif type_route.find('tramway') >= 0:
type_route = Equipment.tramway()
elif type_route.find('trolleybus') >= 0:
type_route = Equipment.trolleybus()
else:
logging.getLogger(__name__).error("Unknown type route: {}".format(type_route))
type_route = None
name = route.group(3)
list_routes.append(Route(num, type_route, name))
return list_routes
def get_route(date: datetime.date, id_route_t_mos_ru: str, direction: int,
get_route_url: str = 'https://transport.mos.ru/ru/ajax/App/ScheduleController/getRoute',
parser: parser_timetable = parser_timetable_t_mos_ru(builder=Timetable_builder_t_mos_ru())
) -> Timetable:
"""Get timetable for route by date and direction
:param date: date of timetable for route
:param id_route_t_mos_ru: id of route from t.mos.ru
:param direction: direction for route (0 or 1)
:param get_route_url URL for requesting timetable
:param parser for timetable
:return timetable for route by date and direction
"""
logger = logging.getLogger(__name__)
try:
# strange problem with SSL Cert in package
response = requests.get(get_route_url,
params={
'mgt_schedule[isNight]': '',
'mgt_schedule[date]': date.strftime("%d.%m.%Y"),
'mgt_schedule[route]': id_route_t_mos_ru,
'mgt_schedule[direction]': direction,
},
headers={'X-Requested-With': 'XMLHttpRequest'}
)
if response.status_code == 200:
logger.info("Get route #{}".format(id_route_t_mos_ru))
route_info = parser.parse(response.text)
else:
logger.error("Error status: {}".format(response.status_code))
route_info = None
except requests.exceptions.RequestException as e:
logger.error("Error " + str(e))
route_info = None
if not (route_info is None):
result = route_info.set_id_route_t_mos_ru(id_route_t_mos_ru).set_direction(direction).set_date(date).build()
if len(result.get_stops()) == 0: # Error of loading timetable without exceptions
result = None
else:
result = None
return result
def get_list_routes(work_time: int, direction: int,
parser: Parser_routes = None,
get_routes_url: str = 'https://transport.mos.ru/ru/ajax/App/ScheduleController/getRoutesList'
) -> Optional[list[Route]]:
"""get list routes by work_time and direction from transport.mos.ru
:param parser: function to parse got string
:param get_routes_url: url for requesting routes
:param work_time: work day or not (1 or 0)
:param direction: 0
:return list of Route
"""
if parser is None:
parser = Parser_routes_t_mos_ru()
page = 1
result_routes = []
finish = False
count = None
logger = logging.getLogger(__name__)
while not finish:
finish = False
repeat = True
while repeat:
repeat = False
try:
# strange problem with SSL Cert in package
response = requests.get(get_routes_url,
params={
'mgt_schedule[search]': '',
'mgt_schedule[isNight]': '',
# 'mgt_schedule[filters]': '',
'mgt_schedule[work_time]': work_time,
'page': page,
'mgt_schedule[direction]': direction,
}
, headers={'X-Requested-With': 'XMLHttpRequest'}
# , headers={'Cookie': "_ym_d=1637468102; _ym_uid=1637468102592825648; mos_id=rBEAAmGaFNawBwAOHRgWAgA=; _ga=GA1.2.1733238845.1637487830; uxs_uid=147e2110-500d-11ec-a7cb-8bb8b12c3186; KFP_DID=ee285837-cd1f-0a9b-c8a2-9cef6a4ee333; _ym_isad=2; _ym_visorc=w"}
)
if response.status_code == 200:
logger.info("Get page #{}".format(page))
routes = parser.parse(response.text)
result_routes += routes
if count is None:
count = parser.count
if not routes:
finish = True
else:
logger.error("Error status: {}".format(response.status_code))
finish = True
page = page + 1
if page > count:
finish = True
except requests.exceptions.RequestException as e:
logger.error("Error " + str(e))
repeat = True
return result_routes
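# Illustrative usage sketch (assumes network access to t.mos.ru):
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    routes = get_list_routes(work_time=1, direction=0)
    print('Fetched {} routes'.format(len(routes or [])))
    # A timetable for a single route could then be requested with, for example:
    # get_route(datetime.now().date(), '<route id from t.mos.ru>', direction=0)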
| 3.078125 | 3 |
web/api/get_summary_data.py | spudmind/spud | 2 | 5256 | from web.api import BaseAPI
from utils import mongo
import json
class DataApi(BaseAPI):
def __init__(self):
BaseAPI.__init__(self)
self._db = mongo.MongoInterface()
self.query = {}
self.fields = {
"donation_count": "$influences.electoral_commission.donation_count",
"donor_count": '$influences.electoral_commission.donor_count',
"donation_total_int": "$influences.electoral_commission.donation_total_int",
"mp_interest_relationships": "$influences.register_of_interests.relationship_count",
"lord_interest_relationships": "$influences.register_of_interests.interest_relationships",
"remuneration_count": "$influences.register_of_interests.remuneration_count",
"remuneration_total_int": "$influences.register_of_interests.remuneration_total_int",
"lobbyists_hired": "$influences.lobby_registers.lobbyist_hired"
}
def request(self, **args):
node_type = args.get("type")
category = args.get("category")
field = args.get("field")
summary = {
"influencers": self._influencers_aggregate(category, field),
#"lobby_agencies": self._influencers_aggregate(),
"political_parties": self._party_aggregate(category, field),
"mps": self._mp_aggregate(category, field),
"lords": self._lord_aggregate(category, field)
}
return {"children": summary[node_type][category]}
def _influencers_aggregate(self, category, field):
_db_table = 'api_influencers'
response = {}
if category == "electoral_commission":
# get electoral commission data
ec_fields = ["donation_total_int", "donation_count"]
top_total, top_count = self._get_top(_db_table, ec_fields)
ec = {
"donation_total": self._format_top(top_total, "influencer"),
"donation_count": self._format_top(top_count, "influencer", monetary=False)
}
response["electoral_commission"] = ec[field]
if category == "register_of_interests":
# get register of interests data
reg_fields = [
"remuneration_total_int",
"mp_interest_relationships",
"remuneration_count"
]
top_total, top_relationships, top_count = self._get_top(_db_table, reg_fields)
reg = {
"remuneration_total": self._format_top(top_total, "influencer"),
"interest_relationships": self._format_top(
top_relationships, "influencer", monetary=False
),
"remuneration_count": self._format_top(
top_count, "influencer", monetary=False
)
}
response["register_of_interests"] = reg[field]
return response
def _party_aggregate(self, category, field):
_db_table = 'api_political_parties'
response = {}
if category == "political_parties":
ec_fields = ["donation_total_int", "donation_count"]
top_total, top_count = self._get_top(_db_table, ec_fields)
result = {
"donation_total": self._format_top(top_total, "party"),
"donation_count": self._format_top(top_count, "party", monetary=False)
}
response["electoral_commission"] = result[field]
return response
def _mp_aggregate(self, category, field):
_db_table = 'api_mps'
response = {}
if category == "electoral_commission":
# get electoral commission data
ec_fields = ["donation_total_int", "donor_count"]
top_total, top_count = self._get_top(_db_table, ec_fields)
ec = {
"donation_total": self._format_top(top_total, "mp"),
"donor_count": self._format_top(top_count, "mp", monetary=False)
}
response["electoral_commission"] = ec[field]
if category == "register_of_interests":
# get register of interests data
reg_fields = [
"remuneration_total_int",
"lord_interest_relationships",
"remuneration_count"
]
top_total, top_relationships, top_count = self._get_top(_db_table, reg_fields)
reg = {
"remuneration_total": self._format_top(top_total, "mp"),
"interest_relationships": self._format_top(
top_relationships, "mp", monetary=False
),
"remuneration_count": self._format_top(
top_count, "mp", monetary=False
)
}
response["register_of_interests"] = reg[field]
return response
def _lord_aggregate(self, category, field):
_db_table = 'api_lords'
        response = {}
if category == "electoral_commission":
# get electoral commission data
ec_fields = ["donation_total_int", "donation_count"]
top_total, top_count = self._get_top(_db_table, ec_fields)
ec = {
"donation_total": self._format_top(top_total, "lord"),
"donation_count": self._format_top(top_count, "lord", monetary=False)
}
response["electoral_commission"] = ec[field]
if category == "register_of_interests":
# get register of interests data
reg_fields = ["lord_interest_relationships"]
top_relationships = self._get_top(_db_table, reg_fields)[0]
reg = {
"interest_relationships": self._format_top(
top_relationships, "lord", monetary=False
)
}
response["register_of_interests"] = reg[field]
return response
def _format_top(self, results, label, monetary=True):
updated = []
for entry in results:
new = {
"name": entry["_id"],
"details_url": self.named_entity_resources(
entry["_id"], label
)[0]
}
if monetary:
new["total_int"] = entry["total"]
new["total"] = self._format_number(entry["total"])
else:
new["total"] = entry["total"]
updated.append(new)
return updated
def _get_aggregate(self, table, field_list):
return [self._db.sum(table, field=self.fields[x]) for x in field_list]
def _get_top(self, table, field_list):
return [self._db.top(table, field=self.fields[x]) for x in field_list]
| 2.359375 | 2 |
neutron/common/ovn/utils.py | guillermomolina/neutron | 3 | 5257 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import inspect
import os
import re
import netaddr
from neutron_lib.api.definitions import external_net
from neutron_lib.api.definitions import extra_dhcp_opt as edo_ext
from neutron_lib.api.definitions import l3
from neutron_lib.api.definitions import port_security as psec
from neutron_lib.api.definitions import portbindings
from neutron_lib.api import validators
from neutron_lib import constants as const
from neutron_lib import context as n_context
from neutron_lib import exceptions as n_exc
from neutron_lib.plugins import directory
from neutron_lib.utils import net as n_utils
from oslo_config import cfg
from oslo_log import log
from oslo_serialization import jsonutils
from oslo_utils import netutils
from oslo_utils import strutils
from ovsdbapp import constants as ovsdbapp_const
from neutron._i18n import _
from neutron.common.ovn import constants
from neutron.common.ovn import exceptions as ovn_exc
from neutron.db import models_v2
from neutron.objects import ports as ports_obj
LOG = log.getLogger(__name__)
CONF = cfg.CONF
DNS_RESOLVER_FILE = "/etc/resolv.conf"
AddrPairsDiff = collections.namedtuple(
'AddrPairsDiff', ['added', 'removed', 'changed'])
PortExtraDHCPValidation = collections.namedtuple(
'PortExtraDHCPValidation', ['failed', 'invalid_ipv4', 'invalid_ipv6'])
def ovn_name(id):
# The name of the OVN entry will be neutron-<UUID>
# This is due to the fact that the OVN application checks if the name
# is a UUID. If so then there will be no matches.
# We prefix the UUID to enable us to use the Neutron UUID when
# updating, deleting etc.
return "%s%s" % (constants.OVN_NAME_PREFIX, id)
def ovn_lrouter_port_name(id):
# The name of the OVN lrouter port entry will be lrp-<UUID>
# This is to distinguish with the name of the connected lswitch patch port,
# which is named with neutron port uuid, so that OVS patch ports are
# generated properly. The pairing patch port names will be:
# - patch-lrp-<UUID>-to-<UUID>
# - patch-<UUID>-to-lrp-<UUID>
# lrp stands for Logical Router Port
return constants.LRP_PREFIX + '%s' % id
def ovn_cr_lrouter_port_name(_id):
# The name of the OVN chassisredirect lrouter port entry will be
# cr-lrp-<UUID>
return 'cr-lrp-%s' % _id
def ovn_provnet_port_name(network_id):
# The name of OVN lswitch provider network port entry will be
# provnet-<Network-UUID>. The port is created for network having
# provider:physical_network attribute.
return constants.OVN_PROVNET_PORT_NAME_PREFIX + '%s' % network_id
def ovn_vhu_sockpath(sock_dir, port_id):
# Frame the socket path of a virtio socket
return os.path.join(
sock_dir,
# this parameter will become the virtio port name,
# so it should not exceed IFNAMSIZ(16).
(const.VHOST_USER_DEVICE_PREFIX + port_id)[:14])
def ovn_addrset_name(sg_id, ip_version):
# The name of the address set for the given security group id and ip
# version. The format is:
# as-<ip version>-<security group uuid>
# with all '-' replaced with '_'. This replacement is necessary
# because OVN doesn't support '-' in an address set name.
return ('as-%s-%s' % (ip_version, sg_id)).replace('-', '_')
def ovn_pg_addrset_name(sg_id, ip_version):
# The name of the address set for the given security group id modelled as a
# Port Group and ip version. The format is:
# pg-<security group uuid>-<ip version>
# with all '-' replaced with '_'. This replacement is necessary
# because OVN doesn't support '-' in an address set name.
return ('pg-%s-%s' % (sg_id, ip_version)).replace('-', '_')
def ovn_port_group_name(sg_id):
# The name of the port group for the given security group id.
# The format is: pg-<security group uuid>.
return ('pg-%s' % sg_id).replace('-', '_')
def is_network_device_port(port):
return port.get('device_owner', '').startswith(
const.DEVICE_OWNER_PREFIXES)
def _is_dhcp_disabled(dhcp_opt):
return (dhcp_opt['opt_name'] == constants.DHCP_DISABLED_OPT and
dhcp_opt.get('opt_value', '').lower() == 'true')
def validate_port_extra_dhcp_opts(port):
"""Validate port's extra DHCP options.
:param port: A neutron port.
:returns: A PortExtraDHCPValidation object.
"""
invalid = {const.IP_VERSION_4: [], const.IP_VERSION_6: []}
failed = False
for edo in port.get(edo_ext.EXTRADHCPOPTS, []):
ip_version = edo['ip_version']
opt_name = edo['opt_name']
# If DHCP is disabled for this port via this special option,
# always succeed the validation
if _is_dhcp_disabled(edo):
failed = False
break
if opt_name not in constants.SUPPORTED_DHCP_OPTS_MAPPING[ip_version]:
invalid[ip_version].append(opt_name)
failed = True
return PortExtraDHCPValidation(
failed=failed,
invalid_ipv4=invalid[const.IP_VERSION_4] if failed else [],
invalid_ipv6=invalid[const.IP_VERSION_6] if failed else [])
def get_lsp_dhcp_opts(port, ip_version):
# Get dhcp options from Neutron port, for setting DHCP_Options row
# in OVN.
lsp_dhcp_disabled = False
lsp_dhcp_opts = {}
if is_network_device_port(port):
lsp_dhcp_disabled = True
else:
mapping = constants.SUPPORTED_DHCP_OPTS_MAPPING[ip_version]
for edo in port.get(edo_ext.EXTRADHCPOPTS, []):
if edo['ip_version'] != ip_version:
continue
if _is_dhcp_disabled(edo):
# OVN native DHCP is disabled on this port
lsp_dhcp_disabled = True
# Make sure return value behavior not depends on the order and
# content of the extra DHCP options for the port
lsp_dhcp_opts.clear()
break
if edo['opt_name'] not in mapping:
                LOG.warning('The DHCP option %(opt_name)s on port %(port)s '
                            'is not supported by OVN, ignoring it',
                            {'opt_name': edo['opt_name'], 'port': port['id']})
continue
opt = mapping[edo['opt_name']]
lsp_dhcp_opts[opt] = edo['opt_value']
return (lsp_dhcp_disabled, lsp_dhcp_opts)
def is_lsp_trusted(port):
return n_utils.is_port_trusted(port) if port.get('device_owner') else False
def is_lsp_ignored(port):
# Since the floating IP port is not bound to any chassis, packets from vm
# destined to floating IP will be dropped. To overcome this, we do not
# create/update floating IP port in OVN.
return port.get('device_owner') in [const.DEVICE_OWNER_FLOATINGIP]
def get_lsp_security_groups(port, skip_trusted_port=True):
# In other agent link OVS, skipping trusted port is processed in security
# groups RPC. We haven't that step, so we do it here.
return [] if (skip_trusted_port and is_lsp_trusted(port)
) else port.get('security_groups', [])
def is_snat_enabled(router):
return router.get(l3.EXTERNAL_GW_INFO, {}).get('enable_snat', True)
def is_port_security_enabled(port):
return port.get(psec.PORTSECURITY)
def is_security_groups_enabled(port):
return port.get(constants.PORT_SECURITYGROUPS)
def validate_and_get_data_from_binding_profile(port):
if (constants.OVN_PORT_BINDING_PROFILE not in port or
not validators.is_attr_set(
port[constants.OVN_PORT_BINDING_PROFILE])):
return {}
param_set = {}
param_dict = {}
for param_set in constants.OVN_PORT_BINDING_PROFILE_PARAMS:
param_keys = param_set.keys()
for param_key in param_keys:
try:
param_dict[param_key] = (port[
constants.OVN_PORT_BINDING_PROFILE][param_key])
except KeyError:
pass
if len(param_dict) == 0:
continue
if len(param_dict) != len(param_keys):
msg = _('Invalid binding:profile. %s are all '
'required.') % param_keys
raise n_exc.InvalidInput(error_message=msg)
if (len(port[constants.OVN_PORT_BINDING_PROFILE]) != len(
param_keys)):
msg = _('Invalid binding:profile. too many parameters')
raise n_exc.InvalidInput(error_message=msg)
break
if not param_dict:
return {}
for param_key, param_type in param_set.items():
if param_type is None:
continue
param_value = param_dict[param_key]
if not isinstance(param_value, param_type):
msg = _('Invalid binding:profile. %(key)s %(value)s '
'value invalid type') % {'key': param_key,
'value': param_value}
raise n_exc.InvalidInput(error_message=msg)
# Make sure we can successfully look up the port indicated by
# parent_name. Just let it raise the right exception if there is a
# problem.
if 'parent_name' in param_set:
plugin = directory.get_plugin()
plugin.get_port(n_context.get_admin_context(),
param_dict['parent_name'])
if 'tag' in param_set:
tag = int(param_dict['tag'])
if tag < 0 or tag > 4095:
msg = _('Invalid binding:profile. tag "%s" must be '
'an integer between 0 and 4095, inclusive') % tag
raise n_exc.InvalidInput(error_message=msg)
return param_dict
def is_dhcp_options_ignored(subnet):
# Don't insert DHCP_Options entry for v6 subnet with 'SLAAC' as
# 'ipv6_address_mode', since DHCPv6 shouldn't work for this mode.
return (subnet['ip_version'] == const.IP_VERSION_6 and
subnet.get('ipv6_address_mode') == const.IPV6_SLAAC)
def get_ovn_ipv6_address_mode(address_mode):
return constants.OVN_IPV6_ADDRESS_MODES[address_mode]
def get_revision_number(resource, resource_type):
"""Get the resource's revision number based on its type."""
if resource_type in (constants.TYPE_NETWORKS,
constants.TYPE_PORTS,
constants.TYPE_SECURITY_GROUP_RULES,
constants.TYPE_ROUTERS,
constants.TYPE_ROUTER_PORTS,
constants.TYPE_SECURITY_GROUPS,
constants.TYPE_FLOATINGIPS, constants.TYPE_SUBNETS):
return resource['revision_number']
else:
raise ovn_exc.UnknownResourceType(resource_type=resource_type)
def remove_macs_from_lsp_addresses(addresses):
"""Remove the mac addreses from the Logical_Switch_Port addresses column.
:param addresses: The list of addresses from the Logical_Switch_Port.
Example: ["80:fa:5b:06:72:b7 172.16.58.3",
"ff:ff:ff:ff:ff:ff 10.0.0.2"]
    :returns: A list of IP addresses (v4 and v6)
"""
ip_list = []
for addr in addresses:
ip_list.extend([x for x in addr.split() if
(netutils.is_valid_ipv4(x) or
netutils.is_valid_ipv6(x))])
return ip_list
def get_allowed_address_pairs_ip_addresses(port):
"""Return a list of IP addresses from port's allowed_address_pairs.
:param port: A neutron port
    :returns: A list of IP addresses (v4 and v6)
"""
return [x['ip_address'] for x in port.get('allowed_address_pairs', [])
if 'ip_address' in x]
def get_allowed_address_pairs_ip_addresses_from_ovn_port(ovn_port):
"""Return a list of IP addresses from ovn port.
Return a list of IP addresses equivalent of Neutron's port
allowed_address_pairs column using the data in the OVN port.
:param ovn_port: A OVN port
    :returns: A list of IP addresses (v4 and v6)
"""
addresses = remove_macs_from_lsp_addresses(ovn_port.addresses)
port_security = remove_macs_from_lsp_addresses(ovn_port.port_security)
return [x for x in port_security if x not in addresses]
def get_ovn_port_security_groups(ovn_port, skip_trusted_port=True):
info = {'security_groups': ovn_port.external_ids.get(
constants.OVN_SG_IDS_EXT_ID_KEY, '').split(),
'device_owner': ovn_port.external_ids.get(
constants.OVN_DEVICE_OWNER_EXT_ID_KEY, '')}
return get_lsp_security_groups(info, skip_trusted_port=skip_trusted_port)
def get_ovn_port_addresses(ovn_port):
addresses = remove_macs_from_lsp_addresses(ovn_port.addresses)
port_security = remove_macs_from_lsp_addresses(ovn_port.port_security)
return list(set(addresses + port_security))
def sort_ips_by_version(addresses):
ip_map = {'ip4': [], 'ip6': []}
for addr in addresses:
ip_version = netaddr.IPNetwork(addr).version
ip_map['ip%d' % ip_version].append(addr)
return ip_map
def is_lsp_router_port(port):
return port.get('device_owner') in const.ROUTER_PORT_OWNERS
def get_lrouter_ext_gw_static_route(ovn_router):
return [route for route in getattr(ovn_router, 'static_routes', []) if
strutils.bool_from_string(getattr(
route, 'external_ids', {}).get(
constants.OVN_ROUTER_IS_EXT_GW, 'false'))]
def get_lrouter_snats(ovn_router):
return [n for n in getattr(ovn_router, 'nat', []) if n.type == 'snat']
def get_lrouter_non_gw_routes(ovn_router):
routes = []
for route in getattr(ovn_router, 'static_routes', []):
external_ids = getattr(route, 'external_ids', {})
if strutils.bool_from_string(
external_ids.get(constants.OVN_ROUTER_IS_EXT_GW, 'false')):
continue
routes.append({'destination': route.ip_prefix,
'nexthop': route.nexthop})
return routes
def is_ovn_l3(l3_plugin):
return hasattr(l3_plugin, '_ovn_client_inst')
def get_system_dns_resolvers(resolver_file=DNS_RESOLVER_FILE):
resolvers = []
if not os.path.exists(resolver_file):
return resolvers
with open(resolver_file, 'r') as rconf:
for line in rconf.readlines():
if not line.startswith('nameserver'):
continue
line = line.split('nameserver')[1].strip()
ipv4 = re.search(r'^(?:[0-9]{1,3}\.){3}[0-9]{1,3}', line)
if ipv4:
resolvers.append(ipv4.group(0))
return resolvers
def get_port_subnet_ids(port):
fixed_ips = list(port['fixed_ips'])
return [f['subnet_id'] for f in fixed_ips]
def get_method_class(method):
if not inspect.ismethod(method):
return
return method.__self__.__class__
def ovn_metadata_name(id_):
"""Return the OVN metadata name based on an id."""
return 'metadata-%s' % id_
def is_gateway_chassis_invalid(chassis_name, gw_chassis,
physnet, chassis_physnets):
"""Check if gateway chassis is invalid
@param chassis_name: gateway chassis name
@type chassis_name: string
@param gw_chassis: List of gateway chassis in the system
@type gw_chassis: []
@param physnet: physical network associated to chassis_name
@type physnet: string
@param chassis_physnets: Dictionary linking chassis with their physnets
@type chassis_physnets: {}
@return Boolean
"""
if chassis_name == constants.OVN_GATEWAY_INVALID_CHASSIS:
return True
elif chassis_name not in chassis_physnets:
return True
elif physnet and physnet not in chassis_physnets.get(chassis_name):
return True
elif gw_chassis and chassis_name not in gw_chassis:
return True
return False
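# Illustrative check: a chassis that is listed in gw_chassis and bound to the requested
# physnet is considered valid, e.g.
#   is_gateway_chassis_invalid(
#       chassis_name='hv1', gw_chassis=['hv1', 'hv2'], physnet='physnet1',
#       chassis_physnets={'hv1': ['physnet1'], 'hv2': ['physnet2']})  -> False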
def is_provider_network(network):
return network.get(external_net.EXTERNAL, False)
def is_neutron_dhcp_agent_port(port):
"""Check if the given DHCP port belongs to Neutron DHCP agents
The DHCP ports with the device_id equals to 'reserved_dhcp_port'
or starting with the word 'dhcp' belongs to the Neutron DHCP agents.
"""
return (port['device_owner'] == const.DEVICE_OWNER_DHCP and
(port['device_id'] == const.DEVICE_ID_RESERVED_DHCP_PORT or
port['device_id'].startswith('dhcp')))
def compute_address_pairs_diff(ovn_port, neutron_port):
"""Compute the differences in the allowed_address_pairs field."""
ovn_ap = get_allowed_address_pairs_ip_addresses_from_ovn_port(
ovn_port)
neutron_ap = get_allowed_address_pairs_ip_addresses(neutron_port)
added = set(neutron_ap) - set(ovn_ap)
removed = set(ovn_ap) - set(neutron_ap)
return AddrPairsDiff(added, removed, changed=any(added or removed))
def get_ovn_cms_options(chassis):
"""Return the list of CMS options in a Chassis."""
return [opt.strip() for opt in chassis.external_ids.get(
constants.OVN_CMS_OPTIONS, '').split(',')]
def is_gateway_chassis(chassis):
"""Check if the given chassis is a gateway chassis"""
return constants.CMS_OPT_CHASSIS_AS_GW in get_ovn_cms_options(chassis)
def get_port_capabilities(port):
"""Return a list of port's capabilities"""
return port.get(portbindings.PROFILE, {}).get('capabilities', [])
def get_port_id_from_gwc_row(row):
"""Return a port_id from gwc row
The Gateway_Chassis row stores router port_id in
the row name attribute:
<prefix>-<port_id>_<chassis_id>
:param row: A Gateway_Chassis table row.
:returns: String containing router port_id.
"""
return constants.RE_PORT_FROM_GWC.search(row.name).group(2)
def get_chassis_availability_zones(chassis):
"""Return a list of availability zones from a given OVN Chassis."""
azs = set()
if not chassis:
return azs
opt_key = constants.CMS_OPT_AVAILABILITY_ZONES + '='
for opt in get_ovn_cms_options(chassis):
if not opt.startswith(opt_key):
continue
values = opt.split('=')[1]
azs = {az.strip() for az in values.split(':') if az.strip()}
break
return azs
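# Example, assuming constants.OVN_CMS_OPTIONS names the 'ovn-cms-options' external id:
# a Chassis whose external_ids contain
#   ovn-cms-options = "enable-chassis-as-gw,availability-zones=az0:az1"
# yields {'az0', 'az1'}.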
def get_chassis_in_azs(chassis_list, az_list):
"""Return a set of Chassis that belongs to the AZs.
Given a list of Chassis and a list of availability zones (AZs),
return a set of Chassis that belongs to one or more AZs.
:param chassis_list: A list of Chassis objects
:param az_list: A list of availability zones
:returns: A set of Chassis names
"""
chassis = set()
for ch in chassis_list:
chassis_azs = get_chassis_availability_zones(ch)
if chassis_azs.intersection(az_list):
chassis.add(ch.name)
return chassis
def get_gateway_chassis_without_azs(chassis_list):
"""Return a set of Chassis that does not belong to any AZs.
Filter a list of Chassis and return only the Chassis that does not
belong to any availability zones.
:param chassis_list: A list of Chassis objects
:returns: A set of Chassis names
"""
return {ch.name for ch in chassis_list if is_gateway_chassis(ch) and not
get_chassis_availability_zones(ch)}
def parse_ovn_lb_port_forwarding(ovn_rtr_lb_pfs):
"""Return a dictionary compatible with port forwarding from OVN lb."""
result = {}
for ovn_lb in ovn_rtr_lb_pfs:
ext_ids = ovn_lb.external_ids
fip_id = ext_ids.get(constants.OVN_FIP_EXT_ID_KEY)
protocol = (ovn_lb.protocol[0]
if ovn_lb.protocol else ovsdbapp_const.PROTO_TCP)
fip_dict = result.get(fip_id, {})
fip_dict_proto = fip_dict.get(protocol, set())
ovn_vips = ovn_lb.vips
for vip, ips in ovn_vips.items():
for ip in ips.split(','):
fip_dict_proto.add("{} {}".format(vip, ip))
fip_dict[protocol] = fip_dict_proto
result[fip_id] = fip_dict
return result
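# Shape of the returned mapping: floating IP id -> protocol -> set of "<vip> <member>"
# strings, for example:
#   {'<fip uuid>': {'tcp': {'172.24.4.8:2222 10.0.0.10:22'}}}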
def get_network_name_from_datapath(datapath):
return datapath.external_ids['name'].replace('neutron-', '')
def is_port_external(port):
# This port is represented in OVN DB as lsp.type=external
capabilities = []
vnic_type = portbindings.VNIC_NORMAL
if isinstance(port, dict):
capabilities = get_port_capabilities(port)
vnic_type = port.get(portbindings.VNIC_TYPE,
portbindings.VNIC_NORMAL)
else:
if isinstance(port, models_v2.Port):
bindings = port.port_bindings
elif isinstance(port, ports_obj.Port):
bindings = port.bindings
else: # What else could be "port"?
bindings = []
if bindings:
profile = bindings[0].get('profile')
if profile:
# DB object, not OVO, stores the dict in JSON.
profile = (jsonutils.loads(profile) if isinstance(profile, str)
else profile)
capabilities = profile.get('capabilities', [])
vnic_type = bindings[0].get('vnic_type', portbindings.VNIC_NORMAL)
return (vnic_type in constants.EXTERNAL_PORT_TYPES and
constants.PORT_CAP_SWITCHDEV not in capabilities)
| 1.390625 | 1 |
ens/exceptions.py | pjryan93/web3.py | 326 | 5258 | import idna
class AddressMismatch(ValueError):
'''
In order to set up reverse resolution correctly, the ENS name should first
point to the address. This exception is raised if the name does
not currently point to the address.
'''
pass
class InvalidName(idna.IDNAError):
'''
This exception is raised if the provided name does not meet
the syntax standards specified in `EIP 137 name syntax
<https://github.com/ethereum/EIPs/blob/master/EIPS/eip-137.md#name-syntax>`_.
For example: names may not start with a dot, or include a space.
'''
pass
class UnauthorizedError(Exception):
'''
Raised if the sending account is not the owner of the name
you are trying to modify. Make sure to set ``from`` in the
``transact`` keyword argument to the owner of the name.
'''
pass
class UnownedName(Exception):
'''
Raised if you are trying to modify a name that no one owns.
If working on a subdomain, make sure the subdomain gets created
first with :meth:`~ens.main.ENS.setup_address`.
'''
pass
class BidTooLow(ValueError):
'''
Raised if you bid less than the minimum amount
'''
pass
class InvalidBidHash(ValueError):
'''
Raised if you supply incorrect data to generate the bid hash.
'''
pass
class InvalidLabel(ValueError):
'''
Raised if you supply an invalid label
'''
pass
class OversizeTransaction(ValueError):
'''
Raised if a transaction you are trying to create would cost so
much gas that it could not fit in a block.
For example: when you try to start too many auctions at once.
'''
pass
class UnderfundedBid(ValueError):
'''
Raised if you send less wei with your bid than you declared
as your intent to bid.
'''
pass
| 2.984375 | 3 |
Submods/MAS Additions/MASM/scripts/midi_input.py | CaptainHorse/MAS-Additions | 13 | 5259 | <filename>Submods/MAS Additions/MASM/scripts/midi_input.py
import mido
from socketer import MASM
inPort = None
doReadInput = False
def Start():
global inPort
try:
print(f"MIDI inputs: {mido.get_input_names()}")
inPort = mido.open_input()
print(f"MIDI input open: {inPort}")
except Exception as e:
inPort = None
print(f"Could not open MIDI input: {e}")
def Update():
global inPort
global doReadInput
if inPort is not None:
if doReadInput and MASM.hasDataBool("MIDI_STOP"):
doReadInput = False
elif not doReadInput and MASM.hasDataBool("MIDI_START"):
doReadInput = True
for msg in inPort.iter_pending():
if MASM.hasDataCheck("MIDI_KEYMAPKEY"):
bytes = msg.bytes()
if len(bytes) >= 3:
MASM.hasDataBool("MIDI_KEYMAPKEY")
MASM.sendData("MIDI_KEY", bytes[1])
elif doReadInput: # We want to clear old pending messages but not send them if input is disabled
bytes = msg.bytes()
if len(bytes) >= 3:
if bytes[0] == 144 and bytes[2] > 0:
MASM.sendData(f"MIDI_NOTE.{bytes[1]}", bytes[2])
elif bytes[0] == 128 or bytes[2] == 0:
MASM.sendData(f"MIDI_NOTE.{bytes[1]}", 0) | 2.40625 | 2 |
dash_carbon_components/Column.py | Matheus-Rangel/dash-carbon-components | 4 | 5260 | <reponame>Matheus-Rangel/dash-carbon-components
# AUTO GENERATED FILE - DO NOT EDIT
from dash.development.base_component import Component, _explicitize_args
class Column(Component):
"""A Column component.
Row Column
Keyword arguments:
- children (list of a list of or a singular dash component, string or numbers | a list of or a singular dash component, string or number; optional): The children of the element
- style (dict; optional): The inline styles
- id (string; optional): The ID of this component, used to identify dash components
in callbacks. The ID needs to be unique across all of the
components in an app.
- className (string; default ''): The class of the element
- columnSizes (list of strings; optional): The size of the column with the display size, sm-4, lg-16 ...
- offsetSizes (list of strings; optional): The size of the offset with the display size, lg-2 ..."""
@_explicitize_args
def __init__(self, children=None, style=Component.UNDEFINED, id=Component.UNDEFINED, className=Component.UNDEFINED, columnSizes=Component.UNDEFINED, offsetSizes=Component.UNDEFINED, **kwargs):
self._prop_names = ['children', 'style', 'id', 'className', 'columnSizes', 'offsetSizes']
self._type = 'Column'
self._namespace = 'dash_carbon_components'
self._valid_wildcard_attributes = []
self.available_properties = ['children', 'style', 'id', 'className', 'columnSizes', 'offsetSizes']
self.available_wildcard_properties = []
_explicit_args = kwargs.pop('_explicit_args')
_locals = locals()
_locals.update(kwargs) # For wildcard attrs
args = {k: _locals[k] for k in _explicit_args if k != 'children'}
for k in []:
if k not in args:
raise TypeError(
'Required argument `' + k + '` was not specified.')
super(Column, self).__init__(children=children, **args)
| 2.078125 | 2 |
src/backend/schemas/vps.py | ddddhm1/LuWu | 658 | 5261 | from typing import List
from typing import Optional
from typing import Union
from models.vps import VpsStatus
from schemas.base import APIModel
from schemas.base import BasePagination
from schemas.base import BaseSchema
from schemas.base import BaseSuccessfulResponseModel
class VpsSshKeySchema(APIModel):
name: str
public_key: str = None
private_key: str = None
isp_id: int
ssh_key_id: Optional[str]
date_created: Optional[str]
fingerprint: Optional[str]
class VpsSpecPlanSchema(APIModel):
name: str
plan_code: Union[str, int]
region_codes: List = None
bandwidth: float
ram: int
vcpu: int
disk: int
price_monthly: Union[float, int, str] = None
price_hourly: Union[float, int, str] = None
price_yearly: Union[float, int, str] = None
class VpsSpecRegionSchema(APIModel):
name: str
region_code: Union[str, int]
features: List[str] = None
plan_codes: List[Union[str, int]] = []
class VpsSpecOsSchema(APIModel):
name: str
os_code: Union[str, int]
region_codes: List[Union[str, int]] = []
plan_codes: List[Union[str, int]] = []
class VpsSpecSchema(APIModel):
region: List[VpsSpecRegionSchema] = []
plan: List[VpsSpecPlanSchema] = []
os: List[VpsSpecOsSchema] = []
class VpsSpecResponse(BaseSuccessfulResponseModel):
result: VpsSpecSchema
class VpsCreateSchema(APIModel):
hostname: str
isp_id: int
region_code: str
os_code: str
plan_code: str
ssh_keys: List[str] = []
status: int = VpsStatus.init
remark: str = None
class VpsItemSchema(BaseSchema):
isp_id: int
ip: Union[int, str, None]
server_id: Optional[str]
hostname: str
os: Optional[str]
plan: Optional[str]
region: Optional[str]
status: int
status_name: str
status_msg: Optional[str]
isp_provider_name: str
class VpsItemResponse(BaseSuccessfulResponseModel):
result: VpsItemSchema
class VpsPaginationSchema(BasePagination):
items: Optional[List[VpsItemSchema]]
class VpsPaginationResponse(BaseSuccessfulResponseModel):
result: VpsPaginationSchema
class VpsSshKeyResponseSchema(BaseSuccessfulResponseModel):
result: List[VpsSshKeySchema]
| 2.203125 | 2 |
main.py | hari-sh/sigplot | 0 | 5262 | <filename>main.py
import sigplot as sp
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
matplotlib.rcParams['toolbar'] = 'None'
plt.style.use('dark_background')
fig = plt.figure()
# seed = np.linspace(3, 7, 1000)
# a = (np.sin(2 * np.pi * seed))
# b = (np.cos(2 * np.pi * seed))
# sp.correlate(fig, b, a, 300)
t = np.linspace(0, 1, 500)
b = (np.cos(2 * np.pi * t))
# x = np.concatenate([np.zeros(500), signal.sawtooth(2 * np.pi * 5 * t), np.zeros(500), np.ones(120), np.zeros(500)])
x = np.concatenate([np.zeros(500), np.ones(500), np.zeros(500)])
sp.fourier_series(fig, x, 100, 200, 200)
plt.show()
# WriteToVideo("twoPulse.mp4", anim);
| 2.4375 | 2 |
test/vanilla/version-tolerant/Expected/AcceptanceTests/UrlVersionTolerant/urlversiontolerant/operations/_operations.py | msyyc/autorest.python | 0 | 5263 | <filename>test/vanilla/version-tolerant/Expected/AcceptanceTests/UrlVersionTolerant/urlversiontolerant/operations/_operations.py<gh_stars>0
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
import functools
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar
import warnings
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from msrest import Serializer
from .._vendor import _format_url_section
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
def build_paths_get_boolean_true_request(**kwargs: Any) -> HttpRequest:
bool_path = kwargs.pop("bool_path", True) # type: bool
accept = "application/json"
# Construct URL
url = "/paths/bool/true/{boolPath}"
path_format_arguments = {
"boolPath": _SERIALIZER.url("bool_path", bool_path, "bool"),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, headers=header_parameters, **kwargs)
def build_paths_get_boolean_false_request(**kwargs: Any) -> HttpRequest:
bool_path = kwargs.pop("bool_path", False) # type: bool
accept = "application/json"
# Construct URL
url = "/paths/bool/false/{boolPath}"
path_format_arguments = {
"boolPath": _SERIALIZER.url("bool_path", bool_path, "bool"),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, headers=header_parameters, **kwargs)
def build_paths_get_int_one_million_request(**kwargs: Any) -> HttpRequest:
int_path = kwargs.pop("int_path", 1000000) # type: int
accept = "application/json"
# Construct URL
url = "/paths/int/1000000/{intPath}"
path_format_arguments = {
"intPath": _SERIALIZER.url("int_path", int_path, "int"),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, headers=header_parameters, **kwargs)
def build_paths_get_int_negative_one_million_request(**kwargs: Any) -> HttpRequest:
int_path = kwargs.pop("int_path", -1000000) # type: int
accept = "application/json"
# Construct URL
url = "/paths/int/-1000000/{intPath}"
path_format_arguments = {
"intPath": _SERIALIZER.url("int_path", int_path, "int"),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, headers=header_parameters, **kwargs)
def build_paths_get_ten_billion_request(**kwargs: Any) -> HttpRequest:
long_path = kwargs.pop("long_path", 10000000000) # type: int
accept = "application/json"
# Construct URL
url = "/paths/long/10000000000/{longPath}"
path_format_arguments = {
"longPath": _SERIALIZER.url("long_path", long_path, "long"),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, headers=header_parameters, **kwargs)
def build_paths_get_negative_ten_billion_request(**kwargs: Any) -> HttpRequest:
long_path = kwargs.pop("long_path", -10000000000) # type: int
accept = "application/json"
# Construct URL
url = "/paths/long/-10000000000/{longPath}"
path_format_arguments = {
"longPath": _SERIALIZER.url("long_path", long_path, "long"),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, headers=header_parameters, **kwargs)
def build_paths_float_scientific_positive_request(**kwargs: Any) -> HttpRequest:
float_path = kwargs.pop("float_path", 103400000000000000000) # type: float
accept = "application/json"
# Construct URL
url = "/paths/float/1.034E+20/{floatPath}"
path_format_arguments = {
"floatPath": _SERIALIZER.url("float_path", float_path, "float"),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, headers=header_parameters, **kwargs)
def build_paths_float_scientific_negative_request(**kwargs: Any) -> HttpRequest:
float_path = kwargs.pop("float_path", -1.034e-20) # type: float
accept = "application/json"
# Construct URL
url = "/paths/float/-1.034E-20/{floatPath}"
path_format_arguments = {
"floatPath": _SERIALIZER.url("float_path", float_path, "float"),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, headers=header_parameters, **kwargs)
def build_paths_double_decimal_positive_request(**kwargs: Any) -> HttpRequest:
double_path = kwargs.pop("double_path", 9999999.999) # type: float
accept = "application/json"
# Construct URL
url = "/paths/double/9999999.999/{doublePath}"
path_format_arguments = {
"doublePath": _SERIALIZER.url("double_path", double_path, "float"),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, headers=header_parameters, **kwargs)
def build_paths_double_decimal_negative_request(**kwargs: Any) -> HttpRequest:
double_path = kwargs.pop("double_path", -9999999.999) # type: float
accept = "application/json"
# Construct URL
url = "/paths/double/-9999999.999/{doublePath}"
path_format_arguments = {
"doublePath": _SERIALIZER.url("double_path", double_path, "float"),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, headers=header_parameters, **kwargs)
def build_paths_string_unicode_request(**kwargs: Any) -> HttpRequest:
string_path = kwargs.pop("string_path", "啊齄丂狛狜隣郎隣兀﨩") # type: str
accept = "application/json"
# Construct URL
url = "/paths/string/unicode/{stringPath}"
path_format_arguments = {
"stringPath": _SERIALIZER.url("string_path", string_path, "str"),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, headers=header_parameters, **kwargs)
def build_paths_string_url_encoded_request(**kwargs: Any) -> HttpRequest:
string_path = kwargs.pop("string_path", "begin!*'();:@ &=+$,/?#[]end") # type: str
accept = "application/json"
# Construct URL
url = "/paths/string/begin%21%2A%27%28%29%3B%3A%40%20%26%3D%2B%24%2C%2F%3F%23%5B%5Dend/{stringPath}"
path_format_arguments = {
"stringPath": _SERIALIZER.url("string_path", string_path, "str"),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, headers=header_parameters, **kwargs)
def build_paths_string_url_non_encoded_request(**kwargs: Any) -> HttpRequest:
string_path = kwargs.pop("string_path", "begin!*'();:@&=+$,end") # type: str
accept = "application/json"
# Construct URL
url = "/paths/string/begin!*'();:@&=+$,end/{stringPath}"
path_format_arguments = {
"stringPath": _SERIALIZER.url("string_path", string_path, "str", skip_quote=True),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, headers=header_parameters, **kwargs)
def build_paths_string_empty_request(**kwargs: Any) -> HttpRequest:
string_path = kwargs.pop("string_path", "") # type: str
accept = "application/json"
# Construct URL
url = "/paths/string/empty/{stringPath}"
path_format_arguments = {
"stringPath": _SERIALIZER.url("string_path", string_path, "str"),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, headers=header_parameters, **kwargs)
def build_paths_string_null_request(string_path: str, **kwargs: Any) -> HttpRequest:
accept = "application/json"
# Construct URL
url = "/paths/string/null/{stringPath}"
path_format_arguments = {
"stringPath": _SERIALIZER.url("string_path", string_path, "str"),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, headers=header_parameters, **kwargs)
def build_paths_enum_valid_request(enum_path: str, **kwargs: Any) -> HttpRequest:
accept = "application/json"
# Construct URL
url = "/paths/enum/green%20color/{enumPath}"
path_format_arguments = {
"enumPath": _SERIALIZER.url("enum_path", enum_path, "str"),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, headers=header_parameters, **kwargs)
def build_paths_enum_null_request(enum_path: str, **kwargs: Any) -> HttpRequest:
accept = "application/json"
# Construct URL
url = "/paths/string/null/{enumPath}"
path_format_arguments = {
"enumPath": _SERIALIZER.url("enum_path", enum_path, "str"),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, headers=header_parameters, **kwargs)
def build_paths_byte_multi_byte_request(byte_path: bytearray, **kwargs: Any) -> HttpRequest:
accept = "application/json"
# Construct URL
url = "/paths/byte/multibyte/{bytePath}"
path_format_arguments = {
"bytePath": _SERIALIZER.url("byte_path", byte_path, "bytearray"),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, headers=header_parameters, **kwargs)
def build_paths_byte_empty_request(**kwargs: Any) -> HttpRequest:
byte_path = kwargs.pop("byte_path", bytearray("", encoding="utf-8")) # type: bytearray
accept = "application/json"
# Construct URL
url = "/paths/byte/empty/{bytePath}"
path_format_arguments = {
"bytePath": _SERIALIZER.url("byte_path", byte_path, "bytearray"),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, headers=header_parameters, **kwargs)
def build_paths_byte_null_request(byte_path: bytearray, **kwargs: Any) -> HttpRequest:
accept = "application/json"
# Construct URL
url = "/paths/byte/null/{bytePath}"
path_format_arguments = {
"bytePath": _SERIALIZER.url("byte_path", byte_path, "bytearray"),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, headers=header_parameters, **kwargs)
def build_paths_date_valid_request(**kwargs: Any) -> HttpRequest:
date_path = kwargs.pop("date_path", "2012-01-01") # type: datetime.date
accept = "application/json"
# Construct URL
url = "/paths/date/2012-01-01/{datePath}"
path_format_arguments = {
"datePath": _SERIALIZER.url("date_path", date_path, "date"),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, headers=header_parameters, **kwargs)
def build_paths_date_null_request(date_path: datetime.date, **kwargs: Any) -> HttpRequest:
accept = "application/json"
# Construct URL
url = "/paths/date/null/{datePath}"
path_format_arguments = {
"datePath": _SERIALIZER.url("date_path", date_path, "date"),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, headers=header_parameters, **kwargs)
def build_paths_date_time_valid_request(**kwargs: Any) -> HttpRequest:
date_time_path = kwargs.pop("date_time_path", "2012-01-01T01:01:01Z") # type: datetime.datetime
accept = "application/json"
# Construct URL
url = "/paths/datetime/2012-01-01T01%3A01%3A01Z/{dateTimePath}"
path_format_arguments = {
"dateTimePath": _SERIALIZER.url("date_time_path", date_time_path, "iso-8601"),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, headers=header_parameters, **kwargs)
def build_paths_date_time_null_request(date_time_path: datetime.datetime, **kwargs: Any) -> HttpRequest:
accept = "application/json"
# Construct URL
url = "/paths/datetime/null/{dateTimePath}"
path_format_arguments = {
"dateTimePath": _SERIALIZER.url("date_time_path", date_time_path, "iso-8601"),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, headers=header_parameters, **kwargs)
def build_paths_base64_url_request(base64_url_path: bytes, **kwargs: Any) -> HttpRequest:
accept = "application/json"
# Construct URL
url = "/paths/string/bG9yZW0/{base64UrlPath}"
path_format_arguments = {
"base64UrlPath": _SERIALIZER.url("base64_url_path", base64_url_path, "base64"),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, headers=header_parameters, **kwargs)
def build_paths_array_csv_in_path_request(array_path: List[str], **kwargs: Any) -> HttpRequest:
accept = "application/json"
# Construct URL
url = (
"/paths/array/ArrayPath1%2cbegin%21%2A%27%28%29%3B%3A%40%20%26%3D%2B%24%2C%2F%3F%23%5B%5Dend%2c%2c/{arrayPath}"
)
path_format_arguments = {
"arrayPath": _SERIALIZER.url("array_path", array_path, "[str]", div=","),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, headers=header_parameters, **kwargs)
def build_paths_unix_time_url_request(unix_time_url_path: datetime.datetime, **kwargs: Any) -> HttpRequest:
accept = "application/json"
# Construct URL
url = "/paths/int/1460505600/{unixTimeUrlPath}"
path_format_arguments = {
"unixTimeUrlPath": _SERIALIZER.url("unix_time_url_path", unix_time_url_path, "unix-time"),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, headers=header_parameters, **kwargs)
def build_queries_get_boolean_true_request(**kwargs: Any) -> HttpRequest:
bool_query = kwargs.pop("bool_query", True) # type: bool
accept = "application/json"
# Construct URL
url = "/queries/bool/true"
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters["boolQuery"] = _SERIALIZER.query("bool_query", bool_query, "bool")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs)
def build_queries_get_boolean_false_request(**kwargs: Any) -> HttpRequest:
bool_query = kwargs.pop("bool_query", False) # type: bool
accept = "application/json"
# Construct URL
url = "/queries/bool/false"
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters["boolQuery"] = _SERIALIZER.query("bool_query", bool_query, "bool")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs)
def build_queries_get_boolean_null_request(*, bool_query: Optional[bool] = None, **kwargs: Any) -> HttpRequest:
accept = "application/json"
# Construct URL
url = "/queries/bool/null"
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if bool_query is not None:
query_parameters["boolQuery"] = _SERIALIZER.query("bool_query", bool_query, "bool")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs)
def build_queries_get_int_one_million_request(**kwargs: Any) -> HttpRequest:
int_query = kwargs.pop("int_query", 1000000) # type: int
accept = "application/json"
# Construct URL
url = "/queries/int/1000000"
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters["intQuery"] = _SERIALIZER.query("int_query", int_query, "int")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs)
def build_queries_get_int_negative_one_million_request(**kwargs: Any) -> HttpRequest:
int_query = kwargs.pop("int_query", -1000000) # type: int
accept = "application/json"
# Construct URL
url = "/queries/int/-1000000"
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters["intQuery"] = _SERIALIZER.query("int_query", int_query, "int")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs)
def build_queries_get_int_null_request(*, int_query: Optional[int] = None, **kwargs: Any) -> HttpRequest:
accept = "application/json"
# Construct URL
url = "/queries/int/null"
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if int_query is not None:
query_parameters["intQuery"] = _SERIALIZER.query("int_query", int_query, "int")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs)
def build_queries_get_ten_billion_request(**kwargs: Any) -> HttpRequest:
long_query = kwargs.pop("long_query", 10000000000) # type: int
accept = "application/json"
# Construct URL
url = "/queries/long/10000000000"
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters["longQuery"] = _SERIALIZER.query("long_query", long_query, "long")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs)
def build_queries_get_negative_ten_billion_request(**kwargs: Any) -> HttpRequest:
long_query = kwargs.pop("long_query", -10000000000) # type: int
accept = "application/json"
# Construct URL
url = "/queries/long/-10000000000"
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters["longQuery"] = _SERIALIZER.query("long_query", long_query, "long")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs)
def build_queries_get_long_null_request(*, long_query: Optional[int] = None, **kwargs: Any) -> HttpRequest:
accept = "application/json"
# Construct URL
url = "/queries/long/null"
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if long_query is not None:
query_parameters["longQuery"] = _SERIALIZER.query("long_query", long_query, "long")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs)
def build_queries_float_scientific_positive_request(**kwargs: Any) -> HttpRequest:
float_query = kwargs.pop("float_query", 103400000000000000000) # type: float
accept = "application/json"
# Construct URL
url = "/queries/float/1.034E+20"
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters["floatQuery"] = _SERIALIZER.query("float_query", float_query, "float")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs)
def build_queries_float_scientific_negative_request(**kwargs: Any) -> HttpRequest:
float_query = kwargs.pop("float_query", -1.034e-20) # type: float
accept = "application/json"
# Construct URL
url = "/queries/float/-1.034E-20"
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters["floatQuery"] = _SERIALIZER.query("float_query", float_query, "float")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs)
def build_queries_float_null_request(*, float_query: Optional[float] = None, **kwargs: Any) -> HttpRequest:
accept = "application/json"
# Construct URL
url = "/queries/float/null"
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if float_query is not None:
query_parameters["floatQuery"] = _SERIALIZER.query("float_query", float_query, "float")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs)
def build_queries_double_decimal_positive_request(**kwargs: Any) -> HttpRequest:
double_query = kwargs.pop("double_query", 9999999.999) # type: float
accept = "application/json"
# Construct URL
url = "/queries/double/9999999.999"
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters["doubleQuery"] = _SERIALIZER.query("double_query", double_query, "float")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs)
def build_queries_double_decimal_negative_request(**kwargs: Any) -> HttpRequest:
double_query = kwargs.pop("double_query", -9999999.999) # type: float
accept = "application/json"
# Construct URL
url = "/queries/double/-9999999.999"
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters["doubleQuery"] = _SERIALIZER.query("double_query", double_query, "float")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs)
def build_queries_double_null_request(*, double_query: Optional[float] = None, **kwargs: Any) -> HttpRequest:
accept = "application/json"
# Construct URL
url = "/queries/double/null"
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if double_query is not None:
query_parameters["doubleQuery"] = _SERIALIZER.query("double_query", double_query, "float")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs)
def build_queries_string_unicode_request(**kwargs: Any) -> HttpRequest:
string_query = kwargs.pop("string_query", "啊齄丂狛狜隣郎隣兀﨩") # type: str
accept = "application/json"
# Construct URL
url = "/queries/string/unicode/"
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters["stringQuery"] = _SERIALIZER.query("string_query", string_query, "str")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs)
def build_queries_string_url_encoded_request(**kwargs: Any) -> HttpRequest:
string_query = kwargs.pop("string_query", "begin!*'();:@ &=+$,/?#[]end") # type: str
accept = "application/json"
# Construct URL
url = "/queries/string/begin%21%2A%27%28%29%3B%3A%40%20%26%3D%2B%24%2C%2F%3F%23%5B%5Dend"
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters["stringQuery"] = _SERIALIZER.query("string_query", string_query, "str")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs)
def build_queries_string_empty_request(**kwargs: Any) -> HttpRequest:
string_query = kwargs.pop("string_query", "") # type: str
accept = "application/json"
# Construct URL
url = "/queries/string/empty"
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters["stringQuery"] = _SERIALIZER.query("string_query", string_query, "str")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs)
def build_queries_string_null_request(*, string_query: Optional[str] = None, **kwargs: Any) -> HttpRequest:
accept = "application/json"
# Construct URL
url = "/queries/string/null"
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if string_query is not None:
query_parameters["stringQuery"] = _SERIALIZER.query("string_query", string_query, "str")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs)
def build_queries_enum_valid_request(*, enum_query: Optional[str] = None, **kwargs: Any) -> HttpRequest:
accept = "application/json"
# Construct URL
url = "/queries/enum/green%20color"
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if enum_query is not None:
query_parameters["enumQuery"] = _SERIALIZER.query("enum_query", enum_query, "str")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs)
def build_queries_enum_null_request(*, enum_query: Optional[str] = None, **kwargs: Any) -> HttpRequest:
accept = "application/json"
# Construct URL
url = "/queries/enum/null"
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if enum_query is not None:
query_parameters["enumQuery"] = _SERIALIZER.query("enum_query", enum_query, "str")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs)
def build_queries_byte_multi_byte_request(*, byte_query: Optional[bytearray] = None, **kwargs: Any) -> HttpRequest:
accept = "application/json"
# Construct URL
url = "/queries/byte/multibyte"
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if byte_query is not None:
query_parameters["byteQuery"] = _SERIALIZER.query("byte_query", byte_query, "bytearray")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs)
def build_queries_byte_empty_request(**kwargs: Any) -> HttpRequest:
byte_query = kwargs.pop("byte_query", bytearray("", encoding="utf-8")) # type: bytearray
accept = "application/json"
# Construct URL
url = "/queries/byte/empty"
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters["byteQuery"] = _SERIALIZER.query("byte_query", byte_query, "bytearray")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs)
def build_queries_byte_null_request(*, byte_query: Optional[bytearray] = None, **kwargs: Any) -> HttpRequest:
accept = "application/json"
# Construct URL
url = "/queries/byte/null"
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if byte_query is not None:
query_parameters["byteQuery"] = _SERIALIZER.query("byte_query", byte_query, "bytearray")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs)
def build_queries_date_valid_request(**kwargs: Any) -> HttpRequest:
date_query = kwargs.pop("date_query", "2012-01-01") # type: datetime.date
accept = "application/json"
# Construct URL
url = "/queries/date/2012-01-01"
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters["dateQuery"] = _SERIALIZER.query("date_query", date_query, "date")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs)
def build_queries_date_null_request(*, date_query: Optional[datetime.date] = None, **kwargs: Any) -> HttpRequest:
accept = "application/json"
# Construct URL
url = "/queries/date/null"
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if date_query is not None:
query_parameters["dateQuery"] = _SERIALIZER.query("date_query", date_query, "date")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs)
def build_queries_date_time_valid_request(**kwargs: Any) -> HttpRequest:
date_time_query = kwargs.pop("date_time_query", "2012-01-01T01:01:01Z") # type: datetime.datetime
accept = "application/json"
# Construct URL
url = "/queries/datetime/2012-01-01T01%3A01%3A01Z"
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters["dateTimeQuery"] = _SERIALIZER.query("date_time_query", date_time_query, "iso-8601")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs)
def build_queries_date_time_null_request(
*, date_time_query: Optional[datetime.datetime] = None, **kwargs: Any
) -> HttpRequest:
accept = "application/json"
# Construct URL
url = "/queries/datetime/null"
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if date_time_query is not None:
query_parameters["dateTimeQuery"] = _SERIALIZER.query("date_time_query", date_time_query, "iso-8601")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs)
def build_queries_array_string_csv_valid_request(
*, array_query: Optional[List[str]] = None, **kwargs: Any
) -> HttpRequest:
accept = "application/json"
# Construct URL
url = "/queries/array/csv/string/valid"
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if array_query is not None:
query_parameters["arrayQuery"] = _SERIALIZER.query("array_query", array_query, "[str]", div=",")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs)
def build_queries_array_string_csv_null_request(
*, array_query: Optional[List[str]] = None, **kwargs: Any
) -> HttpRequest:
accept = "application/json"
# Construct URL
url = "/queries/array/csv/string/null"
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if array_query is not None:
query_parameters["arrayQuery"] = _SERIALIZER.query("array_query", array_query, "[str]", div=",")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs)
def build_queries_array_string_csv_empty_request(
*, array_query: Optional[List[str]] = None, **kwargs: Any
) -> HttpRequest:
accept = "application/json"
# Construct URL
url = "/queries/array/csv/string/empty"
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if array_query is not None:
query_parameters["arrayQuery"] = _SERIALIZER.query("array_query", array_query, "[str]", div=",")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs)
def build_queries_array_string_no_collection_format_empty_request(
*, array_query: Optional[List[str]] = None, **kwargs: Any
) -> HttpRequest:
accept = "application/json"
# Construct URL
url = "/queries/array/none/string/empty"
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if array_query is not None:
query_parameters["arrayQuery"] = _SERIALIZER.query("array_query", array_query, "[str]", div=",")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs)
def build_queries_array_string_ssv_valid_request(
*, array_query: Optional[List[str]] = None, **kwargs: Any
) -> HttpRequest:
accept = "application/json"
# Construct URL
url = "/queries/array/ssv/string/valid"
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if array_query is not None:
query_parameters["arrayQuery"] = _SERIALIZER.query("array_query", array_query, "[str]", div=" ")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs)
def build_queries_array_string_tsv_valid_request(
*, array_query: Optional[List[str]] = None, **kwargs: Any
) -> HttpRequest:
accept = "application/json"
# Construct URL
url = "/queries/array/tsv/string/valid"
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if array_query is not None:
query_parameters["arrayQuery"] = _SERIALIZER.query("array_query", array_query, "[str]", div=" ")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs)
def build_queries_array_string_pipes_valid_request(
*, array_query: Optional[List[str]] = None, **kwargs: Any
) -> HttpRequest:
accept = "application/json"
# Construct URL
url = "/queries/array/pipes/string/valid"
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if array_query is not None:
query_parameters["arrayQuery"] = _SERIALIZER.query("array_query", array_query, "[str]", div="|")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs)
def build_path_items_get_all_with_values_request(
path_item_string_path: str,
global_string_path: str,
local_string_path: str,
*,
path_item_string_query: Optional[str] = None,
global_string_query: Optional[str] = None,
local_string_query: Optional[str] = None,
**kwargs: Any
) -> HttpRequest:
accept = "application/json"
# Construct URL
url = "/pathitem/nullable/globalStringPath/{globalStringPath}/pathItemStringPath/{pathItemStringPath}/localStringPath/{localStringPath}/globalStringQuery/pathItemStringQuery/localStringQuery"
path_format_arguments = {
"pathItemStringPath": _SERIALIZER.url("path_item_string_path", path_item_string_path, "str"),
"globalStringPath": _SERIALIZER.url("global_string_path", global_string_path, "str"),
"localStringPath": _SERIALIZER.url("local_string_path", local_string_path, "str"),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if path_item_string_query is not None:
query_parameters["pathItemStringQuery"] = _SERIALIZER.query(
"path_item_string_query", path_item_string_query, "str"
)
if global_string_query is not None:
query_parameters["globalStringQuery"] = _SERIALIZER.query("global_string_query", global_string_query, "str")
if local_string_query is not None:
query_parameters["localStringQuery"] = _SERIALIZER.query("local_string_query", local_string_query, "str")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs)
def build_path_items_get_global_query_null_request(
path_item_string_path: str,
global_string_path: str,
local_string_path: str,
*,
path_item_string_query: Optional[str] = None,
global_string_query: Optional[str] = None,
local_string_query: Optional[str] = None,
**kwargs: Any
) -> HttpRequest:
accept = "application/json"
# Construct URL
url = "/pathitem/nullable/globalStringPath/{globalStringPath}/pathItemStringPath/{pathItemStringPath}/localStringPath/{localStringPath}/null/pathItemStringQuery/localStringQuery"
path_format_arguments = {
"pathItemStringPath": _SERIALIZER.url("path_item_string_path", path_item_string_path, "str"),
"globalStringPath": _SERIALIZER.url("global_string_path", global_string_path, "str"),
"localStringPath": _SERIALIZER.url("local_string_path", local_string_path, "str"),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if path_item_string_query is not None:
query_parameters["pathItemStringQuery"] = _SERIALIZER.query(
"path_item_string_query", path_item_string_query, "str"
)
if global_string_query is not None:
query_parameters["globalStringQuery"] = _SERIALIZER.query("global_string_query", global_string_query, "str")
if local_string_query is not None:
query_parameters["localStringQuery"] = _SERIALIZER.query("local_string_query", local_string_query, "str")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs)
def build_path_items_get_global_and_local_query_null_request(
path_item_string_path: str,
global_string_path: str,
local_string_path: str,
*,
path_item_string_query: Optional[str] = None,
global_string_query: Optional[str] = None,
local_string_query: Optional[str] = None,
**kwargs: Any
) -> HttpRequest:
accept = "application/json"
# Construct URL
url = "/pathitem/nullable/globalStringPath/{globalStringPath}/pathItemStringPath/{pathItemStringPath}/localStringPath/{localStringPath}/null/pathItemStringQuery/null"
path_format_arguments = {
"pathItemStringPath": _SERIALIZER.url("path_item_string_path", path_item_string_path, "str"),
"globalStringPath": _SERIALIZER.url("global_string_path", global_string_path, "str"),
"localStringPath": _SERIALIZER.url("local_string_path", local_string_path, "str"),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if path_item_string_query is not None:
query_parameters["pathItemStringQuery"] = _SERIALIZER.query(
"path_item_string_query", path_item_string_query, "str"
)
if global_string_query is not None:
query_parameters["globalStringQuery"] = _SERIALIZER.query("global_string_query", global_string_query, "str")
if local_string_query is not None:
query_parameters["localStringQuery"] = _SERIALIZER.query("local_string_query", local_string_query, "str")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs)
def build_path_items_get_local_path_item_query_null_request(
path_item_string_path: str,
global_string_path: str,
local_string_path: str,
*,
path_item_string_query: Optional[str] = None,
global_string_query: Optional[str] = None,
local_string_query: Optional[str] = None,
**kwargs: Any
) -> HttpRequest:
accept = "application/json"
# Construct URL
url = "/pathitem/nullable/globalStringPath/{globalStringPath}/pathItemStringPath/{pathItemStringPath}/localStringPath/{localStringPath}/globalStringQuery/null/null"
path_format_arguments = {
"pathItemStringPath": _SERIALIZER.url("path_item_string_path", path_item_string_path, "str"),
"globalStringPath": _SERIALIZER.url("global_string_path", global_string_path, "str"),
"localStringPath": _SERIALIZER.url("local_string_path", local_string_path, "str"),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if path_item_string_query is not None:
query_parameters["pathItemStringQuery"] = _SERIALIZER.query(
"path_item_string_query", path_item_string_query, "str"
)
if global_string_query is not None:
query_parameters["globalStringQuery"] = _SERIALIZER.query("global_string_query", global_string_query, "str")
if local_string_query is not None:
query_parameters["localStringQuery"] = _SERIALIZER.query("local_string_query", local_string_query, "str")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs)
class PathsOperations(object):
"""PathsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def get_boolean_true(self, **kwargs: Any) -> None:
"""Get true Boolean value on path.
:keyword bool_path: true boolean value. The default value is True. Note that overriding this
default value may result in unsupported behavior.
:paramtype bool_path: bool
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
bool_path = kwargs.pop("bool_path", True) # type: bool
request = build_paths_get_boolean_true_request(
bool_path=bool_path,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
get_boolean_true.metadata = {"url": "/paths/bool/true/{boolPath}"} # type: ignore
@distributed_trace
def get_boolean_false(self, **kwargs: Any) -> None:
"""Get false Boolean value on path.
:keyword bool_path: false boolean value. The default value is False. Note that overriding this
default value may result in unsupported behavior.
:paramtype bool_path: bool
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
bool_path = kwargs.pop("bool_path", False) # type: bool
request = build_paths_get_boolean_false_request(
bool_path=bool_path,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
get_boolean_false.metadata = {"url": "/paths/bool/false/{boolPath}"} # type: ignore
@distributed_trace
def get_int_one_million(self, **kwargs: Any) -> None:
"""Get '1000000' integer value.
:keyword int_path: '1000000' integer value. The default value is 1000000. Note that overriding
this default value may result in unsupported behavior.
:paramtype int_path: int
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
int_path = kwargs.pop("int_path", 1000000) # type: int
request = build_paths_get_int_one_million_request(
int_path=int_path,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
get_int_one_million.metadata = {"url": "/paths/int/1000000/{intPath}"} # type: ignore
@distributed_trace
def get_int_negative_one_million(self, **kwargs: Any) -> None:
"""Get '-1000000' integer value.
:keyword int_path: '-1000000' integer value. The default value is -1000000. Note that
overriding this default value may result in unsupported behavior.
:paramtype int_path: int
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
int_path = kwargs.pop("int_path", -1000000) # type: int
request = build_paths_get_int_negative_one_million_request(
int_path=int_path,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
get_int_negative_one_million.metadata = {"url": "/paths/int/-1000000/{intPath}"} # type: ignore
@distributed_trace
def get_ten_billion(self, **kwargs: Any) -> None:
"""Get '10000000000' 64 bit integer value.
:keyword long_path: '10000000000' 64 bit integer value. The default value is 10000000000. Note
that overriding this default value may result in unsupported behavior.
:paramtype long_path: long
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
long_path = kwargs.pop("long_path", 10000000000) # type: int
request = build_paths_get_ten_billion_request(
long_path=long_path,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
get_ten_billion.metadata = {"url": "/paths/long/10000000000/{longPath}"} # type: ignore
@distributed_trace
def get_negative_ten_billion(self, **kwargs: Any) -> None:
"""Get '-10000000000' 64 bit integer value.
:keyword long_path: '-10000000000' 64 bit integer value. The default value is -10000000000.
Note that overriding this default value may result in unsupported behavior.
:paramtype long_path: long
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
long_path = kwargs.pop("long_path", -10000000000) # type: int
request = build_paths_get_negative_ten_billion_request(
long_path=long_path,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
get_negative_ten_billion.metadata = {"url": "/paths/long/-10000000000/{longPath}"} # type: ignore
@distributed_trace
def float_scientific_positive(self, **kwargs: Any) -> None:
"""Get '1.034E+20' numeric value.
        :keyword float_path: '1.034E+20' numeric value. The default value is 103400000000000000000. Note
that overriding this default value may result in unsupported behavior.
:paramtype float_path: float
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
float_path = kwargs.pop("float_path", 103400000000000000000) # type: float
request = build_paths_float_scientific_positive_request(
float_path=float_path,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
float_scientific_positive.metadata = {"url": "/paths/float/1.034E+20/{floatPath}"} # type: ignore
@distributed_trace
def float_scientific_negative(self, **kwargs: Any) -> None:
"""Get '-1.034E-20' numeric value.
        :keyword float_path: '-1.034E-20' numeric value. The default value is -1.034e-20. Note that
overriding this default value may result in unsupported behavior.
:paramtype float_path: float
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
float_path = kwargs.pop("float_path", -1.034e-20) # type: float
request = build_paths_float_scientific_negative_request(
float_path=float_path,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
float_scientific_negative.metadata = {"url": "/paths/float/-1.034E-20/{floatPath}"} # type: ignore
@distributed_trace
def double_decimal_positive(self, **kwargs: Any) -> None:
"""Get '9999999.999' numeric value.
        :keyword double_path: '9999999.999' numeric value. The default value is 9999999.999. Note that
overriding this default value may result in unsupported behavior.
:paramtype double_path: float
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
double_path = kwargs.pop("double_path", 9999999.999) # type: float
request = build_paths_double_decimal_positive_request(
double_path=double_path,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
double_decimal_positive.metadata = {"url": "/paths/double/9999999.999/{doublePath}"} # type: ignore
@distributed_trace
def double_decimal_negative(self, **kwargs: Any) -> None:
"""Get '-9999999.999' numeric value.
:keyword double_path: '-9999999.999' numeric value. The default value is -9999999.999. Note that
overriding this default value may result in unsupported behavior.
:paramtype double_path: float
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
double_path = kwargs.pop("double_path", -9999999.999) # type: float
request = build_paths_double_decimal_negative_request(
double_path=double_path,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
double_decimal_negative.metadata = {"url": "/paths/double/-9999999.999/{doublePath}"} # type: ignore
@distributed_trace
def string_unicode(self, **kwargs: Any) -> None:
"""Get '啊齄丂狛狜隣郎隣兀﨩' multi-byte string value.
:keyword string_path: '啊齄丂狛狜隣郎隣兀﨩' multi-byte string value. The default value is "啊齄丂狛狜隣郎隣兀﨩".
Note that overriding this default value may result in unsupported behavior.
:paramtype string_path: str
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
string_path = kwargs.pop("string_path", "啊齄丂狛狜隣郎隣兀﨩") # type: str
request = build_paths_string_unicode_request(
string_path=string_path,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
string_unicode.metadata = {"url": "/paths/string/unicode/{stringPath}"} # type: ignore
@distributed_trace
def string_url_encoded(self, **kwargs: Any) -> None:
"""Get 'begin!*'();:@ &=+$,/?#[]end.
:keyword string_path: 'begin!*'();:@ &=+$,/?#[]end' url encoded string value. The default value
is "begin!*'();:@ &=+$,/?#[]end". Note that overriding this default value may result in
unsupported behavior.
:paramtype string_path: str
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
string_path = kwargs.pop("string_path", "begin!*'();:@ &=+$,/?#[]end") # type: str
request = build_paths_string_url_encoded_request(
string_path=string_path,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
string_url_encoded.metadata = {"url": "/paths/string/begin%21%2A%27%28%29%3B%3A%40%20%26%3D%2B%24%2C%2F%3F%23%5B%5Dend/{stringPath}"} # type: ignore
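# Hedged sketch for the URL-encoded string path: the constant segment in the
# metadata URL above is already percent-encoded, and the parameter value is
# expected to be encoded the same way before substitution. Assuming this group
# is exposed as `client.paths`:
#
#     client.paths.string_url_encoded()  # sends the default "begin!*'();:@ &=+$,/?#[]end"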
@distributed_trace
def string_url_non_encoded(self, **kwargs: Any) -> None:
"""Get 'begin!*'();:@&=+$,end.
https://tools.ietf.org/html/rfc3986#appendix-A 'path' accept any 'pchar' not encoded.
:keyword string_path: 'begin!*'();:@&=+$,end' non-encoded string value. The default value is
"begin!*'();:@&=+$,end". Note that overriding this default value may result in unsupported
behavior.
:paramtype string_path: str
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
string_path = kwargs.pop("string_path", "begin!*'();:@&=+$,end") # type: str
request = build_paths_string_url_non_encoded_request(
string_path=string_path,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
string_url_non_encoded.metadata = {"url": "/paths/string/begin!*'();:@&=+$,end/{stringPath}"} # type: ignore
@distributed_trace
def string_empty(self, **kwargs: Any) -> None:
"""Get ''.
:keyword string_path: '' string value. The default value is "". Note that overriding this
default value may result in unsupported behavior.
:paramtype string_path: str
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
string_path = kwargs.pop("string_path", "") # type: str
request = build_paths_string_empty_request(
string_path=string_path,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
string_empty.metadata = {"url": "/paths/string/empty/{stringPath}"} # type: ignore
@distributed_trace
def string_null(self, string_path: str, **kwargs: Any) -> None:
"""Get null (should throw).
:param string_path: null string value.
:type string_path: str
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_paths_string_null_request(
string_path=string_path,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [400]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
string_null.metadata = {"url": "/paths/string/null/{stringPath}"} # type: ignore
@distributed_trace
def enum_valid(self, enum_path: str, **kwargs: Any) -> None:
"""Get using uri with 'green color' in path parameter.
:param enum_path: send the value 'green color'. Possible values are: "red color", "green color",
and "blue color".
:type enum_path: str
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_paths_enum_valid_request(
enum_path=enum_path,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
enum_valid.metadata = {"url": "/paths/enum/green%20color/{enumPath}"} # type: ignore
@distributed_trace
def enum_null(self, enum_path: str, **kwargs: Any) -> None:
"""Get null (should throw on the client before the request is sent on wire).
:param enum_path: send null (should throw). Possible values are: "red color", "green color", and
"blue color".
:type enum_path: str
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_paths_enum_null_request(
enum_path=enum_path,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [400]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
enum_null.metadata = {"url": "/paths/string/null/{enumPath}"} # type: ignore
@distributed_trace
def byte_multi_byte(self, byte_path: bytearray, **kwargs: Any) -> None:
"""Get '啊齄丂狛狜隣郎隣兀﨩' multibyte value as utf-8 encoded byte array.
:param byte_path: '啊齄丂狛狜隣郎隣兀﨩' multibyte value as utf-8 encoded byte array.
:type byte_path: bytearray
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_paths_byte_multi_byte_request(
byte_path=byte_path,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
byte_multi_byte.metadata = {"url": "/paths/byte/multibyte/{bytePath}"} # type: ignore
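# Sketch of passing the multibyte value, assuming `client.paths` is how this
# group is exposed; the bytearray is expected to be base64-encoded by the
# serializer before it is substituted into the URL (an assumption about the
# underlying serialization, not something shown in this module):
#
#     payload = bytearray("啊齄丂狛狜隣郎隣兀﨩", encoding="utf-8")
#     client.paths.byte_multi_byte(payload)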
@distributed_trace
def byte_empty(self, **kwargs: Any) -> None:
"""Get '' as byte array.
:keyword byte_path: '' as byte array. The default value is bytearray("", encoding="utf-8").
Note that overriding this default value may result in unsupported behavior.
:paramtype byte_path: bytearray
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
byte_path = kwargs.pop("byte_path", bytearray("", encoding="utf-8")) # type: bytearray
request = build_paths_byte_empty_request(
byte_path=byte_path,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
byte_empty.metadata = {"url": "/paths/byte/empty/{bytePath}"} # type: ignore
@distributed_trace
def byte_null(self, byte_path: bytearray, **kwargs: Any) -> None:
"""Get null as byte array (should throw).
:param byte_path: null as byte array (should throw).
:type byte_path: bytearray
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_paths_byte_null_request(
byte_path=byte_path,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [400]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
byte_null.metadata = {"url": "/paths/byte/null/{bytePath}"} # type: ignore
@distributed_trace
def date_valid(self, **kwargs: Any) -> None:
"""Get '2012-01-01' as date.
:keyword date_path: '2012-01-01' as date. The default value is "2012-01-01". Note that
overriding this default value may result in unsupported behavior.
:paramtype date_path: ~datetime.date
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
date_path = kwargs.pop("date_path", "2012-01-01") # type: datetime.date
request = build_paths_date_valid_request(
date_path=date_path,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
date_valid.metadata = {"url": "/paths/date/2012-01-01/{datePath}"} # type: ignore
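# Example of overriding the keyword default with a real date object (a sketch;
# the default string "2012-01-01" should already serialize to the same path
# segment). Assumes `client.paths` exposes this group:
#
#     import datetime
#     client.paths.date_valid(date_path=datetime.date(2012, 1, 1))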
@distributed_trace
def date_null(self, date_path: datetime.date, **kwargs: Any) -> None:
"""Get null as date - this should throw or be unusable on the client side, depending on date
representation.
:param date_path: null as date (should throw).
:type date_path: ~datetime.date
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_paths_date_null_request(
date_path=date_path,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [400]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
date_null.metadata = {"url": "/paths/date/null/{datePath}"} # type: ignore
@distributed_trace
def date_time_valid(self, **kwargs: Any) -> None:
"""Get '2012-01-01T01:01:01Z' as date-time.
:keyword date_time_path: '2012-01-01T01:01:01Z' as date-time. The default value is
"2012-01-01T01:01:01Z". Note that overriding this default value may result in unsupported
behavior.
:paramtype date_time_path: ~datetime.datetime
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
date_time_path = kwargs.pop("date_time_path", "2012-01-01T01:01:01Z") # type: datetime.datetime
request = build_paths_date_time_valid_request(
date_time_path=date_time_path,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
date_time_valid.metadata = {"url": "/paths/datetime/2012-01-01T01%3A01%3A01Z/{dateTimePath}"} # type: ignore
@distributed_trace
def date_time_null(self, date_time_path: datetime.datetime, **kwargs: Any) -> None:
"""Get null as date-time, should be disallowed or throw depending on representation of date-time.
:param date_time_path: null as date-time.
:type date_time_path: ~datetime.datetime
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_paths_date_time_null_request(
date_time_path=date_time_path,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [400]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
date_time_null.metadata = {"url": "/paths/datetime/null/{dateTimePath}"} # type: ignore
@distributed_trace
def base64_url(self, base64_url_path: bytes, **kwargs: Any) -> None:
"""Get 'lorem' encoded value as 'bG9yZW0' (base64url).
:param base64_url_path: base64url encoded value.
:type base64_url_path: bytes
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_paths_base64_url_request(
base64_url_path=base64_url_path,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
base64_url.metadata = {"url": "/paths/string/bG9yZW0/{base64UrlPath}"} # type: ignore
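# Sketch for the base64url path parameter, assuming `client.paths` exposes this
# group; per the docstring, the raw bytes b"lorem" are carried as "bG9yZW0"
# (base64url) on the wire:
#
#     client.paths.base64_url(b"lorem")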
@distributed_trace
def array_csv_in_path(self, array_path: List[str], **kwargs: Any) -> None:
"""Get an array of string ['ArrayPath1', 'begin!*'();:@ &=+$,/?#[]end' , null, ''] using the
csv-array format.
:param array_path: an array of string ['ArrayPath1', 'begin!*'();:@ &=+$,/?#[]end' , null, '']
using the csv-array format.
:type array_path: list[str]
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_paths_array_csv_in_path_request(
array_path=array_path,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
array_csv_in_path.metadata = {"url": "/paths/array/ArrayPath1%2cbegin%21%2A%27%28%29%3B%3A%40%20%26%3D%2B%24%2C%2F%3F%23%5B%5Dend%2c%2c/{arrayPath}"} # type: ignore
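# Sketch of the csv-array path format, assuming `client.paths` exposes this
# group; the list below is expected to join into
# "ArrayPath1,begin!*'();:@ &=+$,/?#[]end,," (the encoded form is visible in the
# metadata URL above), with None serialized as an empty element:
#
#     client.paths.array_csv_in_path(
#         ["ArrayPath1", "begin!*'();:@ &=+$,/?#[]end", None, ""]
#     )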
@distributed_trace
def unix_time_url(self, unix_time_url_path: datetime.datetime, **kwargs: Any) -> None:
"""Get the date 2016-04-13 encoded value as '1460505600' (Unix time).
:param unix_time_url_path: Unix time encoded value.
:type unix_time_url_path: ~datetime.datetime
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_paths_unix_time_url_request(
unix_time_url_path=unix_time_url_path,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
unix_time_url.metadata = {"url": "/paths/int/1460505600/{unixTimeUrlPath}"} # type: ignore
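# Sketch for the Unix-time path parameter, assuming `client.paths` exposes this
# group; midnight UTC on 2016-04-13 corresponds to 1460505600 seconds since the
# epoch, matching the constant in the metadata URL:
#
#     import datetime
#     client.paths.unix_time_url(
#         datetime.datetime(2016, 4, 13, tzinfo=datetime.timezone.utc)
#     )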
class QueriesOperations(object):
"""QueriesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
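# How this operation group is normally reached (a sketch: the generated client
# class name and the `queries` attribute name are assumptions, since only the
# operations classes appear in this module):
#
#     client = SomeGeneratedClient(base_url="http://localhost:3000")  # hypothetical name
#     client.queries.get_boolean_true()   # GET /queries/bool/true?boolQuery=true
#     client.queries.get_int_null()       # GET /queries/int/null (no query string)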
@distributed_trace
def get_boolean_true(self, **kwargs: Any) -> None:
"""Get true Boolean value on path.
:keyword bool_query: true boolean value. The default value is True. Note that overriding this
default value may result in unsupported behavior.
:paramtype bool_query: bool
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
bool_query = kwargs.pop("bool_query", True) # type: bool
request = build_queries_get_boolean_true_request(
bool_query=bool_query,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
get_boolean_true.metadata = {"url": "/queries/bool/true"} # type: ignore
@distributed_trace
def get_boolean_false(self, **kwargs: Any) -> None:
"""Get false Boolean value on path.
:keyword bool_query: false boolean value. The default value is False. Note that overriding this
default value may result in unsupported behavior.
:paramtype bool_query: bool
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
bool_query = kwargs.pop("bool_query", False) # type: bool
request = build_queries_get_boolean_false_request(
bool_query=bool_query,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
get_boolean_false.metadata = {"url": "/queries/bool/false"} # type: ignore
@distributed_trace
def get_boolean_null(self, *, bool_query: Optional[bool] = None, **kwargs: Any) -> None:
"""Get null Boolean value on query (query string should be absent).
:keyword bool_query: null boolean value.
:paramtype bool_query: bool
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_queries_get_boolean_null_request(
bool_query=bool_query,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
get_boolean_null.metadata = {"url": "/queries/bool/null"} # type: ignore
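# For the "null" query operations, omitting the keyword (or passing None) is
# expected to leave the query parameter off the request entirely. A sketch,
# assuming `client.queries` exposes this group:
#
#     client.queries.get_boolean_null()                 # no boolQuery on the wire
#     client.queries.get_boolean_null(bool_query=None)  # equivalent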
@distributed_trace
def get_int_one_million(self, **kwargs: Any) -> None:
"""Get '1000000' integer value.
:keyword int_query: '1000000' integer value. The default value is 1000000. Note that overriding
this default value may result in unsupported behavior.
:paramtype int_query: int
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
int_query = kwargs.pop("int_query", 1000000) # type: int
request = build_queries_get_int_one_million_request(
int_query=int_query,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
get_int_one_million.metadata = {"url": "/queries/int/1000000"} # type: ignore
@distributed_trace
def get_int_negative_one_million(self, **kwargs: Any) -> None:
"""Get '-1000000' integer value.
:keyword int_query: '-1000000' integer value. The default value is -1000000. Note that
overriding this default value may result in unsupported behavior.
:paramtype int_query: int
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
int_query = kwargs.pop("int_query", -1000000) # type: int
request = build_queries_get_int_negative_one_million_request(
int_query=int_query,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
get_int_negative_one_million.metadata = {"url": "/queries/int/-1000000"} # type: ignore
@distributed_trace
def get_int_null(self, *, int_query: Optional[int] = None, **kwargs: Any) -> None:
"""Get null integer value (no query parameter).
:keyword int_query: null integer value.
:paramtype int_query: int
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_queries_get_int_null_request(
int_query=int_query,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
get_int_null.metadata = {"url": "/queries/int/null"} # type: ignore
@distributed_trace
def get_ten_billion(self, **kwargs: Any) -> None:
"""Get '10000000000' 64 bit integer value.
:keyword long_query: '10000000000' 64 bit integer value. The default value is 10000000000. Note
that overriding this default value may result in unsupported behavior.
:paramtype long_query: long
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
long_query = kwargs.pop("long_query", 10000000000) # type: int
request = build_queries_get_ten_billion_request(
long_query=long_query,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
get_ten_billion.metadata = {"url": "/queries/long/10000000000"} # type: ignore
@distributed_trace
def get_negative_ten_billion(self, **kwargs: Any) -> None:
"""Get '-10000000000' 64 bit integer value.
:keyword long_query: '-10000000000' 64 bit integer value. The default value is -10000000000.
Note that overriding this default value may result in unsupported behavior.
:paramtype long_query: long
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
long_query = kwargs.pop("long_query", -10000000000) # type: int
request = build_queries_get_negative_ten_billion_request(
long_query=long_query,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
get_negative_ten_billion.metadata = {"url": "/queries/long/-10000000000"} # type: ignore
@distributed_trace
def get_long_null(self, *, long_query: Optional[int] = None, **kwargs: Any) -> None:
"""Get 'null 64 bit integer value (no query param in uri).
:keyword long_query: null 64 bit integer value.
:paramtype long_query: long
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_queries_get_long_null_request(
long_query=long_query,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
get_long_null.metadata = {"url": "/queries/long/null"} # type: ignore
@distributed_trace
def float_scientific_positive(self, **kwargs: Any) -> None:
"""Get '1.034E+20' numeric value.
:keyword float_query: '1.034E+20' numeric value. The default value is 103400000000000000000.
Note that overriding this default value may result in unsupported behavior.
:paramtype float_query: float
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
float_query = kwargs.pop("float_query", 103400000000000000000) # type: float
request = build_queries_float_scientific_positive_request(
float_query=float_query,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
float_scientific_positive.metadata = {"url": "/queries/float/1.034E+20"} # type: ignore
@distributed_trace
def float_scientific_negative(self, **kwargs: Any) -> None:
"""Get '-1.034E-20' numeric value.
:keyword float_query: '-1.034E-20' numeric value. The default value is -1.034e-20. Note that
overriding this default value may result in unsupported behavior.
:paramtype float_query: float
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
float_query = kwargs.pop("float_query", -1.034e-20) # type: float
request = build_queries_float_scientific_negative_request(
float_query=float_query,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
float_scientific_negative.metadata = {"url": "/queries/float/-1.034E-20"} # type: ignore
@distributed_trace
def float_null(self, *, float_query: Optional[float] = None, **kwargs: Any) -> None:
"""Get null numeric value (no query parameter).
:keyword float_query: null numeric value.
:paramtype float_query: float
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_queries_float_null_request(
float_query=float_query,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
float_null.metadata = {"url": "/queries/float/null"} # type: ignore
@distributed_trace
def double_decimal_positive(self, **kwargs: Any) -> None:
"""Get '9999999.999' numeric value.
:keyword double_query: '9999999.999' numeric value. The default value is 9999999.999. Note that
overriding this default value may result in unsupported behavior.
:paramtype double_query: float
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
double_query = kwargs.pop("double_query", 9999999.999) # type: float
request = build_queries_double_decimal_positive_request(
double_query=double_query,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
double_decimal_positive.metadata = {"url": "/queries/double/9999999.999"} # type: ignore
@distributed_trace
def double_decimal_negative(self, **kwargs: Any) -> None:
"""Get '-9999999.999' numeric value.
:keyword double_query: '-9999999.999' numeric value. The default value is -9999999.999. Note
that overriding this default value may result in unsupported behavior.
:paramtype double_query: float
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
double_query = kwargs.pop("double_query", -9999999.999) # type: float
request = build_queries_double_decimal_negative_request(
double_query=double_query,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
double_decimal_negative.metadata = {"url": "/queries/double/-9999999.999"} # type: ignore
@distributed_trace
def double_null(self, *, double_query: Optional[float] = None, **kwargs: Any) -> None:
"""Get null numeric value (no query parameter).
:keyword double_query: null numeric value.
:paramtype double_query: float
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_queries_double_null_request(
double_query=double_query,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
double_null.metadata = {"url": "/queries/double/null"} # type: ignore
@distributed_trace
def string_unicode(self, **kwargs: Any) -> None:
"""Get '啊齄丂狛狜隣郎隣兀﨩' multi-byte string value.
:keyword string_query: '啊齄丂狛狜隣郎隣兀﨩' multi-byte string value. The default value is "啊齄丂狛狜隣郎隣兀﨩".
Note that overriding this default value may result in unsupported behavior.
:paramtype string_query: str
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
string_query = kwargs.pop("string_query", "啊齄丂狛狜隣郎隣兀﨩") # type: str
request = build_queries_string_unicode_request(
string_query=string_query,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
string_unicode.metadata = {"url": "/queries/string/unicode/"} # type: ignore
@distributed_trace
def string_url_encoded(self, **kwargs: Any) -> None:
"""Get 'begin!*'();:@ &=+$,/?#[]end.
:keyword string_query: 'begin!*'();:@ &=+$,/?#[]end' url encoded string value. The default
value is "begin!*'();:@ &=+$,/?#[]end". Note that overriding this default value may result in
unsupported behavior.
:paramtype string_query: str
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
string_query = kwargs.pop("string_query", "begin!*'();:@ &=+$,/?#[]end") # type: str
request = build_queries_string_url_encoded_request(
string_query=string_query,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
string_url_encoded.metadata = {"url": "/queries/string/begin%21%2A%27%28%29%3B%3A%40%20%26%3D%2B%24%2C%2F%3F%23%5B%5Dend"} # type: ignore
@distributed_trace
def string_empty(self, **kwargs: Any) -> None:
"""Get ''.
:keyword string_query: '' string value. The default value is "". Note that overriding this
default value may result in unsupported behavior.
:paramtype string_query: str
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
string_query = kwargs.pop("string_query", "") # type: str
request = build_queries_string_empty_request(
string_query=string_query,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
string_empty.metadata = {"url": "/queries/string/empty"} # type: ignore
@distributed_trace
def string_null(self, *, string_query: Optional[str] = None, **kwargs: Any) -> None:
"""Get null (no query parameter in url).
:keyword string_query: null string value.
:paramtype string_query: str
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_queries_string_null_request(
string_query=string_query,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
string_null.metadata = {"url": "/queries/string/null"} # type: ignore
@distributed_trace
def enum_valid(self, *, enum_query: Optional[str] = None, **kwargs: Any) -> None:
"""Get using uri with query parameter 'green color'.
:keyword enum_query: 'green color' enum value. Possible values are: "red color", "green color",
and "blue color".
:paramtype enum_query: str
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_queries_enum_valid_request(
enum_query=enum_query,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
enum_valid.metadata = {"url": "/queries/enum/green%20color"} # type: ignore
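# The enum is passed as a plain string; the space should be percent-encoded on
# the wire. A sketch, assuming `client.queries` exposes this group:
#
#     client.queries.enum_valid(enum_query="green color")  # ...?enumQuery=green%20color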
@distributed_trace
def enum_null(self, *, enum_query: Optional[str] = None, **kwargs: Any) -> None:
"""Get null (no query parameter in url).
:keyword enum_query: null string value. Possible values are: "red color", "green color", and
"blue color".
:paramtype enum_query: str
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_queries_enum_null_request(
enum_query=enum_query,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
enum_null.metadata = {"url": "/queries/enum/null"} # type: ignore
@distributed_trace
def byte_multi_byte(self, *, byte_query: Optional[bytearray] = None, **kwargs: Any) -> None:
"""Get '啊齄丂狛狜隣郎隣兀﨩' multibyte value as utf-8 encoded byte array.
:keyword byte_query: '啊齄丂狛狜隣郎隣兀﨩' multibyte value as utf-8 encoded byte array.
:paramtype byte_query: bytearray
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_queries_byte_multi_byte_request(
byte_query=byte_query,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
byte_multi_byte.metadata = {"url": "/queries/byte/multibyte"} # type: ignore
@distributed_trace
def byte_empty(self, **kwargs: Any) -> None:
"""Get '' as byte array.
:keyword byte_query: '' as byte array. The default value is bytearray("", encoding="utf-8").
Note that overriding this default value may result in unsupported behavior.
:paramtype byte_query: bytearray
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
byte_query = kwargs.pop("byte_query", bytearray("", encoding="utf-8")) # type: bytearray
request = build_queries_byte_empty_request(
byte_query=byte_query,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
byte_empty.metadata = {"url": "/queries/byte/empty"} # type: ignore
@distributed_trace
def byte_null(self, *, byte_query: Optional[bytearray] = None, **kwargs: Any) -> None:
"""Get null as byte array (no query parameters in uri).
:keyword byte_query: null as byte array (no query parameters in uri).
:paramtype byte_query: bytearray
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_queries_byte_null_request(
byte_query=byte_query,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
byte_null.metadata = {"url": "/queries/byte/null"} # type: ignore
@distributed_trace
def date_valid(self, **kwargs: Any) -> None:
"""Get '2012-01-01' as date.
:keyword date_query: '2012-01-01' as date. The default value is "2012-01-01". Note that
overriding this default value may result in unsupported behavior.
:paramtype date_query: ~datetime.date
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
date_query = kwargs.pop("date_query", "2012-01-01") # type: datetime.date
request = build_queries_date_valid_request(
date_query=date_query,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
date_valid.metadata = {"url": "/queries/date/2012-01-01"} # type: ignore
@distributed_trace
def date_null(self, *, date_query: Optional[datetime.date] = None, **kwargs: Any) -> None:
"""Get null as date - this should result in no query parameters in uri.
:keyword date_query: null as date (no query parameters in uri).
:paramtype date_query: ~datetime.date
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_queries_date_null_request(
date_query=date_query,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
date_null.metadata = {"url": "/queries/date/null"} # type: ignore
@distributed_trace
def date_time_valid(self, **kwargs: Any) -> None:
"""Get '2012-01-01T01:01:01Z' as date-time.
:keyword date_time_query: '2012-01-01T01:01:01Z' as date-time. The default value is
"2012-01-01T01:01:01Z". Note that overriding this default value may result in unsupported
behavior.
:paramtype date_time_query: ~datetime.datetime
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
date_time_query = kwargs.pop("date_time_query", "2012-01-01T01:01:01Z") # type: datetime.datetime
request = build_queries_date_time_valid_request(
date_time_query=date_time_query,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
date_time_valid.metadata = {"url": "/queries/datetime/2012-01-01T01%3A01%3A01Z"} # type: ignore
@distributed_trace
def date_time_null(self, *, date_time_query: Optional[datetime.datetime] = None, **kwargs: Any) -> None:
"""Get null as date-time, should result in no query parameters in uri.
:keyword date_time_query: null as date-time (no query parameters).
:paramtype date_time_query: ~datetime.datetime
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_queries_date_time_null_request(
date_time_query=date_time_query,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
date_time_null.metadata = {"url": "/queries/datetime/null"} # type: ignore
@distributed_trace
def array_string_csv_valid(self, *, array_query: Optional[List[str]] = None, **kwargs: Any) -> None:
"""Get an array of string ['ArrayQuery1', 'begin!*'();:@ &=+$,/?#[]end' , null, ''] using the
csv-array format.
:keyword array_query: an array of string ['ArrayQuery1', 'begin!*'();:@ &=+$,/?#[]end' , null,
''] using the csv-array format.
:paramtype array_query: list[str]
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_queries_array_string_csv_valid_request(
array_query=array_query,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
array_string_csv_valid.metadata = {"url": "/queries/array/csv/string/valid"} # type: ignore
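# Sketch of the csv collection format for query arrays, assuming
# `client.queries` exposes this group; the list is expected to serialize into a
# single comma-joined arrayQuery value, with None as an empty element:
#
#     client.queries.array_string_csv_valid(
#         array_query=["ArrayQuery1", "begin!*'();:@ &=+$,/?#[]end", None, ""]
#     )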
@distributed_trace
def array_string_csv_null(self, *, array_query: Optional[List[str]] = None, **kwargs: Any) -> None:
"""Get a null array of string using the csv-array format.
:keyword array_query: a null array of string using the csv-array format.
:paramtype array_query: list[str]
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_queries_array_string_csv_null_request(
array_query=array_query,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
array_string_csv_null.metadata = {"url": "/queries/array/csv/string/null"} # type: ignore
@distributed_trace
def array_string_csv_empty(self, *, array_query: Optional[List[str]] = None, **kwargs: Any) -> None:
"""Get an empty array [] of string using the csv-array format.
:keyword array_query: an empty array [] of string using the csv-array format.
:paramtype array_query: list[str]
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_queries_array_string_csv_empty_request(
array_query=array_query,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
array_string_csv_empty.metadata = {"url": "/queries/array/csv/string/empty"} # type: ignore
@distributed_trace
def array_string_no_collection_format_empty(
self, *, array_query: Optional[List[str]] = None, **kwargs: Any
) -> None:
"""Array query has no defined collection format, should default to csv. Pass in ['hello', 'nihao',
'bonjour'] for the 'arrayQuery' parameter to the service.
:keyword array_query: Array-typed query parameter. Pass in ['hello', 'nihao', 'bonjour'].
:paramtype array_query: list[str]
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_queries_array_string_no_collection_format_empty_request(
array_query=array_query,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
array_string_no_collection_format_empty.metadata = {"url": "/queries/array/none/string/empty"} # type: ignore
@distributed_trace
def array_string_ssv_valid(self, *, array_query: Optional[List[str]] = None, **kwargs: Any) -> None:
"""Get an array of string ['ArrayQuery1', 'begin!*'();:@ &=+$,/?#[]end' , null, ''] using the
ssv-array format.
:keyword array_query: an array of string ['ArrayQuery1', 'begin!*'();:@ &=+$,/?#[]end' , null,
''] using the ssv-array format.
:paramtype array_query: list[str]
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_queries_array_string_ssv_valid_request(
array_query=array_query,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
array_string_ssv_valid.metadata = {"url": "/queries/array/ssv/string/valid"} # type: ignore
@distributed_trace
def array_string_tsv_valid(self, *, array_query: Optional[List[str]] = None, **kwargs: Any) -> None:
"""Get an array of string ['ArrayQuery1', 'begin!*'();:@ &=+$,/?#[]end' , null, ''] using the
tsv-array format.
:keyword array_query: an array of string ['ArrayQuery1', 'begin!*'();:@ &=+$,/?#[]end' , null,
''] using the tsv-array format.
:paramtype array_query: list[str]
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_queries_array_string_tsv_valid_request(
array_query=array_query,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
array_string_tsv_valid.metadata = {"url": "/queries/array/tsv/string/valid"} # type: ignore
@distributed_trace
def array_string_pipes_valid(self, *, array_query: Optional[List[str]] = None, **kwargs: Any) -> None:
"""Get an array of string ['ArrayQuery1', 'begin!*'();:@ &=+$,/?#[]end' , null, ''] using the
pipes-array format.
:keyword array_query: an array of string ['ArrayQuery1', 'begin!*'();:@ &=+$,/?#[]end' , null,
''] using the pipes-array format.
:paramtype array_query: list[str]
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_queries_array_string_pipes_valid_request(
array_query=array_query,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
array_string_pipes_valid.metadata = {"url": "/queries/array/pipes/string/valid"} # type: ignore
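    # For reference: with these collection formats a list such as ['a', 'b', 'c'] is
    # serialized into the query string as "a,b,c" (csv), "a b c" (ssv), "a\tb\tc" (tsv)
    # or "a|b|c" (pipes) by the corresponding operations above.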
class PathItemsOperations(object):
"""PathItemsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def get_all_with_values(
self,
path_item_string_path: str,
local_string_path: str,
*,
path_item_string_query: Optional[str] = None,
local_string_query: Optional[str] = None,
**kwargs: Any
) -> None:
"""send globalStringPath='globalStringPath', pathItemStringPath='pathItemStringPath',
localStringPath='localStringPath', globalStringQuery='globalStringQuery',
pathItemStringQuery='pathItemStringQuery', localStringQuery='localStringQuery'.
:param path_item_string_path: A string value 'pathItemStringPath' that appears in the path.
:type path_item_string_path: str
:param local_string_path: should contain value 'localStringPath'.
:type local_string_path: str
:keyword path_item_string_query: A string value 'pathItemStringQuery' that appears as a query
parameter.
:paramtype path_item_string_query: str
:keyword local_string_query: should contain value 'localStringQuery'.
:paramtype local_string_query: str
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_path_items_get_all_with_values_request(
path_item_string_path=path_item_string_path,
global_string_path=self._config.global_string_path,
local_string_path=local_string_path,
path_item_string_query=path_item_string_query,
global_string_query=self._config.global_string_query,
local_string_query=local_string_query,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
get_all_with_values.metadata = {"url": "/pathitem/nullable/globalStringPath/{globalStringPath}/pathItemStringPath/{pathItemStringPath}/localStringPath/{localStringPath}/globalStringQuery/pathItemStringQuery/localStringQuery"} # type: ignore
@distributed_trace
def get_global_query_null(
self,
path_item_string_path: str,
local_string_path: str,
*,
path_item_string_query: Optional[str] = None,
local_string_query: Optional[str] = None,
**kwargs: Any
) -> None:
"""send globalStringPath='globalStringPath', pathItemStringPath='pathItemStringPath',
localStringPath='localStringPath', globalStringQuery=null,
pathItemStringQuery='pathItemStringQuery', localStringQuery='localStringQuery'.
:param path_item_string_path: A string value 'pathItemStringPath' that appears in the path.
:type path_item_string_path: str
:param local_string_path: should contain value 'localStringPath'.
:type local_string_path: str
:keyword path_item_string_query: A string value 'pathItemStringQuery' that appears as a query
parameter.
:paramtype path_item_string_query: str
:keyword local_string_query: should contain value 'localStringQuery'.
:paramtype local_string_query: str
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_path_items_get_global_query_null_request(
path_item_string_path=path_item_string_path,
global_string_path=self._config.global_string_path,
local_string_path=local_string_path,
path_item_string_query=path_item_string_query,
global_string_query=self._config.global_string_query,
local_string_query=local_string_query,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
get_global_query_null.metadata = {"url": "/pathitem/nullable/globalStringPath/{globalStringPath}/pathItemStringPath/{pathItemStringPath}/localStringPath/{localStringPath}/null/pathItemStringQuery/localStringQuery"} # type: ignore
@distributed_trace
def get_global_and_local_query_null(
self,
path_item_string_path: str,
local_string_path: str,
*,
path_item_string_query: Optional[str] = None,
local_string_query: Optional[str] = None,
**kwargs: Any
) -> None:
"""send globalStringPath=globalStringPath, pathItemStringPath='pathItemStringPath',
localStringPath='localStringPath', globalStringQuery=null,
pathItemStringQuery='pathItemStringQuery', localStringQuery=null.
:param path_item_string_path: A string value 'pathItemStringPath' that appears in the path.
:type path_item_string_path: str
:param local_string_path: should contain value 'localStringPath'.
:type local_string_path: str
:keyword path_item_string_query: A string value 'pathItemStringQuery' that appears as a query
parameter.
:paramtype path_item_string_query: str
:keyword local_string_query: should contain null value.
:paramtype local_string_query: str
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_path_items_get_global_and_local_query_null_request(
path_item_string_path=path_item_string_path,
global_string_path=self._config.global_string_path,
local_string_path=local_string_path,
path_item_string_query=path_item_string_query,
global_string_query=self._config.global_string_query,
local_string_query=local_string_query,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
get_global_and_local_query_null.metadata = {"url": "/pathitem/nullable/globalStringPath/{globalStringPath}/pathItemStringPath/{pathItemStringPath}/localStringPath/{localStringPath}/null/pathItemStringQuery/null"} # type: ignore
@distributed_trace
def get_local_path_item_query_null(
self,
path_item_string_path: str,
local_string_path: str,
*,
path_item_string_query: Optional[str] = None,
local_string_query: Optional[str] = None,
**kwargs: Any
) -> None:
"""send globalStringPath='globalStringPath', pathItemStringPath='pathItemStringPath',
localStringPath='localStringPath', globalStringQuery='globalStringQuery',
pathItemStringQuery=null, localStringQuery=null.
:param path_item_string_path: A string value 'pathItemStringPath' that appears in the path.
:type path_item_string_path: str
:param local_string_path: should contain value 'localStringPath'.
:type local_string_path: str
:keyword path_item_string_query: should contain value null.
:paramtype path_item_string_query: str
:keyword local_string_query: should contain value null.
:paramtype local_string_query: str
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_path_items_get_local_path_item_query_null_request(
path_item_string_path=path_item_string_path,
global_string_path=self._config.global_string_path,
local_string_path=local_string_path,
path_item_string_query=path_item_string_query,
global_string_query=self._config.global_string_query,
local_string_query=local_string_query,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
get_local_path_item_query_null.metadata = {"url": "/pathitem/nullable/globalStringPath/{globalStringPath}/pathItemStringPath/{pathItemStringPath}/localStringPath/{localStringPath}/globalStringQuery/null/null"} # type: ignore
| 1.765625 | 2 |
baseplate_py_upgrader/docker.py | reddit/baseplate.py-upgrader | 6 | 5264 | <reponame>reddit/baseplate.py-upgrader
import logging
import re
from pathlib import Path
from typing import Match
logger = logging.getLogger(__name__)
IMAGE_RE = re.compile(
r"/baseplate-py:(?P<version>[0-9.]+(\.[0-9]+)?)-py(?P<python>[23]\.[0-9]+)-(?P<distro>(bionic|buster))(?P<repo>-artifactory)?(?P<dev>-dev)?"
)
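# Illustrative example (image names assumed, not taken from this module): a reference such as
# ".../baseplate-py:1.5-py3.7-bionic-dev" matches with version="1.5", python="3.7",
# distro="bionic", repo=None and dev="-dev"; upgrading to a 2.x target series rewrites it
# below to ".../baseplate-py:2-py3.7-buster-dev".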
def upgrade_docker_image_references_in_file(target_series: str, filepath: Path) -> None:
major, minor = target_series.split(".")
if major == "0":
image_series = f"{major}.{minor}"
else:
image_series = f"{major}"
force_distro = None
force_dev = False
force_repo = None
if major == "2":
force_distro = "buster"
force_dev = True
force_repo = ""
def replace_docker_image_reference(m: Match[str]) -> str:
distro = force_distro or m["distro"]
repo = force_repo if force_repo is not None else m["repo"]
dev = "-dev" if force_dev else m["dev"]
return f"/baseplate-py:{image_series}-py{m['python']}-{distro}{repo or ''}{dev or ''}"
file_content = filepath.read_text()
changed = IMAGE_RE.sub(replace_docker_image_reference, file_content, re.MULTILINE)
if file_content == changed:
return
with filepath.open("w") as f:
logger.info("Updated Docker image references in %s", filepath)
f.write(changed)
def upgrade_docker_image_references(target_series: str, root: Path) -> None:
for dockerfile in root.glob("**/Dockerfile*"):
upgrade_docker_image_references_in_file(target_series, dockerfile)
dronefile = root / ".drone.yml"
if dronefile.exists():
upgrade_docker_image_references_in_file(target_series, dronefile)
| 2.640625 | 3 |
model/swtz_ty.py | ArcherLuo233/election-s-prediction | 0 | 5265 | <reponame>ArcherLuo233/election-s-prediction
from sqlalchemy import Column, ForeignKey, Integer, String, Text
from model.base import Base
class SWTZ_TY(Base):
__tablename__ = 'swtz_ty'
class_name = '商务团组-团员'
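    # The Chinese strings below are user-facing domain data and are kept verbatim;
    # class_name translates roughly to "business delegation - delegation member".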
foreign_key = 'swtz_id'
export_docx = False
export_handle_file = ['identity']
field = [
'id', 'nickname', 'job', 'id_card', 'phone', 'remark', 'identity'
]
combo_field = {
'identity': {
'exclude': False,
'items': ['基层', '青年', '商界', '学界', '政界']
}
}
template_start_row = 3
swtz_id = Column(Integer, ForeignKey('swtz.id'))
nickname = Column(String(100), comment='姓名')
job = Column(String(100), comment='单位职务')
id_card = Column(String(100), comment='身份证号')
phone = Column(String(100), comment='联系电话')
remark = Column(Text, comment='备注')
identity_ = Column('identity', String(100), comment='身份')
@property
def identity(self):
if self.identity_ is None:
return []
return self.identity_.split(' ')
@identity.setter
def identity(self, val):
if isinstance(val, list):
while '' in val:
val.remove('')
self.identity_ = ' '.join(val)
else:
self.identity_ = val
| 2.25 | 2 |
amy/dashboard/tests/test_autoupdate_profile.py | code-review-doctor/amy | 53 | 5266 | from django.urls import reverse
from consents.models import Consent, Term
from workshops.models import KnowledgeDomain, Person, Qualification
from workshops.tests.base import TestBase
class TestAutoUpdateProfile(TestBase):
def setUp(self):
self._setUpAirports()
self._setUpLessons()
self._setUpLanguages()
self.user = Person.objects.create_user(
username="user",
personal="",
family="",
email="<EMAIL>",
password="<PASSWORD>",
)
self.person_consent_required_terms(self.user)
Qualification.objects.create(person=self.user, lesson=self.git)
Qualification.objects.create(person=self.user, lesson=self.sql)
self.physics = KnowledgeDomain.objects.create(name="physics")
self.chemistry = KnowledgeDomain.objects.create(name="chemistry")
self.user.domains.add(self.physics)
self.user.languages.add(self.english)
self.user.languages.add(self.french)
self.client.login(username="user", password="<PASSWORD>")
def test_load_form(self):
rv = self.client.get(reverse("autoupdate_profile"))
self.assertEqual(rv.status_code, 200)
def test_update_profile(self):
term_slugs = [
"may-contact",
"may-publish-name",
"public-profile",
]
terms_by_term_slug = {
term.slug: term
for term in Term.objects.filter(slug__in=term_slugs)
.active()
.prefetch_active_options()
}
consent_data = {
f"consents-{slug}": terms_by_term_slug[slug].active_options[0].pk
for slug in term_slugs
}
data = {
"personal": "admin",
"middle": "",
"family": "Smith",
"email": "<EMAIL>",
"gender": Person.UNDISCLOSED,
"airport": self.airport_0_0.pk,
"github": "changed",
"twitter": "",
"url": "",
"username": "changed",
"affiliation": "",
"languages": [self.latin.pk, self.french.pk],
"domains": [self.chemistry.pk],
"lessons": [self.git.pk, self.matlab.pk],
"consents-person": self.user.pk,
**consent_data,
}
rv = self.client.post(reverse("autoupdate_profile"), data, follow=True)
self.assertEqual(rv.status_code, 200)
content = rv.content.decode("utf-8")
self.assertNotIn("Fix errors below", content)
self.user.refresh_from_db()
self.assertEqual(self.user.username, "user") # username is read-only
self.assertEqual(self.user.github, None) # github is read-only
self.assertEqual(self.user.family, "Smith")
self.assertEqual(set(self.user.lessons.all()), {self.git, self.matlab})
self.assertEqual(list(self.user.domains.all()), [self.chemistry])
self.assertEqual(set(self.user.languages.all()), {self.french, self.latin})
updated_consents_by_term_slug = {
consent.term.slug: consent
for consent in Consent.objects.filter(
term__slug__in=term_slugs, person=self.user
)
.active()
.select_related("term")
}
for slug in term_slugs:
self.assertEqual(
updated_consents_by_term_slug[slug].term_option.pk,
consent_data[f"consents-{slug}"],
)
| 2.1875 | 2 |
bot/recognizer_bot/yolo/common/utils.py | kprokofi/animal-recognition-with-voice | 1 | 5267 | import numpy as np
import time
import cv2
import colorsys
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Activation, ReLU, Multiply
# Custom objects from backbones package https://github.com/david8862/keras-YOLOv3-model-set/tree/master/common/backbones
def mish(x):
return x * K.tanh(K.softplus(x))
def hard_swish(x):
return Multiply()([Activation(hard_sigmoid)(x), x])
def hard_sigmoid(x):
return ReLU(6.)(x + 3.) * (1. / 6.)
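# In equation form: hard_sigmoid(x) = ReLU6(x + 3) / 6 and hard_swish(x) = x * hard_sigmoid(x),
# piecewise-linear approximations of sigmoid and swish.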
def swish(x):
"""Swish activation function.
# Arguments
x: Input tensor.
# Returns
The Swish activation: `x * sigmoid(x)`.
# References
[Searching for Activation Functions](https://arxiv.org/abs/1710.05941)
"""
if K.backend() == 'tensorflow':
try:
# The native TF implementation has a more
# memory-efficient gradient implementation
return K.tf.nn.swish(x)
except AttributeError:
pass
return x * K.sigmoid(x)
def get_custom_objects():
'''
form up a custom_objects dict so that the customized
layer/function call could be correctly parsed when keras
.h5 model is loading or converting
'''
custom_objects_dict = {
'tf': tf,
'swish': swish,
'hard_sigmoid': hard_sigmoid,
'hard_swish': hard_swish,
'mish': mish
}
return custom_objects_dict
def get_multiscale_list():
input_shape_list = [(320, 320), (352, 352), (384, 384), (416, 416),
(448, 448), (480, 480), (512, 512), (544, 544), (576, 576), (608, 608)]
return input_shape_list
def resize_anchors(base_anchors, target_shape, base_shape=(416, 416)):
'''
original anchor size is clustered from COCO dataset
under input shape (416,416). We need to resize it to
our train input shape for better performance
'''
return np.around(base_anchors*target_shape[::-1]/base_shape[::-1])
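# Illustrative example (anchor values assumed): resize_anchors(np.array([[10, 13], [16, 30]]),
# target_shape=(608, 608)) scales each anchor by 608/416 and rounds, giving [[15., 19.], [23., 44.]].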
def get_classes(classes_path):
'''loads the classes'''
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
return class_names
def get_anchors(anchors_path):
'''loads the anchors from a file'''
with open(anchors_path) as f:
anchors = f.readline()
anchors = [float(x) for x in anchors.split(',')]
return np.array(anchors).reshape(-1, 2)
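# The anchors file is expected to hold a single line of comma-separated floats,
# e.g. "10,13, 16,30, 33,23" (illustrative), which is reshaped into an (N, 2) array.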
def get_colors(class_names):
# Generate colors for drawing bounding boxes.
hsv_tuples = [(x / len(class_names), 1., 1.)
for x in range(len(class_names))]
colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
colors = list(
map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
colors))
np.random.seed(10101) # Fixed seed for consistent colors across runs.
# Shuffle colors to decorrelate adjacent classes.
np.random.shuffle(colors)
np.random.seed(None) # Reset seed to default.
return colors
def get_dataset(annotation_file, shuffle=True):
with open(annotation_file) as f:
lines = f.readlines()
lines = [line.strip() for line in lines]
if shuffle:
np.random.seed(int(time.time()))
np.random.shuffle(lines)
# np.random.seed(None)
return lines
def draw_label(image, text, color, coords):
font = cv2.FONT_HERSHEY_PLAIN
font_scale = 1.
(text_width, text_height) = cv2.getTextSize(
text, font, fontScale=font_scale, thickness=1)[0]
padding = 5
rect_height = text_height + padding * 2
rect_width = text_width + padding * 2
(x, y) = coords
cv2.rectangle(image, (x, y), (x + rect_width,
y - rect_height), color, cv2.FILLED)
cv2.putText(image, text, (x + padding, y - text_height + padding), font,
fontScale=font_scale,
color=(255, 255, 255),
lineType=cv2.LINE_AA)
return image
def draw_boxes(image, boxes, classes, scores, class_names, colors, show_score=True):
if boxes is None or len(boxes) == 0:
return image
if classes is None or len(classes) == 0:
return image
for box, cls, score in zip(boxes, classes, scores):
xmin, ymin, xmax, ymax = map(int, box)
class_name = class_names[cls]
if show_score:
label = '{} {:.2f}'.format(class_name, score)
else:
label = '{}'.format(class_name)
#print(label, (xmin, ymin), (xmax, ymax))
# if no color info, use black(0,0,0)
if colors is None:
color = (0, 0, 0)
else:
color = colors[cls]
cv2.rectangle(image, (xmin, ymin), (xmax, ymax), color, 1, cv2.LINE_AA)
image = draw_label(image, label, color, (xmin, ymin))
return image
| 2.859375 | 3 |
ros/src/tl_detector/light_classification/tl_classifier.py | PhilippHafe/CarND-Capstone | 0 | 5268 | <reponame>PhilippHafe/CarND-Capstone
from styx_msgs.msg import TrafficLight
import tensorflow as tf
import numpy as np
import datetime
class TLClassifier(object):
def __init__(self):
PATH_TO_CKPT = "light_classification/frozen_inference_graph.pb"
self.graph = tf.Graph()
self.threshold = 0.5
with self.graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name = '')
self.image_tensor = self.graph.get_tensor_by_name('image_tensor:0')
self.boxes = self.graph.get_tensor_by_name('detection_boxes:0')
self.scores = self.graph.get_tensor_by_name('detection_scores:0')
self.classes = self.graph.get_tensor_by_name('detection_classes:0')
self.num_detections = self.graph.get_tensor_by_name('num_detections:0')
self.sess = tf.Session(graph=self.graph)
def get_classification(self, image):
"""Determines the color of the traffic light in the image
Args:
image (cv::Mat): image containing the traffic light
Returns:
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
#TODO implement light color prediction
with self.graph.as_default():
image_np_expanded = np.expand_dims(image, axis=0)
start = datetime.datetime.now()
(boxes, scores, classes, num) = self.sess.run(
[self.boxes, self.scores, self.classes, self.num_detections],
feed_dict={self.image_tensor: image_np_expanded})
end = datetime.datetime.now()
dur = end - start
#print(dur.total_seconds())
boxes = np.squeeze(boxes)
classes = np.squeeze(classes).astype(np.int32)
scores = np.squeeze(scores)
if len(scores)>0 and np.max(scores)>self.threshold:
detected_class = int(classes[np.argmax(scores)])
else:
detected_class = 4
if detected_class == 1:
print('Classes: {}, Green. Detection Duration {}'.format(TrafficLight.GREEN,dur.total_seconds()))
return TrafficLight.GREEN
elif detected_class == 2:
print('Classes: {}, Red Detection Duration {}'.format(TrafficLight.RED,dur.total_seconds()))
return TrafficLight.RED
elif detected_class == 3:
            print('Classes: {}, Yellow. Detection Duration {}'.format(TrafficLight.YELLOW, dur.total_seconds()))
return TrafficLight.YELLOW
return TrafficLight.UNKNOWN
| 2.640625 | 3 |
testGMDS.py | ctralie/SiRPyGL | 7 | 5269 | #Based off of http://wiki.wxpython.org/GLCanvas
#Lots of help from http://wiki.wxpython.org/Getting%20Started
from OpenGL.GL import *
import wx
from wx import glcanvas
from Primitives3D import *
from PolyMesh import *
from LaplacianMesh import *
from Geodesics import *
from PointCloud import *
from Cameras3D import *
from ICP import *
from sys import exit, argv
import random
import numpy as np
import scipy.io as sio
from pylab import cm
import os
import subprocess
import math
import time
from sklearn import manifold  # required by the MDS buttons (MDSMesh1 / MDSMesh2)
from GMDS import *
DEFAULT_SIZE = wx.Size(1200, 800)
DEFAULT_POS = wx.Point(10, 10)
PRINCIPAL_AXES_SCALEFACTOR = 1
def saveImageGL(mvcanvas, filename):
view = glGetIntegerv(GL_VIEWPORT)
img = wx.EmptyImage(view[2], view[3] )
pixels = glReadPixels(0, 0, view[2], view[3], GL_RGB,
GL_UNSIGNED_BYTE)
img.SetData( pixels )
img = img.Mirror(False)
img.SaveFile(filename, wx.BITMAP_TYPE_PNG)
def saveImage(canvas, filename):
s = wx.ScreenDC()
w, h = canvas.size.Get()
b = wx.EmptyBitmap(w, h)
m = wx.MemoryDCFromDC(s)
m.SelectObject(b)
m.Blit(0, 0, w, h, s, 70, 0)
m.SelectObject(wx.NullBitmap)
b.SaveFile(filename, wx.BITMAP_TYPE_PNG)
class MeshViewerCanvas(glcanvas.GLCanvas):
def __init__(self, parent):
attribs = (glcanvas.WX_GL_RGBA, glcanvas.WX_GL_DOUBLEBUFFER, glcanvas.WX_GL_DEPTH_SIZE, 24)
glcanvas.GLCanvas.__init__(self, parent, -1, attribList = attribs)
self.context = glcanvas.GLContext(self)
self.parent = parent
#Camera state variables
self.size = self.GetClientSize()
#self.camera = MouseSphericalCamera(self.size.x, self.size.y)
self.camera = MousePolarCamera(self.size.width, self.size.height)
#Main state variables
self.MousePos = [0, 0]
self.initiallyResized = False
self.bbox = BBox3D()
self.unionbbox = BBox3D()
random.seed()
#Face mesh variables and manipulation variables
self.mesh1 = None
self.mesh1Dist = None
self.mesh1DistLoaded = False
self.mesh2 = None
self.mesh2DistLoaded = False
self.mesh2Dist = None
self.mesh3 = None
#Holds the transformations of the best iteration in ICP
self.transformations = []
self.savingMovie = False
self.movieIter = 0
self.displayMeshFaces = True
self.displayMeshEdges = False
self.displayMeshVertices = False
self.displayMeshNormals = False
self.displayPrincipalAxes = False
self.vertexColors = np.zeros(0)
self.cutPlane = None
self.displayCutPlane = False
self.GLinitialized = False
#GL-related events
wx.EVT_ERASE_BACKGROUND(self, self.processEraseBackgroundEvent)
wx.EVT_SIZE(self, self.processSizeEvent)
wx.EVT_PAINT(self, self.processPaintEvent)
#Mouse Events
wx.EVT_LEFT_DOWN(self, self.MouseDown)
wx.EVT_LEFT_UP(self, self.MouseUp)
wx.EVT_RIGHT_DOWN(self, self.MouseDown)
wx.EVT_RIGHT_UP(self, self.MouseUp)
wx.EVT_MIDDLE_DOWN(self, self.MouseDown)
wx.EVT_MIDDLE_UP(self, self.MouseUp)
wx.EVT_MOTION(self, self.MouseMotion)
#self.initGL()
def initPointCloud(self, pointCloud):
self.pointCloud = pointCloud
def centerOnMesh1(self, evt):
if not self.mesh1:
return
self.bbox = self.mesh1.getBBox()
self.camera.centerOnBBox(self.bbox, theta = -math.pi/2, phi = math.pi/2)
self.Refresh()
def centerOnMesh2(self, evt):
if not self.mesh2:
return
self.bbox = self.mesh2.getBBox()
self.camera.centerOnBBox(self.bbox, theta = -math.pi/2, phi = math.pi/2)
self.Refresh()
def centerOnBoth(self, evt):
if not self.mesh1 or not self.mesh2:
return
self.bbox = self.mesh1.getBBox()
self.bbox.Union(self.mesh2.getBBox())
self.camera.centerOnBBox(self.bbox, theta = -math.pi/2, phi = math.pi/2)
self.Refresh()
def MDSMesh1(self, evt):
if not self.mesh1:
print "ERROR: Mesh 1 not loaded yet"
return
if not self.mesh1DistLoaded:
print "ERROR: Mesh 1 distance matrix not loaded"
return
mds = manifold.MDS(n_components=2, dissimilarity="precomputed", n_jobs=1)
print "Doing MDS on mesh 1...."
pos = mds.fit(self.mesh1Dist).embedding_
print "Finished MDS on mesh 1"
for i in range(pos.shape[0]):
self.mesh1.vertices[i].pos = Point3D(pos[i, 0], pos[i, 1], pos[i, 2])
self.mesh1.needsDisplayUpdate = True
self.Refresh()
def MDSMesh2(self, evt):
if not self.mesh2:
print "ERROR: Mesh 2 not loaded yet"
return
if not self.mesh2DistLoaded:
print "ERROR: Mesh 2 distance matrix not loaded"
return
mds = manifold.MDS(n_components=2, dissimilarity="precomputed", n_jobs=1)
print "Doing MDS on mesh 2..."
pos = mds.fit(self.mesh2Dist).embedding_
print "Finished MDS on mesh 2"
for i in range(pos.shape[0]):
self.mesh2.vertices[i].pos = Point3D(pos[i, 0], pos[i, 1], pos[i, 2])
self.mesh2.needsDisplayUpdate = True
self.Refresh()
def doGMDS(self, evt):
if self.mesh1 and self.mesh2:
if not self.mesh1DistLoaded:
print "Mesh 1 distance not loaded"
return
if not self.mesh2DistLoaded:
print "Mesh 2 distance not loaded"
return
N = len(self.mesh1.vertices)
VX = np.zeros((N, 3))
for i in range(N):
V = self.mesh1.vertices[i].pos
VX[i, :] = np.array([V.x, V.y, V.z])
print "Doing GMDS..."
t, u = GMDSPointsToMesh(VX, self.mesh1Dist, self.mesh2, self.mesh2Dist)
print "Finished GMDS"
#Update the vertices based on the triangles where they landed
#and the barycentric coordinates of those triangles
for i in range(N):
Vs = [v.pos for v in self.mesh2.faces[int(t[i].flatten()[0])].getVertices()]
pos = Point3D(0, 0, 0)
for k in range(3):
pos = pos + u[i, k]*Vs[k]
self.mesh1.vertices[i].pos = pos
self.mesh1.needsDisplayUpdate = True
else:
print "ERROR: One or both meshes have not been loaded yet"
self.Refresh()
def displayMeshFacesCheckbox(self, evt):
self.displayMeshFaces = evt.Checked()
self.Refresh()
def displayMeshEdgesCheckbox(self, evt):
self.displayMeshEdges = evt.Checked()
self.Refresh()
def displayCutPlaneCheckbox(self, evt):
self.displayCutPlane = evt.Checked()
self.Refresh()
def displayMeshVerticesCheckbox(self, evt):
self.displayMeshVertices = evt.Checked()
self.Refresh()
def displayPrincipalAxesCheckbox(self, evt):
self.displayPrincipalAxes = evt.Checked()
self.Refresh()
def processEraseBackgroundEvent(self, event): pass #avoid flashing on MSW.
def processSizeEvent(self, event):
self.size = self.GetClientSize()
self.SetCurrent(self.context)
glViewport(0, 0, self.size.width, self.size.height)
if not self.initiallyResized:
#The canvas gets resized once on initialization so the camera needs
#to be updated accordingly at that point
self.camera = MousePolarCamera(self.size.width, self.size.height)
self.camera.centerOnBBox(self.bbox, math.pi/2, math.pi/2)
self.initiallyResized = True
def processPaintEvent(self, event):
dc = wx.PaintDC(self)
self.SetCurrent(self.context)
if not self.GLinitialized:
self.initGL()
self.GLinitialized = True
self.repaint()
def repaint(self):
#Set up projection matrix
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
farDist = (self.camera.eye - self.bbox.getCenter()).Length()*2
#This is to make sure we can see on the inside
farDist = max(farDist, self.unionbbox.getDiagLength()*2)
nearDist = farDist/50.0
gluPerspective(180.0*self.camera.yfov/M_PI, float(self.size.x)/self.size.y, nearDist, farDist)
#Set up modelview matrix
self.camera.gotoCameraFrame()
glClearColor(0.0, 0.0, 0.0, 0.0)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glLightfv(GL_LIGHT0, GL_POSITION, [3.0, 4.0, 5.0, 0.0]);
glLightfv(GL_LIGHT1, GL_POSITION, [-3.0, -2.0, -3.0, 0.0]);
glEnable(GL_LIGHTING)
glMaterialfv(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE, [0.8, 0.8, 0.8, 1.0]);
glMaterialfv(GL_FRONT_AND_BACK, GL_SPECULAR, [0.2, 0.2, 0.2, 1.0])
glMaterialfv(GL_FRONT_AND_BACK, GL_SHININESS, 64)
if self.mesh1:
self.mesh1.renderGL(True, True, False, False, None)
if self.mesh2:
self.mesh2.renderGL(self.displayMeshEdges, self.displayMeshVertices, self.displayMeshNormals, self.displayMeshFaces, None)
self.SwapBuffers()
def initGL(self):
glLightModelfv(GL_LIGHT_MODEL_AMBIENT, [0.2, 0.2, 0.2, 1.0])
glLightModeli(GL_LIGHT_MODEL_LOCAL_VIEWER, GL_TRUE)
glLightfv(GL_LIGHT0, GL_DIFFUSE, [1.0, 1.0, 1.0, 1.0])
glEnable(GL_LIGHT0)
glLightfv(GL_LIGHT1, GL_DIFFUSE, [0.5, 0.5, 0.5, 1.0])
glEnable(GL_LIGHT1)
glEnable(GL_NORMALIZE)
glEnable(GL_LIGHTING)
glEnable(GL_DEPTH_TEST)
def handleMouseStuff(self, x, y):
#Invert y from what the window manager says
y = self.size.height - y
self.MousePos = [x, y]
def MouseDown(self, evt):
x, y = evt.GetPosition()
self.CaptureMouse()
self.handleMouseStuff(x, y)
self.Refresh()
def MouseUp(self, evt):
x, y = evt.GetPosition()
self.handleMouseStuff(x, y)
self.ReleaseMouse()
self.Refresh()
def MouseMotion(self, evt):
x, y = evt.GetPosition()
[lastX, lastY] = self.MousePos
self.handleMouseStuff(x, y)
dX = self.MousePos[0] - lastX
dY = self.MousePos[1] - lastY
if evt.Dragging():
if evt.MiddleIsDown():
self.camera.translate(dX, dY)
elif evt.RightIsDown():
self.camera.zoom(-dY)#Want to zoom in as the mouse goes up
elif evt.LeftIsDown():
self.camera.orbitLeftRight(dX)
self.camera.orbitUpDown(dY)
self.Refresh()
class MeshViewerFrame(wx.Frame):
(ID_LOADDATASET1, ID_LOADDATASET2, ID_SAVEDATASET, ID_SAVESCREENSHOT) = (1, 2, 3, 4)
def __init__(self, parent, id, title, pos=DEFAULT_POS, size=DEFAULT_SIZE, style=wx.DEFAULT_FRAME_STYLE, name = 'GLWindow', mesh1 = None, mesh2 = None):
style = style | wx.NO_FULL_REPAINT_ON_RESIZE
super(MeshViewerFrame, self).__init__(parent, id, title, pos, size, style, name)
#Initialize the menu
self.CreateStatusBar()
self.size = size
self.pos = pos
print "MeshViewerFrameSize = %s, pos = %s"%(self.size, self.pos)
filemenu = wx.Menu()
menuOpenMesh1 = filemenu.Append(MeshViewerFrame.ID_LOADDATASET1, "&Load Mesh1","Load a polygon mesh")
self.Bind(wx.EVT_MENU, self.OnLoadMesh1, menuOpenMesh1)
menuOpenMesh2 = filemenu.Append(MeshViewerFrame.ID_LOADDATASET2, "&Load Mesh2","Load a polygon mesh")
self.Bind(wx.EVT_MENU, self.OnLoadMesh2, menuOpenMesh2)
menuSaveScreenshot = filemenu.Append(MeshViewerFrame.ID_SAVESCREENSHOT, "&Save Screenshot", "Save a screenshot of the GL Canvas")
self.Bind(wx.EVT_MENU, self.OnSaveScreenshot, menuSaveScreenshot)
menuExit = filemenu.Append(wx.ID_EXIT,"E&xit"," Terminate the program")
self.Bind(wx.EVT_MENU, self.OnExit, menuExit)
# Creating the menubar.
menuBar = wx.MenuBar()
menuBar.Append(filemenu,"&File") # Adding the "filemenu" to the MenuBar
self.SetMenuBar(menuBar) # Adding the MenuBar to the Frame content.
self.glcanvas = MeshViewerCanvas(self)
self.glcanvas.mesh1 = None
self.glcanvas.mesh2 = None
if mesh1:
(self.glcanvas.mesh1, self.glcanvas.mesh1Dist) = self.loadMesh(mesh1)
if self.glcanvas.mesh1Dist.shape[0] > 0:
self.glcanvas.mesh1DistLoaded = True
else:
self.glcanvas.mesh1DistLoaded = False
if mesh2:
(self.glcanvas.mesh2, self.glcanvas.mesh2Dist) = self.loadMesh(mesh2)
if self.glcanvas.mesh2Dist.shape[0] > 0:
self.glcanvas.mesh2DistLoaded = True
else:
self.glcanvas.mesh2DistLoaded = False
self.rightPanel = wx.BoxSizer(wx.VERTICAL)
#Buttons to go to a default view
viewPanel = wx.BoxSizer(wx.HORIZONTAL)
center1Button = wx.Button(self, -1, "Mesh1")
self.Bind(wx.EVT_BUTTON, self.glcanvas.centerOnMesh1, center1Button)
viewPanel.Add(center1Button, 0, wx.EXPAND)
center2Button = wx.Button(self, -1, "Mesh2")
self.Bind(wx.EVT_BUTTON, self.glcanvas.centerOnMesh2, center2Button)
viewPanel.Add(center2Button, 0, wx.EXPAND)
bothButton = wx.Button(self, -1, "Both")
self.Bind(wx.EVT_BUTTON, self.glcanvas.centerOnBoth, bothButton)
viewPanel.Add(bothButton, 0, wx.EXPAND)
self.rightPanel.Add(wx.StaticText(self, label="Views"), 0, wx.EXPAND)
self.rightPanel.Add(viewPanel, 0, wx.EXPAND)
#Buttons for MDS
MDSPanel = wx.BoxSizer(wx.HORIZONTAL)
MDS1Button = wx.Button(self, -1, "MDS Mesh1")
self.Bind(wx.EVT_BUTTON, self.glcanvas.MDSMesh1, MDS1Button)
MDSPanel.Add(MDS1Button, 0, wx.EXPAND)
MDS2Button = wx.Button(self, -1, "MDS Mesh2")
self.Bind(wx.EVT_BUTTON, self.glcanvas.MDSMesh2, MDS2Button)
MDSPanel.Add(MDS2Button, 0, wx.EXPAND)
self.rightPanel.Add(wx.StaticText(self, label="MDS on Meshes"), 0, wx.EXPAND)
self.rightPanel.Add(MDSPanel, 0, wx.EXPAND)
#Checkboxes for displaying data
self.displayMeshFacesCheckbox = wx.CheckBox(self, label = "Display Mesh Faces")
self.displayMeshFacesCheckbox.SetValue(True)
self.Bind(wx.EVT_CHECKBOX, self.glcanvas.displayMeshFacesCheckbox, self.displayMeshFacesCheckbox)
self.rightPanel.Add(self.displayMeshFacesCheckbox, 0, wx.EXPAND)
self.displayMeshEdgesCheckbox = wx.CheckBox(self, label = "Display Mesh Edges")
self.displayMeshEdgesCheckbox.SetValue(False)
self.Bind(wx.EVT_CHECKBOX, self.glcanvas.displayMeshEdgesCheckbox, self.displayMeshEdgesCheckbox)
self.rightPanel.Add(self.displayMeshEdgesCheckbox, 0, wx.EXPAND)
self.displayMeshVerticesCheckbox = wx.CheckBox(self, label = "Display Mesh Points")
self.displayMeshVerticesCheckbox.SetValue(False)
self.Bind(wx.EVT_CHECKBOX, self.glcanvas.displayMeshVerticesCheckbox, self.displayMeshVerticesCheckbox)
self.rightPanel.Add(self.displayMeshVerticesCheckbox)
#Button for doing ICP
GMDSButton = wx.Button(self, -1, "DO GMDS")
self.Bind(wx.EVT_BUTTON, self.glcanvas.doGMDS, GMDSButton)
self.rightPanel.Add(GMDSButton, 0, wx.EXPAND)
#Finally add the two main panels to the sizer
self.sizer = wx.BoxSizer(wx.HORIZONTAL)
self.sizer.Add(self.glcanvas, 2, wx.EXPAND)
self.sizer.Add(self.rightPanel, 0, wx.EXPAND)
self.SetSizer(self.sizer)
self.Layout()
self.Show()
def loadMesh(self, filepath):
print "Loading mesh %s..."%filepath
mesh = LaplacianMesh()
mesh.loadFile(filepath)
print "Finished loading mesh 1\n %s"%mesh
#Now try to load in the distance matrix
fileName, fileExtension = os.path.splitext(filepath)
matfile = sio.loadmat("%s.mat"%fileName)
D = np.array([])
if 'D' in matfile:
D = matfile['D']
else:
print "ERROR: No distance matrix found for mesh %s"%filepath
return (mesh, D)
def OnLoadMesh1(self, evt):
dlg = wx.FileDialog(self, "Choose a file", ".", "", "OBJ files (*.obj)|*.obj|OFF files (*.off)|*.off", wx.OPEN)
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetFilename()
dirname = dlg.GetDirectory()
filepath = os.path.join(dirname, filename)
print dirname
(self.glcanvas.mesh1, self.glcanvas.mesh1Dist) = self.loadMesh(filepath)
self.glcanvas.bbox = self.glcanvas.mesh1.getBBox()
print "Mesh BBox: %s\n"%self.glcanvas.bbox
self.glcanvas.camera.centerOnBBox(self.glcanvas.bbox, theta = -math.pi/2, phi = math.pi/2)
#Now try to load in the distance matrix
if self.glcanvas.mesh1Dist.shape[0] > 0:
self.glcanvas.mesh1DistLoaded = True
self.glcanvas.Refresh()
dlg.Destroy()
return
def OnLoadMesh2(self, evt):
dlg = wx.FileDialog(self, "Choose a file", ".", "", "OBJ files (*.obj)|*.obj|OFF files (*.off)|*.off", wx.OPEN)
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetFilename()
dirname = dlg.GetDirectory()
filepath = os.path.join(dirname, filename)
print dirname
(self.glcanvas.mesh2, self.glcanvas.mesh2Dist) = self.loadMesh(filepath)
self.glcanvas.bbox = self.glcanvas.mesh2.getBBox()
print "Mesh BBox: %s\n"%self.glcanvas.bbox
self.glcanvas.camera.centerOnBBox(self.glcanvas.bbox, theta = -math.pi/2, phi = math.pi/2)
#Now try to load in the distance matrix
if self.glcanvas.mesh2Dist.shape[0] > 0:
self.glcanvas.mesh2DistLoaded = True
self.glcanvas.Refresh()
dlg.Destroy()
return
def OnSaveScreenshot(self, evt):
dlg = wx.FileDialog(self, "Choose a file", ".", "", "*", wx.SAVE)
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetFilename()
dirname = dlg.GetDirectory()
filepath = os.path.join(dirname, filename)
saveImageGL(self.glcanvas, filepath)
dlg.Destroy()
return
def OnExit(self, evt):
self.Close(True)
return
class MeshViewer(object):
def __init__(self, m1 = None, m2 = None):
app = wx.App()
frame = MeshViewerFrame(None, -1, 'MeshViewer', mesh1 = m1, mesh2 = m2)
frame.Show(True)
app.MainLoop()
app.Destroy()
if __name__ == '__main__':
m1 = None
m2 = None
if len(argv) >= 3:
m1 = argv[1]
m2 = argv[2]
viewer = MeshViewer(m1, m2)
| 2.078125 | 2 |
PySS/fem.py | manpan-1/PySS | 2 | 5270 | import matplotlib.pyplot as plt
import numpy as np
import pickle
# import csv
# from collections import namedtuple
# from mpl_toolkits.mplot3d import Axes3D
# import matplotlib.animation as animation
# import matplotlib.colors as mc
class FEModel:
def __init__(self, name=None, hist_data=None):
self.name = name
self.hist_outs = hist_data
def tuple2dict(self, data):
"""
Used to convert the load-displacement data exported from models to a dictionary
"""
ld_data = []
for specimen in data:
sp_dict = dict()
load = []
disp = []
for action in specimen[0]:
load.append(action[1])
for action in specimen[1]:
disp.append(action[1])
sp_dict["Load"] = np.array(load)
sp_dict["Disp"] = -1 * np.array(disp)
            ld_data.append(sp_dict)

        return ld_data
def plot_history(self, x_axis, y_axis):
"""
        Plot the recorded history output ``y_axis`` against ``x_axis``.
"""
plt.figure()
plt.plot(self.hist_outs[x_axis], self.hist_outs[y_axis])
@classmethod
def from_hist_pkl(cls, filename):
"""
Creates an object and imports history output data.
"""
with open(filename, "rb") as fh:
history_data = pickle.load(fh)
return cls(name=filename, hist_data=history_data)
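    # Illustrative usage (file name and history keys are assumptions):
    #   model = FEModel.from_hist_pkl("results/job1_hist.pkl")
    #   model.plot_history("displacement", "force")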
#
# class ParametricDB:
# def __init__(self, dimensions, responses):
# self.responses = responses
# self.dimensions = dimensions
#
# @classmethod
# def from_file(cls, filename):
# """
# Create from file.
#
# The file should be comma separated, first row titles, subsequent rows only numbers.
#
# Parameters
# ----------
# filename : str
# Relative path/filename.
#
# Return
# ------
# ParametricDB
#
# """
# # with open(filename, 'rU') as infile:
# # reader = csv.reader(infile)
# # n_dim = int(next(reader)[0].split()[0])
# # db = {c[0]: c[1:] for c in zip(*reader)}
#
# with open(filename, 'rU') as infile:
# reader = csv.reader(infile, delimiter=";")
# n_dim = int(next(reader)[0].split()[0])
# db = [c for c in zip(*reader)]
#
# all_responses = {i[0]: i[1:] for i in db[n_dim:]}
#
# dim_ticks = np.array([i[1:] for i in db[:n_dim]]).T
# dim_lengths = [len(set(dim_ticks[:, i])) for i in range(n_dim)]
# dim_names = [db[i][0] for i in range(n_dim)]
#
# # with open(filename, 'r') as infile:
# # all_lines = [[c.split(sep=":")[0]] + c.split(sep=":")[1].split(sep=",") for c in infile]
# # db = {c[0]: c[1:] for c in zip(*all_lines)}
#
# # for key in db.keys():
# # if len(key.split(",")) > 1:
# # n_dim = len(key.split(","))
# # dim_str = key
# # dim_ticks = np.array([c.split(sep=",") for c in db[dim_str]])
# # dim_lengths = [len(set(dim_ticks[:, i])) for i in range(n_dim)]
# # dim_names = dim_str.split(sep=",")
# full_list = {i[0]: i[1:][0] for i in zip(dim_names, dim_ticks.T)}
#
# # del db[dim_str]
#
# #df = pd.DataFrame(full_dict)
#
# Address = namedtuple("map", " ".join(dim_names))
# args = [tuple(sorted(set(dim_ticks[:, i]))) for i, j in enumerate(dim_names)]
# addressbook = Address(*args)
#
# mtx = {i: np.empty(dim_lengths) for i in all_responses.keys()}
# for response in all_responses.keys():
# for i, response_value in enumerate(all_responses[response]):
# current_idx = tuple(addressbook[idx].index(full_list[name][i]) for idx, name in enumerate(dim_names))
# mtx[response][current_idx] = response_value
# mtx[response].flags.writeable = False
#
# return cls(addressbook, mtx)
#
# def get_slice(self, slice_at, response):
# """
# Get a slice of the database.
#
# Parameters
# ----------
# slice_at : dict of int
# A dictionary of the keys to be sliced at the assigned values.
# response : str
# The name of the requested response to be sliced.
#
# """
#
# idx_arr = [0]*len(self.dimensions)
#
# for key in self.dimensions._fields:
# if key not in slice_at.keys():
# idx_arr[self.get_idx(key)] = slice(None, None)
# for name, value in zip(slice_at.keys(), slice_at.values()):
# idx_arr[self.get_idx(name)] = value
#
# return self.responses[response][idx_arr]
#
# def get_idx(self, attrname):
# """
# Get the index number of a parameter (dimension) in the database.
#
# Parameters
# ----------
# attrname : str
#
# """
# return(self.dimensions.index(self.dimensions.__getattribute__(attrname)))
#
# def contour_2d(self, slice_at, response, transpose=False, fig=None, sbplt=None):
# """
# Contour plot.
# :param slice_at:
# :return:
# """
# plt.rc('text', usetex=True)
# if fig is None:
# fig = plt.figure()
# if sbplt is None:
# ax = fig.add_subplot(111)
# else:
# ax = fig.add_subplot(sbplt)
# else:
# if sbplt is None:
# ax = fig.add_subplot(111)
# else:
# ax = fig.add_subplot(sbplt)
#
# axes = [key for key in self.dimensions._fields if key not in slice_at.keys()]
#
# if transpose:
# X, Y = np.meshgrid(self.dimensions[self.get_idx(axes[1])], self.dimensions[self.get_idx(axes[0])])
# Z = self.get_slice(slice_at, response).T
# x_label, y_label = axes[1], axes[0]
# else:
# X, Y = np.meshgrid(self.dimensions[self.get_idx(axes[0])], self.dimensions[self.get_idx(axes[1])])
# Z = self.get_slice(slice_at, response)
# x_label, y_label = axes[0], axes[1]
#
# ttl_values = [self.dimensions[self.get_idx(i)][slice_at[i]] for i in slice_at.keys()]
#
# # levels = np.arange(0, 2., 0.025)
# # sbplt = ax.contour(X.astype(np.float), Y.astype(np.float), Z.T, vmin=0.4, vmax=1., levels=levels, cmap=plt.cm.inferno)
# sbplt = ax.contour(X.astype(np.float), Y.astype(np.float), Z.T, cmap=plt.cm.gray_r)
# sbplt2 = ax.contourf(X.astype(np.float), Y.astype(np.float), Z.T, cmap=plt.cm.inferno)
# plt.clabel(sbplt, inline=1, fontsize=10)
# ttl = [i for i in zip(slice_at.keys(), ttl_values)]
# ttl = ", ".join(["=".join(i) for i in ttl])
# ax.set_title("$" + response + "$" + " for : " + "$" + ttl + "$")
# ax.set_xlabel("$"+x_label+"$")
# ax.set_ylabel("$"+y_label+"$")
#
# return fig
#
# def surf_3d(self, slice_at, response, transpose=False, fig=None, sbplt=None):
# """
# Surface plot.
# :param slice_at:
# :return:
# """
# #Convenient window dimensions
# # one subplot:
# # 2 side by side: Bbox(x0=0.0, y0=0.0, x1=6.79, y1=2.57)
# # azim elev = -160 30
# # 3 subplots side by side
# # 4 subplots: Bbox(x0=0.0, y0=0.0, x1=6.43, y1=5.14)
# #azim elev -160 30
# plt.rc('text', usetex=True)
# if fig is None:
# fig = plt.figure()
# if sbplt is None:
# ax = fig.add_subplot(111, projection='3d')
# else:
# ax = fig.add_subplot(sbplt, projection='3d')
# else:
# if sbplt is None:
# ax = fig.add_subplot(111, projection='3d')
# else:
# ax = fig.add_subplot(sbplt, projection='3d')
#
#
# axes = [key for key in self.dimensions._fields if key not in slice_at.keys()]
#
# if transpose:
# X, Y = np.meshgrid(self.dimensions[self.get_idx(axes[1])], self.dimensions[self.get_idx(axes[0])])
# Z = self.get_slice(slice_at, response).T
# x_label, y_label = axes[1], axes[0]
# else:
# X, Y = np.meshgrid(self.dimensions[self.get_idx(axes[0])], self.dimensions[self.get_idx(axes[1])])
# Z = self.get_slice(slice_at, response)
# x_label, y_label = axes[0], axes[1]
#
# ttl_values = [self.dimensions[self.get_idx(i)][slice_at[i]] for i in slice_at.keys()]
#
# sbplt = ax.plot_surface(X.astype(np.float), Y.astype(np.float), Z.T, cmap=plt.cm.inferno)
# # plt.clabel(sbplt, inline=1, fontsize=10)
# ttl = [i for i in zip(slice_at.keys(), ttl_values)]
# ttl = ", ".join(["=".join(i) for i in ttl])
# ax.set_title("$" + response + "$" + " for : " + "$" + ttl + "$")
# ax.set_xlabel("$"+x_label+"$")
# ax.set_ylabel("$"+y_label+"$")
#
# return fig
#
# def match_viewports(fig=None):
# if fig is None:
# fig = plt.gcf()
# fig.axes[1].view_init(azim=fig.axes[0].azim, elev=fig.axes[0].elev)
def main():
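    # NOTE: this function depends on the ParametricDB class, which is commented out
    # above, so calling it as-is raises a NameError.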
lambda01 = ParametricDB.from_file("data/fem/fem-results_lambda01.dat")
fig, ax = plt.subplots(nrows=2, ncols=3)
fig.suptitle("fab_class: fcA, f_yield: 355 MPa, lambda_flex: 0.1")
lambda01.contour_2d({"plate_imp": 0, "fab_class": 0, "f_yield": 0}, "lpf", ax=ax[0, 0])
lambda01.contour_2d({"plate_imp": 1, "fab_class": 0, "f_yield": 0}, "lpf", ax=ax[0, 1])
lambda01.contour_2d({"plate_imp": 2, "fab_class": 0, "f_yield": 0}, "lpf", ax=ax[0, 2])
lambda01.contour_2d({"plate_imp": 3, "fab_class": 0, "f_yield": 0}, "lpf", ax=ax[1, 0])
lambda01.contour_2d({"plate_imp": 4, "fab_class": 0, "f_yield": 0}, "lpf", ax=ax[1, 1])
lambda01.contour_2d({"plate_imp": 5, "fab_class": 0, "f_yield": 0}, "lpf", ax=ax[1, 2])
fig, ax = plt.subplots(nrows=2, ncols=3)
fig.suptitle("fab_class: fcB, f_yield: 355 MPa, lambda_flex: 0.1")
lambda01.contour_2d({"plate_imp": 0, "fab_class": 1, "f_yield": 0}, "lpf", ax=ax[0, 0])
lambda01.contour_2d({"plate_imp": 1, "fab_class": 1, "f_yield": 0}, "lpf", ax=ax[0, 1])
lambda01.contour_2d({"plate_imp": 2, "fab_class": 1, "f_yield": 0}, "lpf", ax=ax[0, 2])
lambda01.contour_2d({"plate_imp": 3, "fab_class": 1, "f_yield": 0}, "lpf", ax=ax[1, 0])
lambda01.contour_2d({"plate_imp": 4, "fab_class": 1, "f_yield": 0}, "lpf", ax=ax[1, 1])
lambda01.contour_2d({"plate_imp": 5, "fab_class": 1, "f_yield": 0}, "lpf", ax=ax[1, 2])
fig, ax = plt.subplots(nrows=2, ncols=3)
fig.suptitle("fab_class: fcC, f_yield: 355 MPa, lambda_flex: 0.1")
lambda01.contour_2d({"plate_imp": 0, "fab_class": 2, "f_yield": 0}, "lpf", ax=ax[0, 0])
lambda01.contour_2d({"plate_imp": 1, "fab_class": 2, "f_yield": 0}, "lpf", ax=ax[0, 1])
lambda01.contour_2d({"plate_imp": 2, "fab_class": 2, "f_yield": 0}, "lpf", ax=ax[0, 2])
lambda01.contour_2d({"plate_imp": 3, "fab_class": 2, "f_yield": 0}, "lpf", ax=ax[1, 0])
lambda01.contour_2d({"plate_imp": 4, "fab_class": 2, "f_yield": 0}, "lpf", ax=ax[1, 1])
lambda01.contour_2d({"plate_imp": 5, "fab_class": 2, "f_yield": 0}, "lpf", ax=ax[1, 2])
fig, ax = plt.subplots(nrows=2, ncols=3)
fig.suptitle("fab_class: fcA, f_yield: 700 MPa, lambda_flex: 0.1")
lambda01.contour_2d({"plate_imp": 0, "fab_class": 0, "f_yield": 1}, "lpf", ax=ax[0, 0])
lambda01.contour_2d({"plate_imp": 1, "fab_class": 0, "f_yield": 1}, "lpf", ax=ax[0, 1])
lambda01.contour_2d({"plate_imp": 2, "fab_class": 0, "f_yield": 1}, "lpf", ax=ax[0, 2])
lambda01.contour_2d({"plate_imp": 3, "fab_class": 0, "f_yield": 1}, "lpf", ax=ax[1, 0])
lambda01.contour_2d({"plate_imp": 4, "fab_class": 0, "f_yield": 1}, "lpf", ax=ax[1, 1])
lambda01.contour_2d({"plate_imp": 5, "fab_class": 0, "f_yield": 1}, "lpf", ax=ax[1, 2])
fig, ax = plt.subplots(nrows=2, ncols=3)
fig.suptitle("fab_class: fcB, f_yield: 700 MPa, lambda_flex: 0.1")
lambda01.contour_2d({"plate_imp": 0, "fab_class": 1, "f_yield": 1}, "lpf", ax=ax[0, 0])
lambda01.contour_2d({"plate_imp": 1, "fab_class": 1, "f_yield": 1}, "lpf", ax=ax[0, 1])
lambda01.contour_2d({"plate_imp": 2, "fab_class": 1, "f_yield": 1}, "lpf", ax=ax[0, 2])
lambda01.contour_2d({"plate_imp": 3, "fab_class": 1, "f_yield": 1}, "lpf", ax=ax[1, 0])
lambda01.contour_2d({"plate_imp": 4, "fab_class": 1, "f_yield": 1}, "lpf", ax=ax[1, 1])
lambda01.contour_2d({"plate_imp": 5, "fab_class": 1, "f_yield": 1}, "lpf", ax=ax[1, 2])
fig, ax = plt.subplots(nrows=2, ncols=3)
fig.suptitle("fab_class: fcC, f_yield: 700 MPa, lambda_flex: 0.1")
lambda01.contour_2d({"plate_imp": 0, "fab_class": 2, "f_yield": 1}, "lpf", ax=ax[0, 0])
lambda01.contour_2d({"plate_imp": 1, "fab_class": 2, "f_yield": 1}, "lpf", ax=ax[0, 1])
lambda01.contour_2d({"plate_imp": 2, "fab_class": 2, "f_yield": 1}, "lpf", ax=ax[0, 2])
lambda01.contour_2d({"plate_imp": 3, "fab_class": 2, "f_yield": 1}, "lpf", ax=ax[1, 0])
lambda01.contour_2d({"plate_imp": 4, "fab_class": 2, "f_yield": 1}, "lpf", ax=ax[1, 1])
lambda01.contour_2d({"plate_imp": 5, "fab_class": 2, "f_yield": 1}, "lpf", ax=ax[1, 2])
lambda02 = ParametricDB.from_file("data/fem/fem-results-lambda02.dat")
fig, ax = plt.subplots(nrows=2, ncols=3)
fig.suptitle("fab_class: fcA, f_yield: 355 MPa, lambda_flex: 0.2")
lambda02.contour_2d({"plate_imp": 0, "fab_class": 0, "f_yield": 0}, "lpf", ax=ax[0, 0])
lambda02.contour_2d({"plate_imp": 1, "fab_class": 0, "f_yield": 0}, "lpf", ax=ax[0, 1])
lambda02.contour_2d({"plate_imp": 2, "fab_class": 0, "f_yield": 0}, "lpf", ax=ax[0, 2])
lambda02.contour_2d({"plate_imp": 3, "fab_class": 0, "f_yield": 0}, "lpf", ax=ax[1, 0])
lambda02.contour_2d({"plate_imp": 4, "fab_class": 0, "f_yield": 0}, "lpf", ax=ax[1, 1])
lambda02.contour_2d({"plate_imp": 5, "fab_class": 0, "f_yield": 0}, "lpf", ax=ax[1, 2])
fig, ax = plt.subplots(nrows=2, ncols=3)
fig.suptitle("fab_class: fcB, f_yield: 355 MPa, lambda_flex: 0.2")
lambda02.contour_2d({"plate_imp": 0, "fab_class": 1, "f_yield": 0}, "lpf", ax=ax[0, 0])
lambda02.contour_2d({"plate_imp": 1, "fab_class": 1, "f_yield": 0}, "lpf", ax=ax[0, 1])
lambda02.contour_2d({"plate_imp": 2, "fab_class": 1, "f_yield": 0}, "lpf", ax=ax[0, 2])
lambda02.contour_2d({"plate_imp": 3, "fab_class": 1, "f_yield": 0}, "lpf", ax=ax[1, 0])
lambda02.contour_2d({"plate_imp": 4, "fab_class": 1, "f_yield": 0}, "lpf", ax=ax[1, 1])
lambda02.contour_2d({"plate_imp": 5, "fab_class": 1, "f_yield": 0}, "lpf", ax=ax[1, 2])
fig, ax = plt.subplots(nrows=2, ncols=3)
fig.suptitle("fab_class: fcC, f_yield: 355 MPa, lambda_flex: 0.2")
lambda02.contour_2d({"plate_imp": 0, "fab_class": 2, "f_yield": 0}, "lpf", ax=ax[0, 0])
lambda02.contour_2d({"plate_imp": 1, "fab_class": 2, "f_yield": 0}, "lpf", ax=ax[0, 1])
lambda02.contour_2d({"plate_imp": 2, "fab_class": 2, "f_yield": 0}, "lpf", ax=ax[0, 2])
lambda02.contour_2d({"plate_imp": 3, "fab_class": 2, "f_yield": 0}, "lpf", ax=ax[1, 0])
lambda02.contour_2d({"plate_imp": 4, "fab_class": 2, "f_yield": 0}, "lpf", ax=ax[1, 1])
lambda02.contour_2d({"plate_imp": 5, "fab_class": 2, "f_yield": 0}, "lpf", ax=ax[1, 2])
fig, ax = plt.subplots(nrows=2, ncols=3)
fig.suptitle("fab_class: fcA, f_yield: 700 MPa, lambda_flex: 0.2")
lambda02.contour_2d({"plate_imp": 0, "fab_class": 0, "f_yield": 1}, "lpf", ax=ax[0, 0])
lambda02.contour_2d({"plate_imp": 1, "fab_class": 0, "f_yield": 1}, "lpf", ax=ax[0, 1])
lambda02.contour_2d({"plate_imp": 2, "fab_class": 0, "f_yield": 1}, "lpf", ax=ax[0, 2])
lambda02.contour_2d({"plate_imp": 3, "fab_class": 0, "f_yield": 1}, "lpf", ax=ax[1, 0])
lambda02.contour_2d({"plate_imp": 4, "fab_class": 0, "f_yield": 1}, "lpf", ax=ax[1, 1])
lambda02.contour_2d({"plate_imp": 5, "fab_class": 0, "f_yield": 1}, "lpf", ax=ax[1, 2])
fig, ax = plt.subplots(nrows=2, ncols=3)
fig.suptitle("fab_class: fcB, f_yield: 700 MPa, lambda_flex: 0.2")
lambda02.contour_2d({"plate_imp": 0, "fab_class": 1, "f_yield": 1}, "lpf", ax=ax[0, 0])
lambda02.contour_2d({"plate_imp": 1, "fab_class": 1, "f_yield": 1}, "lpf", ax=ax[0, 1])
lambda02.contour_2d({"plate_imp": 2, "fab_class": 1, "f_yield": 1}, "lpf", ax=ax[0, 2])
lambda02.contour_2d({"plate_imp": 3, "fab_class": 1, "f_yield": 1}, "lpf", ax=ax[1, 0])
lambda02.contour_2d({"plate_imp": 4, "fab_class": 1, "f_yield": 1}, "lpf", ax=ax[1, 1])
lambda02.contour_2d({"plate_imp": 5, "fab_class": 1, "f_yield": 1}, "lpf", ax=ax[1, 2])
fig, ax = plt.subplots(nrows=2, ncols=3)
fig.suptitle("fab_class: fcC, f_yield: 700 MPa, lambda_flex: 0.2")
lambda02.contour_2d({"plate_imp": 0, "fab_class": 2, "f_yield": 1}, "lpf", ax=ax[0, 0])
lambda02.contour_2d({"plate_imp": 1, "fab_class": 2, "f_yield": 1}, "lpf", ax=ax[0, 1])
lambda02.contour_2d({"plate_imp": 2, "fab_class": 2, "f_yield": 1}, "lpf", ax=ax[0, 2])
lambda02.contour_2d({"plate_imp": 3, "fab_class": 2, "f_yield": 1}, "lpf", ax=ax[1, 0])
lambda02.contour_2d({"plate_imp": 4, "fab_class": 2, "f_yield": 1}, "lpf", ax=ax[1, 1])
lambda02.contour_2d({"plate_imp": 5, "fab_class": 2, "f_yield": 1}, "lpf", ax=ax[1, 2])
return
| 2.859375 | 3 |
gigamonkeys/get.py | gigamonkey/sheets | 0 | 5271 | #!/usr/bin/env python
import json
import sys
from gigamonkeys.spreadsheets import spreadsheets
spreadsheet_id = sys.argv[1]
ranges = sys.argv[2:]
data = spreadsheets().get(spreadsheet_id, include_grid_data=bool(ranges), ranges=ranges)
json.dump(data, sys.stdout, indent=2)
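
# Illustrative usage (a sketch, not from the original repo): run the script
# with a spreadsheet id and optional A1-notation ranges; both values below are
# made-up placeholders, and spreadsheets() must find valid API credentials.
#
#   python get.py 1AbCdEfGhIjKlMnOpQrStUvWxYz "Sheet1!A1:B10"
#
# The spreadsheet metadata (plus grid data whenever ranges are supplied) is
# written to stdout as indented JSON.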
| 2.203125 | 2 |
config.py | mhmddpkts/Get-Turkish-Words-with-Web-Scraping | 0 | 5272 |
root_URL = "https://tr.wiktionary.org/wiki/Vikis%C3%B6zl%C3%BCk:S%C3%B6zc%C3%BCk_listesi_"
filepath = "words.csv"
#letters=["A","B","C","D","E","F","G","H","I","J","K","L","M","N","O",
#         "P","R","S","T","U","V","Y","Z"] ## the letters İ,Ç,Ö,Ş,Ü do not work correctly
letters=["C"] | 2.171875 | 2 |
sow_generator/tasks.py | praekelt/sow-generator | 1 | 5273 | <filename>sow_generator/tasks.py
from github3 import login
from github3.models import GitHubError
from celery import task
from celery.decorators import periodic_task
from celery.task.schedules import crontab
from sow_generator.models import Repository, AuthToken
def _sync_repository(obj):
dirty = False
token = AuthToken.objects.get(id=1).token
gh = login(token=token)
dc = gh.user()
org, name = obj.orgname
repo = gh.repository(org, name)
if repo is not None:
# Find RST or MD files. Markdown takes precedence.
for fieldname in ("readme", "sow"):
v = repo.contents("%s.rst" % fieldname.upper())
if v is not None:
setattr(obj, fieldname, v.decoded)
setattr(obj, "%s_format" % fieldname, "rst")
dirty = True
v = repo.contents("%s.md" % fieldname.upper())
if v is not None:
setattr(obj, fieldname, v.decoded)
setattr(obj, "%s_format" % fieldname, "md")
dirty = True
if dirty:
obj.save()
@task(max_retries=5)
def sync_repository(id):
obj = Repository.objects.get(id=id)
_sync_repository(obj)
@periodic_task(run_every=crontab(hour='*', minute='0', day_of_week='*'))
def sync_repositories():
"""Sync all repositories"""
for obj in Repository.objects.all():
_sync_repository(obj)
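

# Illustrative usage sketch (assumptions: a Celery worker is running and a
# Repository row with id=1 exists; the id is a placeholder, not a project
# fixture):
#
#   from sow_generator.tasks import sync_repository
#   sync_repository.delay(1)  # queue an asynchronous sync of repository 1
#
# The hourly sync_repositories periodic task needs no manual call; celery beat
# schedules it from the crontab declared above.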
| 2.28125 | 2 |
sources/wrappers.py | X-rayLaser/keras-auto-hwr | 0 | 5274 | import numpy as np
from sources import BaseSource
from sources.base import BaseSourceWrapper
from sources.preloaded import PreLoadedSource
import json
class WordsSource(BaseSource):
def __init__(self, source):
self._source = source
def __len__(self):
return len(self._source)
def _remove_apostrpohs(self, seq):
        res = ''.join(seq.split("'"))
res = ''.join(res.split('"'))
return res
def _clean(self, seq):
s = ''
for ch in seq.strip():
if ch.isalpha():
s += ch
return s
def get_sequences(self):
for seq_in, transcription in self._source.get_sequences():
transcription = self._remove_apostrpohs(transcription)
words = [self._clean(word) for word in transcription.split(' ')]
yield seq_in, words
class LabelSource(BaseSource):
def __init__(self, source, mapping_table):
self._source = source
self._mapping_table = mapping_table
def __len__(self):
return len(self._source)
def get_sequences(self):
for seq_in, seq_out in self._source.get_sequences():
label_seq = [self._mapping_table.encode(ch) for ch in seq_out]
yield seq_in, label_seq
class CTCAdaptedSource(BaseSource):
def __init__(self, source, padding_value=0):
self._source = source
self._padding = padding_value
def __len__(self):
return len(self._source)
def get_sequences(self):
for seq_in, seq_out in self._source.get_sequences():
seqs_in_pad = list(seq_in)
while len(seqs_in_pad) <= 2 * len(seq_out) + 1:
n = len(seqs_in_pad[0])
seqs_in_pad.append([self._padding] * n)
yield seqs_in_pad, seq_out
class Normalizer:
def __init__(self):
self._mu = None
self._sd = None
@staticmethod
def from_json(path):
with open(path, 'r') as f:
s = f.read()
d = json.loads(s)
normalizer = Normalizer()
mu = np.array(d['mu'])
sd = np.array(d['sd'])
normalizer.set_mean(mu)
normalizer.set_deviation(sd)
return normalizer
def to_json(self, path):
d = {
'mu': np.array(self.mu).tolist(),
'sd': np.array(self.sd).tolist()
}
with open(path, 'w') as f:
f.write(json.dumps(d))
def set_mean(self, mu):
self._mu = mu
def set_deviation(self, sd):
self._sd = sd
@property
def mu(self):
return self._mu
@property
def sd(self):
return self._sd
def fit(self, X):
sequence = []
for x in X:
sequence.extend(x)
self._mu = np.mean(sequence, axis=0)
self._sd = np.std(sequence, axis=0)
def preprocess(self, X):
res = []
for x in X:
x_norm = (x - self._mu) / self._sd
# we do not want to normalize END-OF-STROKE flag which is last in the tuple
x_norm[:, -1] = np.array(x)[:, -1]
res.append(x_norm.tolist())
return res
class OffsetPointsSource(BaseSource):
def __init__(self, source):
self._source = source
def __len__(self):
return len(self._source)
def get_sequences(self):
for strokes, transcription in self._source.get_sequences():
x0, y0, t0 = strokes[0].points[0]
new_seq = []
for stroke in strokes:
points = []
for x, y, t in stroke.points:
points.append((x - x0, y - y0, t - t0, 0))
points[-1] = points[-1][:-1] + (1,)
new_seq.extend(points)
yield new_seq, transcription
class NormalizedSource(BaseSource):
def __init__(self, source, normalizer):
self._source = source
self._normalizer = normalizer
def __len__(self):
return len(self._source)
def get_sequences(self):
for points, transcription in self._source.get_sequences():
norm = self._normalizer.preprocess([points])[0]
yield norm, transcription
class DenormalizedSource(BaseSource):
def __init__(self, source, normalizer):
self._source = source
self._normalizer = normalizer
def __len__(self):
return len(self._source)
def get_sequences(self):
mu = self._normalizer.mu
sd = self._normalizer.sd
for points, transcription in self._source.get_sequences():
denormalized = [(p * sd + mu).tolist() for p in points]
for i, p in enumerate(denormalized):
p[3] = points[i][3]
yield denormalized, transcription
class H5pySource(BaseSource):
def __init__(self, h5py_ds, random_order=True):
self._h5py = h5py_ds
self._random = random_order
def __len__(self):
return len(self._h5py)
def get_sequences(self):
return self._h5py.get_data(random_order=self._random)
class PreprocessedSource(BaseSourceWrapper):
def __init__(self, source, preprocessor):
super().__init__(source)
self._preprocessor = preprocessor
def get_sequences(self):
for xs, ys in self._source.get_sequences():
yield self._preprocessor.pre_process_example(xs, ys)
class ConstrainedSource(BaseSourceWrapper):
def __init__(self, source, num_lines):
super().__init__(source)
self._num_lines = num_lines
self._use_all = (num_lines == 0)
def get_sequences(self):
for j, (seq_in, seq_out) in enumerate(self._source.get_sequences()):
#print(j, seq_out)
if j % 500 == 0:
print('Fetched {} examples'.format(j))
if j >= self._num_lines and not self._use_all:
break
yield seq_in, seq_out
class PlainListSource(BaseSourceWrapper):
def get_sequences(self):
for strokes, t in self._source.get_sequences():
points = [stroke.points for stroke in strokes]
yield points, t
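

if __name__ == '__main__':
    # Minimal illustrative sketch (not part of the original module): fit a
    # Normalizer on two fake point sequences and normalize them. The feature
    # layout (x, y, t, end-of-stroke flag) mirrors OffsetPointsSource above.
    fake_sequences = [
        [[0.0, 0.0, 0.0, 0], [1.0, 2.0, 1.0, 1]],
        [[3.0, 1.0, 0.5, 0], [4.0, 0.0, 1.5, 1]],
    ]
    normalizer = Normalizer()
    normalizer.fit(fake_sequences)
    print(normalizer.mu, normalizer.sd)
    print(normalizer.preprocess(fake_sequences)[0])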
| 2.515625 | 3 |
multiworld/multiworld/core/image_env.py | yufeiwang63/ROLL | 11 | 5275 | <reponame>yufeiwang63/ROLL
import random
import copy
import cv2
import numpy as np
import warnings
from PIL import Image
from gym.spaces import Box, Dict
from multiworld.core.multitask_env import MultitaskEnv
from multiworld.core.wrapper_env import ProxyEnv
from multiworld.envs.env_util import concatenate_box_spaces
from multiworld.envs.env_util import get_stat_in_paths, create_stats_ordered_dict
class ImageEnv(ProxyEnv, MultitaskEnv):
def __init__(
self,
wrapped_env,
imsize=84,
init_camera=None,
transpose=False,
grayscale=False,
normalize=False,
reward_type='wrapped_env',
threshold=10,
image_length=None,
presampled_goals=None,
non_presampled_goal_img_is_garbage=False,
recompute_reward=True,
):
"""
:param wrapped_env:
:param imsize:
:param init_camera:
:param transpose:
:param grayscale:
:param normalize:
:param reward_type:
:param threshold:
:param image_length:
:param presampled_goals:
:param non_presampled_goal_img_is_garbage: Set this option to True if
you want to allow the code to work without presampled goals,
            but where the underlying env doesn't support set_to_goal. As the name
            implies, this will make it so that the goal image is garbage if you
don't provide pre-sampled goals. The main use case is if you want to
use an ImageEnv to pre-sample a bunch of goals.
"""
self.quick_init(locals())
super().__init__(wrapped_env)
self.wrapped_env.hide_goal_markers = True
self.imsize = imsize
self.init_camera = init_camera
self.transpose = transpose
self.grayscale = grayscale
self.normalize = normalize
self.recompute_reward = recompute_reward
self.non_presampled_goal_img_is_garbage = non_presampled_goal_img_is_garbage
if image_length is not None:
self.image_length = image_length
else:
if grayscale:
self.image_length = self.imsize * self.imsize
else:
self.image_length = 3 * self.imsize * self.imsize
self.channels = 1 if grayscale else 3
# This is torch format rather than PIL image
self.image_shape = (self.imsize, self.imsize)
# Flattened past image queue
# init camera
if init_camera is not None:
sim = self._wrapped_env.initialize_camera(init_camera)
# viewer = mujoco_py.MjRenderContextOffscreen(sim, device_id=-1)
# init_camera(viewer.cam)
# sim.add_render_context(viewer)
img_space = Box(0, 1, (self.image_length,), dtype=np.float32)
self._img_goal = img_space.sample() #has to be done for presampling
spaces = self.wrapped_env.observation_space.spaces.copy()
spaces['observation'] = img_space
spaces['desired_goal'] = img_space
spaces['achieved_goal'] = img_space
spaces['image_observation'] = img_space
spaces['image_desired_goal'] = img_space
spaces['image_achieved_goal'] = img_space
self.return_image_proprio = False
if 'proprio_observation' in spaces.keys():
self.return_image_proprio = True
spaces['image_proprio_observation'] = concatenate_box_spaces(
spaces['image_observation'],
spaces['proprio_observation']
)
spaces['image_proprio_desired_goal'] = concatenate_box_spaces(
spaces['image_desired_goal'],
spaces['proprio_desired_goal']
)
spaces['image_proprio_achieved_goal'] = concatenate_box_spaces(
spaces['image_achieved_goal'],
spaces['proprio_achieved_goal']
)
self.observation_space = Dict(spaces)
self.action_space = self.wrapped_env.action_space
self.reward_type = reward_type
self.threshold = threshold
self._presampled_goals = presampled_goals
if self._presampled_goals is None:
self.num_goals_presampled = 0
else:
self.num_goals_presampled = presampled_goals[random.choice(list(presampled_goals))].shape[0]
self._last_image = None
def step(self, action):
obs, reward, done, info = self.wrapped_env.step(action)
new_obs = self._update_obs(obs)
if self.recompute_reward:
reward = self.compute_reward(action, new_obs)
self._update_info(info, obs)
return new_obs, reward, done, info
def _update_info(self, info, obs):
achieved_goal = obs['image_achieved_goal']
desired_goal = self._img_goal
image_dist = np.linalg.norm(achieved_goal-desired_goal)
image_success = (image_dist<self.threshold).astype(float)-1
info['image_dist'] = image_dist
info['image_success'] = image_success
def reset(self):
obs = self.wrapped_env.reset()
if self.num_goals_presampled > 0:
goal = self.sample_goal()
self._img_goal = goal['image_desired_goal']
self.wrapped_env.set_goal(goal)
for key in goal:
obs[key] = goal[key]
elif self.non_presampled_goal_img_is_garbage:
# This is use mainly for debugging or pre-sampling goals.
self._img_goal = self._get_flat_img()
else:
env_state = self.wrapped_env.get_env_state()
self.wrapped_env.set_to_goal(self.wrapped_env.get_goal())
self._img_goal = self._get_flat_img()
self.wrapped_env.set_env_state(env_state)
return self._update_obs(obs)
def _get_obs(self):
return self._update_obs(self.wrapped_env._get_obs())
def _update_obs(self, obs):
img_obs = self._get_flat_img()
obs['image_observation'] = img_obs
obs['image_desired_goal'] = self._img_goal
obs['image_achieved_goal'] = img_obs
obs['observation'] = img_obs
obs['desired_goal'] = self._img_goal
obs['achieved_goal'] = img_obs
if self.return_image_proprio:
obs['image_proprio_observation'] = np.concatenate(
(obs['image_observation'], obs['proprio_observation'])
)
obs['image_proprio_desired_goal'] = np.concatenate(
(obs['image_desired_goal'], obs['proprio_desired_goal'])
)
obs['image_proprio_achieved_goal'] = np.concatenate(
(obs['image_achieved_goal'], obs['proprio_achieved_goal'])
)
return obs
def _get_flat_img(self):
image_obs = self._wrapped_env.get_image(
width=self.imsize,
height=self.imsize,
)
self._last_image = image_obs
if self.grayscale:
image_obs = Image.fromarray(image_obs).convert('L')
image_obs = np.array(image_obs)
if self.normalize:
image_obs = image_obs / 255.0
if self.transpose:
image_obs = image_obs.transpose()
assert image_obs.shape[0] == self.channels
return image_obs.flatten()
def render(self, mode='wrapped'):
if mode == 'wrapped':
self.wrapped_env.render()
elif mode == 'cv2':
if self._last_image is None:
self._last_image = self._wrapped_env.get_image(
width=self.imsize,
height=self.imsize,
)
cv2.imshow('ImageEnv', self._last_image)
cv2.waitKey(1)
else:
raise ValueError("Invalid render mode: {}".format(mode))
def show_obs(self, normalized_img_vec_, name='img'):
print(name)
normalized_img_vec = copy.deepcopy(normalized_img_vec_)
img = (normalized_img_vec * 255).astype(np.uint8)
img = img.reshape(3, self.imsize, self.imsize).transpose()
img = img[::-1, :, ::-1]
cv2.imshow(name, img)
cv2.waitKey()
"""
Multitask functions
"""
def get_goal(self):
goal = self.wrapped_env.get_goal()
goal['desired_goal'] = self._img_goal
goal['image_desired_goal'] = self._img_goal
return goal
def set_goal(self, goal):
''' Assume goal contains both image_desired_goal and any goals required for wrapped envs'''
self._img_goal = goal['image_desired_goal']
self.wrapped_env.set_goal(goal)
def sample_goals(self, batch_size):
if self.num_goals_presampled > 0:
idx = np.random.randint(0, self.num_goals_presampled, batch_size)
sampled_goals = {
k: v[idx] for k, v in self._presampled_goals.items()
}
return sampled_goals
if batch_size > 1:
warnings.warn("Sampling goal images is slow")
img_goals = np.zeros((batch_size, self.image_length))
goals = self.wrapped_env.sample_goals(batch_size)
pre_state = self.wrapped_env.get_env_state()
for i in range(batch_size):
goal = self.unbatchify_dict(goals, i)
self.wrapped_env.set_to_goal(goal)
img_goals[i, :] = self._get_flat_img()
self.wrapped_env.set_env_state(pre_state)
goals['desired_goal'] = img_goals
goals['image_desired_goal'] = img_goals
return goals
def compute_rewards(self, actions, obs):
achieved_goals = obs['achieved_goal']
desired_goals = obs['desired_goal']
dist = np.linalg.norm(achieved_goals - desired_goals, axis=1)
if self.reward_type=='image_distance':
return -dist
elif self.reward_type=='image_sparse':
return -(dist > self.threshold).astype(float)
elif self.reward_type=='wrapped_env':
return self.wrapped_env.compute_rewards(actions, obs)
else:
raise NotImplementedError()
def get_diagnostics(self, paths, **kwargs):
statistics = self.wrapped_env.get_diagnostics(paths, **kwargs)
for stat_name_in_paths in ["image_dist", "image_success"]:
stats = get_stat_in_paths(paths, 'env_infos', stat_name_in_paths)
statistics.update(create_stats_ordered_dict(
stat_name_in_paths,
stats,
always_show_all_stats=True,
))
final_stats = [s[-1] for s in stats]
statistics.update(create_stats_ordered_dict(
"Final " + stat_name_in_paths,
final_stats,
always_show_all_stats=True,
))
return statistics
def normalize_image(image, dtype=np.float64):
assert image.dtype == np.uint8
return dtype(image) / 255.0
def unormalize_image(image):
assert image.dtype != np.uint8
return np.uint8(image * 255.0)
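

if __name__ == "__main__":
    # Illustrative round-trip sketch for the two helpers above (not part of
    # the original module): uint8 image -> float in [0, 1] -> uint8.
    fake_img = np.random.randint(0, 256, size=(84, 84, 3), dtype=np.uint8)
    as_float = normalize_image(fake_img)
    restored = unormalize_image(as_float)
    print(as_float.min(), as_float.max(), restored.dtype)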
| 2.25 | 2 |
sample_full_post_processor.py | huynguyen82/Modified-Kaldi-GStream-OnlineServer | 0 | 5276 | #!/usr/bin/env python
import sys
import json
import logging
from math import exp
import requests as rq
import re
### For NLP post-processing
header={"Content-Type": "application/json"}
message='{"sample":"Hello bigdata"}'
api_url="http://192.168.1.197:11992/norm"
###
def NLP_process_output(pre_str):
try:
jmsg=json.loads(message)
jmsg['sample']=pre_str
r = rq.post(api_url,json=jmsg, headers=header)
results = json.loads(r.text)['result']
logging.info("NLP=%s" % results)
return results
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
logging.error("Failed to get NLP post-processing: %s : %s " % (exc_type, exc_value))
return pre_str
def post_process_json(str):
try:
event = json.loads(str)
if "result" in event:
if len(event["result"]["hypotheses"]) > 1:
likelihood1 = event["result"]["hypotheses"][0]["likelihood"]
likelihood2 = event["result"]["hypotheses"][1]["likelihood"]
confidence = likelihood1 - likelihood2
confidence = 1 - exp(-confidence)
else:
confidence = 1.0e+10
event["result"]["hypotheses"][0]["confidence"] = confidence
org_trans = event["result"]["hypotheses"][0]["transcript"]
logging.info("Recognized result=%s" % org_trans )
out_trans = NLP_process_output(org_trans) + '.'
            logging.info("Pass into function is %s" % out_trans)
event["result"]["hypotheses"][0]["transcript"] = out_trans
del event["result"]["hypotheses"][1:]
return json.dumps(event)
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
logging.error("Failed to process JSON result: %s : %s " % (exc_type, exc_value))
return str
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG, format="%(levelname)8s %(asctime)s %(message)s ")
lines = []
while True:
l = sys.stdin.readline()
if not l: break # EOF
if l.strip() == "":
if len(lines) > 0:
result_json = post_process_json("".join(lines))
print result_json
print
sys.stdout.flush()
lines = []
else:
lines.append(l)
if len(lines) > 0:
result_json = post_process_json("".join(lines))
print result_json
lines = []
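
# Illustrative usage sketch (assumption: the exact option name can differ
# between kaldi-gstreamer-server versions): the worker is typically configured
# to pipe every result through this script, e.g. in its YAML config:
#
#   full-post-processor: ./sample_full_post_processor.py
#
# The script reads blank-line-separated JSON result blocks on stdin, rescores
# the confidence, sends the 1-best transcript to the NLP service at api_url,
# and prints the modified JSON back to stdout.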
| 2.6875 | 3 |
lang_detect_gears.py | AlexMikhalev/cord19redisknowledgegraph | 7 | 5277 | <reponame>AlexMikhalev/cord19redisknowledgegraph<gh_stars>1-10
from langdetect import detect
def detect_language(x):
#detect language of the article
try:
lang=detect(x['value'])
except:
lang="empty"
execute('SET', 'lang_article:' + x['key'], lang)
if lang!='en':
execute('SADD','titles_to_delete', x['key'])
gb = GB()
gb.foreach(detect_language)
gb.run('title:*') | 2.40625 | 2 |
daemon/core/coreobj.py | shanv82/core | 0 | 5278 | """
Defines the basic objects for CORE emulation: the PyCoreObj base class, along with PyCoreNode,
PyCoreNet, and PyCoreNetIf.
"""
import os
import shutil
import socket
import threading
from socket import AF_INET
from socket import AF_INET6
from core.data import NodeData, LinkData
from core.enumerations import LinkTypes
from core.misc import ipaddress
class Position(object):
"""
Helper class for Cartesian coordinate position
"""
def __init__(self, x=None, y=None, z=None):
"""
Creates a Position instance.
:param x: x position
:param y: y position
:param z: z position
:return:
"""
self.x = x
self.y = y
self.z = z
def set(self, x=None, y=None, z=None):
"""
Returns True if the position has actually changed.
:param float x: x position
:param float y: y position
:param float z: z position
:return: True if position changed, False otherwise
:rtype: bool
"""
if self.x == x and self.y == y and self.z == z:
return False
self.x = x
self.y = y
self.z = z
return True
def get(self):
"""
Retrieve x,y,z position.
:return: x,y,z position tuple
:rtype: tuple
"""
return self.x, self.y, self.z
class PyCoreObj(object):
"""
Base class for CORE objects (nodes and networks)
"""
apitype = None
# TODO: appears start has no usage, verify and remove
def __init__(self, session, objid=None, name=None, start=True):
"""
Creates a PyCoreObj instance.
:param core.session.Session session: CORE session object
:param int objid: object id
:param str name: object name
:param bool start: start value
:return:
"""
self.session = session
if objid is None:
objid = session.get_object_id()
self.objid = objid
if name is None:
name = "o%s" % self.objid
self.name = name
self.type = None
self.server = None
self.services = None
# ifindex is key, PyCoreNetIf instance is value
self._netif = {}
self.ifindex = 0
self.canvas = None
self.icon = None
self.opaque = None
self.position = Position()
def startup(self):
"""
Each object implements its own startup method.
:return: nothing
"""
raise NotImplementedError
def shutdown(self):
"""
Each object implements its own shutdown method.
:return: nothing
"""
raise NotImplementedError
def setposition(self, x=None, y=None, z=None):
"""
Set the (x,y,z) position of the object.
:param float x: x position
:param float y: y position
:param float z: z position
:return: True if position changed, False otherwise
:rtype: bool
"""
return self.position.set(x=x, y=y, z=z)
def getposition(self):
"""
Return an (x,y,z) tuple representing this object's position.
:return: x,y,z position tuple
:rtype: tuple
"""
return self.position.get()
def ifname(self, ifindex):
"""
Retrieve interface name for index.
:param int ifindex: interface index
:return: interface name
:rtype: str
"""
return self._netif[ifindex].name
def netifs(self, sort=False):
"""
Retrieve network interfaces, sorted if desired.
:param bool sort: boolean used to determine if interfaces should be sorted
:return: network interfaces
:rtype: list
"""
if sort:
return map(lambda k: self._netif[k], sorted(self._netif.keys()))
else:
return self._netif.itervalues()
def numnetif(self):
"""
Return the attached interface count.
:return: number of network interfaces
:rtype: int
"""
return len(self._netif)
def getifindex(self, netif):
"""
Retrieve index for an interface.
:param PyCoreNetIf netif: interface to get index for
:return: interface index if found, -1 otherwise
:rtype: int
"""
for ifindex in self._netif:
if self._netif[ifindex] is netif:
return ifindex
return -1
def newifindex(self):
"""
Create a new interface index.
:return: interface index
:rtype: int
"""
while self.ifindex in self._netif:
self.ifindex += 1
ifindex = self.ifindex
self.ifindex += 1
return ifindex
def data(self, message_type, lat=None, lon=None, alt=None):
"""
Build a data object for this node.
:param message_type: purpose for the data object we are creating
:param str lat: latitude
:param str lon: longitude
:param str alt: altitude
:return: node data object
:rtype: core.data.NodeData
"""
if self.apitype is None:
return None
x, y, _ = self.getposition()
model = self.type
emulation_server = self.server
services = self.services
if services is not None:
services = "|".join([service.name for service in services])
node_data = NodeData(
message_type=message_type,
id=self.objid,
node_type=self.apitype,
name=self.name,
emulation_id=self.objid,
canvas=self.canvas,
icon=self.icon,
opaque=self.opaque,
x_position=x,
y_position=y,
latitude=lat,
longitude=lon,
altitude=alt,
model=model,
emulation_server=emulation_server,
services=services
)
return node_data
def all_link_data(self, flags):
"""
Build CORE Link data for this object. There is no default
method for PyCoreObjs as PyCoreNodes do not implement this but
PyCoreNets do.
:param flags: message flags
:return: list of link data
:rtype: core.data.LinkData
"""
return []
class PyCoreNode(PyCoreObj):
"""
Base class for CORE nodes.
"""
def __init__(self, session, objid=None, name=None, start=True):
"""
Create a PyCoreNode instance.
:param core.session.Session session: CORE session object
:param int objid: object id
:param str name: object name
:param bool start: boolean for starting
"""
super(PyCoreNode, self).__init__(session, objid, name, start=start)
self.services = []
self.nodedir = None
self.tmpnodedir = False
def addservice(self, service):
"""
Add a services to the service list.
:param core.service.CoreService service: service to add
:return: nothing
"""
if service is not None:
self.services.append(service)
def makenodedir(self):
"""
Create the node directory.
:return: nothing
"""
if self.nodedir is None:
self.nodedir = os.path.join(self.session.session_dir, self.name + ".conf")
os.makedirs(self.nodedir)
self.tmpnodedir = True
else:
self.tmpnodedir = False
def rmnodedir(self):
"""
Remove the node directory, unless preserve directory has been set.
:return: nothing
"""
preserve = self.session.options.get_config("preservedir") == "1"
if preserve:
return
if self.tmpnodedir:
shutil.rmtree(self.nodedir, ignore_errors=True)
def addnetif(self, netif, ifindex):
"""
Add network interface to node and set the network interface index if successful.
:param PyCoreNetIf netif: network interface to add
:param int ifindex: interface index
:return: nothing
"""
if ifindex in self._netif:
raise ValueError("ifindex %s already exists" % ifindex)
self._netif[ifindex] = netif
# TODO: this should have probably been set ahead, seems bad to me, check for failure and fix
netif.netindex = ifindex
def delnetif(self, ifindex):
"""
Delete a network interface
:param int ifindex: interface index to delete
:return: nothing
"""
if ifindex not in self._netif:
raise ValueError("ifindex %s does not exist" % ifindex)
netif = self._netif.pop(ifindex)
netif.shutdown()
del netif
# TODO: net parameter is not used, remove
def netif(self, ifindex, net=None):
"""
Retrieve network interface.
:param int ifindex: index of interface to retrieve
:param PyCoreNetIf net: network node
:return: network interface, or None if not found
:rtype: PyCoreNetIf
"""
if ifindex in self._netif:
return self._netif[ifindex]
else:
return None
def attachnet(self, ifindex, net):
"""
Attach a network.
:param int ifindex: interface of index to attach
:param PyCoreNetIf net: network to attach
:return:
"""
if ifindex not in self._netif:
raise ValueError("ifindex %s does not exist" % ifindex)
self._netif[ifindex].attachnet(net)
def detachnet(self, ifindex):
"""
Detach network interface.
:param int ifindex: interface index to detach
:return: nothing
"""
if ifindex not in self._netif:
raise ValueError("ifindex %s does not exist" % ifindex)
self._netif[ifindex].detachnet()
def setposition(self, x=None, y=None, z=None):
"""
Set position.
:param x: x position
:param y: y position
:param z: z position
:return: nothing
"""
changed = super(PyCoreNode, self).setposition(x, y, z)
if changed:
for netif in self.netifs(sort=True):
netif.setposition(x, y, z)
def commonnets(self, obj, want_ctrl=False):
"""
Given another node or net object, return common networks between
this node and that object. A list of tuples is returned, with each tuple
consisting of (network, interface1, interface2).
:param obj: object to get common network with
:param want_ctrl: flag set to determine if control network are wanted
:return: tuples of common networks
:rtype: list
"""
common = []
for netif1 in self.netifs():
if not want_ctrl and hasattr(netif1, "control"):
continue
for netif2 in obj.netifs():
if netif1.net == netif2.net:
common.append((netif1.net, netif1, netif2))
return common
def check_cmd(self, args):
"""
Runs shell command on node.
:param list[str]|str args: command to run
:return: combined stdout and stderr
:rtype: str
:raises CoreCommandError: when a non-zero exit status occurs
"""
raise NotImplementedError
def cmd(self, args, wait=True):
"""
Runs shell command on node, with option to not wait for a result.
:param list[str]|str args: command to run
:param bool wait: wait for command to exit, defaults to True
:return: exit status for command
:rtype: int
"""
raise NotImplementedError
def cmd_output(self, args):
"""
Runs shell command on node and get exit status and output.
:param list[str]|str args: command to run
:return: exit status and combined stdout and stderr
:rtype: tuple[int, str]
"""
raise NotImplementedError
def termcmdstring(self, sh):
"""
Create a terminal command string.
:param str sh: shell to execute command in
:return: str
"""
raise NotImplementedError
class PyCoreNet(PyCoreObj):
"""
Base class for networks
"""
linktype = LinkTypes.WIRED.value
def __init__(self, session, objid, name, start=True):
"""
Create a PyCoreNet instance.
:param core.session.Session session: CORE session object
:param int objid: object id
:param str name: object name
:param bool start: should object start
"""
super(PyCoreNet, self).__init__(session, objid, name, start=start)
self._linked = {}
self._linked_lock = threading.Lock()
def startup(self):
"""
Each object implements its own startup method.
:return: nothing
"""
raise NotImplementedError
def shutdown(self):
"""
Each object implements its own shutdown method.
:return: nothing
"""
raise NotImplementedError
def attach(self, netif):
"""
Attach network interface.
:param PyCoreNetIf netif: network interface to attach
:return: nothing
"""
i = self.newifindex()
self._netif[i] = netif
netif.netifi = i
with self._linked_lock:
self._linked[netif] = {}
def detach(self, netif):
"""
Detach network interface.
:param PyCoreNetIf netif: network interface to detach
:return: nothing
"""
del self._netif[netif.netifi]
netif.netifi = None
with self._linked_lock:
del self._linked[netif]
def all_link_data(self, flags):
"""
Build link data objects for this network. Each link object describes a link
between this network and a node.
"""
all_links = []
# build a link message from this network node to each node having a
# connected interface
for netif in self.netifs(sort=True):
if not hasattr(netif, "node"):
continue
otherobj = netif.node
uni = False
if otherobj is None:
# two layer-2 switches/hubs linked together via linknet()
if not hasattr(netif, "othernet"):
continue
otherobj = netif.othernet
if otherobj.objid == self.objid:
continue
netif.swapparams('_params_up')
upstream_params = netif.getparams()
netif.swapparams('_params_up')
if netif.getparams() != upstream_params:
uni = True
unidirectional = 0
if uni:
unidirectional = 1
interface2_ip4 = None
interface2_ip4_mask = None
interface2_ip6 = None
interface2_ip6_mask = None
for address in netif.addrlist:
ip, _sep, mask = address.partition("/")
mask = int(mask)
if ipaddress.is_ipv4_address(ip):
family = AF_INET
ipl = socket.inet_pton(family, ip)
interface2_ip4 = ipaddress.IpAddress(af=family, address=ipl)
interface2_ip4_mask = mask
else:
family = AF_INET6
ipl = socket.inet_pton(family, ip)
interface2_ip6 = ipaddress.IpAddress(af=family, address=ipl)
interface2_ip6_mask = mask
link_data = LinkData(
message_type=flags,
node1_id=self.objid,
node2_id=otherobj.objid,
link_type=self.linktype,
unidirectional=unidirectional,
interface2_id=otherobj.getifindex(netif),
interface2_mac=netif.hwaddr,
interface2_ip4=interface2_ip4,
interface2_ip4_mask=interface2_ip4_mask,
interface2_ip6=interface2_ip6,
interface2_ip6_mask=interface2_ip6_mask,
delay=netif.getparam("delay"),
bandwidth=netif.getparam("bw"),
dup=netif.getparam("duplicate"),
jitter=netif.getparam("jitter")
)
all_links.append(link_data)
if not uni:
continue
netif.swapparams('_params_up')
link_data = LinkData(
message_type=0,
node1_id=otherobj.objid,
node2_id=self.objid,
unidirectional=1,
delay=netif.getparam("delay"),
bandwidth=netif.getparam("bw"),
dup=netif.getparam("duplicate"),
jitter=netif.getparam("jitter")
)
netif.swapparams('_params_up')
all_links.append(link_data)
return all_links
class PyCoreNetIf(object):
"""
Base class for network interfaces.
"""
def __init__(self, node, name, mtu):
"""
Creates a PyCoreNetIf instance.
:param core.coreobj.PyCoreNode node: node for interface
:param str name: interface name
:param mtu: mtu value
"""
self.node = node
self.name = name
if not isinstance(mtu, (int, long)):
raise ValueError
self.mtu = mtu
self.net = None
self._params = {}
self.addrlist = []
self.hwaddr = None
# placeholder position hook
self.poshook = lambda a, b, c, d: None
# used with EMANE
self.transport_type = None
# interface index on the network
self.netindex = None
# index used to find flow data
self.flow_id = None
def startup(self):
"""
Startup method for the interface.
:return: nothing
"""
pass
def shutdown(self):
"""
Shutdown method for the interface.
:return: nothing
"""
pass
def attachnet(self, net):
"""
Attach network.
:param core.coreobj.PyCoreNet net: network to attach
:return: nothing
"""
if self.net:
self.detachnet()
self.net = None
net.attach(self)
self.net = net
def detachnet(self):
"""
Detach from a network.
:return: nothing
"""
if self.net is not None:
self.net.detach(self)
def addaddr(self, addr):
"""
Add address.
:param str addr: address to add
:return: nothing
"""
self.addrlist.append(addr)
def deladdr(self, addr):
"""
Delete address.
:param str addr: address to delete
:return: nothing
"""
self.addrlist.remove(addr)
def sethwaddr(self, addr):
"""
Set hardware address.
:param core.misc.ipaddress.MacAddress addr: hardware address to set to.
:return: nothing
"""
self.hwaddr = addr
def getparam(self, key):
"""
        Retrieve a parameter value, or None if the parameter does not exist.
:param key: parameter to get value for
:return: parameter value
"""
return self._params.get(key)
def getparams(self):
"""
Return (key, value) pairs for parameters.
"""
parameters = []
for k in sorted(self._params.keys()):
parameters.append((k, self._params[k]))
return parameters
def setparam(self, key, value):
"""
Set a parameter value, returns True if the parameter has changed.
:param key: parameter name to set
:param value: parameter value
:return: True if parameter changed, False otherwise
"""
# treat None and 0 as unchanged values
current_value = self._params.get(key)
if current_value == value or current_value <= 0 and value <= 0:
return False
self._params[key] = value
return True
def swapparams(self, name):
"""
Swap out parameters dict for name. If name does not exist,
        initialize it. This is for supporting separate upstream/downstream
parameters when two layer-2 nodes are linked together.
:param str name: name of parameter to swap
:return: nothing
"""
tmp = self._params
if not hasattr(self, name):
setattr(self, name, {})
self._params = getattr(self, name)
setattr(self, name, tmp)
def setposition(self, x, y, z):
"""
Dispatch position hook handler.
:param x: x position
:param y: y position
:param z: z position
:return: nothing
"""
self.poshook(self, x, y, z)
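

if __name__ == "__main__":
    # Small illustrative sketch (not part of CORE itself): exercise the
    # Position helper shared by every PyCoreObj.
    position = Position()
    print position.set(x=10.0, y=20.0, z=0.0)  # True  - position changed
    print position.set(x=10.0, y=20.0, z=0.0)  # False - unchanged
    print position.get()                       # (10.0, 20.0, 0.0)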
| 2.859375 | 3 |
abc/128/b.py | wotsushi/competitive-programming | 3 | 5279 | # 入力
N = int(input())
S, P = (
zip(*(
(s, int(p))
for s, p in (input().split() for _ in range(N))
)) if N else
((), ())
)
ans = '\n'.join(
str(i)
for _, _, i in sorted(
zip(
S,
P,
range(1, N + 1)
),
key=lambda t: (t[0], -t[1])
)
)
# 出力
print(ans)
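
# Illustrative example (hand-computed from the logic above, not an official
# test case): for the input
#
#   6
#   khabarovsk 20
#   moscow 10
#   kazan 50
#   kazan 35
#   moscow 60
#   khabarovsk 40
#
# restaurants are grouped by city in lexicographic order and, within a city,
# listed by decreasing score, so the printed 1-based indices are
# 3, 4, 6, 1, 5, 2 (one per line).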
| 3.03125 | 3 |
additional/hashcat_crack.py | mmmds/WirelessDiscoverCrackScan | 2 | 5280 | # External cracking script, part of https://github.com/mmmds/WirelessDiscoverCrackScan
import datetime
import subprocess
import os
### CONFIGURATION
HASHCAT_DIR = "C:\\hashcat-5.1.0"
HASHCAT_EXE = "hashcat64.exe"
LOG_FILE = "crack_log.txt"
DICT_DIR = "./dicts"
def load_dict_list():
for r,d,f in os.walk(DICT_DIR):
return f
def parse_log():
r = {}
with open(LOG_FILE, "r") as f:
for line in f.readlines():
try:
a = line.split("/")
date = a[0]
dict_file = a[1].strip()
hash_file = a[2].split(".")[0].strip()
r[(hash_file, dict_file)] = date
except:
pass
return r
def append_log(file, dictionary):
text = "{}/{}/{}".format(str(datetime.datetime.now()), dictionary, file)
with open(LOG_FILE, "a") as f:
f.write("\n" + text)
def read_files():
result = ([],[])
files = os.listdir(".")
for f in files:
if f.endswith(".16800"):
result[0].append(f.split(".")[0])
elif f.endswith(".2500"):
result[1].append(f.split(".")[0])
return result
def process(files, t, logs, dicts):
for f in files:
for d in dicts:
if (f.split(".")[0], d) not in logs:
print("\n\n######## {} {}\n\n".format(f, d))
cwd = os.getcwd()
subprocess.Popen([HASHCAT_DIR+ "\\" + HASHCAT_EXE, "-m", t, "{}\\{}.{}".format(cwd,f, t), "{}\\{}\\{}".format(cwd,DICT_DIR, d)], cwd = HASHCAT_DIR).wait()
append_log(f, d)
else:
print("\n\n-----------{} {} in logs\n\n".format(f, d))
files = read_files()
logs = parse_log()
dicts = load_dict_list()
print(dicts)
print(files)
print(logs)
pmkid = files[0]
hs4 = files[1]
process(pmkid, "16800", logs, dicts)
process(hs4, "2500", logs, dicts)
| 2.453125 | 2 |
editortools/player.py | bennettdc/MCEdit-Unified | 237 | 5281 | """Copyright (c) 2010-2012 <NAME>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE."""
#-# Modifiedby D.C.-G. for translation purpose
from OpenGL import GL
import numpy
import os
from albow import TableView, TableColumn, Label, Button, Column, CheckBox, AttrRef, Row, ask, alert, input_text_buttons, TabPanel
from albow.table_view import TableRowView
from albow.translate import _
from config import config
from editortools.editortool import EditorTool
from editortools.tooloptions import ToolOptions
from glbackground import Panel
from glutils import DisplayList
from mceutils import loadPNGTexture, alertException, drawTerrainCuttingWire, drawCube
from operation import Operation
import pymclevel
from pymclevel.box import BoundingBox, FloatBox
from pymclevel import nbt
import logging
from player_cache import PlayerCache, ThreadRS
from nbtexplorer import loadFile, saveFile, NBTExplorerToolPanel
import pygame
log = logging.getLogger(__name__)
class PlayerRemoveOperation(Operation):
undoTag = None
def __init__(self, tool, player="Player (Single Player)"):
super(PlayerRemoveOperation, self).__init__(tool.editor, tool.editor.level)
self.tool = tool
self.player = player
self.level = self.tool.editor.level
self.canUndo = False
self.playercache = PlayerCache()
def perform(self, recordUndo=True):
if self.level.saving:
alert(_("Cannot perform action while saving is taking place"))
return
if self.player == "Player (Single Player)":
answer = ask(_("Are you sure you want to delete the default player?"), ["Yes", "Cancel"])
if answer == "Cancel":
return
self.player = "Player"
if recordUndo:
self.undoTag = self.level.getPlayerTag(self.player)
self.level.players.remove(self.player)
if self.tool.panel:
if self.player != "Player":
#self.tool.panel.players.remove(player_cache.getPlayerNameFromUUID(self.player))
#self.tool.panel.players.remove(self.playercache.getPlayerInfo(self.player)[0])
str()
else:
self.tool.panel.players.remove("Player (Single Player)")
while self.tool.panel.table.index >= len(self.tool.panel.players):
self.tool.panel.table.index -= 1
#if len(self.tool.panel.players) == 0:
# self.tool.hidePanel()
# self.tool.showPanel()
self.tool.hidePanel()
self.tool.showPanel()
self.tool.markerList.invalidate()
self.tool.movingPlayer = None
pos = self.tool.revPlayerPos[self.editor.level.dimNo][self.player]
del self.tool.playerPos[self.editor.level.dimNo][pos]
if self.player != "Player":
del self.tool.playerTexture[self.player]
else:
del self.level.root_tag["Data"]["Player"]
del self.tool.revPlayerPos[self.editor.level.dimNo][self.player]
self.canUndo = True
def undo(self):
if not (self.undoTag is None):
if self.player != "Player":
self.level.playerTagCache[self.level.getPlayerPath(self.player)] = self.undoTag
else:
self.level.root_tag["Data"]["Player"] = self.undoTag
self.level.players.append(self.player)
if self.tool.panel:
#if self.player != "Player":
# self.tool.panel.players.append(self.playercache.getPlayerInfo(self.player)[0])
#else:
# self.tool.panel.players.append("Player (Single Player)")
if "[No players]" in self.tool.panel.players:
self.tool.panel.players.remove("[No players]")
self.tool.hidePanel()
self.tool.showPanel()
self.tool.markerList.invalidate()
def redo(self):
self.perform()
class PlayerAddOperation(Operation):
playerTag = None
def __init__(self, tool):
super(PlayerAddOperation, self).__init__(tool.editor, tool.editor.level)
self.tool = tool
self.level = self.tool.editor.level
self.canUndo = False
self.playercache = PlayerCache()
def perform(self, recordUndo=True):
initial = ""
allowed_chars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_"
while True:
self.player = input_text_buttons("Enter a Player Name: ", 160, initial=initial, allowed_chars=allowed_chars)
if self.player is None:
return
elif len(self.player) > 16:
alert("Name too long. Maximum name length is 16.")
initial = self.player
elif len(self.player) < 1:
alert("Name too short. Minimum name length is 1.")
initial = self.player
else:
break
# print 1
data = self.playercache.getPlayerInfo(self.player)
if "<Unknown UUID>" not in data and "Server not ready" not in data:
self.uuid = data[0]
self.player = data[1]
else:
action = ask("Could not get {}'s UUID. Please make sure that you are connected to the internet and that the player \"{}\" exists.".format(self.player, self.player), ["Enter UUID manually", "Cancel"])
if action != "Enter UUID manually":
return
self.uuid = input_text_buttons("Enter a Player UUID: ", 160)
if not self.uuid:
return
# print 2
self.player = self.playercache.getPlayerInfo(self.uuid)
if self.player == self.uuid.replace("-", ""):
if ask("UUID was not found. Continue anyways?") == "Cancel":
return
# print "PlayerAddOperation.perform::self.uuid", self.uuid
if self.uuid in self.level.players:
alert("Player already exists in this World.")
return
self.playerTag = self.newPlayer()
#if self.tool.panel:
# self.tool.panel.players.append(self.player)
if self.level.oldPlayerFolderFormat:
self.level.playerTagCache[self.level.getPlayerPath(self.player)] = self.playerTag
self.level.players.append(self.player)
#if self.tool.panel:
#self.tool.panel.player_UUID[self.player] = self.player
else:
self.level.playerTagCache[self.level.getPlayerPath(self.uuid)] = self.playerTag
self.level.players.append(self.uuid)
if self.tool.panel:
self.tool.panel.player_UUID["UUID"].append(self.uuid)
self.tool.panel.player_UUID["Name"].append(self.player)
self.tool.playerPos[self.editor.level.dimNo][(0,0,0)] = self.uuid
self.tool.revPlayerPos[self.editor.level.dimNo][self.uuid] = (0,0,0)
# print 3
r = self.playercache.getPlayerSkin(self.uuid, force_download=False)
if not isinstance(r, (str, unicode)):
# print 'r 1', r
r = r.join()
# print 'r 2', r
self.tool.playerTexture[self.uuid] = loadPNGTexture(r)
self.tool.markerList.invalidate()
self.tool.recordMove = False
self.tool.movingPlayer = self.uuid
if self.tool.panel:
self.tool.hidePanel()
self.tool.showPanel()
self.canUndo = True
self.playerTag.save(self.level.getPlayerPath(self.uuid))
self.tool.nonSavedPlayers.append(self.level.getPlayerPath(self.uuid))
self.tool.inOtherDimension[self.editor.level.dimNo].append(self.uuid)
def newPlayer(self):
playerTag = nbt.TAG_Compound()
playerTag['Air'] = nbt.TAG_Short(300)
playerTag['AttackTime'] = nbt.TAG_Short(0)
playerTag['DeathTime'] = nbt.TAG_Short(0)
playerTag['Fire'] = nbt.TAG_Short(-20)
playerTag['Health'] = nbt.TAG_Short(20)
playerTag['HurtTime'] = nbt.TAG_Short(0)
playerTag['Score'] = nbt.TAG_Int(0)
playerTag['FallDistance'] = nbt.TAG_Float(0)
playerTag['OnGround'] = nbt.TAG_Byte(0)
playerTag['Dimension'] = nbt.TAG_Int(self.editor.level.dimNo)
playerTag["Inventory"] = nbt.TAG_List()
playerTag['Motion'] = nbt.TAG_List([nbt.TAG_Double(0) for i in xrange(3)])
spawn = self.level.playerSpawnPosition()
spawnX = spawn[0]
spawnZ = spawn[2]
blocks = [self.level.blockAt(spawnX, i, spawnZ) for i in xrange(self.level.Height)]
i = self.level.Height
done = False
for index, b in enumerate(reversed(blocks)):
if b != 0 and not done:
i = index
done = True
spawnY = self.level.Height - i
playerTag['Pos'] = nbt.TAG_List([nbt.TAG_Double([spawnX, spawnY, spawnZ][i]) for i in xrange(3)])
playerTag['Rotation'] = nbt.TAG_List([nbt.TAG_Float(0), nbt.TAG_Float(0)])
return playerTag
def undo(self):
self.level.players.remove(self.uuid)
self.tool.movingPlayer = None
if self.tool.panel:
#self.tool.panel.players.remove(self.player)
self.tool.panel.player_UUID["UUID"].remove(self.uuid)
self.tool.panel.player_UUID["Name"].remove(self.player)
self.tool.hidePanel()
self.tool.showPanel()
if self.tool.movingPlayer is None:
del self.tool.playerPos[self.tool.revPlayerPos[self.uuid]]
else:
del self.tool.playerPos[(0,0,0)]
del self.tool.revPlayerPos[self.uuid]
del self.tool.playerTexture[self.uuid]
os.remove(self.level.getPlayerPath(self.uuid))
if self.level.getPlayerPath(self.uuid) in self.tool.nonSavedPlayers:
self.tool.nonSavedPlayers.remove(self.level.getPlayerPath(self.uuid))
self.tool.markerList.invalidate()
def redo(self):
if not (self.playerTag is None):
self.level.playerTagCache[self.level.getPlayerPath(self.uuid)] = self.playerTag
self.level.players.append(self.uuid)
if self.tool.panel:
#self.tool.panel.players.append(self.uuid)
#self.tool.panel.player_UUID[self.player] = self.uuid
self.tool.panel.player_UUID["UUID"].append(self.uuid)
self.tool.panel.player_UUID["Name"].append(self.player)
# print 4
r = self.playercache.getPlayerSkin(self.uuid)
if isinstance(r, (str, unicode)):
r = r.join()
self.tool.playerTexture[self.uuid] = loadPNGTexture(r)
self.tool.playerPos[(0,0,0)] = self.uuid
self.tool.revPlayerPos[self.uuid] = (0,0,0)
self.playerTag.save(self.level.getPlayerPath(self.uuid))
self.tool.nonSavedPlayers.append(self.level.getPlayerPath(self.uuid))
self.tool.markerList.invalidate()
class PlayerMoveOperation(Operation):
undoPos = None
redoPos = None
def __init__(self, tool, pos, player="Player", yp=(None, None)):
super(PlayerMoveOperation, self).__init__(tool.editor, tool.editor.level)
self.tool = tool
self.canUndo = False
self.pos = pos
self.player = player
self.yp = yp
def perform(self, recordUndo=True):
if self.level.saving:
alert(_("Cannot perform action while saving is taking place"))
return
try:
level = self.tool.editor.level
try:
self.undoPos = level.getPlayerPosition(self.player)
self.undoDim = level.getPlayerDimension(self.player)
self.undoYP = level.getPlayerOrientation(self.player)
except Exception as e:
log.info(_("Couldn't get player position! ({0!r})").format(e))
yaw, pitch = self.yp
if yaw is not None and pitch is not None:
level.setPlayerOrientation((yaw, pitch), self.player)
level.setPlayerPosition(self.pos, self.player)
level.setPlayerDimension(level.dimNo, self.player)
self.tool.playerPos[tuple(self.pos)] = self.player
self.tool.revPlayerPos[self.player] = self.pos
self.tool.markerList.invalidate()
self.canUndo = True
except pymclevel.PlayerNotFound as e:
print "Player move failed: ", e
def undo(self):
if not (self.undoPos is None):
level = self.tool.editor.level
try:
self.redoPos = level.getPlayerPosition(self.player)
self.redoDim = level.getPlayerDimension(self.player)
self.redoYP = level.getPlayerOrientation(self.player)
except Exception as e:
log.info(_("Couldn't get player position! ({0!r})").format(e))
level.setPlayerPosition(self.undoPos, self.player)
level.setPlayerDimension(self.undoDim, self.player)
level.setPlayerOrientation(self.undoYP, self.player)
self.tool.markerList.invalidate()
def redo(self):
if not (self.redoPos is None):
level = self.tool.editor.level
try:
self.undoPos = level.getPlayerPosition(self.player)
self.undoDim = level.getPlayerDimension(self.player)
self.undoYP = level.getPlayerOrientation(self.player)
except Exception as e:
log.info(_("Couldn't get player position! ({0!r})").format(e))
level.setPlayerPosition(self.redoPos, self.player)
level.setPlayerDimension(self.redoDim, self.player)
level.setPlayerOrientation(self.redoYP, self.player)
self.tool.markerList.invalidate()
@staticmethod
def bufferSize():
return 20
class SpawnPositionInvalid(Exception):
pass
def okayAt63(level, pos):
"""blocks 63 or 64 must be occupied"""
# return level.blockAt(pos[0], 63, pos[2]) != 0 or level.blockAt(pos[0], 64, pos[2]) != 0
return True
def okayAboveSpawn(level, pos):
"""3 blocks above spawn must be open"""
return not any([level.blockAt(pos[0], pos[1] + i, pos[2]) for i in xrange(1, 4)])
def positionValid(level, pos):
try:
return okayAt63(level, pos) and okayAboveSpawn(level, pos)
except EnvironmentError:
return False
class PlayerSpawnMoveOperation(Operation):
undoPos = None
redoPos = None
def __init__(self, tool, pos):
super(PlayerSpawnMoveOperation, self).__init__(tool.editor, tool.editor.level)
self.tool, self.pos = tool, pos
self.canUndo = False
def perform(self, recordUndo=True):
if self.level.saving:
alert(_("Cannot perform action while saving is taking place"))
return
level = self.tool.editor.level
'''
if isinstance(level, pymclevel.MCInfdevOldLevel):
if not positionValid(level, self.pos):
if config.spawn.spawnProtection.get():
raise SpawnPositionInvalid(
"You cannot have two air blocks at Y=63 and Y=64 in your spawn point's column. Additionally, you cannot have a solid block in the three blocks above your spawn point. It's weird, I know.")
'''
self.undoPos = level.playerSpawnPosition()
level.setPlayerSpawnPosition(self.pos)
self.tool.markerList.invalidate()
self.canUndo = True
def undo(self):
if self.undoPos is not None:
level = self.tool.editor.level
self.redoPos = level.playerSpawnPosition()
level.setPlayerSpawnPosition(self.undoPos)
self.tool.markerList.invalidate()
def redo(self):
if self.redoPos is not None:
level = self.tool.editor.level
self.undoPos = level.playerSpawnPosition()
level.setPlayerSpawnPosition(self.redoPos)
self.tool.markerList.invalidate()
class PlayerPositionPanel(Panel):
def __init__(self, tool):
Panel.__init__(self, name='Panel.PlayerPositionPanel')
self.tool = tool
self.player_UUID = {"UUID": [], "Name": []}
self.level = tool.editor.level
self.playercache = PlayerCache()
# Add this instance to PlayerCache 'targets'. PlayerCache generated processes will call
# this instance 'update_player' method when they have finished their execution.
self.playercache.add_target(self.update_player)
if hasattr(self.level, 'players'):
players = self.level.players or ["[No players]"]
if not self.level.oldPlayerFolderFormat:
for player in players:
if player != "Player" and player != "[No players]":
if len(player) > 4 and player[4] == "-":
os.rename(os.path.join(self.level.worldFolder.getFolderPath("playerdata"), player+".dat"), os.path.join(self.level.worldFolder.getFolderPath("playerdata"), player.replace("-", "", 1)+".dat"))
player = player.replace("-", "", 1)
# print 5
data = self.playercache.getPlayerInfo(player, use_old_data=True)
#self.player_UUID[data[0]] = data[1]
self.player_UUID["UUID"].append(data[0])
self.player_UUID["Name"].append(data[1])
#self.player_UUID[player] = data
if "Player" in players:
#self.player_UUID["Player (Single Player)"] = "Player"
self.player_UUID["UUID"].append("Player")
self.player_UUID["Name"].append("Player (Single Player)")
if "[No players]" not in players:
self.player_names = sorted(self.player_UUID.values(), key=lambda x: False if x == "Player (Single Player)" else x)
else:
self.player_UUID["UUID"].append("[No players]")
self.player_UUID["Name"].append("[No players]")
else:
players = ["Player (Single Player)"]
self.players = players
if 'Player' in self.player_UUID['UUID'] and 'Player (Single Player)' in self.player_UUID['Name']:
self.player_UUID['UUID'].insert(0, self.player_UUID['UUID'].pop(self.player_UUID['UUID'].index('Player')))
self.player_UUID['Name'].insert(0, self.player_UUID['Name'].pop(self.player_UUID['Name'].index('Player (Single Player)')))
self.pages = TabPanel()
tab_height = self.pages.tab_height
max_height = tab_height + self.tool.editor.mainViewport.height - self.tool.editor.toolbar.height - self.tool.editor.subwidgets[0].height - self.pages.margin * 2
#-# Uncomment the following line to have a maximum height for this panel.
# max_height = min(max_height, 500)
self.editNBTDataButton = Button("Edit NBT", action=self.editNBTData, tooltipText="Open the NBT Explorer to edit player's attributes and inventory")
addButton = Button("Add", action=self.tool.addPlayer)
removeButton = Button("Remove", action=self.tool.removePlayer)
gotoButton = Button("Goto", action=self.tool.gotoPlayer)
gotoCameraButton = Button("Goto View", action=self.tool.gotoPlayerCamera)
moveButton = Button("Move", action=self.tool.movePlayer)
moveToCameraButton = Button("Align to Camera", action=self.tool.movePlayerToCamera)
reloadSkin = Button("Reload Skins", action=self.tool.reloadSkins, tooltipText="This pulls skins from the online server, so this may take a while")
btns = [self.editNBTDataButton]
if not isinstance(self.level, pymclevel.leveldbpocket.PocketLeveldbWorld):
btns.extend([addButton, removeButton])
btns.extend([gotoButton, gotoCameraButton, moveButton, moveToCameraButton, reloadSkin])
btns = Column(btns, margin=0, spacing=2)
h = max_height - btns.height - self.pages.margin * 2 - 2 - self.font.get_linesize() * 2
col = Label('')
def close():
self.pages.show_page(col)
self.nbttree = NBTExplorerToolPanel(self.tool.editor, nbtObject={}, height=max_height, \
close_text="Go Back", no_header=True, close_action=close,
load_text=None)
self.nbttree.shrink_wrap()
self.nbtpage = Column([self.nbttree])
self.nbtpage.shrink_wrap()
self.pages.add_page("NBT Data", self.nbtpage)
self.pages.set_rect(map(lambda x:x+self.margin, self.nbttree._rect))
tableview = TableView(nrows=(h - (self.font.get_linesize() * 2.5)) / self.font.get_linesize(),
header_height=self.font.get_linesize(),
columns=[TableColumn("Player Name(s):", (self.nbttree.width - (self.margin * 3)) / 3),
TableColumn("Player UUID(s):", (self.nbttree.width - (self.margin * 3)))],
)
tableview.index = 0
tableview.num_rows = lambda: len(self.player_UUID["UUID"])
tableview.row_data = lambda i: (self.player_UUID["Name"][i],self.player_UUID["UUID"][i])
tableview.row_is_selected = lambda x: x == tableview.index
tableview.zebra_color = (0, 0, 0, 48)
def selectTableRow(i, evt):
tableview.index = i
tableview.click_row = selectTableRow
def mouse_down(e):
if e.button == 1 and e.num_clicks > 1:
self.editNBTData()
TableRowView.mouse_down(tableview.rows, e)
tableview.rows.mouse_down = mouse_down
tableview.rows.tooltipText = "Double-click or use the button below to edit the NBT Data."
self.table = tableview
col.set_parent(None)
self.col = col = Column([tableview, btns], spacing=2)
self.pages.add_page("Players", col, 0)
self.pages.shrink_wrap()
self.pages.show_page(col)
self.add(self.pages)
self.shrink_wrap()
self.max_height = max_height
def editNBTData(self):
player = self.selectedPlayer
if player == 'Player (Single Player)':
alert("Not yet implemented.\nUse the NBT Explorer to edit this player.")
elif player == '[No players]':
return
else:
player = self.level.getPlayerTag(self.selectedPlayer)
if player is not None:
self.pages.remove_page(self.nbtpage)
def close():
self.pages.show_page(self.col)
self.nbttree = NBTExplorerToolPanel(self.tool.editor, nbtObject=player, fileName=None,
savePolicy=-1, dataKeyName=None,
height=self.max_height, no_header=True, close_text="Go Back",
close_action=close, load_text=None,
copy_data=False)
self.nbtpage = Column([self.nbttree,])
self.nbtpage.shrink_wrap()
self.pages.add_page("NBT Data", self.nbtpage)
self.pages.show_page(self.nbtpage)
else:
alert(_("Unable to load player %s" % self.selectedPlayer()))
@property
def selectedPlayer(self):
if not self.level.oldPlayerFolderFormat:
player = self.players[self.table.index]
if player != "Player (Single Player)" and player != "[No players]" and player != "~local_player":
return self.player_UUID["UUID"][self.table.index]
else:
return player
else:
return self.players[self.table.index]
def key_down(self, evt):
self.dispatch_key('key_down', evt)
def dispatch_key(self, name, evt):
if not hasattr(evt, 'key'):
return
if name == "key_down":
keyname = self.root.getKey(evt)
if self.pages.current_page == self.col:
if keyname == "Up" and self.table.index > 0:
self.table.index -= 1
self.table.rows.scroll_to_item(self.table.index)
elif keyname == "Down" and self.table.index < len(self.players) - 1:
self.table.index += 1
self.table.rows.scroll_to_item(self.table.index)
elif keyname == 'Page down':
self.table.index = min(len(self.players) - 1, self.table.index + self.table.rows.num_rows())
elif keyname == 'Page up':
self.table.index = max(0, self.table.index - self.table.rows.num_rows())
elif keyname == 'Return':
if self.selectedPlayer:
self.editNBTData()
if self.table.rows.cell_to_item_no(0, 0) + self.table.rows.num_rows() -1 > self.table.index or self.table.rows.cell_to_item_no(0, 0) + self.table.rows.num_rows() -1 < self.table.index:
self.table.rows.scroll_to_item(self.table.index)
elif self.pages.current_page == self.nbtpage:
self.nbttree.dispatch_key(name, evt)
def update_player(self, data):
if isinstance(data, tuple):
if data[0] in self.player_UUID['UUID']:
idx = self.player_UUID['UUID'].index(data[0])
self.player_UUID['UUID'][idx] = data[0]
self.player_UUID['Name'][idx] = data[1]
class PlayerPositionTool(EditorTool):
surfaceBuild = True
toolIconName = "player"
tooltipText = "Players"
movingPlayer = None
recordMove = True
def reloadTextures(self):
self.charTex = loadPNGTexture('char.png')
@alertException
def addPlayer(self):
op = PlayerAddOperation(self)
self.editor.addOperation(op)
if op.canUndo:
self.editor.addUnsavedEdit()
@alertException
def removePlayer(self):
player = self.panel.selectedPlayer
if player != "[No players]":
op = PlayerRemoveOperation(self, player)
self.editor.addOperation(op)
if op.canUndo:
self.editor.addUnsavedEdit()
@alertException
def movePlayer(self):
if self.panel.selectedPlayer != "[No players]":
self.movingPlayer = self.panel.selectedPlayer
if self.movingPlayer == "Player (Single Player)":
self.movingPlayer = "Player"
@alertException
def movePlayerToCamera(self):
player = self.panel.selectedPlayer
if player == "Player (Single Player)":
player = "Player"
if player != "[No players]":
pos = self.editor.mainViewport.cameraPosition
y = self.editor.mainViewport.yaw
p = self.editor.mainViewport.pitch
op = PlayerMoveOperation(self, pos, player, (y, p))
self.movingPlayer = None
self.editor.addOperation(op)
if op.canUndo:
self.editor.addUnsavedEdit()
def delete_skin(self, uuid):
del self.playerTexture[uuid]
self.playerTexture[uuid] = self.charTex
@alertException
def reloadSkins(self):
#result = ask("This pulls skins from the online server, so this may take a while", ["Ok", "Cancel"])
#if result == "Ok":
try:
for player in self.editor.level.players:
if player != "Player" and player in self.playerTexture.keys():
del self.playerTexture[player]
# print 6
r = self.playercache.getPlayerSkin(player, force_download=True, instance=self)
                    if not isinstance(r, (str, unicode)):
r = r.join()
self.playerTexture[player] = loadPNGTexture(r)
#self.markerList.call(self._drawToolMarkers)
except:
raise Exception("Could not connect to the skins server, please check your Internet connection and try again.")
def gotoPlayerCamera(self):
player = self.panel.selectedPlayer
if player == "Player (Single Player)":
player = "Player"
try:
pos = self.editor.level.getPlayerPosition(player)
y, p = self.editor.level.getPlayerOrientation(player)
self.editor.gotoDimension(self.editor.level.getPlayerDimension(player))
self.editor.mainViewport.cameraPosition = pos
self.editor.mainViewport.yaw = y
self.editor.mainViewport.pitch = p
self.editor.mainViewport.stopMoving()
self.editor.mainViewport.invalidate()
except pymclevel.PlayerNotFound:
pass
def gotoPlayer(self):
player = self.panel.selectedPlayer
if player == "Player (Single Player)":
player = "Player"
try:
if self.editor.mainViewport.pitch < 0:
self.editor.mainViewport.pitch = -self.editor.mainViewport.pitch
self.editor.mainViewport.cameraVector = self.editor.mainViewport._cameraVector()
cv = self.editor.mainViewport.cameraVector
pos = self.editor.level.getPlayerPosition(player)
pos = map(lambda p, c: p - c * 5, pos, cv)
self.editor.gotoDimension(self.editor.level.getPlayerDimension(player))
self.editor.mainViewport.cameraPosition = pos
self.editor.mainViewport.stopMoving()
except pymclevel.PlayerNotFound:
pass
def __init__(self, *args):
EditorTool.__init__(self, *args)
self.reloadTextures()
self.nonSavedPlayers = []
textureVerticesHead = numpy.array(
(
# Backside of Head
24, 16, # Bottom Left
24, 8, # Top Left
32, 8, # Top Right
32, 16, # Bottom Right
# Front of Head
8, 16,
8, 8,
16, 8,
16, 16,
#
24, 0,
16, 0,
16, 8,
24, 8,
#
16, 0,
8, 0,
8, 8,
16, 8,
#
8, 8,
0, 8,
0, 16,
8, 16,
16, 16,
24, 16,
24, 8,
16, 8,
), dtype='f4')
textureVerticesHat = numpy.array(
(
56, 16,
56, 8,
64, 8,
64, 16,
48, 16,
48, 8,
40, 8,
40, 16,
56, 0,
48, 0,
48, 8,
56, 8,
48, 0,
40, 0,
40, 8,
48, 8,
40, 8,
32, 8,
32, 16,
40, 16,
48, 16,
56, 16,
56, 8,
48, 8,
), dtype='f4')
textureVerticesHead.shape = (24, 2)
textureVerticesHat.shape = (24, 2)
textureVerticesHead *= 4
textureVerticesHead[:, 1] *= 2
textureVerticesHat *= 4
textureVerticesHat[:, 1] *= 2
self.texVerts = (textureVerticesHead, textureVerticesHat)
self.playerPos = {0:{}, -1:{}, 1:{}}
self.playerTexture = {}
self.revPlayerPos = {0:{}, -1:{}, 1:{}}
self.inOtherDimension = {0: [], 1: [], -1: []}
self.playercache = PlayerCache()
self.markerList = DisplayList()
panel = None
def showPanel(self):
if not self.panel:
self.panel = PlayerPositionPanel(self)
self.panel.centery = (self.editor.mainViewport.height - self.editor.toolbar.height) / 2 + self.editor.subwidgets[0].height
self.panel.left = self.editor.left
self.editor.add(self.panel)
def hidePanel(self):
if self.panel and self.panel.parent:
self.panel.parent.remove(self.panel)
self.panel = None
def drawToolReticle(self):
if self.movingPlayer is None:
return
pos, direction = self.editor.blockFaceUnderCursor
dim = self.editor.level.getPlayerDimension(self.movingPlayer)
pos = (pos[0], pos[1] + 2, pos[2])
x, y, z = pos
# x,y,z=map(lambda p,d: p+d, pos, direction)
GL.glEnable(GL.GL_BLEND)
GL.glColor(1.0, 1.0, 1.0, 0.5)
self.drawCharacterHead(x + 0.5, y + 0.75, z + 0.5, self.revPlayerPos[dim][self.movingPlayer], dim)
GL.glDisable(GL.GL_BLEND)
GL.glEnable(GL.GL_DEPTH_TEST)
self.drawCharacterHead(x + 0.5, y + 0.75, z + 0.5, self.revPlayerPos[dim][self.movingPlayer], dim)
drawTerrainCuttingWire(BoundingBox((x, y, z), (1, 1, 1)))
drawTerrainCuttingWire(BoundingBox((x, y - 1, z), (1, 1, 1)))
#drawTerrainCuttingWire( BoundingBox((x,y-2,z), (1,1,1)) )
GL.glDisable(GL.GL_DEPTH_TEST)
markerLevel = None
def drawToolMarkers(self):
if not config.settings.drawPlayerHeads.get():
return
if self.markerLevel != self.editor.level:
self.markerList.invalidate()
self.markerLevel = self.editor.level
self.markerList.call(self._drawToolMarkers)
def _drawToolMarkers(self):
GL.glColor(1.0, 1.0, 1.0, 0.5)
GL.glEnable(GL.GL_DEPTH_TEST)
GL.glMatrixMode(GL.GL_MODELVIEW)
for player in self.editor.level.players:
try:
pos = self.editor.level.getPlayerPosition(player)
yaw, pitch = self.editor.level.getPlayerOrientation(player)
dim = self.editor.level.getPlayerDimension(player)
self.inOtherDimension[dim].append(player)
self.playerPos[dim][pos] = player
self.revPlayerPos[dim][player] = pos
if player != "Player" and config.settings.downloadPlayerSkins.get():
# print 7
r = self.playercache.getPlayerSkin(player, force_download=False)
if not isinstance(r, (str, unicode)):
r = r.join()
self.playerTexture[player] = loadPNGTexture(r)
else:
self.playerTexture[player] = self.charTex
if dim != self.editor.level.dimNo:
continue
x, y, z = pos
GL.glPushMatrix()
GL.glTranslate(x, y, z)
GL.glRotate(-yaw, 0, 1, 0)
GL.glRotate(pitch, 1, 0, 0)
GL.glColor(1, 1, 1, 1)
self.drawCharacterHead(0, 0, 0, (x,y,z), self.editor.level.dimNo)
GL.glPopMatrix()
# GL.glEnable(GL.GL_BLEND)
drawTerrainCuttingWire(FloatBox((x - .5, y - .5, z - .5), (1, 1, 1)),
c0=(0.3, 0.9, 0.7, 1.0),
c1=(0, 0, 0, 0),
)
#GL.glDisable(GL.GL_BLEND)
except Exception, e:
print "Exception in editortools.player.PlayerPositionTool._drawToolMarkers:", repr(e)
import traceback
print traceback.format_exc()
continue
GL.glDisable(GL.GL_DEPTH_TEST)
def drawCharacterHead(self, x, y, z, realCoords=None, dim=0):
GL.glEnable(GL.GL_CULL_FACE)
origin = (x - 0.25, y - 0.25, z - 0.25)
size = (0.5, 0.5, 0.5)
box = FloatBox(origin, size)
hat_origin = (x - 0.275, y - 0.275, z - 0.275)
hat_size = (0.55, 0.55, 0.55)
hat_box = FloatBox(hat_origin, hat_size)
if realCoords is not None and self.playerPos[dim][realCoords] != "Player" and config.settings.downloadPlayerSkins.get():
drawCube(box,
texture=self.playerTexture[self.playerPos[dim][realCoords]], textureVertices=self.texVerts[0])
GL.glEnable(GL.GL_BLEND)
GL.glBlendFunc(GL.GL_SRC_ALPHA, GL.GL_ONE_MINUS_SRC_ALPHA)
drawCube(hat_box,
texture=self.playerTexture[self.playerPos[dim][realCoords]], textureVertices=self.texVerts[1])
GL.glDisable(GL.GL_BLEND)
else:
drawCube(box,
texture=self.charTex, textureVertices=self.texVerts[0])
GL.glDisable(GL.GL_CULL_FACE)
#@property
#def statusText(self):
# if not self.panel:
# return ""
# player = self.panel.selectedPlayer
# if player == "Player":
# return "Click to move the player"
#
# return _("Click to move the player \"{0}\"").format(player)
@alertException
def mouseDown(self, evt, pos, direction):
if self.movingPlayer is None:
return
pos = (pos[0] + 0.5, pos[1] + 2.75, pos[2] + 0.5)
op = PlayerMoveOperation(self, pos, self.movingPlayer)
self.movingPlayer = None
if self.recordMove:
self.editor.addOperation(op)
addingMoving = False
else:
self.editor.performWithRetry(op) #Prevent recording of Undo when adding player
self.recordMove = True
addingMoving = True
if op.canUndo and not addingMoving:
self.editor.addUnsavedEdit()
def keyDown(self, evt):
keyname = evt.dict.get('keyname', None) or self.editor.get_root().getKey(evt)
if not self.recordMove:
if not pygame.key.get_focused():
return
if keyname == "Escape":
self.recordMove = True
if self.panel and self.panel.__class__ == PlayerPositionPanel:
self.panel.key_down(evt)
def keyUp(self, evt):
pass
def levelChanged(self):
self.markerList.invalidate()
@alertException
def toolSelected(self):
self.showPanel()
self.movingPlayer = None
@alertException
def toolReselected(self):
if self.panel:
self.gotoPlayer()
class PlayerSpawnPositionOptions(ToolOptions):
def __init__(self, tool):
ToolOptions.__init__(self, name='Panel.PlayerSpawnPositionOptions')
self.tool = tool
self.spawnProtectionCheckBox = CheckBox(ref=AttrRef(tool, "spawnProtection"))
self.spawnProtectionLabel = Label("Spawn Position Safety")
self.spawnProtectionLabel.mouse_down = self.spawnProtectionCheckBox.mouse_down
tooltipText = "Minecraft will randomly move your spawn point if you try to respawn in a column where there are no blocks at Y=63 and Y=64. Only uncheck this box if Minecraft is changed."
self.spawnProtectionLabel.tooltipText = self.spawnProtectionCheckBox.tooltipText = tooltipText
row = Row((self.spawnProtectionCheckBox, self.spawnProtectionLabel))
col = Column((Label("Spawn Point Options"), row, Button("OK", action=self.dismiss)))
self.add(col)
self.shrink_wrap()
class PlayerSpawnPositionTool(PlayerPositionTool):
surfaceBuild = True
toolIconName = "playerspawn"
tooltipText = "Move Spawn Point\nRight-click for options"
def __init__(self, *args):
PlayerPositionTool.__init__(self, *args)
self.optionsPanel = PlayerSpawnPositionOptions(self)
def toolEnabled(self):
return self.editor.level.dimNo == 0
def showPanel(self):
self.panel = Panel(name='Panel.PlayerSpawnPositionTool')
button = Button("Goto Spawn", action=self.gotoSpawn)
self.panel.add(button)
self.panel.shrink_wrap()
self.panel.left = self.editor.left
self.panel.centery = self.editor.centery
self.editor.add(self.panel)
def gotoSpawn(self):
cv = self.editor.mainViewport.cameraVector
pos = self.editor.level.playerSpawnPosition()
pos = map(lambda p, c: p - c * 5, pos, cv)
self.editor.mainViewport.cameraPosition = pos
self.editor.mainViewport.stopMoving()
@property
def statusText(self):
return "Click to set the spawn position."
spawnProtection = config.spawn.spawnProtection.property()
def drawToolReticle(self):
pos, direction = self.editor.blockFaceUnderCursor
x, y, z = map(lambda p, d: p + d, pos, direction)
color = (1.0, 1.0, 1.0, 0.5)
if isinstance(self.editor.level, pymclevel.MCInfdevOldLevel) and self.spawnProtection:
if not positionValid(self.editor.level, (x, y, z)):
color = (1.0, 0.0, 0.0, 0.5)
GL.glColor(*color)
GL.glEnable(GL.GL_BLEND)
self.drawCage(x, y, z)
self.drawCharacterHead(x + 0.5, y + 0.5, z + 0.5)
GL.glDisable(GL.GL_BLEND)
GL.glEnable(GL.GL_DEPTH_TEST)
self.drawCage(x, y, z)
self.drawCharacterHead(x + 0.5, y + 0.5, z + 0.5)
color2 = map(lambda a: a * 0.4, color)
drawTerrainCuttingWire(BoundingBox((x, y, z), (1, 1, 1)), color2, color)
GL.glDisable(GL.GL_DEPTH_TEST)
def _drawToolMarkers(self):
x, y, z = self.editor.level.playerSpawnPosition()
GL.glBlendFunc(GL.GL_SRC_ALPHA, GL.GL_ONE_MINUS_SRC_ALPHA)
GL.glEnable(GL.GL_BLEND)
color = config.selectionColors.black.get() + (0.35,)
GL.glColor(*color)
GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_LINE)
GL.glLineWidth(2.0)
drawCube(FloatBox((x, y, z), (1, 1, 1)))
GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)
drawCube(FloatBox((x, y, z), (1, 1, 1)))
GL.glDisable(GL.GL_BLEND)
GL.glEnable(GL.GL_DEPTH_TEST)
GL.glColor(1.0, 1.0, 1.0, 1.0)
self.drawCage(x, y, z)
self.drawCharacterHead(x + 0.5, y + 0.5 + 0.125 * numpy.sin(self.editor.frames * 0.05), z + 0.5)
GL.glDisable(GL.GL_DEPTH_TEST)
def drawCage(self, x, y, z):
cageTexVerts = numpy.array(pymclevel.MCInfdevOldLevel.materials.blockTextures[52, 0])
pixelScale = 0.5 if self.editor.level.materials.name in ("Pocket", "Alpha") else 1.0
texSize = 16 * pixelScale
cageTexVerts = cageTexVerts.astype(float) * pixelScale
cageTexVerts = numpy.array(
[((tx, ty), (tx + texSize, ty), (tx + texSize, ty + texSize), (tx, ty + texSize)) for (tx, ty) in
cageTexVerts], dtype='float32')
GL.glEnable(GL.GL_ALPHA_TEST)
drawCube(BoundingBox((x, y, z), (1, 1, 1)), texture=pymclevel.alphaMaterials.terrainTexture,
textureVertices=cageTexVerts)
GL.glDisable(GL.GL_ALPHA_TEST)
@alertException
def mouseDown(self, evt, pos, direction):
pos = map(lambda p, d: p + d, pos, direction)
op = PlayerSpawnMoveOperation(self, pos)
try:
self.editor.addOperation(op)
if op.canUndo:
self.editor.addUnsavedEdit()
self.markerList.invalidate()
except SpawnPositionInvalid, e:
if "Okay" != ask(str(e), responses=["Okay", "Fix it for me!"]):
level = self.editor.level
status = ""
if not okayAt63(level, pos):
level.setBlockAt(pos[0], 63, pos[2], 1)
status += _("Block added at y=63.\n")
if 59 < pos[1] < 63:
pos[1] = 63
status += _("Spawn point moved upward to y=63.\n")
if not okayAboveSpawn(level, pos):
if pos[1] > 63 or pos[1] < 59:
lpos = (pos[0], pos[1] - 1, pos[2])
if level.blockAt(*pos) == 0 and level.blockAt(*lpos) != 0 and okayAboveSpawn(level, lpos):
pos = lpos
status += _("Spawn point shifted down by one block.\n")
if not okayAboveSpawn(level, pos):
for i in xrange(1, 4):
level.setBlockAt(pos[0], pos[1] + i, pos[2], 0)
status += _("Blocks above spawn point cleared.\n")
self.editor.invalidateChunks([(pos[0] // 16, pos[2] // 16)])
op = PlayerSpawnMoveOperation(self, pos)
try:
self.editor.addOperation(op)
if op.canUndo:
self.editor.addUnsavedEdit()
self.markerList.invalidate()
except SpawnPositionInvalid, e:
alert(str(e))
return
if len(status):
alert(_("Spawn point fixed. Changes: \n\n") + status)
@alertException
def toolReselected(self):
self.gotoSpawn()
| 1.46875 | 1 |
seismic/checkpointing/checkpoint.py | slimgroup/Devito-Examples | 7 | 5282 | # The MIT License (MIT)
#
# Copyright (c) 2016, Imperial College, London
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation the rights to use, copy,
# modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the
# following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
# FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from pyrevolve import Checkpoint, Operator
from devito import TimeFunction
from devito.tools import flatten
class CheckpointOperator(Operator):
"""Devito's concrete implementation of the ABC pyrevolve.Operator. This class wraps
devito.Operator so it conforms to the pyRevolve API. pyRevolve will call apply
    with arguments t_start and t_end. Devito calls these arguments time_m and time_M, so
    the following dict is used to translate between the two naming schemes.
Parameters
----------
op : Operator
devito.Operator object that this object will wrap.
args : dict
If devito.Operator.apply() expects any arguments, they can be provided
here to be cached. Any calls to CheckpointOperator.apply() will
automatically include these cached arguments in the call to the
underlying devito.Operator.apply().
"""
t_arg_names = {'t_start': 'time_m', 't_end': 'time_M'}
def __init__(self, op, **kwargs):
self.op = op
self.args = kwargs
op_default_args = self.op._prepare_arguments(**kwargs)
self.start_offset = op_default_args[self.t_arg_names['t_start']]
def _prepare_args(self, t_start, t_end):
args = self.args.copy()
args[self.t_arg_names['t_start']] = t_start + self.start_offset
args[self.t_arg_names['t_end']] = t_end - 1 + self.start_offset
return args
def apply(self, t_start, t_end):
""" If the devito operator requires some extra arguments in the call to apply
they can be stored in the args property of this object so pyRevolve calls
pyRevolve.Operator.apply() without caring about these extra arguments while
this method passes them on correctly to devito.Operator
"""
# Build the arguments list to invoke the kernel function
args = self.op.arguments(**self._prepare_args(t_start, t_end))
# Invoke kernel function with args
arg_values = [args[p.name] for p in self.op.parameters]
self.op.cfunction(*arg_values)
class DevitoCheckpoint(Checkpoint):
"""Devito's concrete implementation of the Checkpoint abstract base class provided by
pyRevolve. Holds a list of symbol objects that hold data.
"""
def __init__(self, objects):
"""Intialise a checkpoint object. Upon initialisation, a checkpoint
stores only a reference to the objects that are passed into it."""
assert(all(isinstance(o, TimeFunction) for o in objects))
dtypes = set([o.dtype for o in objects])
assert(len(dtypes) == 1)
self._dtype = dtypes.pop()
self.objects = objects
@property
def dtype(self):
return self._dtype
def get_data(self, timestep):
data = flatten([get_symbol_data(s, timestep) for s in self.objects])
return data
def get_data_location(self, timestep):
return self.get_data(timestep)
@property
def size(self):
"""The memory consumption of the data contained in a checkpoint."""
return sum([int((o.size_allocated/(o.time_order+1))*o.time_order)
for o in self.objects])
def save(*args):
raise RuntimeError("Invalid method called. Did you check your version" +
" of pyrevolve?")
def load(*args):
raise RuntimeError("Invalid method called. Did you check your version" +
" of pyrevolve?")
def get_symbol_data(symbol, timestep):
timestep += symbol.time_order - 1
ptrs = []
for i in range(symbol.time_order):
# Use `._data`, instead of `.data`, as `.data` is a view of the DOMAIN
# data region which is non-contiguous in memory. The performance hit from
# dealing with non-contiguous memory is so big (introduces >1 copy), it's
        # better to checkpoint unnecessary stuff to get a contiguous chunk of memory.
ptr = symbol._data[timestep - i, :, :]
ptrs.append(ptr)
return ptrs
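# ---------------------------------------------------------------------------
# Illustrative sketch only (not used by this module): one way the classes above
# could be wired into pyrevolve's optimal-checkpointing driver. The operator
# names (`fwd_op`, `rev_op`), the TimeFunction `u`, the checkpoint/timestep
# counts and the Revolver call signature are assumptions, not defined here.
#
#   from pyrevolve import Revolver
#
#   cp = DevitoCheckpoint([u])                      # fields to checkpoint
#   wrap_fw = CheckpointOperator(fwd_op, u=u)       # forward sweep wrapper
#   wrap_rev = CheckpointOperator(rev_op, u=u)      # reverse sweep wrapper
#   revolver = Revolver(cp, wrap_fw, wrap_rev, n_checkpoints, nt)
#   revolver.apply_forward()                        # forward run, storing checkpoints
#   revolver.apply_reverse()                        # adjoint run, restoring as needed
# ---------------------------------------------------------------------------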
| 1.9375 | 2 |
lbrynet/file_manager/EncryptedFileManager.py | shyba/lbry | 1 | 5283 | """
Keep track of which LBRY Files are downloading and store their LBRY File specific metadata
"""
import logging
import os
from twisted.enterprise import adbapi
from twisted.internet import defer, task, reactor
from twisted.python.failure import Failure
from lbrynet.reflector.reupload import reflect_stream
from lbrynet.core.PaymentRateManager import NegotiatedPaymentRateManager
from lbrynet.file_manager.EncryptedFileDownloader import ManagedEncryptedFileDownloader
from lbrynet.file_manager.EncryptedFileDownloader import ManagedEncryptedFileDownloaderFactory
from lbrynet.lbry_file.StreamDescriptor import EncryptedFileStreamType
from lbrynet.cryptstream.client.CryptStreamDownloader import AlreadyStoppedError
from lbrynet.cryptstream.client.CryptStreamDownloader import CurrentlyStoppingError
from lbrynet.core.sqlite_helpers import rerun_if_locked
from lbrynet import conf
log = logging.getLogger(__name__)
def safe_start_looping_call(looping_call, seconds=3600):
if not looping_call.running:
looping_call.start(seconds)
def safe_stop_looping_call(looping_call):
if looping_call.running:
looping_call.stop()
class EncryptedFileManager(object):
"""Keeps track of currently opened LBRY Files, their options, and
their LBRY File specific metadata.
"""
def __init__(self, session, stream_info_manager, sd_identifier, download_directory=None):
self.session = session
self.stream_info_manager = stream_info_manager
# TODO: why is sd_identifier part of the file manager?
self.sd_identifier = sd_identifier
self.lbry_files = []
self.sql_db = None
if download_directory:
self.download_directory = download_directory
else:
self.download_directory = os.getcwd()
self.lbry_file_reflector = task.LoopingCall(self.reflect_lbry_files)
log.debug("Download directory for EncryptedFileManager: %s", str(self.download_directory))
@defer.inlineCallbacks
def setup(self):
yield self._open_db()
yield self._add_to_sd_identifier()
yield self._start_lbry_files()
if conf.settings['reflect_uploads']:
safe_start_looping_call(self.lbry_file_reflector)
def get_lbry_file_status(self, lbry_file):
return self._get_lbry_file_status(lbry_file.rowid)
def set_lbry_file_data_payment_rate(self, lbry_file, new_rate):
return self._set_lbry_file_payment_rate(lbry_file.rowid, new_rate)
def change_lbry_file_status(self, lbry_file, status):
log.debug("Changing status of %s to %s", lbry_file.stream_hash, status)
return self._change_file_status(lbry_file.rowid, status)
def get_lbry_file_status_reports(self):
ds = []
for lbry_file in self.lbry_files:
ds.append(lbry_file.status())
dl = defer.DeferredList(ds)
def filter_failures(status_reports):
return [status_report for success, status_report in status_reports if success is True]
dl.addCallback(filter_failures)
return dl
def save_sd_blob_hash_to_stream(self, stream_hash, sd_hash):
return self.stream_info_manager.save_sd_blob_hash_to_stream(stream_hash, sd_hash)
def _add_to_sd_identifier(self):
downloader_factory = ManagedEncryptedFileDownloaderFactory(self)
self.sd_identifier.add_stream_downloader_factory(
EncryptedFileStreamType, downloader_factory)
@defer.inlineCallbacks
def _check_stream_is_managed(self, stream_hash):
# check that all the streams in the stream_info_manager are also
# tracked by lbry_file_manager and fix any streams that aren't.
rowid = yield self._get_rowid_for_stream_hash(stream_hash)
if rowid is not None:
defer.returnValue(True)
rate = self.session.base_payment_rate_manager.min_blob_data_payment_rate
key, stream_name, file_name = yield self.stream_info_manager.get_stream_info(stream_hash)
log.warning("Trying to fix missing lbry file for %s", stream_name.decode('hex'))
yield self._save_lbry_file(stream_hash, rate)
@defer.inlineCallbacks
def _check_stream_info_manager(self):
def _iter_streams(stream_hashes):
for stream_hash in stream_hashes:
yield self._check_stream_is_managed(stream_hash)
stream_hashes = yield self.stream_info_manager.get_all_streams()
log.debug("Checking %s streams", len(stream_hashes))
yield defer.DeferredList(list(_iter_streams(stream_hashes)))
@defer.inlineCallbacks
def _start_lbry_files(self):
yield self._check_stream_info_manager()
files_and_options = yield self._get_all_lbry_files()
yield defer.DeferredList([
self._set_options_and_restore(rowid, stream_hash, options)
for rowid, stream_hash, options in files_and_options
])
log.info("Started %i lbry files", len(self.lbry_files))
@defer.inlineCallbacks
def _set_options_and_restore(self, rowid, stream_hash, options):
try:
b_prm = self.session.base_payment_rate_manager
payment_rate_manager = NegotiatedPaymentRateManager(
b_prm, self.session.blob_tracker)
downloader = yield self.start_lbry_file(
rowid, stream_hash, payment_rate_manager, blob_data_rate=options)
yield downloader.restore()
except Exception:
log.error('An error occurred while starting a lbry file (%s, %s, %s)',
rowid, stream_hash, options)
@defer.inlineCallbacks
def start_lbry_file(self, rowid, stream_hash,
payment_rate_manager, blob_data_rate=None,
download_directory=None, file_name=None):
if not download_directory:
download_directory = self.download_directory
payment_rate_manager.min_blob_data_payment_rate = blob_data_rate
lbry_file_downloader = ManagedEncryptedFileDownloader(
rowid,
stream_hash,
self.session.peer_finder,
self.session.rate_limiter,
self.session.blob_manager,
self.stream_info_manager,
self,
payment_rate_manager,
self.session.wallet,
download_directory,
file_name=file_name
)
yield lbry_file_downloader.set_stream_info()
self.lbry_files.append(lbry_file_downloader)
defer.returnValue(lbry_file_downloader)
@defer.inlineCallbacks
def _stop_lbry_file(self, lbry_file):
def wait_for_finished(lbry_file, count=2):
if count or lbry_file.saving_status is not False:
                return task.deferLater(reactor, 1, wait_for_finished, lbry_file, count - 1)
try:
yield lbry_file.stop(change_status=False)
self.lbry_files.remove(lbry_file)
except CurrentlyStoppingError:
yield wait_for_finished(lbry_file)
except AlreadyStoppedError:
pass
finally:
defer.returnValue(None)
def _stop_lbry_files(self):
log.info("Stopping %i lbry files", len(self.lbry_files))
lbry_files = self.lbry_files
for lbry_file in lbry_files:
yield self._stop_lbry_file(lbry_file)
@defer.inlineCallbacks
def add_lbry_file(self, stream_hash, payment_rate_manager, blob_data_rate=None,
download_directory=None, file_name=None):
rowid = yield self._save_lbry_file(stream_hash, blob_data_rate)
lbry_file = yield self.start_lbry_file(rowid, stream_hash, payment_rate_manager,
blob_data_rate, download_directory,
file_name)
defer.returnValue(lbry_file)
@defer.inlineCallbacks
def delete_lbry_file(self, lbry_file, delete_file=False):
if lbry_file not in self.lbry_files:
raise ValueError("Could not find that LBRY file")
def wait_for_finished(count=2):
if count <= 0 or lbry_file.saving_status is False:
return True
else:
return task.deferLater(reactor, 1, wait_for_finished, count=count - 1)
full_path = os.path.join(lbry_file.download_directory, lbry_file.file_name)
try:
yield lbry_file.stop()
except (AlreadyStoppedError, CurrentlyStoppingError):
yield wait_for_finished()
self.lbry_files.remove(lbry_file)
yield self._delete_lbry_file_options(lbry_file.rowid)
yield lbry_file.delete_data()
# TODO: delete this
# get count for stream hash returns the count of the lbry files with the stream hash
# in the lbry_file_options table, which will soon be removed.
stream_count = yield self.get_count_for_stream_hash(lbry_file.stream_hash)
if stream_count == 0:
yield self.stream_info_manager.delete_stream(lbry_file.stream_hash)
else:
msg = ("Can't delete stream info for %s, count is %i\n"
"The call that resulted in this warning will\n"
"be removed in the database refactor")
log.warning(msg, lbry_file.stream_hash, stream_count)
if delete_file and os.path.isfile(full_path):
os.remove(full_path)
defer.returnValue(True)
def toggle_lbry_file_running(self, lbry_file):
"""Toggle whether a stream reader is currently running"""
for l in self.lbry_files:
if l == lbry_file:
return l.toggle_running()
return defer.fail(Failure(ValueError("Could not find that LBRY file")))
def _reflect_lbry_files(self):
for lbry_file in self.lbry_files:
yield reflect_stream(lbry_file)
@defer.inlineCallbacks
def reflect_lbry_files(self):
yield defer.DeferredList(list(self._reflect_lbry_files()))
@defer.inlineCallbacks
def stop(self):
safe_stop_looping_call(self.lbry_file_reflector)
yield defer.DeferredList(list(self._stop_lbry_files()))
if self.sql_db:
yield self.sql_db.close()
self.sql_db = None
log.info("Stopped %s", self)
defer.returnValue(True)
def get_count_for_stream_hash(self, stream_hash):
return self._get_count_for_stream_hash(stream_hash)
######### database calls #########
def _open_db(self):
# check_same_thread=False is solely to quiet a spurious error that appears to be due
# to a bug in twisted, where the connection is closed by a different thread than the
# one that opened it. The individual connections in the pool are not used in multiple
# threads.
self.sql_db = adbapi.ConnectionPool(
"sqlite3",
os.path.join(self.session.db_dir, "lbryfile_info.db"),
check_same_thread=False
)
return self.sql_db.runQuery(
"create table if not exists lbry_file_options (" +
" blob_data_rate real, " +
" status text," +
" stream_hash text,"
" foreign key(stream_hash) references lbry_files(stream_hash)" +
")"
)
@rerun_if_locked
def _save_lbry_file(self, stream_hash, data_payment_rate):
def do_save(db_transaction):
row = (data_payment_rate, ManagedEncryptedFileDownloader.STATUS_STOPPED, stream_hash)
db_transaction.execute("insert into lbry_file_options values (?, ?, ?)", row)
return db_transaction.lastrowid
return self.sql_db.runInteraction(do_save)
@rerun_if_locked
def _delete_lbry_file_options(self, rowid):
return self.sql_db.runQuery("delete from lbry_file_options where rowid = ?",
(rowid,))
@rerun_if_locked
def _set_lbry_file_payment_rate(self, rowid, new_rate):
return self.sql_db.runQuery(
"update lbry_file_options set blob_data_rate = ? where rowid = ?",
(new_rate, rowid))
@rerun_if_locked
def _get_all_lbry_files(self):
d = self.sql_db.runQuery("select rowid, stream_hash, blob_data_rate from lbry_file_options")
return d
@rerun_if_locked
def _change_file_status(self, rowid, new_status):
return self.sql_db.runQuery("update lbry_file_options set status = ? where rowid = ?",
(new_status, rowid))
@rerun_if_locked
def _get_lbry_file_status(self, rowid):
d = self.sql_db.runQuery("select status from lbry_file_options where rowid = ?",
(rowid,))
d.addCallback(lambda r: (r[0][0] if len(r) else None))
return d
@rerun_if_locked
def _get_count_for_stream_hash(self, stream_hash):
d = self.sql_db.runQuery("select count(*) from lbry_file_options where stream_hash = ?",
(stream_hash,))
d.addCallback(lambda r: (r[0][0] if r else 0))
return d
@rerun_if_locked
def _get_rowid_for_stream_hash(self, stream_hash):
d = self.sql_db.runQuery("select rowid from lbry_file_options where stream_hash = ?",
(stream_hash,))
d.addCallback(lambda r: (r[0][0] if len(r) else None))
return d
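# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of the class): a typical lifecycle from
# calling code. `session`, `stream_info_manager` and `sd_identifier` stand in
# for objects constructed elsewhere in lbrynet and are assumptions here.
#
#   @defer.inlineCallbacks
#   def run_manager(session, stream_info_manager, sd_identifier):
#       manager = EncryptedFileManager(session, stream_info_manager, sd_identifier)
#       yield manager.setup()                      # opens the DB, restarts known files
#       reports = yield manager.get_lbry_file_status_reports()
#       # ... use the manager, then shut it down cleanly:
#       yield manager.stop()
#       defer.returnValue(reports)
# ---------------------------------------------------------------------------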
| 2.15625 | 2 |
Perforce/AppUtils.py | TomMinor/MayaPerforce | 13 | 5284 | <filename>Perforce/AppUtils.py
import os
import sys
import re
import logging
p4_logger = logging.getLogger("Perforce")
# Import app specific utilities, maya opens scenes differently than nuke etc
# Are we in maya or nuke?
if re.match( "maya", os.path.basename( sys.executable ), re.I ):
p4_logger.info("Configuring for Maya")
from MayaUtils import *
elif re.match( "nuke", os.path.basename( sys.executable ), re.I ):
p4_logger.info("Configuring for Nuke")
from NukeUtils import *
else:
p4_logger.warning("Couldn't find app configuration")
raise ImportError("No supported applications found that this plugin can interface with")
| 2.09375 | 2 |
fhir/immunizations_demo/models/trainer/model.py | kourtneyshort/healthcare | 0 | 5285 | <reponame>kourtneyshort/healthcare
#!/usr/bin/python3
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""A simple logistics regression model for immunization prediction.
The following features are used in this model:
1. age of the patient
2. gender of the patient
3. country the patient is visiting
4. expected duration of stay
5. disease
We are predicting the possibility of the patient getting a disease.
Note that this model is part of an end-to-end demo which shows how
to leverage the Google Cloud Healthcare APIs (FHIR APIs specifically)
to finish data analysis and machine learning tasks. This problem
itself is not a natural machine learning task.
"""
import tensorflow as tf
from functools import reduce
# Input data specific flags.
tf.flags.DEFINE_string("training_data", default=None,
help="Path to training data. This should be a GCS path.")
tf.flags.DEFINE_string("eval_data", default=None,
help="Path to evaluation data. This should be a GCS path.")
# Model specific flags. See more details here:
# https://www.tensorflow.org/api_docs/python/tf/estimator/LinearClassifier
tf.flags.DEFINE_string("model_dir", default=None,
help="Estimator model_dir.")
tf.flags.DEFINE_string("export_model_dir", default=None,
help="Folder to export trained model.")
tf.flags.DEFINE_integer("batch_size", default=96,
help="Mini-batch size for the training.")
tf.flags.DEFINE_integer("training_steps", default=1000,
help="Total number of training steps.")
tf.flags.DEFINE_integer("eval_steps", default=100,
help="Total number of evaluation steps.")
tf.flags.DEFINE_integer("n_classes", default=2,
help="Number of categories to classify to.")
# More advanced flags that controls the behavior of FTRL optimizer.
# See more details here:
# https://www.tensorflow.org/api_docs/python/tf/train/FtrlOptimizer
tf.flags.DEFINE_float("learning_rate", default=0.01,
help="Learning rate")
tf.flags.DEFINE_float("l1_regularization_strength", default=0.005,
help="L1 regularization strength for FTRL optimizer.")
tf.flags.DEFINE_float("l2_regularization_strength", default=0.001,
help="L2 regularization strength for FTRL optimizer.")
FLAGS = tf.flags.FLAGS
# Feature and label keys.
FEATURE_KEYS = ['age', 'gender', 'country', 'duration', 'disease']
LABEL_KEYS = ['risk']
DS_BUFFER_SIZE = 50000
def build_input_fn(filename):
"""Builds the input funciton for training/evaluation.
Args:
filename (string): The path of the file that contains features and
labels. This can be a Google Cloud Storage path (e.g. gs://...).
"""
def input_fn():
"""Input function to be used by the classifier."""
def parse(serialized_example):
"""Parses a single tensorflow example."""
def parse_feature(features, key):
features[key] = tf.FixedLenFeature([], tf.int64)
return features
data = tf.parse_single_example(serialized_example,
features=reduce(parse_feature, FEATURE_KEYS + LABEL_KEYS, {}))
features = [tf.convert_to_tensor(tf.cast(data[key], tf.int32))
for key in FEATURE_KEYS]
labels = [tf.convert_to_tensor(tf.cast(data[key], tf.int32))
for key in LABEL_KEYS]
return features, labels
dataset = tf.data.TFRecordDataset(filename, buffer_size=DS_BUFFER_SIZE)
dataset = dataset.map(parse).cache().repeat()
dataset = dataset.batch(FLAGS.batch_size)
features, labels = dataset.make_one_shot_iterator().get_next()
# Slice features into a dictionary which is expected by the classifier.
features = tf.transpose(features)
def map_feature(dict, idx):
"""Maps individual features into a dictionary."""
dict[FEATURE_KEYS[idx]] = tf.transpose(
tf.nn.embedding_lookup(features, [idx]))
return dict
return reduce(map_feature, list(range(len(FEATURE_KEYS))), {}), labels
return input_fn
def build_serving_input_receiver_fn():
"""Builds a serving_input_receiver_fn which takes JSON as input."""
def serving_input_receiver_fn():
def add_input(inputs, feature):
inputs[feature] = tf.placeholder(shape=[None], dtype=tf.int32)
return inputs
inputs = reduce(add_input, FEATURE_KEYS, {})
return tf.estimator.export.ServingInputReceiver(inputs, inputs)
return serving_input_receiver_fn
def main(_):
# All features have been converted to integer representation beforehand.
feature_columns = [tf.feature_column.numeric_column(key=key, dtype=tf.int32)
for key in FEATURE_KEYS]
classifier = tf.estimator.LinearClassifier(
feature_columns=feature_columns,
model_dir=FLAGS.model_dir,
n_classes=FLAGS.n_classes,
optimizer=tf.train.FtrlOptimizer(
learning_rate=FLAGS.learning_rate,
l1_regularization_strength=FLAGS.l1_regularization_strength,
l2_regularization_strength=FLAGS.l2_regularization_strength),
config=tf.estimator.RunConfig(keep_checkpoint_max=1))
# Training.
classifier.train(
input_fn=build_input_fn(FLAGS.training_data),
steps=FLAGS.training_steps)
# Evaluation.
classifier.evaluate(
input_fn=build_input_fn(FLAGS.eval_data),
steps=FLAGS.eval_steps)
# Export SavedModel.
if FLAGS.export_model_dir is not None:
classifier.export_saved_model(
FLAGS.export_model_dir,
build_serving_input_receiver_fn())
if __name__ == '__main__':
# Set logging level to INFO.
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run()
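# ---------------------------------------------------------------------------
# Illustrative sketch only: how this trainer might be invoked. The module path
# and GCS locations below are placeholders (assumptions), not real resources.
#
#   python -m trainer.model \
#       --training_data=gs://my-bucket/immunizations/train.tfrecord \
#       --eval_data=gs://my-bucket/immunizations/eval.tfrecord \
#       --model_dir=gs://my-bucket/immunizations/model \
#       --export_model_dir=gs://my-bucket/immunizations/export \
#       --training_steps=2000 \
#       --batch_size=96
# ---------------------------------------------------------------------------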
| 2.484375 | 2 |
heliosburn/django/hbproject/webui/models.py | thecodeteam/heliosburn | 0 | 5286 | <gh_stars>0
import json
import re
from django.conf import settings
import requests
from webui.exceptions import BadRequestException, UnauthorizedException, ServerErrorException, RedirectException, \
UnexpectedException, LocationHeaderNotFoundException, NotFoundException
def validate_response(response):
if 200 <= response.status_code < 300:
return True
return False
def status_code_to_exception(status_code):
if status_code == 400:
return BadRequestException()
if status_code == 401:
return UnauthorizedException()
if status_code == 404:
return NotFoundException()
if status_code >= 500:
return ServerErrorException()
if 300 <= status_code < 400:
return RedirectException()
return UnexpectedException()
def get_resource_id_or_raise_exception(resource_name, response):
if not validate_response(response):
exception = status_code_to_exception(response.status_code)
exception.message = response.text
raise exception
location = response.headers.get('location')
    pattern = r'.+{}/(?P<id>\w+)'.format(resource_name)
p = re.compile(pattern)
m = p.match(location)
try:
resource_id = m.group('id')
return resource_id
except:
        raise UnexpectedException('Could not get the resource ID from the response.')
class Base(object):
def __init__(self, auth_token=None):
self.auth_token = auth_token
def get_url(self, extra=''):
return '{base_url}{endpoint}{extra}'.format(base_url=settings.API_BASE_URL,
endpoint=object.__getattribute__(self, '__endpoint__'),
extra=extra)
class Session(Base):
__endpoint__ = '/session/'
__resourcename__ = 'session'
def create(self, data):
url = self.get_url()
headers = {'X-Auth-Token': self.auth_token}
response = requests.post(url, headers=headers, data=json.dumps(data))
resource_id = get_resource_id_or_raise_exception(self.__resourcename__, response)
return resource_id
def get(self, resource_id):
url = self.get_url(extra=str(resource_id))
headers = {'X-Auth-Token': self.auth_token}
response = requests.get(url, headers=headers)
if not validate_response(response):
exception = status_code_to_exception(response.status_code)
exception.message = response.text
raise exception
session = json.loads(response.text)
return session
def start(self, resource_id):
url = self.get_url(extra='{}/{}/'.format(resource_id, 'start'))
headers = {'X-Auth-Token': self.auth_token}
response = requests.post(url, headers=headers)
if not validate_response(response):
exception = status_code_to_exception(response.status_code)
exception.message = response.text
raise exception
def stop(self, resource_id):
url = self.get_url(extra='{}/{}/'.format(resource_id, 'stop'))
headers = {'X-Auth-Token': self.auth_token}
response = requests.post(url, headers=headers)
if not validate_response(response):
exception = status_code_to_exception(response.status_code)
exception.message = response.text
raise exception
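# ---------------------------------------------------------------------------
# Illustrative sketch only (assumed token and payload shapes): how a Django
# view might drive the wrapper above. The field names in the payload are
# placeholders, not the documented API schema.
#
#   session_api = Session(auth_token=request.session.get('token'))
#   session_id = session_api.create({'name': 'smoke test', 'testplan': testplan_id})
#   session_api.start(session_id)
#   ...
#   session_api.stop(session_id)
# ---------------------------------------------------------------------------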
class TestPlan(Base):
__endpoint__ = '/testplan/'
__resourcename__ = 'testplan'
def create(self, data):
url = self.get_url()
headers = {'X-Auth-Token': self.auth_token}
response = requests.post(url, headers=headers, data=json.dumps(data))
resource_id = get_resource_id_or_raise_exception(self.__resourcename__, response)
return resource_id
def update(self, resource_id, data):
url = self.get_url(extra=str(resource_id))
headers = {'X-Auth-Token': self.auth_token}
response = requests.put(url, headers=headers, data=json.dumps(data))
if not validate_response(response):
exception = status_code_to_exception(response.status_code)
exception.message = response.text
raise exception
def get(self, resource_id):
url = self.get_url(extra=str(resource_id))
headers = {'X-Auth-Token': self.auth_token}
response = requests.get(url, headers=headers)
if not validate_response(response):
exception = status_code_to_exception(response.status_code)
exception.message = response.text
raise exception
testplan = json.loads(response.text)
return testplan
def get_all(self):
url = self.get_url()
headers = {'X-Auth-Token': self.auth_token}
response = requests.get(url, headers=headers)
if not validate_response(response):
exception = status_code_to_exception(response.status_code)
exception.message = response.text
raise exception
testplans = json.loads(response.text)
return testplans
def delete(self, resource_id):
url = self.get_url(extra=str(resource_id))
headers = {'X-Auth-Token': self.auth_token}
response = requests.delete(url, headers=headers)
if not validate_response(response):
exception = status_code_to_exception(response.status_code)
exception.message = response.text
raise exception
class Rule(Base):
__endpoint__ = '/testplan/{testplan_id}/rule/'
__resourcename__ = 'rule'
def __init__(self, testplan_id, auth_token=None):
self.auth_token = auth_token
self.__endpoint__ = self.__endpoint__.format(testplan_id=testplan_id)
def create(self, data):
url = self.get_url()
headers = {'X-Auth-Token': self.auth_token}
response = requests.post(url, headers=headers, data=json.dumps(data))
resource_id = get_resource_id_or_raise_exception(self.__resourcename__, response)
return resource_id
def update(self, resource_id, data):
url = self.get_url(extra=str(resource_id))
headers = {'X-Auth-Token': self.auth_token}
response = requests.put(url, headers=headers, data=json.dumps(data))
if not validate_response(response):
exception = status_code_to_exception(response.status_code)
exception.message = response.text
raise exception
def get(self, resource_id):
url = self.get_url(extra=str(resource_id))
headers = {'X-Auth-Token': self.auth_token}
response = requests.get(url, headers=headers)
if not validate_response(response):
exception = status_code_to_exception(response.status_code)
exception.message = response.text
raise exception
rule = json.loads(response.text)
return rule
def get_all(self):
url = self.get_url()
headers = {'X-Auth-Token': self.auth_token}
response = requests.get(url, headers=headers)
if not validate_response(response):
exception = status_code_to_exception(response.status_code)
exception.message = response.text
raise exception
resource = json.loads(response.text)
return resource
class Recording(Base):
__endpoint__ = '/recording/'
__resourcename__ = 'recording'
def create(self, data):
url = self.get_url()
headers = {'X-Auth-Token': self.auth_token}
response = requests.post(url, headers=headers, data=json.dumps(data))
resource_id = get_resource_id_or_raise_exception(self.__resourcename__, response)
return resource_id
def get(self, resource_id):
url = self.get_url(extra=str(resource_id))
headers = {'X-Auth-Token': self.auth_token}
response = requests.get(url, headers=headers)
if not validate_response(response):
exception = status_code_to_exception(response.status_code)
exception.message = response.text
raise exception
recording = json.loads(response.text)
return recording
def get_all(self):
url = self.get_url()
headers = {'X-Auth-Token': self.auth_token}
response = requests.get(url, headers=headers)
if not validate_response(response):
exception = status_code_to_exception(response.status_code)
exception.message = response.text
raise exception
recordings = json.loads(response.text)
return recordings
def update(self, resource_id, data):
url = self.get_url(extra=str(resource_id))
headers = {'X-Auth-Token': self.auth_token}
response = requests.put(url, headers=headers, data=json.dumps(data))
if not validate_response(response):
exception = status_code_to_exception(response.status_code)
exception.message = response.text
raise exception
def start(self, resource_id):
url = self.get_url(extra='{}/{}'.format(resource_id, 'start'))
headers = {'X-Auth-Token': self.auth_token}
response = requests.post(url, headers=headers)
if not validate_response(response):
exception = status_code_to_exception(response.status_code)
exception.message = response.text
raise exception
def stop(self, resource_id):
url = self.get_url(extra='{}/{}'.format(resource_id, 'stop'))
headers = {'X-Auth-Token': self.auth_token}
response = requests.post(url, headers=headers)
if not validate_response(response):
exception = status_code_to_exception(response.status_code)
exception.message = response.text
raise exception
class QoS(Base):
__endpoint__ = '/qos/'
__resourcename__ = 'qos'
def create(self, data):
url = self.get_url()
headers = {'X-Auth-Token': self.auth_token}
response = requests.post(url, headers=headers, data=json.dumps(data))
resource_id = get_resource_id_or_raise_exception(self.__resourcename__, response)
return resource_id
def update(self, resource_id, data):
url = self.get_url(extra=str(resource_id))
headers = {'X-Auth-Token': self.auth_token}
response = requests.put(url, headers=headers, data=json.dumps(data))
if not validate_response(response):
exception = status_code_to_exception(response.status_code)
exception.message = response.text
raise exception
def get(self, resource_id):
url = self.get_url(extra=str(resource_id))
headers = {'X-Auth-Token': self.auth_token}
response = requests.get(url, headers=headers)
if not validate_response(response):
exception = status_code_to_exception(response.status_code)
exception.message = response.text
raise exception
qos = json.loads(response.text)
return qos
def get_all(self):
url = self.get_url()
headers = {'X-Auth-Token': self.auth_token}
response = requests.get(url, headers=headers)
if not validate_response(response):
exception = status_code_to_exception(response.status_code)
exception.message = response.text
raise exception
qos = json.loads(response.text)
return qos
def delete(self, resource_id):
url = self.get_url(extra=str(resource_id))
headers = {'X-Auth-Token': self.auth_token}
response = requests.delete(url, headers=headers)
if not validate_response(response):
exception = status_code_to_exception(response.status_code)
exception.message = response.text
raise exception
class ServerOverload(Base):
__endpoint__ = '/serveroverload/'
__resourcename__ = 'serveroverload'
def create(self, data):
url = self.get_url()
headers = {'X-Auth-Token': self.auth_token}
response = requests.post(url, headers=headers, data=json.dumps(data))
resource_id = get_resource_id_or_raise_exception(self.__resourcename__, response)
return resource_id
def update(self, resource_id, data):
url = self.get_url(extra=str(resource_id))
headers = {'X-Auth-Token': self.auth_token}
response = requests.put(url, headers=headers, data=json.dumps(data))
if not validate_response(response):
exception = status_code_to_exception(response.status_code)
exception.message = response.text
raise exception
def get(self, resource_id):
url = self.get_url(extra=str(resource_id))
headers = {'X-Auth-Token': self.auth_token}
response = requests.get(url, headers=headers)
if not validate_response(response):
exception = status_code_to_exception(response.status_code)
exception.message = response.text
raise exception
profile = json.loads(response.text)
return profile
def get_all(self):
url = self.get_url()
headers = {'X-Auth-Token': self.auth_token}
response = requests.get(url, headers=headers)
if not validate_response(response):
exception = status_code_to_exception(response.status_code)
exception.message = response.text
raise exception
profile = json.loads(response.text)
return profile
def delete(self, resource_id):
url = self.get_url(extra=str(resource_id))
headers = {'X-Auth-Token': self.auth_token}
response = requests.delete(url, headers=headers)
if not validate_response(response):
exception = status_code_to_exception(response.status_code)
exception.message = response.text
raise exception
class Logs(Base):
__endpoint__ = '/log/'
__resourcename__ = 'log'
def stats(self):
url = self.get_url(extra='stats')
headers = {'X-Auth-Token': self.auth_token}
response = requests.get(url, headers=headers)
if not validate_response(response):
exception = status_code_to_exception(response.status_code)
exception.message = response.text
raise exception
stats = json.loads(response.text)
return stats
def get(self, start, length, component, levels, date_from, date_to, msg):
url = self.get_url(
extra='?start={}&limit={}&component={}&levels={}&from={}&to={}&msg={}'.format(start, length, component,
levels,
date_from, date_to, msg))
headers = {'X-Auth-Token': self.auth_token}
response = requests.get(url, headers=headers)
if not validate_response(response):
exception = status_code_to_exception(response.status_code)
exception.message = response.text
raise exception
logs = json.loads(response.text)
        return logs
| 2.203125 | 2 |
src/ychaos/core/verification/controller.py | sushilkar/ychaos | 0 | 5287 | # Copyright 2021, Yahoo
# Licensed under the terms of the Apache 2.0 license. See the LICENSE file in the project root for terms
import time
from typing import Dict, List, Optional, Type
from pydantic import validate_arguments
from ...app_logger import AppLogger
from ...testplan import SystemState
from ...testplan.schema import TestPlan
from ...testplan.verification import VerificationConfig, VerificationType
from ...utils.hooks import EventHook
from ...utils.yaml import Dumper
from .data import VerificationData, VerificationStateData
from .plugins.BaseVerificationPlugin import BaseVerificationPlugin
from .plugins.HTTPRequestVerificationPlugin import (
HTTPRequestVerificationPlugin,
)
from .plugins.PythonModuleVerificationPlugin import (
PythonModuleVerificationPlugin,
)
from .plugins.SDv4VerificationPlugin import SDv4VerificationPlugin
# Enum value to corresponding Plugin Map
VERIFICATION_PLUGIN_MAP: Dict[str, Type[BaseVerificationPlugin]] = {
"python_module": PythonModuleVerificationPlugin,
"http_request": HTTPRequestVerificationPlugin,
"sdv4": SDv4VerificationPlugin,
}
class VerificationController(EventHook):
"""
Verification controller is used to run all the verification plugins configured in the testplan
    and assert that the system is in the state expected by the user. Extends the EventHook class,
    which defines the following event hooks.
## Valid Hooks
=== "on_start"
Hook that gets called when the verification execution is about to start.
No arguments are passed to the callable.
```python
def callable_hook(): ...
```
=== "on_each_plugin_start"
Hook that gets called when a particular plugin execution is about to start. `index` in the signature refers
to the position in the list
```python
def callable_hook(index: int, config: VerificationConfig): ...
```
References:
1. [VerificationConfig][ychaos.testplan.verification.VerificationConfig]
=== "on_each_plugin_end"
Hook that gets called when a particular plugin execution has ended. `index` in the signature refers to the
position in the list
```python
def callable_hook(index: int, config: VerificationConfig, state_data: VerificationStateData): ...
```
References:
1. [VerificationConfig][ychaos.testplan.verification.VerificationConfig]
2. [VerificationStateData][ychaos.core.verification.data.VerificationStateData]
=== "on_end"
Hook that gets called when the verification execution has ended. Each element in the list
of boolean corresponds to the result of the plugin, where `True` indicates successful verification
and `False` is a failure to verify the state
```python
def callable_hook(verify_list: List[bool]): ...
```
=== "on_plugin_not_found"
Hook that gets called when a plugin available in schema is not ready for usage/not implemented.
This case is possible for the plugins that are in Beta/development phase
```python
def callable_hook(index:int, plugin_type: VerificationType): ...
```
---
Each of the hooks get called on a certain event. The caller can register as many hooks for a particular event,
by calling the `register_hook(event_name, hook_method)` method. All the hooks are executed sequentially. The best example
of this is to register a hook to print information on CLI.
"""
__hook_events__ = {
"on_start": EventHook.CallableType(),
"on_each_plugin_start": EventHook.CallableType(int, VerificationConfig),
"on_each_plugin_end": EventHook.CallableType(
int, VerificationConfig, VerificationStateData
),
"on_plugin_not_found": EventHook.CallableType(int, VerificationType),
"on_end": EventHook.CallableType(List[bool]),
}
@validate_arguments
def __init__(
self,
testplan: TestPlan,
current_state: SystemState,
verification_data: List[Dict[SystemState, Optional[VerificationStateData]]],
):
"""
Initialize a verification controller object.
Args:
testplan: A valid testplan object
current_state: The state in which the system is expected to be in
verification_data (List[VerificationData]): The verification data probably from previous run.
"""
super(VerificationController, self).__init__()
self.logger = AppLogger.get_logger(self.__class__.__name__)
self.logger.bind(event="controller")
self.testplan = testplan
self.current_state = current_state
if not verification_data:
verification_data = [
dict(),
] * len(self.testplan.verification)
elif len(verification_data) != len(self.testplan.verification):
raise ValueError("Data and verification config size mismatch")
self.verification_data = list()
for data in verification_data:
self.verification_data.append(VerificationData.parse_obj(data))
def execute(self) -> bool:
"""
Execute the Verification controller.
Returns:
True if all the verification plugin pass, False otherwise
"""
# Call all the hooks that were registered for `verification_start`
# If there were no hooks registered, this will be no-op
self.execute_hooks("on_start")
_verify_list = list()
for index, (verification_plugin, data) in enumerate(
zip(self.testplan.verification, self.verification_data)
):
# Delay before verifying
time.sleep(verification_plugin.delay_before)
assert isinstance(verification_plugin.states, List) # For mypy
if self.current_state in verification_plugin.states:
self.logger.info(
msg=f"Starting {verification_plugin.type.value} verification"
)
plugin_class = VERIFICATION_PLUGIN_MAP.get(
verification_plugin.type.value, None
)
if plugin_class is None:
# This can happen when a new plugin is not implemented yet, but is
# available in the schema
self.execute_hooks(
"on_plugin_not_found", index, verification_plugin.type
)
continue
plugin = plugin_class(verification_plugin.config, data)
# Call all the hooks that were registered for `verification_plugin_start`.
self.execute_hooks("on_each_plugin_start", index, verification_plugin)
state_data = plugin.run_verification()
self.logger.info(
msg=f"Completed {verification_plugin.type.value} verification"
)
# Call all the hooks that were registered for `verification_plugin_end`.
self.execute_hooks(
"on_each_plugin_end", index, verification_plugin, state_data
)
data.replace_data(self.current_state, state_data)
if verification_plugin.strict:
_verify_list.append(state_data.rc == 0)
else:
data.add_data(self.current_state, None)
# Delay after verifying
time.sleep(verification_plugin.delay_after)
# Call all the hooks that were registered for `verification_end`.
self.execute_hooks("on_end", _verify_list)
return all(_verify_list)
def get_encoded_verification_data(self):
return [data.encoded_dict() for data in self.verification_data]
def dump_verification_json(self, fp):
import json
json.dump(self.get_encoded_verification_data(), fp=fp, indent=4)
def dump_verification_yaml(self, fp):
import yaml
yaml.dump(
self.get_encoded_verification_data(),
fp,
default_flow_style=False,
sort_keys=False,
Dumper=Dumper,
indent=4,
)
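# ---------------------------------------------------------------------------
# Illustrative sketch only (not executed on import): registering a hook and
# running the controller. `testplan` and `current_state` are assumed to be a
# parsed TestPlan object and a SystemState member supplied by the caller.
#
#   def print_plugin_result(index, config, state_data):
#       print(index, config.type.value, state_data.rc)
#
#   controller = VerificationController(testplan, current_state, [])
#   controller.register_hook("on_each_plugin_end", print_plugin_result)
#   all_verified = controller.execute()
#   with open("verification.json", "w") as fp:
#       controller.dump_verification_json(fp)
# ---------------------------------------------------------------------------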
| 2.328125 | 2 |
tests/test_vimeodl.py | binary-signal/vimeo-channel-downloader | 6 | 5288 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from vimeodl import __version__
from vimeodl.vimeo import VimeoLinkExtractor, VimeoDownloader
def test_version():
assert __version__ == '0.1.0'
def test_vimeo_link_extractor():
vm = VimeoLinkExtractor()
vm.extract()
| 2.125 | 2 |
labgraph/graphs/node_test_harness.py | Yunusbcr/labgraph | 124 | 5289 | <gh_stars>100-1000
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import asyncio
import functools
import inspect
from contextlib import contextmanager
from typing import (
Any,
AsyncIterable,
Awaitable,
Callable,
Generic,
Iterator,
List,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
)
from ..messages.message import Message
from ..util.testing import get_event_loop
from .config import Config
from .method import AsyncPublisher
from .node import Node
from .state import State
from .topic import Topic
N = TypeVar("N", bound=Node) # Node type
T = TypeVar("T", bound=Tuple[Topic, Message]) # Type yielded by async functions
class NodeTestHarness(Generic[N]):
"""
Utility class for testing Labgraph nodes. This allows a user to test some behavior
of a node in an asyncio event loop, with the harness taking care of setting up and
cleaning up the node.
Args:
node_type: The type of node this harness will test.
"""
def __init__(self, node_type: Type[N]) -> None:
self.node_type: Type[N] = node_type
@contextmanager
def get_node(
self, config: Optional[Config] = None, state: Optional[State] = None
) -> Iterator[N]:
"""
Context manager to create, configure and yield a node of specified type.
Node is cleaned up when the context manager exits.
Args:
config: The configuration to set on the node, if provided.
state: The state to set on the Node, if provided.
"""
node = None
try:
node = self.node_type(config=config, state=state)
node.setup()
yield node
finally:
if node is not None:
node.cleanup()
@overload
def run_with_harness(
node_type: Type[N],
fn: Callable[[N], AsyncIterable[T]],
config: Optional[Config],
state: Optional[State],
max_num_results: Optional[int] = None,
) -> List[T]:
...
@overload
def run_with_harness(
node_type: Type[N],
fn: Callable[[N], Awaitable[T]],
config: Optional[Config],
state: Optional[State],
) -> T:
...
def run_with_harness(node_type, fn, config=None, state=None, max_num_results=None):
"""
Runs an async function on a new node of the provided type using `NodeTestHarness`.
Args:
node_type: The type of node to create.
fn:
The async function to run. An instance of a node typed `node_type` will be
provided to the function as an argument.
config: The configuration to set on the node, if provided.
state: The state to set on the node, if provided.
max_num_results:
If `fn` is an async generator, the maximum number of results it will generate.
If this is `None`, then the generator can produce an unbounded number of
results.
"""
# Check whether the max_num_results argument was improperly provided
_check_max_num_results_arg(run_with_harness.__name__, fn, max_num_results)
test_harness = NodeTestHarness(node_type=node_type)
with test_harness.get_node(config=config, state=state) as node:
return run_async(fn, args=[node], max_num_results=max_num_results)
@overload
def run_async(
fn: Callable[..., Awaitable[T]],
args: Optional[Sequence[Any]] = None,
kwargs: Optional[Mapping[str, Any]] = None,
) -> T:
...
@overload
def run_async(
fn: Callable[..., AsyncIterable[T]],
args: Optional[Sequence[Any]] = None,
kwargs: Optional[Mapping[str, Any]] = None,
max_num_results: Optional[int] = None,
) -> List[T]:
...
def run_async(fn, args=None, kwargs=None, max_num_results=None):
"""
Runs an async function to completion. Uses the current thread's event loop. Blocks
until the async function has finished executing. Forwards all arguments after `fn`
to the async function.
Args:
fn: The async function to run.
args: Positional arguments to forward to the function.
kwargs: Keyword arguments to forward to the function.
max_num_results:
If `fn` is an async generator, the maximum number of results it will generate.
If this is `None`, then the generator can produce an unbounded number of
results.
"""
# Check whether the max_num_results argument was improperly provided
_check_max_num_results_arg(run_async.__name__, fn, max_num_results)
# Unwrap functools.partial so we can check whether it is async
if isinstance(fn, functools.partial):
test_fn = fn.func
else:
test_fn = fn
if inspect.isasyncgenfunction(test_fn):
return get_event_loop().run_until_complete(
_async_generator_to_list(
fn=fn,
args=args or [],
kwargs=kwargs or {},
max_num_results=max_num_results,
)
)
elif asyncio.iscoroutinefunction(test_fn):
return get_event_loop().run_until_complete(fn(*(args or []), **(kwargs or {})))
else:
raise TypeError(f"{run_async.__name__}: function '{fn}' is not async")
def _check_max_num_results_arg(
called_fn_name: str,
fn: Union[Callable[..., Awaitable[Any]], Callable[..., AsyncIterable[Any]]],
max_num_results: Optional[int] = None,
) -> None:
if not inspect.isasyncgenfunction(fn) and max_num_results is not None:
raise TypeError(
f"{called_fn_name}: function '{fn}' is not an async generator but "
"max_num_results was provided"
)
async def _async_generator_to_list(
fn: Callable[..., AsyncIterable[T]],
args: Sequence[Any],
kwargs: Mapping[str, Any],
max_num_results: Optional[int] = None,
) -> List[T]:
if max_num_results is not None and max_num_results < 0:
raise ValueError("max_num_results must be non-negative")
result = []
async for retval in fn(*args, **kwargs):
result.append(retval)
if max_num_results is not None and len(result) >= max_num_results:
return result
return result
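# A minimal usage sketch (not part of the original module): it drives
# run_async() above with a small async generator and caps the number of
# collected results. Assumes the module is executed with its package
# importable (e.g. via "python -m ..."), since the module uses relative imports.
if __name__ == "__main__":
    async def _count_up(limit: int) -> AsyncIterable[int]:
        # Yield the integers 0..limit-1 one at a time.
        for i in range(limit):
            yield i
    # Collect at most three values from the generator; prints [0, 1, 2].
    print(run_async(_count_up, args=[10], max_num_results=3))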
| 2.328125 | 2 |
pygamelearning/lrud.py | edward70/2021Computing | 0 | 5290 | <filename>pygamelearning/lrud.py
import pygame
import sys
pygame.init()
clock = pygame.time.Clock()
screen = pygame.display.set_mode([500, 500])
gameOn = True
x1 = 0
y1 = 100
x2 = 100
y2 = 0
while gameOn == True:
screen.fill([255,255,255])
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
if x1 == 500:
moveRight = False
elif x1 == 0:
moveRight = True
if y2 == 500:
moveDown = False
elif y2 == 0:
moveDown = True
if moveRight:
x1 = x1+1
else:
x1 = x1-1
if moveDown:
y2 = y2+1
else:
y2 = y2-1
pygame.draw.circle(screen, [0,0,0], [x1,y1], 10)
pygame.draw.rect(screen, [0,0,0], [x2,y2,30,30])
clock.tick(100)
pygame.display.flip()
pygame.quit()
| 3.703125 | 4 |
pytorch/xor/training_a_perceptron.py | e93fem/PyTorchNLPBook | 0 | 5291 | import numpy as np
import torch
import matplotlib.pyplot as plt
from torch import optim, nn
from pytorch.xor.multilayer_perceptron import MultilayerPerceptron
from pytorch.xor.utils import LABELS, get_toy_data, visualize_results, plot_intermediate_representations
input_size = 2
output_size = len(set(LABELS))
num_hidden_layers = 0
hidden_size = 2 # isn't ever used but we still set it
seed = 24
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
mlp1 = MultilayerPerceptron(input_size=input_size,
hidden_size=hidden_size,
num_hidden_layers=num_hidden_layers,
output_size=output_size)
print(mlp1)
batch_size = 1000
x_data_static, y_truth_static = get_toy_data(batch_size)
fig, ax = plt.subplots(1, 1, figsize=(10,5))
visualize_results(mlp1, x_data_static, y_truth_static,
ax=ax, title='Initial Perceptron State', levels=[0.5])
plt.axis('off')
plt.savefig('images/perceptron_initial.png')
plt.show()
losses = []
batch_size = 10000
n_batches = 10
max_epochs = 10
loss_change = 1.0
last_loss = 10.0
change_threshold = 1e-3
epoch = 0
all_imagefiles = []
lr = 0.01
optimizer = optim.Adam(params=mlp1.parameters(), lr=lr)
cross_ent_loss = nn.CrossEntropyLoss()
def early_termination(loss_change, change_threshold, epoch, max_epochs):
terminate_for_loss_change = loss_change < change_threshold
terminate_for_epochs = epoch > max_epochs
# return terminate_for_loss_change or
return terminate_for_epochs
while not early_termination(loss_change, change_threshold, epoch, max_epochs):
for _ in range(n_batches):
# step 0: fetch the data
x_data, y_target = get_toy_data(batch_size)
# step 1: zero the gradients
mlp1.zero_grad()
# step 2: run the forward pass
y_pred = mlp1(x_data).squeeze()
# step 3: compute the loss
loss = cross_ent_loss(y_pred, y_target.long())
# step 4: compute the backward pass
loss.backward()
# step 5: have the optimizer take an optimization step
optimizer.step()
        # auxiliary: bookkeeping
loss_value = loss.item()
losses.append(loss_value)
loss_change = abs(last_loss - loss_value)
last_loss = loss_value
print("epoch: {}: loss_value: {}".format(epoch, loss_value))
fig, ax = plt.subplots(1, 1, figsize=(10, 5))
visualize_results(mlp1, x_data_static, y_truth_static, ax=ax, epoch=epoch,
title=f"{loss_value:0.2f}; {loss_change:0.4f}")
plt.axis('off')
epoch += 1
all_imagefiles.append(f'images/perceptron_epoch{epoch}_toylearning.png')
plt.savefig(all_imagefiles[-1])
_, ax = plt.subplots(1,1,figsize=(10,5))
visualize_results(mlp1, x_data_static, y_truth_static, epoch=None, levels=[0.5], ax=ax)
plt.axis('off');
plt.savefig('images/perceptron_final.png')
plot_intermediate_representations(mlp1,
"The Perceptron's Input and Intermediate Representation",
figsize=(9, 3))
plt.savefig("images/perceptron_intermediate.png")
plt.savefig("images/figure_4_5.pdf") | 2.765625 | 3 |
mysite/api/v0/tests.py | raccoongang/socraticqs2 | 3 | 5292 | <reponame>raccoongang/socraticqs2
import json
import mock
from django.core.urlresolvers import reverse
from pymongo.errors import ServerSelectionTimeoutError
from analytics.models import CourseReport
from core.common.mongo import c_onboarding_status, _conn
from core.common import onboarding
from ct.models import UnitLesson, StudentError
from ctms.tests import MyTestCase
HEALTH_URL = reverse('api:v0:health-check')
def test_health_positive(client, db):
result = client.get(HEALTH_URL)
assert result.status_code == 200
assert 'ok' in json.loads(result.content)
def test_health_non_ok(client, db, mocker):
"""
Ping and Stats Mongo command return non ok results.
"""
do_health = mocker.patch('api.v0.views.do_health')
do_health.return_value = {}, {}
result = client.get(HEALTH_URL)
assert result.status_code == 503
def test_health_exception(client, db, mocker):
"""
Mongo query raises exception.
"""
do_health = mocker.patch('api.v0.views.do_health')
do_health.side_effect = ServerSelectionTimeoutError()
result = client.get(HEALTH_URL)
assert result.status_code == 503
class TestOnboardingStatus(MyTestCase):
namespace = 'api:v0:onboarding-status'
def setUp(self):
super(TestOnboardingStatus, self).setUp()
# # Hack: remove all test_ databases before test
# for db in _conn.connector.list_databases():
# if 'test_' in db.get('name') and:
# _conn.connector.drop_database(db.get('name'))
self.data = {
onboarding.USER_ID: self.user.id,
onboarding.STEP_1: False,
onboarding.STEP_2: False,
onboarding.STEP_3: False,
onboarding.STEP_4: False,
}
def test_put_valid_data(self):
data_to_update = {onboarding.STEP_2: True}
c_onboarding_status().remove()
c_onboarding_status().insert(self.data.copy())
ensure_saved = c_onboarding_status().find_one({onboarding.USER_ID: self.user.id}, {'_id': False})
self.assertEqual(ensure_saved, self.data)
self.assertEqual(self.client.login(username=self.username, password=<PASSWORD>), True)
response = self.client.put(
reverse('api:v0:onboarding-status'),
data=json.dumps(data_to_update),
content_type="application/json"
)
data = self.data.copy()
self.assertEqual(response.status_code, 200)
data.update(data_to_update)
mongo_data = c_onboarding_status().find_one({onboarding.USER_ID: self.user.id}, {'_id': False})
self.assertEqual(mongo_data, data)
def test_put_invalid_keys(self):
data_to_update = {'invalid_key': True}
c_onboarding_status().remove()
c_onboarding_status().insert(self.data.copy())
ensure_saved = c_onboarding_status().find_one({onboarding.USER_ID: self.user.id}, {'_id': False})
self.assertEqual(ensure_saved, self.data)
response = self.client.put(
reverse('api:v0:onboarding-status'),
data=json.dumps(data_to_update),
content_type="application/json"
)
self.assertEqual(response.status_code, 400)
def test_wo_user_403(self):
c_onboarding_status().remove()
self.client.logout()
response = self.client.get(reverse(self.namespace))
self.assertEqual(response.status_code, 403)
def test_get_with_user_200(self):
c_onboarding_status().remove()
c_onboarding_status().insert(self.data.copy())
response = self.client.get(reverse(self.namespace))
expected_data = {
"done": True,
}
response_data = json.loads(response.content)['data']
for key in response_data.keys():
self.assertSetEqual(set(expected_data), set(response_data[key]))
class ApiAccessMixinTest(object):
def test_permissions_instructor_allowed(self):
response = self.client.get(reverse(self.namespace, kwargs={'course_id': self.course.id}))
self.assertEqual(response.status_code, 200)
def test_permissions_not_instructor_disallowed(self):
self.client.login(username=self.username2, password=<PASSWORD>)
response = self.client.get(reverse(self.namespace, kwargs={'course_id': self.course.id}))
self.assertEqual(response.status_code, 403)
def test_permissions_user_not_authenticated(self):
self.client.logout()
response = self.client.get(reverse(self.namespace, kwargs={'course_id': self.course.id}))
self.assertEqual(response.status_code, 403)
def test_course_doesnt_exist(self):
response = self.client.get(reverse(self.namespace, kwargs={'course_id': 100}))
self.assertEqual(response.status_code, 404)
class TestResponseViewSet(ApiAccessMixinTest, MyTestCase):
namespace = 'api:v0:responses'
def test_serializer_author_name(self):
response = self.client.get(reverse(self.namespace, kwargs={'course_id': self.course.id}))
self.assertEqual(
json.loads(response.content)[0].get('author_name'),
self.user.get_full_name() or self.user.username
)
class TestErrorViewSet(ApiAccessMixinTest, MyTestCase):
namespace = 'api:v0:errors'
def setUp(self):
super(TestErrorViewSet, self).setUp()
self.unit_lesson_error = UnitLesson(
unit=self.unit, order=0,
lesson=self.lesson, addedBy=self.user,
treeID=self.lesson.id
)
self.unit_lesson_error.save()
self.student_error = StudentError(
response=self.resp1,
errorModel=self.unit_lesson_error,
author=self.user
)
self.student_error.save()
def test_serializer_em_data(self):
response = self.client.get(reverse(self.namespace, kwargs={'course_id': self.course.id}))
fields_set = set([
'id', 'lesson_concept_id', 'lesson_concept_isAbort', 'lesson_concept_isFail', 'lesson_text', 'treeID'
])
em_data_set = set(json.loads(response.content)[0]['em_data'])
self.assertSetEqual(fields_set, em_data_set)
class TestGenReportView(MyTestCase):
namespace = 'api:v0:gen-report'
def test_missed_course_id(self):
response = self.client.get(reverse(self.namespace))
self.assertEqual(response.status_code, 400)
def test_course_doesnt_exist(self):
response = self.client.get(reverse(self.namespace), data={'course_id': 100})
self.assertEqual(response.status_code, 404)
def test_not_allowed(self):
self.client.login(username=self.username2, password=self.<PASSWORD>)
response = self.client.get(reverse(self.namespace), data={'course_id': self.course.id})
self.assertEqual(response.status_code, 403)
@mock.patch('api.v0.views.report.delay')
def test_report_generated(self, report):
response = self.client.get(reverse(self.namespace), data={'course_id': self.course.id})
self.assertEqual(response.status_code, 200)
report.assert_called_with(str(self.course.id), self.user.id)
class TestCourseReportViewSet(ApiAccessMixinTest, MyTestCase):
namespace = 'api:v0:reports'
def test_serializer_data(self):
report = CourseReport(
course=self.course
)
report.save()
response = self.client.get(reverse(self.namespace, kwargs={'course_id': self.course.id}))
fields_set = {'date', 'response_report'}
data_set = set(json.loads(response.content)[0])
self.assertSetEqual(fields_set, data_set)
class TestEchoDataView(MyTestCase):
namespace = 'api:v0:echo-data'
def test_echo_405(self):
get_response = self.client.get(reverse(self.namespace))
self.assertEqual(get_response.status_code, 405)
def test_echo_200(self):
post_response = self.client.post(reverse(self.namespace))
self.assertEqual(post_response.status_code, 200)
self.client.logout()
post_response = self.client.post(reverse(self.namespace))
self.assertEqual(post_response.status_code, 200)
| 2.203125 | 2 |
signbank/settings/base.py | anthonymark33/Global-signbank | 0 | 5293 | # Django settings for signbank project.
import os
from signbank.settings.server_specific import *
from datetime import datetime
DEBUG = True
PROJECT_DIR = os.path.dirname(BASE_DIR)
MANAGERS = ADMINS
TIME_ZONE = 'Europe/Amsterdam'
LOCALE_PATHS = [BASE_DIR+'conf/locale']
# in the database, SITE_ID 1 is example.com
SITE_ID = 2
USE_I18N = True
USE_L10N = True
USE_TZ = True
MEDIA_ROOT = WRITABLE_FOLDER
MEDIA_URL = PREFIX_URL+'/media/'
MEDIA_MOBILE_URL = MEDIA_URL
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = PREFIX_URL
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = PREFIX_URL+'/static/'
# Additional locations of static files
STATICFILES_DIRS = (
os.path.join(PROJECT_DIR, "media"),
)
# STATICFILES_STORAGE = ( os.path.join(PROJECT_DIR, "static"), )
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = <KEY>'
MIDDLEWARE_CLASSES = (
# 'debug_toolbar.middleware.DebugToolbarMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'signbank.pages.middleware.PageFallbackMiddleware',
# 'django_mobile.middleware.MobileDetectionMiddleware',
# 'django_mobile.middleware.SetFlavourMiddleware',
# 'debug_toolbar.middleware.DebugToolbarMiddleware',
'reversion.middleware.RevisionMiddleware',
)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(PROJECT_DIR, 'templates/' + SIGNBANK_VERSION_CODE + '-templates'),
os.path.join(PROJECT_DIR, 'signbank/registration/templates/')],
'OPTIONS': {
'context_processors': [
"django.template.context_processors.debug",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"signbank.context_processors.url",
"signbank.pages.context_processors.menu",
# "django_mobile.context_processors.flavour",
],
'loaders': [
# 'django_mobile.loader.Loader',
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]
},
},
]
# add the Email backend to allow logins using email as username
AUTHENTICATION_BACKENDS = (
"signbank.registration.EmailBackend",
"django.contrib.auth.backends.ModelBackend",
'guardian.backends.ObjectPermissionBackend',
)
AUTH_PROFILE_MODULE = 'dictionary.UserProfile'
INTERNAL_IPS = ('127.0.0.1','172.16.31.10')
ROOT_URLCONF = 'signbank.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'signbank.wsgi.application'
INSTALLED_APPS = (
'modeltranslation',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.admin',
'django.contrib.admindocs',
'django.contrib.staticfiles',
'bootstrap3',
'django_summernote',
# 'django_select2',
# 'easy_select2',
'signbank.dictionary',
'signbank.feedback',
#'signbank.registration',
'signbank.pages',
'signbank.attachments',
'signbank.video',
'reversion',
#'django_mobile',
'tagging',
'guardian',
#'debug_toolbar'
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# turn on lots of logging or not
DO_LOGGING = False
LOG_FILENAME = "debug.log"
SOUTH_TESTS_MIGRATE = False
## Application settings for signbank
## Settings controlling page contents
# do we implement safe search for anonymous users?
# if True, any gloss that is tagged lexis:crude will be removed from
# search results for users who are not logged in
ANON_SAFE_SEARCH = False
# do we show the tag based search for anonymous users?
ANON_TAG_SEARCH = False
# do we display the previous/next links to signs, requires gloss.sn to be used consistently
SIGN_NAVIGATION = False
# which definition fields do we show and in what order?
DEFINITION_FIELDS = ['general', 'noun', 'verb', 'interact', 'deictic', 'modifier', 'question', 'augment', 'note']
HANDSHAPE_RESULT_FIELDS = ['machine_value', 'english_name', 'dutch_name', 'chinese_name',
'hsFingSel', 'hsFingConf', 'hsFingSel2', 'hsFingConf2', 'hsFingUnsel', 'hsSpread', 'hsAperture']
# location and URL for uploaded files
UPLOAD_ROOT = MEDIA_ROOT + "upload/"
UPLOAD_URL = MEDIA_URL + "upload/"
# Location for comment videos relative to MEDIA_ROOT
COMMENT_VIDEO_LOCATION = "comments"
# Location for videos associated with pages
PAGES_VIDEO_LOCATION = 'pages'
# location for upload of videos relative to MEDIA_ROOT
# videos are stored here prior to copying over to the main
# storage location
VIDEO_UPLOAD_LOCATION = "upload"
# path to store uploaded attachments relative to MEDIA_ROOT
ATTACHMENT_LOCATION = 'attachments'
# which fields from the Gloss model should be included in the quick update form on the sign view
QUICK_UPDATE_GLOSS_FIELDS = ['signlanguage', 'dialect']
# should we always require a login for viewing dictionary content
ALWAYS_REQUIRE_LOGIN = True
# do we allow people to register for the site
ALLOW_REGISTRATION = True
ACCOUNT_ACTIVATION_DAYS = 7
# show the number signs page or an under construction page?
SHOW_NUMBERSIGNS = True
LOGIN_URL = PREFIX_URL+'/accounts/login/'
LOGIN_REDIRECT_URL = PREFIX_URL+'/signs/recently_added/'
# location of ffmpeg, used to convert uploaded videos
# FFMPEG_PROGRAM = "/Applications/ffmpegX.app/Contents/Resources/ffmpeg"
FFMPEG_TIMEOUT = 60
FFMPEG_OPTIONS = ["-vcodec", "h264", "-an"]
# defines the aspect ratio for videos
VIDEO_ASPECT_RATIO = 3.0/4.0
# settings for django-tagging
FORCE_LOWERCASE_TAGS = False
PRIMARY_CSS = "css/"+SIGNBANK_VERSION_CODE+"/main.css"
import mimetypes
mimetypes.add_type("video/mp4", ".mov", True)
# a list of tags we're allowed to use
XALLOWED_TAGS = [ '',
'workflow:needs video',
'workflow:redo video',
'workflow:problematic',
'corpus:attested',
'lexis:doubtlex',
'phonology:alternating',
'phonology:dominant hand only',
'phonology:double handed',
'phonology:forearm rotation',
'phonology:handshape change',
'phonology:onehand',
'phonology:parallel',
'phonology:symmetrical',
'phonology:two handed',
]
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
EARLIEST_GLOSS_CREATION_DATE = datetime(2015,1,1)
SUPPORTED_CITATION_IMAGE_EXTENSIONS = ['.jpg','.jpeg','.png']
MAXIMUM_UPLOAD_SIZE = 5000000
MINIMUM_OVERLAP_BETWEEN_SIGNING_HANDS_IN_CNGT = 40
DISABLE_MOVING_THUMBNAILS_ABOVE_NR_OF_GLOSSES = 200
DATA_UPLOAD_MAX_NUMBER_FIELDS = None
DATA_UPLOAD_MAX_MEMORY_SIZE = None | 1.945313 | 2 |
ocs_ci/ocs/cluster.py | crombus/ocs-ci | 0 | 5294 | <filename>ocs_ci/ocs/cluster.py
"""
A module for all rook functionalities and abstractions.
This module has rook-related classes and support functionality for working
with a rook cluster. It assumes that an OCP cluster is already functional
and that the proper configuration for interaction is in place.
"""
import base64
import logging
import random
import re
import threading
import yaml
import time
import ocs_ci.ocs.resources.pod as pod
from ocs_ci.ocs.exceptions import UnexpectedBehaviour
from ocs_ci.ocs.resources import ocs, storage_cluster
import ocs_ci.ocs.constants as constant
from ocs_ci.ocs.resources.mcg import MCG
from ocs_ci.utility.retry import retry
from ocs_ci.utility.utils import (
TimeoutSampler,
run_cmd,
convert_device_size,
get_trim_mean,
)
from ocs_ci.ocs.utils import get_pod_name_by_pattern
from ocs_ci.framework import config
from ocs_ci.ocs import ocp, constants, exceptions
from ocs_ci.ocs.exceptions import PoolNotFound
from ocs_ci.ocs.resources.pvc import get_all_pvc_objs
logger = logging.getLogger(__name__)
class CephCluster(object):
"""
Handles all cluster related operations from ceph perspective
This class has depiction of ceph cluster. Contains references to
pod objects which represents ceph cluster entities.
Attributes:
pods (list) : A list of ceph cluster related pods
cluster_name (str): Name of ceph cluster
namespace (str): openshift Namespace where this cluster lives
"""
def __init__(self):
"""
Cluster object initializer, this object needs to be initialized
after cluster deployment. However its harmless to do anywhere.
"""
# cluster_name is name of cluster in rook of type CephCluster
self.POD = ocp.OCP(kind="Pod", namespace=config.ENV_DATA["cluster_namespace"])
self.CEPHCLUSTER = ocp.OCP(
kind="CephCluster", namespace=config.ENV_DATA["cluster_namespace"]
)
self.CEPHFS = ocp.OCP(
kind="CephFilesystem", namespace=config.ENV_DATA["cluster_namespace"]
)
self.DEP = ocp.OCP(
kind="Deployment", namespace=config.ENV_DATA["cluster_namespace"]
)
self.cluster_resource_config = self.CEPHCLUSTER.get().get("items")[0]
try:
self.cephfs_config = self.CEPHFS.get().get("items")[0]
except IndexError as e:
logging.warning(e)
logging.warning("No CephFS found")
self.cephfs_config = None
self._cluster_name = self.cluster_resource_config.get("metadata").get("name")
self._namespace = self.cluster_resource_config.get("metadata").get("namespace")
# We are not invoking ocs.create() here
# assuming cluster creation is done somewhere after deployment
# So just load ocs with existing cluster details
self.cluster = ocs.OCS(**self.cluster_resource_config)
if self.cephfs_config:
self.cephfs = ocs.OCS(**self.cephfs_config)
else:
self.cephfs = None
self.mon_selector = constant.MON_APP_LABEL
self.mds_selector = constant.MDS_APP_LABEL
self.tool_selector = constant.TOOL_APP_LABEL
self.mgr_selector = constant.MGR_APP_LABEL
self.osd_selector = constant.OSD_APP_LABEL
self.noobaa_selector = constant.NOOBAA_APP_LABEL
self.noobaa_core_selector = constant.NOOBAA_CORE_POD_LABEL
self.mons = []
self._ceph_pods = []
self.mdss = []
self.mgrs = []
self.osds = []
self.noobaas = []
self.rgws = []
self.toolbox = None
self.mds_count = 0
self.mon_count = 0
self.mgr_count = 0
self.osd_count = 0
self.noobaa_count = 0
self.rgw_count = 0
self._mcg_obj = None
self.scan_cluster()
logging.info(f"Number of mons = {self.mon_count}")
logging.info(f"Number of mds = {self.mds_count}")
self.used_space = 0
@property
def mcg_obj(self):
if not self._mcg_obj:
self._mcg_obj = MCG()
return self._mcg_obj
@property
def cluster_name(self):
return self._cluster_name
@property
def namespace(self):
return self._namespace
@property
def pods(self):
return self._ceph_pods
def scan_cluster(self):
"""
Get accurate info on current state of pods
"""
self._ceph_pods = pod.get_all_pods(self._namespace)
# TODO: Workaround for BZ1748325:
mons = pod.get_mon_pods(self.mon_selector, self.namespace)
for mon in mons:
if mon.ocp.get_resource_status(mon.name) == constant.STATUS_RUNNING:
self.mons.append(mon)
# TODO: End of workaround for BZ1748325
self.mdss = pod.get_mds_pods(self.mds_selector, self.namespace)
self.mgrs = pod.get_mgr_pods(self.mgr_selector, self.namespace)
self.osds = pod.get_osd_pods(self.osd_selector, self.namespace)
self.noobaas = pod.get_noobaa_pods(self.noobaa_selector, self.namespace)
self.rgws = pod.get_rgw_pods()
self.toolbox = pod.get_ceph_tools_pod()
# set port attrib on mon pods
self.mons = list(map(self.set_port, self.mons))
self.cluster.reload()
if self.cephfs:
self.cephfs.reload()
else:
try:
self.cephfs_config = self.CEPHFS.get().get("items")[0]
self.cephfs = ocs.OCS(**self.cephfs_config)
self.cephfs.reload()
except IndexError as e:
logging.warning(e)
logging.warning("No CephFS found")
self.mon_count = len(self.mons)
self.mds_count = len(self.mdss)
self.mgr_count = len(self.mgrs)
self.osd_count = len(self.osds)
self.noobaa_count = len(self.noobaas)
self.rgw_count = len(self.rgws)
@staticmethod
def set_port(pod):
"""
Set port attribute on pod.
        The port attribute for a mon is required for secrets, and this
        attribute is not a member of the original Pod class.
Args:
pod(Pod): Pod object without 'port' attribute
Returns:
pod(Pod): A modified pod object with 'port' attribute set
"""
container = pod.pod_data.get("spec").get("containers")
port = container[0]["ports"][0]["containerPort"]
# Dynamically added attribute 'port'
pod.port = port
logging.info(f"port={pod.port}")
return pod
def is_health_ok(self):
"""
Returns:
bool: True if "HEALTH_OK" else False
"""
self.cluster.reload()
return self.cluster.data["status"]["ceph"]["health"] == "HEALTH_OK"
def cluster_health_check(self, timeout=None):
"""
Check overall cluster health.
Relying on health reported by CephCluster.get()
Args:
timeout (int): in seconds. By default timeout value will be scaled
based on number of ceph pods in the cluster. This is just a
                crude number. It's been observed that as the number of pods
                increases, it takes more time for the cluster to reach HEALTH_OK.
Returns:
bool: True if "HEALTH_OK" else False
Raises:
CephHealthException: if cluster is not healthy
"""
# Scale timeout only if user hasn't passed any value
timeout = timeout or (10 * len(self.pods))
sample = TimeoutSampler(timeout=timeout, sleep=3, func=self.is_health_ok)
if not sample.wait_for_func_status(result=True):
raise exceptions.CephHealthException("Cluster health is NOT OK")
# This way of checking health of different cluster entities and
# raising only CephHealthException is not elegant.
# TODO: add an attribute in CephHealthException, called "reason"
# which should tell because of which exact cluster entity health
# is not ok ?
expected_mon_count = self.mon_count
expected_mds_count = self.mds_count
self.scan_cluster()
try:
self.mon_health_check(expected_mon_count)
except exceptions.MonCountException as e:
logger.error(e)
raise exceptions.CephHealthException("Cluster health is NOT OK")
try:
if not expected_mds_count:
pass
else:
self.mds_health_check(expected_mds_count)
except exceptions.MDSCountException as e:
logger.error(e)
raise exceptions.CephHealthException("Cluster health is NOT OK")
# TODO: OSD and MGR health check
logger.info("Cluster HEALTH_OK")
        # This scan is for reconciliation of the *.count attributes, because
        # during the first scan in this function some of the pods may not have
        # been up yet, which would have set the counts to a lower number
self.scan_cluster()
# Check Noobaa health
self.wait_for_noobaa_health_ok()
def noobaa_health_check(self):
"""
Check Noobaa health
"""
if not self.mcg_obj.status:
raise exceptions.NoobaaHealthException("Cluster health is NOT OK")
def wait_for_noobaa_health_ok(self, tries=60, delay=5):
"""
Wait for Noobaa health to be OK
"""
return retry(
exceptions.NoobaaHealthException, tries=tries, delay=delay, backoff=1
)(self.noobaa_health_check)()
def mon_change_count(self, new_count):
"""
Change mon count in the cluster
Args:
new_count(int): Absolute number of mons required
"""
self.cluster.reload()
self.cluster.data["spec"]["mon"]["count"] = new_count
logger.info(self.cluster.data)
self.cluster.apply(**self.cluster.data)
self.mon_count = new_count
self.cluster_health_check()
logger.info(f"Mon count changed to {new_count}")
self.cluster.reload()
def mon_health_check(self, count):
"""
Mon health check based on pod count
Args:
count (int): Expected number of mon pods
Raises:
MonCountException: if mon pod count doesn't match
"""
timeout = 10 * len(self.pods)
logger.info(f"Expected MONs = {count}")
try:
assert self.POD.wait_for_resource(
condition="Running",
selector=self.mon_selector,
resource_count=count,
timeout=timeout,
sleep=3,
)
# TODO: Workaround for BZ1748325:
actual_mons = pod.get_mon_pods()
actual_running_mons = list()
for mon in actual_mons:
if mon.ocp.get_resource_status(mon.name) == constant.STATUS_RUNNING:
actual_running_mons.append(mon)
actual = len(actual_running_mons)
# TODO: End of workaround for BZ1748325
assert count == actual, f"Expected {count}, Got {actual}"
except exceptions.TimeoutExpiredError as e:
logger.error(e)
raise exceptions.MonCountException(
f"Failed to achieve desired Mon count" f" {count}"
)
def mds_change_count(self, new_count):
"""
Change mds count in the cluster
Args:
new_count(int): Absolute number of active mdss required
"""
self.cephfs.data["spec"]["metadataServer"]["activeCount"] = new_count
self.cephfs.apply(**self.cephfs.data)
logger.info(f"MDS active count changed to {new_count}")
if self.cephfs.data["spec"]["metadataServer"]["activeStandby"]:
expected = new_count * 2
else:
expected = new_count
self.mds_count = expected
self.cluster_health_check()
self.cephfs.reload()
def mds_health_check(self, count):
"""
MDS health check based on pod count
Args:
count (int): number of pods expected
Raises:
            MDSCountException: if pod count doesn't match
"""
timeout = 10 * len(self.pods)
try:
assert self.POD.wait_for_resource(
condition="Running",
selector=self.mds_selector,
resource_count=count,
timeout=timeout,
sleep=3,
)
except AssertionError as e:
logger.error(e)
raise exceptions.MDSCountException(
f"Failed to achieve desired MDS count" f" {count}"
)
def get_admin_key(self):
"""
Returns:
adminkey (str): base64 encoded key
"""
return self.get_user_key("client.admin")
def set_noout(self):
"""
        Set the noout flag for maintenance
"""
self.toolbox.exec_cmd_on_pod("ceph osd set noout")
def unset_noout(self):
"""
unset noout flag for peering
"""
self.toolbox.exec_cmd_on_pod("ceph osd unset noout")
def get_user_key(self, user):
"""
Args:
user (str): ceph username ex: client.user1
Returns:
key (str): base64 encoded user key
"""
out = self.toolbox.exec_cmd_on_pod(f"ceph auth get-key {user} --format json")
if "ENOENT" in out:
return False
key_base64 = base64.b64encode(out["key"].encode()).decode()
return key_base64
def create_user(self, username, caps):
"""
Create a ceph user in the cluster
Args:
username (str): ex client.user1
caps (str): ceph caps ex: mon 'allow r' osd 'allow rw'
Return:
return value of get_user_key()
"""
cmd = f"ceph auth add {username} {caps}"
# As of now ceph auth command gives output to stderr
# To be handled
out = self.toolbox.exec_cmd_on_pod(cmd)
logging.info(type(out))
return self.get_user_key(username)
def get_mons_from_cluster(self):
"""
Getting the list of mons from the cluster
Returns:
available_mon (list): Returns the mons from the cluster
"""
ret = self.DEP.get(
resource_name="", out_yaml_format=False, selector="app=rook-ceph-mon"
)
available_mon = re.findall(r"[\w-]+mon-+[\w-]", ret)
return available_mon
def remove_mon_from_cluster(self):
"""
Removing the mon pod from deployment
Returns:
remove_mon(bool): True if removal of mon is successful, False otherwise
"""
mons = self.get_mons_from_cluster()
after_delete_mon_count = len(mons) - 1
random_mon = random.choice(mons)
remove_mon = self.DEP.delete(resource_name=random_mon)
assert self.POD.wait_for_resource(
condition=constant.STATUS_RUNNING,
resource_count=after_delete_mon_count,
selector="app=rook-ceph-mon",
)
logging.info(f"Removed the mon {random_mon} from the cluster")
return remove_mon
@retry(UnexpectedBehaviour, tries=20, delay=10, backoff=1)
def check_ceph_pool_used_space(self, cbp_name):
"""
Check for the used space of a pool in cluster
Returns:
used_in_gb (float): Amount of used space in pool (in GBs)
Raises:
UnexpectedBehaviour: If used size keeps varying in Ceph status
"""
ct_pod = pod.get_ceph_tools_pod()
rados_status = ct_pod.exec_ceph_cmd(ceph_cmd=f"rados df -p {cbp_name}")
assert rados_status is not None
used = rados_status["pools"][0]["size_bytes"]
used_in_gb = format(used / constants.GB, ".4f")
if self.used_space and self.used_space == used_in_gb:
return float(self.used_space)
self.used_space = used_in_gb
raise UnexpectedBehaviour("In Rados df, Used size is varying")
def get_ceph_health(self, detail=False):
"""
Exec `ceph health` cmd on tools pod and return the status of the ceph
cluster.
Args:
detail (bool): If True the 'ceph health detail' is executed
Returns:
str: Output of the ceph health command.
"""
ceph_health_cmd = "ceph health"
if detail:
ceph_health_cmd = f"{ceph_health_cmd} detail"
return self.toolbox.exec_cmd_on_pod(
ceph_health_cmd,
out_yaml_format=False,
)
def get_ceph_status(self, format=None):
"""
Exec `ceph status` cmd on tools pod and return its output.
Args:
format (str) : Format of the output (e.g. json-pretty, json, plain)
Returns:
str: Output of the ceph status command.
"""
cmd = "ceph status"
if format:
cmd += f" -f {format}"
return self.toolbox.exec_cmd_on_pod(cmd, out_yaml_format=False)
def get_ceph_capacity(self):
"""
        The function gets the total amount of storage capacity of the ocs cluster.
        The calculation is <Num of OSD> * <OSD size> / <replica number>;
        it does not take into account the currently used capacity.
Returns:
int : Total storage capacity in GiB (GiB is for development environment)
"""
storage_cluster_obj = storage_cluster.StorageCluster(
resource_name=config.ENV_DATA["storage_cluster_name"],
namespace=config.ENV_DATA["cluster_namespace"],
)
replica = int(
storage_cluster_obj.data["spec"]["storageDeviceSets"][0]["replica"]
)
ceph_pod = pod.get_ceph_tools_pod()
ceph_status = ceph_pod.exec_ceph_cmd(ceph_cmd="ceph df")
usable_capacity = (
int(ceph_status["stats"]["total_bytes"]) / replica / constant.GB
)
return usable_capacity
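    # Worked example (hypothetical numbers): with 3 OSDs of 2 TiB each and
    # replica 3, "ceph df" reports total_bytes of roughly 6 TiB, so
    # total_bytes / replica / GB comes out to about 2048 GiB of usable capacity.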
def get_ceph_cluster_iops(self):
"""
The function gets the IOPS from the ocs cluster
Returns:
Total IOPS in the cluster
"""
ceph_pod = pod.get_ceph_tools_pod()
ceph_status = ceph_pod.exec_ceph_cmd(ceph_cmd="ceph status")
read_ops = ceph_status["pgmap"]["read_op_per_sec"]
write_ops = ceph_status["pgmap"]["write_op_per_sec"]
cluster_iops = read_ops + write_ops
return cluster_iops
def get_iops_percentage(self, osd_size=2):
"""
The function calculates the IOPS percentage
of the cluster depending on number of osds in the cluster
Args:
osd_size (int): Size of 1 OSD in Ti
Returns:
IOPS percentage of the OCS cluster
"""
osd_count = count_cluster_osd()
iops_per_osd = osd_size * constants.IOPS_FOR_1TiB_OSD
iops_in_cluster = self.get_ceph_cluster_iops()
osd_iops_limit = iops_per_osd * osd_count
iops_percentage = (iops_in_cluster / osd_iops_limit) * 100
logging.info(f"The IOPS percentage of the cluster is {iops_percentage}%")
return iops_percentage
def get_cluster_throughput(self):
"""
Function to get the throughput of ocs cluster
Returns:
            float: The combined read and write throughput of the cluster in MiB/s
"""
ceph_status = self.get_ceph_status()
for item in ceph_status.split("\n"):
if "client" in item:
throughput_data = item.strip("client: ").split(",")
throughput_data = throughput_data[:2:1]
# Converting all B/s and KiB/s to MiB/s
throughput = 0
for val in throughput_data:
throughput += [
float(re.findall(r"\d+", val)[0]) * constants.TP_CONVERSION[key]
for key in constants.TP_CONVERSION.keys()
if key in val
][0]
logger.info(
f"The {val[-2:].upper()} throughput is {throughput} MiB/s"
)
return throughput
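    # Illustration (hypothetical "ceph status" output): a client line such as
    #   "client:   853 KiB/s rd, 21 MiB/s wr, 3 op/s rd, 345 op/s wr"
    # is reduced to its first two comma-separated fields (read and write
    # bandwidth); each is converted to MiB/s via constants.TP_CONVERSION and
    # the two are summed into the returned throughput value.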
def get_throughput_percentage(self):
"""
Function to get throughput percentage of the ocs cluster
Returns:
Throughput percentage of the cluster
"""
throughput_of_cluster = self.get_cluster_throughput()
throughput_percentage = (
throughput_of_cluster / constants.THROUGHPUT_LIMIT_OSD
) * 100
logging.info(
f"The throughput percentage of the cluster is {throughput_percentage}%"
)
return throughput_percentage
def calc_trim_mean_throughput(self, samples=8):
"""
Calculate the cluster average throughput out of a few samples
Args:
samples (int): The number of samples to take
Returns:
float: The average cluster throughput
"""
throughput_vals = [self.get_cluster_throughput() for _ in range(samples)]
return round(get_trim_mean(throughput_vals), 3)
def get_rebalance_status(self):
"""
This function gets the rebalance status
Returns:
bool: True if rebalance is completed, False otherwise
"""
ceph_pod = pod.get_ceph_tools_pod()
ceph_status = ceph_pod.exec_ceph_cmd(ceph_cmd="ceph status")
ceph_health = ceph_pod.exec_ceph_cmd(ceph_cmd="ceph health")
total_pg_count = ceph_status["pgmap"]["num_pgs"]
pg_states = ceph_status["pgmap"]["pgs_by_state"]
logger.info(ceph_health)
logger.info(pg_states)
for states in pg_states:
return (
states["state_name"] == "active+clean"
and states["count"] == total_pg_count
)
def wait_for_rebalance(self, timeout=600):
"""
Wait for re-balance to complete
Args:
timeout (int): Time to wait for the completion of re-balance
Returns:
bool: True if rebalance completed, False otherwise
"""
try:
for rebalance in TimeoutSampler(
timeout=timeout, sleep=10, func=self.get_rebalance_status
):
if rebalance:
logging.info("Re-balance is completed")
return True
except exceptions.TimeoutExpiredError:
logger.error(
f"Data re-balance failed to complete within the given "
f"timeout of {timeout} seconds"
)
return False
def time_taken_to_complete_rebalance(self, timeout=600):
"""
This function calculates the time taken to complete
rebalance
Args:
timeout (int): Time to wait for the completion of rebalance
Returns:
int : Time taken in minutes for the completion of rebalance
"""
start_time = time.time()
assert self.wait_for_rebalance(timeout=timeout), (
f"Data re-balance failed to complete within the given "
f"timeout of {timeout} seconds"
)
time_taken = time.time() - start_time
return time_taken / 60
class CephHealthMonitor(threading.Thread):
"""
Context manager class for monitoring ceph health status of CephCluster.
    If the CephCluster reaches the HEALTH_ERROR state, the monitor saves the ceph
    status to the health_error_status variable and stops monitoring.
"""
def __init__(self, ceph_cluster, sleep=5):
"""
Constructor for ceph health status thread.
Args:
ceph_cluster (CephCluster): Reference to CephCluster object.
sleep (int): Number of seconds to sleep between health checks.
"""
self.ceph_cluster = ceph_cluster
self.sleep = sleep
self.health_error_status = None
self.health_monitor_enabled = False
self.latest_health_status = None
super(CephHealthMonitor, self).__init__()
def run(self):
self.health_monitor_enabled = True
while self.health_monitor_enabled and (not self.health_error_status):
time.sleep(self.sleep)
self.latest_health_status = self.ceph_cluster.get_ceph_health(detail=True)
if "HEALTH_ERROR" in self.latest_health_status:
self.health_error_status = self.ceph_cluster.get_ceph_status()
self.log_error_status()
def __enter__(self):
self.start()
def __exit__(self, exception_type, value, traceback):
"""
Exit method for context manager
Raises:
CephHealthException: If no other exception occurred during
execution of context manager and HEALTH_ERROR is detected
during the monitoring.
exception_type: In case of exception raised during processing of
the context manager.
"""
self.health_monitor_enabled = False
if self.health_error_status:
self.log_error_status()
if exception_type:
raise exception_type.with_traceback(value, traceback)
if self.health_error_status:
raise exceptions.CephHealthException(
f"During monitoring of Ceph health status hit HEALTH_ERROR: "
f"{self.health_error_status}"
)
return True
def log_error_status(self):
logger.error(
f"ERROR HEALTH STATUS DETECTED! " f"Status: {self.health_error_status}"
)
def validate_ocs_pods_on_pvc(pods, pvc_names):
"""
    Validate that each OCS pod has a PVC. This validation checks whether one of the
    PVC names is contained in the pod name, e.g. the PVC rook-ceph-mon-a for the
    pod rook-ceph-mon-a-56f67f5968-6j4px.
Args:
pods (list): OCS pod names
pvc_names (list): names of all PVCs
Raises:
AssertionError: If no PVC found for one of the pod
"""
logger.info(f"Validating if each pod from: {pods} has PVC from {pvc_names}.")
for pod_name in pods:
found_pvc = ""
for pvc in pvc_names:
if pvc in pod_name:
found_pvc = pvc
if found_pvc:
logger.info(f"PVC {found_pvc} found for pod {pod_name}")
continue
assert found_pvc, f"No PVC found for pod: {pod_name}!"
def validate_cluster_on_pvc():
"""
Validate creation of PVCs for MON and OSD pods.
Also validate that those PVCs are attached to the OCS pods
Raises:
AssertionError: If PVC is not mounted on one or more OCS pods
"""
# Get the PVCs for selected label (MON/OSD)
ns = config.ENV_DATA["cluster_namespace"]
ocs_pvc_obj = get_all_pvc_objs(namespace=ns)
# Check all pvc's are in bound state
pvc_names = []
for pvc_obj in ocs_pvc_obj:
if pvc_obj.name.startswith(
constants.DEFAULT_DEVICESET_PVC_NAME
) or pvc_obj.name.startswith(constants.DEFAULT_MON_PVC_NAME):
assert (
pvc_obj.status == constants.STATUS_BOUND
), f"PVC {pvc_obj.name} is not Bound"
logger.info(f"PVC {pvc_obj.name} is in Bound state")
pvc_names.append(pvc_obj.name)
mon_pods = get_pod_name_by_pattern("rook-ceph-mon", ns)
if not config.DEPLOYMENT.get("local_storage"):
logger.info("Validating all mon pods have PVC")
validate_ocs_pods_on_pvc(mon_pods, pvc_names)
else:
logger.debug(
"Skipping validation if all mon pods have PVC because in LSO "
"deployment we don't have mon pods backed by PVC"
)
logger.info("Validating all osd pods have PVC")
osd_deviceset_pods = get_pod_name_by_pattern(
"rook-ceph-osd-prepare-ocs-deviceset", ns
)
validate_ocs_pods_on_pvc(osd_deviceset_pods, pvc_names)
osd_pods = get_pod_name_by_pattern("rook-ceph-osd", ns, filter="prepare")
for ceph_pod in mon_pods + osd_pods:
out = run_cmd(f"oc -n {ns} get pods {ceph_pod} -o yaml")
out_yaml = yaml.safe_load(out)
for vol in out_yaml["spec"]["volumes"]:
if vol.get("persistentVolumeClaim"):
claimName = vol.get("persistentVolumeClaim").get("claimName")
logger.info(f"{ceph_pod} backed by pvc {claimName}")
assert claimName in pvc_names, "Ceph Internal Volume not backed by PVC"
def count_cluster_osd():
"""
The function returns the number of cluster OSDs
Returns:
osd_count (int): number of OSD pods in current cluster
"""
storage_cluster_obj = storage_cluster.StorageCluster(
resource_name=config.ENV_DATA["storage_cluster_name"],
namespace=config.ENV_DATA["cluster_namespace"],
)
storage_cluster_obj.reload_data()
osd_count = int(
storage_cluster_obj.data["spec"]["storageDeviceSets"][0]["count"]
) * int(storage_cluster_obj.data["spec"]["storageDeviceSets"][0]["replica"])
return osd_count
def validate_pdb_creation():
"""
Validate creation of PDBs for MON, MDS and OSD pods.
Raises:
AssertionError: If required PDBs were not created.
"""
pdb_obj = ocp.OCP(kind="PodDisruptionBudget")
item_list = pdb_obj.get().get("items")
pdb_list = [item["metadata"]["name"] for item in item_list]
osd_count = count_cluster_osd()
pdb_required = [constants.MDS_PDB, constants.MON_PDB]
for num in range(osd_count):
pdb_required.append(constants.OSD_PDB + str(num))
pdb_list.sort()
pdb_required.sort()
for required, given in zip(pdb_required, pdb_list):
assert required == given, f"{required} was not created"
logger.info(f"All required PDBs created: {pdb_required}")
def get_osd_utilization():
"""
Get osd utilization value
Returns:
osd_filled (dict): Dict of osd name and its used value
i.e {'osd.1': 15.276289408185841, 'osd.0': 15.276289408185841, 'osd.2': 15.276289408185841}
"""
osd_filled = {}
ceph_cmd = "ceph osd df"
ct_pod = pod.get_ceph_tools_pod()
output = ct_pod.exec_ceph_cmd(ceph_cmd=ceph_cmd)
for osd in output.get("nodes"):
osd_filled[osd["name"]] = osd["utilization"]
return osd_filled
def get_ceph_df_detail():
"""
Get ceph osd df detail
Returns:
dict: 'ceph df details' command output
"""
ceph_cmd = "ceph df detail"
ct_pod = pod.get_ceph_tools_pod()
return ct_pod.exec_ceph_cmd(ceph_cmd=ceph_cmd)
def validate_replica_data(pool_name, replica):
"""
Check if data is replica 2 or 3
Args:
replica (int): size of the replica(2,3)
pool_name (str): name of the pool to check replica
Returns:
        bool: True if the replicated data size meets the replica config, False otherwise
"""
ceph_df_detail_output = get_ceph_df_detail()
pool_list = ceph_df_detail_output.get("pools")
for pool in pool_list:
if pool.get("name") == pool_name:
logger.info(f"{pool_name}")
stored = pool["stats"]["stored"]
byte_used = pool["stats"]["bytes_used"]
compress_bytes_used = pool["stats"]["compress_bytes_used"]
compress_under_bytes = pool["stats"]["compress_under_bytes"]
byte_used = byte_used + compress_under_bytes - compress_bytes_used
store_ratio = byte_used / stored
if (replica + 0.2) > store_ratio > (replica - 0.2):
                logger.info(f"pool {pool_name} meets replica {replica} size")
return True
else:
                logger.info(
                    f"pool {pool_name} does not meet replica {replica}"
                    f" size. Store ratio is {store_ratio}"
)
return False
raise PoolNotFound(f"Pool {pool_name} not found on cluster")
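# Worked example (hypothetical numbers): with stored at 10 GiB and roughly
# 30 GiB of bytes_used (and no compression), store_ratio is about 3.0, which
# falls inside the (replica - 0.2, replica + 0.2) window for replica 3, so the
# pool is considered to meet its replica-3 configuration.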
def validate_compression(pool_name):
"""
Check if data was compressed
Args:
pool_name (str): name of the pool to check replica
Returns:
bool: True if compression works. False if not
"""
ceph_df_detail_output = get_ceph_df_detail()
pool_list = ceph_df_detail_output.get("pools")
for pool in pool_list:
if pool.get("name") == pool_name:
logger.info(f"{pool_name}")
byte_used = pool["stats"]["bytes_used"]
compress_bytes_used = pool["stats"]["compress_bytes_used"]
compress_under_bytes = pool["stats"]["compress_under_bytes"]
all_byte_used = byte_used + compress_under_bytes - compress_bytes_used
compression_ratio = byte_used / all_byte_used
logger.info(f"this is the comp_ratio {compression_ratio}")
if 0.6 < compression_ratio:
logger.info(
f"Compression ratio {compression_ratio} is " f"larger than 0.6"
)
return True
else:
logger.info(
f"Compression ratio {compression_ratio} is " f"smaller than 0.6"
)
return False
raise PoolNotFound(f"Pool {pool_name} not found on cluster")
def validate_osd_utilization(osd_used=80):
"""
Validates osd utilization matches osd_used value
Args:
osd_used (int): osd used value
Returns:
bool: True if all osd values is equal or greater to osd_used.
False Otherwise.
"""
_rc = True
osd_filled = get_osd_utilization()
for osd, value in osd_filled.items():
if int(value) >= osd_used:
logger.info(f"{osd} used value {value}")
else:
_rc = False
logger.warning(f"{osd} used value {value}")
return _rc
def get_pgs_per_osd():
"""
Function to get ceph pg count per OSD
Returns:
osd_dict (dict): Dict of osd name and its used value
i.e {'osd.0': 136, 'osd.2': 136, 'osd.1': 136}
"""
osd_dict = {}
ceph_cmd = "ceph osd df"
ct_pod = pod.get_ceph_tools_pod()
output = ct_pod.exec_ceph_cmd(ceph_cmd=ceph_cmd)
for osd in output.get("nodes"):
osd_dict[osd["name"]] = osd["pgs"]
return osd_dict
def get_balancer_eval():
"""
Function to get ceph pg balancer eval value
Returns:
eval_out (float): Eval output of pg balancer
"""
ceph_cmd = "ceph balancer eval"
ct_pod = pod.get_ceph_tools_pod()
eval_out = ct_pod.exec_ceph_cmd(ceph_cmd=ceph_cmd).split(" ")
return float(eval_out[3])
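# Note: "ceph balancer eval" typically prints a single line of the form
# "current cluster score 0.013806 (lower is better)" (illustrative output),
# so splitting on spaces and taking index 3 yields the numeric score.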
def get_pg_balancer_status():
"""
Function to check pg_balancer active and mode is upmap
Returns:
bool: True if active and upmap is set else False
"""
# Check either PG balancer is active or not
ceph_cmd = "ceph balancer status"
ct_pod = pod.get_ceph_tools_pod()
output = ct_pod.exec_ceph_cmd(ceph_cmd=ceph_cmd)
# Check 'mode' is 'upmap', based on suggestion from Ceph QE
# TODO: Revisit this if mode needs change.
if output["active"] and output["mode"] == "upmap":
logging.info("PG balancer is active and mode is upmap")
return True
else:
logging.error("PG balancer is not active")
return False
def validate_pg_balancer():
"""
Validate either data is equally distributed to OSDs
Returns:
bool: True if avg PG's per osd difference is <=10 else False
"""
# Check OSD utilization either pg balancer is active
# TODO: Revisit this if pg difference value needs change
# TODO: Revisit eval value if pg balancer mode changes from 'upmap'
if get_pg_balancer_status():
eval = get_balancer_eval()
osd_dict = get_pgs_per_osd()
osd_avg_pg_value = round(sum(osd_dict.values()) / len(osd_dict))
osd_pg_value_flag = True
for key, value in osd_dict.items():
diff = abs(value - osd_avg_pg_value)
if diff <= 10:
logging.info(f"{key} PG difference {diff} is acceptable")
else:
logging.error(f"{key} PG difference {diff} is not acceptable")
osd_pg_value_flag = False
if osd_pg_value_flag and eval <= 0.025:
logging.info(
f"Eval value is {eval} and pg distribution "
f"average difference is <=10 which is acceptable"
)
return True
else:
logging.error(
f"Eval value is {eval} and pg distribution "
f"average difference is >=10 which is high and not acceptable"
)
return False
else:
logging.info("pg_balancer is not active")
def get_percent_used_capacity():
"""
Function to calculate the percentage of used capacity in a cluster
Returns:
float: The percentage of the used capacity in the cluster
"""
ct_pod = pod.get_ceph_tools_pod()
output = ct_pod.exec_ceph_cmd(ceph_cmd="ceph df")
total_used = output.get("stats").get("total_used_raw_bytes")
total_avail = output.get("stats").get("total_bytes")
return 100.0 * total_used / total_avail
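# Worked example (hypothetical numbers): total_used_raw_bytes of 512 GiB out
# of total_bytes of 2048 GiB yields 100.0 * 512 / 2048 = 25.0 percent used.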
def get_osd_pods_memory_sum():
"""
    Get the sum of memory of all OSD pods. This is used to determine the size
    needed for a PVC, so that when IO runs over it the OSD caches will be filled.
Returns:
int: The sum of the OSD pods memory in GB
"""
osd_pods = pod.get_osd_pods()
num_of_osd_pods = len(osd_pods)
osd_pod_mem_size_str = osd_pods[0].get_memory().get("osd")
osd_pod_mem_size = convert_device_size(
unformatted_size=osd_pod_mem_size_str, units_to_covert_to="GB"
)
return num_of_osd_pods * osd_pod_mem_size
def get_child_nodes_osd_tree(node_id, osd_tree):
"""
This function finds the children of a node from the 'ceph osd tree' and returns them as list
Args:
node_id (int): the id of the node for which the children to be retrieved
osd_tree (dict): dictionary containing the output of 'ceph osd tree'
Returns:
list: of 'children' of a given node_id
"""
for i in range(len(osd_tree["nodes"])):
if osd_tree["nodes"][i]["id"] == node_id:
return osd_tree["nodes"][i]["children"]
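# Example (hypothetical osd tree): for
#   {"nodes": [{"id": -1, "type": "root", "children": [-3]},
#              {"id": -3, "type": "host", "children": [0, 1]},
#              {"id": 0, "type": "osd"}, {"id": 1, "type": "osd"}]}
# get_child_nodes_osd_tree(-3, osd_tree) returns [0, 1].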
def check_osds_in_hosts_osd_tree(hosts, osd_tree):
"""
Checks if osds are formed correctly after cluster expansion
Args:
hosts (list) : List of hosts
osd_tree (str) : 'ceph osd tree' command output
Returns:
bool : True if osd tree formatted correctly
"""
for each_host in hosts:
osd_in_each_host = get_child_nodes_osd_tree(each_host, osd_tree)
if len(osd_in_each_host) > 1 or len(osd_in_each_host) <= 0:
logger.error(
"Error. ceph osd tree is NOT formed correctly after cluster expansion"
)
return False
logger.info("osd tree verification Passed")
return True
def check_osd_tree_1az_vmware(osd_tree, number_of_osds):
"""
Checks whether an OSD tree is created/modified correctly. This can be used as a verification step for
deployment and cluster expansion tests.
This function is specifically for ocs cluster created on 1 AZ VMWare setup
Args:
osd_tree (dict): Dictionary of the values which represent 'osd tree'.
number_of_osds (int): total number of osds in the cluster
Returns:
bool: True, if the ceph osd tree is formed correctly. Else False
"""
# in case of vmware, there will be only one zone as of now. The OSDs are arranged as follows:
# ID CLASS WEIGHT TYPE NAME STATUS REWEIGHT PRI-AFF
# -1 0.99326 root default
# -8 0.33109 rack rack0
# -7 0.33109 host ocs-deviceset-0-0-dktqc
# 1 hdd 0.33109 osd.1 up 1.00000 1.00000
# There will be 3 racks - rack0, rack1, rack2.
# When cluster expansion is successfully done, a host and an osd are added in each rack.
    # The number of hosts will be equal to the number of osds the cluster has. Each rack can
# have multiple hosts but each host will have only one osd under it.
number_of_hosts_expected = int(number_of_osds / 3)
all_hosts = []
racks = osd_tree["nodes"][0]["children"]
for rack in racks:
hosts = get_child_nodes_osd_tree(rack, osd_tree)
if len(hosts) != number_of_hosts_expected:
            logging.error(
                f"Number of hosts under rack {rack} "
                f"does not match the expected count of {number_of_hosts_expected} "
            )
return False
else:
all_hosts.append(hosts)
all_hosts_flatten = [item for sublist in all_hosts for item in sublist]
return check_osds_in_hosts_osd_tree(all_hosts_flatten, osd_tree)
def check_osd_tree_3az_aws(osd_tree, number_of_osds):
"""
Checks whether an OSD tree is created/modified correctly. This can be used as a verification step for
deployment and cluster expansion tests.
This function is specifically for ocs cluster created on 3 AZ AWS config
Args:
osd_tree (dict): Dictionary of the values which represent 'osd tree'.
number_of_osds (int): total number of osds in the cluster
Returns:
Boolean: True, if the ceph osd tree is formed correctly. Else False
"""
all_hosts = []
region = osd_tree["nodes"][0]["children"]
zones = get_child_nodes_osd_tree(region[0], osd_tree)
for each_zone in zones:
hosts_in_each_zone = get_child_nodes_osd_tree(each_zone, osd_tree)
if len(hosts_in_each_zone) != number_of_osds / 3: # 3 is replica_factor
logger.error("number of hosts in zone is incorrect")
return False
else:
all_hosts.append(hosts_in_each_zone)
all_hosts_flatten = [item for sublist in all_hosts for item in sublist]
return check_osds_in_hosts_osd_tree(all_hosts_flatten, osd_tree)
def check_osd_tree_1az_aws(osd_tree, number_of_osds):
"""
Checks whether an OSD tree is created/modified correctly. This can be used as a verification step for
deployment and cluster expansion tests.
This function is specifically for ocs cluster created on 1 AZ AWS config
Args:
osd_tree (dict): Dictionary of the values which represent 'osd tree'.
number_of_osds (int): total number of osds in the cluster
Returns:
Boolean: True, if the ceph osd tree is formed correctly. Else False
"""
all_hosts = []
region = osd_tree["nodes"][0]["children"]
zones = get_child_nodes_osd_tree(region[0], osd_tree)
racks = get_child_nodes_osd_tree(zones[0], osd_tree)
logging.info(f"racks = {racks}")
if len(racks) != 3:
logging.error(f"Expected 3 racks but got {len(racks)}")
for each_rack in racks:
hosts_in_each_rack = get_child_nodes_osd_tree(each_rack, osd_tree)
if len(hosts_in_each_rack) != number_of_osds / 3: # 3 is replica_factor
logging.error("number of hosts in rack is incorrect")
return False
else:
logging.info(f"adding host...{hosts_in_each_rack}")
all_hosts.append(hosts_in_each_rack)
all_hosts_flatten = [item for sublist in all_hosts for item in sublist]
return check_osds_in_hosts_osd_tree(all_hosts_flatten, osd_tree)
def check_osds_in_hosts_are_up(osd_tree):
"""
    Check if all the OSDs are in status 'up'
    Args:
        osd_tree (dict): The ceph osd tree
    Returns:
        bool: True if all the OSDs are in status 'up'. Else False
"""
for n in osd_tree["nodes"]:
if n["type"] == "osd":
if n["status"] != "up":
logger.warning(f"osd with name {n['name']} is not up")
return False
return True
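# Hypothetical usage sketch: fetch the osd tree via the ceph tools pod and
# verify that every OSD reports 'up'. The helper name is an illustrative
# assumption, not part of the original test flow.
def _example_check_all_osds_up():
    ct_pod = pod.get_ceph_tools_pod()
    osd_tree = ct_pod.exec_ceph_cmd(ceph_cmd="ceph osd tree")
    return check_osds_in_hosts_are_up(osd_tree)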
def check_ceph_osd_tree():
"""
Checks whether an OSD tree is created/modified correctly.
    It dispatches to the platform-specific checks defined above:
    'check_osd_tree_1az_vmware', 'check_osd_tree_3az_aws', 'check_osd_tree_1az_aws'.
Returns:
bool: True, if the ceph osd tree is formed correctly. Else False
"""
osd_pods = pod.get_osd_pods()
# 'ceph osd tree' should show the new osds under right nodes/hosts
# Verification is different for 3 AZ and 1 AZ configs
ct_pod = pod.get_ceph_tools_pod()
tree_output = ct_pod.exec_ceph_cmd(ceph_cmd="ceph osd tree")
if config.ENV_DATA["platform"].lower() == constants.VSPHERE_PLATFORM:
return check_osd_tree_1az_vmware(tree_output, len(osd_pods))
aws_number_of_zones = 3
if config.ENV_DATA["platform"].lower() == constants.AWS_PLATFORM:
        # Parse the osd tree. If it contains a 'rack' node, it is an
        # AWS_1AZ cluster; otherwise it is an AWS_3AZ cluster.
        for node in tree_output["nodes"]:
            if node["name"] == "rack0":
                aws_number_of_zones = 1
if aws_number_of_zones == 1:
return check_osd_tree_1az_aws(tree_output, len(osd_pods))
else:
return check_osd_tree_3az_aws(tree_output, len(osd_pods))
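# Hypothetical usage sketch: a cluster-expansion test could simply assert on
# the platform-aware check above. The function name and message are
# illustrative assumptions.
def _example_verify_osd_tree_after_expansion():
    assert check_ceph_osd_tree(), "ceph osd tree is not formed correctly"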
def check_ceph_osd_tree_after_node_replacement():
"""
Check the ceph osd tree after the process of node replacement.
Returns:
bool: True if the ceph osd tree formation is correct,
        and all the OSDs are up. Else False
"""
ct_pod = pod.get_ceph_tools_pod()
osd_tree = ct_pod.exec_ceph_cmd(ceph_cmd="ceph osd tree")
if not check_ceph_osd_tree():
logger.warning("Incorrect ceph osd tree formation found")
return False
if not check_osds_in_hosts_are_up(osd_tree):
logger.warning("Not all the osd's are in status 'up'")
return False
return True
def silence_ceph_osd_crash_warning(osd_pod_name):
"""
Silence the osd crash warning of a specific osd pod
Args:
osd_pod_name (str): The name of the osd pod which we need to
silence the crash warning
Returns:
bool: True if it found the osd crash with name 'osd_pod_name'. False otherwise
"""
ct_pod = pod.get_ceph_tools_pod()
new_crash_objects_list = ct_pod.exec_ceph_cmd(ceph_cmd="ceph crash ls-new")
for crash_obj in new_crash_objects_list:
if crash_obj.get("utsname_hostname") == osd_pod_name:
logger.info(f"Found osd crash with name {osd_pod_name}")
obj_crash_id = crash_obj.get("crash_id")
crash_info = ct_pod.exec_ceph_cmd(
ceph_cmd=f"ceph crash info {obj_crash_id}"
)
logger.info(f"ceph crash info: {crash_info}")
logger.info("silence the osd crash warning")
ct_pod.exec_ceph_cmd(ceph_cmd=f"ceph crash archive {obj_crash_id}")
return True
logger.info(
f"Didn't find osd crash with name {osd_pod_name} in ceph crash warnings"
)
return False
def wait_for_silence_ceph_osd_crash_warning(osd_pod_name, timeout=900):
"""
Wait for 'timeout' seconds to check for the ceph osd crash warning,
and silence it.
Args:
osd_pod_name (str): The name of the osd pod which we need to
silence the crash warning
timeout (int): time in seconds to wait for silence the osd crash warning
Returns:
bool: True if it found the osd crash with name 'osd_pod_name'. False otherwise
"""
try:
for silence_old_osd_crash_warning in TimeoutSampler(
timeout=timeout,
sleep=30,
func=silence_ceph_osd_crash_warning,
osd_pod_name=osd_pod_name,
):
if silence_old_osd_crash_warning:
return True
except TimeoutError:
return False
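# Hypothetical usage sketch: after an OSD pod is respun, its old crash report
# can keep ceph in HEALTH_WARN until it is archived. The pod name and timeout
# below are illustrative assumptions.
def _example_archive_old_osd_crash_warning():
    old_osd_pod_name = "rook-ceph-osd-0-6c7f5d8b9d-abcde"  # assumed pod name
    if not wait_for_silence_ceph_osd_crash_warning(old_osd_pod_name, timeout=300):
        logger.warning("No crash warning found for the old osd pod")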
class CephClusterExternal(CephCluster):
"""
Handle all external ceph cluster related functionalities
Assumption: Cephcluster Kind resource exists
"""
def __init__(self):
self.POD = ocp.OCP(kind="Pod", namespace=config.ENV_DATA["cluster_namespace"])
self.CEPHCLUSTER = ocp.OCP(
kind="CephCluster", namespace=config.ENV_DATA["cluster_namespace"]
)
self.wait_for_cluster_cr()
self._cluster_name = self.cluster_resource.get("metadata").get("name")
self._namespace = self.cluster_resource.get("metadata").get("namespace")
self.cluster = ocs.OCS(**self.cluster_resource)
self.wait_for_nooba_cr()
@property
def cluster_name(self):
return self._cluster_name
@property
def namespace(self):
return self._namespace
@retry(IndexError, 10, 3, 1)
def wait_for_cluster_cr(self):
"""
        We have to wait for the cluster CR to appear, otherwise
        it leads to a 'list index out of range' error.
"""
cluster_cr = self.CEPHCLUSTER.get()
self.cluster_resource = cluster_cr.get("items")[0]
@retry((IndexError, AttributeError, TypeError), 100, 3, 1)
def wait_for_nooba_cr(self):
self._mcg_obj = MCG()
def cluster_health_check(self, timeout=300):
"""
        This is a comprehensive cluster health check,
        which includes checking pods and external ceph cluster health.
        Raises:
            exceptions.CephHealthException: If the cluster health is NOT OK
"""
sample = TimeoutSampler(timeout=timeout, sleep=3, func=self.is_health_ok)
if not sample.wait_for_func_status(result=True):
raise exceptions.CephHealthException("Cluster health is NOT OK")
self.wait_for_noobaa_health_ok()
self.validate_pvc()
def validate_pvc(self):
"""
Check whether all PVCs are in bound state
"""
ocs_pvc_obj = get_all_pvc_objs(namespace=self.namespace)
for pvc_obj in ocs_pvc_obj:
            assert (
                pvc_obj.status == constants.STATUS_BOUND
            ), f"PVC {pvc_obj.name} is not Bound"
logger.info(f"PVC {pvc_obj.name} is in Bound state")
| 2.1875 | 2 |
scenic/projects/baselines/detr/configs/detr_config.py | techthiyanes/scenic | 0 | 5295 | <reponame>techthiyanes/scenic<gh_stars>0
# pylint: disable=line-too-long
r"""Default configs for COCO detection using DETR.
"""
# pylint: enable=line-too-long
import copy
import ml_collections
_COCO_TRAIN_SIZE = 118287
NUM_EPOCHS = 300
def get_config():
"""Returns the configuration for COCO detection using DETR."""
config = ml_collections.ConfigDict()
config.experiment_name = 'coco_detection_detr'
# Dataset.
config.dataset_name = 'coco_detr_detection'
config.dataset_configs = ml_collections.ConfigDict()
config.dataset_configs.prefetch_to_device = 2
config.dataset_configs.shuffle_buffer_size = 10_000
config.dataset_configs.max_boxes = 99
config.data_dtype_str = 'float32'
# Model.
config.model_dtype_str = 'float32'
config.model_name = 'detr'
config.matcher = 'hungarian_cover_tpu'
config.hidden_dim = 256
config.num_queries = 100
config.query_emb_size = None # Same as hidden_size.
config.transformer_num_heads = 8
config.transformer_num_encoder_layers = 6
config.transformer_num_decoder_layers = 6
config.transformer_qkv_dim = 256
config.transformer_mlp_dim = 2048
config.transformer_normalize_before = False
config.backbone_num_filters = 64
config.backbone_num_layers = 50
config.dropout_rate = 0.
config.attention_dropout_rate = 0.1
# Loss.
config.aux_loss = True
config.bbox_loss_coef = 5.0
config.giou_loss_coef = 2.0
config.class_loss_coef = 1.0
config.eos_coef = 0.1
# Training.
config.trainer_name = 'detr_trainer'
config.optimizer = 'adam'
config.optimizer_configs = ml_collections.ConfigDict()
config.optimizer_configs.weight_decay = 1e-4
config.optimizer_configs.beta1 = 0.9
config.optimizer_configs.beta2 = 0.999
config.max_grad_norm = 0.1
config.num_training_epochs = NUM_EPOCHS
config.batch_size = 64
config.rng_seed = 0
decay_events = {500: 400}
# Learning rate.
steps_per_epoch = _COCO_TRAIN_SIZE // config.batch_size
config.lr_configs = ml_collections.ConfigDict()
config.lr_configs.learning_rate_schedule = 'compound'
config.lr_configs.factors = 'constant*piecewise_constant'
config.lr_configs.decay_events = [
decay_events.get(NUM_EPOCHS, NUM_EPOCHS * 2 // 3) * steps_per_epoch,
]
# Note: this is absolute (not relative):
config.lr_configs.decay_factors = [.1]
config.lr_configs.base_learning_rate = 1e-4
# Backbone training configs: optimizer and learning rate.
config.backbone_training = ml_collections.ConfigDict()
config.backbone_training.optimizer = copy.deepcopy(config.optimizer)
config.backbone_training.optimizer_configs = copy.deepcopy(
config.optimizer_configs)
config.backbone_training.lr_configs = copy.deepcopy(config.lr_configs)
config.backbone_training.lr_configs.base_learning_rate = 1e-5
# Pretrained_backbone.
config.load_pretrained_backbone = True
config.freeze_backbone_batch_stats = True
config.pretrained_backbone_configs = ml_collections.ConfigDict()
# Download pretrained ResNet50 checkpoints from here:
# https://github.com/google-research/scenic/tree/main/scenic/projects/baselines pylint: disable=line-too-long
config.pretrained_backbone_configs.checkpoint_path = 'path_to_checkpoint_of_resnet_50'
# Logging.
config.write_summary = True
config.xprof = True # Profile using xprof.
config.log_summary_steps = 50 # train summary steps
config.log_large_summary_steps = 1000 # Expensive summary operations freq
config.checkpoint = True # Do checkpointing.
config.checkpoint_steps = steps_per_epoch
config.debug_train = False # Debug mode during training.
config.debug_eval = False # Debug mode during eval.
return config
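# Hypothetical usage sketch: how the returned ConfigDict might be inspected or
# overridden for a small local run. The overridden values are illustrative
# assumptions, not recommended settings.
if __name__ == '__main__':
  cfg = get_config()
  cfg.batch_size = 8  # assumed smaller batch for local debugging
  cfg.num_training_epochs = 1  # assumed short run
  print(cfg.model_name, cfg.lr_configs.base_learning_rate)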
| 1.695313 | 2 |
tests/conftest.py | artembashlak/share-youtube-to-mail | 0 | 5296 | import pytest
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
@pytest.fixture(scope="function")
def browser():
options = webdriver.ChromeOptions()
options.add_argument('ignore-certificate-errors')
options.add_argument("--headless")
options.add_argument('--no-sandbox')
options.add_argument('start-maximized')
options.add_argument('disable-infobars')
options.add_argument("--disable-extensions")
driver = webdriver.Chrome(ChromeDriverManager().install(), options=options)
yield driver
driver.quit()
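# Hypothetical example of a test consuming the 'browser' fixture above. Such a
# test would live in a separate test module (pytest does not collect tests from
# conftest.py); the URL and title check are illustrative assumptions.
#
# def test_open_page(browser):
#     browser.get("https://example.com")
#     assert "Example" in browser.title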
| 2.25 | 2 |
prepare_cicero_peaks.py | lab-medvedeva/SCABFA-feature-selection | 0 | 5297 | <gh_stars>0
from scale.dataset import read_mtx
from argparse import ArgumentParser
import pandas as pd
import numpy as np
import os
def parse_args():
parser = ArgumentParser('Preparing raw peaks from cicero pipeline')
parser.add_argument('--dataset_path', help='Path to Scale dataset: count, feature, barcode folder')
parser.add_argument('--label_path', help='Path to cell labels')
parser.add_argument('--num_peaks_threshold', type=int, help='Num peaks to filter')
parser.add_argument('--output_path', help='Path to save peaks in bed folder')
parser.add_argument('--suffix', help='Suffix to path')
return parser.parse_args()
def main():
args = parse_args()
labels = pd.read_csv(args.label_path, sep='\t', header=None)
count, feature, barcode = read_mtx(args.dataset_path)
os.makedirs(args.output_path, exist_ok=True)
cell_types = labels[1].unique()
cell_barcodes = {}
for cell_type in cell_types:
cell_barcodes[cell_type] = list(labels[labels[1] == cell_type].index)
for cell_type, barcode in cell_barcodes.items():
cell_by_feature = np.asarray(count[barcode].sum(axis=0)).flatten()
feature_threshold = cell_by_feature[np.argsort(cell_by_feature)[-args.num_peaks_threshold]]
print(f'{cell_type}: {feature_threshold}')
filtered_features = (cell_by_feature > 0) & (cell_by_feature >= feature_threshold)
print(f'{cell_type}: filtered {np.sum(filtered_features)}')
output = pd.DataFrame(feature[filtered_features])
# print(cell_type, cell_by_feature[np.argsort(cell_by_feature)[-args.num_peaks_threshold:]][:10])
output['chr'] = output[0].apply(lambda x: x.split('_')[0])
output['start'] = output[0].apply(lambda x: x.split('_')[1])
output['end'] = output[0].apply(lambda x: x.split('_')[2])
output.drop(0, axis=1).to_csv(
os.path.join(args.output_path, f'{cell_type.replace(" ", "_").replace("/", "_")}_{args.suffix}.bed'),
header=None,
index=None,
sep='\t'
)
if __name__ == '__main__':
main()
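# Hypothetical invocation sketch; the paths, threshold, and suffix below are
# illustrative assumptions about how the script might be run:
#   python prepare_cicero_peaks.py \
#       --dataset_path data/scale_dataset \
#       --label_path data/cell_labels.tsv \
#       --num_peaks_threshold 5000 \
#       --output_path cicero_peaks_bed \
#       --suffix raw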
| 2.734375 | 3 |
crusoe_observe/neo4j-client/neo4jclient/CMSClient.py | CSIRT-MU/CRUSOE | 3 | 5298 | <gh_stars>1-10
from neo4jclient.AbsClient import AbstractClient
class CMSClient(AbstractClient):
def __init__(self, password, **kwargs):
super().__init__(password=password, **kwargs)
def get_domain_names(self):
"""
Gets all domain names from database.
:return: domain names in JSON-like form
"""
return self._run_query("MATCH(n:DomainName) RETURN n.domain_name AS domains")
def get_ips_and_domain_names(self):
"""
Gets all domain names with corresponding IPs from database.
:return: IPs and DomainNames in JSON-like form
"""
return self._run_query("MATCH(n:IP)-[:RESOLVES_TO]-(y:DomainName {tag: \'A/AAAA\'}) "
"RETURN { IP: n.address , Domain: y.domain_name } AS entry")
def create_cms_component(self, path):
"""
Create nodes and relationships for cms client.
-------------
Antivirus_query:
1. Parse csv given in path.
2. Create node of type [:SoftwareVersion, :IP] if not already exists.
3. Create node of type [:Host], relationship of type [:ON] with parameters [start,end] if not already exists.
Otherwise just update information about time on parameters [start,end].
4. Create node of type [:Node], relationship of type [:HAS_ASSIGNED].
5. Create relationship of type [:IS_A] between :Host and :Node if not already exists.
:param path: Path to the JSON with values
:return:
"""
path = f'file:///{path}'
query = "CALL apoc.load.json($path) " \
"YIELD value " \
"UNWIND value.data AS data " \
"UNWIND data.cpe as cpe " \
"WITH data.ip as ip_ad, cpe, value.time as theTime " \
"MERGE (ipadd:IP {address: ip_ad}) " \
"MERGE (softVersion:SoftwareVersion {version: cpe, tag: \'cms_client\'}) " \
"MERGE (ipadd)<-[:HAS_ASSIGNED]-(nod:Node) " \
"MERGE (nod)-[:IS_A]->(host:Host) " \
"MERGE (softVersion)-[r:ON]->(host) " \
"ON CREATE SET r.start = datetime(theTime),r.end = datetime(theTime) " \
"ON MATCH SET r.end = datetime(theTime)"
params = {'path': path}
self._run_query(query, **params)
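# Hypothetical usage sketch; the password and JSON file name are illustrative
# assumptions, and the connection details are expected to come from
# AbstractClient's defaults or the **kwargs it accepts.
if __name__ == "__main__":
    client = CMSClient(password="<PASSWORD>")  # assumed credentials
    print(client.get_domain_names())
    client.create_cms_component("cms_scan.json")  # assumed file in Neo4j's import dir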
| 2.59375 | 3 |
location.py | jonasjucker/wildlife-telegram | 0 | 5299 | import time
from datetime import date,datetime
from astral import LocationInfo
from astral.sun import sun
class CamLocation:
def __init__(self,lat,lon,info,country,timezone):
self.info = LocationInfo(info, country, timezone, lat, lon)
def is_night(self):
s = sun(self.info.observer, date=date.today(),tzinfo=self.info.timezone)
sunrise = s["sunrise"].timestamp()
sunset = s["sunset"].timestamp()
time_now = datetime.now().timestamp()
if time_now > sunrise and time_now < sunset:
return False
else:
return True
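# Hypothetical usage sketch; the coordinates, place name, and timezone are
# illustrative assumptions for a camera site.
if __name__ == "__main__":
    cam = CamLocation(47.37, 8.54, "cam-site", "Switzerland", "Europe/Zurich")
    print("night" if cam.is_night() else "day")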
| 3.296875 | 3 |