import json
import os, sys
import markdown
from flask import Flask, render_template
from flask_flatpages import FlatPages
import urllib
from flask_frozen import Freezer
DEBUG = True
FLATPAGES_AUTO_RELOAD = DEBUG
FLATPAGES_EXTENSION = '.md'
app = Flask(__name__)
app.config.from_object(__name__)
pages = FlatPages(app)
freezer = Freezer(app)
def readJSON(file):
data=[]
print(file)
with open("data/"+file+".json", encoding='utf-8') as data_file:
data+=json.loads(data_file.read())
return data
@freezer.register_generator
def noten_urls():
    # Yield (endpoint, values) pairs so this URL generator does not shadow
    # the 'noten' view function defined below.
    for link in links:
        yield "noten", {"name": link}
@app.route('/<name>/')
def noten(name):
data=readJSON(name)
    return render_template('noten.html', CONST=CONST, facher=facher, data=data)
@app.route('/')
def home():
    return render_template('home.html', CONST=CONST, facher=facher)
CONST={
"url":"http://www.p4w5.eu/noten/"
}
index = readJSON("index")
links=[v["files"] for v in index]
links+=[v["files"] for f in index if "vertiefungen" in f for v in f["vertiefungen"]]
facher=readJSON("index")
if __name__ == '__main__':
freezer.freeze()
app.run(port=5000)
|
import sys
from setuptools import setup
install_requires = ["h2>=2.2.0,<3.0.0"]
if sys.version_info < (3, 5):
install_requires.append("typing")
setup(
name='asyncio-apns',
version='0.0.1',
install_requires=install_requires,
packages=['asyncio_apns'],
url='https://github.com/etataurov/asyncio-apns',
license='MIT',
author='etataurov',
author_email='[email protected]',
description='asyncio client for Apple Push Notification Service'
)
|
from datahub.dataset.core.pagination import DatasetCursorPagination
class AdvisersDatasetViewCursorPagination(DatasetCursorPagination):
"""
Cursor Pagination for AdvisersDatasetView
"""
ordering = ('date_joined', 'pk')
|
from time import time
import struct
t0=time()
total = 0
with open('test_1m.bin','rb') as f:
while True:
raw = f.read(20*8)
if not raw: break
rec = struct.unpack('q'*20,raw)
for x in rec:
total += x
print(total)
print(time()-t0)
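# The reader above assumes records of 20 native-endian 8-byte integers ('q').
# A minimal sketch for generating a compatible file is shown below, commented
# out so the benchmark itself is unaffected; the record count of 50_000 is an
# assumption chosen to give roughly one million values:
# with open('test_1m.bin', 'wb') as f:
#     for _ in range(50_000):
#         f.write(struct.pack('q' * 20, *range(20)))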
|
#!/usr/bin/env python
# File : tokenize.py
# Author : Douglas Anderson
# Description: Driver for my parser implementation
import os, sys
import csv
import subprocess
import tempfile
from Token import Token
scanner_path = "./scanner"
def scan_text(tweetid, label, intext):
    tokens = []
    # os.tmpfile() only exists on Python 2; use the tempfile module instead.
    with tempfile.TemporaryFile() as tmp_f:
        tmp_f.write(intext.encode("utf-8"))
        tmp_f.seek(0)
        output = subprocess.check_output([scanner_path], stdin=tmp_f)
    outlines = output.decode("utf-8").split("\n")
for line in outlines:
try:
tokentype, text = line.split("\t")
tokens.append(Token(tweetid, label, tokentype, text))
except ValueError:
pass
return tokens
def create_tweetid(sid, uid):
return sid + "-" + uid
def open_tweets_file(filename, start, end):
count = 0
tokens = []
with open(filename) as f:
r = csv.DictReader(f, delimiter="\t")
for tweet in r:
if count >= start and count < end:
#print
#print tweet["text"]
newtokens = scan_text(create_tweetid(tweet["sid"], tweet["uid"]), tweet["class"], tweet["text"])
tokens += newtokens
count += 1
return tokens
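# Example invocation (illustrative only; the file name and row range are
# assumptions, not part of the original driver):
# tokens = open_tweets_file("tweets.tsv", 0, 100)
# print(len(tokens))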
|
import os
import pathlib
import subprocess
from typing import Sequence
import click
import virtualenv
from pygit2 import clone_repository
class InvalidName(ValueError):
"""
Name of sample project invalid.
"""
class AlreadyExists(KeyError):
"""
Sample project already exists.
"""
class CantCheckout(KeyError):
"""
Couldn't check out sample branch.
"""
DEFAULT_SAMPLE_BRANCH = "sample-project-basic"
def create_sample_project(
name: str,
from_branch: str = DEFAULT_SAMPLE_BRANCH,
kd_repo_path: str = "../..",
) -> None:
"""
Create a kedro sample project from repo branch with skeleton.
Args:
name: name of sample project
from_branch: name of branch for kedro starter
"""
if name == "":
raise InvalidName("pass valid directory name")
q_dir = pathlib.Path(f"tmp/{name}")
if q_dir.exists():
raise AlreadyExists(f"Path exists: {q_dir}")
q_dir.mkdir(parents=True)
try:
os.chdir(q_dir)
try:
clone_repository(
"https://github.com/FactFiber/kedro-dvc.git",
".",
checkout_branch=from_branch,
)
except Exception as exc:
raise CantCheckout(f"result: {exc}")
virtualenv.cli_run([f"env/{name}"])
# # using virtualenv.create_environment no longer works
activate_this_file = f"env/{name}/bin/activate_this.py"
_exec_file(activate_this_file)
subprocess.check_call(["pip", "install", "--upgrade", "pip"])
if str(kd_repo_path) != "../..":
# for local dev, we install in tmp/{name} under project
# if we are installing elsewhere, fix requirements for path
new_req = subprocess.check_output(
["tail", "-n", "+2", "src/requirements.txt"]
).decode()
new_req = f"-e {kd_repo_path}\n{new_req}"
pathlib.Path("src/requirements.txt").write_text(new_req)
subprocess.check_call(["pip", "install", "-r", "src/requirements.txt"])
finally:
# make sure we leave shell, return to original directory after finished
try:
subprocess.call(["deactivate"])
except FileNotFoundError:
pass
os.chdir("../..")
def _exec_file(activate_this_file: str) -> None:
exec(
compile(
open(activate_this_file, "rb").read(),
activate_this_file,
"exec",
),
dict(__file__=activate_this_file),
)
@click.command(name="create-sample-project") # type: ignore
@click.argument("name") # type: ignore
@click.argument(
"branch", required=False, default=DEFAULT_SAMPLE_BRANCH # type: ignore
)
@click.argument("kd_path", required=False, default="../..") # type: ignore
@click.pass_context # type: ignore
def create_sample_project_cmd(
ctx: click.Context, name: str, branch: str, kd_path: str
) -> None:
"""
Create sample project in tmp/<name>.
NAME: name of sample project subdirectory
BRANCH: name of repo branch with sample content
"""
print(f"creating sample project {name} from branch {branch}")
try:
create_sample_project(name, from_branch=branch, kd_repo_path=kd_path)
except CantCheckout:
param = _get_param(ctx.command.params, "branch")
raise click.BadParameter("Can't check out branch", param=param)
except AlreadyExists as exc:
raise click.UsageError(message=str(exc))
print(f'To use the sample project run "source env/{name}/bin/activate"')
def _get_param(
param_list: Sequence[click.Parameter], name: str
) -> click.Parameter:
    return list(filter(lambda p: p.name == name, param_list))[0]
if __name__ == "__main__": # pragma: no cover
create_sample_project_cmd()
|
"""
Tools for working with the bytecode for a module. This currently just
defines a function for extracting information about import statements
and the use of global names.
"""
import collections
import dis
import types
from typing import Deque, Dict, Iterator, List, Optional, Set, Tuple
from ._importinfo import ImportInfo, create_importinfo
def _all_code_objects(
code: types.CodeType,
) -> Iterator[Tuple[types.CodeType, List[types.CodeType]]]:
"""
Yield all code objects directly and indirectly referenced from
*code* (including *code* itself).
    This function explicitly manages a work queue and does not recurse
to avoid exhausting the stack.
Args:
code: A code object
Returns:
An iterator that yields tuples *(child_code, parents)*, where *parents*
        is a list of all code objects in the reference chain from *code*.
"""
work_q: Deque[types.CodeType] = collections.deque()
work_q.append(code)
parents: Dict[types.CodeType, List[types.CodeType]] = {}
while work_q:
current = work_q.popleft()
yield current, parents.get(current, [])
for value in current.co_consts:
if isinstance(value, types.CodeType):
work_q.append(value)
parents[value] = parents.get(current, []) + [value]
def _extract_single(code: types.CodeType, is_function_code: bool, is_class_code: bool):
"""
Extract import information from a single bytecode object (without recursing
into child objects).
Args:
code: The code object to process
is_function_code: True if this is a code object for a function or
anything in a function.
is_class_code: True if this is the code object for a class
"""
instructions = list(dis.get_instructions(code))
imports: List[ImportInfo] = []
globals_written: Set[str] = set()
globals_read: Set[str] = set()
func_codes: Set[types.CodeType] = set()
class_codes: Set[types.CodeType] = set()
fromvalues: Optional[List[Tuple[str, Optional[str]]]]
for offset, inst in enumerate(instructions):
if inst.opname == "IMPORT_NAME":
from_inst_offset = 1
if instructions[offset - from_inst_offset].opname == "EXTENDED_ARG":
from_inst_offset += 1
level_inst_offset = from_inst_offset + 1
if instructions[offset - level_inst_offset].opname == "EXTENDED_ARG":
level_inst_offset += 1
assert (
instructions[offset - from_inst_offset].opname == "LOAD_CONST"
), instructions[offset - 1].opname
assert instructions[offset - level_inst_offset].opname == "LOAD_CONST", (
instructions[offset - 2].opname,
code,
)
from_offset = instructions[offset - from_inst_offset].arg
assert from_offset is not None
level_offset = instructions[offset - level_inst_offset].arg
assert level_offset is not None
fromlist = code.co_consts[from_offset]
level = code.co_consts[level_offset]
assert fromlist is None or isinstance(fromlist, tuple)
name_offset = inst.arg
assert name_offset is not None
import_module = code.co_names[name_offset]
if fromlist is not None:
fromvalues = [(nm, None) for nm in fromlist]
else:
fromvalues = None
imports.append(
create_importinfo(
(import_module, None),
fromvalues,
level,
is_function_code,
False,
False,
)
)
if not (is_function_code or is_class_code):
if fromlist is not None:
globals_written |= set(fromlist) - {"*"}
        elif inst.opname in ("STORE_NAME", "STORE_GLOBAL"):
if is_class_code and inst.opname == "STORE_NAME":
continue
const_offset = inst.arg
assert const_offset is not None
globals_written.add(code.co_names[const_offset])
elif inst.opname in ("LOAD_GLOBAL", "LOAD_NAME"):
if is_class_code and inst.opname == "LOAD_NAME":
continue
const_offset = inst.arg
assert const_offset is not None
globals_read.add(code.co_names[const_offset])
elif inst.opname == "MAKE_FUNCTION":
const_offset = instructions[offset - 2].arg
assert const_offset is not None
if offset >= 3 and instructions[offset - 3].opname == "LOAD_BUILD_CLASS":
class_codes.add(code.co_consts[const_offset])
else:
func_codes.add(code.co_consts[const_offset])
return imports, globals_written, globals_read, func_codes, class_codes
def _is_code_for_function(
code: types.CodeType, parents: List[types.CodeType], func_codes: Set[types.CodeType]
):
"""
Check if this is the code object for a function or inside a function
Args:
code: The code object to check
parents: List of parents for this code object
func_codes: Set of code objects that are directly for functions
"""
return code in func_codes or any(p in func_codes for p in parents)
def extract_bytecode_info(
code: types.CodeType,
) -> Tuple[List[ImportInfo], Set[str], Set[str]]:
"""
Extract interesting information from the code object for a module or script
Returns a tuple of three items:
1) List of all imports
2) A set of global names written
    3) A set of global names read
"""
# Note: This code is iterative to avoid exhausting the stack in
    # pathological code bases (in particular deeply nested functions)
all_imports: List[ImportInfo] = []
all_globals_written: Set[str] = set()
all_globals_read: Set[str] = set()
all_func_codes: Set[types.CodeType] = set()
all_class_codes: Set[types.CodeType] = set()
for current, parents in _all_code_objects(code):
(
imports,
globals_written,
globals_read,
func_codes,
class_codes,
) = _extract_single(
current,
_is_code_for_function(current, parents, all_func_codes),
current in all_class_codes,
)
all_imports += imports
all_globals_written |= globals_written
all_globals_read |= globals_read
all_func_codes |= func_codes
all_class_codes |= class_codes
return all_imports, all_globals_written, all_globals_read
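# Illustrative usage sketch (commented out; this module uses a relative import,
# so it must be imported as part of its package rather than run directly):
# code_obj = compile("import os\nfrom sys import path\nX = os.getcwd()", "<example>", "exec")
# imports, globals_written, globals_read = extract_bytecode_info(code_obj)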
|
def contact_card(name, age, car_model):
return f"{name} is {age} and drives a {car_model}"
# Calling with positional arguments, in order
contact_card("owais", 28, "bonusCar")
# When calling out of order, each argument has to be passed by name
contact_card(age=28, car_model="f1", name="owais")
# A positional argument may not follow a keyword argument; the call below is
# commented out because it raises a SyntaxError:
# contact_card(age=28, "keith", car_model="civic")
#   File "<stdin>", line 1
#   SyntaxError: positional argument follows keyword argument
#default arguments
def can_drive(age, drive_age=16):
return age >= drive_age
can_drive(15) #False
|
from threading import Thread
from os import system
def executar_rp(exe: str):
    '''Run an external application.
    Parameters:
        exe (str): string with the application and its parameters
    '''
try:
system(exe)
except Exception as er:
print('executar_rp:')
print(er)
def outraRota(funcao, *args: tuple):
    '''Run a function in parallel (in a Thread).
    Parameters:
        args (tuple): multiple parameters
            (arg1, arg2, arg3...)
    '''
try:
        t = Thread(target=funcao, args=args)
t.daemon = True
t.start()
except Exception as er:
print('outraRota:')
print(er)
def executar(exe: str):
    '''Run an external application in parallel.
    Parameters:
        exe (str): string with the application and its parameters
    '''
try:
outraRota(executar_rp, exe)
except Exception as er:
print('executar:')
print(er)
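# Illustrative usage (the command string below is an assumption, not part of
# the original module):
# executar('notepad.exe')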
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 18 00:06:28 2019.
@author: mtageld
"""
import pytest
import os
import numpy as np
import tempfile
import shutil
from pandas import read_csv
from histomicstk.saliency.tissue_detection import (
get_slide_thumbnail, get_tissue_mask,
get_tissue_boundary_annotation_documents)
from histomicstk.annotations_and_masks.annotation_and_mask_utils import (
delete_annotations_in_slide)
from histomicstk.saliency.cellularity_detection_thresholding import (
Cellularity_detector_thresholding)
from histomicstk.saliency.cellularity_detection_superpixels import (
Cellularity_detector_superpixels)
import sys
thisDir = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.join(thisDir, '../../../tests'))
# import htk_test_utilities as utilities # noqa
from htk_test_utilities import girderClient, getTestFilePath # noqa
# # for prototyping
# from tests.htk_test_utilities import _connect_to_existing_local_dsa
# girderClient = _connect_to_existing_local_dsa()
class Cfg:
def __init__(self):
self.gc = None
self.iteminfo = None
self.labeled = None
self.GTcodes = None
self.logging_savepath_cdt = tempfile.mkdtemp()
self.logging_savepath_cds = tempfile.mkdtemp()
cfg = Cfg()
@pytest.mark.usefixtures('girderClient') # noqa
def test_prep(girderClient): # noqa
cfg.gc = girderClient
cfg.iteminfo = cfg.gc.get('/item', parameters={
'text': "TCGA-A2-A0YE-01Z-00-DX1"})[0]
cfg.GTcodes = read_csv(getTestFilePath('saliency_GTcodes.csv'))
class TestTissueDetection():
"""Test methods for getting ROI mask from annotations."""
def test_get_tissue_mask(self):
"""Test get_tissue_mask()."""
thumbnail_rgb = get_slide_thumbnail(cfg.gc, cfg.iteminfo['_id'])
cfg.labeled, mask = get_tissue_mask(
thumbnail_rgb, deconvolve_first=True,
n_thresholding_steps=1, sigma=1.5, min_size=30)
assert cfg.labeled.shape == (156, 256)
assert len(np.unique(cfg.labeled)) == 11
def test_get_tissue_boundary_annotation_documents(self):
"""Test get_tissue_boundary_annotation_documents()."""
annotation_docs = get_tissue_boundary_annotation_documents(
cfg.gc, slide_id=cfg.iteminfo['_id'], labeled=cfg.labeled)
assert 'elements' in annotation_docs[0].keys()
assert len(annotation_docs[0]['elements']) == 10
# test delete existing annotations in slide
delete_annotations_in_slide(cfg.gc, cfg.iteminfo['_id'])
# check that it posts without issues
resps = [
cfg.gc.post(
"/annotation?itemId=" + cfg.iteminfo['_id'], json=doc)
for doc in annotation_docs
]
assert all(['annotation' in resp for resp in resps])
class TestCellularityDetection(object):
"""Test methods for getting cellularity."""
def test_cellularity_detection_thresholding(self):
"""Test Cellularity_detector_thresholding()."""
# run cellularity detector
cdt = Cellularity_detector_thresholding(
cfg.gc, slide_id=cfg.iteminfo['_id'],
GTcodes=cfg.GTcodes, MAG=1.0,
verbose=1, monitorPrefix='test',
logging_savepath=cfg.logging_savepath_cdt)
tissue_pieces = cdt.run()
# check
assert len(tissue_pieces) == 3
assert all([
j in tissue_pieces[0].__dict__.keys() for j in
('labeled', 'ymin', 'xmin', 'ymax', 'xmax')
])
# cleanup
shutil.rmtree(cfg.logging_savepath_cdt)
def test_cellularity_detection_superpixels(self):
"""Test Cellularity_detector_superpixels()."""
# from the ROI in Amgad et al, 2019
cnorm_main = {
'mu': np.array([8.74108109, -0.12440419, 0.0444982]),
'sigma': np.array([0.6135447, 0.10989545, 0.0286032]),
}
# run cellularity detector
cds = Cellularity_detector_superpixels(
cfg.gc, slide_id=cfg.iteminfo['_id'],
MAG=1.0, compactness=0.1, spixel_size_baseMag=256 * 256,
max_cellularity=40,
visualize_spixels=True, visualize_contiguous=True,
get_tissue_mask_kwargs={
'deconvolve_first': False,
'n_thresholding_steps': 2,
'sigma': 1.5,
'min_size': 500, },
verbose=1, monitorPrefix='test',
logging_savepath=cfg.logging_savepath_cds)
cds.set_color_normalization_values(
mu=cnorm_main['mu'], sigma=cnorm_main['sigma'], what='main')
tissue_pieces = cds.run()
# check
assert len(tissue_pieces) == 2
assert all([
j in tissue_pieces[0].__dict__.keys()
for j in ('tissue_mask', 'ymin', 'xmin', 'ymax', 'xmax',
'spixel_mask', 'fdata', 'cluster_props')
])
assert len(tissue_pieces[0].cluster_props) == 5
# cleanup
shutil.rmtree(cfg.logging_savepath_cds)
|
# -*- coding: utf-8 -*-
"""
Catch-up TV & More
Copyright (C) 2017 SylvainCecchetto
This file is part of Catch-up TV & More.
Catch-up TV & More is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
Catch-up TV & More is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with Catch-up TV & More; if not, write to the Free Software Foundation,
Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
# The unicode_literals import only has
# an effect on Python 2.
# It makes string literals unicode, as in Python 3.
from __future__ import unicode_literals
from codequick import Route, Resolver, Listitem, utils, Script
from resources.lib.labels import LABELS
from resources.lib import web_utils
from resources.lib import download
from resources.lib.menu_utils import item_post_treatment
import htmlement
import json
import re
import requests
import urlquick
from kodi_six import xbmcgui
# TO DO
# Fix Live TV
URL_ROOT = 'https://www.nrj-play.fr'
URL_REPLAY = URL_ROOT + '/%s/replay'
# channel_name (nrj12, ...)
URL_COMPTE_LOGIN = 'https://user-api2.nrj.fr/api/5/login'
# TO DO add account for using Live Direct
URL_LIVE_WITH_TOKEN = URL_ROOT + '/compte/live?channel=%s'
# channel (nrj12, ...) -
# call this url after get session (url live with token inside this page)
def replay_entry(plugin, item_id, **kwargs):
"""
First executed function after replay_bridge
"""
return list_categories(plugin, item_id)
@Route.register
def list_categories(plugin, item_id, **kwargs):
"""
Build categories listing
- Tous les programmes
- Séries
- Informations
- ...
"""
resp = urlquick.get(URL_REPLAY % item_id)
root = resp.parse("ul", attrs={"class": "subNav-menu hidden-xs"})
for category_datas in root.iterfind(".//a"):
category_title = category_datas.text.strip()
category_url = URL_ROOT + category_datas.get('href')
item = Listitem()
item.label = category_title
item.set_callback(list_programs,
item_id=item_id,
category_url=category_url)
item_post_treatment(item)
yield item
@Route.register
def list_programs(plugin, item_id, category_url, **kwargs):
"""
Build programs listing
- Les feux de l'amour
- ...
"""
resp = urlquick.get(category_url)
root = resp.parse()
for program_datas in root.iterfind(".//div[@class='linkProgram-visual']"):
program_title = program_datas.find('.//img').get('alt')
program_url = URL_ROOT + program_datas.find('.//a').get('href')
program_image = ''
if program_datas.find('.//source').get('data-srcset') is not None:
program_image = program_datas.find('.//source').get('data-srcset')
else:
program_image = program_datas.find('.//source').get('srcset')
item = Listitem()
item.label = program_title
item.art['thumb'] = item.art['landscape'] = program_image
item.set_callback(list_videos,
item_id=item_id,
program_title=program_title,
program_url=program_url)
item_post_treatment(item)
yield item
@Route.register
def list_videos(plugin, item_id, program_title, program_url, **kwargs):
resp = urlquick.get(program_url)
root = resp.parse()
if len(root.findall(".//figure[@class='thumbnailReplay-visual']")) > 0:
for video_datas in root.findall(
".//figure[@class='thumbnailReplay-visual']"):
video_title = program_title + ' - ' + video_datas.find(
'.//img').get('alt')
video_url = URL_ROOT + video_datas.find('.//a').get('href')
video_image = ''
if video_datas.find('.//source').get('data-srcset') is not None:
video_image = video_datas.find('.//source').get('data-srcset')
else:
video_image = video_datas.find('.//source').get('srcset')
item = Listitem()
item.label = video_title
item.art['thumb'] = item.art['landscape'] = video_image
item.set_callback(get_video_url,
item_id=item_id,
video_url=video_url)
item_post_treatment(item, is_playable=True, is_downloadable=True)
yield item
else:
video_title = root.find(".//div[@class='nrjVideo-player']").find(
'.//meta').get('alt')
video_url = program_url
video_image = root.find(".//div[@class='nrjVideo-player']").find(
'.//meta').get('content')
item = Listitem()
item.label = video_title
item.art['thumb'] = item.art['landscape'] = video_image
item.set_callback(get_video_url,
item_id=item_id,
video_url=video_url)
item_post_treatment(item, is_playable=True, is_downloadable=True)
yield item
@Resolver.register
def get_video_url(plugin,
item_id,
video_url,
download_mode=False,
**kwargs):
    # Only one format per video is available (no need for QUALITY)
resp = urlquick.get(video_url)
root = resp.parse("div", attrs={"class": "nrjVideo-player"})
stream_url = ''
for stream in root.iterfind(".//meta"):
if 'mp4' in stream.get('content'):
stream_url = stream.get('content')
if download_mode:
return download.download_video(stream_url)
return stream_url
def live_entry(plugin, item_id, **kwargs):
return get_live_url(plugin, item_id, item_id.upper())
@Resolver.register
def get_live_url(plugin, item_id, video_id, **kwargs):
# Live TV Not working / find a way to dump html received
# Create session
# KO - session_urlquick = urlquick.Session()
session_requests = requests.session()
# Build PAYLOAD
payload = {
"email": plugin.setting.get_string('nrj.login'),
"password": plugin.setting.get_string('nrj.password')
}
headers = {
'accept': 'application/json, text/javascript, */*; q=0.01',
'origin': 'https://www.nrj-play.fr',
'referer': 'https://www.nrj-play.fr/'
}
# LOGIN
# KO - resp2 = session_urlquick.post(
# URL_COMPTE_LOGIN, data=payload,
# headers={'User-Agent': web_utils.get_ua, 'referer': URL_COMPTE_LOGIN})
resp2 = session_requests.post(URL_COMPTE_LOGIN,
data=payload,
headers=headers)
if 'error alert alert-danger' in repr(resp2.text):
plugin.notify('ERROR', 'NRJ : ' + plugin.localize(30711))
return False
# GET page with url_live with the session logged
# KO - resp3 = session_urlquick.get(
# URL_LIVE_WITH_TOKEN % item_id,
# headers={'User-Agent': web_utils.get_ua, 'referer': URL_LIVE_WITH_TOKEN % item_id})
resp3 = session_requests.get(URL_LIVE_WITH_TOKEN % (item_id),
headers=dict(referer=URL_LIVE_WITH_TOKEN %
(item_id)))
parser = htmlement.HTMLement()
parser.feed(resp3.text)
root = parser.close()
live_data = root.find(".//div[@class='player']")
url_live_json = live_data.get('data-options')
url_live_json_jsonparser = json.loads(url_live_json)
return url_live_json_jsonparser["file"]
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
'''
Defining class attributes (Python 2 example).
'''
class Programer(object):
    hobby = 'Play computer'
    def __init__(self, name, age, weight):
        self.name = name  # public attribute
        self._age = age  # "protected" by convention; behaves much like a public attribute
        self.__weight = weight  # name-mangled "private" attribute, only directly accessible inside the class
def get_weight(self):
return self.__weight
if __name__ == '__main__':
programer = Programer('zhang',25,75)
print dir(programer)
print programer.__dict__
print programer.get_weight()
print programer._Programer__weight
'''
['_Programer__weight', '__class__', '__delattr__', '__dict__', '__doc__', '__format__', '__getattribute__', '__hash__', '__init__', '__module__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__str__', '__subclasshook__', '__weakref__', '_age', 'get_weight', 'hobby', 'name']
{'_age': 25, '_Programer__weight': 75, 'name': 'zhang'}
75
75
'''
|
import base64
import copy
import hashlib
import logging
import os
from datetime import datetime
from typing import Dict, Optional, Union
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from lxml import etree
from oscrypto.asymmetric import Certificate, load_certificate
from .exceptions import SignatureVerificationError
from .ocsp import OCSP
from .signature_verifier import verify
from .tsa import TSA
logger = logging.getLogger(__name__)
def get_utc_time():
# For testing purposes, as we can't patch a datetime object
return datetime.utcnow()
class XmlSignature:
"""
Usage:
# Create a signature XAdES structure
sig = XmlSignature.create() \
.add_document('test.pdf', b'PDF data', 'application/pdf') \
.set_certificate(file_or_binary_data) \
.update_signed_info()
# Get the actual signature from e.g. smartid, out of scope
signature_value = sign(id_code, sig.digest())
# Get OCSP and TSA confirmation
result_xml = sig.set_signature_value(signature_value) \
.verify() \
.set_ocsp_response(...) \
.set_timestamp_response(...) \
.dump()
# or:
from .utils import finalize_signature
finalize_signature(sig, lt_ts=True)
"""
SIGNATURE_TEMPLATE = os.path.join(os.path.dirname(__file__), "templates", "signature.xml")
NAMESPACES = {
"asic": "http://uri.etsi.org/02918/v1.2.1#",
"ds": "http://www.w3.org/2000/09/xmldsig#",
"xades": "http://uri.etsi.org/01903/v1.3.2#",
}
C14N_METHODS = (
"http://www.w3.org/TR/2001/REC-xml-c14n-20010315", # this is the REQUIRED c14n algorithm [xmldsig]
"http://www.w3.org/2001/10/xml-exc-c14n#",
"http://www.w3.org/2006/12/xml-c14n11",
)
DIGEST_ALGORITHMS = {
"sha256": "http://www.w3.org/2001/04/xmlenc#sha256",
}
# https://www.w3.org/TR/xmldsig-core1/#sec-AlgID
SIGNATURE_ALGO_ID_TEMPLATE = "http://www.w3.org/2001/04/xmldsig-more#{algo}"
SIGNATURE_ALGORITHMS = [
"rsa-sha256", # the default one, is embedded in the XML template.
"ecdsa-sha256",
"rsa-sha384",
"ecdsa-sha384",
"rsa-sha512",
"ecdsa-sha512",
]
SIGNED_PROPERTIES_TYPE = (
# Standards are ambiguous about this:
"http://uri.etsi.org/01903#SignedProperties", # BDOC 2.1.2 mandates this
"http://uri.etsi.org/01903/v1.1.1#SignedProperties", # and this is as per https://www.w3.org/TR/XAdES/
)
NEW_SIGNATURE_ID = "S1" # This is arbitrary but used a few times in the XAdES structure.
def __init__(self, xml_or_binary_data):
if isinstance(xml_or_binary_data, (etree._Element, etree._ElementTree)):
self.xml = xml_or_binary_data
else:
parser = etree.XMLParser(remove_blank_text=True, remove_comments=True)
try:
self.xml = etree.XML(xml_or_binary_data, parser=parser)
except ValueError:
logger.exception("Failed to load XML document: %s", xml_or_binary_data)
raise
if self.xml.tag != "{%s}XAdESSignatures" % self.NAMESPACES["asic"]:
raise ValueError("Expecting an 'asic:XAdESSignatures' root node")
data_objects_props_node = self._get_node("ds:SignedInfo")
doc_entries = data_objects_props_node.findall('ds:Reference[@Type=""]', namespaces=self.NAMESPACES)
self.doc_ids = [doc_entry.attrib["Id"] for doc_entry in doc_entries]
        self._certificate: Optional[Certificate] = None
self._prepared = None
@classmethod
def create(cls):
"""Create a XAdES structure from the accompanying template"""
with open(cls.SIGNATURE_TEMPLATE, "rb") as f:
xml_sig = cls(f.read().replace(b"{SIGNATURE_ID}", cls.NEW_SIGNATURE_ID.encode("ascii")))
xml_sig.doc_ids = []
return xml_sig
def dump(self):
return b'<?xml version="1.0" encoding="UTF-8"?>' + etree.tostring(self.xml)
def get_signing_time(self):
time_node = self._get_signing_time_node()
if time_node is None or not time_node.text:
return None
return time_node.text
def get_certificate(self) -> Certificate:
if not self._certificate:
cert_asn1 = self.get_certificate_value()
if cert_asn1:
# cache it on the instance, may be used a few times
self._certificate = load_certificate(cert_asn1)
return self._certificate
def get_certificate_value(self):
cert_node = self._get_node("ds:X509Certificate")
if cert_node is None or not cert_node.text:
return None
return base64.b64decode(cert_node.text)
def get_certificate_issuer_common_name(self):
subject_cert = self.get_certificate()
return subject_cert.asn1.issuer.native["common_name"] if subject_cert else None
def set_certificate(self, subject_cert: Union[bytes, Certificate]):
"""Set the signer's certificate
:param subject_cert: bytes, file name (Python 3.4+), asn1crypto.x509.Certificate objects
"""
if not isinstance(subject_cert, Certificate):
subject_cert = load_certificate(subject_cert)
# cache it on the instance, may be used a few times
self._certificate = subject_cert
cert_asn1 = subject_cert.asn1
der_encoded_cert = cert_asn1.dump()
serial_number = ("%d" % cert_asn1.serial_number).encode("ascii")
cert_node = self._get_node("ds:X509Certificate")
cert_node.text = base64.b64encode(der_encoded_cert)
cert_props = self._get_node("xades:SigningCertificate")
cert_props.find(".//ds:DigestValue", self.NAMESPACES).text = base64.b64encode(cert_asn1.sha256)
cert_props.find(".//ds:X509SerialNumber", self.NAMESPACES).text = serial_number
# No idea what value is possible, but rfc4514 is most common, so get it from a cryptography object
x509_cert = x509.load_der_x509_certificate(der_encoded_cert, default_backend())
cert_props.find(".//ds:X509IssuerName", self.NAMESPACES).text = x509_cert.issuer.rfc4514_string()
return self
def add_document(self, file_name, binary_data, mime_type, hash_type="sha256") -> "XmlSignature":
"""Add a document for signing
:param file_name: the file name to display in the container
:param mime_type: the document's mime type
:param binary_data: the document's contents
:param hash_type: the hash function to use for digesting
:return:
"""
try:
digest_algo = self.DIGEST_ALGORITHMS[hash_type]
except KeyError as e:
raise ValueError("Unknown hash type: %s" % hash_type) from e
digest_fn = getattr(hashlib, hash_type)
doc_hash = digest_fn(binary_data).digest()
signed_info = self._get_node("ds:SignedInfo")
first_ref_entry = signed_info.find('.//ds:Reference[@Type=""]', self.NAMESPACES)
doc_id = first_ref_entry.attrib["Id"]
doc_props = self._get_node("xades:SignedDataObjectProperties")
first_doc_entry = doc_props.find('.//xades:DataObjectFormat[@ObjectReference="#%s"]' % doc_id, self.NAMESPACES)
if self.doc_ids:
next_num = len(self.doc_ids) + 1
# generate new Id attribute
while True:
new_doc_id = "r-id-{}".format(next_num)
if new_doc_id not in self.doc_ids:
break
next_num += 1
# Instead of manually creating elements, just copy the structure
new_ref_entry = copy.deepcopy(first_ref_entry)
signed_info.append(new_ref_entry)
new_doc_entry = copy.deepcopy(first_doc_entry)
doc_props.append(new_doc_entry)
else:
new_doc_id = doc_id.format(DOCUMENT_NUMBER=1)
new_doc_entry = first_doc_entry
new_ref_entry = first_ref_entry
self.doc_ids.append(new_doc_id)
new_ref_entry.attrib["Id"] = new_doc_id
new_ref_entry.attrib["URI"] = file_name
new_ref_entry.find(".//ds:DigestMethod", self.NAMESPACES).attrib["Algorithm"] = digest_algo
new_ref_entry.find(".//ds:DigestValue", self.NAMESPACES).text = base64.b64encode(doc_hash)
new_doc_entry.attrib["ObjectReference"] = "#%s" % new_doc_id
new_doc_entry.find(".//xades:MimeType", self.NAMESPACES).text = mime_type
return self
def update_signed_info(self) -> "XmlSignature":
"""Calculate the digest over SignedProperties and embed it in SignedInfo"""
# Set signing time
time_node = self._get_signing_time_node()
# Add a UTC timestamp. Can't use isoformat() as it adds +00:00 and microseconds
# which can break the parser elsewhere
time_node.text = get_utc_time().strftime("%Y-%m-%dT%H:%M:%SZ")
self._calc_signed_properties_hash(update=True)
self._prepared = True
return self
@property
def prepared(self) -> bool:
if self._prepared is None:
self._prepared = False
if self.get_signing_time():
old, new = self._calc_signed_properties_hash()
if old == new:
self._prepared = True
return self._prepared
def signed_data(self) -> bytes:
if not self.prepared:
raise ValueError("The XML signature is not prepared")
sign_info_node = self._get_node("ds:SignedInfo")
return self.canonicalize(sign_info_node)
def digest(self) -> bytes:
signature_algo = self.get_signature_algorithm()
hash_algo_name = signature_algo.split("-")[-1]
hash_algo = getattr(hashlib, hash_algo_name)
return hash_algo(self.signed_data()).digest()
def verify(self) -> "XmlSignature":
hash_algo = self.get_signature_algorithm().split("-")[-1]
cert = self.get_certificate_value()
signature = self.get_signature_value()
signed_data = self.signed_data()
verify(cert, signature, signed_data, hash_algo)
return self
def get_signature_value(self):
sig_value_node = self._get_signature_value_node()
try:
text = sig_value_node.text.strip()
except AttributeError:
return None
return base64.b64decode(text) if text else None
def set_signature_value(self, signature: bytes) -> "XmlSignature":
"""Insert the base64-encoded value of a signature obtained from a signing service or device
NOTE: the signature method should be known in advance, as it's part of the SignedInfo structure over which
the signature is calculated.
:param signature: Binary signature
"""
sig_value_node = self._get_signature_value_node()
sig_value_node.text = base64.b64encode(signature)
return self
def get_signature_algorithm(self) -> str:
"""
Returns the #hash part of the corresponding node, in form of 'rsa-sha256'
This algorithm is expected to be present in SIGNATURE_ALGORITHMS and be in form of
{CRYPTOMETHOD}-{DIGESTMETHOD}. See the note for `set_signature_algorithm` below.
"""
sig_method_node = self._get_node("ds:SignatureMethod")
return sig_method_node.attrib["Algorithm"].split("#")[-1]
def set_signature_algorithm(self, algo: str = None) -> "XmlSignature":
"""Set a signature algorithm, if it is not the default one (rsa-sha256).
NOTE: Since the algorithm is included in the signed data, it is not possible to change the algo
after signing.
Ultimately, if an external signing service MAY select different algos at its discretion,
this field should be ignored.
FWIW the verification method of this class only takes into account
the hash algo (DIGESTMETHOD) part of this attribute.
:param algo: signature algorithm, one of SIGNATURE_ALGORITHMS
"""
if algo:
algo = algo.lower()
if algo not in self.SIGNATURE_ALGORITHMS:
raise ValueError("Unsupported signature algorithm")
else:
algo = self.SIGNATURE_ALGORITHMS[0]
sig_method_node = self._get_node("ds:SignatureMethod")
sig_method_node.attrib["Algorithm"] = self.SIGNATURE_ALGO_ID_TEMPLATE.format(algo=algo)
return self
def set_root_ca_cert(self, root_cert: Union[Certificate, bytes]) -> "XmlSignature":
"""Sets a root CA cert. This is not mandatory
:param root_cert: can be a PEM- or DER-encoded bytes content, or an `oscrypto.Certificate` object
"""
self.add_cert(root_cert, {"Id": f"{self.NEW_SIGNATURE_ID}-ROOT-CA-CERT"})
return self
def add_cert(self, cert: Union[Certificate, bytes], attribs: Optional[Dict] = None) -> "XmlSignature":
"""Add a cert. Latvian EDOC must have all of certs used in the xml (Root, OCSP and TimeStamp)
This is mandatory for Latvian EDOC format
:param cert: can be a PEM- or DER-encoded bytes content, or an `oscrypto.Certificate` object
        :param attribs: dict with attributes for <EncapsulatedX509Certificate> tag
"""
certs_node = self._get_node("xades:CertificateValues")
ca_node = etree.Element("{%s}EncapsulatedX509Certificate" % self.NAMESPACES["xades"])
if attribs is not None:
for name, value in attribs.items():
ca_node.attrib[name] = value
if not isinstance(cert, Certificate):
cert = load_certificate(cert)
ca_node.text = base64.b64encode(cert.asn1.dump())
certs_node.append(ca_node)
return self
def set_ocsp_response(self, ocsp_response: OCSP, embed_ocsp_certificate=False) -> "XmlSignature":
"""
Embed the OCSP response and certificates
:param OCSP ocsp_response:
:param bool embed_ocsp_certificate: Whether to add ocsp certificate to the xml.
This is needed when the OCSP service in use
does not embed the certificate in its response.
:return: self
"""
ocsp_response_node = self._get_node("xades:EncapsulatedOCSPValue")
ocsp_response_node.text = base64.b64encode(ocsp_response.get_encapsulated_response())
if embed_ocsp_certificate:
ocsp_certs_node = self._get_node("xades:CertificateValues")
ocsp_certs = ocsp_response.get_responder_certs()
cert_node = ocsp_certs_node.find(".//xades:EncapsulatedX509Certificate", namespaces=self.NAMESPACES)
cert_node.text = base64.b64encode(ocsp_certs[0].dump())
cert_node.attrib["Id"] = "S1-Responder-cert-1"
            for i, next_cert in enumerate(ocsp_certs[1:], start=2):
                cert_node = copy.deepcopy(cert_node)
                cert_node.text = base64.b64encode(next_cert.dump())
                cert_node.attrib["Id"] = "S1-Responder-cert-%d" % i
ocsp_certs_node.append(cert_node)
return self
def get_ocsp_response(self) -> Optional[OCSP]:
ocsp_response_node = self._get_node("xades:EncapsulatedOCSPValue")
try:
text = ocsp_response_node.text.strip()
except AttributeError:
return None
return OCSP.load(base64.b64decode(text)) if text else None
def verify_ocsp_response(self) -> "XmlSignature":
"""Verify embedded OCSP response.
:raises exceptions.SignatureVerificationError:
"""
try:
self.get_ocsp_response().verify()
except AttributeError:
raise SignatureVerificationError("The XML Signature doesn't contain an OCSP response")
return self
def verify_ts_response(self) -> "XmlSignature":
"""Verify embedded TSA response.
:raises exceptions.SignatureVerificationError:
"""
TSA.verify(self.get_timestamp_response(), self.get_timestamped_message())
return self
def get_timestamped_message(self) -> bytes:
sig_value_node = self._get_signature_value_node()
method = self.get_c14n_method("xades:SignatureTimeStamp")
return self.canonicalize(sig_value_node, method)
def get_timestamp_response(self) -> Optional[bytes]:
"""
Get the embedded TSA response for an LT-TS profile signature.
LT-TM must not even have the XML node.
"""
ts_value_node = self._get_node("xades:EncapsulatedTimeStamp")
try:
text = ts_value_node.text.strip()
except AttributeError:
return None
return base64.b64decode(text) if text else None
def set_timestamp_response(self, tsr) -> "XmlSignature":
ts_value_node = self._get_node("xades:EncapsulatedTimeStamp")
ts_value_node.text = base64.b64encode(tsr.dump())
return self
def remove_timestamp_node(self) -> "XmlSignature":
"""LT-TM profile requires the absence of the XML node"""
ts_value_node = self._get_node("xades:SignatureTimeStamp")
ts_value_node.getparent().remove(ts_value_node)
return self
def get_c14n_method(self, parent_node="ds:SignedInfo"):
"""Get a c14n method used within a specific context given by `parent_node`
The default context is the SignedInfo node. Also encountered in SignatureTimestamp
"""
method_node = self._get_node("{}/ds:CanonicalizationMethod".format(parent_node))
if method_node is not None:
method = method_node.attrib["Algorithm"]
if method not in self.C14N_METHODS:
raise ValueError("Unknown c14n method: {}".format(method))
else:
method = self.C14N_METHODS[0]
return method
def canonicalize(self, node, method=None) -> bytes:
if method is not None:
assert method in self.C14N_METHODS
else:
method = self.get_c14n_method()
exclusive = "xml-exc-c14n" in method
return etree.tostring(node, method="c14n", exclusive=exclusive)
def _get_node(self, tag_name) -> etree._Element:
return self.xml.find(".//{}".format(tag_name), namespaces=self.NAMESPACES)
def _get_signed_properties_node(self):
return self.xml.find(
"./ds:Signature/ds:Object/xades:QualifyingProperties/xades:SignedProperties", self.NAMESPACES
)
def _get_signing_time_node(self):
return self.xml.find(
"./ds:Signature/ds:Object/xades:QualifyingProperties/xades:SignedProperties/"
"xades:SignedSignatureProperties/xades:SigningTime",
self.NAMESPACES,
)
def _get_signed_info_ref_node(self):
"""Find the SignedInfo/Reference node that refers to the XMLSig itself (not to the signed files)"""
        for ref_type in self.SIGNED_PROPERTIES_TYPE:
            node = self.xml.find('./ds:Signature/ds:SignedInfo/ds:Reference[@Type="%s"]' % ref_type, self.NAMESPACES)
            if node is not None:
                return node
        return None
def _get_signed_properties_c14n_algo(self, signed_info_ref_node: etree._Element):
"""
Gets a transform/c14n algorithm for SignedProperties
The child nodes may be absent, in this case use the default c14n algo
# This is very obscure in the standard:
# https://www.w3.org/TR/2002/REC-xmldsig-core-20020212/#sec-ReferenceProcessingModel
"""
try:
c14n_algo = signed_info_ref_node.find("./ds:Transforms/ds:Transform", self.NAMESPACES).attrib["Algorithm"]
except: # noqa: E722
c14n_algo = None
return c14n_algo
def _get_signature_value_node(self):
return self.xml.find("./ds:Signature/ds:SignatureValue", self.NAMESPACES)
def _calc_signed_properties_hash(self, update=False) -> tuple:
"""
Calculates, and updates, if requested, the SignedInfo/Reference/DigestValue
...based on the current SignedProperties
:param update: update the digest value with current SignedProperties hash
:return: tuple(DigestValue node value, SignedProperties hash value)
"""
si_ref_node = self._get_signed_info_ref_node()
signed_props_node = self._get_signed_properties_node()
c14n_algo = self._get_signed_properties_c14n_algo(si_ref_node)
signed_props_c14n = self.canonicalize(signed_props_node, c14n_algo)
# TODO select algorithm based on DigestMethod // update DigestMethod
signed_props_hash = hashlib.sha256(signed_props_c14n).digest()
new_digest_value = base64.b64encode(signed_props_hash).decode()
si_digest_node = si_ref_node.find("./ds:DigestValue", self.NAMESPACES)
if update:
si_digest_node.text = new_digest_value
return si_digest_node.text, new_digest_value
|
# -*- coding: utf-8 -*-
from PIL import Image
import os
CODE_LIB = r"@B%8&WM#ahdpmZOQLCJYXzcunxrjft/\|()1[]?-_+~<>i!I;:, "
count = len(CODE_LIB)
def transform_ascii(image_file):
image_file = image_file.convert("L")
code_pic = ''
for h in range(0,image_file.size[1]):
for w in range(0,image_file.size[0]):
gray = image_file.getpixel((w,h))
code_pic = code_pic + CODE_LIB[int(((count-1)*gray)/256)]
code_pic = code_pic + "\n"
return code_pic
def convert_image(fn, hratio=1.0, wratio=1.0):
    with open(fn, 'rb') as fp:
        image_file = Image.open(fp)
        image_file = image_file.resize((int(image_file.size[0]*wratio), int(image_file.size[1]*hratio)))
        print(u'Size info:', image_file.size[0], ' ', image_file.size[1], ' ')
        trans_data = transform_ascii(image_file)
    print(trans_data)
    with open('result.txt', 'w') as tmp:
        tmp.write(trans_data)
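# Illustrative usage (the image path and ratios below are assumptions, not
# part of the original script):
# convert_image('photo.jpg', hratio=0.5, wratio=0.25)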
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import string
def get_chars74k_label_map():
"""
    This function generates a label map for the Chars74K dataset.
This will help to display the true label from the predicted class.
"""
# samples 1 through 10 are numbers '0' - '9'
# samples 11 through 36 are uppercase letters
# samples 37 through 62 are lowercase letters
num_start = 0
upper_start = 10
lower_start = 36
label_map = dict()
    for label, char in enumerate(string.digits, start=num_start):
        label_map[label] = char
    for label, char in enumerate(string.ascii_uppercase, start=upper_start):
        label_map[label] = char
    for label, char in enumerate(string.ascii_lowercase, start=lower_start):
        label_map[label] = char
return label_map
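# Illustrative sanity check of the mapping (not part of the original module):
# labels 0-9 map to digits, 10-35 to uppercase, 36-61 to lowercase letters.
if __name__ == '__main__':
    label_map = get_chars74k_label_map()
    assert label_map[0] == '0' and label_map[10] == 'A' and label_map[36] == 'a'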
|
# -*- coding: utf-8 -*-
import logging
import sys
from ._constants_ import LOGGERNAME
def create_log_handler(fmt='%(asctime)s| %(levelname)s |%(message)s', stream=sys.stderr):
formatter = logging.Formatter(fmt)
handler = logging.StreamHandler(stream=stream)
handler.setFormatter(formatter)
    logging.getLogger(LOGGERNAME).addHandler(handler)
|
# Auto generated by generator.py. Delete this line if you make modification.
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
XPATH = {
'name' : "//div[@class='item-info']/h1",
'price' : "//div[@class='price-info']/div[@class='left']/p[@class='price']",
'category' : "",
'description' : "//div[@class='clm-l']/div[@class='item-content-detail']/div[1]",
'images' : "//div[@id='bigPic']/img/@src",
'canonical' : "",
'base_url' : "",
'brand' : "",
'in_stock' : "",
'guarantee' : "",
'promotion' : ""
}
name = 'secoo.vn'
allowed_domains = ['secoo.vn']
start_urls = ['http://secoo.vn/']
tracking_url = ''
sitemap_urls = ['']
sitemap_rules = [('', 'parse_item')]
sitemap_follow = ['']
rules = [
#Rule(LinkExtractor(), 'parse_item'),
#Rule(LinkExtractor(), 'parse'),
    Rule(LinkExtractor(allow=[r'/[a-zA-Z0-9-]+\.htm$']), 'parse_item_and_links'),
]
|
import brother_ql
import brother_ql.backends.helpers
# Import the submodules used below explicitly so the attribute lookups resolve.
import brother_ql.conversion
import brother_ql.raster
from bottle import get, post, run, static_file, template, request, response
import click
from base64 import b64decode
from PIL import Image
from io import BytesIO
LABEL_API_HOST = "0.0.0.0"
LABEL_API_PORT = 8765
BROTHER_QL_MODEL = "QL-800"
BROTHER_QL_BACKEND = None
BROTHER_QL_PRINTER = "file:///dev/usb/lp0"
DEBUG = False
IS_PRINTING = False
@get('/')
def getIndex():
return static_file("index.html", root='./public')
@get('/static/<filename:path>')
def getStatic(filename):
return static_file(filename, root='./public')
@post('/api/print')
def postPrintImage():
global IS_PRINTING
try:
body = request.json
if not "image" in body:
return {'success': False, 'error': "image is required"}
if IS_PRINTING:
return {'success': False, 'error': "Printer is busy"}
IS_PRINTING = True
rawImageData = b64decode(body["image"])
rawImage = Image.open(BytesIO(rawImageData))
# convert image
qlr = brother_ql.raster.BrotherQLRaster(BROTHER_QL_MODEL)
brother_ql.conversion.convert(qlr, [rawImage], **body)
# print it
kwargs = {
'printer_identifier': BROTHER_QL_PRINTER,
'backend_identifier': BROTHER_QL_BACKEND,
'blocking': True,
}
result = brother_ql.backends.helpers.send(qlr.data, **kwargs)
success = result['did_print'] and result['ready_for_next_job']
return {'success': success, 'result': result}
except Exception as e:
return {'success': False, 'error': str(e)}
finally:
IS_PRINTING = False
def run_server():
run(host=LABEL_API_HOST, port=LABEL_API_PORT)
@click.command()
@click.option('--host', default=LABEL_API_HOST, help='Host / IP to listen on')
@click.option('--port', default=LABEL_API_PORT, help='Port to listen on')
@click.option('--model', default=BROTHER_QL_MODEL , help='brother_ql model')
@click.option('--backend', default=BROTHER_QL_BACKEND, help='brother_ql backend')
@click.option('--printer', default=BROTHER_QL_PRINTER, help='brother_ql printer')
@click.option('--debug', is_flag=True, help='Enable verbose debugging output')
def cli(host, port, model, backend, printer, debug):
"""
Start the label_api software
"""
global LABEL_API_HOST, LABEL_API_PORT
global BROTHER_QL_MODEL, BROTHER_QL_BACKEND, BROTHER_QL_PRINTER
global DEBUG
LABEL_API_HOST = host
LABEL_API_PORT = port
BROTHER_QL_MODEL = model
BROTHER_QL_BACKEND = backend
BROTHER_QL_PRINTER = printer
DEBUG = debug
run_server()
if __name__ == '__main__':
    cli()
|
import logging
logger = logging.getLogger(__name__)
uri = "/iam/access/v8/mmfa-config"
requires_modules = None
requires_version = None
def get(isamAppliance, check_mode=False, force=False):
"""
Retrieve MMFA endpoint details
"""
return isamAppliance.invoke_get("Retrieve MMFA endpoint details",
"/iam/access/v8/mmfa-config")
def set(isamAppliance, client_id, options, endpoints, discovery_mechanisms, check_mode=False, force=False):
"""
Set MMFA endpoint configuration
"""
json_data = {
"client_id": client_id,
"options": options,
"endpoints": endpoints,
"discovery_mechanisms": discovery_mechanisms
}
if check_mode is True:
return isamAppliance.create_return_object(changed=True)
else:
return isamAppliance.invoke_post(
"Set MMFA endpoint configuration",
"/iam/access/v8/mmfa-config/", json_data)
def delete(isamAppliance, check_mode=False, force=False):
"""
Unconfigure MMFA endpoint details
"""
ret_obj = get(isamAppliance)
if force is True or ret_obj['data'] != {}:
if check_mode is True:
return isamAppliance.create_return_object(changed=True)
else:
return isamAppliance.invoke_delete(
"Unconfigure MMFA endpoint details",
"{0}".format(uri),
requires_modules=requires_modules, requires_version=requires_version
)
return isamAppliance.create_return_object()
|
import torch.utils.data.dataset as data
import pymia.data.transformation as tfm
import pymia.data.indexexpression as expr
from . import reader as rd
from . import indexing as idx
from . import extractor as extr
class ParameterizableDataset(data.Dataset):
def __init__(self, dataset_path: str, indexing_strategy: idx.IndexingStrategy=None, extractor: extr.Extractor=None,
transform: tfm.Transform=None, subject_subset: list=None, init_reader_once=True) -> None:
self.dataset_path = dataset_path
self.indexing_strategy = None
self.extractor = extractor
self.transform = transform
self.subject_subset = subject_subset
self.init_reader_once = init_reader_once
self.indices = []
self.reader = None
# init indices
if indexing_strategy is None:
indexing_strategy = idx.EmptyIndexing()
self.set_indexing_strategy(indexing_strategy, subject_subset)
def close_reader(self):
if self.reader is not None:
self.reader.close()
self.reader = None
def set_extractor(self, extractor: extr.Extractor):
self.extractor = extractor
def set_indexing_strategy(self, indexing_strategy: idx.IndexingStrategy, subject_subset: list=None):
self.indices.clear()
self.indexing_strategy = indexing_strategy
with rd.get_reader(self.dataset_path) as reader:
all_subjects = reader.get_subjects()
for i, subject in enumerate(reader.get_subject_entries()):
if subject_subset is None or all_subjects[i] in subject_subset:
subject_indices = self.indexing_strategy(reader.get_shape(subject))
subject_and_indices = zip(len(subject_indices) * [i], subject_indices)
self.indices.extend(subject_and_indices)
def set_transform(self, transform: tfm.Transform):
self.transform = transform
def get_subjects(self):
with rd.get_reader(self.dataset_path) as reader:
return reader.get_subjects()
def direct_extract(self, extractor: extr.Extractor, subject_index: int, index_expr: expr.IndexExpression=None,
transform: tfm.Transform=None):
if index_expr is None:
index_expr = expr.IndexExpression()
params = {'subject_index': subject_index, 'index_expr': index_expr}
extracted = {}
if not self.init_reader_once:
with rd.get_reader(self.dataset_path) as reader:
extractor.extract(reader, params, extracted)
else:
if self.reader is None:
self.reader = rd.get_reader(self.dataset_path, direct_open=True)
extractor.extract(self.reader, params, extracted)
if transform:
extracted = transform(extracted)
return extracted
def __len__(self):
return len(self.indices)
def __getitem__(self, item):
subject_index, index_expr = self.indices[item]
return self.direct_extract(self.extractor, subject_index, index_expr, self.transform)
def __del__(self):
self.close_reader()
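# Illustrative usage sketch (commented out; the dataset path and the concrete
# indexing strategy / extractor instances are assumptions that depend on the
# surrounding pymia setup):
# dataset = ParameterizableDataset('data/dataset.h5', idx.EmptyIndexing(), extractor=my_extractor)
# print(dataset.get_subjects())
# sample = dataset.direct_extract(my_extractor, subject_index=0)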
|
import socket
import threading
HOST, PORT = "127.0.0.1", 14900
conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
conn.connect((HOST, PORT))
name = input("Please enter ur name: ")
def listen():
while True:
msg = conn.recv(16384).decode()
if msg == "name":
            conn.send(name.encode())
else:
print(msg)
return
def send():
while True:
        msg = f"\n>>> User {name}: \n{input('')}\n".encode("utf-8")
conn.send(msg)
return
listen_thread = threading.Thread(target=listen)
listen_thread.start()
send_thread = threading.Thread(target=send)
send_thread.start()
|
import json
import stackx as sx
def main():
#Load config file
with open('config.json') as json_config_file:
config = json.load(json_config_file)
print(config)
#Connect to database
sxdb = sx.Connection(config=config["mysql"])
a = sx.Archive7z("/Users/ardeego/repos/stackx/tests/data/test.stackexchange.com.7z")
#Extract file to pipe (use cat on console)
print(a.list_files())
name = a.extract("Badges.xml", "./", pipe=True)
a.join()
#Extract all files
a.extract_multiple("./")
if __name__ == '__main__':
    main()
|
import logging
import pytest
from ocs_ci.framework.testlib import ManageTest, tier1, acceptance
from ocs_ci.ocs import constants
from ocs_ci.ocs.exceptions import UnexpectedBehaviour
from ocs_ci.ocs.resources import pod
from ocs_ci.utility.retry import retry
from tests import helpers
logger = logging.getLogger(__name__)
class TestDynamicPvc(ManageTest):
"""
Automates the following test cases:
OCS-530 - RBD Based RWO Dynamic PVC creation with Reclaim policy set to Retain
OCS-533 - RBD Based RWO Dynamic PVC creation with Reclaim policy set to Delete
OCS-525 - CephFS Based RWO Dynamic PVC creation with Reclaim policy set to Retain
OCS-526 - CephFS Based RWO Dynamic PVC creation with Reclaim policy set to Delete
OCS-542 - CephFS Based RWX Dynamic PVC creation with Reclaim policy set to Retain
OCS-529 - CephFS Based RWX Dynamic PVC creation with Reclaim policy set to Delete
"""
pvc_size = 10 # size in Gi
@pytest.fixture()
def setup(
self, interface_type, reclaim_policy, storageclass_factory
):
"""
Creates storage class with specified interface and reclaim policy.
Fetches all worker nodes
Args:
interface_type (str): The type of the interface
(e.g. CephBlockPool, CephFileSystem)
reclaim_policy (str): The type of reclaim policy
(eg., 'Delete', 'Retain')
storageclass_factory: A fixture to create new storage class
Returns:
tuple: containing the storage class instance and list of worker nodes
"""
# Create storage class
sc_obj = storageclass_factory(
interface=interface_type, reclaim_policy=reclaim_policy
)
worker_nodes_list = helpers.get_worker_nodes()
return sc_obj, worker_nodes_list
@retry(UnexpectedBehaviour, tries=10, delay=5, backoff=1)
def verify_expected_failure_event(self, ocs_obj, failure_str):
"""
Checks for the expected failure event message in oc describe command
"""
logger.info(
"Check expected failure event message in oc describe command"
)
if failure_str in ocs_obj.describe():
logger.info(
f"Failure string {failure_str} is present in oc describe"
f" command"
)
return True
else:
raise UnexpectedBehaviour(
f"Failure string {failure_str} is not found in oc describe"
f" command"
)
@acceptance
@tier1
@pytest.mark.parametrize(
argnames=["interface_type", "reclaim_policy"],
argvalues=[
pytest.param(
*[constants.CEPHBLOCKPOOL, constants.RECLAIM_POLICY_RETAIN],
marks=[
pytest.mark.polarion_id("OCS-530"),
pytest.mark.bugzilla("1772990")
]
),
pytest.param(
*[constants.CEPHBLOCKPOOL, constants.RECLAIM_POLICY_DELETE],
marks=[
pytest.mark.polarion_id("OCS-533"),
pytest.mark.bugzilla("1750916"),
pytest.mark.bugzilla("1772990")
]
),
pytest.param(
*[constants.CEPHFILESYSTEM, constants.RECLAIM_POLICY_RETAIN],
marks=[
pytest.mark.polarion_id("OCS-525"),
pytest.mark.bugzilla("1751866"),
pytest.mark.bugzilla("1750916"),
pytest.mark.bugzilla("1772990")
]
),
pytest.param(
*[constants.CEPHFILESYSTEM, constants.RECLAIM_POLICY_DELETE],
marks=[
pytest.mark.polarion_id("OCS-526"),
pytest.mark.bugzilla("1751866"),
pytest.mark.bugzilla("1750916"),
pytest.mark.bugzilla("1772990")
]
)
]
)
def test_rwo_dynamic_pvc(
self, interface_type, reclaim_policy, setup, pvc_factory, pod_factory
):
"""
RWO Dynamic PVC creation tests with Reclaim policy set to Retain/Delete
"""
access_mode = constants.ACCESS_MODE_RWO
expected_failure_str = 'Multi-Attach error for volume'
storage_type = 'fs'
sc_obj, worker_nodes_list = setup
logger.info(f"Creating PVC with {access_mode} access mode")
pvc_obj = pvc_factory(
interface=interface_type,
storageclass=sc_obj,
size=self.pvc_size,
access_mode=access_mode,
status=constants.STATUS_BOUND
)
logger.info(
f"Creating first pod on node: {worker_nodes_list[0]} "
f"with pvc {pvc_obj.name}"
)
pod_obj1 = pod_factory(
interface=interface_type,
pvc=pvc_obj,
status=constants.STATUS_RUNNING,
node_name=worker_nodes_list[0],
pod_dict_path=constants.NGINX_POD_YAML
)
logger.info(
f"Creating second pod on node: {worker_nodes_list[1]} "
f"with pvc {pvc_obj.name}"
)
pod_obj2 = pod_factory(
interface=interface_type,
pvc=pvc_obj,
status=constants.STATUS_CONTAINER_CREATING,
node_name=worker_nodes_list[1],
pod_dict_path=constants.NGINX_POD_YAML
)
node_pod1 = pod_obj1.get().get('spec').get('nodeName')
node_pod2 = pod_obj2.get().get('spec').get('nodeName')
assert node_pod1 != node_pod2, 'Both pods are on the same node'
logger.info(f"Running IO on first pod {pod_obj1.name}")
file_name = pod_obj1.name
pod_obj1.run_io(
storage_type=storage_type, size='1G', fio_filename=file_name
)
pod.get_fio_rw_iops(pod_obj1)
md5sum_pod1_data = pod.cal_md5sum(
pod_obj=pod_obj1, file_name=file_name
)
# Verify that second pod is still in ContainerCreating state and not
# able to attain Running state due to expected failure
logger.info(
f"Verify that second pod {pod_obj2.name} is still in ContainerCreating state"
)
helpers.wait_for_resource_state(
resource=pod_obj2, state=constants.STATUS_CONTAINER_CREATING
)
self.verify_expected_failure_event(
ocs_obj=pod_obj2, failure_str=expected_failure_str
)
logger.info(
f"Deleting first pod so that second pod can attach PVC {pvc_obj.name}"
)
pod_obj1.delete()
pod_obj1.ocp.wait_for_delete(resource_name=pod_obj1.name)
# Wait for second pod to be in Running state
helpers.wait_for_resource_state(
resource=pod_obj2, state=constants.STATUS_RUNNING, timeout=240
)
logger.info(
f"Verify data from second pod {pod_obj2.name}"
)
pod.verify_data_integrity(
pod_obj=pod_obj2, file_name=file_name,
original_md5sum=md5sum_pod1_data
)
pod_obj2.run_io(
storage_type=storage_type, size='1G', fio_filename=pod_obj2.name
)
pod.get_fio_rw_iops(pod_obj2)
# Again verify data integrity
logger.info(
f"Again verify data from second pod {pod_obj2.name}"
)
pod.verify_data_integrity(
pod_obj=pod_obj2, file_name=file_name,
original_md5sum=md5sum_pod1_data
)
@acceptance
@tier1
@pytest.mark.bugzilla("1750916")
@pytest.mark.bugzilla("1751866")
@pytest.mark.parametrize(
argnames=["interface_type", "reclaim_policy"],
argvalues=[
pytest.param(
*[constants.CEPHFILESYSTEM, constants.RECLAIM_POLICY_RETAIN],
marks=pytest.mark.polarion_id("OCS-542")
),
pytest.param(
*[constants.CEPHFILESYSTEM, constants.RECLAIM_POLICY_DELETE],
marks=pytest.mark.polarion_id("OCS-529")
)
]
)
def test_rwx_dynamic_pvc(
self, interface_type, reclaim_policy, setup, pvc_factory, pod_factory
):
"""
RWX Dynamic PVC creation tests with Reclaim policy set to Retain/Delete
"""
access_mode = constants.ACCESS_MODE_RWX
storage_type = 'fs'
sc_obj, worker_nodes_list = setup
logger.info("CephFS RWX test")
logger.info(f"Creating PVC with {access_mode} access mode")
pvc_obj = pvc_factory(
interface=interface_type,
storageclass=sc_obj,
size=self.pvc_size,
access_mode=access_mode,
status=constants.STATUS_BOUND
)
logger.info(
f"Creating first pod on node: {worker_nodes_list[0]} "
f"with pvc {pvc_obj.name}"
)
pod_obj1 = pod_factory(
interface=interface_type,
pvc=pvc_obj,
status=constants.STATUS_RUNNING,
node_name=worker_nodes_list[0],
pod_dict_path=constants.NGINX_POD_YAML
)
logger.info(
f"Creating second pod on node: {worker_nodes_list[1]} "
f"with pvc {pvc_obj.name}"
)
pod_obj2 = pod_factory(
interface=interface_type,
pvc=pvc_obj,
status=constants.STATUS_RUNNING,
node_name=worker_nodes_list[1],
pod_dict_path=constants.NGINX_POD_YAML
)
node_pod1 = pod_obj1.get().get('spec').get('nodeName')
node_pod2 = pod_obj2.get().get('spec').get('nodeName')
assert node_pod1 != node_pod2, 'Both pods are on the same node'
# Run IO on both the pods
logger.info(f"Running IO on pod {pod_obj1.name}")
file_name1 = pod_obj1.name
logger.info(file_name1)
pod_obj1.run_io(
storage_type=storage_type, size='1G', fio_filename=file_name1
)
logger.info(f"Running IO on pod {pod_obj2.name}")
file_name2 = pod_obj2.name
pod_obj2.run_io(
storage_type=storage_type, size='1G', fio_filename=file_name2
)
# Check IO and calculate md5sum of files
pod.get_fio_rw_iops(pod_obj1)
md5sum_pod1_data = pod.cal_md5sum(
pod_obj=pod_obj1, file_name=file_name1
)
pod.get_fio_rw_iops(pod_obj2)
md5sum_pod2_data = pod.cal_md5sum(
pod_obj=pod_obj2, file_name=file_name2
)
logger.info("verify data from alternate pods")
pod.verify_data_integrity(
pod_obj=pod_obj2, file_name=file_name1,
original_md5sum=md5sum_pod1_data
)
pod.verify_data_integrity(
pod_obj=pod_obj1, file_name=file_name2,
original_md5sum=md5sum_pod2_data
)
# Verify that data is mutable from any pod
logger.info("Perform modification of files from alternate pod")
# Access and rename file written by pod-2 from pod-1
file_path2 = pod.get_file_path(pod_obj2, file_name2)
logger.info(file_path2)
pod_obj1.exec_cmd_on_pod(
command=f"bash -c \"mv {file_path2} {file_path2}-renamed\"",
out_yaml_format=False
)
# Access and rename file written by pod-1 from pod-2
file_path1 = pod.get_file_path(pod_obj1, file_name1)
logger.info(file_path1)
pod_obj2.exec_cmd_on_pod(
command=f"bash -c \"mv {file_path1} {file_path1}-renamed\"",
out_yaml_format=False
)
logger.info("Verify presence of renamed files from both pods")
file_names = [f"{file_path1}-renamed", f"{file_path2}-renamed"]
for file in file_names:
assert pod.check_file_existence(pod_obj1, file), (
f"File {file} doesn't exist"
)
logger.info(f"File {file} exists in {pod_obj1.name} ")
assert pod.check_file_existence(pod_obj2, file), (
f"File {file} doesn't exist"
)
logger.info(f"File {file} exists in {pod_obj2.name}")
# ROX access mode not supported in OCS
# BZ 1727004
|
#
# Python script using Power Manager API to get or set Power Manager - Settings.
#
# _author_ = Mahendran P <[email protected]>
#
#
# Copyright (c) 2021 Dell EMC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
SYNOPSIS:
Script to get or set Power Manager Settings applied on OpenManage Enterprise
DESCRIPTION:
This script exercises the Power Manager REST API to get & set Power Manager Settings.
- For authentication X-Auth is used over Basic Authentication
- Note that the credentials entered are not stored to disk.
EXAMPLE:
python get_set_power_manager_settings.py --ip <xx> --username <username> --password <pwd>
"""
#Import the modules required for this script
import sys
import argparse
from argparse import RawTextHelpFormatter
import json
import requests
import urllib3
from columnar import columnar
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
#Temperature Display Unit dictionary to display the output for better reading
temp_disp_unit_dictionary = {
1:"Celsius",
2:"Fahrenheit"}
#Power Display Unit dictionary to display the output for better reading
power_disp_unit_dictionary = {
1:"Watt",
2:"BTU/Hr"}
#Metric Interval dictionary to display the output for better reading
metric_interval_dictionary = {
15:"15 minutes",
30:"30 minutes",
60:"60 minutes"}
#Built-in Report Interval dictionary to display the output for better reading
report_interval_dictionary = {
1:"1 Day",
7:"7 Days",
15:"15 Days",
30:"30 Days",
90:"90 Days",
180:"180 Days",
365:"365 Days"}
#Built-in Report Time Granularity dictionary to display the output for better reading
report_granularity_dictionary = {
1:"1 Hour",
2:"1 Day"}
#Top Energy Consumers dictionary to display the output for better reading
top_energy_interval_dictionary = {
4:"1 Day",
5:"1 Week",
6:"2 Weeks",
7:"1 Month",
8:"3 Months",
9:"6 Months",
10:"1 Year"}
#Delete Metric Data dictionary to display the output for better reading
delete_metric_data_dictionary = {
1:"Delete data",
2:"Keep data"}
#Reset WSMAN Power Metric dictionary to display the output for better reading
reset_metric_dictionary = {
1:"Enabled",
2:"Disable"}
#Power Manager Settings dictionary's dictionary to display the output for better reading
#(integer keys also match float inputs such as 1.0, since equal numbers hash alike)
settings_dictionary = {
1:temp_disp_unit_dictionary,
2:power_disp_unit_dictionary,
3:metric_interval_dictionary,
5:report_interval_dictionary,
6:report_granularity_dictionary,
7:top_energy_interval_dictionary,
8:delete_metric_data_dictionary,
9:reset_metric_dictionary,
51:"Any number of days between 30 to 365"}
def get_power_manager_settings(ip_address, user_name, password):
""" Authenticate with OpenManage Enterprise, get power manager settings"""
try:
# Defining Session URL & headers
session_url = 'https://%s/api/SessionService/Sessions' % ip_address
headers = {'content-type': 'application/json'}
# Define Payload for posting session API
user_details = {'UserName': user_name,
'Password': password,
'SessionType': 'API'}
# Define OUTPUT header & data format
output_column_headers = ['Setting_ID', 'Name', 'Default_Value', 'Current_Value', 'Setting_Value_Enum']
output_column_data = []
# Defining Power Manager settings URL
settings_url = "https://%s/api/PowerService/Settings" % ip_address
# Create the session with OpenManage Enterprise
session_info = requests.post(session_url, verify=False,
data=json.dumps(user_details),
headers=headers)
#If the session could not be created, show the error to the user
if session_info.status_code != 201 and session_info.status_code != 200:
session_json_data = session_info.json()
if 'error' in session_json_data:
error_content = session_json_data['error']
if '@Message.ExtendedInfo' not in error_content:
print("Unable to create a session with %s" % ip_address)
else:
extended_error_content = error_content['@Message.ExtendedInfo']
print("Unable to create a session with %s. See below ExtendedInfo for more information" % ip_address)
print(extended_error_content[0]['Message'])
else:
print("Unable to create a session with %s. Please try again later" % ip_address)
return 0
else:
headers['X-Auth-Token'] = session_info.headers['X-Auth-Token']
#Get Power Manager settings API call with OpenManage Enterprise
settings_response = requests.get(settings_url, headers=headers, verify=False)
settings_json_data = settings_response.json()
#If the settings API fails or doesn't respond, show the error to the user
if settings_response.status_code != 201 and settings_response.status_code != 200:
if 'error' in settings_json_data:
error_content = settings_json_data['error']
if '@Message.ExtendedInfo' not in error_content:
print("Unable to retrieve Power Manager settings from %s" % ip_address)
else:
extended_error_content = error_content['@Message.ExtendedInfo']
print("Unable to retrieve Power Manager settings from %s. See below ExtendedInfo for more information" % ip_address)
print(extended_error_content[0]['Message'])
else:
print("Unable to retrieve Power Manager settings from %s" % ip_address)
return 0
else:
settings_content = json.loads(settings_response.content)
if settings_content:
# For every elements in the Settings response, store the details in the table
for settings_elem in settings_content["value"]:
data_dictionary = settings_dictionary[int(settings_elem["Id"])]
settings_data = [settings_elem["Id"], settings_elem["Name"], settings_elem["DefaultValue"], settings_elem["Value"], data_dictionary]
output_column_data.append(settings_data)
table = columnar(output_column_data, output_column_headers, no_borders=True)
print("\n ==========================================")
print(" Power Manager Settings ")
print(" ==========================================")
print(table)
return 1
else:
print("No Power Manager settings from %s" % ip_address)
return 0
except Exception as error:
print("Unexpected error:", str(error))
return 0
def set_power_manager_settings(ip_address, user_name, password, settings_id, settings_value):
""" Authenticate with OpenManage Enterprise, set power manager settings"""
try:
# Defining Session URL & its headers
session_url = 'https://%s/api/SessionService/Sessions' % ip_address
headers = {'content-type': 'application/json'}
# Define Payload for posting session API
user_details = {'UserName': user_name,
'Password': password,
'SessionType': 'API'}
# Defining Power Manager settings URL
settings_url = "https://%s/api/PowerService/Actions/PowerService.UpdateSettings" % ip_address
# Payload for posting settings API
settings_payload = {"Settings":[{ "Id": int(settings_id), "Value": int(settings_value)}]}
# Create the session with OpenManage Enterprise
session_info = requests.post(session_url, verify=False,
data=json.dumps(user_details),
headers=headers)
#If the session could not be created, show the error to the user
if session_info.status_code != 201 and session_info.status_code != 200:
session_json_data = session_info.json()
if 'error' in session_json_data:
error_content = session_json_data['error']
if '@Message.ExtendedInfo' not in error_content:
print("Unable to create a session with %s" % ip_address)
else:
extended_error_content = error_content['@Message.ExtendedInfo']
print("Unable to create a session with %s. See below ExtendedInfo for more information" % ip_address)
print(extended_error_content[0]['Message'])
else:
print("Unable to create a session with %s. Please try again later" % ip_address)
else:
headers['X-Auth-Token'] = session_info.headers['X-Auth-Token']
#Set Power Manager settings API call with OpenManage Enterprise
settings_response = requests.post(settings_url, data=json.dumps(settings_payload), headers=headers, verify=False)
settings_json_data = settings_response.json()
#If the settings API fails or doesn't respond, show the error to the user
if settings_response.status_code != 201 and settings_response.status_code != 200:
if 'error' in settings_json_data:
error_content = settings_json_data['error']
if '@Message.ExtendedInfo' not in error_content:
print("Unable to set Power Manager Setting on %s" % ip_address)
else:
extended_error_content = error_content['@Message.ExtendedInfo']
print("Unable to set Power Manager Setting on %s. See below ExtendedInfo for more information" % ip_address)
print(extended_error_content[0]['Message'])
else:
print("Unable to set Power Manager Setting on %s" % ip_address)
else:
print("Successfully applied Power Manger Setting on %s" % ip_address)
except Exception as error:
print("Unexpected error:", str(error))
return 0
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter)
parser.add_argument("--ip", "-i", required=True, help="OpenManage Enterprise IP <- Mandatory")
parser.add_argument("--username", "-u", required=False, help="Username for OpenManage Enterprise <- Optional; default = admin", default="admin")
parser.add_argument("--password", "-p", required=True, help="Password for OpenManage Enterprise <- Mandatory")
args = parser.parse_args()
return_value = get_power_manager_settings(args.ip, args.username, args.password)
# Only if get_power_manager_settings returns success do we proceed to set.
if return_value == 1:
get_inputs = input("Do you want to change any Power Manager settings (Y/N) : ")
#Until user says No to change the settings
while get_inputs in ('Y','y',''):
#Get the corresponding settings enumeration dictionary based on the user input, i.e. the setting ID.
setting_id_input = input("Please enter Setting_ID : ")
#Determine the input type, either int or float. Error out otherwise & continue
if "." in setting_id_input:
setting_id_input = float(setting_id_input)
elif setting_id_input.isdigit():
setting_id_input = int(setting_id_input)
else:
print("\n !!! ERROR :: Wrong Setting's ID Entered !!! \n Please provide proper setting's ID & try again\n")
continue
#if the setting ID provided doesn't exist, then error out & continue
if settings_dictionary.get(setting_id_input) == None:
print("\n !!! ERROR :: Wrong Setting's ID Entered !!! \n Please provide proper setting's ID & try again\n")
continue
else:
define_dictionary = settings_dictionary[setting_id_input]
#Display the supported values for the chosen setting so the user can easily pick a value
print("Supported key values: \n ")
if setting_id_input != 51:
for key, value in define_dictionary.items():
print(" ",key, ' : ' ,value)
else:
print(define_dictionary)
#Get the user input for setting value
settings_value_inputs = input("\nPlease enter Setting_Value : ")
#if the setting value provided is not an integer and doesn't exist in the dictionary, then error out & continue
if not settings_value_inputs.isdigit():
print("\n !!! ERROR :: Wrong Setting's Value Entered !!! \n Please provide proper setting's value & try again\n")
continue
else:
settings_value_inputs = int(settings_value_inputs)
#Only enumerated settings are validated against their dictionary; setting 51 takes a plain number of days
if isinstance(define_dictionary, dict) and define_dictionary.get(settings_value_inputs) is None:
print("\n !!! ERROR :: Wrong Setting's Value Entered !!! \n Please provide proper setting's value & try again\n")
continue
set_power_manager_settings(args.ip, args.username, args.password, setting_id_input, settings_value_inputs)
get_inputs = input("\nDo you want to change any other Power Manager settings (Y/N) : ") |
import bpy, bmesh, struct
import base64, hashlib
from time import strftime, gmtime
import speckle.schemas
def export_mesh(blender_object, scale=1.0):
return MeshObject_to_SpeckleMesh(blender_object, scale)
#return None
def SetGeometryHash(data):
code = hashlib.md5(data.encode('utf-8')).hexdigest()
return code
def MeshObject_to_SpeckleMesh(obj, scale=1.0):
if obj.data.loop_triangles is None or len(obj.data.loop_triangles) < 1:
obj.data.calc_loop_triangles()
verts = [x.co * scale for x in obj.data.vertices]
# TODO: add n-gon support, using tessfaces for now
faces = [x.vertices for x in obj.data.loop_triangles]
#faces = [x.vertices for x in obj.data.polygons]
sm = speckle.schemas.Mesh()
for v in verts:
sm.vertices.extend(v)
for f in faces:
if len(f) == 3:
sm.faces.append(0)
elif len(f) == 4:
sm.faces.append(1)
else:
continue
sm.faces.extend(f)
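# Face encoding note (comment added for clarity, not in the original exporter):
# Speckle stores each face as a type flag followed by its vertex indices, so a
# triangle (a, b, c) is appended as [0, a, b, c] and a quad (a, b, c, d) as
# [1, a, b, c, d]; faces with any other vertex count are skipped by the loop above.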
# Add texture coordinates
# TODO: make switchable
# Using tessfaces for now - possible future n-gon support
#if obj.data.uv_layers.active is not None:
'''
if obj.data.tessface_uv_textures.active is not None:
uvs = [x.uv for x in obj.data.tessface_uv_textures.active.data]
uv_string_list = ["%f %f" % (x[0][0], x[0][1]) for x in uvs]
uv_string = ' '.join(uv_string_list)
sm['properties']['texture_coordinates'] = base64.encodestring(uv_string.encode("utf-8")).decode("utf-8")
'''
sm.name = obj.name
#sm.id = obj.speckle.object_id
#sm.geometryHash = SetGeometryHash(str(sm))[:12]
#sm.hash = SetGeometryHash(str(sm) + strftime("%Y-%m-%d %H:%M:%S", gmtime()))[:12]
sm.colors = []
return sm
|
import urllib.request
import sys
from lxml import html
if len(sys.argv) < 2:
print('Usage example: python fetch_html.py https://github.com')
sys.exit(1)
url = sys.argv[1]
response = urllib.request.urlopen(url)
html_text = response.read().decode('UTF-8')
text = html.fromstring(html_text).text_content()
print(text)
|
import streamlit as st
from pyuba.calc.bayesian import Bayesian
from pyuba.calc.frequentist import Frequentist
from pyuba.calc.utils import create_plotly_table
def local_css(file_name: str) -> str:
with open(file_name) as f:
st.markdown("<style>{}</style>".format(f.read()), unsafe_allow_html=True)
def draw_abtest():
local_css("style/style.css")
st.markdown(
"""
AB test calculator
Performs an AB test using the Bayesian and Frequentist approaches.
👈 The menu settings are in the sidebar on the side.
"""
)
# Sidebar
st.sidebar.markdown(
"""
## Approach
"""
)
method = st.sidebar.radio("Bayesian vs. Frequentist", ["Bayesian", "Frequentist"])
st.sidebar.markdown(
"""
## Test data
"""
)
visitors_A = st.sidebar.number_input("Visitors A", value=50000, step=100)
conversions_A = st.sidebar.number_input("Conversions A", value=1500, step=10)
visitors_B = st.sidebar.number_input("Visitors B", value=50000, step=100)
conversions_B = st.sidebar.number_input("Conversions B", value=1560, step=10)
st.sidebar.markdown(
"""
## Frequentist settings
"""
)
alpha_input = 1 - st.sidebar.slider(
"Significance level", value=0.95, min_value=0.5, max_value=0.99
)
tails_input = st.sidebar.selectbox(
"One vs. two tail", ["One-tail", "Two-tail"], index=1
)
if tails_input == "One-tail":
two_tails_bool = False
else:
two_tails_bool = True
b = Bayesian(visitors_A, conversions_A, visitors_B, conversions_B)
# Bayesian Method
if method == "Bayesian":
st.markdown(
"""
This is the Bayesian code.
```python
from pyuba.calc.bayesian import Bayesian
visitors_A = 50000
conversions_A = 1500
visitors_B = 50000
conversions_B = 1560
b = Bayesian(visitors_A, conversions_A, visitors_B, conversions_B)
b.generate_posterior_samples()
b.calculate_probabilities()
fig = b.plot_bayesian_probabilities()
fig.show()
```
"""
)
try:
b.generate_posterior_samples()
b.calculate_probabilities()
fig = b.plot_bayesian_probabilities()
st.write(fig)
st.text("")
bayesian_data = {
"<b>Variant</b>": ["A", "B"],
"<b>Visitors</b>": [f"{b.visitors_A:,}", f"{b.visitors_B:,}"],
"<b>Conversions</b>": [b.conversions_A, b.conversions_B],
"<b>Conversion rate</b>": [
f"{b.control_cr:.2%}",
f"{b.variant_cr:.2%}",
],
"<b>Uplift</b>": ["", f"{b.relative_difference:.2%}"],
"<b>Likelihood of being better</b>": [
f"{b.prob_A:.2%}",
f"{b.prob_B:.2%}",
],
}
fig = create_plotly_table(bayesian_data)
st.plotly_chart(fig)
"""
The below graph plots the simulated difference between the two
posterior distributions for the variants. It highlights the potential
range of difference between the two variants. More data will reduce
the range.
"""
st.text("")
fig = b.plot_simulation_of_difference()
st.write(fig)
"""
---
### Recommended Reading
* [Bayesian Methods for Hackers by Cameron Davidson-Pilon]\
(https://github.com/CamDavidsonPilon/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers)
* [Bayesian AB test calculator by AB Testguide]\
(https://abtestguide.com/bayesian/)
* [Beta distribution Wikipedia]\
(https://en.wikipedia.org/wiki/Beta_distribution)
"""
except ValueError:
t = """
<img class='error'
src='https://www.flaticon.com/svg/static/icons/svg/595/595067.svg'>
"""
st.markdown(t, unsafe_allow_html=True)
"""
An error occured, please check the test data input and try again.
For Bayesian calculations, the conversion rate must be between 0 and
1.
"""
else: # Frequentist
st.markdown(
"""
This is the code for the Frequentist approach.
```python
from pyuba.calc.frequentist import Frequentist
f = Frequentist(
visitors_A,
conversions_A,
visitors_B,
conversions_B,
alpha=alpha_input,
two_tails=two_tails_bool,
)
z_score, p_value = f.z_test()
```
"""
)
f = Frequentist(
visitors_A,
conversions_A,
visitors_B,
conversions_B,
alpha=alpha_input,
two_tails=two_tails_bool,
)
z_score, p_value = f.z_test()
power = f.get_power()
if p_value < alpha_input:
t = """
<h3 class='frequentist_title'>Significant</h3>
<img class='frequentist_icon'
src='https://www.flaticon.com/svg/static/icons/svg/1533/1533913.svg'>
"""
st.markdown(t, unsafe_allow_html=True)
if f.relative_difference < 0:
t = (
"""
<p>B's conversion rate is <span class='lower'>"""
+ "{:.2%}".format(abs(f.relative_difference))
+ """ lower</span> than A's CR."""
)
st.markdown(t, unsafe_allow_html=True)
else:
t = (
"""
<p>B's conversion rate is <span class='higher'>"""
+ "{:.2%}".format(abs(f.relative_difference))
+ """ higher</span> than A's CR."""
)
st.markdown(t, unsafe_allow_html=True)
f"""
You can be {1-alpha_input:.0%} confident that the result is true and
due to the changes made. There is a {alpha_input:.0%} chance that the result
is a false positive or type I error meaning the result is due to
random chance.
"""
else:
t = """
<h3 class='frequentist_title'>Not significant</h3>
<img class='frequentist_icon'
src='https://www.flaticon.com/svg/static/icons/svg/1533/1533919.svg'>
"""
st.markdown(t, unsafe_allow_html=True)
f"""
There is not enough evidence to prove that there is a
{f.relative_difference:.2%} difference in the conversion rates between
variants A and B.
"""
"""
Either collect more data to achieve greater precision in your test,
or conclude the test as inconclusive.
"""
st.markdown(
"""
Shows the frequentist data.
```python
from plotly.offline import iplot
frequentist_data = {
"<b>Variant</b>": ["A", "B"],
"<b>Visitors</b>": [f"{f.visitors_A:,}", f"{f.visitors_B:,}"],
"<b>Conversions</b>": [f.conversions_A, f.conversions_B],
"<b>Conversion rate</b>": [
f"{f.control_cr:.2%}",
f"{f.variant_cr:.2%}",
],
"<b>Uplift</b>": ["", f"{f.relative_difference:.2%}"],
"<b>Power</b>": ["", f"{power:.4f}"],
"<b>Z-score</b>": ["", f"{z_score:.4f}"],
"<b>P-value</b>": ["", f"{p_value:.4f}"],
}
fig = create_plotly_table(frequentist_data)
iplot(fig)
f.plot_test_visualisation()
```
"""
)
frequentist_data = {
"<b>Variant</b>": ["A", "B"],
"<b>Visitors</b>": [f"{f.visitors_A:,}", f"{f.visitors_B:,}"],
"<b>Conversions</b>": [f.conversions_A, f.conversions_B],
"<b>Conversion rate</b>": [
f"{f.control_cr:.2%}",
f"{f.variant_cr:.2%}",
],
"<b>Uplift</b>": ["", f"{f.relative_difference:.2%}"],
"<b>Power</b>": ["", f"{power:.4f}"],
"<b>Z-score</b>": ["", f"{z_score:.4f}"],
"<b>P-value</b>": ["", f"{p_value:.4f}"],
}
fig = create_plotly_table(frequentist_data)
st.plotly_chart(fig)
z = f.get_z_value()
"""
According to the null hypothesis, there is no difference between the means.
The plot below shows the distribution of the difference of the means that
we would expect under the null hypothesis.
"""
f.plot_test_visualisation()
if p_value < alpha_input:
f"""
The shaded areas cover {alpha_input:.0%} of the distribution. It is
because the observed mean of the variant falls into this area that we
can reject the null hypothesis with {1-alpha_input:.0%} confidence.
"""
else:
f"""
The shaded areas cover {alpha_input:.0%} of the distribution. It is
because the observed mean of the variant does not fall into this area that
we are unable to reject the null hypothesis and get a significant
result. A difference of greater than
{f.se_difference*z/f.control_cr:.2%} is needed.
"""
"""
#### Statistical Power
"""
f"""
Power is a measure of how likely we are to detect a difference when there
is one with 80% being the generally accepted threshold for statistical
validity. **The power for your test is {power:.2%}**
"""
f"""
An alternative way of defining power is that it is our likelihood of
avoiding a Type II error or a false negative. Therefore the inverse of
power is 1 - {power:.2%} = {1-power:.2%} which is our likelihood of a
type II error.
"""
st.markdown(
"""
```python
from plotly.offline import iplot
fig = f.plot_power()
iplot(fig)
```
"""
)
fig = f.plot_power()
st.write(fig)
"""
---
### Recommended reading
* [Z-test Wikipedia](https://en.wikipedia.org/wiki/Z-test)
* [The Math Behind AB Testing by Nguyen Ngo]\
(https://towardsdatascience.com/the-math-behind-a-b-testing-with-example-code-part-1-of-2-7be752e1d06f)
* [AB test calculator by AB Testguide](https://www.abtestguide.com/calc/)
"""
"""
### See also
* [Sample size calculator](https://abtestsamplesize.herokuapp.com/)
* [Github Repository](https://github.com/rjjfox/ab-test-calculator)
"""
|
guest_name = input()
residence_host_name = input()
letters_in_pile = input()
def get_letters_count_dict(text):
letters_counts = dict()
for letter in text:
if letter in letters_counts:
letters_counts[letter] += 1
else:
letters_counts[letter] = 1
return letters_counts
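# A shorter equivalent using the standard library (illustrative sketch only;
# the comparison performed below is unchanged):
#   from collections import Counter
#   print('YES' if Counter(letters_in_pile) == Counter(guest_name + residence_host_name) else 'NO')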
names_letters_dict = get_letters_count_dict(guest_name + residence_host_name)
letters_in_pile_dict = get_letters_count_dict(letters_in_pile)
if letters_in_pile_dict == names_letters_dict:
print('YES')
else:
print('NO')
|
#Copyright (C) Practica Ana Sollars & Co.
#Permission is granted to copy, distribute and/or modify this document
#under the terms of the GNU Free Documentation License, Version 1.3
#or any later version published by the Free Software Foundation;
#with no Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts.
#A copy of the license is included in the section entitled "GNU
#Free Documentation License"
#<Authors: Ana Mª Sollars Castellanos>
#list of companies
#company --> list of drugs
import http.server
import json
import http.client
class OpenFDAClient():
OPENFDA_API_URL = "api.fda.gov"
OPENFDA_API_EVENT = "/drug/event.json"
def get_event(self, limit): #--> connected to ***
conn = http.client.HTTPSConnection(self.OPENFDA_API_URL)
conn.request("GET", self.OPENFDA_API_EVENT + "?limit=" + limit)
r1 = conn.getresponse()
data1 = r1.read()
data2 = data1.decode("utf8") #bytes a string
event = data2
return event
def get_search(self, drug):
conn = http.client.HTTPSConnection(self.OPENFDA_API_URL)
conn.request("GET", self.OPENFDA_API_EVENT + '?search=patient.drug.medicinalproduct='+drug+'&limit=10')
r1 = conn.getresponse()
data1 = r1.read()
data2 = data1.decode("utf8")
event = data2
return event
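# Example of the request built above (illustrative comment, not in the
# original): for drug "ASPIRIN" the GET path becomes
#   /drug/event.json?search=patient.drug.medicinalproduct=ASPIRIN&limit=10
# sent to api.fda.gov over HTTPS.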
def get_company_drug(self, comp):
conn = http.client.HTTPSConnection(self.OPENFDA_API_URL)
conn.request("GET", self.OPENFDA_API_EVENT + '?search=companynumb='+comp+'&limit=10')
r1 = conn.getresponse()
data1 = r1.read()
data2 = data1.decode("utf8")
event = data2
return event
class OpenFDAParser():
def get_medicinalproduct(self,limit):
client = OpenFDAClient()
event = client.get_event(limit)
event2= json.loads(event)
results= event2["results"]
for i in results:
patient= i["patient"]
drug= patient["drug"]
med_list= []
for i in range(int(limit)):
patient= results[i]["patient"]
medicinal= patient["drug"][0]["medicinalproduct"]
med_list.append(medicinal)
return med_list
def get_company_list(self, limit):
client= OpenFDAClient()
event = client.get_event(limit)
event2= json.loads(event)
results= event2["results"]
med_list= []
for i in results:
companynumb= i["companynumb"]
med_list.append(companynumb)
return med_list
def parser_get_search(self,event):
event2= json.loads(event)
results= event2["results"]
company_list= []
for i in results:
company= i["companynumb"]
company_list.append(company)
return company_list
def get_gender_list(self, limit):
client = OpenFDAClient()
event = client.get_event(limit)
event2= json.loads(event)
results= event2["results"]
sex_list=[]
listGender=[]
for i in results:
patient = i["patient"]
patient_sex= patient["patientsex"]
sex_list.append(patient_sex)
for i in sex_list:
if i == "1":
listGender.append("Female")
elif i == "2":
listGender.append("Male")
return listGender
def parser_get_company_drug(self,event):
event2= json.loads(event)
results= event2["results"]
drug_list=[]
for i in results:
companynumb = i["companynumb"]
patient= i["patient"]
medicinal= patient["drug"][0]["medicinalproduct"]
drug_list.append(medicinal)
return drug_list
class OpenFDAHTML():
def get_main_page(self):
html = """
<html>
<head>
<link rel="shortcut icon" href="https://b64459531885200b3efb-5206a7b3a50a3f5974248375cd863061.ssl.cf1.rackcdn.com/favicon-new.ico">
<title>OpenFDA Cool App</title>
<DIV ALIGN=center>
<IMG SRC="https://pbs.twimg.com/profile_images/701113332183371776/57JHEzt7.jpg" width="400" height="200" alt="correo">
</DIV>
<style type= "text/css">
.button{
text-decoration: none;
padding: 3px;
padding-left: 10px;
padding-right: 10px;
font-family: Helvetica Neue;
font-weight: 300;
font-size: 15px;
font-style: bold;
color: blue;
background-color: #99CCFF;
border-radius: 15px;
border: 3px double blue;
}
.boton_1:hover{
opacity: 0.6;
text-decoration: none;
}
</style>
</head>
<body>
<DIV ALIGN=center>
<h1>
<FONT FACE="arial" SIZE=8 COLOR=><u>OpenFDA Client</u></FONT>
</h1>
<form method="get" action="listDrugs">
<input class="button" type="submit" value="Drug List: Send to OpenFDA">
</input>
limit:<input type="text" name="limit">
</input>
</form>
<form method="get" action="listCompanies">
<input class="button" type="submit" value="Company List: Send to OpenFDA">
</input>
limit:<input type="text" name="limit">
</input>
</form>
<form method="get" action="searchDrug">
<input type="text" name="drug">
<input class="button" type="submit" value="Drug Search: Send to OpenFDA">
</input>
</form>
<form method="get" action="searchCompany">
<input type="text" name="company">
<input class="button" type="submit" value="Company Search: Send to OpenFDA">
</input>
</form>
<form method="get" action="listGender">
<input type="text" name="limit">
<input class="button" type="submit" value="Gender">
</input>
</form>
</DIV>
</body>
</html>
"""
return html
def get_drug(self,drug):
client = OpenFDAClient()
parser = OpenFDAParser()
event = client.get_search(drug)
items= parser.parser_get_search(event)
list = self.write_html(items)
return list
def get_second_page(self,limit):
parser = OpenFDAParser()
items= parser.get_medicinalproduct(limit)
list = self.write_html(items)
return list
def get_third_page(self,limit):
parser = OpenFDAParser()
items= parser.get_company_list(limit)
list = self.write_html(items)
return list
def get_company_html(self,comp):
client = OpenFDAClient()
parser = OpenFDAParser()
event= client.get_company_drug(comp)
items= parser.parser_get_company_drug(event)
list = self.write_html(items)
return list
def get_patient_sex(self,limit):
parser = OpenFDAParser()
items= parser.get_gender_list(limit)
list= self.write_html(items)
return list
def get_error_page(self):
list = """
<html>
<head>
<body>
<h1>Error 404</h1>
<body>
</head>
<body>
Page not found
</body>
</html>
"""
return list
def write_html(self,items):
list = """
<html>
<head></head>
<body>
<ol>
"""
for element in items:
list += "<li>" +element+ "</li>"
list += """
</ol>
</body>
</html>
"""
return list
# HTTPRequestHandler class
class testHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
""" class that manages the HTTP request from web clients """
def execute(self,html):
self.send_header('Content-type','text/html')
self.end_headers()
self.wfile.write(bytes(html, "utf8"))
# GET
def do_GET(self):
# Write content as utf-8 data
if self.path == "/":
# Get main page
HTML = OpenFDAHTML()
self.send_response(200)
html = HTML.get_main_page()
self.execute(html)
elif "/listDrugs?limit=" in self.path:
HTML = OpenFDAHTML()
self.send_response(200)
web= self.path.split("=")
limit= web[-1]
html= HTML.get_second_page(limit)
self.execute(html)
elif "/listCompanies?limit=" in self.path:
HTML = OpenFDAHTML()
self.send_response(200)
web= self.path.split("=")
limit= web[-1]
html= HTML.get_third_page(limit)
self.execute(html)
elif '/searchDrug?drug=' in self.path:
HTML = OpenFDAHTML()
self.send_response(200)
web= self.path.split("=")
drug= web[-1]
html= HTML.get_drug(drug)
self.execute(html)
elif '/searchCompany?company=' in self.path:
HTML = OpenFDAHTML()
self.send_response(200)
web= self.path.split("=")
comp= web[-1]
html= HTML.get_company_html(comp)
self.execute(html)
elif "/listGender?limit=" in self.path:
HTML = OpenFDAHTML()
self.send_response(200)
web= self.path.split("=")
limit= web[-1]
html= HTML.get_patient_sex(limit)
self.execute(html)
elif "/secret" in self.path:
self.send_response(401)
self.send_header('WWW-Authenticate','Basic realm="User Visible Realm"')
self.end_headers()
elif "/redirect" in self.path:
self.send_response(302)
self.send_header('Location', 'http://localhost:8000/')
self.end_headers()
else:
HTML = OpenFDAHTML()
self.send_response(404)
html= HTML.get_error_page()
self.execute(html)
return
|
#!/usr/bin/env python
"""
From bluerov_ros_playground respository (https://github.com/patrickelectric/bluerov_ros_playground)
Credits: patrickelectric
"""
import cv2
import rospy
import time
try:
import pubs
import subs
import video
except:
import bluerov.pubs as pubs
import bluerov.subs as subs
import bluerov.video as video
from geometry_msgs.msg import TwistStamped
from mavros_msgs.srv import CommandBool
from sensor_msgs.msg import JointState, Joy
from sensor_msgs.msg import BatteryState
from mavros_msgs.msg import OverrideRCIn, RCIn, RCOut
#from uuv_gazebo_ros_plugins_msgs.msg import FloatStamped
class Code(object):
"""Class to provide user access
Attributes:
cam (Video): Video object, get video stream
pub (Pub): Pub object, do topics publication
sub (Sub): Sub object, subscribe in topics
"""
def __init__(self):
super(Code, self).__init__()
# Do what is necessary to start the process
# and to leave gloriously
self.arm()
self.sub = subs.Subs()
self.pub = pubs.Pubs()
self.pub.subscribe_topic('/mavros/rc/override', OverrideRCIn)
self.pub.subscribe_topic('/mavros/setpoint_velocity/cmd_vel', TwistStamped)
# Thrusters Input
#self.pub.subscribe_topic('/bluerov2/thrusters/0/input', FloatStamped)
#self.pub.subscribe_topic('/bluerov2/thrusters/1/input', FloatStamped)
#self.pub.subscribe_topic('/bluerov2/thrusters/2/input', FloatStamped)
#self.pub.subscribe_topic('/bluerov2/thrusters/3/input', FloatStamped)
#self.pub.subscribe_topic('/bluerov2/thrusters/4/input', FloatStamped)
#self.pub.subscribe_topic('/bluerov2/thrusters/5/input', FloatStamped)
self.sub.subscribe_topic('/joy', Joy)
self.sub.subscribe_topic('/mavros/battery', BatteryState)
self.sub.subscribe_topic('/mavros/rc/in', RCIn)
self.sub.subscribe_topic('/mavros/rc/out', RCOut)
#self.cam = None
#try:
#video_udp_port = rospy.get_param("/user_node/video_udp_port")
#rospy.loginfo("video_udp_port: {}".format(video_udp_port))
#self.cam = video.Video(video_udp_port)
#except Exception as error:
#rospy.loginfo(error)
#self.cam = video.Video()
def arm(self):
""" Arm the vehicle and trigger the disarm
"""
rospy.wait_for_service('/mavros/cmd/arming')
#self.arm_service = rospy.ServiceProxy('/mavros/cmd/arming', CommandBool)
#self.arm_service(True)
# Disarm is necessary when shutting down - not working, i think
#rospy.on_shutdown(self.disarm)
@staticmethod
def pwm_to_thrust(pwm):
"""Transform pwm to thruster value is in UUV Simulator. Here we give an offset to positive and negative values.
The equation come from:
http://docs.bluerobotics.com/thrusters/#t100-thruster-specifications
In our case we are using T100 Thrusters on BlueROV2
Args:
pwm (int): pwm value
Returns:
int: pwm value offset to a positive or negative value
"""
# PWM to Forward
if pwm > 1500:
pwm = pwm - 1500
# PWM to Backward
elif pwm < 1500:
pwm = pwm - 1500
# PWM to STOP
else:
pwm = 0
return pwm
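# Worked examples of the offset above (comments added for clarity):
#   Code.pwm_to_thrust(1600) -> 100    (forward)
#   Code.pwm_to_thrust(1400) -> -100   (backward)
#   Code.pwm_to_thrust(1500) -> 0      (stop)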
def run(self):
"""Run user code
"""
while not rospy.is_shutdown():
time.sleep(0.1)
# Try to get data
#try:
#rospy.loginfo(self.sub.get_data()['mavros']['battery']['voltage'])
#rospy.loginfo(self.sub.get_data()['mavros']['rc']['in']['channels'])
#rospy.loginfo(self.sub.get_data()['mavros']['rc']['out']['channels'])
#except Exception as error:
#print('Get data error:', error)
try:
# Get joystick data
joy = self.sub.get_data()['joy']['axes']
# rc run between 1100 and 2000, a joy command is between -1.0 and 1.0
#override = [int(val*400 + 1500) for val in joy]
#for _ in range(len(override), 8):
# override.append(0)
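# Worked example of the mapping used below (comment added for clarity):
# int(val * 400 + 1500) sends joy = -1.0 as 1100, joy = 0.0 as 1500 (neutral)
# and joy = 1.0 as 1900, staying inside the 1100-2000 RC range noted above.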
# Pitch, Roll
override = [1500, 1500]
# Throttle
override.append(int(joy[1]*400 + 1500))
# Yaw
override.append(int(joy[2]*400 + 1500))
# Forward
override.append(int(joy[3]*400 + 1500))
# Lateral
override.append(int(joy[0]*400 + 1500))
# Camera Pan
override.append(1500)
# Camera Tilt
override.append(int(joy[5]*400 + 1500))
# Get Buttons data
buttons = self.sub.get_data()['joy']['buttons']
# Lights 1 Level
#if buttons[5] > 0:
# override.append(1700)
#elif buttons[7] > 0:
# override.append(1500)
# override.append(1300) override len is 8!!!
# Send joystick data as rc output into rc override topic
# (fake radio controller)
self.pub.set_data('/mavros/rc/override', override)
except Exception as error:
print('joy error:', error)
#try:
# Get pwm output and send it to Gazebo model
#rc = self.sub.get_data()['mavros']['rc']['out']['channels']
# Variable object type of
#_input = FloatStamped()
# Array size of rc
#_input.header.stamp = rospy.Time.now()
#_input.header.seq = 1
#_input.data = self.pwm_to_thrust(rc[0]) # [self.pwm_to_thrust(pwm) for pwm in rc] #
# Send Thrusters Input FloatStamped
#self.pub.set_data('/bluerov2/thrusters/0/input', _input)
#_input.data = self.pwm_to_thrust(rc[1])
#self.pub.set_data('/bluerov2/thrusters/1/input', _input)
#_input.data = self.pwm_to_thrust(rc[2])
#self.pub.set_data('/bluerov2/thrusters/2/input', _input)
#_input.data = self.pwm_to_thrust(rc[3])
#self.pub.set_data('/bluerov2/thrusters/3/input', _input)
#_input.data = self.pwm_to_thrust(rc[4])
#self.pub.set_data('/bluerov2/thrusters/4/input', _input)
#_input.data = self.pwm_to_thrust(rc[5])
#self.pub.set_data('/bluerov2/thrusters/5/input', _input)
#except Exception as error:
#print('rc error:', error)
#try:
#if not self.cam.frame_available():
# continue
# Show video output
#frame = self.cam.frame()
#cv2.imshow('frame', frame)
#cv2.waitKey(1)
#except Exception as error:
#print('imshow error:', error)
def disarm(self):
self.arm_service(False)
if __name__ == "__main__":
try:
rospy.init_node('user_node', log_level=rospy.DEBUG)
#rate = rospy.Rate(10) # 10hz
except rospy.ROSInterruptException as error:
print('pubs error with ROS: ', error)
exit(1)
code = Code()
code.run()
#rate.sleep()
|
# development
import cv2
from maskdetector.mask_detector import MaskDetector
from flask import Flask, render_template
from flask_socketio import SocketIO
from flask_cors import CORS, cross_origin
app = Flask(__name__)
app.config['SECRET_KEY'] = 'secret!'
CORS(app)
socketio = SocketIO(app, cors_allowed_origins="*")
## initialize objects
mask_detector = MaskDetector()
def live_feed():
# load the video stream
cap= cv2.VideoCapture(0)
while True:
# read the camera frame
success, frame = cap.read()
frame=cv2.resize(frame,(720,1280))
(H, W) = frame.shape[:2]
# if frame is not empty
if(success):
# pass frame to mask_detector and return labels and corresponding bounding boxes
labels,bboxes = mask_detector.detect(frame)
print(bboxes)
print(labels)
if(len(labels)>0):
#if a person is wearing mask emit true
if(labels[0]=="mask"):
print("Person is wearing mask")
socketio.emit('maskDetection', {'mask_detected': True,'bb':bboxes[0]})
#if a person is not wearing a mask emit false
else:
print("Person not wearing mask")
socketio.emit('maskDetection', {'mask_detected': False,'bb':bboxes[0]})
# draw bounding box
x1,y1,x2,y2=bboxes[0]
cv2.rectangle(frame, (x1, y1),(x2, y2), (255,0,0), 2)
cv2.rectangle(frame, ( W//3,H//3),((W//3)+(W//3),(H//3)+(H//3)), (0,255,0), 2)
# show the output frame
frame=cv2.resize(frame,(640,480))
cv2.imshow("Frame", frame)
key = cv2.waitKey(1) & 0xFF
# if the `q` key was pressed, break from the loop
if key == ord("q"):
break
if __name__ == '__main__':
socketio.run(app,debug=True)
live_feed()
|
#!/usr/bin/env python3
from traceback import format_exception
def error_logging(path):
return ErrorLoggingContext(path)
class ErrorLoggingContext:
def __init__(self, path):
self.path = path
def __enter__(self):
return self
def __exit__(self,
exception_type,
exception_value,
traceback):
# Nothing to log on a clean exit; also skip non-Exception exits such as KeyboardInterrupt
if exception_type is None or not issubclass(exception_type, Exception):
return
tb_list = format_exception(exception_type,
exception_value,
traceback,
100)
tb_string = "".join(tb_list)
with open(self.path, "a") as f:
f.write(tb_string)
f.write(f"{'-'*10}\n")
print(f"Exception logged to {self.path}.")
|
from itertools import permutations
def print_permutations(string):
for perm in permutations(string):
print("".join(perm))
def main():
print_permutations("asdf")
if __name__ == '__main__':
main()
|
import logging
import traceback
from functools import wraps
def try_catch_with_logging(default_response=None):
def out_wrapper(func):
@wraps(func)
def wrapper(*args, **kwargs):
try:
res = func(*args, **kwargs)
except Exception:
res = default_response
logging.error(traceback.format_exc())
return res
return wrapper
return out_wrapper
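# Usage sketch (added illustration; load_config is a hypothetical example and
# assumes `import json`):
#
#   @try_catch_with_logging(default_response={})
#   def load_config(path):
#       with open(path) as f:
#           return json.load(f)
#
#   load_config("missing.json")  # logs the traceback and returns {}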
def todict(obj, classkey=None):
if isinstance(obj, dict):
data = {}
for (k, v) in obj.items():
data[k] = todict(v, classkey)
return data
elif hasattr(obj, "_ast"):
return todict(obj._ast())
elif isinstance(obj, (list, set, tuple)):
return [todict(v, classkey) for v in obj]
elif hasattr(obj, "__dict__"):
data = dict([(key, todict(value, classkey))
for key, value in obj.__dict__.items()
if not callable(value) and not key.startswith('_')])
if classkey is not None and hasattr(obj, "__class__"):
data[classkey] = obj.__class__.__name__
return data
else:
return obj
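# Example of the conversion above (illustrative comment, not in the original):
# an instance whose __dict__ is {"name": "a", "_cache": ...} becomes {"name": "a"};
# callable attribute values and keys starting with "_" are dropped, and passing
# classkey="type" additionally records the class name under the "type" key.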
class ShowTime(object):
'''
Time a code block with a context manager.
'''
import time, logging
time = time
logging = logging
def __init__(self, prefix="", ipdb=False):
self.prefix = prefix
self.ipdb = ipdb
def __enter__(self):
self.t1 = self.time.time()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.runtime = self.time.time() - self.t1
print("{} take time: {:.2f} s".format(self.prefix, self.runtime))
if exc_type is not None:
print(exc_type, exc_val, exc_tb)
import traceback
print(traceback.format_exc())
if self.ipdb:
import ipdb
ipdb.set_trace()
return self
if __name__ == "__main__":
with ShowTime("hello", ipdb=False) as g:
print("*** g.runtime: {}".format(getattr(g, "runtime", "")))
import time
time.sleep(2)
raise ValueError(0) # ignored: __exit__ returns self (truthy), so the exception is suppressed
# print(g.t1)
# raise ValueError(0)
g = ShowTime("hello", ipdb=True)
with g:
import time
time.sleep(3)
raise ValueError("222")
|
#!/usr/bin/env python3
# coding=utf-8
import matplotlib.pyplot as plt
import numpy as np
x = np.arange(6)
y = np.arange(5)
z = x * y[:, np.newaxis]
for i in range(5):
if i == 0:
p = plt.imshow(z)
fig = plt.gcf()
plt.clim() # clamp the color limits
plt.title("Boring slide show")
else:
z = z + 2
p.set_data(z)
print("step", i)
plt.pause(0.5)
|
from functools import partial
import logging
from multiprocessing import get_context, log_to_stderr
import numpy as np
import os
from ray_tracer.canvas import Canvas, overlay
from ray_tracer.rays import Ray
from ray_tracer.transformations import Transformation
from ray_tracer.tuples import Point, normalize
class Camera:
def __init__(self, hsize, vsize, field_of_view):
self.hsize = hsize
self.vsize = vsize
self.field_of_view = field_of_view
self.transform = Transformation(np.eye(4))
half_view = np.round(np.tan(field_of_view/2), 5)
aspect = hsize / vsize
if aspect >= 1:
self.half_width = half_view
self.half_height = half_view / aspect
else:
self.half_height = half_view
self.half_width = half_view * aspect
self.pixel_size = (self.half_width * 2) / hsize
self.logger = None
def ray_for_pixel(self, x, y):
xoffset = (x + .5) * self.pixel_size
yoffset = (y + .5) * self.pixel_size
world_x = self.half_width - xoffset
world_y = self.half_height - yoffset
pixel = self.transform.inverse() * Point(world_x, world_y, -1)
origin = self.transform.inverse() * Point(0, 0, 0)
direction = normalize(pixel - origin)
return Ray(origin, direction)
def _process_slice(self, world, y_range):
image = Canvas(self.hsize, self.vsize)
total_cols = len(y_range)
for y in y_range:
percent_completed = int(100 * (y-y_range[0])/total_cols)
self.logger.info(f"{percent_completed}% completed")
for x in range(0, self.hsize - 1):
ray = self.ray_for_pixel(x, y)
color = world.color_at(ray)
image.write_pixel(x, y, color)
return image
def _render_multiprocess(self, world):
self.logger = log_to_stderr()
self.logger.setLevel(logging.INFO)
num_cores = os.cpu_count()
y_range = np.arange(0, self.vsize - 1)
splits = np.array_split(y_range, num_cores)
with get_context("spawn").Pool() as pool:
fp = partial(self._process_slice, world)
images = pool.map(fp, splits)
return overlay(*images)
def _render_single_process(self, world):
self.logger = logging.getLogger(__name__)
y_range = np.arange(0, self.vsize - 1)
image = self._process_slice(world, y_range)
return image
def render(self, world):
return self._render_multiprocess(world)
# return self._render_single_process(world)
def ray_for_pixel(camera, x, y):
return camera.ray_for_pixel(x, y)
def render(camera, world):
return camera.render(world)
# Copyright 2020 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# -*- coding: cp936 -*-
# -*- coding: utf-8 -*-
import sys
import os
import glob
import Engine
# package cmd
# pyinstaller -F ./ImageEngine.py
# gat image input path
inputPath = sys.argv[1] if len(sys.argv) > 1 else "/"
# get image.swift output path
outputPath = sys.argv[2] if len(sys.argv) > 2 else "/"
# move os workplace
os.chdir(inputPath)
fileList = []
class ImageFloderParser:
def __init__(self):
pass
def parseFloder(self, targetPath):
print(targetPath)
for path in targetPath:
print("parse ->", path)
isImageset = ".imageset" in path
if isImageset:
print("is image ->", path)
fileName = path.replace('.imageset', '')
fileList.append(fileName)
elif os.path.isdir(path):
print("is floder ->", path)
self.parseFloder(os.listdir(path))
else:
print("nothing at all ->", path)
continue
ImageFloderParser().parseFloder(os.listdir(inputPath))
engine = Engine.UIImageEngine()
engine.imageList = sorted(fileList, key=lambda s: s.lower())
engine.output(outputPath)
print("file count -> ", len(fileList))
|
# Copyright 2019 Intel, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pecan
import wsme
from wsme import types as wtypes
from oslo_log import log
from cyborg.api.controllers import base
from cyborg.api.controllers import link
from cyborg.api.controllers import types
from cyborg.api import expose
from cyborg.common import policy
from cyborg import objects
LOG = log.getLogger(__name__)
class Device(base.APIBase):
"""API representation of a device.
This class enforces type checking and value constraints, and converts
between the internal object model and the API representation.
"""
uuid = types.uuid
"""The UUID of the device"""
type = wtypes.text
"""The type of the device"""
vendor = wtypes.text
"""The vendor of the device"""
model = wtypes.text
"""The model of the device"""
std_board_info = wtypes.text
"""The standard board info of the device"""
vendor_board_info = wtypes.text
"""The vendor board info of the device"""
hostname = wtypes.text
"""The hostname of the device"""
links = wsme.wsattr([link.Link], readonly=True)
"""A list containing a self link"""
def __init__(self, **kwargs):
super(Device, self).__init__(**kwargs)
self.fields = []
for field in objects.Device.fields:
self.fields.append(field)
setattr(self, field, kwargs.get(field, wtypes.Unset))
@classmethod
def convert_with_links(cls, obj_device):
api_device = cls(**obj_device.as_dict())
api_device.links = [
link.Link.make_link('self', pecan.request.public_url,
'devices', api_device.uuid)
]
return api_device
class DeviceCollection(base.APIBase):
"""API representation of a collection of devices."""
devices = [Device]
"""A list containing Device objects"""
@classmethod
def convert_with_links(cls, devices):
collection = cls()
collection.devices = [Device.convert_with_links(device)
for device in devices]
return collection
class DevicesController(base.CyborgController):
"""REST controller for Devices."""
@policy.authorize_wsgi("cyborg:device", "get_one")
@expose.expose(Device, wtypes.text)
def get_one(self, uuid):
"""Get a single device by UUID.
:param uuid: uuid of a device.
"""
context = pecan.request.context
device = objects.Device.get(context, uuid)
return Device.convert_with_links(device)
@policy.authorize_wsgi("cyborg:device", "get_all", False)
@expose.expose(DeviceCollection, wtypes.text, wtypes.text, wtypes.text,
wtypes.ArrayType(types.FilterType))
def get_all(self, type=None, vendor=None, hostname=None, filters=None):
"""Retrieve a list of devices.
:param type: type of a device.
:param vendor: vendor ID of a device.
:param hostname: the hostname of a compute node where the device
locates.
:param filters: a filter of FilterType to get device list by filter.
"""
filters_dict = {}
if type:
filters_dict["type"] = type
if vendor:
filters_dict["vendor"] = vendor
if hostname:
filters_dict["hostname"] = hostname
if filters:
for filter in filters:
filters_dict.update(filter.as_dict())
context = pecan.request.context
obj_devices = objects.Device.list(context, filters=filters_dict)
LOG.info('[devices:get_all] Returned: %s', obj_devices)
return DeviceCollection.convert_with_links(obj_devices)
|
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.mixins import (
LoginRequiredMixin,
UserPassesTestMixin
)
from django.db.models import Exists, OuterRef
from django.http import Http404
from django.shortcuts import get_object_or_404, redirect, render
from django.urls import reverse, reverse_lazy
from django.views.generic import (
CreateView,
DeleteView,
DetailView,
ListView,
UpdateView,
)
from django_pdfkit import PDFView
from . import services
from .context_processors import tags_for_paginator_link
from .forms import RecipeForm
from .models import Recipe, Subscription
User = get_user_model()
class PaginatorRedirectMixin:
"""
Redirect to the last existing page.
Returns the last page if the requested page number exceeds the total
number of paginator pages, or if invalid data was entered manually in
the GET parameter.
"""
def get(self, request, *args, **kwargs):
try:
return super().get(request, *args, **kwargs)
except Http404:
queryset = self.get_queryset()
paginator = self.get_paginator(
queryset,
self.get_paginate_by(queryset),
orphans=self.get_paginate_orphans(),
allow_empty_first_page=self.get_allow_empty(),
)
tags = tags_for_paginator_link(request)['tags_for_paginator_link']
url = reverse(request.resolver_match.url_name)
print(url)
return redirect(f'{url}?page={paginator.num_pages}{tags}')
class IndexView(PaginatorRedirectMixin, ListView):
"""Главная страница со списком всех рецептов сайта."""
context_object_name = 'recipes'
ordering = '-pub_date'
paginate_by = settings.OBJECTS_PER_PAGE
template_name = 'recipes/index.html'
def get_queryset(self):
return services.get_filtered_queryset(self.request)
class RecipeDetailView(DetailView):
"""Страница отдельного рецепта."""
context_object_name = 'recipe'
template_name = 'recipes/singlePage.html'
def get_queryset(self):
queryset = services.get_filtered_queryset(self.request)
return queryset
class ProfileView(PaginatorRedirectMixin, ListView):
"""Страница рецептов отдельного автора."""
context_object_name = 'author_recipes'
ordering = '-pub_date'
paginate_by = settings.OBJECTS_PER_PAGE
template_name = 'recipes/authorRecipe.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
if self.request.user.is_authenticated:
context['author'] = get_object_or_404(
User.objects.annotate(
in_subscriptions=Exists(
Subscription.objects.filter(
author=OuterRef('pk'),
user=self.request.user
)
)
),
id=self.kwargs.get('id')
)
else:
context['author'] = get_object_or_404(
User,
id=self.kwargs.get('id')
)
return context
def get_queryset(self):
author = get_object_or_404(User, id=self.kwargs.get('id'))
queryset = services.get_filtered_queryset(self.request).filter(
author=author
)
return queryset
class SubscriptionsView(LoginRequiredMixin, PaginatorRedirectMixin, ListView):
"""Список авторов, на которых подписан пользователь."""
context_object_name = 'subscriptions'
paginate_by = settings.OBJECTS_PER_PAGE
template_name = 'recipes/myFollow.html'
def get_queryset(self):
return self.request.user.subscriptions.select_related('author')
class RecipePostMixin:
"""Содержит общую логику обработки создания и редактирования рецепта."""
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['request'] = self.request
if self.object:
kwargs['ingredients'] = self.object.ingredients.all()
return kwargs
class CreateRecipeView(LoginRequiredMixin, RecipePostMixin, CreateView):
"""Страница создания нового рецепта."""
context_object_name = 'recipe'
form_class = RecipeForm
template_name = 'recipes/formRecipe.html'
class UpdateRecipeView(
LoginRequiredMixin,
UserPassesTestMixin,
RecipePostMixin,
UpdateView
):
"""Страница редактирования рецепта автором."""
context_object_name = 'recipe'
form_class = RecipeForm
template_name = 'recipes/formRecipe.html'
def get_queryset(self):
return services.get_filtered_queryset(self.request)
def test_func(self):
return self.request.user == self.get_object().author
class FavoritesView(LoginRequiredMixin, PaginatorRedirectMixin, ListView):
"""Список избранных рецептов пользователя."""
context_object_name = 'favorites'
ordering = '-pub_date'
paginate_by = settings.OBJECTS_PER_PAGE
template_name = 'recipes/favorite.html'
def get_queryset(self):
return services.get_filtered_queryset(self.request).filter(
in_favored=True
)
class PurchasesView(LoginRequiredMixin, ListView):
"""Список покупок пользователя."""
context_object_name = 'purchases'
ordering = '-pub_date'
template_name = 'recipes/purchaseList.html'
def get_queryset(self):
purchases = services.get_filtered_queryset(self.request).filter(
in_purchased=True
)
return purchases
class DownloadShoppingList(LoginRequiredMixin, PDFView):
"""Загрузка сформированного файла со списком покупок пользователя."""
template_name = 'recipes/aux/shopping_list.html'
def get_context_data(self, **kwargs):
kwargs = super().get_context_data(**kwargs)
purchase_list = services.make_purchase_list_for_download(
user=self.request.user
)
kwargs.update({'purchase_list': purchase_list})
return kwargs
class DeleteRecipeView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):
    """Confirmation page for deleting a recipe by its author."""
model = Recipe
template_name = 'recipes/deleteRecipe.html'
success_url = reverse_lazy('index')
def test_func(self):
return self.request.user == self.get_object().author
def page_not_found(request, exception):
    """Return the 404 error page."""
return render(request, 'misc/404.html', {'path': request.path}, status=404)
def page_forbidden(request, exception):
    """Return the 403 error page."""
return render(request, 'misc/403.html', {'path': request.path}, status=403)
def server_error(request):
    """Return the 500 error page."""
return render(request, 'misc/500.html', status=500)
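# --- Illustration only (not part of the original module): a minimal sketch of how
# --- these class-based views might be wired into a urls.py. The URL patterns and
# --- most route names below are assumptions; only the 'index' name is implied by
# --- DeleteRecipeView.success_url above.
#
# from django.urls import path
# from . import views
#
# urlpatterns = [
#     path('recipes/<int:pk>/', views.RecipeDetailView.as_view(), name='recipe'),
#     path('authors/<int:id>/', views.ProfileView.as_view(), name='profile'),
#     path('favorites/', views.FavoritesView.as_view(), name='favorites'),
#     path('purchases/', views.PurchasesView.as_view(), name='purchases'),
# ]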
|
# coding: utf-8
import matplotlib.pyplot as plt
import matplotlib.image as image
class Person():
def __init__(self, name, img=None):
self.name = name
if img is not None:
self.img = img
self.partners = [self.name]
def add_partner(self,person):
if person.name not in self.partners:
self.partners.append(person.name)
person.partners.append(self.name)
return True
else:
return False
def clear_partners(self):
self.partners = [self.name]
def remove_last_partner(self):
self.partners = self.partners[:-1]
def check_availability(self,people):
for person in people:
if person.name not in self.partners:
return True
return False
def __eq__(self,person):
try:
return self.name == person.name
except AttributeError:
return False
def __neq__(self,person):
return not self.__eq__(person)
def __repr__(self):
return self.name
class Pool():
def __init__(self, people):
""" Initialize a pool of players. """
self.items = []
self.pairs = []
for person in people:
self.push(person)
def push(self,person):
""" Add a new person to the stack."""
self.items.append(person)
def replace(self, person):
""" Add a person into the stack at the bottom."""
self.items = [person] + self.items
def pop(self):
""" Remove the last person added to the stack."""
return self.items.pop()
def size(self):
""" How many players do we have left."""
return len(self.items)
def empty(self):
""" Return True if everyone has been paired."""
        return self.size() == 0
def shuffle(self):
import random
random.shuffle(self.items)
def compare(self, bachelor, bachelorette):
return bachelorette.name in bachelor.partners
def swap(self,bachelor):
""" Look through the pairs and try to swap partners"""
count = 0
while count < len(self.pairs):
pair = self.pairs.pop()
if pair[0].name not in bachelor.partners and pair[1].check_availability(self.items):
# Try to swap with the first person
pair[0].remove_last_partner()
pair[1].remove_last_partner()
bachelor.add_partner(pair[0])
self.push(pair[1])
return bachelor, pair[0]
if pair[1].name not in bachelor.partners and pair[0].check_availability(self.items):
# Try to swap with the second person
pair[0].remove_last_partner()
pair[1].remove_last_partner()
bachelor.add_partner(pair[1])
self.push(pair[0])
return bachelor, pair[1]
self.pairs = [pair] + self.pairs
count += 1
        # We've exhausted all possible pairs
return None
def pair(self):
bachelor = self.pop()
count = 0
tot = self.size()
while count < tot:
bachelorette = self.pop()
if bachelor.add_partner(bachelorette):
return bachelor, bachelorette
else:
self.replace(bachelorette)
count += 1
# If we've gotten here, we went through the whole stack
return self.swap(bachelor)
def pair_all(self):
self.shuffle()
while self.size() > 0:
res = self.pair()
if res is not None:
self.pairs.append(res)
def pairplot(self,people, axes=None):
num = len(people)
if axes is None:
fig, axes = plt.subplots(1,num)
for ax,person in zip(axes,people):
ax.imshow(image.imread(person.img))
ax.set_title(person.name)
ax.axis('off')
def show_all(self, **kwargs):
figsize = kwargs.pop('figsize',(10,2*len(self.pairs)))
fig,axes = plt.subplots(len(self.pairs),2,figsize=figsize)
for ax,pair in zip(axes,self.pairs):
self.pairplot(pair,axes=ax)
plt.show()
def __repr__(self):
for item in self.items:
print(item)
return '{:d} participants'.format(self.size())
def next_round(people,**kwargs):
pool = Pool(people)
pool.pair_all()
if len(pool.pairs) > 0:
pool.show_all(**kwargs)
else:
print('Everyone has been paired up!')
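# A minimal usage sketch (added for illustration): pair four placeholder players
# without any image plotting. Person images are optional and show_all()/pairplot()
# need them, so this demo only prints the resulting pairs.
if __name__ == '__main__':
    demo_people = [Person(name) for name in ('Ann', 'Ben', 'Cal', 'Dee')]
    demo_pool = Pool(demo_people)
    demo_pool.pair_all()
    print(demo_pool.pairs)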
|
"""
MIT License
Copyright (C) 2021 ROCKY4546
https://github.com/rocky4546
This file is part of Cabernet
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute,
sublicense, and/or sell copies of the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
"""
import json
import logging
import re
import time
import urllib.request
from datetime import datetime
from lib.common.decorators import handle_url_except
from lib.common.decorators import handle_json_except
import lib.common.exceptions as exceptions
import lib.common.utils as utils
class Geo:
def __init__(self, _config_obj, _section):
self.logger = logging.getLogger(__name__)
self.config_obj = _config_obj
self.section = _section
self.geoId = None
self.channelListId = None
self.get_geo()
@handle_json_except
@handle_url_except
def get_geo(self):
"""
Geo info comes from json object on the home page
If the request fails, we will use the last data available in config
"""
if self.config_obj.data[self.section]['geoid'] is not None and \
self.config_obj.data[self.section]['channellistid'] is not None:
self.geoId = self.config_obj.data[self.section]['geoid']
self.channelListId = self.config_obj.data[self.section]['channellistid']
self.logger.debug('Reusing XUMO geoId and channelListId from provider')
else:
geo_url = 'https://www.xumo.tv'
login_headers = {'Content-Type': 'application/json', 'User-agent': utils.DEFAULT_USER_AGENT}
req = urllib.request.Request(geo_url, headers=login_headers)
with urllib.request.urlopen(req, timeout=5) as resp:
results = json.loads(
re.findall(b'__JOBS_REHYDRATE_STATE__=(.+?);</script>', (resp.read()), flags=re.DOTALL)[0])
self.geoId, self.channelListId = results["jobs"]["1"]["data"]["geoId"], results["jobs"]["1"]["data"]["channelListId"]
self.config_obj.write(self.section, 'geoid', self.geoId)
self.config_obj.write(self.section, 'channellistid', self.channelListId)
|
#!/usr/bin/env python
# coding: utf-8
# In[6]:
students = ["Ravi", "Anika", "Aniketh", "Chaitra", "Rashmi"]
print(students[4])
print(students[-2])
# In[9]:
#list slicing
print(students[2:4])
less_students = students[1:3]
print(less_students)
# In[11]:
#fetch except last value
print(students[0:-1])
print(students[:-2])
# In[15]:
#Slicing with strings
print("Hello Ravindra"[5:-4])
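# In[16]:
# Extra illustration (not part of the original notebook): slicing with a step
# and reversing a list with a negative step.
print(students[::2])
print(students[::-1])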
# In[ ]:
|
import platform
from typing import ClassVar, Dict, List
from .. import __version__, command, module, util
OFFICIAL_SUPPORT_LINK = "https://t.me/pyrobud"
class CoreModule(module.Module):
name: ClassVar[str] = "Core"
@command.desc("List the commands")
@command.usage("[filter: command or module name?]", optional=True)
async def cmd_help(self, ctx: command.Context) -> str:
filt = ctx.input
lines: Dict[str, List[str]] = {}
# Handle command filters
if filt and filt not in self.bot.modules:
if filt in self.bot.commands:
cmd = self.bot.commands[filt]
# Generate aliases section
aliases = f"`{'`, `'.join(cmd.aliases)}`" if cmd.aliases else "none"
# Generate parameters section
if cmd.usage is None:
args_desc = "none"
else:
args_desc = cmd.usage
if cmd.usage_optional:
args_desc += " (optional)"
if cmd.usage_reply:
args_desc += " (also accepts replies)"
# Show info card
return f"""`{cmd.name}`: **{cmd.desc if cmd.desc else '__No description provided.__'}**
Module: {cmd.module.name}
Aliases: {aliases}
Expected parameters: {args_desc}"""
else:
return "__That filter didn't match any commands or modules.__"
# Show full help
for name, cmd in self.bot.commands.items():
# Check if a filter is being used
if filt:
# Ignore commands that aren't part of the filtered module
if cmd.module.name != filt:
continue
else:
# Don't count aliases as separate commands
if name != cmd.name:
continue
desc = cmd.desc if cmd.desc else "__No description provided__"
aliases = ""
if cmd.aliases:
aliases = f' (aliases: {", ".join(cmd.aliases)})'
mod_name = type(cmd.module).name
if mod_name not in lines:
lines[mod_name] = []
lines[mod_name].append(f"**{cmd.name}**: {desc}{aliases}")
sections = []
for mod, ln in sorted(lines.items()):
sections.append(f"**{mod}**:\n \u2022 " + "\n \u2022 ".join(ln) + "\n")
return "\n".join(sections)
@command.desc("Get how long this bot has been up for")
async def cmd_uptime(self, ctx: command.Context) -> str:
delta_us = util.time.usec() - self.bot.start_time_us
return f"Uptime: {util.time.format_duration_us(delta_us)}"
    @command.desc("Get or change this bot's prefix")
@command.usage("[new prefix?]", optional=True)
async def cmd_prefix(self, ctx: command.Context) -> str:
new_prefix = ctx.input
if not new_prefix:
return f"The prefix is `{self.bot.prefix}`."
self.bot.prefix = new_prefix
await self.bot.db.put("prefix", new_prefix)
return f"Prefix set to `{self.bot.prefix}`."
@command.desc("Get the link to the official bot support group")
async def cmd_support(self, ctx: command.Context) -> str:
return f"[Join the official bot support group for help.]({OFFICIAL_SUPPORT_LINK})"
@command.desc("Get information about this bot instance")
@command.alias("binfo", "bi")
async def cmd_botinfo(self, ctx: command.Context) -> None:
# Get tagged version and optionally the Git commit
commit = await util.run_sync(util.version.get_commit)
dirty = ", dirty" if await util.run_sync(util.git.is_dirty) else ""
unofficial = ", unofficial" if not await util.run_sync(util.git.is_official) else ""
version = f"{__version__} (<code>{commit}</code>{dirty}{unofficial})" if commit else __version__
# Clean system version
sys_ver = platform.release()
try:
sys_ver = sys_ver[: sys_ver.index("-")]
except ValueError:
pass
# Get current uptime
now = util.time.usec()
uptime = util.time.format_duration_us(now - self.bot.start_time_us)
# Get total uptime from stats module (if loaded)
stats_module = self.bot.modules.get("Stats", None)
get_start_time = getattr(stats_module, "get_start_time", None)
if stats_module is not None and callable(get_start_time):
stats_start_time = await get_start_time()
total_uptime = f"""
\u2022 <b>Total uptime</b>: {util.time.format_duration_us(now - stats_start_time)}"""
else:
total_uptime = ""
# Get total number of chats, including PMs
num_chats = (await self.bot.client.get_dialogs(limit=0)).total
await ctx.respond(
f"""<b><a href="https://github.com/kdrag0n/pyrobud">Pyrobud</a> info:</b>
\u2022 <b>Version</b>: {version}
\u2022 <b>Python</b>: {platform.python_implementation()} {platform.python_version()}
\u2022 <b>System</b>: {platform.system()} {sys_ver}
\u2022 <b>Uptime</b>: {uptime}{total_uptime}
\u2022 <b>Commands loaded</b>: {len(self.bot.commands)}
\u2022 <b>Modules loaded</b>: {len(self.bot.modules)}
\u2022 <b>Listeners loaded</b>: {sum(len(evt) for evt in self.bot.listeners.values())}
\u2022 <b>Events activated</b>: {self.bot.events_activated}
\u2022 <b>Chats</b>: {num_chats}""",
# We use the HTML parse mode to be able to send bolded links
parse_mode="html",
)
|
"""
Portals: useful functions.
"""
from typing import Dict, List, Optional, Tuple, Union
from constants.memkeys import portal_segment_data_key_destination_room_name, portal_segment_data_key_xy_pairs
from empire import stored_data
from jstools.screeps import *
from utilities import movement, positions, robjs
__pragma__('noalias', 'name')
__pragma__('noalias', 'undefined')
__pragma__('noalias', 'Infinity')
__pragma__('noalias', 'keys')
__pragma__('noalias', 'get')
__pragma__('noalias', 'set')
__pragma__('noalias', 'type')
__pragma__('noalias', 'update')
__pragma__('noalias', 'values')
def portals_near(room_name: str) -> List[Tuple[str, Dict[str, Union[str, int]]]]:
portal_data = stored_data.portal_info()
result = []
for center in movement.sector_centers_near(room_name):
if center in portal_data:
result.append((center, portal_data[center]))
for odd_name in stored_data.odd_portal_rooms():
if odd_name in portal_data and movement.room_chebyshev_distance(room_name, odd_name) < 10:
result.append((odd_name, portal_data[odd_name]))
return result
def recommended_reroute(origin: RoomPosition, destination: RoomPosition) -> Optional[Tuple[RoomPosition, RoomPosition]]:
path_len = movement.room_chebyshev_distance(origin.roomName, destination.roomName)
if path_len < 5:
return None
reroute = None # type: Optional[Tuple[str, Dict[str, Union[str, int]]]]
for origin_portal_room, portal_data in portals_near(origin.roomName):
destination_portal_room = portal_data[portal_segment_data_key_destination_room_name]
trying_len = (
movement.room_chebyshev_distance(origin.roomName, origin_portal_room)
+ movement.room_chebyshev_distance(destination_portal_room, destination.roomName)
)
if trying_len < path_len:
path_len = trying_len
reroute = (origin_portal_room, portal_data)
if reroute is None:
return None
origin_portal_room, portal_data = reroute
destination_portal_room = portal_data[portal_segment_data_key_destination_room_name]
xys_encoded = portal_data[portal_segment_data_key_xy_pairs]
origin_x, origin_y = positions.deserialize_xy(int(robjs.get_str_codepoint(xys_encoded, 0)))
destination_x, destination_y = positions.deserialize_xy(int(robjs.get_str_codepoint(xys_encoded, 1)))
reroute_start = __new__(RoomPosition(origin_x, origin_y, origin_portal_room))
reroute_end = __new__(RoomPosition(destination_x, destination_y, destination_portal_room))
return reroute_start, reroute_end
|
class Queue:
def __init__(self):
self.data = []
def __str__(self):
values = map(str, self.data)
return ' <- '.join(values)
def enque(self, val):
self.data.append(val)
def deque(self):
return self.data.pop(0)
def peek(self):
return self.data[0]
def is_empty(self):
return self.data == []
def clear(self):
        self.data = []
queue = Queue()
queue.enque(0)
queue.enque(1)
queue.enque(2)
queue.enque(3)
print('queue: ')
print(queue)
print('dequeing', queue.deque())
print('queue: ')
print(queue)
print('Peeked data', queue.peek())
print('Clearing out')
queue.clear()
print('queue is empty' if queue.is_empty() else 'queue is not empty') |
from pymysql_portable.converters import *
|
#Class List + Total Class List
c7A = ["Alice", "Bob", "Charlie"]
c8B = ["Ryu", "Ken", "Akuma"]
cTest = ["Rob", "Alex", "Ramzi"]
cBestClass = ["Ramzi", "Zoe"]
allClasses = {"7A": c7A, "8B": c8B, "TEST": cTest, "BESTCLASS": cBestClass}
#Booleans + initialisations
validClass = False
runProgram = True
validMainRunInputs = ["1","2","9"]
#Valid class input + Gathering class data
currentClass = input("Select a Class: ").upper()
while validClass == False:
if currentClass in allClasses:
print("Found")
validClass = True
else:
currentClass = input("\nNot Found, Try again: ").upper()
currentClassList = allClasses[currentClass]
#Functions, lovely Functions
def allMerit(classList):
allMeritRunner = True
print("\nALL MERIT MODE ACTIVATED\n")
classList = currentClassList
#print(classList)
namePicker = input("Select a name to remove, 'end' to finish: ")
while allMeritRunner:
if namePicker == "end":
print("\nFinished! Final Merit list is: ")
print(classList)
allMeritRunner = False
elif namePicker not in classList:
namePicker = input("\nNot found, please try again: ")
else:
classList.remove(namePicker)
print("\n" + namePicker + " has been removed.")
namePicker = input("\nFound, input another name or 'end': ")
def gainMerit(classList):
    print("gain merit mode inFunc")
#Begin the main program
print("\nWelcome, " + currentClass + "\n")
print("People in class: ")
print(currentClassList)
print("\n")
while runProgram:
selection = input("What do you want to do? \n1: All Merit Mode \n2: Gain Merit Mode \n9: Quit\n:")
if selection not in validMainRunInputs:
selection = input("\nTry again, please input another number: ")
elif selection == "1":
allMerit(currentClassList)
exit()
elif selection == "2":
print("GAIN MODE ACTIVATED")
gainMerit(currentClassList)
exit()
elif selection == "9":
print("bai bai")
runProgram = False
else:
print("You should not be here....uh oh")
|
#!/usr/bin/env python
import threading, logging, time,random
from kafka import KafkaProducer
tempSensors = [['roomStudy:temp',17],['roomSleep1:temp',17],['roomSleep2:temp',17]]
binSensors = [['home:mailBox',0],['home:door',0],['kitchen:smoke',0]]
class Manual_Producer():
def start(self):
producer = KafkaProducer(bootstrap_servers='192.168.160.211:9092')
while True:
            producer.send('sensors', ('kitchen:temp#' + input('sensor value - Kitchen: ')).encode('utf-8'))
class Auto_Producer():
def start(self):
producer = KafkaProducer(bootstrap_servers='192.168.160.211:9092')
while True:
for t in tempSensors:
t[1] += random.randint(-2,2)
                producer.send('sensors', (t[0] + "#" + str(t[1])).encode('utf-8'))
time.sleep(2)
for b in binSensors:
b[1] = random.randint(0,1)
                producer.send('sensors', (b[0] + "#" + str(b[1])).encode('utf-8'))
print(b[0]+"#"+str(b[1]))
time.sleep(2)
time.sleep(2)
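# Design note (illustration only): instead of encoding each payload by hand, the
# producer could be built with a value_serializer so plain strings can be passed
# to send(); the broker address below is the same placeholder used above.
#
# producer = KafkaProducer(bootstrap_servers='192.168.160.211:9092',
#                          value_serializer=lambda v: v.encode('utf-8'))
# producer.send('sensors', 'kitchen:temp#21')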
#Auto_Producer().start()
Manual_Producer().start()
|
# -*- coding: utf-8 -*-
"""generator.py
A module for generating lists of Vector2s. These can be generated randomly within a square, within a rectangle, within a
circle, withing an ellipse, or on an axis. They can also be generated as vertices distributed uniformly on a grid, but
this method is not random.
License:
http://www.apache.org/licenses/LICENSE-2.0"""
import random
from typing import List, Tuple
from ffg_geo.vector2 import Vector2
class Generator:
"""A random Vector2 generator."""
@staticmethod
def seed(a: int = 0) -> None:
"""Seeds the generator.
Args:
a: The seed."""
random.seed(a)
@staticmethod
def __make_square(x_min: float, x_max: float, y_min: float, y_max: float) -> Tuple[float, float, float, float]:
"""Returns the input parameters adjusted such that the region defined by the output parameters is the largest
square contained in and centered on the region defined by the input parameters.
Args:
x_min: The minimum x value of the rectangular region.
x_max: The maximum x value of the rectangular region.
y_min: The minimum y value of the rectangular region.
y_max: The maximum y value of the rectangular region.
Returns:
The x_min, x_max, y_min, y_max of the resulting square region."""
x_width = x_max - x_min
y_height = y_max - y_min
x_center = (x_min + x_max) * 0.5
y_center = (y_min + y_max) * 0.5
r_min = min(x_width, y_height) * 0.5
return x_center - r_min, x_center + r_min, y_center - r_min, y_center + r_min
@staticmethod
def in_square(n: int, x_min: float, x_max: float, y_min: float, y_max: float) -> List[Vector2]:
"""Generates random Vector2s within a square.
Args:
n: The number of Vector2s to randomly generate.
x_min: The minimum x value of the square.
x_max: The maximum x value of the square.
y_min: The minimum y value of the square.
y_max: The maximum y value of the square.
Returns:
A list of Vector2s."""
x_min, x_max, y_min, y_max = Generator.__make_square(x_min, x_max, y_min, y_max)
return Generator.in_rect(n, x_min, x_max, y_min, y_max)
@staticmethod
def in_rect(n: int, x_min: float, x_max: float, y_min: float, y_max: float) -> List[Vector2]:
"""Generates random Vector2s within a rectangle.
Args:
n: The number of Vector2s to randomly generate.
x_min: The minimum x value of the rectangle.
x_max: The maximum x value of the rectangle.
y_min: The minimum y value of the rectangle.
y_max: The maximum y value of the rectangle.
Returns:
A list of Vector2s."""
result = []
while len(result) < n:
v = Vector2(random.uniform(x_min, x_max), random.uniform(y_min, y_max))
result.append(v)
return result
@staticmethod
def in_circle(n: int, x_min: float, x_max: float, y_min: float, y_max: float) -> List[Vector2]:
"""Generates random Vector2s within a circle.
Args:
n: The number of Vector2s to randomly generate.
x_min: The minimum x value of the circle.
x_max: The maximum x value of the circle.
y_min: The minimum y value of the circle.
y_max: The maximum y value of the circle.
Returns:
A list of Vector2s."""
x_min, x_max, y_min, y_max = Generator.__make_square(x_min, x_max, y_min, y_max)
return Generator.in_ellipse(n, x_min, x_max, y_min, y_max)
@staticmethod
def in_ellipse(n: int, x_min: float, x_max: float, y_min: float, y_max: float) -> List[Vector2]:
"""Generates random Vector2s within an ellipse.
Args:
n: The number of Vector2s to randomly generate.
x_min: The minimum x value of the ellipse.
x_max: The maximum x value of the ellipse.
y_min: The minimum y value of the ellipse.
y_max: The maximum y value of the ellipse.
Returns:
A list of Vector2s."""
result = []
r_x = abs(x_max - x_min) * 0.5
r_y = abs(y_max - y_min) * 0.5
x_center = (x_min + x_max) * 0.5
y_center = (y_min + y_max) * 0.5
while len(result) < n:
v = Vector2(random.uniform(x_min, x_max), random.uniform(y_min, y_max))
if (((v[0] - x_center) ** 2) / (r_x ** 2)) + (((v[1] - y_center) ** 2) / (r_y ** 2)) > 1.0:
continue
result.append(v)
return result
@staticmethod
def on_axis(n: int, v_axis_min: float, v_axis_max: float, c_axis_val: float, v_axis: int,
uniform: bool) -> List[Vector2]:
"""Generate random Vector2's on an axis.
Args:
n: The number of Vector2s to randomly generate.
v_axis_min: The minimum value on the variable axis.
v_axis_max: The maximum value on the variable axis.
c_axis_val: The value of the constant axis.
v_axis: The variable axis.
uniform: If the points should be uniform rather than random.
Returns:
A list of Vector2s."""
result = []
if uniform:
v_axis_step = (v_axis_max - v_axis_min) / (n - 1)
for index in range(n):
coordinates = [0.0, 0.0]
coordinates[v_axis] = v_axis_min + (v_axis_step * index)
coordinates[1 - v_axis] = c_axis_val
result.append(Vector2(coordinates[0], coordinates[1]))
else:
for index in range(n):
coordinates = [0.0, 0.0]
coordinates[v_axis] = random.uniform(v_axis_min, v_axis_max)
coordinates[1 - v_axis] = c_axis_val
result.append(Vector2(coordinates[0], coordinates[1]))
return result
@staticmethod
def in_regional_cut(n: int, cuts: int, axis: int, alternate: bool, x_min: float, x_max: float, y_min: float,
y_max: float, method: str) -> List[Vector2]:
"""Generates random Vector2s by recursively cutting the region into halves and then in the base case generating
n points per region using the specified method. Can be used to generate points in a more uniform manner.
Args:
n: The number of Vector2s to randomly generate per leaf region.
cuts: The number of recursive divisions to make.
axis: The axis to cut on initially.
alternate: If the axis to cut on should alternate in subsequent cuts.
x_min: The minimum x value of the initial region.
x_max: The maximum x value of the initial region.
y_min: The minimum y value of the initial region.
y_max: The maximum y value of the initial region.
method: The method to use to generate points at the end of the cuts.
Returns:
A list of Vector2s."""
if cuts < 1:
if method == 's':
# Square.
return Generator.in_square(n, x_min, x_max, y_min, y_max)
elif method == 'h':
# Horizontal line.
return Generator.on_axis(n, x_min, x_max, (y_min + y_max) * 0.5, 0, False)
elif method == 'v':
# Vertical line.
return Generator.on_axis(n, y_min, y_max, (x_min + x_max) * 0.5, 1, False)
elif method == 'c':
# Circle.
return Generator.in_circle(n, x_min, x_max, y_min, y_max)
elif method == 'e':
# Ellipse.
return Generator.in_ellipse(n, x_min, x_max, y_min, y_max)
elif method == 'o':
# On exact center.
x_center = (x_min + x_max) * 0.5
y_center = (y_min + y_max) * 0.5
return [Vector2(x_center, y_center) for _ in range(n)]
else:
# Rectangle.
return Generator.in_rect(n, x_min, x_max, y_min, y_max)
# Determine next axis.
if alternate:
n_axis = 1 - axis
else:
n_axis = axis
# Perform recursive cuts.
if axis == 0:
# Cut on x axis.
y_center = (y_min + y_max) * 0.5
lower = Generator.in_regional_cut(n, cuts - 1, n_axis, alternate, x_min, x_max, y_min, y_center, method)
upper = Generator.in_regional_cut(n, cuts - 1, n_axis, alternate, x_min, x_max, y_center, y_max, method)
return lower + upper
# Cut on y axis.
x_center = (x_min + x_max) * 0.5
left = Generator.in_regional_cut(n, cuts - 1, n_axis, alternate, x_min, x_center, y_min, y_max, method)
right = Generator.in_regional_cut(n, cuts - 1, n_axis, alternate, x_center, x_max, y_min, y_max, method)
return left + right
@staticmethod
def duplicate(vertices: List[Vector2], min_duplicates: int, max_duplicates: int) -> List[Vector2]:
"""
Given a list of vertices, returns a shuffled list of duplicates of those vertices. A random number of duplicates
is chosen uniformly between [min_duplicates, max_duplicates] for every vertex independently. Every vertex from
the source list will appear in the resulting list at least once: the number of duplicates is the total number
minus 1.
Args:
vertices: The vertices to duplicate.
min_duplicates: The minimum number of duplicates per vertex.
max_duplicates: The maximum number of duplicates per vertex.
Returns:
A shuffled list of duplicate vertices."""
result = []
for vertex in vertices:
num_duplicates = random.randrange(min_duplicates, max_duplicates + 1)
for _ in range(num_duplicates + 1):
new_vertex = Vector2(vertex[0], vertex[1])
result.append(new_vertex)
random.shuffle(result)
return result
@staticmethod
def in_grid(x_min: float, x_max: float, y_min: float, y_max: float, x_div: int, y_div: int) -> List[Vector2]:
"""Generates axis aligned points in a grid.
Args:
x_min: The minimum x value of the region to generate the grid within.
x_max: The maximum x value of the region to generate the grid within.
y_min: The minimum y value of the region to generate the grid within.
y_max: The maximum y value of the region to generate the grid within.
x_div: The number of horizontal grid lines in the region.
y_div: The number of vertical grid lines in the region.
Returns:
A list of vertices."""
result = []
        x_div = max(x_div, 1)
        y_div = max(y_div, 1)
x_step = (x_max - x_min) / x_div
y_step = (y_max - y_min) / y_div
for x_factor in range(x_div + 1):
x = x_step * x_factor + x_min
for y_factor in range(y_div + 1):
y = y_step * y_factor + y_min
result.append(Vector2(x, y))
return result
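# A small usage sketch (added for illustration): seed the generator and build a
# few vertices; assumes the ffg_geo package imported above is available.
if __name__ == '__main__':
    Generator.seed(42)
    print(Generator.in_rect(3, 0.0, 1.0, 0.0, 1.0))
    print(Generator.in_grid(0.0, 1.0, 0.0, 1.0, 2, 2))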
|
#!/usr/bin/env python
import unittest
import xml.etree.cElementTree as eTree
from latex2mathml import converter
__author__ = "Ronie Martinez"
__copyright__ = "Copyright 2016-2017, Ronie Martinez"
__credits__ = ["Ronie Martinez"]
__license__ = "MIT"
__maintainer__ = "Ronie Martinez"
__email__ = "[email protected]"
__status__ = "Development"
class ConverterTest(unittest.TestCase):
def setUp(self):
self.math = eTree.Element('math')
self.row = eTree.SubElement(self.math, 'mrow')
def test_single_identifier(self):
mi = eTree.SubElement(self.row, 'mi')
mi.text = 'x'
self.assertEqual(eTree.tostring(self.math), converter.convert('x'))
def test_multiple_identifiers(self):
mi = eTree.SubElement(self.row, 'mi')
mi.text = 'x'
mi = eTree.SubElement(self.row, 'mi')
mi.text = 'y'
mi = eTree.SubElement(self.row, 'mi')
mi.text = 'z'
self.assertEqual(eTree.tostring(self.math), converter.convert('xyz'))
def test_single_number(self):
mn = eTree.SubElement(self.row, 'mn')
mn.text = '3'
self.assertEqual(eTree.tostring(self.math), converter.convert('3'))
def test_multiple_numbers(self):
mn = eTree.SubElement(self.row, 'mn')
mn.text = '333'
self.assertEqual(eTree.tostring(self.math), converter.convert('333'))
def test_decimal_numbers(self):
mn = eTree.SubElement(self.row, 'mn')
mn.text = '12.34'
self.assertEqual(eTree.tostring(self.math), converter.convert('12.34'))
def test_numbers_and_identifiers(self):
mn = eTree.SubElement(self.row, 'mn')
mn.text = '12'
mi = eTree.SubElement(self.row, 'mi')
mi.text = 'x'
self.assertEqual(eTree.tostring(self.math), converter.convert('12x'))
def test_single_operator(self):
mo = eTree.SubElement(self.row, 'mo')
mo.text = '+'
self.assertEqual(eTree.tostring(self.math), converter.convert('+'))
def test_numbers_and_operators(self):
mn = eTree.SubElement(self.row, 'mn')
mn.text = '3'
mo = eTree.SubElement(self.row, 'mo')
mo.text = '−'
mn = eTree.SubElement(self.row, 'mn')
mn.text = '2'
self.assertEqual(eTree.tostring(self.math), converter.convert('3-2'))
def test_numbers_and_identifiers_and_operators(self):
mn = eTree.SubElement(self.row, 'mn')
mn.text = '3'
mi = eTree.SubElement(self.row, 'mi')
mi.text = 'x'
mo = eTree.SubElement(self.row, 'mo')
mo.text = '*'
mn = eTree.SubElement(self.row, 'mn')
mn.text = '2'
self.assertEqual(eTree.tostring(self.math), converter.convert('3x*2'))
def test_single_group(self):
mrow = eTree.SubElement(self.row, 'mrow')
mi = eTree.SubElement(mrow, 'mi')
mi.text = 'a'
self.assertEqual(eTree.tostring(self.math), converter.convert('{a}'))
def test_multiple_groups(self):
mrow = eTree.SubElement(self.row, 'mrow')
mi = eTree.SubElement(mrow, 'mi')
mi.text = 'a'
mrow = eTree.SubElement(self.row, 'mrow')
mi = eTree.SubElement(mrow, 'mi')
mi.text = 'b'
self.assertEqual(eTree.tostring(self.math), converter.convert('{a}{b}'))
def test_inner_group(self):
mrow = eTree.SubElement(self.row, 'mrow')
mi = eTree.SubElement(mrow, 'mi')
mi.text = 'a'
mo = eTree.SubElement(mrow, 'mo')
mo.text = '+'
mrow = eTree.SubElement(mrow, 'mrow')
mi = eTree.SubElement(mrow, 'mi')
mi.text = 'b'
self.assertEqual(eTree.tostring(self.math), converter.convert('{a+{b}}'))
if __name__ == '__main__':
unittest.main(verbosity=2)
|
#
# Copyright (c) 2019 -2021 MINRES Technolgies GmbH
#
# SPDX-License-Identifier: Apache-2.0
#
import cppyy
import os.path
import site
from sysconfig import get_paths
import sys
import re
import logging
from contextlib import (redirect_stdout, redirect_stderr)
import io
lang_symbols = {
3: '199711L',
11:'201103L',
14:'201402L',
17:'201703L'}
lang_level=11
sysIncludeDirs = set()
includeDirs = set()
interactive = False
def find_file(name, paths):
for path in paths:
for root, dirs, files in os.walk(path):
if name in files:
return os.path.join(root, name)
def read_config_from_conan(build_dir, build_type='Release'):
data={}
with io.open(os.path.join(build_dir, 'conanbuildinfo.txt'), encoding='utf-8') as conan_file:
sl = conan_file.readlines()
key=''
for item in sl:
stripped_item = item.rstrip()
match = re.search(r'\[(\S+)\]', stripped_item)
if match:
key=match.group(1)
data[key]=[]
elif len(stripped_item):
data[key].append(stripped_item)
# set include pathes and load libraries
for p in data['includedirs']:
add_sys_include_path(p)
for l in data['libdirs']:
if os.path.exists(l+'/'+'libsystemc.so'):
cppyy.load_library(l+'/'+'libsystemc.so')
for b in data['builddirs']:
if '/systemc/' in b:
os.environ['SYSTEMC_HOME'] =b
elif '/systemc-cci/' in b:
os.environ['CCI_HOME'] = b
elif '/systemc-scv/' in b:
os.environ['SCV_HOME'] = b
systemc_loaded=False
cci_loaded=False
def load_systemc():
    global systemc_loaded
    if 'SYSTEMC_HOME' in os.environ:
add_sys_include_path(os.path.join(os.environ['SYSTEMC_HOME'], 'include'))
for l in ['lib', 'lib64', 'lib-linux', 'lib-linux64']:
for f in ['libsystemc.so']:
full_file=os.path.join(os.environ['SYSTEMC_HOME'], l, f)
if os.path.isfile(full_file):
cppyy.load_library(full_file)
cppyy.cppdef("""
#define SC_CPLUSPLUS %s
#include "systemc"
#include "tlm"
namespace sc_core { extern void pln(); }
""" % lang_symbols[lang_level])
systemc_loaded=True
_load_systemc_cci()
break
if systemc_loaded: break;
if not interactive: cppyy.gbl.sc_core.pln()
cppyy.gbl.sc_core.sc_in_action=True
_load_pythonization_lib()
return True
return False
def _load_systemc_scv():
if 'SCV_HOME' in os.environ:
add_sys_include_path(os.path.join(os.environ['SCV_HOME'], 'include'))
for l in ['lib', 'lib64', 'lib-linux', 'lib-linux64']:
for f in ['libscv.so']:
full_file = os.path.join(os.environ['SCV_HOME'], l, f)
if os.path.isfile(full_file):
cppyy.load_library(full_file)
cppyy.include("cci_configuration")
cci_loaded=True
return True
return False
def _load_systemc_cci():
    global cci_loaded
    for home_dir in ['CCI_HOME', 'SYSTEMC_HOME']:
if home_dir in os.environ:
add_sys_include_path(os.path.join(os.environ[home_dir], 'include'))
for l in ['lib', 'lib64', 'lib-linux', 'lib-linux64']:
for f in ['libcciapi.so']:
full_file = os.path.join(os.environ[home_dir], l, f)
if os.path.isfile(full_file):
cppyy.load_library(full_file)
cppyy.include("cci_configuration")
cci_loaded=True
return True
return False
def _load_pythonization_lib(debug = False):
plat_info = get_paths()
# check for standard search path
for key in plat_info:
plat_dir =plat_info[key]
if os.path.isdir(plat_dir):
if debug: logging.debug("Checking for pythonization lib in platform dir %s"%plat_dir)
for file in os.listdir(plat_dir):
if re.match(r'pysyscsc.*\.so', file):
cppyy.load_library(os.path.join(plat_dir, file))
full_path = os.path.join(plat_dir, '../../../include/site/python%d.%d/PySysC/PyScModule.h' % sys.version_info[:2])
if debug: logging.debug('found %s, looking for %s'%(file, full_path))
if full_path and os.path.isfile(full_path):
cppyy.include(full_path)
return
# check site packages first to check for venv
for site_dir in site.getsitepackages():
if os.path.isdir(site_dir):
if debug: logging.debug("Checking for pythonization lib in site package dir %s"%site_dir)
for file in os.listdir(site_dir):
if re.match(r'pysyscsc.*\.so', file):
cppyy.load_library(os.path.join(site_dir, file))
full_path = find_file('PyScModule.h', site.PREFIXES)
if debug: logging.debug('found %s, looking at %s for %s'%(file, site.PREFIXES, full_path))
if full_path and os.path.isfile(full_path):
cppyy.include(full_path)
return
if site.ENABLE_USER_SITE:
        # check user site packages (e.g. ~/.local)
user_site_dir = site.getusersitepackages()
if os.path.isdir(user_site_dir):
if debug: logging.debug("Checking for pythonization lib in user site dir %s"%user_site_dir)
for file in os.listdir(user_site_dir):
if re.match(r'pysyscsc.*\.so', file):
cppyy.load_library(os.path.join(user_site_dir, file))
user_base = site.USER_BASE
full_path = user_base + '/include/python%d.%d/PySysC/PyScModule.h' % sys.version_info[:2]
if debug: logging.debug('found %s, looking at %s for %s'%(file, user_base, full_path))
if os.path.isfile(full_path):
cppyy.include(full_path)
return
# could not be found in install, maybe development environment
pkgDir = os.path.join(os.path.dirname( os.path.realpath(__file__)), '..')
if os.path.isdir(pkgDir):
if debug: logging.debug("Checking for pythonization lib in source dir %s"%pkgDir)
for file in os.listdir(pkgDir):
if re.match(r'pysyscsc.*\.so', file):
cppyy.load_library(os.path.join(pkgDir, file))
full_path = os.path.join(pkgDir, 'PyScModule.h')
if full_path and os.path.isfile(full_path):
cppyy.include(full_path)
return
sys.exit("No Pythonization found")
def add_library(header, lib, project_dir=None):
lib_path = lib
if(project_dir is not None):
for root, dirs, files in os.walk(project_dir):
if lib in files:
lib_path = os.path.join(root, lib)
break
buf = io.StringIO()
with redirect_stdout(buf), redirect_stderr(buf):
cppyy.load_library(lib_path)
cppyy.include(header)
return buf.getvalue()
def add_include_path(incl):
includeDirs.add(incl)
cppyy.add_include_path(incl)
def add_sys_include_path(incl):
sysIncludeDirs.add(incl)
cppyy.add_include_path(incl)
# prepare a pythonizor
def _pythonizor(clazz, name):
# A pythonizor receives the freshly prepared bound C++ class, and a name stripped down to
# the namespace the pythonizor is applied. Also accessible are clazz.__name__ (for the
# Python name) and clazz.__cpp_name__ (for the C++ name)
if name == 'sc_time':
clazz.__repr__ = lambda self: repr(self.to_string())
clazz.__str__ = lambda self: self.to_string()
elif name in ['sc_object', 'sc_module']:
clazz.__repr__ = lambda self: repr(self.name())
elif len(name) > 8 and name[:7] == 'sc_port<':
clazz.__repr__ = lambda self: repr(self.name())
elif len(name) > 10 and name[:9] == 'sc_export<':
clazz.__repr__ = lambda self: repr(self.name())
# install the pythonizor as a callback on namespace 'sc_core' (default is the global namespace)
cppyy.py.add_pythonization(_pythonizor, 'sc_core')
# reflection methods
def get_members(sc_object):
def is_cpp_data_type(name, module):
matches = [x for x in ['int', 'char', 'float', 'double'] if name == x]
if len(matches) > 0 or module[:10] == "cppyy.gbl.":
return True
else:
return False
members = [(e, getattr(sc_object, e)) for e in dir(sc_object)]
return [(k,v) for k,v in members if is_cpp_data_type(type(v).__name__, type(v).__module__)]
def get_methods(sc_object):
members = [(e, getattr(sc_object, e)) for e in dir(sc_object)]
return [(k,v) for k,v in members if type(v).__name__=='CPPOverload']
def get_ports(module):
res = []
for elem in dir(module):
attr=getattr(module, elem)
if isinstance(attr, cppyy.gbl.sc_core.sc_port_base) and not isinstance(attr, cppyy.gbl.tlm.tlm_base_socket_if):
res.append(attr)
return res
def get_exports(module):
res = []
for elem in dir(module):
attr=getattr(module, elem)
if isinstance(attr, cppyy.gbl.sc_core.sc_export_base) and not isinstance(attr, cppyy.gbl.tlm.tlm_base_socket_if):
res.append(attr)
return res
def get_inititator_sockets(module):
res = []
for elem in dir(module):
attr=getattr(module, elem)
if isinstance(attr, cppyy.gbl.sc_core.sc_port_base) and isinstance(attr, cppyy.gbl.tlm.tlm_base_socket_if):
res.append(attr)
return res
def get_target_sockets(module):
res = []
for elem in dir(module):
attr=getattr(module, elem)
if isinstance(attr, cppyy.gbl.sc_core.sc_export_base) and isinstance(attr, cppyy.gbl.tlm.tlm_base_socket_if):
res.append(attr)
return res
def get_submodules(module):
res = []
for elem in dir(module):
attr=getattr(module, elem)
if isinstance(attr, cppyy.gbl.sc_core.sc_module):
res.append(attr)
return res
import time
class timewith():
def __init__(self, name=''):
self.name = name
self.start = time.time()
@property
def elapsed(self):
return time.time() - self.start
def checkpoint(self, name=''):
return '{timer} {checkpoint} took {elapsed} seconds'.format(timer=self.name, checkpoint=name, elapsed=self.elapsed).strip()
def __enter__(self):
return self
def __exit__(self, mytype, value, traceback):
print(self.checkpoint('finished'))
pass
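# Minimal usage sketch (illustration only): timewith as a standalone context manager.
# Running this file directly still imports cppyy at the top, so that dependency is
# assumed to be installed; the timer itself only needs the standard library.
if __name__ == '__main__':
    with timewith('demo') as timer:
        sum(range(1000000))
        print(timer.checkpoint('summation'))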
|
#!/usr/bin/env python
# coding: utf-8 -*-
# pylint: disable=bare-except
# pylint: disable=dangerous-default-value
# flake8: noqa: W503
# pylint: disable=logging-format-interpolation
# flake8: noqa: W1202
# pylint: disable = duplicate-code
# flake8: noqa: R0801
#
# GNU General Public License v3.0+
#
# Copyright 2019 Arista Networks AS-EMEA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import traceback
import logging
from typing import List
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.arista.cvp.plugins.module_utils.device_tools import FIELD_CONFIGLETS
import ansible_collections.arista.cvp.plugins.module_utils.logger # noqa # pylint: disable=unused-import
from ansible_collections.arista.cvp.plugins.module_utils.response import CvApiResult, CvManagerResult, CvAnsibleResponse
try:
from cvprac.cvp_client import CvpClient # noqa # pylint: disable=unused-import
from cvprac.cvp_client_errors import CvpClientError # noqa # pylint: disable=unused-import
from cvprac.cvp_client_errors import CvpApiError # noqa # pylint: disable=unused-import
HAS_CVPRAC = True
except ImportError:
HAS_CVPRAC = False
CVPRAC_IMP_ERR = traceback.format_exc()
# try:
# import jsonschema
# HAS_JSONSCHEMA = True
# except ImportError:
# HAS_JSONSCHEMA = False
import ansible_collections.arista.cvp.plugins.module_utils.schema_v3 as schema
MODULE_LOGGER = logging.getLogger('arista.cvp.container_tools_v3')
MODULE_LOGGER.info('Start cv_container_v3 module execution')
# CONSTANTS for fields in API data
FIELD_COUNT_DEVICES = 'childNetElementCount'
FIELD_COUNT_CONTAINERS = 'childContainerCount'
FIELD_PARENT_ID = 'parentContainerId'
FIELD_PARENT_NAME = 'parentContainerName'
FIELD_NAME = 'name'
FIELD_KEY = 'key'
FIELD_TOPOLOGY = 'topology'
FIELD_CONFIGLETS = 'configlets'
FIELD_CONTAINER_ID = 'containerId'
class ContainerInput(object):
"""
ContainerInput Object to manage Container Topology in context of arista.cvp collection.
[extended_summary]
"""
def __init__(self, user_topology: dict, container_root_name: str = 'Tenant', schema=schema.SCHEMA_CV_CONTAINER):
self.__topology = user_topology
self.__parent_field: str = FIELD_PARENT_NAME
self.__root_name = container_root_name
self.__schema = schema
self.__normalize()
def __normalize(self):
"""
__normalize Parse inventory and add keys that are optional from schema.
"""
for container_name in self.__topology:
if FIELD_CONFIGLETS not in self.__topology[container_name]:
self.__topology[container_name].update({FIELD_CONFIGLETS: []})
def __get_container_data(self, container_name: str, key_name: str):
"""
_get_container_data Get a specific subset of data for a given container
Parameters
----------
container_name : str
Name of the container
key_name : str
Name of the key to extract
Returns
-------
Any
Value of the key. None if not found
"""
MODULE_LOGGER.debug('Receive request to get data for container %s about its %s key', str(
container_name), str(key_name))
if container_name in self.__topology:
if key_name in self.__topology[container_name]:
MODULE_LOGGER.debug(' -> Found data for container %s: %s', str(
container_name), str(self.__topology[container_name][key_name]))
return self.__topology[container_name][key_name]
return None
@property
def is_valid(self):
"""
check_schemas Validate schemas for user's input
"""
MODULE_LOGGER.info('start json schema validation')
if not schema.validate_cv_inputs(user_json=self.__topology, schema=self.__schema):
MODULE_LOGGER.error(
"Invalid configlet input : \n%s\n\n%s", str(self.__topology), self.__schema)
return False
return True
@property
def ordered_list_containers(self):
"""
ordered_list_containers List of container from root to the bottom
Returns
-------
list
List of containers
"""
result_list = list()
MODULE_LOGGER.info("Build list of container to create from %s", str(self.__topology))
while(len(result_list) < len(self.__topology)):
container_added = False
for container in self.__topology:
if self.__topology[container][self.__parent_field] == self.__root_name and container not in result_list:
container_added = True
result_list.append(container)
if (any(element == self.__topology[container][self.__parent_field] for element in result_list)
and container not in result_list):
container_added = True
result_list.append(container)
if container_added is False:
containerWithoutParent = [item for item in self.__topology.keys() if item not in result_list]
MODULE_LOGGER.warning(
'Breaking the while loop as the following containers dont have a parent present in the topology %s',
str(containerWithoutParent))
result_list = result_list + containerWithoutParent
break
MODULE_LOGGER.info('List of containers to apply on CV: %s', str(result_list))
return result_list
def get_parent(self, container_name: str, parent_key: str = FIELD_PARENT_NAME):
"""
get_parent Expose name of parent container for the given container
Parameters
----------
container_name : str
Container Name
parent_key : str, optional
Key to use for the parent container name, by default 'parent_container'
Returns
-------
str
Name of the parent container, None if not found
"""
return self.__get_container_data(container_name=container_name, key_name=parent_key)
def get_configlets(self, container_name: str, configlet_key: str = FIELD_CONFIGLETS):
"""
get_configlets Read and extract list of configlet names for a container
Parameters
----------
container_name : str
Name of the container to search configlets
configlet_key : str, optional
Key where configlets are saved in inventory, by default 'configlets'
Returns
-------
list
List of configlet names
"""
return self.__get_container_data(container_name=container_name, key_name=configlet_key)
def has_configlets(self, container_name: str, configlet_key: str = FIELD_CONFIGLETS):
"""
has_configlets Test if container has configlets configured in inventory
Parameters
----------
container_name : str
Name of the container
configlet_key : str, optional
Field name where configlets are defined, by default 'configlets'
Returns
-------
bool
True if configlets attached, False if not
"""
if self.__get_container_data(container_name=container_name, key_name=configlet_key) is None:
return False
return True
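# Illustrative sketch (not part of the module): ContainerInput can be exercised on
# its own with a hypothetical topology dict; container and configlet names below
# are made up for the example.
#
# topology = {
#     'DC2': {'parentContainerName': 'Tenant', 'configlets': ['BASE']},
#     'DC2_LEAFS': {'parentContainerName': 'DC2'},
# }
# user_input = ContainerInput(user_topology=topology)
# user_input.ordered_list_containers   # -> ['DC2', 'DC2_LEAFS']
# user_input.get_parent('DC2_LEAFS')   # -> 'DC2'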
class CvContainerTools(object):
"""
CvContainerTools Class to manage container actions for arista.cvp.cv_container module
"""
def __init__(self, cv_connection, ansible_module: AnsibleModule = None, check_mode: bool = False):
self.__cvp_client = cv_connection
self.__ansible = ansible_module
self.__check_mode = ansible_module.check_mode if ansible_module is not None else check_mode
#############################################
# Private functions
#############################################
def __standard_output(self, source: dict):
"""
        __standard_output Filter dict to create a standard output with relevant keys
Parameters
----------
source : dict
Original dictionary
Returns
-------
dict
Standardized dict.
"""
standard_keys = [FIELD_KEY, FIELD_NAME, FIELD_COUNT_CONTAINERS,
FIELD_COUNT_DEVICES, FIELD_PARENT_ID]
return {k: v for k, v in source.items() if k in standard_keys}
def __get_attached_configlets(self, container_name: str):
"""
__get_attached_configlets Extract configlet information for all attached configlets to a container
Example
-------
>>> CvContainerTools._get_attached_configlets(container_name='demo')
[
{
'name': 'test',
'key': 'container-23243-23234-3423423'
}
]
Parameters
----------
container_name : str
Name of the container
Returns
-------
list
List of dict {key:, name:} of attached configlets
"""
list_configlet = list()
info = self.__cvp_client.api.get_configlets_by_container_id(
c_id=container_name)
info = {k.lower(): v for k, v in info.items()}
for attached_configlet in info['configletList']:
list_configlet.append(
self.__standard_output(source=attached_configlet))
return list_configlet
def __get_all_configlets(self):
"""
__get_all_configlets Extract information for all configlets
Example
-------
>>> CvContainerTools._get_all_configlets()
[
{
'name': 'test',
'key': 'container-23243-23234-3423423'
}
]
Returns
-------
list
List of dict {key:, name:} of attached configlets
"""
result = list()
list_configlets = self.__cvp_client.api.get_configlets()
list_configlets = {k.lower(): v for k, v in list_configlets.items()}
for configlet in list_configlets['data']:
result.append(self.__standard_output(source=configlet))
return result
def __get_configlet_info(self, configlet_name: str):
"""
__get_configlet_info Get information of a configlet from CV
Example
>>> CvContainerTools._get_configlet_info(configlet_name='test')
{
name: 'test',
key: 'container-sdsaf'
}
Parameters
----------
configlet_name : str
Name of the configlet to get information
Returns
-------
dict
            Configlet information in a filtered manner
"""
MODULE_LOGGER.info('Getting information for configlet %s', str(configlet_name))
data = self.__cvp_client.api.get_configlet_by_name(name=configlet_name)
if data is not None:
return self.__standard_output(source=data)
return None
def __configlet_add(self, container: dict, configlets: list, save_topology: bool = True):
"""
__configlet_add Add a list of configlets to a container on CV
Only execute an API call to attach a list of configlets to a container.
All configlets must be provided with information and not only name
Example
-------
>>> CvContainerTools._configlet_add(container='test', configlets=[ {key: 'configlet-xxx-xxx-xxx-xxx', name: 'ASE_DEVICE-ALIASES'} ])
{
'success': True,
'taskIDs': [],
'container': 'DC3',
'configlets': ['ASE_DEVICE-ALIASES']
}
Parameters
----------
container : dict
Container information to use in API call. Format: {key:'', name:''}
configlets : list
List of configlets information to use in API call
save_topology : bool, optional
Send a save-topology, by default True
Returns
-------
dict
API call result
"""
configlet_names = list()
container_name = 'Undefined'
change_response = CvApiResult(action_name=container_name)
        # Protect against non-existing container in check_mode
if container is not None:
configlet_names = [entry.get('name')
for entry in configlets if entry.get('name')]
change_response.name = container['name'] + ':' + ':'.join(configlet_names)
if self.__check_mode:
change_response.success = True
change_response.taskIds = ['check_mode']
change_response.add_entry(
container['name'] + ':' + ':'.join(configlet_names))
MODULE_LOGGER.warning(
'[check_mode] - Fake container creation of %s', str(container['name']))
else:
try:
resp = self.__cvp_client.api.apply_configlets_to_container(
app_name="ansible_cv_container",
new_configlets=configlets,
container=container,
create_task=save_topology
)
except CvpApiError as e:
message = "Error configuring configlets " + str(configlets) + " to container " + str(container) + ". Exception: " + str(e)
MODULE_LOGGER.error(message)
self.__ansible.fail_json(msg=message)
else:
if 'data' in resp and resp['data']['status'] == 'success':
# We assume there is a change as API does not provide information
# resp = {'data': {'taskIds': [], 'status': 'success'}}
change_response.taskIds = resp['data']['taskIds']
change_response.success = True
change_response.changed = True
return change_response
def __configlet_del(self, container: dict, configlets: list, save_topology: bool = True):
"""
__configlet_del Remove a list of configlet from container in CV
        Only execute an API call to remove a list of configlets from a container.
All configlets must be provided with information and not only name
Example
-------
>>> CvContainerTools._configlet_del(container='test', configlets=[ {key: 'configlet-xxx-xxx-xxx-xxx', name: 'ASE_DEVICE-ALIASES'} ])
{
'success': True,
'taskIDs': [],
'container': 'DC3',
'configlets': ['ASE_DEVICE-ALIASES']
}
Parameters
----------
container : dict
Container information to use in API call. Format: {key:'', name:''}
configlets : list
List of configlets information to use in API call
save_topology : bool, optional
Send a save-topology, by default True
Returns
-------
dict
API call result
"""
configlet_names = list()
configlet_names = [entry.get('name')
for entry in configlets if entry.get('name')]
change_response = CvApiResult(action_name=container['name'] + ':' + ':'.join(configlet_names))
if self.__check_mode:
change_response.success = True
change_response.taskIds = ['check_mode']
change_response.add_entry(
container['name'] + ':' + ':'.join(configlet_names))
else:
try:
resp = self.__cvp_client.api.remove_configlets_from_container(
app_name="ansible_cv_container",
del_configlets=configlets,
container=container,
create_task=save_topology
)
except CvpApiError as e:
message = "Error removing configlets " + str(configlets) + " from container " + str(container) + ". Exception: " + str(e)
MODULE_LOGGER.error(message)
self.__ansible.fail_json(msg=message)
else:
if 'data' in resp and resp['data']['status'] == 'success':
change_response.taskIds = resp['data']['taskIds']
# We assume there is a change as API does not provide information
# resp = {'data': {'taskIds': [], 'status': 'success'}}
change_response.success = True
change_response.changed = True
return change_response
#############################################
# Generic functions
#############################################
def get_container_info(self, container_name: str):
"""
get_container_info Collect container information from CV
Extract information from Cloudvision using provisioning/filterTopology call
Example
-------
>>> CvContainerTools.get_container_info(container_name='DC2')
{
"key": "container_55effafb-2991-45ca-86e5-bf09d4739248",
"name": "DC1_L3LEAFS",
"childContainerCount": 5,
"childNetElementCount": 0,
"parentContainerId": "container_614c6678-1769-4acf-9cc1-214728238c2f"
}
Parameters
----------
container_name : str
Name of the searched container
Returns
-------
dict
A standard dictionary with Key, Name, ParentID, Number of children and devices.
"""
cv_response = self.__cvp_client.api.get_container_by_name(
name=container_name)
MODULE_LOGGER.debug('Get container ID (%s) response from cv for container %s', str(cv_response), str(container_name))
if cv_response is not None and FIELD_KEY in cv_response:
container_id = cv_response[FIELD_KEY]
container_facts = self.__cvp_client.api.filter_topology(node_id=container_id)[
FIELD_TOPOLOGY]
MODULE_LOGGER.debug('Return info for container %s', str(container_name))
return self.__standard_output(source=container_facts)
return None
def get_configlets(self, container_name: str):
"""
get_configlets Get list of configured configlets for a container
Example
-------
>>> CvContainerTools.get_configlets(container_name='DC2')
[
{
"key": "configlet_267cc5b4-791d-47d4-a79c-000fc0732802",
"name": "ASE_GLOBAL-ALIASES",
"reconciled": false,
"config": "...",
"user": "ansible",
"note": "Managed by Ansible",
"containerCount": 0,
"netElementCount": 0,
"dateTimeInLongFormat": 1600694234181,
"isDefault": "no",
"isAutoBuilder": "",
"type": "Static",
"editable": true,
"sslConfig": false,
"visible": true,
"isDraft": false,
"typeStudioConfiglet": false
}
]
Parameters
----------
container_name : str
Name of the container to lookup
Returns
-------
list
List of configlets configured on container
"""
container_id = self.get_container_id(container_name=container_name)
configlets_and_mappers = self.__cvp_client.api.get_configlets_and_mappers()
configlets_list = configlets_and_mappers['data']['configlets']
mappers = configlets_and_mappers['data']['configletMappers']
configlets_configured = list()
MODULE_LOGGER.info('container %s has id %s', str(container_name), str(container_id))
for mapper in mappers:
if mapper['objectId'] == container_id:
MODULE_LOGGER.info(
'Found 1 mappers for container %s : %s', str(container_name), str(mapper))
configlets_configured.append(
next((x for x in configlets_list if x['key'] == mapper['configletId'])))
MODULE_LOGGER.debug('List of configlets from CV is: %s', str(
[x['name'] for x in configlets_configured]))
return configlets_configured
def get_container_id(self, container_name: str):
"""
get_container_id Collect container ID from CV for a given container
Example
>>> CvContainerTools.get_container_id(container_name='DC2')
container_55effafb-2991-45ca-86e5-bf09d4739248
Parameters
----------
container_name : str
Name of the container to get ID
Returns
-------
str
Container ID sent by CV
"""
container_info = self.__cvp_client.api.get_container_by_name(
name=container_name)
if FIELD_KEY in container_info:
return container_info[FIELD_KEY]
return None
#############################################
# Boolean & getters functions
#############################################
def is_empty(self, container_name: str):
"""
is_empty Test if container has no child AND no devices attached to it
Example
-------
>>> CvContainerTools.is_empty(container_name='DC2')
True
Parameters
----------
container_name : str
Name of the container to test
Returns
-------
bool
True if container has no child nor devices
"""
container = self.get_container_info(container_name=container_name)
if FIELD_COUNT_CONTAINERS in container and FIELD_COUNT_DEVICES in container:
if container[FIELD_COUNT_CONTAINERS] == 0 and container[FIELD_COUNT_DEVICES] == 0:
return True
return False
def is_container_exists(self, container_name):
"""
is_container_exists Test if a given container exists on CV
Example
-------
>>> CvContainerTools.is_container_exists(container_name='DC2')
True
Parameters
----------
container_name : [type]
Name of the container to test
Returns
-------
bool
True if container exists, False if not
"""
try:
cv_data = self.__cvp_client.api.get_container_by_name(name=container_name)
except (CvpApiError, CvpClientError) as error:
message = "Error getting information for container " + \
str(container_name) + \
": " + str(error)
MODULE_LOGGER.error(message)
self.__ansible.fail_json(msg=message)
return True
if cv_data is not None:
return True
return False
#############################################
# Public API
#############################################
def create_container(self, container: str, parent: str):
"""
create_container Worker to send container creation API call to CV
Example
-------
>>> CvContainerTools.create_container(container='DC2', parent='DCs')
{
"success": True,
"taskIDs": [],
"container": 'DC2'
}
Parameters
----------
container : str
Name of the container to create
parent : str
Container name where new container will be created
Returns
-------
dict
Creation status
"""
resp = dict()
change_result = CvApiResult(action_name=container)
if self.is_container_exists(container_name=parent):
parent_id = self.__cvp_client.api.get_container_by_name(name=parent)[
FIELD_KEY]
MODULE_LOGGER.debug('Parent container (%s) for container %s exists', str(parent), str(container))
if self.is_container_exists(container_name=container) is False:
if self.__check_mode:
change_result.success = True
change_result.changed = True
                    change_result.add_entry(container)
else:
try:
resp = self.__cvp_client.api.add_container(
container_name=container, parent_key=parent_id, parent_name=parent)
except CvpApiError as e:
# Add Ansible error management
message = "Error creating container " + str(container) + " on CV. Exception: " + str(e)
MODULE_LOGGER.error(message)
self.__ansible.fail_json(msg=message)
else:
if resp['data']['status'] == "success":
change_result.taskIds = resp['data']['taskIds']
change_result.success = True
change_result.changed = True
change_result.count += 1
else:
message = "Parent container (" + str(
parent) + ") is missing for container " + str(container)
MODULE_LOGGER.error(message)
self.__ansible.fail_json(msg=message)
MODULE_LOGGER.info('Container creation result is %s', str(change_result.results))
return change_result
def delete_container(self, container: str, parent: str):
"""
delete_container Worker to send container deletion API call to CV
Example
-------
>>> CvContainerTools.delete_container(container='DC2', parent='DCs')
{
"success": True,
"taskIDs": [],
"container": 'DC2'
}
Parameters
----------
container : str
Name of the container to delete
parent : str
Container name where container will be deleted
Returns
-------
dict
Deletion status
"""
resp = dict()
change_result = CvApiResult(action_name=container)
if self.is_container_exists(container_name=container) is False:
message = "Unable to delete container " + \
str(container) + ": container does not exist on CVP"
MODULE_LOGGER.error(message)
self.__ansible.fail_json(msg=message)
elif self.is_empty(container_name=container) is False:
message = "Unable to delete container " + str(container) + ": container not empty - either it has child container(s) or \
some device(s) are attached to it on CVP"
MODULE_LOGGER.error(message)
self.__ansible.fail_json(msg=message)
else:
parent_id = self.get_container_id(container_name=parent)
container_id = self.get_container_id(container_name=container)
# ----------------------------------------------------------------#
            # COMMENT: Check mode may report partial change as there is no    #
# validation that attached containers would be removed in a #
# previous run of this function #
# ----------------------------------------------------------------#
if self.__check_mode:
change_result.success = True
                change_result.add_entry(container)
else:
try:
resp = self.__cvp_client.api.delete_container(
container_name=container, container_key=container_id, parent_key=parent_id, parent_name=parent)
except CvpApiError as e:
# Add Ansible error management
message = "Error deleting container " + str(container) + " on CV. Exception: " + str(e)
MODULE_LOGGER.error(message)
self.__ansible.fail_json(msg=message)
else:
if resp['data']['status'] == "success":
change_result.taskIds = resp['data']['taskIds']
change_result.success = True
change_result.changed = True
change_result.count += 1
return change_result
def configlets_attach(self, container: str, configlets: List[str], strict: bool = False):
"""
configlets_attach Worker to send configlet attach to container API call
Example
-------
>>> CvContainerTools.configlet_attach(container='DC3', configlets=['ASE_DEVICE-ALIASES'])
{
'success': True,
'taskIDs': [],
'container': 'DC3',
'configlets': ['ASE_DEVICE-ALIASES']
}
Parameters
----------
container : str
Name of the container
configlets : List[str]
List of configlets to attach
strict : bool, optional
Remove configlet not listed in configlets var -- NOT SUPPORTED -- , by default False
Returns
-------
dict
Action result
"""
container_info = self.get_container_info(container_name=container)
attach_configlets = list()
for configlet in configlets:
data = self.__get_configlet_info(configlet_name=configlet)
if data is not None:
attach_configlets.append(data)
return self.__configlet_add(container=container_info, configlets=attach_configlets)
def configlets_detach(self, container: str, configlets: List[str]):
"""
configlets_attach Worker to send configlet detach from container API call
Example
-------
>>> CvContainerTools.configlets_detach(container='DC3', configlets=['ASE_DEVICE-ALIASES'])
{
'success': True,
'taskIDs': [],
'container': 'DC3',
'configlets': ['ASE_DEVICE-ALIASES']
}
Parameters
----------
container : str
Name of the container
configlets : List[str]
List of configlets to detach
Returns
-------
dict
Action result
"""
MODULE_LOGGER.info('Running configlet detach for container %s', str(container))
container_info = self.get_container_info(container_name=container)
detach_configlets = list()
for configlet in configlets:
            configlet_name = configlet['name'] if isinstance(configlet, dict) else configlet
            data = self.__get_configlet_info(configlet_name=configlet_name)
if data is not None:
detach_configlets.append(data)
MODULE_LOGGER.info('Sending data to self.__configlet_del: %s', str(detach_configlets))
return self.__configlet_del(container=container_info, configlets=detach_configlets)
def build_topology(self, user_topology: ContainerInput, present: bool = True, apply_mode: str = 'loose'):
"""
build_topology Class entry point to build container topology on Cloudvision
Run all actions to provision containers on Cloudvision:
- Create or delete containers
- Attach or detach configlets to containers
        Creation or deletion is managed with the present flag
Parameters
----------
user_topology : ContainerInput
User defined containers topology to build
present : bool, optional
Enable creation or deletion process, by default True
apply_mode: str, optional
Define how builder will apply configlets to container: loose (only attach listed configlets) or strict (attach listed configlets, remove others)
Returns
-------
CvAnsibleResponse
Formatted ansible response message
"""
response = CvAnsibleResponse()
container_add_manager = CvManagerResult(builder_name='container_added')
container_delete_manager = CvManagerResult(
builder_name='container_deleted')
cv_configlets_attach = CvManagerResult(
builder_name='configlets_attached')
cv_configlets_detach = CvManagerResult(
builder_name='configlets_detached', default_success=True)
# Create containers topology in Cloudvision
if present is True:
for user_container in user_topology.ordered_list_containers:
MODULE_LOGGER.info('Start creation process for container %s under %s', str(
user_container), str(user_topology.get_parent(container_name=user_container)))
resp = self.create_container(
container=user_container, parent=user_topology.get_parent(container_name=user_container))
container_add_manager.add_change(resp)
if user_topology.has_configlets(container_name=user_container):
resp = self.configlets_attach(
container=user_container, configlets=user_topology.get_configlets(container_name=user_container))
cv_configlets_attach.add_change(resp)
if apply_mode == 'strict':
attached_configlets = self.get_configlets(container_name=user_container)
configlet_to_remove = list()
for attach_configlet in attached_configlets:
if attach_configlet['name'] not in user_topology.get_configlets(container_name=user_container):
configlet_to_remove.append(attach_configlet)
if len(configlet_to_remove) > 0:
resp = self.configlets_detach(container=user_container, configlets=configlet_to_remove)
cv_configlets_detach.add_change(resp)
# Remove containers topology from Cloudvision
else:
for user_container in reversed(user_topology.ordered_list_containers):
MODULE_LOGGER.info('Start deletion process for container %s under %s', str(
user_container), str(user_topology.get_parent(container_name=user_container)))
resp = self.delete_container(
container=user_container, parent=user_topology.get_parent(container_name=user_container))
container_delete_manager.add_change(resp)
# Create ansible message
response.add_manager(container_add_manager)
response.add_manager(container_delete_manager)
response.add_manager(cv_configlets_attach)
response.add_manager(cv_configlets_detach)
MODULE_LOGGER.debug(
'Container manager is sending result data: %s', str(response))
return response
|
"""
[x] terminate based on duality gap at 1e-3
[x] check against cvxpy
[x] experiment with mu; inner newton steps and total newton steps
[x] plot log duality gap vs total newton steps
- textbook format step plot
python -m ee364a.a11_8
"""
import fire
import numpy as np
import torch
import tqdm
from .a10_4 import Soln, LPCenteringProb, infeasible_start_newton_solve
def barrier_solve(soln: Soln, prob: LPCenteringProb, t: float, mu: float, epsilon=1e-3, verbose=False):
this_step = 0
steps = []
newton_steps = []
gaps = []
while True:
prob.t = t # Solve the right problem.
soln.nu = torch.zeros_like(soln.nu)
soln, this_newton_steps, _, _, _ = infeasible_start_newton_solve(
soln=soln, prob=prob, max_steps=2000, epsilon=1e-5,
)
this_step += 1
this_gap = prob.m / t
this_newton_steps = this_newton_steps[-1]
steps.append(this_step)
gaps.append(this_gap)
newton_steps.append(this_newton_steps)
if verbose:
print(f'this_step: {this_step}, this_gap: {this_gap}, newton_steps: {this_newton_steps}')
if this_gap < epsilon:
break
t = mu * t
return soln, steps, gaps, newton_steps
def _generate_prob():
m = 100
n = 500
A = torch.randn(m, n)
A[0].abs_()
rank = torch.linalg.matrix_rank(A)
assert rank == m
p = torch.randn(n).abs() # Make positive.
b = A @ p
c = torch.randn(n)
in_domain = lambda soln: torch.all(soln.x > 0)
x = torch.randn(n).exp() # Make positive.
nu = torch.zeros(m)
return Soln(x=x, nu=nu), LPCenteringProb(A=A, b=b, c=c, in_domain=in_domain)
@torch.no_grad()
def main(seed=0, t=0.5, mu=8):
torch.manual_seed(seed)
torch.set_default_dtype(torch.float64) # Single precision makes this fail.
soln_init, prob = _generate_prob()
soln, steps, gaps, newton_steps = barrier_solve(
soln=soln_init, prob=prob, t=t, mu=mu, verbose=True
)
# duality gap vs cumulative Newton steps
from swissknife import utils
utils.plot_wrapper(
img_path=utils.join('.', 'ee364a', 'plots', 'a11_8'),
suffixes=(".png", '.pdf'),
steps=[dict(x=np.cumsum(newton_steps), y=gaps)],
options=dict(yscale='log', xlabel='cumulative Newton steps', ylabel='duality gap')
)
# compare against CVXPY
import cvxpy as cp
x = cp.Variable(prob.n)
obj = cp.Minimize(prob.c.numpy() @ x)
con = [
x >= 0.,
prob.A.numpy() @ x == prob.b.numpy()
]
cp.Problem(obj, con).solve()
# diff should be small
print(np.sum(prob.c.numpy() * soln.x.numpy())) # -762.7124775791468
print(np.sum(prob.c.numpy() * x.value)) # -762.7143847548298
# vary mu
avg_steps = []
tot_steps = []
mus = (2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048)
for mu in tqdm.tqdm(mus, desc="mu"):
_, steps, gaps, newton_steps = barrier_solve(
soln=soln_init, prob=prob, t=t, mu=mu, verbose=False
)
avg_steps.append(np.mean(newton_steps))
tot_steps.append(np.sum(newton_steps))
utils.plot_wrapper(
img_path=utils.join('.', 'ee364a', 'plots', 'a11_8_2'),
suffixes=(".png", '.pdf'),
plots=[
dict(x=mus, y=avg_steps, label='average steps per centering'),
dict(x=mus, y=tot_steps, label='total steps'),
],
options=dict(ylabel='Newton steps', xlabel='mu'),
)
if __name__ == "__main__":
fire.Fire(main)
|
from typing import List

from .model import TreeNode

"""
Space : O(m + n)
Time : O((m + n) log(m + n))  # dominated by sorting the merged values
"""
class Solution:
    # in-order traversal (left, root, right)
def getLeaf(self, root) -> List[int]:
ans = []
if root:
ans += self.getLeaf(root.left)
ans.append(root.val)
ans += self.getLeaf(root.right)
return ans
def getAllElements(self, root1: TreeNode, root2: TreeNode) -> List[int]:
ans = []
value1 = self.getLeaf(root1)
value2 = self.getLeaf(root2)
ans.extend(value1)
ans.extend(value2)
return sorted(ans) |
from __future__ import absolute_import, division, print_function
import types
from numbers import Number
from typing import List, Tuple, Union, Sequence, Optional
import numpy as np
import tensorflow as tf
from sklearn.mixture import GaussianMixture
from sklearn.preprocessing import KBinsDiscretizer
from tensorflow import Tensor
from tensorflow_probability.python.distributions import Distribution, Normal
from typing_extensions import Literal
from odin.utils import as_tuple
__all__ = [
'discretizing',
'permute_dims',
'traverse_dims',
'prepare_ssl_inputs',
'split_ssl_inputs',
'marginalize_categorical_labels',
]
def _gmm_discretizing_predict(self, X):
# self._check_is_fitted()
means = self.means_.ravel()
ids = self._estimate_weighted_log_prob(X).argmax(axis=1)
# sort by increasing order of means_
return np.expand_dims(np.argsort(means)[ids], axis=1)
def discretizing(*factors: List[np.ndarray],
independent: bool = True,
n_bins: Union[int, List[int]] = 5,
strategy: Literal['uniform', 'quantile', 'kmeans',
'gmm'] = 'quantile',
return_model: bool = False,
seed: int = 1,
**gmm_kwargs):
"""Transform continuous value into discrete
Note: the histogram discretizer is equal to
`KBinsDiscretizer(n_bins=n, encode='ordinal', strategy='uniform')`
Arguments:
factors : array-like or list of array-like
independent : a Boolean, if `True` (by default), each factor (i.e. column)
    is discretized independently.
n_bins : int or array-like, shape (n_features,) (default=5)
The number of bins to produce. Raises ValueError if ``n_bins < 2``.
strategy : {'uniform', 'quantile', 'kmeans', 'gmm'}, (default='quantile')
Strategy used to define the widths of the bins.
uniform - All bins in each feature have identical widths.
quantile - All bins in each feature have the same number of points.
kmeans - Values in each bin have the same nearest center of a 1D
k-means cluster.
gmm - using the components (in sorted order of mean) of Gaussian
mixture to label.
"""
encode = 'ordinal'
  # Other encode options (not used here):
  # onehot - sparse matrix of one-hot encoding
  # onehot-dense - dense one-hot encoding; ignored features are always stacked
  #   to the right
  # ordinal - return the bin identifier encoded as an integer value (used here)
strategy = str(strategy).strip().lower()
if 'histogram' in strategy:
strategy = 'uniform'
# ====== GMM base discretizer ====== #
if 'gmm' in strategy:
create_gmm = lambda: GaussianMixture(
n_components=n_bins, random_state=seed, **gmm_kwargs) # fix random state
if independent:
gmm = []
for f in factors[0].T:
gm = create_gmm()
gm.fit(np.expand_dims(f, axis=1))
gm.predict = types.MethodType(_gmm_discretizing_predict, gm)
gmm.append(gm)
transform = lambda x: np.concatenate([
gm.predict(np.expand_dims(col, axis=1)) for gm, col in zip(gmm, x.T)
],
axis=1)
else:
gmm = create_gmm()
gmm.fit(np.expand_dims(factors[0].ravel(), axis=1))
gmm.predict = types.MethodType(_gmm_discretizing_predict, gmm)
transform = lambda x: np.concatenate(
[gmm.predict(np.expand_dims(col, axis=1)) for col in x.T], axis=1)
disc = gmm
# ====== start with bins discretizer ====== #
else:
disc = KBinsDiscretizer(n_bins=n_bins, encode=encode, strategy=strategy)
if independent:
disc.fit(factors[0])
transform = lambda x: disc.transform(x).astype(np.int64)
else:
disc.fit(np.expand_dims(factors[0].ravel(), axis=-1))
transform = lambda x: np.hstack([
disc.transform(np.expand_dims(i, axis=-1)).astype(np.int64)
for i in x.T
])
# ====== returns ====== #
factors = tuple([transform(i) for i in factors])
factors = factors[0] if len(factors) == 1 else factors
if return_model:
return factors, disc
return factors
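

# A minimal usage sketch added for illustration (the helper name below is an
# assumption introduced for documentation, not part of the original API):
# discretize a random factor matrix into 5 quantile bins.
def _example_discretizing():
  rng = np.random.RandomState(0)
  factors = rng.randn(100, 3)
  # each column is mapped independently to integer bin ids in [0, n_bins)
  codes = discretizing(factors, n_bins=5, strategy='quantile')
  return codes.shape == factors.shape and int(codes.max()) < 5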
# ===========================================================================
# Helper for semi-supervised learning
# ===========================================================================
def _batch_size(x):
batch_size = x.shape[0]
if batch_size is None:
batch_size = tf.shape(x)[0]
return batch_size
def prepare_ssl_inputs(
inputs: Union[Tensor, List[Tensor]],
mask: Tensor,
n_unsupervised_inputs: int,
) -> Tuple[List[Tensor], List[Tensor], Tensor]:
"""Prepare the inputs for the semi-supervised learning,
three cases are considered:
- Only the unlabeled data given
- Only the labeled data given
- A mixture of both unlabeled and labeled data, indicated by mask
Parameters
----------
inputs : Union[TensorTypes, List[TensorTypes]]
n_unsupervised_inputs : int
mask : TensorTypes
The `mask` is given as indicator, `1` for labeled sample and
`0` for unlabeled samples
Returns
-------
Tuple[tf.Tensor, tf.Tensor, tf.Tensor]
- List of inputs tensors
- List of labels tensors (empty if there is only unsupervised data)
- mask tensor
"""
inputs = tf.nest.flatten(as_tuple(inputs))
batch_size = _batch_size(inputs[0])
## no labels provided
if len(inputs) == n_unsupervised_inputs:
X = inputs
y = []
mask = tf.cast(tf.zeros(shape=(batch_size,)), dtype=tf.bool)
## labels is provided
else:
X = inputs[:n_unsupervised_inputs]
y = inputs[n_unsupervised_inputs:]
if mask is None: # all data is labelled
mask = tf.cast(tf.ones(shape=(batch_size,)), dtype=tf.bool)
y = [i for i in y if i is not None]
return X, y, mask
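

# Illustrative sketch only (the helper below is not part of the original
# module): exercises `prepare_ssl_inputs` in the fully-labelled case, where a
# missing mask defaults to all-True.
def _example_prepare_ssl_inputs():
  x = tf.zeros([8, 4])
  y = tf.one_hot(tf.zeros([8], dtype=tf.int32), depth=3)
  # two inputs but only one is unsupervised, so the second is treated as labels
  X, labels, mask = prepare_ssl_inputs([x, y], mask=None, n_unsupervised_inputs=1)
  return len(X) == 1 and len(labels) == 1 and bool(tf.reduce_all(mask))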
def split_ssl_inputs(
X: List[Tensor],
y: List[Tensor],
mask: Tensor,
) -> Tuple[List[Tensor], List[Tensor], List[Tensor]]:
"""Split semi-supervised inputs into unlabelled and labelled data
Parameters
----------
X : List[tf.Tensor]
y : List[tf.Tensor]
mask : tf.Tensor
Returns
-------
Tuple[List[tf.Tensor], List[tf.Tensor], List[tf.Tensor], tf.Tensor]
- list of unlablled inputs
- list of labelled inputs
- list of labels
"""
if not isinstance(X, (tuple, list)):
X = [X]
if y is None:
y = []
elif not isinstance(y, (tuple, list)):
y = [y]
if mask is None:
mask = tf.cast(tf.zeros(shape=(_batch_size(X[0]), 1)), dtype=tf.bool)
# flatten the mask
mask = tf.reshape(mask, (-1,))
# split into unlabelled and labelled data
X_unlabelled = [tf.boolean_mask(i, tf.logical_not(mask), axis=0) for i in X]
X_labelled = [tf.boolean_mask(i, mask, axis=0) for i in X]
y_labelled = [tf.boolean_mask(i, mask, axis=0) for i in y]
return X_unlabelled, X_labelled, y_labelled
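

# Illustrative sketch only (helper name is an assumption, not original API):
# splits a mixed batch into its unlabelled and labelled parts using the mask.
def _example_split_ssl_inputs():
  x = tf.reshape(tf.range(12, dtype=tf.float32), (6, 2))
  y = tf.range(6)
  mask = tf.constant([True, False, True, False, False, True])
  X_u, X_l, y_l = split_ssl_inputs([x], [y], mask)
  # three rows are labelled, the remaining three are unlabelled
  return X_u[0].shape[0] == 3 and X_l[0].shape[0] == 3 and y_l[0].shape[0] == 3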
def marginalize_categorical_labels(X: tf.Tensor,
n_classes: int,
dtype: tf.DType = tf.float32):
"""Marginalize discrete variable by repeating the input tensor for
all possible discrete values of the distribution.
Example:
```
# shape: [batch_size * n_labels, n_labels]
y = marginalize_categorical_labels(batch_size=inputs[0].shape[0],
num_classes=n_labels,
dtype=self.dtype)
# shape: [batch_size * n_labels, n_dims]
X = [tf.repeat(i, n_labels, axis=0) for i in inputs]
```
"""
n = X.shape[0]
if n is None:
n = tf.shape(X)[0]
y = tf.expand_dims(tf.eye(n_classes, dtype=dtype), axis=0)
y = tf.repeat(y, n, axis=0)
y = tf.reshape(y, (-1, n_classes))
X = tf.repeat(X, n_classes, axis=0)
return X, y
# ===========================================================================
# Dimensions manipulation
# ===========================================================================
@tf.function(autograph=True)
def permute_dims(z):
r""" Permutation of latent dimensions Algorithm(1):
```
input: matrix-(batch_dim, latent_dim)
output: matrix-(batch_dim, latent_dim)
foreach latent_dim:
shuffle points along batch_dim
```
Parameters
-----------
z : A Tensor
a tensor of shape `[batch_size, latent_dim]`
Reference
-----------
Kim, H., Mnih, A., 2018. Disentangling by Factorising.
arXiv:1802.05983 [cs, stat].
"""
shape = z.shape
batch_dim, latent_dim = shape[-2:]
perm = tf.TensorArray(dtype=z.dtype,
size=latent_dim,
dynamic_size=False,
clear_after_read=False,
element_shape=shape[:-1])
ids = tf.range(batch_dim, dtype=tf.int32)
# iterate over latent dimension
for i in tf.range(latent_dim):
# shuffle among minibatch
z_i = tf.gather(z[..., i], tf.random.shuffle(ids), axis=-1)
perm = perm.write(i, z_i)
return tf.transpose(perm.stack(),
perm=tf.concat([tf.range(1, tf.rank(z)), (0,)], axis=0))
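

# Illustrative sketch only (not part of the original API): checks that
# `permute_dims` keeps the shape while shuffling each latent dimension
# independently across the batch.
def _example_permute_dims():
  z = tf.reshape(tf.range(12, dtype=tf.float32), (4, 3))
  z_perm = permute_dims(z)
  # same shape; every column of z_perm is a permutation of the same column of z
  return z.shape == z_perm.shape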
def traverse_dims(
x: Union[np.ndarray, tf.Tensor, Distribution],
feature_indices: Optional[Sequence[int]] = None,
min_val: Union[float, np.ndarray] = -2.0,
max_val: Union[float, np.ndarray] = 2.0,
n_traverse_points: int = 11,
mode: Literal['linear', 'quantile', 'gaussian'] = 'linear') -> np.ndarray:
"""Traversing a dimension of a matrix between given range
Parameters
----------
x : Union[np.ndarray, tf.Tensor, Distribution]
the 2-D array for performing dimension traverse
feature_indices : Union[int, List[int]]
a single index or list of indices for traverse (i.e. which columns in the
last dimension are for traverse)
min_val : int, optional
minimum value of the traverse, by default -2.0
max_val : int, optional
maximum value of the traverse, by default 2.0
n_traverse_points : int, optional
number of points in the traverse, must be odd number, by default 11
mode : {'linear', 'quantile', 'gaussian'}, optional
'linear' mode take linear interpolation between the `min_val` and
`max_val`.
'quantile' mode return `num` quantiles based on min and max values inferred
from the data. 'gaussian' mode takes `num` Gaussian quantiles,
by default 'linear'
Returns
-------
np.ndarray
the ndarray with traversed axes
Example
--------
For `n_traverse_points=3`, and `feature_indices=[0]`,
the return latents are:
```
[[-2., 0.47],
[ 0., 0.47],
[ 2., 0.47]]
```
"""
if feature_indices is None:
feature_indices = list(
range(
x.event_shape[-1] if isinstance(x, Distribution) else x.shape[-1]))
if hasattr(feature_indices, 'numpy'):
feature_indices = feature_indices.numpy()
if isinstance(feature_indices, np.ndarray):
feature_indices = feature_indices.tolist()
feature_indices = as_tuple(feature_indices, t=int)
# === 0. list of indices, repeat for each index
if len(feature_indices) > 1:
arr = [
traverse_dims(
x,
feature_indices=i,
min_val=min_val,
max_val=max_val,
n_traverse_points=n_traverse_points,
mode=mode) for i in feature_indices]
return np.concatenate(arr, axis=0)
# === 1. single index
if not isinstance(min_val, Number):
assert len(min_val) == x.shape[-1]
min_val = min_val[feature_indices]
if not isinstance(max_val, Number):
assert len(max_val) == x.shape[-1]
max_val = max_val[feature_indices]
feature_indices = feature_indices[0]
n_traverse_points = int(n_traverse_points)
assert n_traverse_points % 2 == 1, \
('n_traverse_points must be odd number, '
f'i.e. centered at 0, given {n_traverse_points}')
assert n_traverse_points > 1, \
f'n_traverse_points > 1 but given: n_traverse_points={n_traverse_points}.'
### check the mode
all_mode = ('quantile', 'linear', 'gaussian')
mode = str(mode).strip().lower()
assert mode in all_mode, \
f"Only support traverse mode:{all_mode}, but given '{mode}'"
px = None
if isinstance(x, Distribution):
px = x
x = px.mean()
elif mode == 'gaussian':
raise ValueError('A distribution must be provided for mean and stddev '
'in Gaussian mode.')
### sample
x_org = x
x = np.array(x)
assert len(x.shape) == 2, f'input arrays x must be 2D-array, given: {x.shape}'
### ranges
# z_range is a matrix [n_latents, num]
# linear range
if mode == 'linear':
x_range = np.linspace(min_val, max_val, num=n_traverse_points)
# min-max quantile
elif mode == 'quantile':
if x_org.shape[0] == 1:
vmin, vmax = np.min(x_org), np.max(x_org)
else:
vmin = np.min(x_org[:, feature_indices])
vmax = np.max(x_org[:, feature_indices])
x_range = np.linspace(vmin, vmax, num=n_traverse_points)
# gaussian quantile
elif mode == 'gaussian':
dist = Normal(loc=tf.reduce_mean(px.mean(), 0)[feature_indices],
scale=tf.reduce_max(px.stddev(), 0)[feature_indices])
x_range = []
for i in np.linspace(1e-6, 1.0 - 1e-6,
num=n_traverse_points,
dtype=np.float32):
x_range.append(dist.quantile(i))
x_range = np.array(x_range)
else:
    raise ValueError(f'Unknown mode="{mode}"')
### traverse
X = np.repeat(x, len(x_range), axis=0)
# repeat for each sample
for i in range(x.shape[0]):
s = i * len(x_range)
e = (i + 1) * len(x_range)
# note, this should be added not simple assignment
X[s:e, feature_indices] += x_range.astype(X.dtype)
return X
|
from argparse import Namespace
import sys
import os
import numpy as np
# Data analysis.
from nasws.cnn.search_space.nasbench201.nasbench201_search_space import NASBench201Benchmark, NASBench201SearchSpace
from nasws.cnn.search_space.nasbench101.nasbench_search_space import NASBench_v2, NASbenchSearchSpace, NasBenchSearchSpaceFixChannels, NasBenchSearchSpaceSubsample
from nasws.cnn.search_space.nds.nds_search_space import DARTSSearchSpaceNDS
from nasws.cnn.policy.cnn_search_configs import build_default_args
from nasws.cnn.utils import Rank, compute_percentile, compute_sparse_kendalltau, sort_hash_perfs
from .tool import spares_kdt_compute_mean_std, prob_surpass_random, MAXRANK, load_data_from_experiment_root_dir, FILTER_LOW_ACC
# load the all these nasbench here, for query the final performance.
def get_space(name):
args = build_default_args()
if name == 'nasbench101':
args.num_intermediate_nodes = 5
space = NASbenchSearchSpace(args)
elif name == 'nasbench101_fix_channels':
space = NasBenchSearchSpaceFixChannels(args)
elif name == 'nasbench101_subsample':
space = NasBenchSearchSpaceSubsample(args)
elif 'nasbench201' in name:
args.num_intermediate_nodes = 4
space = NASBench201SearchSpace(args)
elif 'darts_nds' in name:
space = DARTSSearchSpaceNDS(args)
else:
raise NotImplementedError("Not yet supported")
return space
def initialize_space_data_dict():
#initialize space data dict
space_data_dict = {
'nasbench101': {},
'nasbench201': {},
'darts_nds': {}
}
keys = ['acc', 'perf', 'acc-std', 'perf-std', 'search_ids', 'search_perfs', 'args', 'p-random', 'res_dicts',
'kdt', 'kdt-std', 'skdt','skdt-std',
'spr', 'spr-std', 'sspr', 'sspr-std']
for s in space_data_dict.values():
for k in keys:
s[k] = []
return space_data_dict
def add_supernetacc_finalperf_kdt(_res_dicts, search_spaces, space_data_dict,
final_result=False, threshold=0.4, use_hash=False, sort_best_model_fn=None):
"""Analysis the supernet accordingly.
Parameters
----------
_res_dicts : [type]
[description]
search_spaces : [type]
[description]
space_data_dict : dict
dict[spacename][dataname]
spacename as "nasbench101" etc
dataname: ['acc', 'kdt', 'perf', 'acc-std', 'kdt-std', 'perf-std']
Returns
-------
space_data_dict
"""
def data_fn(res):
        # output the data
return None
args = _res_dicts[0]['args']
steps = list(sorted(list(_res_dicts[0]['eval_epochs'])))
TOP_K = 10 if args.search_space == 'nasbench201' else 5
# TOP_K = 3
    # this is fixed per search space.
if final_result:
_steps = steps[-1:]
else:
_steps = steps
for s in _steps:
# print("step running here", s)
r = spares_kdt_compute_mean_std(_res_dicts, search_spaces, 'eval_arch_' + str(s), 'eval_perf_' + str(s), use_hash=use_hash, sort_best_model_fn=sort_best_model_fn)
arch_ids = r[5]
arch_supernet_perfs = r[4]
s_arch_ids, _ = sort_hash_perfs(arch_ids, arch_supernet_perfs)
search_arch_ids = s_arch_ids[-TOP_K:]
gt_perfs = search_spaces[args.search_space].query_gt_perfs(search_arch_ids)
gt_std = np.std(gt_perfs)
ws_perf = np.mean(arch_supernet_perfs)
ws_std = np.std(arch_supernet_perfs)
if ws_perf < threshold:
if FILTER_LOW_ACC:
print("Data is not trained, filtered!")
continue
p_r = prob_surpass_random(max(search_arch_ids), MAXRANK[args.search_space], repeat=TOP_K)
# skdt, sspr, kdt, spr,
space_data_dict[args.search_space]['args'].append(args)
space_data_dict[args.search_space]['acc'].append(ws_perf)
space_data_dict[args.search_space]['acc-std'].append(ws_std)
space_data_dict[args.search_space]['skdt'].append(r[0][0])
space_data_dict[args.search_space]['skdt-std'].append(r[0][1])
space_data_dict[args.search_space]['kdt'].append(r[2][0])
space_data_dict[args.search_space]['kdt-std'].append(r[2][1])
space_data_dict[args.search_space]['spr'].append(r[3][0])
space_data_dict[args.search_space]['spr-std'].append(r[3][1])
space_data_dict[args.search_space]['sspr'].append(r[1][0])
space_data_dict[args.search_space]['sspr-std'].append(r[1][1])
space_data_dict[args.search_space]['perf'].append(np.mean(gt_perfs))
space_data_dict[args.search_space]['perf-std'].append(gt_std)
space_data_dict[args.search_space]['search_perfs'].append(arch_supernet_perfs)
space_data_dict[args.search_space]['search_ids'].append(arch_ids)
space_data_dict[args.search_space]['p-random'].append(p_r)
space_data_dict[args.search_space]['res_dicts'].append(_res_dicts)
return space_data_dict
def print_basic_statistics_for_folder_table(lr_dir, str_filter, space_name, filter_fn, search_spaces):
"""Statistics to write in the paper.
Parameters
----------
lr_dir : [type]
[description]
str_filter : [type]
[description]
space_name : [type]
[description]
filter_fn : [type]
[description]
search_spaces : [type]
[description]
"""
accs, kdt, best_models, p_random, res_dicts = load_data_from_experiment_root_dir(lr_dir, str_filter, original_args=True, target_fn=filter_fn)
space_data_dict = initialize_space_data_dict()
for k in res_dicts[space_name].keys():
space_data_dict = add_supernetacc_finalperf_kdt(res_dicts[space_name][k], search_spaces, space_data_dict, final_result=True)
print([filter_fn(a) for a in space_data_dict[space_name]['args']])
for k in ['acc','acc-std', 'kdt', 'kdt-std', 'perf', 'perf-std', 'p-random']:
print(k)
print(space_data_dict[space_name][k], np.mean(space_data_dict[space_name][k]))
|
import os
import pytest
import shutil
from mlflow import cli
from click.testing import CliRunner
from mlflow.utils import process
EXAMPLES_DIR = "examples"
def get_free_disk_space():
# https://stackoverflow.com/a/48929832/6943581
return shutil.disk_usage("/")[-1] / (2 ** 30)
@pytest.fixture(scope="function", autouse=True)
def clean_envs_and_cache():
yield
if get_free_disk_space() < 7.0: # unit: GiB
process.exec_cmd(["./utils/remove-conda-envs.sh"])
@pytest.mark.parametrize(
"directory, params",
[
("IrisClassification", ["-P", "max_epochs=10"]),
("MNIST", ["-P", "max_epochs=1"]),
("IrisClassificationTorchScript", ["-P", "max_epochs=10"]),
("BertNewsClassification", ["-P", "max_epochs=1", "-P", "num_samples=100"]),
("E2EBert", ["-P", "max_epochs=1", "-P", "num_samples=100"]),
("Titanic", ["-P", "max_epochs=100", "-P", "lr=0.1"]),
],
)
def test_mlflow_run_example(directory, params):
example_dir = os.path.join(EXAMPLES_DIR, directory)
cli_run_list = [example_dir] + params
res = CliRunner().invoke(cli.run, cli_run_list)
assert res.exit_code == 0, "Got non-zero exit code {0}. Output is: {1}".format(
res.exit_code, res.output
)
|
# LESSON 7: ARITHMETIC OPERATORS
nome = input('Qual é o seu nome?\n')
print('Prazer em te conhecer, {:20}.' .format(nome))
# With :20 the field is padded to 20 characters (strings are left-aligned by default; use :^ to center).
# You can also set the direction of that padding with :> (right) or :< (left).
n1 = int(input('Um valor: '))
n2 = int(input('Outro número: '))
print('A soma vale {}' .format(n1 + n2))
# Quick sum just to show the result on screen.
n1 = int(input('Um novo valor: '))
n2 = int(input('Outro novo valor: '))
s = n1 + n2
p = n1 * n2
d = n1 / n2
di = n1 // n2
e = n1 ** n2
print(' Soma: {}\n Produto: {}\n Divisão: {:.3f}\n' .format(s, p, d), end= ' ')
print('Divisão inteira: {}\n Potência: {}\n' .format(di, e))
# end=' ' keeps the next print on the same line (no line break).
# To break lines inside a string, use \n.
# :.3f = three floating-point decimal places (as used in the division above).
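# Extra illustrative examples (added for clarity; not part of the original lesson):
print('{:>20}|'.format(nome))  # right-aligned inside a 20-character field
print('{:<20}|'.format(nome))  # left-aligned inside a 20-character field
print('{:^20}|'.format(nome))  # centered inside a 20-character field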
|
from .permission import InternalPermission
class CRUDPermissions:
@classmethod
def __init_defaults__(cls):
cls.create = InternalPermission(f"{cls.name}_create")
cls.delete = InternalPermission(f"{cls.name}_delete")
cls.read = InternalPermission(f"{cls.name}_read")
cls.update = InternalPermission(f"{cls.name}_update")
return cls
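

# Illustrative sketch (added for documentation; the subclass below is
# hypothetical): a concrete permission set is expected to define `name`
# before __init_defaults__() is invoked, which then attaches the four
# InternalPermission objects.
#
#     class ArticlePermissions(CRUDPermissions):
#         name = "article"
#
#     ArticlePermissions.__init_defaults__()
#     ArticlePermissions.read  # InternalPermission("article_read")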
|
import pandas as pd
import numpy as np
from sklearn.cluster import MeanShift
from collections import defaultdict
from .structure_extractor import StructureExtractor
TOP_LEFT_X, TOP_LEFT_Y, TOP_RIGHT_X, TOP_RIGHT_Y, \
BOTTOM_RIGHT_X, BOTTOM_RIGHT_Y, BOTTOM_LEFT_X, \
BOTTOM_LEFT_Y, TEXT = 'top_left_x', 'top_left_y', 'top_right_x', \
'top_right_y', 'bottom_right_x', 'bottom_right_y', \
'bottom_left_x', 'bottom_left_y', 'text'
__all__ = ['TableExtractor']
class TableExtractor:
def __init__(self, document_filepath=None, endpoint=None, subscription_key=None,
operation_url=None, ocr_outputs=None, api_type='azure',
api='read', vertical_columns=None, horizontal_columns=None,
horizontal_keywords=None):
self.ocr_outputs = ocr_outputs
self.operation_url = operation_url
self.vertical_columns = vertical_columns
self.horizontal_columns = horizontal_columns
self.horizontal_keywords = horizontal_keywords
structure_extractor = StructureExtractor(
document_filepath=document_filepath,
endpoint=endpoint,
subscription_key=subscription_key,
operation_url = self.operation_url,
ocr_outputs = self.ocr_outputs,
api=api
)
self.word_dataframe = structure_extractor.word_dataframe
self.ocr_outputs = structure_extractor.ocr_outputs
self.is_scanned = structure_extractor.is_scanned
# self.line_dataframe = structure_extractor.structure_extraction(self.line_dataframe)
self.line_dataframe = structure_extractor.structure_extraction(structure_extractor.line_dataframe)
self.ocr_outputs = structure_extractor.ocr_outputs
self.operation_url = structure_extractor.operation_url
def _close_matches(self, word, possibilities):
lower_possibilities = [str(item).lower() for item in possibilities]
lower_word = str(word).lower()
lower_possibilities_maxout = []
[lower_possibilities_maxout.extend(j) for j in [i.split() for i in lower_possibilities]]
if len(lower_word.split()) and lower_word.split()[0] in [i.strip() for i in lower_possibilities_maxout]:
present_bool = [lower_word in l.strip() for l in lower_possibilities]
# if True not in present_bool:
# present_bool = [lower_word.split()[0] in l.strip() for l in lower_possibilities]
if True in present_bool:
match = lower_word and possibilities[present_bool.index(True)]
if match:
return word
else:
return None
def _nearest_matches(self, given_value, possibilities):
"""
*Author: Vaibhav Hiwase
        *Details: This function returns the closest matching number from
                  `possibilities`, provided the given value lies within the
                  maximum gap between any two consecutive numbers in the
                  sequence of possibilities; otherwise None is returned.
"""
absolute_difference_function = lambda list_value : abs(list_value - given_value)
closest_value = min(possibilities, key=absolute_difference_function)
if len(possibilities)==1 and abs(given_value-closest_value) > 0.9:
return None
elif len(possibilities)==1 and abs(given_value-closest_value) <= 0.9:
return possibilities[0]
if abs(given_value-closest_value) < max(map(lambda x, y: abs(x-y), possibilities[1:], possibilities[:-1])):
return closest_value
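    # Illustrative example (added for clarity, not in the original code):
    #   _nearest_matches(18.0, [10.0, 20.0, 40.0])  -> 20.0  (|18-20| < max gap of 20)
    #   _nearest_matches(100.0, [10.0, 20.0, 40.0]) -> None  (|100-40| exceeds that gap)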
def _get_key_val(self, x, horizontal_keywords=None):
"""
*Author: Vaibhav Hiwase
        *Details: Function creating a mapping of horizontal text split by
                  any one word in `horizontal_keywords`.
"""
if horizontal_keywords is None:
horizontal_keywords = self.horizontal_keywords
        for i in horizontal_keywords:
            if x.startswith(i):
                return (i, x.replace(i, ''))
        if x in horizontal_keywords:
            return x
        return None
def _filter_val(self, x):
"""
*Author: Vaibhav Hiwase
        *Details: Filter for removing ":" from mapped values.
"""
if type(x)==tuple:
return x and {x[0]: x[1].replace(':', '').strip()}
else:
return x
def _vertical_tables(self, line_dataframe, vertical_columns=None, horizontal_columns=None, horizontal_keywords=None):
"""
*Author: Vaibhav Hiwase
*Details: Mapping tables based on table column names
"""
if vertical_columns is None:
vertical_columns = self.vertical_columns
if horizontal_columns is None:
horizontal_columns = self.horizontal_columns
if horizontal_keywords is None:
horizontal_keywords = self.horizontal_keywords
key_dict = {}
MIN_TABLE_COLUMN = 2
MIN_TABLE_PAGE_BREAK = 1
table_count = 0
prev_page = sorted(set(line_dataframe['page']))[0]
line_dataframe['parent_table_number'] = None
for para_num in sorted(set(line_dataframe['paragraph_number'])):
df_para = line_dataframe[line_dataframe['paragraph_number']==para_num]
df_para['horizontal_mapping'] = df_para[TEXT].apply(self._close_matches, possibilities=horizontal_columns)
for line_num in sorted(set(df_para['line_number'])):
temp_df = df_para[df_para['line_number'] ==line_num]
temp_df['horizontal_mapping'] = temp_df[TEXT].apply(self._get_key_val, horizontal_keywords=horizontal_keywords)
temp_df['horizontal_mapping'] = temp_df['horizontal_mapping'].apply(self._filter_val)
df_para.loc[temp_df.index, 'horizontal_mapping'] = temp_df['horizontal_mapping'].tolist()
line_dataframe.loc[df_para.index, 'horizontal_mapping'] = df_para['horizontal_mapping'].tolist()
df_para['vertical_mapping'] = df_para[TEXT].apply(self._close_matches, possibilities=vertical_columns)
line_dataframe.loc[df_para.index, 'vertical_mapping'] = df_para['vertical_mapping'].tolist()
if abs(df_para['page'].unique()[0] - prev_page) > MIN_TABLE_PAGE_BREAK:
key_dict = {}
df_table = df_para[~pd.isna(df_para['vertical_mapping'])]
if not df_table.empty and not any(pd.isna(df_table['table_number'])):
df_table = df_para[df_para['table_number']==df_table['table_number'].mode().unique()[0]]
key_df = df_table[~pd.isna(df_table['vertical_mapping'])]
if len(key_df) < 2:
key_df = pd.DataFrame()
if not key_df.empty :
line = key_df['line_number'].unique()[0]
if all(~pd.isna(df_table[df_table['line_number']==line]['vertical_mapping'])):
key_dict = {}
table_count += 1
for k, v in zip(key_df['column'], key_df[TEXT]):
left_x_column = key_df[key_df['column']==k][TOP_LEFT_X].mean()
key_dict[k]=(v, left_x_column)
df_para.loc[key_df.index, 'parent_table_number']= table_count
line_dataframe.loc[key_df.index, 'parent_table_number']= table_count
prev_page = df_para['page'].unique()[0]
value_df = df_para[pd.isna(df_para['vertical_mapping'])]
value_df = value_df[pd.isna(df_para['horizontal_mapping'])]
for table_num in sorted(set(value_df['table_number'])):
if not key_dict:
break
table = value_df[value_df['table_number'] == table_num]
# define data
table_row_left_x = []
for column_num in sorted(set(table['column'])):
column = table[table['column']==column_num]
left_x_row = column[TOP_LEFT_X].mean()
table_row_left_x.append(left_x_row)
table_column_available = [None for _ in range(int(max(key_dict))+1)]
for k, v in key_dict.items():
table_column_available[int(k)] = v[1]
table_column = list(filter(None, table_column_available))
table_row = [None for _ in range(len(table_column))]
for given_value in table_row_left_x:
matched = self._nearest_matches(given_value=given_value, possibilities=table_column)
if matched:
matched_index = table_column.index(matched)
table_row[matched_index] = matched
table_row_without_none = list(filter(None, table_row))
if len(table_row_without_none) >= MIN_TABLE_COLUMN:
table_count+= 1
df_para.loc[table.index, 'parent_table_number']= table_count
line_dataframe.loc[table.index, 'parent_table_number']= table_count
prev_page = df_para['page'].unique()[0]
for page_num in sorted(set(line_dataframe['page'])):
df_page = line_dataframe[line_dataframe['page']==page_num]
            # check the first 20 lines of the page
            first_lines = sorted(set(df_page['line_number']))[:20]
            extract_lines = []
            for line_num in first_lines:
temparary_df = df_page[df_page['line_number']==line_num]
if sorted(filter(None, temparary_df['vertical_mapping'].tolist())):
break
extract_lines.append(line_num)
for line_num in extract_lines:
temparary_df = df_page[df_page['line_number']==line_num]
if min(temparary_df['column']):
df_page.loc[temparary_df.index, 'table_number']=None
line_dataframe.loc[temparary_df.index, 'table_number']=None
else:
break
starting_indexes = []
for i in df_page.index:
if pd.isna(df_page['table_number'][i]) and not df_page['is_header'][i]:
starting_indexes.append(i)
if page_num > 1:
prev_page_df = line_dataframe[line_dataframe["page"] == page_num-1]
distinct_table_numbers = prev_page_df[~pd.isna(prev_page_df['table_number'])]['table_number'].tolist()
if distinct_table_numbers:
line_dataframe.loc[starting_indexes, 'table_number'] = max(distinct_table_numbers)
my_table_number = []
prev = 0
for table_num in line_dataframe['table_number']:
if table_num < prev:
my_table_number.append(None)
else:
my_table_number.append(table_num)
prev = table_num
line_dataframe['table_number'] = my_table_number
for table_num in sorted(set(line_dataframe['table_number'])):
df_para = line_dataframe[line_dataframe['table_number']==table_num]
key_df = df_para[~pd.isna(df_para['vertical_mapping'])]
parent_num_redefined = list(filter(None, key_df['parent_table_number'].unique()))
if parent_num_redefined:
parent_num_redefined = parent_num_redefined.pop(0)
else:
parent_num_redefined = None
df_para.loc[key_df.index, 'parent_table_number'] = parent_num_redefined
line_dataframe.loc[key_df.index, 'parent_table_number'] = parent_num_redefined
tab_number = 0
for tab_num in sorted(set(line_dataframe[~pd.isna(line_dataframe['table_number'])]["table_number"])):
tab_df = line_dataframe[line_dataframe['table_number'] == tab_num]
val = tab_df[tab_df['sum_of_column_up_space']==max(tab_df['sum_of_column_up_space'])]['table_identifier'].iloc[0,]
truth_values = tab_df['table_identifier']==val
truth_values = truth_values.tolist()
flag = truth_values and truth_values.pop(0)
table_number = []
while flag :
tab_number += 1
while flag is True:
flag = truth_values and truth_values.pop(0)
table_number.append(tab_number)
while flag is False:
flag = truth_values and truth_values.pop(0)
table_number.append(tab_number)
if table_number:
line_dataframe.loc[tab_df.index, 'table_number2'] = table_number
line_dataframe['table_number'] = line_dataframe['table_number2']
for table_number in sorted(set(line_dataframe['table_number'])):
table_dataframe = line_dataframe[line_dataframe['table_number']==table_number]
key_dict = defaultdict(list)
            for k in sorted(set(table_dataframe['column'])):
left_x_column = table_dataframe[table_dataframe['column']==k][TOP_LEFT_X].mean()
index = table_dataframe[table_dataframe['column']==k].index.tolist()
key_dict[left_x_column].extend(index)
key_dict = dict(key_dict)
key = list(key_dict.keys())
X = np.array(key).reshape(-1,1)
model = MeanShift(n_jobs=-1)
if np.any(X):
xhat = model.fit_predict(X)
my_tuple = []
for k, xkey in zip(key, xhat):
my_tuple.append((k, xkey, key_dict[k]))
my_tuple = sorted(my_tuple, key=lambda x: x[0])
my_final_dict = defaultdict(list)
for i, my_tup in enumerate(my_tuple):
k, xkey, klist = my_tup
my_final_dict[xkey].extend(klist)
my_dict = {}
for i, v in enumerate(my_final_dict.values()):
my_dict[i] = v
for col, index in my_dict.items():
table_dataframe.loc[index, 'column'] = col
line_dataframe.loc[table_dataframe.index, 'column'] = table_dataframe['column'].tolist()
line_data = line_dataframe[line_dataframe['is_header']==False]
line_data = line_data[line_data['is_footer']==False]
table_dictionary = {}
for parent_num in sorted(filter(None, set(line_data['parent_table_number']))):
df_parent = line_data[line_data['parent_table_number']==parent_num]
key_df = df_parent[~pd.isna(df_parent['vertical_mapping'])]
if len(key_df) < 2:
key_df = pd.DataFrame()
if not key_df.empty :
line = key_df['line_number'].unique()[0]
if all(~pd.isna(line_data[line_data['line_number']==line]['vertical_mapping'])):
key_dict = {}
for k, v in zip(key_df['column'], key_df[TEXT]):
left_x_column = key_df[key_df['column']==k][TOP_LEFT_X].mean()
key_dict[k]=(v, left_x_column)
my_dict = {}
for column in sorted(set(key_df['column'])):
col_df = key_df[key_df['column']==column]
left_x_column = col_df[TOP_LEFT_X].mean()
text = ' '.join(col_df[TEXT].tolist())
my_dict[left_x_column]=text
value_df = df_parent[pd.isna(df_parent['vertical_mapping'])]
for para_num in sorted(set(value_df['paragraph_number'])):
df_para = value_df[value_df['paragraph_number']==para_num]
for table_num in sorted(set(df_para['table_number'])):
if not key_dict:
break
table = line_data[line_data['table_number'] == table_num]
# define data
table_row_left_x = []
table_row_text = []
for column_num in sorted(set(table['column'])):
column = table[table['column']==column_num]
left_x_row = column[TOP_LEFT_X].mean()
cell_text = ' '.join(column[TEXT].tolist())
table_row_left_x.append(left_x_row)
table_row_text.append(cell_text)
table_column_available = [None for _ in range(int(max(key_dict))+1)]
for k, v in key_dict.items():
table_column_available[int(k)] = v[1]
table_column = list(filter(None, table_column_available))
table_row = [None for _ in range(len(table_column))]
for given_value in table_row_left_x:
matched = self._nearest_matches(given_value=given_value, possibilities=table_column)
if matched:
matched_index = table_column.index(matched)
table_row[matched_index] = matched
table_row_without_none = list(filter(None, table_row))
table_row_dict = {}
for text, left_x in zip(table_row_text, table_row_without_none):
key = my_dict[left_x]
value = text
table_row_dict[key]= value
for col_name in my_dict.values():
if col_name not in table_row_dict.keys():
table_row_dict[col_name] = ''
table_dictionary[parent_num] = table_row_dict
return table_dictionary, line_dataframe
def _horizontal_tables(self, line_dataframe, horizontal_columns=None, horizontal_keywords=None):
"""
*Author: Vaibhav Hiwase
*Details: Mapping tables based on horizontal alignment in horizontal_columns and horizontak_keywords
"""
if horizontal_columns is None:
horizontal_columns = self.horizontal_columns
if horizontal_keywords is None:
horizontal_keywords = self.horizontal_keywords
horizontal_mapped_dict = {}
for para_num in sorted(set(line_dataframe['paragraph_number'])):
df_para = line_dataframe[line_dataframe['paragraph_number']==para_num]
df_para['horizontal_mapping'] = df_para[TEXT].apply(self._close_matches, possibilities=horizontal_columns)
for line_num in sorted(set(df_para['line_number'])):
temp_df = df_para[df_para['line_number'] ==line_num]
temp_df['horizontal_mapping'] = temp_df[TEXT].apply(self._get_key_val, horizontal_keywords=horizontal_keywords)
temp_df['horizontal_mapping'] = temp_df['horizontal_mapping'].apply(self._filter_val)
df_para.loc[temp_df.index, 'horizontal_mapping'] = temp_df['horizontal_mapping'].tolist()
horizontal_mapping_lines = sorted(set(df_para[~pd.isna(df_para['horizontal_mapping'])]['line_number']))
for line_num in horizontal_mapping_lines :
temp_df = df_para[df_para['line_number'] ==line_num]
key_df = temp_df[~pd.isna(temp_df['horizontal_mapping'])]
value_df = temp_df[pd.isna(temp_df['horizontal_mapping'])]
if not key_df.empty and not value_df.empty:
key = ' '.join(key_df[TEXT].tolist())
value = ' '.join(value_df[TEXT].tolist())
elif not key_df.empty and value_df.empty:
if key_df['horizontal_mapping'].tolist() and type(key_df['horizontal_mapping'].tolist()[0]) == dict:
item = key_df['horizontal_mapping'].tolist()[0]
for k, v in item.items():
key = k
value = v
else:
key = ' '.join(key_df[TEXT].tolist())
value = ''
else:
continue
key_value_dict = {}
key_value_dict[key]=value
horizontal_mapped_dict[line_num] = key_value_dict
return horizontal_mapped_dict, line_dataframe
def table_extraction(self, line_dataframe=None):
"""
*Author: Vaibhav Hiwase
*Details: Extracting tables of vertical mapping and horizontal mapping.
"""
if line_dataframe is False:
return {}
if line_dataframe is None:
line_dataframe = self.line_dataframe.copy()
table_dictionary, line_dataframe = self._vertical_tables(line_dataframe)
horizontal_line_dictionary, line_dataframe = self._horizontal_tables(line_dataframe)
prev_parent_table_no = None
for line_num, parent_table_no in zip(line_dataframe['line_number'], line_dataframe['parent_table_number']):
if parent_table_no:
prev_parent_table_no = parent_table_no
elif prev_parent_table_no and line_num in horizontal_line_dictionary.keys():
table_dictionary[prev_parent_table_no].update(horizontal_line_dictionary[line_num])
count = 0
my_table_dict = {}
for k, v in table_dictionary.items():
my_table_dict[count] = v
count += 1
return my_table_dict
if __name__ == '__main__':
import json
endpoint = ''
subscription_key = ''
document_filepath = ''
json_path = '/'.join(document_filepath.split('/')[:-1]) + '/' + document_filepath.split('/')[-1].split('.')[0] + '.json'
VERTICAL_COLUMNS=[]
HORIZONTAL_COLUMNS=[]
HORIZONTAL_KEYWORDS=[]
table_extractor = TableExtractor(
document_filepath=document_filepath,
endpoint=endpoint,
subscription_key=subscription_key,
operation_url = None,
ocr_outputs = None,
api_type='azure',
api='read',vertical_columns=VERTICAL_COLUMNS,
horizontal_columns=HORIZONTAL_COLUMNS,
horizontal_keywords=HORIZONTAL_KEYWORDS
)
word_dataframe = table_extractor.word_dataframe
line_dataframe = table_extractor.line_dataframe
ocr_outputs = table_extractor.ocr_outputs
is_scanned = table_extractor.is_scanned
table_dict = table_extractor.table_extraction()
with open(json_path, 'w') as f:
f.write(json.dumps(table_dict,indent=4, ensure_ascii=False, sort_keys=False))
|
from couchstore import CouchStore, DocumentInfo
from tempfile import mkdtemp
import os
import os.path as path
import struct
import unittest
class ChangeCountTest(unittest.TestCase):
def setUp(self):
self.tmpdir = mkdtemp()
self.dbname = path.join(self.tmpdir, "testing.couch")
        self.db = CouchStore(self.dbname, 'c')
def tearDown(self):
try:
self.db.commit()
self.db.close()
except:
pass
try:
os.remove(self.dbname)
except:
pass
try:
os.rmdir(self.tmpdir)
except:
pass
def bulkSet(self, prefix, n):
        ids = [prefix + str(x) for x in range(n)]
        datas = ["val" + str(x) for x in range(n)]
self.db.saveMultiple(ids, datas)
def testRewind(self):
# Save some docs
self.db.save("foo1", "bar")
self.db.save("foo2", "baz")
self.db.save("foo3", "bell")
self.db.save("foo4", "a")
self.assertEqual(self.db.changesCount(0,100), 4)
self.db.save("foo1", "new_bar")
self.db.save("foo2", "new_baz")
self.db.save("foo3", "new_bell")
self.db.save("foo4", "new_a")
self.assertEqual(self.db.changesCount(0,100), 4)
self.bulkSet("foo", 100)
self.assertEqual(self.db.changesCount(0, 108), 100)
self.assertEqual(self.db.changesCount(0, 100), 92)
self.assertEqual(self.db.changesCount(1, 100), 92)
self.assertNotEqual(self.db.changesCount(12, 100), 92)
self.assertEqual(self.db.changesCount(50, 99), 50)
self.assertEqual(self.db.changesCount(50, 100), 51)
self.assertEqual(self.db.changesCount(50, 108), 59)
self.assertEqual(self.db.changesCount(51, 100), 50)
self.assertEqual(self.db.changesCount(91, 1000), 18)
self.db.save("foo88", "tval")
self.assertEqual(self.db.changesCount(50, 108), 58)
self.assertEqual(self.db.changesCount(50, 109), 59)
if __name__ == '__main__':
unittest.main()
|
import click
from flask.cli import with_appcontext
@click.command("init")
@with_appcontext
def init():
"""Create a new admin user"""
from {{cookiecutter.app_name}}.extensions import db
from {{cookiecutter.app_name}}.models import User
click.echo("create user")
user = User(username="{{cookiecutter.admin_user_username}}", email="{{cookiecutter.admin_user_email}}", password="{{cookiecutter.admin_user_password}}", active=True)
db.session.add(user)
db.session.commit()
click.echo("created user admin")
|
import matplotlib.pyplot as plt
import os
import torch
class EvaluateTask:
def __init__(self, mantra_model):
task = mantra_model.task
if task:
task.latest_loss = task.evaluate(mantra_model)
print('%s: %s' % (task.evaluation_name, task.latest_loss))
if hasattr(mantra_model.task, 'secondary_metrics'):
task.secondary_metrics_values = {}
for met in task.secondary_metrics:
metric_result = getattr(task, met)(mantra_model)
task.secondary_metrics_values[met] = float(metric_result)
print('%s: %s' % (met.capitalize(), metric_result))
class ModelCheckpoint:
def __init__(self, mantra_model, torch_model):
checkpoint_dir = '%s/trials/%s/checkpoint/' % (os.getcwd(), mantra_model.trial.trial_folder_name)
if not os.path.isdir(checkpoint_dir):
os.makedirs(checkpoint_dir)
if mantra_model.task:
            mantra_model.task.latest_loss = mantra_model.task.evaluate(mantra_model)
            if not hasattr(mantra_model.task, 'best_loss') or mantra_model.task.best_loss is None:
                mantra_model.task.best_loss = mantra_model.task.latest_loss
if mantra_model.save_best_only:
if mantra_model.task.latest_loss < mantra_model.task.best_loss:
torch.save(torch_model.state_dict(), '%s/trials/%s/checkpoint/model_weights.pt' % (os.getcwd(), mantra_model.trial.trial_folder_name))
mantra_model.task.best_loss = mantra_model.task.latest_loss
else:
torch.save(torch_model.state_dict(), '%s/trials/%s/checkpoint/model_weights.pt' % (os.getcwd(), mantra_model.trial.trial_folder_name))
else:
torch.save(torch_model.state_dict(), '%s/trials/%s/checkpoint/model_weights.pt' % (os.getcwd(), mantra_model.trial.trial_folder_name))
class SavePlot:
def __init__(self, mantra_model, plt, plt_name='default.png'):
path = '%s/trials/%s/media' % (os.getcwd(), mantra_model.trial.trial_folder_name)
if not os.path.exists(path):
os.makedirs(path)
plt.savefig(path + "/%s" % plt_name)
class StoreTrial:
def __init__(self, mantra_model, epoch):
mantra_model.store_trial_data(epoch)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of walt
# https://github.com/scorphus/walt
# Licensed under the BSD-3-Clause license:
# https://opensource.org/licenses/BSD-3-Clause
# Copyright (c) 2021, Pablo S. Blum de Aguiar <[email protected]>
"""storages provides entities responsible for writing a Result to external resources"""
import aiopg
import psycopg2
from psycopg2 import sql
from walt import logger
from walt import queries
from walt.result import ResultType
class PostgresResultStorage:
"""PostgresResultStorage manages the database and Result tables, and inserts
data into the tables depending on the type of Result"""
def __init__(self, host, port, user, password, dbname):
self._dbname = dbname
self._dsn = f"host={host} port={port} user={user} password={password}"
self._pool = None
def create_database(self):
"""create_database creates the database"""
with psycopg2.connect(self._dsn) as conn, conn.cursor() as cur:
logger.info("Creating database %s", self._dbname)
conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
cur.execute(sql.SQL("CREATE DATABASE {}").format(sql.Identifier(self._dbname)))
def create_tables(self):
"""create_tables creates all tables"""
with psycopg2.connect(f"{self._dsn} dbname={self._dbname}") as conn, conn.cursor() as cur:
logger.info("Creating tables on %s", self._dbname)
cur.execute(queries.CREATE_TABLES_SQL)
def drop_database(self):
"""drop_database drops the database"""
with psycopg2.connect(self._dsn) as conn, conn.cursor() as cur:
logger.info("Dropping database %s", self._dbname)
conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
cur.execute(sql.SQL("DROP DATABASE {}").format(sql.Identifier(self._dbname)))
def drop_tables(self):
"""drop_tables drops all tables"""
with psycopg2.connect(f"{self._dsn} dbname={self._dbname}") as conn, conn.cursor() as cur:
logger.info("Dropping tables from %s", self._dbname)
cur.execute(queries.DROP_TABLES_SQL)
async def connect(self):
self._pool = await aiopg.create_pool(f"{self._dsn} dbname={self._dbname}")
async def disconnect(self):
self._pool.close()
await self._pool.wait_closed()
async def save(self, result):
"""save wraps _save and logs exceptions if any"""
try:
await self._save(result)
except Exception:
logger.exception("Failed to save result %s", repr(str(result)))
async def _save(self, result):
"""_save inserts one Result according on its type"""
if not self._pool:
raise RuntimeError("Not connected. Did you forget to call `connect()`?")
async with self._pool.acquire() as conn, conn.cursor() as cur:
logger.info("Saving a result of type %s", result.result_type.name)
result_dict = result.as_dict()
if result.result_type is ResultType.RESULT:
logger.debug("Inserting a result: %s", result_dict)
await cur.execute(queries.RESULT_INSERT_SQL, result_dict)
else:
logger.debug("Inserting an error: %s", result_dict)
await cur.execute(queries.ERROR_INSERT_SQL, result_dict)
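

# Illustrative usage sketch (added for documentation; host, credentials and the
# `result` variable are placeholders, not values from this project):
#
#     storage = PostgresResultStorage("localhost", 5432, "walt", "secret", "walt")
#     storage.create_database()
#     storage.create_tables()
#     await storage.connect()
#     await storage.save(result)   # result: a walt.result Result instance
#     await storage.disconnect()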
|
import json
import re
import redis
from model_mommy import mommy
from django.core.cache import cache
from django.conf import settings
from copy import deepcopy
from django.test.client import Client
from django.core.files.uploadedfile import SimpleUploadedFile
from django import forms
from django.forms import widgets
from django.template.loader import get_template
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.test.client import RequestFactory
from survey.models import *
from survey.online.views import respond
from survey.tests.base_test import BaseTest
from survey.forms.question_set import BatchForm
from survey.forms.answer import SurveyAllocationForm, AddMoreLoopForm
from survey.tests.models.survey_base_test import SurveyBaseTest
class OnlineFlowsTest(BaseTest):
fixtures = ['enumeration_area', 'locations', 'location_types']
def setUp(self):
self.client = Client()
self.survey = mommy.make(Survey)
self.batch_data = {'name': 'test-batch', 'access_channels': [ODKAccess.choice_name(), ],
'survey': self.survey.id}
self.questions_data = []
        # create inline flows for this listing
for answer_class in [NumericalAnswer, TextAnswer, DateAnswer, MultiChoiceAnswer]:
self.questions_data.append({'text': 'text: %s' % answer_class.choice_name(),
'answer_type': answer_class.choice_name(),
'identifier': 'id_%s' % answer_class.choice_name().replace(' ', '_')})
self.questions_data[-1]['options'] = ['Yes', 'No']
raj = self.assign_permission_to(User.objects.create_user('demo12', '[email protected]', 'demo12'),
'can_view_batches')
self.client.login(username='demo12', password='demo12')
def test_add_batch(self):
show_batch_page_url = reverse('batch_index_page', args=(self.survey.pk, ))
response = self.client.get(show_batch_page_url)
self.assertEquals(response.status_code, 200)
self.assertEquals(Batch.objects.count(), 0)
create_batch_url = reverse('new_batch_page', args=(self.survey.pk, ))
response = self.client.post(create_batch_url, data=self.batch_data)
self.assertEquals(response.status_code, 302)
self.assertEquals(Batch.objects.count(), 1)
def test_create_group(self):
group_params = {'text': 'age 1', 'answer_type': NumericalAnswer.choice_name(), 'identifier': 'age'}
# check if you can reach the show params to add one
show_param_url = reverse('show_%s' % ParameterTemplate.resolve_tag())
response = self.client.post(show_param_url)
self.assertEquals(response.status_code, 200)
self.assertEquals(ParameterTemplate.objects.count(), 0)
create_params_url = reverse('new_%s' % ParameterTemplate.resolve_tag())
response = self.client.post(create_params_url, data=group_params)
self.assertEquals(response.status_code, 302)
self.assertEquals(ParameterTemplate.objects.count(), 1)
        # check that a multi-choice parameter template can be created as well
group_params = {'text': 'Choose', 'answer_type': MultiChoiceAnswer.choice_name(), 'identifier': 'choice',
'options': ['Yes', 'No']}
        # create the multi-choice parameter
response = self.client.post(create_params_url, data=group_params)
self.assertEquals(response.status_code, 302)
self.assertEquals(ParameterTemplate.objects.count(), 2)
show_groups_url = reverse('respondent_groups_page')
response = self.client.get(show_groups_url)
self.assertEquals(response.status_code, 302) # only users with 'auth.can_view_household_groups' can access
self.assign_permission_to(User.objects.create_user('user1', '[email protected]', 'demo123'),
'can_view_household_groups')
client = Client()
client.login(username='user1', password='demo123')
response = client.get(show_groups_url)
self.assertEquals(response.status_code, 200)
create_group_url = reverse('new_respondent_groups_page')
group_data = {'name': 'group1', 'description': 'etc',
'test_question': ParameterTemplate.objects.order_by('created').first().id,
'validation_test': 'between', 'min': 3, 'max': 5}
response = client.post(create_group_url, data=group_data)
self.assertEquals(response.status_code, 302)
self.assertEquals(RespondentGroup.objects.count(), 1)
group = RespondentGroup.objects.first()
edit_group_url = reverse('respondent_groups_edit', args=(group.id, ))
self.assertIn(edit_group_url, response.url)
group_data = {'name': 'group1', 'description': 'etc', 'validation_test': 'equals',
'test_question': ParameterTemplate.objects.order_by('created').last().id,
'value': 1}
response = client.post(edit_group_url, data=group_data) #just post to same url
self.assertEquals(RespondentGroup.objects.first().group_conditions.count(), 2)
self.assertEquals(response.status_code, 302)
self.assertIn(edit_group_url, response.url)
def test_add_questions_to_batch(self):
self.test_add_batch()
self.test_create_group()
batch = Batch.objects.first()
group = RespondentGroup.objects.first()
create_sq_url = reverse('new_qset_question_page', args=(batch.pk, ))
i = 0
question_data = self.questions_data
question_data[1]['group'] = group.id
question_data[3]['group'] = group.id
for idx, data in enumerate(question_data):
data['qset'] = batch.pk
response = self.client.post(create_sq_url, data=data)
            self.assertEquals(len(batch.flow_questions), idx+1)
question = batch.flow_questions[-1]
self.assertEquals(question.text, data['text'])
self.assertEquals(question.identifier, data['identifier'])
self.assertEquals(question.answer_type, data['answer_type'])
self.assertEquals(question.qset.pk, batch.pk)
if 'group' in data:
self.assertEquals(question.group.id, data['group'])
        # now check that the batch's first question is the first parameter question.
first_param = ParameterQuestion.objects.order_by('created').first().pk
self.assertEquals(batch.g_first_question.pk, first_param)
last_param = ParameterQuestion.objects.order_by('created').last()
self.assertEquals(batch.all_questions[1].pk, last_param.pk)
self.assertEquals(len(batch.all_questions), ParameterQuestion.objects.count() + BatchQuestion.objects.count())
class USSDFlowTest(SurveyBaseTest):
def setUp(self):
super(USSDFlowTest, self).setUp()
self.ussd_access = USSDAccess.objects.create(interviewer=self.interviewer, user_identifier='312313800')
self._create_ussd_group_questions()
def test_ussd_flow_invalid_msisdn_gives_error_msg(self):
ussd_url = reverse('ussd')
data = {settings.USSD_MOBILE_NUMBER_FIELD: '',
settings.USSD_MSG_FIELD: ''}
response = self.client.get(ussd_url, data=data)
self.assertEquals('Invalid mobile number', response.content)
data = {settings.USSD_MOBILE_NUMBER_FIELD: '+254795466275',
settings.USSD_MSG_FIELD: ''}
response = self.client.get(ussd_url, data=data)
self.assertEquals('Invalid mobile number for your region', response.content)
def test_ussd_flow_with_valid_msisdn(self):
ussd_url = reverse('ussd')
data = {settings.USSD_MOBILE_NUMBER_FIELD: self.ussd_access.user_identifier,
settings.USSD_MSG_FIELD: ''}
response = self.client.get(ussd_url, data=data)
# at this stage no open surveys yet even though there is survey allocation
template = get_template('interviews/no-open-survey.html')
self.assertIn(template.render({'access': self.ussd_access, 'display_format': 'text'}).strip(),
response.content.strip())
# now open all locations
country = Location.country()
for location in country.get_children():
self.qset.open_for_location(location)
response = self.client.get(ussd_url, data=data)
# check this form requires user to select EA
self.assertIn(self.survey_allocation.allocation_ea.name.upper(), response.content.upper())
data[settings.USSD_MSG_FIELD] = 1
response = self.client.get(ussd_url, data=data)
self.assertIn(self.qset.g_first_question.text.upper(), response.content.upper()) # numeric answer
data[settings.USSD_MSG_FIELD] = 3
response = self.client.get(ussd_url, data=data)
all_questions = self.qset.all_questions
self.assertIn(all_questions[1].text.upper(), response.content.upper()) # text answer
def test_ussd_flow_no_uid(self):
url = reverse('ussd')
data = {'format': 'text'}
request = RequestFactory().get(url, data=data)
response = respond(request)
self.assertEquals(response.status_code, 200)
def test_flow_with_sampled_flow(self):
listing_form = mommy.make(ListingTemplate)
lquestion = mommy.make(Question, qset=listing_form,
answer_type=NumericalAnswer.choice_name(), text='numeric-sample')
listing_form.start_question = lquestion
listing_form.save()
self.survey.has_sampling = True
self.survey.listing_form = listing_form
self.survey.sample_size = 2
self.survey.random_sample_label = 'Facility-{{%s}}' % listing_form.all_questions[-1].identifier
self.survey.save()
self.qset.name = 'first-batch'
self.qset.save()
batch2 = mommy.make(Batch, survey=self.survey, name='a-new-batch')
question = mommy.make(Question, qset=batch2,
answer_type=TextAnswer.choice_name(), text='text-batch-sample')
# self._create_ussd_non_group_questions(listing_form)
data = {settings.USSD_MOBILE_NUMBER_FIELD: self.ussd_access.user_identifier,
settings.USSD_MSG_FIELD: ''}
url = reverse('ussd')
country = Location.country()
# now open for batch2 locations
for location in country.get_children():
self.qset.open_for_location(location) # open batches in locations
for location in country.get_children():
batch2.open_for_location(location) # open batches in locations
response = self.client.get(url, data=data)
# confirm select ea
self.assertIn(self.survey_allocation.allocation_ea.name.upper(), response.content.upper())
data[settings.USSD_MSG_FIELD] = 1
response = self.client.get(url, data=data)
# for as long as sample size is not reached keep asking listing question
self.assertIn('numeric-sample', response.content)
data[settings.USSD_MSG_FIELD] = 17
response = self.client.get(url, data=data)
self.assertIn('Survey Completed', response.content)
# any value should work now
data[settings.USSD_MSG_FIELD] = '8ddhjsd6'
response = self.client.get(url, data=data)
# should return to home page
self.assertIn('Welcome', response.content)
        # since sample size is two it should repeat listing at least twice; after the second pass the flow moves on
data[settings.USSD_MSG_FIELD] = 1
response = self.client.get(url, data=data)
data[settings.USSD_MSG_FIELD] = 25
response = self.client.get(url, data=data)
self.assertIn('Survey Completed', response.content)
# any value should work now
data[settings.USSD_MSG_FIELD] = '8ddau783ehj'
# return to home page
response = self.client.get(url, data=data)
self.assertIn('select', response.content.lower())
self.assertIn('ea', response.content.lower())
data[settings.USSD_MSG_FIELD] = 1
response = self.client.get(url, data=data)
# at this point ask if user wants to continue listing or not
self.assertIn('continue', response.content.lower())
self.assertIn('listing', response.content.lower())
self.assertIn('listing', response.context['answer_form'].render_extra_ussd_html().lower())
self.assertIn('batch', response.context['answer_form'].render_extra_ussd_html().lower())
data[settings.USSD_MSG_FIELD] = 1
response = self.client.get(url, data=data)
# if yes, confirm listing question is present
self.assertIn('numeric-sample', response.content)
data[settings.USSD_MSG_FIELD] = 29
response = self.client.get(url, data=data)
self.assertIn('Survey Completed', response.content)
# any value should restart to welcome page
data[settings.USSD_MSG_FIELD] = '8ddau783ehj'
response = self.client.get(url, data=data)
self.assertIn('select', response.content.lower())
self.assertIn('ea', response.content.lower())
# ea select and proceed to choose listing or batch
data[settings.USSD_MSG_FIELD] = 1
response = self.client.get(url, data=data)
self.assertIn('continue', response.content.lower())
self.assertIn('listing', response.content.lower())
data[settings.USSD_MSG_FIELD] = 2 # choose to start batch
response = self.client.get(url, data=data)
sample_strings = re.findall('.+(Facility-[0-9]{2}).*', response.content)
sample_base = ['Facility-%s' % answer.as_text for answer in lquestion.answer.all()]
        for sample in sample_strings:  # to start a batch, you need to select a random sample
self.assertIn(sample, sample_base)
self.assertIn(sample.lower(), response.context['answer_form'].render_extra_ussd_html().lower())
data[settings.USSD_MSG_FIELD] = 2 # select second sample
response = self.client.get(url, data=data)
self.assertIn(self.qset.name, response.content) # confirm second batch is returned
self.assertIn(batch2.name, response.content)
self.assertIn(self.qset.name, response.context['answer_form'].render_extra_ussd())
self.assertIn(batch2.name, response.context['answer_form'].render_extra_ussd_html())
        data[settings.USSD_MSG_FIELD] = 2  # this should select qset.name since it's the second entry ordered by name
response = self.client.get(url, data=data)
all_questions = self.qset.all_questions
self.assertIn(self.qset.g_first_question.text, response.content)
self.assertIn(all_questions[0].text, response.content) # confirm param question
data[settings.USSD_MSG_FIELD] = 88
response = self.client.get(url, data=data)
data[settings.USSD_MSG_FIELD] = 23
self.assertIn(all_questions[1].text, response.content) # confirm numeric
response = self.client.get(url, data=data)
self.assertIn(all_questions[2].text, response.content) # confirm text
data[settings.USSD_MSG_FIELD] = 'something nice'
response = self.client.get(url, data=data)
self.assertIn(all_questions[3].text, response.content) # confirm multichoice
data[settings.USSD_MSG_FIELD] = '2' # basically select no
response = self.client.get(url, data=data)
self.assertIn(all_questions[4].text, response.content) # confirm auto
response = self.client.get(url, data=data)
self.assertIn('Survey Completed', response.content)
def test_non_ussd_flows(self):
qset = self.qset1
country = Location.country()
for location in country.get_children():
qset.open_for_location(location)
# just create batch with non-ussd questions
dq = mommy.make(Question, qset=qset, text='date_answer-sdosod', answer_type=DateAnswer.choice_name())
odkq = mommy.make(Question, qset=qset, text='odk_answer-0sd384s', answer_type=GeopointAnswer.choice_name())
ikq = mommy.make(Question, qset=qset, text='image_answer-9923uhdisb', answer_type=ImageAnswer.choice_name())
qset.start_question = dq
qset.save()
mommy.make(QuestionFlow, question=dq, next_question=odkq)
mommy.make(QuestionFlow, question=odkq, next_question=ikq)
url = reverse("test_qset_flow", args=(qset.id, ))
data = dict()
data['uid'] = self.access_channel.user_identifier
self.client = Client()
raj = self.assign_permission_to(User.objects.create_user('demo12', '[email protected]', 'demo12'),
'can_view_batches')
self.client.login(username='demo12', password='demo12')
response = self.client.get(url)
self.assertIn(dq.text, response.content)
self.assertTrue(isinstance(response.context['answer_form'].fields['value'].widget, widgets.DateInput))
response = self.client.get(url, data={'value': '21-07-2017'})
self.assertIn(odkq.text, response.content)
response = self.client.get(url, data={'value': '12 9 20 1'})
self.assertIn(ikq.text, response.content)
self.assertTrue(isinstance(response.context['answer_form'].fields['value'].widget, widgets.FileInput))
# test go back
response = self.client.get(url, data={'value': '', 'go-back': True})
self.assertIn(odkq.text, response.content)
self.assertTrue(isinstance(response.context['answer_form'].fields['value'].widget, widgets.TextInput))
# answer again
response = self.client.get(url, data={'value': '15 9 10 1'})
self.assertIn(ikq.text, response.content)
import os
BASE_DIR = os.path.dirname(__file__)
image_path = os.path.join(BASE_DIR, 'testimage.png')
sfi = SimpleUploadedFile('sample_image.png', open(image_path).read(), content_type='image/png')
response = self.client.post(url, {'value': sfi})
self.assertEquals(response.status_code, 200)
def test_flow_with_loop(self):
all_questions = self.qset.all_questions
# loops cannot start with param questions
loop = mommy.make(QuestionLoop, loop_starter=all_questions[1], loop_ender=all_questions[3],
repeat_logic=QuestionLoop.FIXED_REPEATS)
mommy.make(FixedLoopCount, value=2, loop=loop)
# mommy.make(QuestionLoop, loop_starter=all_questions[3], loop_ender=all_questions[4],
# repeat_logic=QuestionLoop.PREVIOUS_QUESTION)
qset = self.qset
url = reverse("test_qset_flow", args=(qset.id,))
data = dict()
self.client = Client()
raj = self.assign_permission_to(User.objects.create_user('demo12', '[email protected]', 'demo12'),
'can_view_batches')
self.client.login(username='demo12', password='demo12')
data['uid'] = self.access_channel.user_identifier
response = self.client.get(url, data=data)
self.assertIn(all_questions[0].text, response.content)
data['value'] = 12
response = self.client.get(url, data=data)
self.assertIn(all_questions[1].text, response.content)
data['value'] = 4
response = self.client.get(url, data=data)
self.assertIn(all_questions[2].text, response.content)
data['value'] = 'hey man'
response = self.client.get(url, data=data)
self.assertIn(all_questions[3].text, response.content)
data['value'] = 2
response = self.client.get(url, data=data)
# at this point, we must return to first loop
self.assertIn(all_questions[1].text, response.content)
data['value'] = 18
response = self.client.get(url, data=data)
self.assertIn(all_questions[2].text, response.content)
data['value'] = 'hey boy'
response = self.client.get(url, data=data)
self.assertIn(all_questions[3].text, response.content)
data['value'] = 1
response = self.client.get(url, data=data)
self.assertIn(all_questions[4].text, response.content)
data['value'] = 17
response = self.client.get(url, data=data)
self.assertIn(response.context['template_file'], 'interviews/completed.html')
def test_auto_answer_loop_flows(self):
qset = self.qset1
country = Location.country()
for location in country.get_children():
qset.open_for_location(location)
# just create batch with non-ussd questions
nq = mommy.make(Question, qset=qset, text='numeric_answer-9923uhdisb',
answer_type=NumericalAnswer.choice_name())
aq = mommy.make(Question, qset=qset, text='auto_answer-sdosod', answer_type=AutoResponse.choice_name())
tq = mommy.make(Question, qset=qset, text='text_answer-0sd384s', answer_type=TextAnswer.choice_name())
tq2 = mommy.make(Question, qset=qset, text='text2_answer-99siusuddisb', answer_type=TextAnswer.choice_name())
qset.start_question = nq
qset.save()
mommy.make(QuestionFlow, question=nq, next_question=aq)
mommy.make(QuestionFlow, question=aq, next_question=tq)
mommy.make(QuestionFlow, question=tq, next_question=tq2)
all_questions = qset.all_questions
loop = mommy.make(QuestionLoop, loop_starter=all_questions[1], loop_ender=all_questions[3],
repeat_logic=QuestionLoop.PREVIOUS_QUESTION)
mommy.make(PreviousAnswerCount, value=all_questions[0], loop=loop)
#url = reverse("test_qset_flow", args=(qset.id,))
url = reverse('ussd')
data = {settings.USSD_MOBILE_NUMBER_FIELD: self.ussd_access.user_identifier, }
self.client = Client()
raj = self.assign_permission_to(User.objects.create_user('demo12', '[email protected]', 'demo12'),
'can_view_batches')
self.client.login(username='demo12', password='demo12')
# data['uid'] = self.access_channel.user_identifier
response = self.client.get(url, data=data)
self.assertIn(self.survey_allocation.allocation_ea.name.upper(), response.content.upper())
data[settings.USSD_MSG_FIELD] = 1
response = self.client.get(url, data=data)
self.assertIn(all_questions[0].text, response.content)
data[settings.USSD_MSG_FIELD] = 2
response = self.client.get(url, data=data)
self.assertIn(all_questions[1].text, response.content)
data[settings.USSD_MSG_FIELD] = 4
response = self.client.get(url, data=data)
self.assertIn(all_questions[2].text, response.content)
data[settings.USSD_MSG_FIELD] = 'Somethin nice'
response = self.client.get(url, data=data)
self.assertIn(all_questions[3].text, response.content)
data[settings.USSD_MSG_FIELD] = 'Cool man'
response = self.client.get(url, data=data)
        # should repeat the loop 2 times as per the answer to the first question
self.assertIn(all_questions[1].text, response.content)
data[settings.USSD_MSG_FIELD] = 34
response = self.client.get(url, data=data)
self.assertIn(all_questions[2].text, response.content)
data[settings.USSD_MSG_FIELD] = 'Somethin nice2'
response = self.client.get(url, data=data)
self.assertIn(all_questions[3].text, response.content)
data[settings.USSD_MSG_FIELD] = 'Cool man2'
response = self.client.get(url, data=data)
self.assertIn(response.context['template_file'],'interviews/completed.html')
def test_restart_flow(self):
access = self.access_channel
url = reverse('ussd')
country = Location.country()
# now open for batch2 locations
for location in country.get_children():
self.qset.open_for_location(location) # open batches in locations
data = {settings.USSD_MOBILE_NUMBER_FIELD: '312313801', settings.USSD_MSG_FIELD: ''}
response = self.client.get(url, data=data)
self.assertIn('No such interviewer', response.content)
data[settings.USSD_MOBILE_NUMBER_FIELD] = self.ussd_access.user_identifier
response = self.client.get(url, data=data)
all_questions = self.qset.all_questions
# confirm select ea
self.assertIn(self.survey_allocation.allocation_ea.name.upper(), response.content.upper())
self.assertIn(self.survey_allocation.allocation_ea.name.upper(),
response.context['answer_form'].render_extra_ussd_html().upper())
self.assertIn(self.survey_allocation.allocation_ea.name.upper(),
response.context['answer_form'].render_extra_ussd().upper())
data[settings.USSD_MSG_FIELD] = 1
response = self.client.get(url, data=data)
# ea selected. answer question 1
self.assertIn(all_questions[0].text, response.content)
data[settings.USSD_MSG_FIELD] = 17
response = self.client.get(url, data=data)
self.assertIn(all_questions[1].text, response.content)
data[settings.USSD_MSG_FIELD] = 27
response = self.client.get(url, data=data)
self.assertIn(all_questions[2].text, response.content)
refresh_url = reverse('refresh_data_entry', args=(self.ussd_access.id, ))
self.client.get(refresh_url, data=data)
del data[settings.USSD_MSG_FIELD]
response = self.client.get(url, data=data)
self.assertIn(self.survey_allocation.allocation_ea.name.upper(), response.content.upper())
data[settings.USSD_MSG_FIELD] = 1
response = self.client.get(url, data=data)
# ea selected. answer question 1
self.assertIn(all_questions[0].text, response.content)
class AnswerFormExtra(SurveyBaseTest):
def setUp(self):
super(AnswerFormExtra, self).setUp()
self.ussd_access = USSDAccess.objects.create(interviewer=self.interviewer, user_identifier='312313800')
def test_loop_answer_form(self):
url = reverse('ussd')
request = RequestFactory().get(url)
request.user = User.objects.create_user('demo12', '[email protected]', 'demo12')
answer_form = AddMoreLoopForm(request, self.ussd_access)
self.assertTrue(isinstance(answer_form.fields['value'].widget, forms.NumberInput))
for choice in AddMoreLoopForm.CHOICES:
self.assertIn('%s: %s' % choice, answer_form.render_extra_ussd())
self.assertIn('%s: %s' % choice, answer_form.render_extra_ussd_html())
|
from kit_dl.core import Scraper
from tests.base import BaseUnitTest
class TestCore(BaseUnitTest):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.scraper = Scraper(None, cls.dao, True)
def test_format_assignment_name_leading_zero(self):
format = "Blatt_$$"
self.assertEqual("Blatt_01", self.scraper.format_assignment_name(format, 1))
def test_format_assignment_name_digit_overflow(self):
format = "Blatt_$"
self.assertEqual("Blatt_10", self.scraper.format_assignment_name(format, 10))
def test_detect_assignment_format(self):
assignment_files = ["some_file.py", "AB-01.pdf"]
self.assertEqual("AB-$$", self.scraper.detect_format(assignment_files))
def test_invalid_assignment_format_should_not_be_detected(self):
assignment_files = ["some_file.py", "not_an_assignment.pdf"]
self.assertIsNone(self.scraper.detect_format(assignment_files))
def test_find_latest_assignment(self):
assignment_files = ["Blatt-05.pdf", "Blatt-01.pdf", "Blatt-08.pdf", "Blatt-06.pdf"]
self.assertEqual(8, self.scraper.get_latest_assignment(assignment_files, "Blatt-$$"))
def test_find_latest_assignment_two_digit_num(self):
assignment_files = ["Blatt-05.pdf", "Blatt-10.pdf"]
self.assertEqual(10, self.scraper.get_latest_assignment(assignment_files, "Blatt-$$"))
def test_on_start_update_latest_assignment_found(self):
actual = self.scraper.get_on_start_update_msg("la", 9, "Blatt$$")
self.assertEqual("Updating LA assignments, latest: Blatt09.pdf", actual)
def test_on_start_update_latest_assignment_not_found_negative(self):
actual = self.scraper.get_on_start_update_msg("la", -1, "Blatt$$")
self.assertEqual("No assignments found in LA directory, starting at 1.", actual)
def test_on_start_update_latest_assignment_not_found_zero(self):
actual = self.scraper.get_on_start_update_msg("la", 0, "Blatt$$")
self.assertEqual("No assignments found in LA directory, starting at 1.", actual)
|
import multiprocessing
import threading
import time
import warnings
from collections import namedtuple
from dagster_postgres.utils import get_conn
from six.moves.queue import Empty
from dagster import check
from dagster.core.definitions.environment_configs import SystemNamedDict
from dagster.core.events.log import EventRecord
from dagster.core.serdes import (
ConfigurableClass,
ConfigurableClassData,
deserialize_json_to_dagster_namedtuple,
serialize_dagster_namedtuple,
)
from dagster.core.storage.event_log import WatchableEventLogStorage
from dagster.core.types import Field, String
from .pynotify import await_pg_notifications
CREATE_EVENT_LOG_SQL = '''
CREATE TABLE IF NOT EXISTS event_log (
id BIGSERIAL PRIMARY KEY,
run_id VARCHAR(255) NOT NULL,
event_body VARCHAR NOT NULL
)
'''
WIPE_EVENT_LOG_SQL = 'DELETE FROM event_log'
DELETE_EVENT_LOG_SQL = 'DELETE FROM event_log WHERE run_id = %s'
DROP_EVENT_LOG_SQL = 'DROP TABLE IF EXISTS event_log'
SELECT_EVENT_LOG_SQL = 'SELECT event_body FROM event_log WHERE id = %s'
CHANNEL_NAME = 'run_events'
# Why? Because this is about as long as we expect a roundtrip to RDS to take.
WATCHER_POLL_INTERVAL = 0.2
class PostgresEventLogStorage(WatchableEventLogStorage, ConfigurableClass):
def __init__(self, postgres_url, inst_data=None):
self.conn_string = check.str_param(postgres_url, 'postgres_url')
self._event_watcher = create_event_watcher(self.conn_string)
conn = get_conn(self.conn_string)
conn.cursor().execute(CREATE_EVENT_LOG_SQL)
self._inst_data = check.opt_inst_param(inst_data, 'inst_data', ConfigurableClassData)
@property
def inst_data(self):
return self._inst_data
@classmethod
def config_type(cls):
return SystemNamedDict('PostgresRunStorageConfig', {'postgres_url': Field(String)})
@staticmethod
def from_config_value(inst_data, config_value, **kwargs):
return PostgresEventLogStorage(inst_data=inst_data, **dict(config_value, **kwargs))
@staticmethod
def create_clean_storage(conn_string):
check.str_param(conn_string, 'conn_string')
conn = get_conn(conn_string)
conn.cursor().execute(DROP_EVENT_LOG_SQL)
return PostgresEventLogStorage(conn_string)
def get_logs_for_run(self, run_id, cursor=-1):
'''Get all of the logs corresponding to a run.
Args:
run_id (str): The id of the run for which to fetch logs.
cursor (Optional[int]): Zero-indexed logs will be returned starting from cursor + 1,
i.e., if cursor is -1, all logs will be returned. (default: -1)
'''
check.str_param(run_id, 'run_id')
check.int_param(cursor, 'cursor')
check.invariant(cursor >= -1, 'Cursor must be -1 or greater')
with get_conn(self.conn_string).cursor() as curs:
FETCH_SQL = (
'SELECT event_body FROM event_log WHERE run_id = %s ORDER BY id ASC OFFSET %s;'
)
curs.execute(FETCH_SQL, (run_id, cursor + 1))
rows = curs.fetchall()
return list(map(lambda r: deserialize_json_to_dagster_namedtuple(r[0]), rows))
def store_event(self, event):
'''Store an event corresponding to a pipeline run.
Args:
run_id (str): The id of the run that generated the event.
event (EventRecord): The event to store.
'''
check.inst_param(event, 'event', EventRecord)
with get_conn(self.conn_string).cursor() as curs:
event_body = serialize_dagster_namedtuple(event)
curs.execute(
'''INSERT INTO event_log (run_id, event_body) VALUES (%s, %s) RETURNING run_id, id;''',
(event.run_id, event_body),
)
res = curs.fetchone()
curs.execute(
'''NOTIFY {channel}, %s; '''.format(channel=CHANNEL_NAME),
(res[0] + '_' + str(res[1]),),
)
def wipe(self):
'''Clear the log storage.'''
with get_conn(self.conn_string).cursor() as curs:
curs.execute(WIPE_EVENT_LOG_SQL)
def delete_events(self, run_id):
with get_conn(self.conn_string).cursor() as curs:
curs.execute(DELETE_EVENT_LOG_SQL, (run_id,))
def watch(self, run_id, start_cursor, callback):
self._event_watcher.watch_run(run_id, start_cursor, callback)
def end_watch(self, run_id, handler):
self._event_watcher.unwatch_run(run_id, handler)
@property
def event_watcher(self):
return self._event_watcher
def __del__(self):
# Keep the inherent limitations of __del__ in Python in mind!
self._event_watcher.close()
EventWatcherProcessStartedEvent = namedtuple('EventWatcherProcessStartedEvent', '')
EventWatcherStart = namedtuple('EventWatcherStart', '')
EventWatcherEvent = namedtuple('EventWatcherEvent', 'payload')
EventWatchFailed = namedtuple('EventWatchFailed', 'message')
EventWatcherEnd = namedtuple('EventWatcherEnd', '')
EventWatcherThreadEvents = (
EventWatcherProcessStartedEvent,
EventWatcherStart,
EventWatcherEvent,
EventWatchFailed,
EventWatcherEnd,
)
EventWatcherThreadNoopEvents = (EventWatcherProcessStartedEvent, EventWatcherStart)
EventWatcherThreadEndEvents = (EventWatchFailed, EventWatcherEnd)
POLLING_CADENCE = 0.25
def _postgres_event_watcher_event_loop(conn_string, queue, run_id_dict):
init_called = False
queue.put(EventWatcherProcessStartedEvent())
try:
for notif in await_pg_notifications(
conn_string, channels=[CHANNEL_NAME], timeout=POLLING_CADENCE, yield_on_timeout=True
):
if not init_called:
init_called = True
queue.put(EventWatcherStart())
if notif is not None:
run_id, index = notif.payload.split('_')
if run_id in run_id_dict:
queue.put(EventWatcherEvent((run_id, index)))
else:
# The polling window has timed out
pass
except Exception as e: # pylint: disable=broad-except
queue.put(EventWatchFailed(message=str(e)))
finally:
queue.put(EventWatcherEnd())
def create_event_watcher(conn_string):
check.str_param(conn_string, 'conn_string')
queue = multiprocessing.Queue()
m_dict = multiprocessing.Manager().dict()
process = multiprocessing.Process(
target=_postgres_event_watcher_event_loop, args=(conn_string, queue, m_dict)
)
process.start()
# block and ensure that the process has actually started. This was required
# to get processes to start in linux in buildkite.
check.inst(queue.get(block=True), EventWatcherProcessStartedEvent)
return PostgresEventWatcher(process, queue, m_dict, conn_string)
def watcher_thread(conn_string, queue, handlers_dict, dict_lock, watcher_thread_exit):
done = False
while not done and not watcher_thread_exit.is_set():
event_list = []
while not queue.empty():
try:
event_list.append(queue.get_nowait())
except Empty:
pass
for event in event_list:
if not isinstance(event, EventWatcherThreadEvents):
warnings.warn(
'Event watcher thread got unexpected event {event}'.format(event=event)
)
continue
if isinstance(event, EventWatcherThreadNoopEvents):
continue
elif isinstance(event, EventWatcherThreadEndEvents):
done = True
else:
assert isinstance(event, EventWatcherEvent)
run_id, index_str = event.payload
index = int(index_str)
with dict_lock:
handlers = handlers_dict.get(run_id, [])
with get_conn(conn_string).cursor() as curs:
curs.execute(SELECT_EVENT_LOG_SQL, (index,))
dagster_event = deserialize_json_to_dagster_namedtuple(curs.fetchone()[0])
for (cursor, callback) in handlers:
if index >= cursor:
callback(dagster_event)
time.sleep(WATCHER_POLL_INTERVAL)
class PostgresEventWatcher:
def __init__(self, process, queue, run_id_dict, conn_string):
self.process = check.inst_param(process, 'process', multiprocessing.Process)
self.run_id_dict = check.inst_param(
run_id_dict, 'run_id_dict', multiprocessing.managers.DictProxy
)
self.handlers_dict = {}
self.dict_lock = threading.Lock()
self.queue = check.inst_param(queue, 'queue', multiprocessing.queues.Queue)
self.conn_string = conn_string
self.watcher_thread_exit = threading.Event()
self.watcher_thread = threading.Thread(
target=watcher_thread,
args=(
self.conn_string,
self.queue,
self.handlers_dict,
self.dict_lock,
self.watcher_thread_exit,
),
)
self.watcher_thread.start()
def has_run_id(self, run_id):
with self.dict_lock:
_has_run_id = run_id in self.run_id_dict
return _has_run_id
def watch_run(self, run_id, start_cursor, callback):
with self.dict_lock:
if run_id in self.run_id_dict:
self.handlers_dict[run_id].append((start_cursor, callback))
else:
# See: https://docs.python.org/2/library/multiprocessing.html#multiprocessing.managers.SyncManager
run_id_dict = self.run_id_dict
run_id_dict[run_id] = None
self.run_id_dict = run_id_dict
self.handlers_dict[run_id] = [(start_cursor, callback)]
def unwatch_run(self, run_id, handler):
with self.dict_lock:
if run_id in self.run_id_dict:
self.handlers_dict[run_id] = [
(start_cursor, callback)
for (start_cursor, callback) in self.handlers_dict[run_id]
if callback != handler
]
if not self.handlers_dict[run_id]:
del self.handlers_dict[run_id]
run_id_dict = self.run_id_dict
del run_id_dict[run_id]
self.run_id_dict = run_id_dict
def close(self):
self.process.terminate()
self.process.join()
self.watcher_thread_exit.set()
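# Note (added for clarity): store_event() NOTIFYs on the 'run_events' channel with a
# payload of the form '<run_id>_<row id>'; the watcher subprocess splits that payload
# on '_' to route it, and the watcher thread then re-reads the row by id
# (SELECT_EVENT_LOG_SQL) before dispatching the registered callbacks.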
|
import discord
from discord.ext import commands
from bs4 import BeautifulSoup
class Define:
def __init__(self, bot):
self.bot = bot
self.aiohttp_session = bot.aiohttp_session
self.color = bot.user_color
self.url = 'https://google.com/search'
self.headers = {'User-Agent':
'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/41.0.2228.0 Safari/537.36'}
self.parts_of_speech = {'noun': 'n.', 'verb': 'v.', 'adjective': 'adj.', 'adverb': 'adv.',
'interjection': 'interj.', 'conjunction': 'conj.', 'preposition': 'prep.',
'pronoun': 'pron.', 'contraction': 'cont.'}
@commands.command(aliases=['def'])
async def define(self, ctx, word: str):
""" Find the definition of a word """
params = {'q': f'define+{word}', 'source': 'hp'}
# Request page source with custom headers and params from above
async with self.aiohttp_session.get(self.url, params=params, headers=self.headers) as r:
html = await r.text()
# Soup
soup = BeautifulSoup(html, 'lxml')
        # Looks for Google's embedded word data; a raised AttributeError is caught to inform of possible
        # reasons why no definition was found
try:
defn = soup.find('div', attrs={'data-dobid': 'dfn'}).text
pos = soup.find('div', attrs={'class': 'lr_dct_sf_h'}).span.text
syn_list = [x.text for x in soup.find_all('span', attrs={'class': '_Yht'})]
except AttributeError:
print('Unable to find definition. Ensure you do not have to do a Google captcha.')
return await ctx.error(f'Unable to find a definition for `{word}`.')
# Create embed
em = discord.Embed(title=word.capitalize(), color=self.color)
em.add_field(name='Definition', value=f'_{self.parts_of_speech[pos]}_, {defn}')
msg = f"**{word.capitalize()}:**\n_{self.parts_of_speech[pos]}_, {defn}"
if len(syn_list) != 0:
msg += f"\n**Synonyms:**\n{', '.join(syn_list[:5])}"
await ctx.message.edit(content=msg)
def setup(bot):
bot.add_cog(Define(bot))
|
import os
os.system("title Loading...")
os.system("python -m pip install pywin32")
os.system("python -m pip install pillow")
os.system("python -m pip install matplotlib")
os.system('cls')
import win32con
from win32api import keybd_event, mouse_event
import time
import random
import win32api
import time
# NOTE: os.system('cd ...') does not change the interpreter's working directory,
# so switch with os.chdir instead (silently skipped if the folder is missing).
try:
    os.chdir(r'C:\Program Files (x86)\MineAI')
except OSError:
    pass
from PIL import ImageGrab
from image import *
from MCKeyboard import *
from check import *
def try_get():
print("Move into Minecraft")
time.sleep(5)
MCK = MC_Window()
MCK.move_forward(1)
def new_main():
print("Move into Minecraft")
time.sleep(4)
im = ImageGrab.grab()
arr = analyzeImage(im)
while amount_in_array("wood",arr) > .01:
im = ImageGrab.grab()
arr = analyzeImage(im)
hor,ver = where_is("wood",arr)
if hor == "middle" and ver =="center":
break
if hor == "left":
rotate(45)
elif hor == "right":
rotate(-45)
elif ver == "top":
v_rotate(45)
elif ver == "bottom":
v_rotate(-45)
def main():
os.system('cls')
    os.system('echo Received.')
print()
os.system('title MineAI')
print("Loading AI...")
time.sleep(2)
print()
os.system('cls')
print("Move into Minecraft")
time.sleep(2)
images = []
MCK = MC_Window()
try:
while True:
im = ImageGrab.grab()
images.append(im)
analyzeImage(im)
MCK.move_forward(1)
im = ImageGrab.grab()
analyzeImage(im)
images.append(im)
MCK.move_left(1)
im = ImageGrab.grab()
analyzeImage(im)
images.append(im)
MCK.move_back(1)
im = ImageGrab.grab()
analyzeImage(im)
images.append(im)
MCK.move_right(1)
im = ImageGrab.grab()
analyzeImage(im)
images.append(im)
time.sleep(1)
MCK.mine(1)
MCK.rotate(170)
MCK.get_inventory()
time.sleep(1)
MCK.leave_inventory()
except Exception as e:
print(e,"Got exception!")
return images
return images
os.system('echo off')
os.system('cls')
os.system('echo Received.')
print()
os.system('title Waiting for you to type...')
debug1 = input('Not A Developer? Press Enter. ')
debug1 = debug1.upper()
if debug1 != "":
debug()
else:
main()
print()
|
import copy
import posixpath
import re
from typing import Optional, Sequence, List, Dict, Any
from urllib.request import urlopen
from kubragen import KubraGen
from kubragen.builder import Builder
from kubragen.configfile import ConfigFile, ConfigFileRenderMulti, ConfigFileRender_Yaml, ConfigFileRender_RawStr, \
ConfigFileRender_Ini, ConfigFileOutput_Dict
from kubragen.data import ValueData
from kubragen.exception import InvalidNameError, InvalidParamError, InvalidOperationError
from kubragen.helper import LiteralStr
from kubragen.kdata import IsKData, KData_Secret, KData_ConfigMap, KData_ConfigMapManual, KData_SecretManual, \
KData_Value
from kubragen.kdatahelper import KDataHelper_Volume, KDataHelper_Env
from kubragen.merger import Merger
from kubragen.object import ObjectItem, Object
from kubragen.types import TBuild, TBuildItem
from kubragen.util import is_allowed_types
from .configfile import GrafanaConfigFile
from .option import GrafanaOptions, GrafanaDashboardSource_KData, GrafanaDashboardSource, GrafanaDashboardSource_Str, \
GrafanaDashboardSource_Url, GrafanaDashboardSource_LocalFile, GrafanaDashboardSource_GNet
from .util import get_size
class GrafanaBuilder(Builder):
"""
Grafana builder.
.. list-table::
:header-rows: 1
* - build
- description
* - BUILD_CONFIG
- creates configurations
* - BUILD_SERVICE
- creates deployments and services
.. list-table::
:header-rows: 1
* - build item
- description
* - BUILDITEM_CONFIG
- ConfigMap
* - BUILDITEM_CONFIG_DASHBOARD
- ConfigMap with dashboard sources
* - BUILDITEM_CONFIG_SECRET
- Secret
* - BUILDITEM_DEPLOYMENT
          - Deployment
* - BUILDITEM_SERVICE
- Service
.. list-table::
:header-rows: 1
* - object name
- description
- default value
* - config
- ConfigMap
- ```<basename>-config```
* - config-dashboard
- ConfigMap
- ```<basename>-config-dashboard```
* - config-secret
- Secret
- ```<basename>-config-secret```
* - service
- Service
- ```<basename>```
* - deployment
- Deployment
- ```<basename>```
        * - pod-label-app
- label *app* to be used by selection
- ```<basename>```
"""
options: GrafanaOptions
configfile: Optional[str]
_namespace: str
SOURCE_NAME = 'kg_grafana'
BUILD_CONFIG = TBuild('config')
BUILD_SERVICE = TBuild('service')
BUILDITEM_CONFIG = TBuildItem('config')
BUILDITEM_CONFIG_SECRET = TBuildItem('config-secret')
BUILDITEM_DEPLOYMENT = TBuildItem('deployment')
BUILDITEM_SERVICE = TBuildItem('service')
def __init__(self, kubragen: KubraGen, options: Optional[GrafanaOptions] = None):
super().__init__(kubragen)
if options is None:
options = GrafanaOptions()
self.options = options
self.configfile = None
self._namespace = self.option_get('namespace')
self.object_names_init({
'config': self.basename('-config'),
'config-dashboard': self.basename('-config-dashboard'),
'config-secret': self.basename('-config-secret'),
'service': self.basename(),
'deployment': self.basename(),
'pod-label-app': self.basename(),
})
def option_get(self, name: str):
return self.kubragen.option_root_get(self.options, name)
def is_config_provisioning_item(self, item: str) -> bool:
itemvalue = self.option_get('config.provisioning.{}'.format(item))
if itemvalue is None or (isinstance(itemvalue, Sequence) and len(itemvalue) == 0):
return False
return True
def basename(self, suffix: str = ''):
return '{}{}'.format(self.option_get('basename'), suffix)
def namespace(self):
return self._namespace
def build_names(self) -> Sequence[TBuild]:
return [self.BUILD_CONFIG, self.BUILD_SERVICE]
def build_names_required(self) -> Sequence[TBuild]:
return [self.BUILD_CONFIG, self.BUILD_SERVICE]
def builditem_names(self) -> Sequence[TBuildItem]:
return [
self.BUILDITEM_CONFIG,
self.BUILDITEM_CONFIG_SECRET,
self.BUILDITEM_DEPLOYMENT,
self.BUILDITEM_SERVICE,
]
def internal_build(self, buildname: TBuild) -> Sequence[ObjectItem]:
if buildname == self.BUILD_CONFIG:
return self.internal_build_config()
elif buildname == self.BUILD_SERVICE:
return self.internal_build_service()
else:
raise InvalidNameError('Invalid build name: "{}"'.format(buildname))
def internal_build_config(self) -> Sequence[ObjectItem]:
ret: List[ObjectItem] = []
ret.append(
Object({
'apiVersion': 'v1',
'kind': 'ConfigMap',
'metadata': {
'name': self.object_name('config'),
'namespace': self.namespace(),
},
'data': {
'grafana.ini': LiteralStr(self.configfile_get()),
},
}, name=self.BUILDITEM_CONFIG, source=self.SOURCE_NAME, instance=self.basename()),
)
if self.option_get('config.dashboards') is not None:
# Create one ConfigMap per provider to avoid ConfigMap size limit
configd_data: Dict[Any, Any] = {}
for dashboard in self.option_get('config.dashboards'):
if not isinstance(dashboard, GrafanaDashboardSource_KData):
if dashboard.provider not in configd_data:
configd_data[dashboard.provider] = {}
configd_data[dashboard.provider]['dashboard-{}.json'.format(dashboard.name)] = LiteralStr(self._dashboard_fetch(dashboard))
max_config_size = self.option_get('config.dashboard_config_max_size')
for cprovider, cproviderdata in configd_data.items():
if max_config_size is not None:
current_config_size = get_size(cproviderdata)
if current_config_size > max_config_size:
raise InvalidOperationError('Maximum ConfigMap size reached for dashboard provider "{}". '
'Set "config.dashboard_config_max_size" to None to disable this check. '
'Max: {} Current: {}"'.format(cprovider, max_config_size, current_config_size))
ret.append(
Object({
'apiVersion': 'v1',
'kind': 'ConfigMap',
'metadata': {
'name': '{}-{}'.format(self.object_name('config-dashboard'), cprovider),
'namespace': self.namespace(),
},
'data': cproviderdata,
}, name='config-dashboard-{}'.format(cprovider), source=self.SOURCE_NAME, instance=self.basename()),
)
secret_data = {}
if not IsKData(self.option_get('config.admin.user')):
if self.option_get('config.admin.user') is not None:
secret_data.update({
'admin_user': self.kubragen.secret_data_encode(self.option_get('config.admin.user')),
})
if not IsKData(self.option_get('config.admin.password')):
if self.option_get('config.admin.password') is not None:
secret_data.update({
'admin_password': self.kubragen.secret_data_encode(self.option_get('config.admin.password')),
})
if self.is_config_provisioning_item('datasources'):
secret_data['datasources.yaml'] = self.kubragen.secret_data_encode(
self.configfile_provisioning_get('datasources', 'config.provisioning.datasources'))
if self.is_config_provisioning_item('plugins'):
secret_data['plugins.yaml'] = self.kubragen.secret_data_encode(
self.configfile_provisioning_get('apps', 'config.provisioning.plugins'))
if self.is_config_provisioning_item('dashboards'):
secret_data['dashboards.yaml'] = self.kubragen.secret_data_encode(
self.configfile_provisioning_get('providers', 'config.provisioning.dashboards'))
ret.append(Object({
'apiVersion': 'v1',
'kind': 'Secret',
'metadata': {
'name': self.object_name('config-secret'),
'namespace': self.namespace(),
},
'type': 'Opaque',
'data': secret_data,
}, name=self.BUILDITEM_CONFIG_SECRET, source=self.SOURCE_NAME, instance=self.basename()))
return ret
def internal_build_service(self) -> Sequence[ObjectItem]:
ret: List[ObjectItem] = []
extra_volumes = []
extra_volumemounts = []
if self.option_get('config.dashboards') is not None:
# Configure dashboards mounts
            providers: Dict[Any, Any] = {}
kdata_providers: List[str] = []
for dashboard in self.option_get('config.dashboards'):
if not isinstance(dashboard, GrafanaDashboardSource_KData):
if dashboard.provider in kdata_providers:
raise InvalidParamError('Provider was already used with a KData source')
if dashboard.provider not in providers:
providers[dashboard.provider] = {
'name': 'dashboard-{}'.format(dashboard.provider),
'configMap': {
'name': '{}-{}'.format(self.object_name('config-dashboard'), dashboard.provider),
'items': [],
}
}
extra_volumemounts.append({
'name': 'dashboard-{}'.format(dashboard.provider),
'mountPath': self._dashboard_path(dashboard.provider),
})
itemkey = 'dashboard-{}.json'.format(dashboard.name)
if next((k for k in providers[dashboard.provider]['configMap']['items'] if k['key'] == itemkey), None) is not None:
raise InvalidParamError('Duplicated name "{}" for provider "{}"'.format(dashboard.name, dashboard.provider))
providers[dashboard.provider]['configMap']['items'].append({
'key': itemkey,
'path': '{}.json'.format(dashboard.name),
})
else:
if not is_allowed_types(dashboard.kdata, [
KData_Value, KData_ConfigMap, KData_ConfigMapManual, KData_Secret, KData_SecretManual
]):
raise InvalidParamError('Only ConfigMap and Secret KData is allowed')
if dashboard.provider in providers:
raise InvalidParamError('KData source must be on a separate provider')
vname = 'dashboard-{}'.format(dashboard.provider)
providers[dashboard.provider] = KDataHelper_Volume.info(base_value={
'name': vname,
}, value=dashboard.kdata)
extra_volumemounts.append({
'name': 'dashboard-{}'.format(dashboard.provider),
'mountPath': self._dashboard_path(dashboard.provider),
})
kdata_providers.append(dashboard.provider)
for prv in providers.values():
extra_volumes.append(prv)
ret.extend([
Object({
'apiVersion': 'apps/v1',
'kind': 'Deployment',
'metadata': {
'name': self.object_name('deployment'),
'namespace': self.namespace(),
'labels': {
'app': self.object_name('pod-label-app'),
}
},
'spec': {
'selector': {
'matchLabels': {
'app': self.object_name('pod-label-app'),
}
},
'replicas': 1,
'template': {
'metadata': {
'labels': {
'app': self.object_name('pod-label-app'),
}
},
'spec': {
'containers': [{
'name': 'grafana',
'image': self.option_get('container.grafana'),
'ports': [{
'containerPort': 3000,
'protocol': 'TCP'
}],
'env': [
KDataHelper_Env.info(base_value={
'name': 'GF_SECURITY_ADMIN_USER',
}, value_if_kdata=self.option_get('config.admin.user'), default_value={
'valueFrom': {
'secretKeyRef': {
'name': self.object_name('config-secret'),
'key': 'admin_user'
}
},
}, disable_if_none=True),
KDataHelper_Env.info(base_value={
'name': 'GF_SECURITY_ADMIN_PASSWORD',
}, value_if_kdata=self.option_get('config.admin.password'), default_value={
'valueFrom': {
'secretKeyRef': {
'name': self.object_name('config-secret'),
'key': 'admin_password'
}
},
}, disable_if_none=True),
ValueData({
'name': 'GF_INSTALL_PLUGINS',
'value': ','.join(self.option_get('config.install_plugins')),
}, enabled=self.option_get('config.install_plugins') is not None and len(self.option_get('config.install_plugins')) > 0),
],
'volumeMounts': [
{
'mountPath': '/var/lib/grafana',
'name': 'data',
},
{
'name': 'grafana-config',
'mountPath': '/etc/grafana/grafana.ini',
'subPath': 'grafana.ini',
},
ValueData(value={
'name': 'provisioning-datasources',
'mountPath': '/etc/grafana/provisioning/datasources',
'readOnly': True,
}, enabled=self.is_config_provisioning_item('datasources')),
ValueData(value={
'name': 'provisioning-plugins',
'mountPath': '/etc/grafana/provisioning/plugins',
'readOnly': True,
}, enabled=self.is_config_provisioning_item('plugins')),
ValueData(value={
'name': 'provisioning-dashboards',
'mountPath': '/etc/grafana/provisioning/dashboards',
'readOnly': True,
}, enabled=self.is_config_provisioning_item('dashboards')),
*extra_volumemounts
],
'livenessProbe': ValueData(value={
'httpGet': {
'path': '/api/health',
'port': 3000,
},
'initialDelaySeconds': 60,
'timeoutSeconds': 30,
'failureThreshold': 10,
}, enabled=self.option_get('config.probes')),
'readinessProbe': ValueData(value={
'httpGet': {
'path': '/api/health',
'port': 3000,
},
'initialDelaySeconds': 60,
'timeoutSeconds': 30,
}, enabled=self.option_get('config.probes')),
'resources': ValueData(value=self.option_get('kubernetes.resources.deployment'), disabled_if_none=True),
}],
'restartPolicy': 'Always',
'volumes': [
{
'name': 'grafana-config',
'configMap': {
'name': self.object_name('config'),
},
},
KDataHelper_Volume.info(base_value={
'name': 'data',
}, value=self.option_get('kubernetes.volumes.data')),
ValueData(value={
'name': 'provisioning-datasources',
'secret': {
'secretName': self.object_name('config-secret'),
'items': [{
'key': 'datasources.yaml',
'path': 'datasources.yaml',
}]
}
}, enabled=self.is_config_provisioning_item('datasources')),
ValueData(value={
'name': 'provisioning-plugins',
'secret': {
'secretName': self.object_name('config-secret'),
'items': [{
'key': 'plugins.yaml',
'path': 'plugins.yaml',
}]
}
}, enabled=self.is_config_provisioning_item('plugins')),
ValueData(value={
'name': 'provisioning-dashboards',
'secret': {
'secretName': self.object_name('config-secret'),
'items': [{
'key': 'dashboards.yaml',
'path': 'dashboards.yaml',
}]
}
}, enabled=self.is_config_provisioning_item('dashboards')),
*extra_volumes
]
}
}
}
}, name=self.BUILDITEM_DEPLOYMENT, source=self.SOURCE_NAME, instance=self.basename()),
Object({
'apiVersion': 'v1',
'kind': 'Service',
'metadata': {
'name': self.object_name('service'),
'namespace': self.namespace(),
},
'spec': {
'selector': {
'app': self.object_name('pod-label-app')
},
'ports': [{
'port': self.option_get('config.service_port'),
'protocol': 'TCP',
'targetPort': 3000
}]
}
}, name=self.BUILDITEM_SERVICE, source=self.SOURCE_NAME, instance=self.basename()),
])
return ret
def configfile_get(self) -> str:
if self.configfile is None:
configfile = self.option_get('config.grafana_config')
if configfile is None:
configfile = GrafanaConfigFile()
if isinstance(configfile, str):
self.configfile = configfile
else:
configfilerender = ConfigFileRenderMulti([
ConfigFileRender_Ini(),
ConfigFileRender_RawStr()
])
self.configfile = configfilerender.render(configfile.get_value(self))
return self.configfile
def configfile_provisioning_get(self, filetype: str, optionname: str) -> str:
ofile = self.option_get(optionname)
if ofile is None:
raise InvalidParamError('Config file option "{}" is empty'.format(optionname))
configfilerender = ConfigFileRenderMulti([
ConfigFileRender_Yaml(),
ConfigFileRender_RawStr()
])
if isinstance(ofile, ConfigFile):
return configfilerender.render(ofile.get_value(self))
elif isinstance(ofile, str):
return ofile
else:
if filetype == 'providers':
newofile = []
for og in ofile:
if 'type' not in og or og['type'] == 'file':
# Auto add "options.path" if not set
newog = copy.deepcopy(og)
if 'options' not in newog or 'path' not in newog['options']:
Merger.merge(newog, {
'options': {
'path': self._dashboard_path(newog['name']),
},
})
newofile.append(newog)
else:
newofile.append(og)
ofile = newofile
ogfile = {
'apiVersion': 1,
filetype: ofile,
}
return configfilerender.render(ConfigFileOutput_Dict(ogfile))
def _dashboard_path(self, name: str):
return posixpath.join(self.option_get('config.dashboards_path'), name)
def _dashboard_fetch(self, source: GrafanaDashboardSource):
if isinstance(source, GrafanaDashboardSource_Str):
return source.source
if isinstance(source, GrafanaDashboardSource_Url):
try:
with urlopen(source.url) as u:
return u.read().decode('utf-8')
except Exception as e:
raise InvalidParamError('Error downloading url: {}'.format(str(e))) from e
if isinstance(source, GrafanaDashboardSource_LocalFile):
with open(source.filename, mode='r', encoding='utf-8') as f:
return f.read()
if isinstance(source, GrafanaDashboardSource_GNet):
try:
with urlopen(f'https://grafana.com/api/dashboards/{source.gnetId}/revisions/{source.revision}/download') as u:
src = u.read().decode('utf-8')
if source.datasource is not None:
return re.sub(r'"datasource":.*,', '"datasource": "{}",'.format(source.datasource), src)
return src
except Exception as e:
raise InvalidParamError('Error downloading url: {}'.format(str(e))) from e
raise InvalidParamError('Unsupported dashboard source: "{}"'.format(repr(source)))
|
import matplotlib.pyplot as plt
from matplotlib import style
import random
import datetime
from graphpkg.live.graph import LiveTrend,LiveScatter
style.use("dark_background")
def get_new_data():
return datetime.datetime.now(), [random.randrange(5, 10),random.randrange(1,5)]
# def get_new_data1():
# y_data = random.randrange(0, 10)
# return None, y_data if y_data > 5 else None
def func2():
return random.randrange(1, 100), [random.randrange(1, 100), random.randrange(1, 100), random.randrange(1, 100)]
def func3(*args):
return random.randrange(1, args[0]), [random.randrange(1, args[0]), random.randrange(1, 100)]
if __name__ == "__main__":
lg1 = LiveTrend(
func_for_data=get_new_data,
interval=1000,
title="Live trend with date time"
)
lg1.start()
g2 = LiveScatter(func_for_data=func2, interval=1000,
title="scatter with 3 plots", window=1000)
g2.start()
g3 = LiveScatter(func_for_data=func3, func_args=(
500,), interval=1000, title="scatter with 2 plots", window=500)
g3.start()
plt.show()
|
from time import time
from bst.pygasus.core import ext
from bst.pygasus.session.interfaces import ISession
from bst.pygasus.session.interfaces import IClientIdentification
from bst.pygasus.session.interfaces import DEFAULT_EXPIRATION
class UserSessionData(dict):
def __init__(self):
self.reset_lastchanged()
def reset_lastchanged(self):
self.lastchanged = time()
class RamSessionData(dict):
def __init__(self, expiration=None):
if expiration is None:
expiration = DEFAULT_EXPIRATION
self.expiration = expiration
ram = RamSessionData()
class RamSession(ext.Adapter):
""" simple session thats store data unencrypted in ram. After shutdown
the server all data will lost.
For a persistent session class you properly should
inherit from this class.
"""
ext.implements(ISession)
ext.context(ext.IRequest)
user_session_data_cls = UserSessionData
def __init__(self, request):
self.request = request
def __setitem__(self, key, value):
client = IClientIdentification(self.request)
identification = client.identification()
if identification is None or identification not in self.store():
identification = client.apply()
data = self.store().setdefault(identification, self.user_session_data_cls())
data[key] = self.encrypt(value)
data.reset_lastchanged()
def __getitem__(self, key):
self.refresh()
identification = IClientIdentification(self.request).identification()
if identification is None or identification not in self.store():
raise KeyError('user has no identification or data')
return self.decrypt(self.store()[identification][key])
def __delitem__(self, key):
identification = IClientIdentification(self.request).identification()
if identification is None or identification not in self.store():
raise KeyError('user has no identification or data')
del self.store()[identification]
def __contains__(self, key):
identification = IClientIdentification(self.request).identification()
if identification is None:
return False
if identification not in self.store():
return False
return key in self.store()[identification]
def set_expiration_time(self, time):
self.store().expiration = time
def get_expiration_time(self):
return self.store().expiration
def store(self):
""" return a store in form of a dict
"""
return ram
def decrypt(self, value):
""" this function do nothing but
can easily overridden in a subclass
"""
return value
def encrypt(self, value):
""" this function do nothing but
can easily overridden in a subclass
"""
return value
def refresh(self):
removes = list()
for key, data in self.store().items():
if data.lastchanged + self.get_expiration_time() < time():
removes.append(key)
for key in removes:
del self.store()[key]
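# Hedged sketch (not part of the original module): the encrypt/decrypt hooks above are
# meant to be overridden by subclasses. The pickle/base64 codec below is purely
# illustrative obfuscation, not real encryption, and the class name is hypothetical.
import base64
import pickle


class EncodedRamSession(RamSession):
    """RamSession variant that stores values pickled and base64-encoded."""

    def encrypt(self, value):
        # serialize and encode before placing the value into the RAM store
        return base64.b64encode(pickle.dumps(value))

    def decrypt(self, value):
        # reverse of encrypt(): decode and deserialize on the way out
        return pickle.loads(base64.b64decode(value))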
|
def resolve():
    '''
    Binary lifting: telepo_list[j] holds, for every town, where 2**j consecutive
    teleports lead. Combining the tables selected by the set bits of K gives the
    town reached after exactly K teleports starting from town 1.
    '''
N, K = [int(item) for item in input().split()]
A_list = [int(item) for item in input().split()]
bit_k = bin(K)[2:]
telepo_list = [A_list]
for _ in range(len(bit_k)-1):
temp_list = telepo_list[-1]
new_telep = [temp_list[i-1] for i in temp_list]
telepo_list.append(new_telep)
# print(telepo_list)
goto = 1
for i in range(len(bit_k)):
temp_list = telepo_list[i]
if ((K >> i) & 1):
goto = temp_list[goto - 1]
# print(i, goto)
print(goto)
if __name__ == "__main__":
resolve()
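# Worked example (hypothetical input, traced by hand): with N=4, K=5 and
# A = [3, 2, 4, 1], the walk from town 1 is 1 -> 3 -> 4 -> 1 -> 3 -> 4, so the
# answer is 4. The doubling table holds the 1-, 2- and 4-jump destinations, and
# the set bits of K = 0b101 pick the 1-jump and 4-jump tables.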
|
# -*- coding: utf-8 -*-
'''
File name: code\rudinshapiro_sequence\sol_384.py
Author: Vaidic Joshi
Date created: Oct 20, 2018
Python Version: 3.x
'''
# Solution to Project Euler Problem #384 :: Rudin-Shapiro sequence
#
# For more information see:
# https://projecteuler.net/problem=384
# Problem Statement
'''
Define the sequence a(n) as the number of adjacent pairs of ones in the binary expansion of n (possibly overlapping).
E.g.: a(5) = a(101₂) = 0, a(6) = a(110₂) = 1, a(7) = a(111₂) = 2
Define the sequence b(n) = (-1)^a(n).
This sequence is called the Rudin-Shapiro sequence.
Also consider the summatory sequence of b(n): s(n) = sum of b(i) for 0 ≤ i ≤ n.
The first couple of values of these sequences are:
n    0  1  2  3  4  5  6  7
a(n) 0  0  0  1  0  0  1  2
b(n) 1  1  1 -1  1  1 -1  1
s(n) 1  2  3  2  3  4  3  4
The sequence s(n) has the remarkable property that all elements are positive and every positive integer k occurs exactly k times.
Define g(t,c), with 1 ≤ c ≤ t, as the index in s(n) for which t occurs for the c'th time in s(n).
E.g.: g(3,3) = 6, g(4,2) = 7 and g(54321,12345) = 1220847710.
Let F(n) be the Fibonacci sequence defined by:
F(0)=F(1)=1 and
F(n)=F(n-1)+F(n-2) for n>1.
Define GF(t)=g(F(t),F(t-1)).
Find ΣGF(t) for 2≤t≤45.
'''
# Solution
# Solution Approach
'''
'''
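# A minimal brute-force sketch (an illustration of the definitions above, not the
# intended solution -- it is far too slow for t up to 45): a(n) counts adjacent
# 1-bit pairs, b(n) = (-1)**a(n), and s(n) is the running sum of b.
def a(n):
    """Number of (possibly overlapping) adjacent pairs of ones in binary n."""
    bits = bin(n)[2:]
    return sum(1 for i in range(len(bits) - 1) if bits[i] == bits[i + 1] == '1')


def summatory_sequence(limit):
    """Return [s(0), ..., s(limit)] where s(n) is the partial sum of b(n) = (-1)**a(n)."""
    values, total = [], 0
    for n in range(limit + 1):
        total += (-1) ** a(n)
        values.append(total)
    return values


if __name__ == '__main__':
    # Reproduces the table in the problem statement: [1, 2, 3, 2, 3, 4, 3, 4]
    print(summatory_sequence(7))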
|
import cv2
import numpy as np
import sys
from pathlib import Path
###################
# Core Parameters #
###################
# Size of image to compact during preprocessing
adjust_size = 500
# Rho (distance from top-left corner) threshold for detecting duplicates
rho_threshold = 10
# Theta (angle from top horizontal line) threshold for detecting duplicates
theta_threshold = 30*np.pi/180
# Theta threshold for grouping lines horizontally and vertically
group_threshold = 15*np.pi/180
# Gap ratio threshold (relative to the expected gap) between two adjacent lines, used for finding missing lines
missing_threshold = 1.2
# Save output box images into ./output directory
save_output = True
# Save image of extracted lines overlapped to the original image into ./converted.jpg
save_lines = True
###################
###################
def angle_diff(theta1, theta2):
return min(abs(theta1 - theta2), abs(theta1+np.pi - theta2), abs(theta1-np.pi - theta2))
def dist_diff(rho1, rho2):
return abs(abs(rho1) - abs(rho2))
# Read the image
file_path = sys.argv[1]
orig_img = cv2.imread(file_path)
# Resize the image
height, width, _ = orig_img.shape
img = cv2.resize(orig_img, dsize=(adjust_size, adjust_size), interpolation=cv2.INTER_AREA)
# Change the image into a gray scale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Pre image processing
edges = cv2.Canny(gray, 45, 150, apertureSize=3)
kernel = np.ones((3, 3), np.uint8)
edges = cv2.dilate(edges, kernel, iterations=1)
kernel = np.ones((5, 5), np.uint8)
edges = cv2.erode(edges, kernel, iterations=1)
# Detect lines in the image
lines = cv2.HoughLines(edges, 1, 0.5*np.pi/180, 150)
if lines is None or len(lines) == 0:
print('No lines were found')
exit()
# Reduce similar lines into one representative
filtered_lines = []
## Calculate how many lines are similar to a given one
similar_lines = {i : [] for i in range(len(lines))}
for i in range(len(lines)):
for j in range(len(lines)):
if i == j:
continue
rho_i, theta_i = lines[i][0]
rho_j, theta_j = lines[j][0]
if dist_diff(rho_i, rho_j) < rho_threshold and angle_diff(theta_i, theta_j) < theta_threshold:
similar_lines[i].append(j)
## Order the indices of the lines by how many are similar to them
indices = [i for i in range(len(lines))]
indices.sort(key=lambda x : len(similar_lines[x]))
## Construct line flags which is the base for the filtering
line_flags = len(lines)*[True]
for i in range(len(lines) - 1):
if not line_flags[indices[i]]:
continue
for j in range(i + 1, len(lines)):
if not line_flags[indices[j]]:
continue
rho_i,theta_i = lines[indices[i]][0]
rho_j,theta_j = lines[indices[j]][0]
if dist_diff(rho_i, rho_j) < rho_threshold and angle_diff(theta_i, theta_j) < theta_threshold:
line_flags[indices[j]] = False
## Filter out duplicated lines
for i in range(len(lines)):
if line_flags[i]:
filtered_lines.append(lines[i])
# Group horizontal and vertical lines, respectively
similar_lines = {i : set() for i in range(len(filtered_lines))}
for i in range(len(filtered_lines)):
for j in range(len(filtered_lines)):
if i == j:
continue
rho_i, theta_i = filtered_lines[i][0]
rho_j, theta_j = filtered_lines[j][0]
if angle_diff(theta_i, theta_j) < group_threshold:
similar_lines[i].add(j)
groupped_lines = set()
groups = []
for i, line in enumerate(filtered_lines):
if i in groupped_lines:
group_idx = [j for j, g in enumerate(groups) if i in g][0]
else:
groups.append(set())
group_idx = len(groups) - 1
groupped_lines.add(i)
groups[group_idx] |= similar_lines[i]
groups[group_idx].add(i)
groupped_lines |= groups[group_idx]
groups.sort(key=len)
height_groups, width_groups = groups[-2:]
width_lines = [filtered_lines[i][0] for i in width_groups]
height_lines = [filtered_lines[i][0] for i in height_groups]
if np.abs(width_lines[0][1]) < np.abs(height_lines[0][1]):
width_lines, height_lines = height_lines, width_lines
# Sort lines by distances (rho)
width_lines = sorted(width_lines, key=lambda l: abs(l[0]))
height_lines = sorted(height_lines, key=lambda l: abs(l[0]))
width_adj_dists = [dist_diff(width_lines[i][0], width_lines[i-1][0]) for i in range(1, len(width_lines))]
height_adj_dists = [dist_diff(height_lines[i][0], height_lines[i-1][0]) for i in range(1, len(height_lines))]
width_adj_dists = sorted(width_adj_dists)
height_adj_dists = sorted(height_adj_dists)
line_gap = width_adj_dists[len(width_adj_dists)//6]
_width_adj_dists_filtered = [e for e in width_adj_dists if e > 1.5*line_gap]
line_height = _width_adj_dists_filtered[len(_width_adj_dists_filtered)//6]
letter_width = height_adj_dists[len(height_lines)//2]
# Find and fill missing vertical lines
missing_height_lines = []
for i in range(1, len(height_lines)):
dist = dist_diff(height_lines[i][0], height_lines[i-1][0])
angle_dist = angle_diff(height_lines[i][1], height_lines[i-1][1])
if letter_width * (1-missing_threshold) <= dist <= letter_width * missing_threshold:
continue
missing_count = np.round(dist/letter_width)-1
for j in range(1, 1+int(missing_count)):
rho = height_lines[i-1][0] + dist*j/(1+missing_count)
theta = height_lines[i-1][1] + angle_dist*j/(1+missing_count)
missing_height_lines.append(np.array([rho, theta]))
height_lines += missing_height_lines
height_lines = sorted(height_lines, key=lambda l: abs(l[0]))
# Find and fill missing horizontal lines
missing_width_lines = []
is_letter_area = True
removal_idx = []
i = 0
while i < len(width_lines)-1:
i += 1
dist = dist_diff(width_lines[i][0], width_lines[i-1][0])
angle_dist = angle_diff(width_lines[i][1], width_lines[i-1][1])
expected_line_gap = np.abs(line_height - dist) > np.abs(line_gap - dist)
dist_combination = line_height + line_gap
is_combination = np.abs(line_height - dist) > np.abs(dist_combination - dist)
if i == 1 and expected_line_gap:
removal_idx.append(0)
continue
if is_letter_area:
if expected_line_gap:
removal_idx.append(i-1)
elif is_combination:
rho = width_lines[i-1][0] + line_height
theta = (width_lines[i-1][1] + width_lines[i][1])/2
missing_width_lines.append(width_lines[i-1])
width_lines[i-1] = np.array([rho, theta])
is_letter_area = False
i -= 1
else:
is_letter_area = False
else:
if is_combination:
rho = width_lines[i-1][0] + line_gap
theta = (width_lines[i-1][1] + width_lines[i][1])/2
missing_width_lines.append(width_lines[i-1])
width_lines[i-1] = np.array([rho, theta])
i -= 1
elif not expected_line_gap:
missing_width_lines.append(width_lines[i-1])
i -= 1
is_letter_area = True
width_lines = [l for i, l in enumerate(width_lines) if i not in removal_idx]
width_lines += missing_width_lines
width_lines = sorted(width_lines, key=lambda l: abs(l[0]))
# Convert coord system into Cartesian from rho-theta plane
cartesian_width_lines = []
for line in width_lines:
rho, theta = line
a = np.cos(theta)
b = np.sin(theta)
x0 = a*rho
y0 = b*rho
x1 = (x0-b) * width / adjust_size
x2 = (x0+b) * width / adjust_size
y1 = (y0+a) * height / adjust_size
y2 = (y0-a) * height / adjust_size
cartesian_width_lines.append(np.array([[x1, y1], [x2, y2]]))
cartesian_height_lines = []
for line in height_lines:
rho, theta = line
a = np.cos(theta)
b = np.sin(theta)
x0 = a*rho
y0 = b*rho
x1 = (x0-b) * width / adjust_size
x2 = (x0+b) * width / adjust_size
y1 = (y0+a) * height / adjust_size
y2 = (y0-a) * height / adjust_size
cartesian_height_lines.append(np.array([[x1, y1], [x2, y2]]))
# Obtain letter boxes
def order_points(pts):
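    # Order the four corners as top-left, top-right, bottom-right, bottom-left:
    # TL has the smallest x+y sum, BR the largest, while np.diff gives y-x per
    # point, so TR (large x, small y) is its argmin and BL its argmax.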
rect = np.zeros((4, 2), dtype = "float32")
s = pts.sum(axis = 1)
rect[0] = pts[np.argmin(s)]
rect[2] = pts[np.argmax(s)]
diff = np.diff(pts, axis = 1)
rect[1] = pts[np.argmin(diff)]
rect[3] = pts[np.argmax(diff)]
return rect
def four_point_transform(image, pts):
rect = order_points(pts)
(tl, tr, br, bl) = rect
widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
maxWidth = max(int(widthA), int(widthB))
heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
maxHeight = max(int(heightA), int(heightB))
dst = np.array([
[0, 0],
[maxWidth - 1, 0],
[maxWidth - 1, maxHeight - 1],
[0, maxHeight - 1]], dtype = "float32")
M = cv2.getPerspectiveTransform(rect, dst)
warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))
return warped
def perp(a) :
b = np.empty_like(a)
b[0] = -a[1]
b[1] = a[0]
return b
def seg_intersect(a1, a2, b1, b2) :
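    # Intersect the infinite lines through (a1, a2) and (b1, b2) using the 2D
    # perp-dot trick: dap is perpendicular to da, so dot(dap, db) vanishes only
    # for parallel lines (division by zero here), and the intersection is
    # b1 + t*db with t = dot(dap, a1 - b1) / dot(dap, db).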
da = a2-a1
db = b2-b1
dp = a1-b1
dap = perp(da)
denom = np.dot(dap, db)
num = np.dot(dap, dp)
return (num / denom.astype(float))*db + b1
boxes = []
for i_width in range(0, len(cartesian_width_lines)-1, 2):
for i_height in range(len(cartesian_height_lines)-1):
a1, a2 = cartesian_width_lines[i_width]
b1, b2 = cartesian_height_lines[i_height]
p1 = seg_intersect(a1, a2, b1, b2)
a1, a2 = cartesian_width_lines[i_width+1]
b1, b2 = cartesian_height_lines[i_height]
p2 = seg_intersect(a1, a2, b1, b2)
a1, a2 = cartesian_width_lines[i_width]
b1, b2 = cartesian_height_lines[i_height+1]
p3 = seg_intersect(a1, a2, b1, b2)
a1, a2 = cartesian_width_lines[i_width+1]
b1, b2 = cartesian_height_lines[i_height+1]
p4 = seg_intersect(a1, a2, b1, b2)
rect = order_points(np.array([p1, p2, p3, p4]))
tl, _, br, _ = rect
if save_output:
boxes.append(four_point_transform(orig_img, np.array([p1, p2, p3, p4])))
else:
boxes.append((tl, br))
if not save_output:
print(boxes)
# Save each box images
if save_output:
Path("output").mkdir(exist_ok=True)
box_num = 1
for box in boxes:
box = cv2.cvtColor(box, cv2.COLOR_BGR2GRAY)
_, box = cv2.threshold(box, 200, 255, cv2.THRESH_BINARY)
h, w = box.shape
box = box[h//16:-h//16, w//10:-w//10]
h, w = box.shape
inner_box = box[h//4:-h//4, w//4:-w//4]
if np.sum(inner_box < 200) > 0:
n = str(box_num).zfill(1+np.log10(len(boxes)).astype(int))
cv2.imwrite(f'output/box-{n}.jpg', box)
box_num += 1
# Show extracted lines in the original image
def draw(line, color):
rho,theta = line
a = np.cos(theta)
b = np.sin(theta)
x0 = a*rho
y0 = b*rho
x1 = int((x0 + 1000*(-b)) * width / adjust_size)
y1 = int((y0 + 1000*(a)) * height / adjust_size)
x2 = int((x0 - 1000*(-b)) * width / adjust_size)
y2 = int((y0 - 1000*(a)) * height / adjust_size)
cv2.line(orig_img, (x1,y1) , (x2,y2), color, 2)
if save_lines:
for line in width_lines:
draw(line, (0, 0, 150))
for line in height_lines:
draw(line, (150, 0, 0))
cv2.imwrite('converted.jpg', orig_img)
|
import json
from django.utils.encoding import force_bytes, force_text
def load_json(data):
return json.loads(force_text(data))
def dump_json(data):
'''
Converts a Python object to a JSON formatted string.
'''
json_kwargs = {
'sort_keys': True,
'indent': 4,
'separators': (', ', ': ')
}
return force_bytes(json.dumps(data, **json_kwargs))
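# Illustrative round-trip sketch (added for clarity; not part of the original
# module):
if __name__ == '__main__':
    payload = {'b': 1, 'a': [2, 3]}
    raw = dump_json(payload)
    assert isinstance(raw, bytes)
    assert load_json(raw) == payload
    # sort_keys=True guarantees "a" is serialised before "b"
    assert raw.index(b'"a"') < raw.index(b'"b"')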
|
from numba.core.decorators import njit
import numpy as np
import os, shutil
from SuperSafety.Utils.utils import init_file_struct
class FollowTheGap:
def __init__(self, conf, agent_name):
self.name = agent_name
self.conf = conf
self.map = None
self.cur_scan = None
self.cur_odom = None
self.max_speed = conf.max_v
self.max_steer = conf.max_steer
self.v_min_plan = conf.v_min_plan
self.speed = conf.vehicle_speed
path = os.getcwd() + "/" + conf.vehicle_path + self.name
init_file_struct(path)
def plan(self, obs):
if obs['linear_vels_x'][0] < self.v_min_plan:
return np.array([0, 7])
        ranges = np.array(obs['scans'][0], dtype=float)
angle_increment = np.pi / len(ranges)
max_range = 5
ranges = preprocess_lidar(ranges, max_range)
bubble_r = 0.1
ranges = create_zero_bubble(ranges, bubble_r)
start_i, end_i = find_max_gap(ranges)
aim = find_best_point(start_i, end_i, ranges[start_i:end_i])
half_pt = len(ranges) /2
steering_angle = angle_increment * (aim - half_pt)
return np.array([steering_angle, self.speed])
@njit
def preprocess_lidar(ranges, max_range):
proc_ranges = np.array([min(ran, max_range) for ran in ranges])
return proc_ranges
@njit
def create_zero_bubble(input_vector, bubble_r):
centre = np.argmin(input_vector)
min_dist = input_vector[centre]
input_vector[centre] = 0
size = len(input_vector)
current_idx = centre
while(current_idx < size -1 and input_vector[current_idx] < (min_dist + bubble_r)):
input_vector[current_idx] = 0
current_idx += 1
current_idx = centre
while(current_idx > 0 and input_vector[current_idx] < (min_dist + bubble_r)):
input_vector[current_idx] = 0
current_idx -= 1
return input_vector
@njit
def find_max_gap(input_vector):
max_start = 0
max_size = 0
current_idx = 0
size = len(input_vector)
while current_idx < size:
current_start = current_idx
current_size = 0
while current_idx< size and input_vector[current_idx] > 1:
current_size += 1
current_idx += 1
if current_size > max_size:
max_start = current_start
max_size = current_size
current_size = 0
current_idx += 1
if current_size > max_size:
max_start = current_start
max_size = current_size
return max_start, max_start + max_size - 1
# @njit
def find_best_point(start_i, end_i, ranges):
# return best index to goto
mid_i = (start_i + end_i) /2
best_i = np.argmax(ranges)
best_i = (mid_i + (best_i + start_i)) /2
return int(best_i)
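# Small self-check sketch (illustration only, not used by the planner above):
# a synthetic scan with the closest obstacle on one side and a wide gap in the
# middle should place the aim point inside that gap.
if __name__ == "__main__":
    scan = np.array([0.5] * 20 + [4.0] * 60 + [0.4] * 20)
    proc = preprocess_lidar(scan, 5.0)
    proc = create_zero_bubble(proc, 0.1)
    start, end = find_max_gap(proc)
    aim = find_best_point(start, end, proc[start:end])
    print(start, end, aim)   # expected: 20 79 34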
|
from __future__ import absolute_import
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class NodeStoreConfig(AppConfig):
label = 'nodestore'
name = 'cobra.apps.nodestore'
verbose_name = _('NodeStore')
|
def hanoi(n, start, temp, end):
    if n == 1:
        print("move from " + start + " to " + end)
    else:
        hanoi(n - 1, start, end, temp)
        print("move from " + start + " to " + end)
        hanoi(n - 1, temp, start, end)
hanoi(2, "A", "B", "C")
print("fin")
hanoi(3, "X", "temp", "Y")
|
from selenium.webdriver import Remote as RemoteWebDriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from pages.base_page import BasePage
from pages.locators import BasketPageLocators
class BasketPage(BasePage):
    def basket_not_items(self):
        assert self.is_not_element_present(*BasketPageLocators.BASKET_HAVE_ITEMS), "Items are present in the basket"
    def basket_have_a_text(self):
        assert self.is_element_present(*BasketPageLocators.BASKET_HAVE_A_TEXT), "Basket does not have the expected text"
|
from __future__ import annotations
import logging
from random import Random
from typing import TYPE_CHECKING
from ...models import (
Planning,
MAX_INT,
)
from ..abc import (
Algorithm,
)
from ..heuristics import (
InsertionAlgorithm,
)
if TYPE_CHECKING:
from typing import (
Type,
Optional,
)
from ...models import (
Result,
)
logger = logging.getLogger(__name__)
class IterativeAlgorithm(Algorithm):
def __init__(self, episodes: int = 3, algorithm_cls: Type[Algorithm] = None, seed: int = 56, *args, **kwargs):
super().__init__(*args, **kwargs)
if algorithm_cls is None:
algorithm_cls = InsertionAlgorithm
self.episodes = episodes
self.algorithm_cls = algorithm_cls
self.random = Random(seed)
self.args = args
self.kwargs = kwargs
def build_algorithm(self, *args, **kwargs) -> Algorithm:
args = (*self.args, *args)
kwargs.update(self.kwargs)
return self.algorithm_cls(*args, **kwargs)
def _optimize(self) -> Planning:
best: Optional[Result] = None
for i in range(self.episodes):
seed = self.random.randint(0, MAX_INT)
current = self.build_algorithm(seed=seed).optimize()
best = self.objective.best(best, current)
assert best is not None
return best.planning
|
from wolframclient.language import wl
from wolframclient.serializers import export, wolfram_encoder
# define a hierarchy of classes.
class Animal(object):
pass
class Fish(Animal):
pass
class Tuna(Fish):
pass
# will not have its own encoder.
class Salmon(Fish):
pass
# register a new encoder for Animal.
@wolfram_encoder.dispatch(Animal)
def encode_animal(serializer, animal):
return serializer.encode(wl.Animal)
# register a new encoder for Fish.
@wolfram_encoder.dispatch(Fish)
def encode_fish(serializer, animal):
return serializer.encode(wl.Fish)
# register a new encoder for Tuna.
@wolfram_encoder.dispatch(Tuna)
def encode_tuna(serializer, animal):
# encode the class as a function using class name
return serializer.encode(wl.Tuna)
expr = {'fish' : Fish(), 'tuna': Tuna(), 'salmon': Salmon()}
result = export(expr)
print(result) # b'<|"fish" -> Fish, "tuna" -> Tuna, "salmon" -> Fish|>' |
import os
import glob
import json
import time
import pickle
import shutil
import random
import warnings
import pandas as pd
import numpy as np
import matplotlib.pylab as plt
from multiprocessing import Process, cpu_count, Array
from omsdetector.mof import Helper
from omsdetector.mof import MofStructure
from omsdetector.atomic_parameters import Atom
from sys import exit
pd.options.display.max_rows = 1000
class MofCollection:
"""A collection to hold and analyse MOF structures from CIF files"""
separator = "".join(['-'] * 50)
def __init__(self, path_list, analysis_folder='analysis_folder'):
"""Create a MofCollection from a list of path names.
:param path_list: List of paths to MOF CIF files to be added to the
collection.
:param analysis_folder: Path to the folder where the results will
be stored. (default: 'analysis_folder')
"""
self._analysis_folder = analysis_folder
self.path_list = path_list
self.mof_coll = []
self.batches = []
self._metal_site_df = None
self._mof_oms_df = None
self._properties = {}
self.load_balance_index = {}
self.analysis_limit = None
self.filter_functions = {
"density": self._apply_filter_range,
"oms_density": self._apply_filter_range,
"uc_volume": self._apply_filter_range,
"metal_species": self._apply_filter_in_value,
"non_metal_species": self._apply_filter_in_value,
"cif_okay": self._apply_filter_value,
"has_oms": self._apply_filter_value,
"mof_name": self._apply_value_in_filter
}
self._load_mofs()
def __len__(self):
return len(self.mof_coll)
def __repr__(self):
print_str = self.separator
print_str += "\nThis collection holds information for "
print_str += "{} MOFs.\n".format(len(self))
if self.analysis_folder is None:
print_str += "Analysis folder is not set.\n"
else:
f = os.path.abspath(self.analysis_folder)
print_str += "Analysis folder is: {}\n\n".format(f)
print_str += "List of cif files in collection:\n\n"
for mc in self.mof_coll:
print_str += "{}\n".format(mc['mof_file'])
print_str += self.separator
return print_str
@property
def analysis_folder(self):
"""Get value of the analysis folder."""
Helper.make_folder(self._analysis_folder)
return self._analysis_folder
@analysis_folder.setter
def analysis_folder(self, analysis_folder):
"""Set value of the analysis folder."""
self._analysis_folder = analysis_folder
@property
def oms_results_folder(self):
"""Get value of the OMS results folder."""
orf = self.analysis_folder + '/oms_results'
Helper.make_folder(orf)
return orf
@property
def summary_folder(self):
"""Get value of the summary folder."""
sf = self.analysis_folder + '/summary'
Helper.make_folder(sf)
return sf
@property
def _properties_filename(self):
"""Get value of the properties pickle file."""
return self.analysis_folder + '/properties.pickle'
@property
def properties(self):
"""Get value for the MOF properties. If the property variable is not
None and the pickle file exists, then load the file and return it."""
if not self._properties and os.path.isfile(self._properties_filename):
with open(self._properties_filename, 'rb') as properties_file:
self._properties = pickle.load(properties_file)
return self._properties
@property
def mof_oms_df(self):
"""Get a pandas DataFrame that lists for each MOF whether it has an OMS
or not and if it has an OMS what metal types it is.
"""
if self._mof_oms_df is not None:
return self._mof_oms_df
if not self._validate_properties(['has_oms'])[1]:
print('OMS analysis not finished for all MOFs in collection.')
return False
mof_info = {}
for mi in self.mof_coll:
mp = self.properties[mi['checksum']]
if 'metal_sites' not in mp:
continue
metal_sites = mp['metal_sites']
if len(metal_sites) == 0:
print('No Metal Found in {}'.format(mp['name']))
oms_types = [ms["metal"] for ms in metal_sites
if ms["is_open"] and ms["unique"]]
oms_types = list(set(oms_types))
if oms_types:
oms_types = ",".join(oms_types)
else:
oms_types = "N/A"
if mp['has_oms']:
has_oms = 'Yes'
else:
has_oms = 'No'
all_metal_species = ",".join(set(mp['metal_species']))
mof_info[mp['name']] = {'Metal Types': all_metal_species,
'Has OMS': has_oms,
'OMS Types': oms_types}
self._metal_site_df = pd.DataFrame.from_dict(mof_info,
orient='index')
return self._metal_site_df
@property
def metal_site_df(self):
"""Get a pandas DataFrame that lists the OMS results for each metal
type.
"""
if self._metal_site_df is not None:
return self._metal_site_df
if not self._validate_properties(['has_oms'])[1]:
print('OMS analysis not finished for all MOFs in collection.')
return False
site_info = {}
for mi in self.mof_coll:
mp = self.properties[mi['checksum']]
if 'metal_sites' not in mp:
continue
metal_sites = mp['metal_sites']
if len(metal_sites) == 0:
print('No Metal Found in {}'.format(mp['name']))
for i, ms in enumerate(metal_sites):
key = mp['name'] + '_' + str(i)
site_info[key] = ms
if 'all_dihedrals' in ms:
del site_info[key]['all_dihedrals']
if 'min_dihedral' in ms:
del site_info[key]['min_dihedral']
site_info[key]['mof_name'] = mp['name']
self._metal_site_df = pd.DataFrame.from_dict(site_info, orient='index')
return self._metal_site_df
@classmethod
def from_folder(cls, collection_folder, analysis_folder='analysis_folder',
name_list=None):
"""Create a MofCollection from a the CIF files in a folder.
:param collection_folder: Path to the folder containing the CIF files to
be added to the collection.
:param analysis_folder: Path to the folder where the results will
be stored. (default: 'analysis_folder')
:param name_list: List of MOF names to include in the collection. If
set, all the other CIF files in the folder will be excluded.
(default: None)
:return: A MofCollection object holding the specified MOF structures.
"""
if name_list:
print(cls.separator)
print('Using only MOFs in the name list.')
print(cls.separator)
d = collection_folder
path_list = [d+'/'+name for name in name_list]
else:
path_list = glob.glob(collection_folder + "/*.cif")
return cls(path_list, analysis_folder)
def analyse_mofs(self, overwrite=False, num_batches=1, analysis_limit=None):
"""Run OMS analysis for the MOFs in the collection.
:param overwrite: Controls if the results will be overwritten or not
(default: False)
:param num_batches: Sets the number of batches the structures will be
split in and analyzed on a separate process. (default: 1)
:param analysis_limit: Analyze only up to the number of MOFs set by
analysis_limit, if set to None all MOFs will be analyzed (default: None)
"""
print(self.separator)
print("Running OMS Analysis...")
self.analysis_limit = analysis_limit
t0 = time.time()
self._make_batches(num_batches, overwrite)
status = Array('i', [0 for i in range(num_batches)])
for i, batch in enumerate(self.batches):
p = Process(target=self._run_batch,
args=(i, batch, overwrite,status))
p.start()
lbs = [len(batch)/100.0 for batch in self.batches]
wait_time = 0.0
status_prev = [0 for i in range(num_batches)]
while True:
            # Create a list from the shared array to make sure it doesn't change
# during the iteration
status_ = list(status)
if all([sp == s for sp, s in zip(status_prev, status_)]):
wait_time = min(25, 0.1+wait_time)
time.sleep(wait_time)
status_prev = status_
sout = ["Batch {} Finished.".format(b + 1)
if len(self.batches[b]) == 0 or s < 0 else
"Batch {} {:.2f} % : Analysing {:}"
"".format(b+1, (s+1)/lbs[b], self.batches[b][s]['mof_name'])
for b, s in enumerate(status_)]
print("|**| ".join(sout) + 100 * " ", end='\r', flush=True)
if all([s < 0 for s in status_]):
break
if overwrite:
for mi in self.mof_coll:
self._update_property_from_oms_result(mi)
self._validate_properties(['has_oms'])
t1 = time.time()
print('\nAnalysis Finished. Time required:{:.2f} sec'.format(t1 - t0))
print(self.separator)
def check_structures(self):
"""Iterate over all the MOFs in the collection and validate that they
can be read and a MofStructure can be created.
"""
self._validate_properties(['cif_okay'])
not_read = [mi for mi in self.mof_coll
if not self.properties[mi['checksum']]['cif_okay']]
read_len = len(self.mof_coll) - len(not_read)
print('\nChecked {} structures.'.format(len(self.mof_coll)))
msg1 = {0: '\r',
1: '{} was read.'.format(read_len),
2: '{} were read.'.format(read_len)}
msg2 = {0: '\r',
1: '{} was NOT read.'.format(len(not_read)),
2: '{} were NOT read.'.format(len(not_read))}
print(msg1[min(2, read_len)])
print(msg2[min(2, len(not_read))])
msg = {0: "\r", 1: "\nThe following structures could not be read:"}
print(msg[min(1, len(not_read))])
for i, mi in enumerate(not_read):
print("{}".format(mi['mof_name']))
mofs_no_metal = [mi for mi in self.mof_coll
if self.properties[mi['checksum']]['cif_okay']
and not
self.properties[mi['checksum']]['metal_species']]
msg = {0: "\r", 1: "The following structures contain no metal:"}
print(msg[min(1, len(mofs_no_metal))])
for mi in mofs_no_metal:
p = self.properties[mi['checksum']]
print("{}.cif {}".format(p['name'],
p['metal_species']+p['non_metal_species']))
print('\nFinished checking structures.')
def check_analysis_status(self):
"""Iterate over all the MOFs in the collection and check if the results
from the OMS analysis exist.
"""
print(self.separator)
not_done = [mi['mof_file'] for mi in self.mof_coll
if not self._check_if_results_exist(mi['mof_name'])]
done = len(self.mof_coll) - len(not_done)
msg1 = {0: '\nAnalysis for no structures has been completed.',
1: '\nAnalysis for {} out of {} structures have been completed.'
.format(done, len(self.mof_coll))}
msg2 = {0: "\r", 1: "\nThe following structures are missing:"}
print(msg1[min(1, done)])
print(msg2[min(1, len(not_done))])
for nd in not_done:
print(nd)
print(self.separator)
def sample_collection(self, sample_size=50):
"""Randomly select a sample of MOFs in the collection and
return a new collection with the MOFs in the sample.
:param sample_size: Number of MOFs to be selected. Default value is 50.
"""
ll = len(self.mof_coll)
if sample_size > ll:
sample_size = ll
print(f"Can only sample up to the number of MOFs "
f"in the collection ({ll}).")
mof_list = [mi['mof_file'] for mi in self.mof_coll]
sampled_list = random.sample(mof_list, sample_size)
return MofCollection(sampled_list, analysis_folder=self.analysis_folder)
def filter_collection(self, using_filter=None,
new_collection_folder=None,
new_analysis_folder=None):
"""Filter a collection given a number of filters.
Calling this method of a MofCollection applies the filter and creates a
new collection for the MOFs that match the filter. The cif files that
match the filter are copied to the new_collection_folder.
The filters can be one or more of the following:
'density': [min, max] (range of values)
'oms_density': [min, max] (range of values)
'uc_volume': [min, max] (range of values)
'metal_species': ["Cu", "Zn", ...] (list of metal species)
'non_metal_species': ["C", "N", ...] (list of non metal species)
'cif_okay': True (boolean value)
'has_oms': True (boolean value)
'mof_name': [mof_name1, mof_name2] (string values)
:param using_filter: Filter used to identify MOFs with certain
characteristics. Has to be a python dictionary (default: None)
:param new_collection_folder: Path to the folder where the CIF files of
the filtered collection will be stored. If set to None the CIF files
will not be copied. (default: None)
:param new_analysis_folder: Path to the folder where the OMS result
files of the filtered collection will be stored. If set to None the
result files will not be copied. (default: None)
:return: A MofCollection with only the filtered MOFs. If
new_collection_folder or new_analysis_folder is not set then the
collection will point to the original location of these files.
"""
print(self.separator)
if any([f not in self.filter_functions for f in using_filter]):
print('Unknown filter. Try again using one of the following '
'filters:\n\"{}\"'.format(", ".join(self.filter_functions)))
print(self.separator)
return
validation_level, cf = self._validate_properties(using_filter)
if validation_level == 1 and not cf:
print('Properties from CIF files could not be validated.'
'Check that all CIF files can be read')
return
elif validation_level == 2 and not cf:
print('Requested a filter that needs OMS information but the '
'OMS analysis does not appear to be complete.\n'
'Run it first and try again.')
return
print(self.separator)
print('Filtering collection.')
filtered_list = []
for i, mi in enumerate(self.mof_coll):
mp = self.properties[mi['checksum']]
fun = self._apply_filter
if all([fun(f, mp[f], using_filter[f]) for f in using_filter]):
filtered_list.append(mi['mof_file'])
found_s = {0: "No", 1: len(filtered_list)}[min(1, len(filtered_list))]
print('\n{} MOFs were matched using the provided'
' filter.'.format(found_s))
if len(filtered_list) == 0:
print('No collection returned.')
return None
print('Returning a new collection using the matched MOFs.')
sub_collection = MofCollection(filtered_list,
analysis_folder=self.analysis_folder)
print(self.separator)
sub_collection.copy_cifs(new_collection_folder)
sub_collection.copy_results(new_analysis_folder)
return sub_collection
def read_cif_files(self):
"""Iterate over all MOF files in the collection, load each CIF and
store MOF properties such as density, unit cell volume etc.
"""
print(self.separator)
print('Reading CIF files and updating properties...')
self._loop_over_collection(self._update_property_from_cif_file)
self._store_properties()
print('Done')
print(self.separator)
def read_oms_results(self):
"""Iterate over all MOF files in the collection, load each OMS result
file and store OMS information to the MOF properties.
"""
print(self.separator)
print('Adding results to properties.')
self._loop_over_collection(self._update_property_from_oms_result)
print('Done')
self._store_properties()
print(self.separator)
def copy_cifs(self, target_folder):
"""Copy cif files from their existing location to the specified
target_folder.
:param target_folder: Path of folder to copy collection CIF files to.
"""
if target_folder is None:
return
tf_abspath = os.path.abspath(target_folder)
Helper.make_folder(tf_abspath)
print(self.separator)
print('The cif files for this collection will be copied to'
' the specified folder:\n\"{}\"'.format(tf_abspath))
print('The cif paths will be updated.')
for i, mi in enumerate(list(self.mof_coll)):
destination_path = "{}/{}.cif".format(tf_abspath, mi['mof_name'])
self.mof_coll[i] = {"mof_name": mi['mof_name'],
"mof_file": destination_path,
"checksum": mi['checksum']}
if not os.path.isfile(destination_path):
shutil.copyfile(mi['mof_file'], destination_path)
print(self.separator)
def copy_results(self, target_folder):
"""Copy OMS result files from their existing location to the specified
target_folder.
:param target_folder: Path of folder to copy collection OMS result
files to.
"""
if target_folder is None:
return
print(self.separator)
tf_abspath = os.path.abspath(target_folder)
destination_path = tf_abspath + '/oms_results'
print('The result files for this collection will be copied to the '
'specified folder:\n{}\nThe analysis folder will be updated.'
''.format(tf_abspath))
Helper.make_folder(tf_abspath)
Helper.make_folder(destination_path)
for i, mi in enumerate(self.mof_coll):
mof_name = mi['mof_name']
if self._check_if_results_exist(mof_name):
source_path = "{}/{}".format(self.oms_results_folder, mof_name)
Helper.copy_folder(destination_path, source_path)
self.analysis_folder = tf_abspath
self._validate_properties(['has_oms'])
print(self.separator)
def summarize_results(self, max_atomic_number=None):
"""Create a summary table for the OMS results of the collection, group
results by metal type.
:param max_atomic_number: Maximum atomic number to be included in
summary table. If not defined all metal atoms will be considered
(default: None)
"""
df = self.metal_site_df.copy()
site_df_u = df.loc[df['unique']]
site_df_o = site_df_u.loc[site_df_u['is_open']]
all_sites = self._group_and_summarize(site_df_u, ['MOFs',
'Metal Sites'])
open_sites = self._group_and_summarize(site_df_o, ['MOFs_with_OMS',
'OMS'])
s_df = pd.concat([all_sites, open_sites], axis=1)
s_df.fillna(0.0, inplace=True)
s_df = s_df.astype(int)
s_df['MOFs_with_OMS(%)'] = 100.0 * s_df['MOFs_with_OMS']/s_df['MOFs']
s_df['OMS (%)'] = 100.0 * s_df['OMS'] / s_df['Metal Sites']
cols = ['MOFs', 'MOFs_with_OMS', 'Metal Sites', 'OMS',
'MOFs_with_OMS(%)', 'OMS (%)']
s_df = s_df[cols]
s_df['MOFs_with_OMS(%)'] = s_df['MOFs_with_OMS(%)'].apply('{:.2f} %'
''.format)
s_df['OMS (%)'] = s_df['OMS (%)'].apply('{:.2f} %'.format)
s_df.sort_values("MOFs", inplace=True, ascending=False)
num_mofs = df['mof_name'].nunique()
num_oms_mofs = df[df['is_open']]['mof_name'].nunique()
num_sites = len(site_df_u)
num_oms_sites = len(site_df_u[site_df_u['is_open']])
print(self.separator)
print('Number of total MOFs: {}'.format(num_mofs))
print('Number of total MOFs with open metal sites: {}'
''.format(num_oms_mofs))
print('Number of total unique sites: {}'.format(num_sites))
print('Number of total unique open metal sites: {}'
''.format(num_oms_sites))
print(self.separator)
msg = "Summary Table\n"
fname = "{0}/stats.out".format(self.summary_folder, max_atomic_number)
if max_atomic_number:
subset = pd.Series(s_df.index).apply(
lambda x: Atom(x).atomic_number <= max_atomic_number)
s_df = s_df.loc[subset.values]
fname = "{0}/stats_less_{1}.out".format(self.summary_folder,
max_atomic_number)
msg = "Summary Table for metal atoms with atomic number smaller " \
"than {}.\n".format(max_atomic_number)
print(msg)
print(s_df)
s_df.to_csv(fname, sep=' ')
def summarize_tfactors(self):
"""Summarize the t-factor information and make histograms for all the
MOFs in the collection.
"""
tfac_analysis_folder = self.summary_folder + '/tfac_analysis'
Helper.make_folder(self.summary_folder)
Helper.make_folder(tfac_analysis_folder)
df = self.metal_site_df.copy()
sites_u = df[df['unique']]
for n in range(4, 7):
self._write_t_factors(sites_u, n, tfac_analysis_folder)
def _load_mofs(self):
"""Add MOfs to collection, use CIF file checksum as an identifier."""
print('Loading CIF files...')
li = max(int(len(self.path_list) / 1000), 1)
lm = len(self.path_list) / 100.0
for i, mof_file in enumerate(self.path_list):
if i % li == 0:
print("{:4.1f} %".format((i+1) / lm), end="\r", flush=True)
checksum = Helper.get_checksum(mof_file)
mof_name = os.path.splitext(os.path.basename(mof_file))[0]
mof_info = {"mof_name": mof_name,
"mof_file": mof_file,
"checksum": checksum}
self.mof_coll.append(mof_info)
if checksum not in self.properties:
self.properties[checksum] = {"mof_name": mof_name}
else:
if self.properties[checksum]["mof_name"] != mof_name:
exit("MOF name and CIF checksum mismatch for {}.cif "
"{}.cif. Either the CIF files has already been "
"processed with a different name, or the CIF file "
"has changed since it was processed."
"".format(mof_name,
self.properties[checksum]['mof_name']))
if self._check_if_results_exist(mof_name):
self._compare_checksums(mof_file, mof_name, checksum)
print("\nAll Done.")
self._store_properties()
def _compare_checksums(self, mof_file, mof_name, checksum):
"""If OMS results exist for one of the CIF names in the collection then
ensure that the CIF checksum matches the one in the result file.
"""
mof_folder = "{0}/{1}/".format(self.oms_results_folder,
mof_name)
results_file = "{0}/{1}.json".format(mof_folder, mof_name)
with open(results_file, 'r') as f:
results_dict = json.load(f)
if results_dict['checksum'] != checksum:
print("Results for a MOF named {0} appear to already exist"
" in the analysis folder \n\"{1}\".\nHowever the "
"file checksum in the result file does not match the "
"checksum of \n\"{2}\".\n\nHave the CIF files in the "
"collection changed since the results were computed?"
"\nClear results and try again.".format(mof_name,
mof_folder,
mof_file))
exit(1)
def _run_batch(self, b, batch, overwrite, status):
"""Run OMS analysis for each of the batches."""
for i, mi in enumerate(batch):
status[b] = i
self._analyse(mi, overwrite)
status[b] = -1
def _analyse(self, mi, overwrite):
"""For a given CIF file, create MofStructure object and run OMS
analysis. If overwrite is false check if results already exist first.
"""
mof_folder = "{}/{}".format(self.oms_results_folder, mi['mof_name'])
results_exist = self._check_if_results_exist(mi['mof_name'])
if not overwrite and results_exist:
print("Skipping {}. Results already exist and overwrite is set "
"to False.".format(mi['mof_name']))
return
mof = self._create_mof_from_cif_file(mi['mof_file'])
if mof.summary['cif_okay']:
mof.analyze_metals(output_folder=mof_folder)
def _make_batches(self, num_batches=1, overwrite=False):
"""Split collection into number of batches
:param num_batches: Number of batches (default: 1)
:param overwrite: Controls if the results will be overwritten or not
(default: False)
"""
print(self.separator)
if cpu_count() < num_batches:
warnings.warn('You requested {} batches but there are only {}'
' CPUs available.'.format(num_batches, cpu_count()))
b_s = {1: 'batch', 2: 'batches'}[min(num_batches, 2)]
print('{} {} requested. '.format(num_batches, b_s))
print('Overwrite is set to {}. '.format(overwrite))
print('Storing results in {}. '.format(self.oms_results_folder))
print(self.separator)
self._validate_properties(['load_balancing_index'])
print(self.separator)
lbi = {}
for mi in self.mof_coll:
mp = self.properties[mi['checksum']]
lbi[mi['mof_name']] = mp['load_balancing_index']
# Remove any structures not in load balancing index.
subset = [mc for mc in self.mof_coll if mc['mof_name'] in lbi]
# If there is no balancing info for a MOF at this point it means
# that it could not be read.
if len(self.mof_coll) != len(subset):
print('\nSkipping {} structures that could not be read.'
' '.format(len(self.mof_coll)-len(subset)))
# Remove any structures already completed
if not overwrite:
print('Checking if results for any of the MOFs exist...')
all_ = len(subset)
subset = [mc for mc in subset if not
self._check_if_results_exist(mc['mof_name'])]
msg = {0: "Will not skip any MOFs",
1: "Skipping {} MOFs because results were found. "
"".format(all_ - len(subset))}
print(msg[min(1, all_ - len(subset))])
# Sort mof list using the load balancing index
subset.sort(key=lambda x: lbi[x['mof_name']])
sum_load_balance = sum(lbi[mi["mof_name"]] for mi in subset)
lb_per_batch = sum_load_balance / num_batches
# Select only up to analysis_limit to work with
if self.analysis_limit and len(subset) > self.analysis_limit:
subset = subset[0:self.analysis_limit]
self.batches = [[] for b in range(num_batches)]
for i, mi in enumerate(subset):
sum_lb = sum([lbi[mi["mof_name"]] for mi in subset[0:i]])
batch = int(sum_lb / lb_per_batch)
self.batches[batch].append(mi)
print(self.separator)
for i, batch in enumerate(self.batches):
print("Batch {0} has {1} MOFs".format(i+1, len(batch)))
print(self.separator)
def _check_if_results_exist(self, mof_name):
"""Check if OMS results already exist for a MOF"""
mof_folder = "{}/{}".format(self.oms_results_folder, mof_name)
if os.path.isfile(mof_folder+'/'+mof_name+'.json'):
if not os.path.isfile(mof_folder + '/' + 'analysis_running'):
return True
return False
def _loop_over_collection(self, func):
"""Iterate over all the MOFs in the collection and run the specified
function.
:param func: Function to use.
"""
li = max(int(len(self.mof_coll) / 1000), 1)
lm = len(self.mof_coll) / 100
for i, mi in enumerate(self.mof_coll):
if i % li == 0:
print("{:4.1f} % {} {:100}".format((i+1)/lm, mi['mof_name'],
" "), end="\r", flush=True)
func(mi)
print()
def _apply_filter(self, filter_, v, f):
"""Apply the proper filter_function for the given filter"""
return self.filter_functions[filter_](v, f)
@staticmethod
def _apply_filter_value(v, f):
"""Filter function to match a value. Returns false if values is None"""
if not v:
return False
return v == f
@staticmethod
def _apply_filter_in_value(v, f):
"""Filter function to match all values of a list"""
if not v:
return False
return all([f_ in v for f_ in f])
@staticmethod
def _apply_value_in_filter(v, f):
"""Filter function to match any of the values of a list"""
if not v:
return False
return v in f
@staticmethod
def _apply_filter_range(v, f):
"""Filter function to match a range of values"""
if not v:
return False
return min(f) <= v <= max(f)
def _validate_properties(self, keys):
"""Check if a given property can be found in the properties dictionary.
If not try to read the CIF file and check again. If the check fails
again try to read the OMS results and check again. If the check fails
a third time return False, the property cannot be validated."""
msg = {1: "Validating property", 2: "Validating properties"}
print('\n{} : '.format(msg[min(2, len(keys))]), end='')
print("\"{}\"".format(", ".join([k for k in keys])))
validation_level = 0
li = max(int(len(self.mof_coll)/1000), 1)
lm = len(self.mof_coll) / 100
for i, mi in enumerate(self.mof_coll):
if i % li == 0:
print("{:4.1f} % {} {:100}".format((i+1) / lm, mi['mof_name'],
" "), end="\r", flush=True)
mp = self.properties[mi['checksum']]
if not self._validate_property(mp, keys):
self._update_property_from_cif_file(mi)
validation_level = 1
if not self._validate_property(mp, keys):
self._update_property_from_oms_result(mi)
validation_level = 2
if not self._validate_property(mp, keys):
self._store_properties()
print('\nProperty Missing\n{}'.format(self.separator))
return validation_level, False
self._store_properties()
print("Validated 100 % "+100*" ", end="\r")
print()
return validation_level, True
@staticmethod
def _validate_property(mp, keys):
"""Check if property exists."""
test1 = all([f in mp for f in keys])
if test1 and all([mp[f] != 'N/A' for f in keys]):
return True
if test1 and not mp['cif_okay']:
return True
return False
def _update_property_from_cif_file(self, mi):
"""Update properties dictionary from a CIF file."""
mp = self.properties[mi['checksum']]
mof = self._create_mof_from_cif_file(mi['mof_file'])
if mof:
mp.update(mof.summary)
self.load_balance_index[mi['mof_name']] = len(mof) * len(mof)
mp['load_balancing_index'] = self.load_balance_index[mi['mof_name']]
def _update_property_from_oms_result(self, mi):
"""Update properties dictionary from an OMS result file."""
mp = self.properties[mi['checksum']]
mof_name = mp["mof_name"]
mof_folder = "{0}/{1}/".format(self.oms_results_folder, mof_name)
results_file = "{0}/{1}.json".format(mof_folder, mof_name)
results_dict = None
if os.path.isfile(results_file):
results_dict = json.load(open(results_file))
if isinstance(results_dict, dict):
results_dict['source_name'] = mof_folder
mp.update(results_dict)
def _store_properties(self):
"""Store properties dictionary as a python pickle file."""
with open(self._properties_filename, 'wb') as properties_file:
pickle.dump(self._properties, properties_file)
@staticmethod
def _create_mof_from_cif_file(path_to_mof):
"""Create and return a MofStructure object from a path to a CIF file."""
mof = MofStructure.from_file(path_to_mof, primitive=False)
return mof
def _write_t_factors(self, sites, n, target):
"""Summarize the findings in table form and histograms for a give
t-factor.
"""
s_n = sites.loc[sites['number_of_linkers'] == n].copy()
s_n['is_open_yn'] = np.where(s_n['is_open'], 'yes', 'no')
s_n = s_n[['mof_name', 'is_open_yn', 't_factor']]
for flag in ['yes', 'no']:
outpath = "{}/{}_{}.out".format(target, flag, str(n))
s = s_n[s_n['is_open_yn'] == flag]
s.to_csv(outpath, index=False)
fout = "{}/{}_{}_hist.out".format(target, flag, n)
self._write_histogram(s['t_factor'], True, fout)
fout = "{}/{}_{}_hist_abs.out".format(target, flag, n)
self._write_histogram(s['t_factor'], False, fout)
fig = plt.figure(figsize=(10, 5))
plt.title('t-{} factor'.format(n))
s_yes = s_n[s_n['is_open_yn'] == 'yes']
        s_yes['t_factor'].hist(bins=50, range=(0, 1), density=False)
s_no = s_n[s_n['is_open_yn'] == 'no']
        s_no['t_factor'].hist(bins=50, range=(0, 1), density=False)
plt.show()
@staticmethod
def _write_histogram(sites, dens, target):
"""Generate histograms to be used for summarizing the t-factor
results.
"""
hist, edges = np.histogram(sites, bins=50, range=(0, 1), density=dens)
with open(target, 'w') as hist_file:
w = (edges[1] - edges[0]) / 2
for e, h in zip(edges, hist):
print(e + w, h, file=hist_file)
@staticmethod
def _group_and_summarize(df, names=None):
"""Group the DataFrame holding the OMS results by metal type and rename
its columns.
"""
rename = {"mof_name": names[0], "is_open": names[1]}
agg_dict = {"mof_name": pd.Series.nunique, "is_open": "count"}
return df.groupby('metal').agg(agg_dict).rename(columns=rename)
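# Minimal usage sketch (illustration only; 'my_cif_folder' and the filter
# values below are hypothetical placeholders, not part of this module):
if __name__ == '__main__':
    collection = MofCollection.from_folder('my_cif_folder',
                                           analysis_folder='analysis_folder')
    collection.check_structures()
    collection.analyse_mofs(num_batches=2)
    cu_oms = collection.filter_collection(
        using_filter={'metal_species': ['Cu'], 'has_oms': True})
    collection.summarize_results()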
|
# Copyright (c) 2020, Huawei Technologies.All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from common_utils import TestCase, run_tests
from common_device_type import dtypes, instantiate_device_type_tests
class TestBoundingBoxDecode(TestCase):
    def test_decode_shape_format_fp32(self, device):
        input1 = torch.tensor([[1., 2., 3., 4.], [3., 4., 5., 6.]], dtype = torch.float32).to("npu")
        input2 = torch.tensor([[5., 6., 7., 8.], [7., 8., 9., 6.]], dtype = torch.float32).to("npu")
        expected_output = torch.tensor([[2.5000, 6.5000, 9.0000, 9.0000],
                                        [9.0000, 9.0000, 9.0000, 9.0000]], dtype = torch.float32)
        output = torch.npu_bounding_box_decode(input1, input2, 0, 0, 0, 0, 1, 1, 1, 1, (10, 10), 0.1)
        self.assertRtolEqual(expected_output, output.cpu())
instantiate_device_type_tests(TestBoundingBoxDecode, globals(), except_for="cpu")
if __name__ == "__main__":
run_tests()
|
# coding: utf-8
from __future__ import division, print_function, unicode_literals
import pytest
from formatcode.convert.errors import PartsCountError
from formatcode.convert.fc import FormatCode
from formatcode.convert.parts import NegativePart, PositivePart, StringPart, ZeroPart
def test_parts_from_tokens():
fc = FormatCode('0.0;\\-0.0;General;"Hello, "@')
assert isinstance(fc.parts[0], PositivePart)
assert len(fc.parts[0].tokens) == 3
assert isinstance(fc.parts[1], NegativePart)
assert len(fc.parts[1].tokens) == 4
assert isinstance(fc.parts[2], ZeroPart)
assert len(fc.parts[2].tokens) == 1
assert isinstance(fc.else_part, ZeroPart)
assert isinstance(fc.parts[3], StringPart)
assert len(fc.parts[3].tokens) == 2
with pytest.raises(PartsCountError):
FormatCode('0.0;\\-0.0;General;"Hello, "@;0.0')
fc = FormatCode('0.0')
assert fc.parts[1].tokens is None
assert fc.parts[2].tokens is None
assert fc.parts[3].tokens is None
|
# Jetfuel Game Engine- A SDL-based 2D game-engine
# Copyright (C) 2017 InfernoStudios
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ctypes import c_void_p
from ctypes import c_int
from ctypes import c_wchar_p
from ctypes import c_bool
from jetfuel.draw.color import color
class text_characteristics_replacement(object):
_jetfuel = None;
textcharsreplacementref = None;
def __init__(self, jetfuelsoloader, font=None):
self._jetfuel = jetfuelsoloader.jetfuelso;
if(font is not None):
self._jetfuel.Text_characteristics_new_from_font.argtypes = [
c_void_p]
self._jetfuel.Text_characteristics_new_from_font.restype = c_void_p;
self.textcharsreplacementref = self._jetfuel.\
Text_characteristics_new_from_font(font.fontref);
else:
self._jetfuel.Text_characteristics_new.restype = c_void_p;
self.textcharsreplacementref = self._jetfuel.\
Text_characteristics_new();
def delete_ref(self):
if(self.textcharsreplacementref is not None):
self._jetfuel.Text_characteristics_delete.argtypes = [c_void_p];
self._jetfuel.Text_characteristics_delete(
self.textcharsreplacementref);
def get_render_mode(self):
self._jetfuel.Text_characteristics_get_render_mode.\
argtypes = [c_void_p];
self._jetfuel.Text_characteristics_get_render_mode.\
restype = c_int;
return self._jetfuel.Text_characteristics_get_render_mode(
self.textcharsreplacementref);
def set_render_mode(self, rendermode):
self._jetfuel.Text_characteristics_set_render_mode.\
argtypes = [c_void_p, c_int];
self._jetfuel.Text_characteristics_set_render_mode(
self.textcharsreplacementref, rendermode);
def get_text_string(self):
self._jetfuel.Text_characteristics_get_text_string.\
argtypes = [c_void_p];
self._jetfuel.Text_characteristics_get_text_string.\
restype = c_wchar_p;
return self._jetfuel.Text_characteristics_get_text_string(
self.textcharsreplacementref);
def set_text_string(self, textstring):
self._jetfuel.Text_characteristics_set_text_string.\
argtypes = [c_void_p, c_wchar_p];
self._jetfuel.Text_characteristics_set_text_string(
self.textcharsreplacementref, textstring);
def get_text_color(self, jetfuelsoloader):
self._jetfuel.Text_characteristics_get_text_color.argtypes = [c_void_p];
self._jetfuel.Text_characteristics_get_text_color.restype = c_void_p;
currentcolor = color(jetfuelsoloader.jetfuelso);
self._jetfuel.Color_delete.argtypes = [c_void_p];
self._jetfuel.Color_delete(currentcolor.colorref);
currentcolor.colorref = self._jetfuel.\
Text_characteristics_get_text_color(self.textcharsreplacementref);
return currentcolor;
def set_text_color(self, textcolor):
self._jetfuel.Text_characteristics_set_text_color.argtypes = [c_void_p,
c_void_p];
self._jetfuel.Text_characteristics_set_text_color(
self.textcharsreplacementref, textcolor.colorref);
def get_background_color(self, jetfuelsoloader):
self._jetfuel.Text_characteristics_get_background_color.argtypes = [
c_void_p];
self._jetfuel.Text_characteristics_get_background_color.restype = \
c_void_p;
currentcolor = color(jetfuelsoloader.jetfuelso);
self._jetfuel.Color_delete.argtypes = [c_void_p];
self._jetfuel.Color_delete(currentcolor.colorref);
currentcolor.colorref = self._jetfuel.\
Text_characteristics_get_background_color(self.textcharsreplacementref);
return currentcolor;
def set_background_color(self, backgroundcolor):
self._jetfuel.Text_set_background_color.argtypes = [c_void_p, c_void_p];
self._jetfuel.Text_set_background_color(self.textcharsreplacementref,
backgroundcolor.colorref);
def get_font_outline_width(self):
self._jetfuel.Text_characteristics_get_font_outline_width.\
argtypes = [c_void_p];
self._jetfuel.Text_characteristics_get_font_outline_width.\
restype = c_int;
return self._jetfuel.\
Text_characteristics_get_font_outline_width(
self.textcharsreplacementref);
def set_font_outline_width(self, fontoutline):
self._jetfuel.Text_characteristics_set_font_outline_width.\
argtypes = [c_void_p, c_int];
self._jetfuel.Text_characteristics_set_font_outline_width(
self.textcharsreplacementref, fontoutline);
def get_font_kerning_status(self):
self._jetfuel.Text_characteristics_get_font_kerning_status.\
argtypes = [c_void_p];
self._jetfuel.Text_characteristics_get_font_kerning_status.\
restype = c_bool;
return self._jetfuel.\
Text_characteristics_get_font_kerning_status(
self.textcharsreplacementref);
def set_font_kerning_status(self, kerningstatus):
self._jetfuel.Text_characteristics_set_font_kerning_status.\
argtypes = [c_void_p, c_bool];
self._jetfuel.Text_characteristics_set_font_kerning_status(
self.textcharsreplacementref, kerningstatus);
def get_font_style(self):
self._jetfuel.Text_characteristics_get_font_style.\
argtypes = [c_void_p];
self._jetfuel.Text_characteristics_get_font_style.\
restype = c_int;
return self._jetfuel.\
Text_characteristics_get_font_style(
self.textcharsreplacementref);
def set_font_style(self, fontstyle):
self._jetfuel.Text_characteristics_set_font_style.\
argtypes = [c_void_p, c_int];
self._jetfuel.Text_characteristics_set_font_style(
self.textcharsreplacementref, fontstyle);
def get_font_size(self):
self._jetfuel.Text_characteristics_get_font_size.\
argtypes = [c_void_p];
self._jetfuel.Text_characteristics_get_font_size.\
restype = c_int;
return self._jetfuel.\
Text_characteristics_get_font_size(
self.textcharsreplacementref);
def set_font_size(self, fontsize):
self._jetfuel.Text_characteristics_set_font_size.\
argtypes = [c_void_p, c_int];
self._jetfuel.Text_characteristics_set_font_size(
self.textcharsreplacementref, fontsize);
def get_font_hinting(self):
self._jetfuel.Text_characteristics_get_font_hinting.\
argtypes = [c_void_p];
self._jetfuel.Text_characteristics_get_font_hinting.\
restype = c_int;
return self._jetfuel.\
Text_characteristics_get_font_hinting(
self.textcharsreplacementref);
def set_font_hinting(self, fonthinting):
self._jetfuel.Text_characteristics_set_font_hinting.\
argtypes = [c_void_p, c_int];
self._jetfuel.Text_characteristics_set_font_hinting(
self.textcharsreplacementref, fonthinting);
def get_font(self):
self._jetfuel.Text_characteristics_get_font.\
argtypes = [c_void_p];
self._jetfuel.Text_characteristics_get_font.\
restype = c_void_p;
return self._jetfuel.\
Text_characteristics_get_font(
self.textcharsreplacementref);
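# Minimal usage sketch (illustration only; jetfuelsoloader and myfont below are
# hypothetical objects created elsewhere with the engine's shared-object loader
# and font wrappers):
#
#   chars = text_characteristics_replacement(jetfuelsoloader, font=myfont)
#   chars.set_text_string("Hello, world")
#   chars.set_font_size(24)
#   chars.set_font_outline_width(1)
#   chars.delete_ref()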
|
# Databricks notebook source
dbutils.library.installPyPI("azureml-sdk", version = '1.8.0')
dbutils.library.installPyPI("azureml-train-automl-runtime", version = '1.8.0')
dbutils.library.installPyPI('azure-mgmt-resource', version="10.2.0")
dbutils.library.restartPython()
# COMMAND ----------
import azureml.core
from azureml.core import Workspace
from azureml.core.authentication import ServicePrincipalAuthentication
from azureml.core.experiment import Experiment
from azureml.core.workspace import Workspace
from azureml.train.automl import AutoMLConfig
from azureml.train.automl.run import AutoMLRun
import os
import random
import time
import json
import pandas as pd
import numpy as np
import logging
# COMMAND ----------
# MAGIC %md Loading data to Pandas
# COMMAND ----------
df = pd.read_csv('/dbfs/FileStore/shared_uploads/[email protected]/sample.csv')
# COMMAND ----------
# MAGIC %md Connecting to Azure ML for AutomML tracking
# COMMAND ----------
secret = dbutils.secrets.get(scope="secrets", key = "spsecret2")
# COMMAND ----------
svc_pr = ServicePrincipalAuthentication(tenant_id="",service_principal_id="",service_principal_password=secret)
# COMMAND ----------
ws = Workspace(
workspace_name="azure-ml-hudua",
subscription_id = "",
resource_group = "",
auth = svc_pr
)
# COMMAND ----------
print("Found workspace {} at location {}".format(ws.name, ws.location))
# COMMAND ----------
project_folder = './Sample_ML'
if not os.path.isdir(project_folder):
os.mkdir(project_folder)
print('Projects will be created in {}.'.format(project_folder))
# COMMAND ----------
# MAGIC %md now let's do automated ML with Databricks (local) compute
# COMMAND ----------
automl_config = AutoMLConfig(task='classification',
primary_metric='accuracy',
debug_log='automl_ResourceType.log',
experiment_timeout_minutes=20,
training_data=df,
label_column_name="class",
enable_early_stopping=True,
n_cross_validations=5,
verbosity=logging.INFO
)
# COMMAND ----------
experiment_name = 'automl_classification_experiment'
experiment = Experiment(ws, experiment_name)
local_run = experiment.submit(automl_config, show_output=False)
local_run.wait_for_completion()
# COMMAND ----------
best_run, fitted_model = local_run.get_output()
print(best_run)
print(fitted_model)
# COMMAND ----------
# MAGIC %md This is just to study overfitting (so using same training dataset for predictions. In practice, you should divide dataset into train and test)
# COMMAND ----------
X_test = df.reset_index(drop=True)
X_test['predictions'] = fitted_model.predict(X_test[['sepal_length','sepal_width','petal_length','petal_width']])
# COMMAND ----------
X_test
# COMMAND ----------
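# MAGIC %md Added sanity check: accuracy of the fitted model on the training data itself (a proper evaluation would use a held-out test split)
# COMMAND ----------
print("Training accuracy:", (X_test['predictions'] == X_test['class']).mean())
# COMMAND ----------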
|
import os
import string
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from common.utils import DATA_DIR
pos_tags_features = [
'noun',
'verb',
'adjective',
'adverb'
]
cos_sim_features = [
'cos_sim'
]
sentiment_features = [
'positive_count',
'negative_count'
]
word_related_features = [
'unique_lemma',
'unique_tokens'
]
tf_idf_features = [
'tf_idf'
]
boosting_params = {
"n_estimators": [20, 50, 100, 200, 300, 400],
"max_depth": [1, 2, 3, 5, 8, 10],
"learning_rate": [0.01, 0.03, 0.05]
}
rf_params = {
"n_estimators": [20, 50, 100, 200, 300, 400],
"max_depth": [1, 2, 3, 5],
"max_features": [1, None, "sqrt"]
}
svm_params = {
"loss": ['hinge', 'squared_hinge'],
"C": [0.5, 1.0, 10],
"max_iter": [10000]
}
# boosting_params = {
# "n_estimators": [20],
# "max_depth": [1],
# "learning_rate": [0.01]
# }
# rf_params = {
# "n_estimators": [20],
# "max_depth": [1],
# "max_features": [1]
# }
# svm_params = {
# "loss": ['hinge'],
# "C": [0.5],
# "max_iter": [10000]
# }
def get_answers(removed_ids):
answers = pd.read_csv(os.path.join(DATA_DIR, 'all_data.csv'))
answers = answers[~answers['id'].isin(removed_ids)]
answers = answers.iloc[:, 2:]
# remove punctuation
for answer in answers:
answers[answer] = answers[answer].str.replace('[{}]'.format(string.punctuation), '')
return answers
def add_tfidf_feature_to_data(orig_df, columns, v):
X_td = v.fit_transform(columns)
tf_idf = pd.DataFrame(X_td.toarray(), columns=v.get_feature_names())
tf_idf = tf_idf.add_prefix('tf_idf-')
tf_idf = orig_df.reset_index(drop=True).join(tf_idf.reset_index(drop=True), rsuffix='_r')
return tf_idf
def add_tfidf_features(data, removed_ids):
answers = get_answers(removed_ids)
all_answers = answers['question 1']
v = TfidfVectorizer(lowercase=False)
for answer in answers.iloc[:, 1:]:
all_answers = all_answers + " " + answers[answer].fillna('')
data = add_tfidf_feature_to_data(data, all_answers, v)
return data
def get_features(df_input, patterns, ans=None):
if not ans:
ans = '[0-9]+'
features = pd.DataFrame()
for pattern in patterns:
if pattern != 'tf_idf':
regex = 'q_{}_{}.*'.format(ans, pattern)
else:
regex = pattern
cols = df_input.filter(regex=regex, axis=1)
features = pd.concat([features, cols], axis=1)
return features
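# Minimal usage sketch (added for illustration; the DataFrame and its column
# names are hypothetical, but they follow the 'q_<answer>_<pattern>' naming
# convention that get_features() filters on).
if __name__ == "__main__":
    df_example = pd.DataFrame({
        'q_1_noun_count': [3, 5],
        'q_1_verb_count': [1, 2],
        'tf_idf-word': [0.1, 0.7],
        'label': [0, 1],
    })
    # Part-of-speech columns: regexes 'q_[0-9]+_noun.*', 'q_[0-9]+_verb.*', ...
    print(get_features(df_example, pos_tags_features).columns.tolist())
    # TF-IDF columns: the bare 'tf_idf' pattern matches the 'tf_idf-' prefix
    print(get_features(df_example, tf_idf_features).columns.tolist())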
|
from crownstone_core.protocol.BlePackets import ControlStateSetPacket, ControlPacket
from crownstone_core.protocol.BluenetTypes import StateType, ControlType
from crownstone_uart.core.UartEventBus import UartEventBus
from crownstone_uart.core.uart.uartPackets.UartMessagePacket import UartMessagePacket
from crownstone_uart.core.uart.UartTypes import UartTxType, UartMessageType
from crownstone_uart.core.uart.uartPackets.UartWrapperPacket import UartWrapperPacket
from crownstone_uart.topics.SystemTopics import SystemTopics
class UsbDevHandler:
def setAdvertising(self, enabled):
"""
Enable/ disable the advertising
:param enabled: Boolean
:return:
"""
self._send(UartTxType.ENABLE_ADVERTISEMENT, self._getPayload(enabled))
def setMeshing(self, enabled):
"""
Enable/ disable the Meshing
:param enabled: Boolean
:return:
"""
self._send(UartTxType.ENABLE_MESH, self._getPayload(enabled))
def requestCrownstoneId(self):
"""
Request the Crownstone ID. This is a uint16
:return:
"""
self._send(UartTxType.GET_CROWNSTONE_ID, [])
def requestMacAddress(self):
"""
        Request the MAC address.
:return:
"""
self._send(UartTxType.GET_MAC_ADDRESS, [])
def increaseCurrentRange(self):
"""
Increase the GAIN on the current sensing
:return:
"""
self._send(UartTxType.ADC_CONFIG_INC_RANGE_CURRENT, [])
def decreaseCurrentRange(self):
"""
Decrease the GAIN on the current sensing
:return:
"""
self._send(UartTxType.ADC_CONFIG_DEC_RANGE_CURRENT, [])
def increaseVoltageRange(self):
"""
Increase the GAIN on the voltage sensing
:return:
"""
self._send(UartTxType.ADC_CONFIG_INC_RANGE_VOLTAGE, [])
def decreaseVoltageRange(self):
"""
Decrease the GAIN on the voltage sensing
:return:
"""
self._send(UartTxType.ADC_CONFIG_DEC_RANGE_VOLTAGE, [])
def setDifferentialModeCurrent(self, enabled):
"""
Enable/disable differential mode on the current sensing
:param enabled: Boolean
:return:
"""
self._send(UartTxType.ADC_CONFIG_DIFFERENTIAL_CURRENT, self._getPayload(enabled))
def setDifferentialModeVoltage(self, enabled):
"""
Enable/disable differential mode on the voltage sensing
:param enabled: Boolean
:return:
"""
self._send(UartTxType.ADC_CONFIG_DIFFERENTIAL_VOLTAGE, self._getPayload(enabled))
def setVoltageChannelPin(self, pin):
"""
Select the measurement pin for the voltage sensing
:param pin: int [0 .. 255]
:return:
"""
self._send(UartTxType.ADC_CONFIG_VOLTAGE_PIN, [pin])
def toggleVoltageChannelPin(self):
"""
        Toggle the measurement pin for the voltage sensing
:return:
"""
self._send(UartTxType.ADC_CONFIG_VOLTAGE_PIN, [])
def setSendCurrentSamples(self, enabled):
"""
Enable/ disable the sending of the measured current buffer.
:param enabled: Boolean
:return:
"""
self._send(UartTxType.POWER_LOG_CURRENT, self._getPayload(enabled))
def setSendVoltageSamples(self, enabled):
"""
Enable/ disable the sending of the measured voltage buffer.
:param enabled: Boolean
:return:
"""
self._send(UartTxType.POWER_LOG_VOLTAGE, self._getPayload(enabled))
def setSendFilteredCurrentSamples(self, enabled):
"""
Enable/ disable the sending of the filtered current sample buffer.
:param enabled: Boolean
:return:
"""
self._send(UartTxType.POWER_LOG_FILTERED_CURRENT, self._getPayload(enabled))
def setSendFilteredVoltageSamples(self, enabled):
"""
Enable/ disable the sending of the filtered voltage sample buffer.
:param enabled: Boolean
:return:
"""
self._send(UartTxType.POWER_LOG_FILTERED_VOLTAGE, self._getPayload(enabled))
def setSendCalculatedSamples(self, enabled):
"""
Enable/ disable the sending of the calculated power samples.
:param enabled: Boolean
:return:
"""
self._send(UartTxType.POWER_LOG_CALCULATED_POWER, self._getPayload(enabled))
def setUartMode(self, mode):
"""
Set UART mode.
:param mode: : 0=none 1=RX only, 2=TX only, 3=TX and RX
:return:
"""
if (mode < 0) or (mode > 3):
return
controlPacket = ControlStateSetPacket(StateType.UART_ENABLED).loadUInt8(mode).getPacket()
self._send(UartTxType.CONTROL, controlPacket)
def resetCrownstone(self):
"""
Reset the Crownstone
:return:
"""
resetPacket = ControlPacket(ControlType.RESET).getPacket()
self._send(UartTxType.CONTROL, resetPacket)
def toggleRelay(self, isOn):
val = 0
if isOn:
val = 1
switchPacket = ControlPacket(ControlType.RELAY).loadUInt8(val).getPacket()
self._send(UartTxType.CONTROL, switchPacket)
def toggleIGBTs(self, isOn):
val = 0
if isOn:
val = 100
switchPacket = ControlPacket(ControlType.PWM).loadUInt8(val).getPacket()
self._send(UartTxType.CONTROL, switchPacket)
def toggleAllowDimming(self, isOn):
val = 0
if isOn:
val = 1
instructionPacket = ControlPacket(ControlType.ALLOW_DIMMING).loadUInt8(val).getPacket()
self._send(UartTxType.CONTROL, instructionPacket)
# MARK: internal methods
def _getPayload(self, boolean):
payload = 0
if boolean:
payload = 1
return [payload]
def _send(self, opCode: UartTxType, payload: list):
# send over uart
uartMessage = UartMessagePacket(opCode, payload).getPacket()
uartPacket = UartWrapperPacket(UartMessageType.UART_MESSAGE, uartMessage).getPacket()
UartEventBus.emit(SystemTopics.uartWriteData, uartPacket)
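# Minimal usage sketch (illustration only): UsbDevHandler only builds UART packets
# and emits them on the UartEventBus, so driving it is a matter of calling the
# set/toggle methods. Whether anything consumes those messages depends on the
# surrounding crownstone_uart setup, which is outside this snippet.
if __name__ == "__main__":
    handler = UsbDevHandler()
    handler.setAdvertising(True)   # emits an ENABLE_ADVERTISEMENT uart message
    handler.setUartMode(3)         # TX and RX
    handler.toggleRelay(True)      # relay on via a CONTROL packet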
|
import hashlib
import six
# construct file path
def construct_file_path(base_path, scope, lfn):
hash = hashlib.md5()
hash.update(six.b('%s:%s' % (scope, lfn)))
hash_hex = hash.hexdigest()
correctedscope = "/".join(scope.split('.'))
dstURL = "{basePath}/{scope}/{hash1}/{hash2}/{lfn}".format(basePath=base_path,
scope=correctedscope,
hash1=hash_hex[0:2],
hash2=hash_hex[2:4],
lfn=lfn)
return dstURL
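# Minimal usage sketch (illustration only; the base path, scope and LFN are made
# up). The layout is deterministic: the first four hex digits of md5("scope:lfn")
# become two directory levels under the dotted scope.
if __name__ == "__main__":
    print(construct_file_path("/data/rucio", "user.alice", "file.root"))
    # e.g. /data/rucio/user/alice/ab/cd/file.root (the hash digits will differ)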
|
#!/usr/bin/env python
"""consistency tests
While many of the tests utilize similar trees and input data, the overlap
is not necessarily 100%. Many of these inputs are written with specific tests
in mind.
"""
__author__ = "Donovan Park"
__copyright__ = "Copyright 2014, The tax2tree project"
__credits__ = ["Donovan Park"]
__license__ = "BSD"
__version__ = "1.0"
__maintainer__ = "Donovan Park"
__email__ = "[email protected]"
__status__ = "Development"
from unittest import TestCase, main
import t2t.nlevel as nl
from t2t.consistency import Consistency
class ConsistencyTests(TestCase):
def setUp(self):
pass
def tearDown(self):
nl.set_rank_order(['d', 'p', 'c', 'o', 'f', 'g', 's'])
def test_consistency_missing(self):
"""Test consistency of taxa in tree with missing taxa"""
seed_con = 'f__Lachnospiraceae; g__Bacteroides; s__'
nl.determine_rank_order(seed_con)
tipname_map = {'a': ['f__Lachnospiraceae', 'g__Bacteroides', None],
'c': ['f__Lachnospiraceae', 'g__Bacteroides', 's__Bacteroides pectinophilus'],
'b': ['f__Lachnospiraceae', 'g__Bacteroides', None], 'e': [None, None, None],
'd': ['f__Lachnospiraceae', 'g__Bacteroides', 's__Bacteroides pectinophilus'],
'g': [None, None, None], 'f': ['f__Lachnospiraceae', 'g__Lachnospira', None],
'h': ['f__Lachnospiraceae', 'g__Lachnospira', 's__Bacteroides pectinophilus']}
tree = nl.load_tree('(((a,b),(c,d)),((e,f),(g,h)));', tipname_map)
counts = nl.collect_names_at_ranks_counts(tree)
nl.decorate_ntips_rank(tree)
nl.decorate_name_counts(tree)
# determine taxonomic consistency of rooted tree
#expected_consistency_index
c = Consistency(counts, len(nl.RANK_ORDER))
consistency_index = c.calculate(tree, rooted=True)
self.assertAlmostEqual(consistency_index[0]['f__Lachnospiraceae'], 1.0)
self.assertAlmostEqual(consistency_index[1]['g__Bacteroides'], 1.0)
self.assertAlmostEqual(consistency_index[1]['g__Lachnospira'], 1.0)
self.assertAlmostEqual(consistency_index[2]['s__Bacteroides pectinophilus'], 1.0)
#determine consistency of unrooted tree
consistency_index = c.calculate(tree, rooted=False)
self.assertAlmostEqual(consistency_index[0]['f__Lachnospiraceae'], 1.0)
self.assertAlmostEqual(consistency_index[1]['g__Bacteroides'], 1.0)
self.assertAlmostEqual(consistency_index[1]['g__Lachnospira'], 1.0)
self.assertAlmostEqual(consistency_index[2]['s__Bacteroides pectinophilus'], 1.0)
def test_consistency_unrooted(self):
"""Test consistency of taxa with a taxa that is only monophyletic in unrooted tree"""
seed_con = 'f__Lachnospiraceae; g__Bacteroides; s__'
nl.determine_rank_order(seed_con)
tipname_map = {'a': ['f__Lachnospiraceae', 'g__Bacteroides', 's__Bacteroides pectinophilus'],
'b': ['f__Lachnospiraceae', 'g__Bacteroides', 's__Bacteroides pectinophilus'],
'c': ['f__Lachnospiraceae', 'g__Bacteroides', 's__Bacteroides pectinophilus'],
'd': ['f__Lachnospiraceae', 'g__Bacteroides', 's__Bacteroides acidifaciens'],
'e': ['f__Lachnospiraceae', 'g__Bacteroides', 's__Bacteroides acidifaciens']}
tree = nl.load_tree('((a,b),(c,(d,e)));', tipname_map)
counts = nl.collect_names_at_ranks_counts(tree)
nl.decorate_ntips_rank(tree)
nl.decorate_name_counts(tree)
# determine taxonomic consistency of rooted tree
#expected_consistency_index
c = Consistency(counts, len(nl.RANK_ORDER))
consistency_index = c.calculate(tree, rooted=True)
self.assertAlmostEqual(consistency_index[0]['f__Lachnospiraceae'], 1.0)
self.assertAlmostEqual(consistency_index[1]['g__Bacteroides'], 1.0)
self.assertAlmostEqual(consistency_index[2]['s__Bacteroides pectinophilus'], 0.66666666)
self.assertAlmostEqual(consistency_index[2]['s__Bacteroides acidifaciens'], 1.0)
#determine consistency of unrooted tree
consistency_index = c.calculate(tree, rooted=False)
self.assertAlmostEqual(consistency_index[0]['f__Lachnospiraceae'], 1.0)
self.assertAlmostEqual(consistency_index[1]['g__Bacteroides'], 1.0)
self.assertAlmostEqual(consistency_index[2]['s__Bacteroides pectinophilus'], 1.0)
self.assertAlmostEqual(consistency_index[2]['s__Bacteroides acidifaciens'], 1.0)
if __name__ == '__main__':
main()
|
import json
import logging
import os
from deployer import conf
from deployer.components.deployment import Deployment
from deployer.connectors.okeanos import OkeanosConnector
__author__ = 'Giannis Giannakopoulos'
def configure_logger():
"""
Logging configuration
:return:
"""
logging.basicConfig()
logging.getLogger("root").setLevel(conf.LOG_LEVEL)
logging.getLogger("vmgroup").setLevel(conf.LOG_LEVEL)
logging.getLogger("vm").setLevel(conf.LOG_LEVEL)
def transform_description(description, path_prefix):
"""
This function replaces the script paths with script contents.
:param description:
:return:
"""
logging.getLogger("root").info("Transforming application description")
groups = description['groups']
for g in groups:
scripts = g['scripts']
for s in scripts:
if 'path' in s:
f = open(path_prefix+"/"+s['path'])
con = f.read()
f.close()
s['content'] = con
s.pop("path", None)
return description
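# Illustration of the transformation above (hypothetical description): a script
# referenced by 'path' is replaced in place by its file 'content', so
#
#   {"groups": [{"scripts": [{"path": "setup.sh"}]}]}
#
# becomes, after transform_description(description, path_prefix),
#
#   {"groups": [{"scripts": [{"content": "<contents of <path_prefix>/setup.sh>"}]}]}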
def parse_description_file(description_file_path):
"""
:param description_file_path:
:return: The description in dictionary form
"""
logging.getLogger("root").info("Parsing application description")
    f = open(description_file_path)
content_json = f.read()
f.close()
content = json.loads(content_json)
description = transform_description(
content,
os.path.dirname(os.path.abspath(description_file_path)))
return description
def configure_connector(provider):
"""
Configures a new cloud connector and authenticates the cloud user.
    :param provider: the cloud provider description (name and credentials)
:return:
"""
logging.getLogger("root").info("Configuring the cloud connector")
if provider['name'] == "~okeanos" or provider['name'] == "okeanos":
connector = OkeanosConnector()
connector.configure(provider)
return connector
else:
        raise NotImplementedError("The connector is not supported")
def start_deployment(cloud_connector, description):
"""
Starting a new deployment
:param cloud_connector:
:param description:
:return:
"""
logging.getLogger("root").info("Preparing the connector")
cloud_connector.prepare()
logging.getLogger("root").info("Starting new deployment")
deployment = Deployment()
deployment.cloud_connector = cloud_connector
deployment.configure(description)
logging.getLogger("root").info("Launching deployment")
deployment.launch()
logging.getLogger("root").info("Executing deployment scripts")
while deployment.has_more_steps():
deployment.execute_script()
return deployment
def terminate_deployment(deployment):
"""
Terminate deployment ability
:param deployment:
:return:
"""
logging.getLogger("root").info("Terminating deployment")
deployment.terminate()
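# Typical flow, sketched from the helpers above (illustration only; the file
# name is hypothetical and nothing here executes):
#
#   configure_logger()
#   description = parse_description_file("description.json")
#   connector = configure_connector(description['provider'])
#   deployment = start_deployment(connector, description)
#   ...
#   terminate_deployment(deployment)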
def load_state_file(statefile_path):
"""
This method loads the state file and create a deployment object and a cloud connector.
:param statefile_path: the path where the statefile exists
:return: deployment object, cloud connector object
"""
logging.getLogger("root").info("Loading state file")
f = open(statefile_path, 'r')
json_content = f.read()
f.close()
state = json.loads(json_content)
cloud_connector = configure_connector(state['provider'])
deployment = Deployment()
deployment.deserialize(state['deployment'], cloud_connector)
return deployment, cloud_connector
def save_state_file(deployment, description, statefile_path, indent=2):
"""
Save the statefile of the deployment to the specified path
:param deployment:
:param statefile_path:
:return:
"""
logging.getLogger("root").info("Saving state file")
dictionary = dict()
dictionary['deployment'] = deployment.serialize()
dictionary['provider'] = description['provider']
json_content = json.dumps(dictionary, indent=indent)
f = open(statefile_path, 'w')
f.write(json_content)
f.flush()
    f.close()
|
from __future__ import print_function, division
import os
import torch
from torch.autograd import Variable
from torch.utils.data import Dataset
from skimage import io
import pandas as pd
import numpy as np
from . import transformation as tf
import scipy.io
import matplotlib
import matplotlib.pyplot as plt
class ImagePairDataset(Dataset):
"""
Image pair dataset used for weak supervision
Args:
csv_file (string): Path to the csv file with image names and transformations.
training_image_path (string): Directory with the images.
output_size (2-tuple): Desired output size
transform (callable): Transformation for post-processing the training pair (eg. image normalization)
"""
def __init__(
self,
dataset_csv_path,
dataset_csv_file,
dataset_image_path,
dataset_size=0,
output_size=(240, 240),
transform=None,
random_crop=False,
keypoints_on=False,
original=True,
test=False,
):
self.category_names = [
"aeroplane",
"bicycle",
"bird",
"boat",
"bottle",
"bus",
"car",
"cat",
"chair",
"cow",
"diningtable",
"dog",
"horse",
"motorbike",
"person",
"pottedplant",
"sheep",
"sofa",
"train",
"tvmonitor",
]
self.random_crop = random_crop
self.out_h, self.out_w = output_size
self.annotations = os.path.join(
dataset_image_path, "PF-dataset-PASCAL", "Annotations"
)
self.train_data = pd.read_csv(os.path.join(dataset_csv_path, dataset_csv_file))
if dataset_size is not None and dataset_size != 0:
dataset_size = min((dataset_size, len(self.train_data)))
self.train_data = self.train_data.iloc[0:dataset_size, :]
self.img_A_names = self.train_data.iloc[:, 0]
self.img_B_names = self.train_data.iloc[:, 1]
self.set = self.train_data.iloc[:, 2].values
self.test = test
if self.test == False:
self.flip = self.train_data.iloc[:, 3].values.astype("int")
self.dataset_image_path = dataset_image_path
self.transform = transform
        # no cuda as dataset is called from CPU threads in dataloader and produces conflicts
self.affineTnf = tf.AffineTnf(
out_h=self.out_h, out_w=self.out_w, use_cuda=False
) # resize
self.keypoints_on = keypoints_on
self.original = original
def __len__(self):
return len(self.img_A_names)
def __getitem__(self, idx):
# get pre-processed images
image_set = self.set[idx]
if self.test == False:
flip = self.flip[idx]
else:
flip = False
cat = self.category_names[image_set - 1]
image_A, im_size_A, kp_A, bbox_A = self.get_image(
self.img_A_names, idx, flip, category_name=cat
)
image_B, im_size_B, kp_B, bbox_B = self.get_image(
self.img_B_names, idx, flip, category_name=cat
)
A, kp_A = self.get_gt_assignment(kp_A, kp_B)
sample = {
"source_image": image_A,
"target_image": image_B,
"source_im_size": im_size_A,
"target_im_size": im_size_B,
"set": image_set,
"source_points": kp_A,
"target_points": kp_B,
"source_bbox": bbox_A,
"target_bbox": bbox_B,
"assignment": A,
}
if self.transform:
sample = self.transform(sample)
if self.original:
sample["source_original"] = image_A
sample["target_original"] = image_B
# # get key points annotation
# np_img_A = sample['source_original'].numpy().transpose(1,2,0)
# np_img_B = sample['target_original'].numpy().transpose(1,2,0)
# print('bbox_A', bbox_A)
# print('bbox_B', bbox_B)
# rect = matplotlib.patches.Rectangle((bbox_A[0],bbox_A[1]),bbox_A[2]-bbox_A[0],bbox_A[3]-bbox_A[1],linewidth=1,edgecolor='r',facecolor='none')
# print(rect)
# fig=plt.figure(figsize=(1, 2))
# ax0 = fig.add_subplot(1, 2, 1)
# ax0.add_patch(rect)
# plt.imshow(np_img_A)
# # dispaly bounding boxes
# for i, kp in enumerate(kp_A):
# if kp[0] == kp[0]:
# ax0.scatter(kp[0],kp[1], s=5, color='r',alpha=1.)
# ax1 = fig.add_subplot(1, 2, 2)
# rect = matplotlib.patches.Rectangle((bbox_B[0],bbox_B[1]),bbox_B[2]-bbox_B[0],bbox_B[3]-bbox_B[1],linewidth=1,edgecolor='r',facecolor='none')
# print(rect)
# ax1.add_patch(rect)
# plt.imshow(np_img_B)
# for i, kp in enumerate(kp_B):
# if kp[0] == kp[0]:
# ax1.scatter(kp[0],kp[1], s=5, color='r',alpha=1.)
# plt.show()
return sample
def get_gt_assignment(self, kp_A, kp_B):
"""
        get_gt_assignment() gets the ground truth assignment matrix
Arguments:
kp_A [Tensor, float32] Nx3: ground truth key points from the source image
kp_B [Tensor, float32] Nx3: ground truth key points from the target image
Returns:
A [Tensor, float32] NxN: ground truth assignment matrix
kp_A [Tensor, float32] Nx3: ground truth key points + change original idx into target column idx
"""
s = kp_A[:, 2].long()
t = kp_B[:, 2].long()
N = s.shape[0]
A = torch.zeros(N, N)
for n in range(N):
if s[n] == 0:
continue
idx = (t == s[n]).nonzero()
if idx.nelement() == 0:
continue
A[n, idx] = 1
kp_A[n, 2] = idx + 1
return A, kp_A
def get_image(self, img_name_list, idx, flip, category_name=None):
img_name = os.path.join(self.dataset_image_path, img_name_list.iloc[idx])
image = io.imread(img_name)
# if grayscale convert to 3-channel image
if image.ndim == 2:
image = np.repeat(np.expand_dims(image, 2), axis=2, repeats=3)
if self.keypoints_on:
keypoints, bbox = self.get_annotations(
img_name_list.iloc[idx], category_name
)
# do random crop
if self.random_crop:
h, w, c = image.shape
top = np.random.randint(h / 4)
bottom = int(3 * h / 4 + np.random.randint(h / 4))
left = np.random.randint(w / 4)
right = int(3 * w / 4 + np.random.randint(w / 4))
image = image[top:bottom, left:right, :]
# get image size
im_size = np.asarray(image.shape)
# flip horizontally if needed
if flip:
image = np.flip(image, 1)
if self.keypoints_on:
N, _ = keypoints.shape
for n in range(N):
if keypoints[n, 2] > 0:
keypoints[n, 0] = im_size[1] - keypoints[n, 0]
bbox[0] = im_size[1] - bbox[0]
bbox[2] = im_size[1] - bbox[2]
tmp = bbox[0]
bbox[0] = bbox[2]
bbox[2] = tmp
# convert to torch Variable
image = np.expand_dims(image.transpose((2, 0, 1)), 0)
image = torch.Tensor(image.astype(np.float32))
image_var = Variable(image, requires_grad=False)
# Resize image using bilinear sampling with identity affine tnf
image = self.affineTnf(image_var).data.squeeze(
0
        )  # the image is resized to (self.out_h, self.out_w)
im_size = torch.Tensor(im_size.astype(np.float32)) # original image sise
if self.keypoints_on:
keypoints[:, 0] = keypoints[:, 0] / float(im_size[1]) * float(self.out_w)
keypoints[:, 1] = keypoints[:, 1] / float(im_size[0]) * float(self.out_h)
bbox[0] = bbox[0] / float(im_size[1]) * float(self.out_w)
bbox[1] = bbox[1] / float(im_size[0]) * float(self.out_h)
bbox[2] = bbox[2] / float(im_size[1]) * float(self.out_w)
bbox[3] = bbox[3] / float(im_size[0]) * float(self.out_h)
return (image, im_size, keypoints, bbox)
else:
return (image, im_size)
def construct_graph(self, kp):
"""
        construct_graph() constructs a sparse graph represented by G and H.
Arguments:
kp [np array float, N x 3] stores the key points
Returns
G [np.array float, 32 x 96]: stores nodes by edges, if c-th edge leaves r-th node
H [np.array float, 32 x 96]: stores nodes by edges, if c-th edge ends at r-th node
"""
N = kp.shape[0]
        G = np.zeros((32, 96))
        H = np.zeros((32, 96))
return G, H
def get_annotations(self, keypoint_annotation, category_name):
"""
        get_annotations() gets the key point annotations
        Arguments:
            keypoint_annotation str: the file name of the key point annotations
category_name str: the category name of the image
Returns:
keypoint [Tensor float32] 32x3
bbox [Tensor float32] 4
"""
base, _ = os.path.splitext(os.path.basename(keypoint_annotation))
# print('base', os.path.join(self.annotations, category_name, base +'.mat'))
anno = scipy.io.loadmat(
os.path.join(self.annotations, category_name, base + ".mat")
)
keypoint = np.zeros((32, 3), dtype=np.float32)
annotation = anno["kps"]
N = annotation.shape[0]
for i in range(N):
if (
annotation[i, 0] == annotation[i, 0]
and annotation[i, 1] == annotation[i, 1]
): # not nan
keypoint[i, :2] = annotation[i]
keypoint[i, 2] = i + 1
np.random.shuffle(keypoint)
keypoint = torch.Tensor(keypoint.astype(np.float32))
bbox = anno["bbox"][0].astype(np.float32)
return keypoint, bbox
class ImagePairDatasetKeyPoint(ImagePairDataset):
def __init__(
self,
dataset_csv_path,
dataset_csv_file,
dataset_image_path,
dataset_size=0,
output_size=(240, 240),
transform=None,
random_crop=False,
):
super(ImagePairDatasetKeyPoint, self).__init__(
dataset_csv_path,
dataset_csv_file,
dataset_image_path,
dataset_size=dataset_size,
output_size=output_size,
transform=transform,
random_crop=random_crop,
)
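# Minimal construction sketch (illustration only): the csv/image paths below are
# hypothetical placeholders for a local PF-PASCAL layout; real data is required
# for __getitem__ to succeed.
if __name__ == "__main__":
    dataset = ImagePairDataset(
        dataset_csv_path="training_data/pf-pascal",   # hypothetical path
        dataset_csv_file="train_pairs.csv",           # hypothetical file
        dataset_image_path="datasets/pf-pascal",      # hypothetical path
        output_size=(240, 240),
        keypoints_on=True,
    )
    sample = dataset[0]
    print(sample["source_image"].shape, sample["target_image"].shape)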
|
import pytest
import os
import json
@pytest.fixture(scope="function")
def stdio(cli, config, is_encrypted_test):
if not config.get("network.nano_node.rpc_url", None):
# Set the 'rpc_url' field even if we're not connecting to network
# to ensure configuration is complete
config.set("network.nano_node.rpc_url", "http://127.0.0.1:9076")
config.save()
def run_stdio(args, env=None, success=True, raw=False):
if not env:
env = {}
if is_encrypted_test:
env["PASSPHRASE"] = "password"
environ_copy = os.environ.copy()
try:
os.environ.update(env)
result = cli(
args=["--config", config.path, "-vvv", "--ui", "stdio"] + args
)
output = result.out
finally:
os.environ.clear()
os.environ.update(environ_copy)
if raw:
return result.out + result.err
# Try to remove prompt output before the actual JSON result
if "{" in output:
output = output[output.find("{"):]
output = json.loads(output)
if success:
assert output["status"] == "success", "Expected success, got {} instead".format(output)
else:
assert output["status"] == "error", "Expected failure, got {} instead".format(output)
return output
return run_stdio
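# Example of using the fixture in a test module (illustration only; the CLI
# arguments are hypothetical and depend on the application's command set):
#
#   def test_config_command(stdio):
#       output = stdio(["config", "list"], success=True)
#       assert output["status"] == "success"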
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = ['pandas']
setup_requirements = ['pandas', 'pyensembl']
setup(
author="Gokcen Eraslan",
author_email='[email protected]',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
description="Python package for conversions between ENSEMBL IDs and gene names (annotables + pyensembl)",
install_requires=requirements,
license="Apache Software License 2.0",
long_description=readme + '\n\n' + history,
include_package_data=True,
keywords='pyannotables',
name='pyannotables',
packages=find_packages(),
package_data={'': ['datafile_*']},
setup_requires=setup_requirements,
url='https://github.com/gokceneraslan/pyannotables',
version='0.5',
zip_safe=False,
)
|
from flask import Flask,json
import unittest
from app import app
from app.models import User, Favorites, db
class FavoritesUnit(unittest.TestCase):
app = Flask(__name__)
def test_getfavorites_401(self):
user="user1"
found = 0
teststring = "Health"
find_user = User.query.filter_by(username=user).first()
if(find_user is not None):
User.query.filter_by(username=user).delete()
Favorites.query.filter_by(username=user).delete()
resp1 = app.test_client().get('/api/' + user + '/getFavorites')
data = json.loads(resp1.get_data(as_text=True))
assert data["dataout"] == "None"
assert resp1.status_code == 401
def test_getfavorites_200(self):
user="user1"
password="pass1"
resp2 = None
found = 0
type_in = "add"
testdata = {"type":"hey","title":"hey", "author": "there",
"pub_date":"127","desc": "this is a test",
"image":"https://hey", "source":"https://there"}
data_final = "None"
find_user = User.query.filter_by(username=user).first()
if(find_user is not None):
find_user = User.query.filter_by(username=user).delete()
resp1 = app.test_client().post('/api/signup',
data=json.dumps({"username":user,"password":password}),
content_type='application/json')
find_user = User.query.filter_by(username=user).first()
if(find_user is not None):
resp2 = app.test_client().post("/api/" + user + "/" + type_in + "/submitFavorite",
data=json.dumps(testdata),
content_type='application/json')
resp3 = app.test_client().get('/api/' + user + '/getFavorites')
data_final = json.loads(resp3.get_data(as_text=True))
assert data_final != "None"
def test_submitfavorites_add(self):
user="user1"
password="pass1"
resp2 = None
found = 0
type_in = "add"
testdata = {"type":"hey","title":"hey", "author": "there",
"pub_date":"127","desc": "this is a test",
"image":"https://hey", "source":"https://there"}
data_final = "None"
find_user = User.query.filter_by(username=user).first()
if(find_user is not None):
find_user = User.query.filter_by(username=user).delete()
resp1 = app.test_client().post('/api/signup',
data=json.dumps({"username":user,"password":password}),
content_type='application/json')
find_user = User.query.filter_by(username=user).first()
if(find_user is not None):
resp2 = app.test_client().post("/api/" + user + "/" + type_in + "/submitFavorite",
data=json.dumps(testdata),
content_type='application/json')
data_final = json.loads(resp2.get_data(as_text=True))
assert data_final != "None"
def test_submitfavorites_rem(self):
user="user1"
password="pass1"
resp2 = None
found = 0
type_in = "add"
testdata = {"type":"hey","title":"hey", "author": "there",
"pub_date":"127","desc": "this is a test",
"image":"https://hey", "source":"https://there"}
data_final = "None"
find_user = User.query.filter_by(username=user).first()
if(find_user is not None):
find_user = User.query.filter_by(username=user).delete()
resp1 = app.test_client().post('/api/signup',
data=json.dumps({"username":user,"password":password}),
content_type='application/json')
find_user = User.query.filter_by(username=user).first()
if(find_user is not None):
resp2 = app.test_client().post("/api/" + user + "/" + type_in + "/submitFavorite",
data=json.dumps(testdata),
content_type='application/json')
find_fav = Favorites.query.filter_by(username=user).first()
if(find_fav is not None):
type_in = "rem"
resp3 = app.test_client().post("/api/" + user + "/" + type_in + "/submitFavorite",
data=json.dumps(testdata),
content_type='application/json')
data_final = json.loads(resp3.get_data(as_text=True))
assert data_final != "None"
def test_submitfavorites_401(self):
user="user1"
found = 0
teststring = "Health"
type_in = "rem"
testdata = {"type":"hey","title":"hey", "author": "there",
"pub_date":"127","desc": "this is a test",
"image":"https://hey", "source":"https://there"}
        find_user = User.query.filter_by(username=user).first()
if(find_user is not None):
User.query.filter_by(username=user).delete()
Favorites.query.filter_by(username=user).delete()
resp2 = app.test_client().post("/api/" + user + "/" + type_in + "/submitFavorite",
data=json.dumps(testdata),
content_type='application/json')
data1 = json.loads(resp2.get_data(as_text=True))
assert data1["dataout"] == "None"
assert resp2.status_code == 401
if __name__ == "__main__":
    unittest.main()
|
from Statistics.standard_deviation import standard_deviation
from Statistics.mean import mean
from Calculator.division import division
from pprint import pprint
def z_score(data):
try:
z_mean = mean(data)
std_dev_result = standard_deviation(data)
z_list = []
for i in data:
z = round(division(std_dev_result, (i-z_mean)), 5)
z_list.append(z)
return z_list
except IndexError:
print("List is empty")
except ZeroDivisionError:
print("ERROR: Can't divide by zero")
except ValueError:
print("ERROR: Check your input value")
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import sys
from typing import Iterator
import click
from jina import Flow, Document, DocumentArray
import logging
MAX_DOCS = int(os.environ.get("JINA_MAX_DOCS", 0))
cur_dir = os.path.dirname(os.path.abspath(__file__))
def config(dataset: str = "star-wars") -> None:
if dataset == "star-wars":
os.environ["JINA_DATA_FILE"] = os.environ.get("JINA_DATA_FILE", "data/StarWars_Descriptions.txt")
os.environ.setdefault('JINA_WORKSPACE', os.path.join(cur_dir, 'workspace'))
os.environ.setdefault(
'JINA_WORKSPACE_MOUNT',
f'{os.environ.get("JINA_WORKSPACE")}:/workspace/workspace')
os.environ.setdefault('JINA_LOG_LEVEL', 'INFO')
os.environ.setdefault('JINA_PORT', str(45678))
def input_generator(file_path: str, num_docs: int) -> Iterator[Document]:
with open(file_path) as file:
lines = file.readlines()
num_lines = len(lines)
if num_docs:
for i in range(min(num_docs, num_lines)):
yield Document(text=lines[i])
else:
for i in range(num_lines):
yield Document(text=lines[i])
def index(num_docs: int) -> None:
flow = Flow().load_config('flows/flow-index.yml')
data_path = os.path.join(os.path.dirname(__file__), os.environ.get("JINA_DATA_FILE", None))
with flow:
flow.post(on="/index", inputs=input_generator(data_path, num_docs), show_progress=True)
def query(top_k: int) -> None:
flow = Flow().load_config('flows/flow-query.yml')
with flow:
text = input('Please type a question: ')
doc = Document(content=text)
result = flow.post(on='/search', inputs=DocumentArray([doc]),
parameters={'top_k': top_k},
line_format='text',
return_results=True,
)
for doc in result[0].data.docs:
print(f"\n\nAnswer: {doc.tags['answer']}")
@click.command()
@click.option(
'--task',
'-t',
type=click.Choice(['index', 'query'], case_sensitive=False),
)
@click.option('--num_docs', '-n', default=MAX_DOCS)
@click.option('--top_k', '-k', default=5)
@click.option('--data_set', '-d', type=click.Choice(['star-wars']), default='star-wars')
def main(task: str, num_docs: int, top_k: int, data_set: str) -> None:
    config(data_set)
workspace = os.environ['JINA_WORKSPACE']
logger = logging.getLogger('star-wars-qa')
if 'index' in task:
if os.path.exists(workspace):
logger.error(
f'\n +------------------------------------------------------------------------------------+ \
\n | 🤖🤖🤖 | \
\n | The directory {workspace} already exists. Please remove it before indexing again. | \
\n | 🤖🤖🤖 | \
\n +------------------------------------------------------------------------------------+'
)
sys.exit(1)
if 'query' in task:
if not os.path.exists(workspace):
# logger.error(f'The directory {workspace} does not exist. Please index first via `python app.py -t index`')
# sys.exit(1)
logger.info(f"The directory {workspace} does not exist. Running indexing...")
index(num_docs)
if task == 'index':
index(num_docs)
elif task == 'query':
query(top_k)
if __name__ == '__main__':
main()
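# Example invocations, based on the click options above (run from the project
# root, as referenced in the log message earlier in this file):
#   python app.py -t index            # index the Star Wars descriptions
#   python app.py -t index -n 100     # index only the first 100 lines
#   python app.py -t query -k 5       # interactive question answering, top 5 answers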
|
import errno
import gzip
from os import path
from datetime import datetime
from django.core.files.storage import FileSystemStorage, get_storage_class
from django.utils.functional import LazyObject, SimpleLazyObject
from compressor.conf import settings
class CompressorFileStorage(FileSystemStorage):
"""
Standard file system storage for files handled by django-compressor.
The defaults for ``location`` and ``base_url`` are ``COMPRESS_ROOT`` and
``COMPRESS_URL``.
"""
def __init__(self, location=None, base_url=None, *args, **kwargs):
if location is None:
location = settings.COMPRESS_ROOT
if base_url is None:
base_url = settings.COMPRESS_URL
super(CompressorFileStorage, self).__init__(location, base_url,
*args, **kwargs)
def accessed_time(self, name):
return datetime.fromtimestamp(path.getatime(self.path(name)))
def created_time(self, name):
return datetime.fromtimestamp(path.getctime(self.path(name)))
def modified_time(self, name):
return datetime.fromtimestamp(path.getmtime(self.path(name)))
def get_available_name(self, name):
"""
Deletes the given file if it exists.
"""
if self.exists(name):
self.delete(name)
return name
def delete(self, name):
"""
Handle deletion race condition present in Django prior to 1.4
https://code.djangoproject.com/ticket/16108
"""
try:
super(CompressorFileStorage, self).delete(name)
        except OSError as e:
if e.errno != errno.ENOENT:
raise
compressor_file_storage = SimpleLazyObject(
lambda: get_storage_class('compressor.storage.CompressorFileStorage')())
class GzipCompressorFileStorage(CompressorFileStorage):
"""
The standard compressor file system storage that gzips storage files
additionally to the usual files.
"""
def save(self, filename, content):
filename = super(GzipCompressorFileStorage, self).save(filename, content)
out = gzip.open(u'%s.gz' % self.path(filename), 'wb')
out.writelines(open(self.path(filename), 'rb'))
out.close()
return filename
class DefaultStorage(LazyObject):
def _setup(self):
self._wrapped = get_storage_class(settings.COMPRESS_STORAGE)()
default_storage = DefaultStorage()
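# Usage sketch (illustration only): with COMPRESS_ROOT / COMPRESS_URL configured
# in Django settings, GzipCompressorFileStorage writes the file plus a gzipped
# sibling. ContentFile comes from django.core.files.base; the path is hypothetical.
#
#   storage = GzipCompressorFileStorage()
#   name = storage.save('css/output.css', ContentFile(b'body {}'))
#   # -> <COMPRESS_ROOT>/css/output.css and <COMPRESS_ROOT>/css/output.css.gz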
|
from collections import defaultdict
# Puzzle Input ----------
with open('Day15-Input.txt', 'r') as file:
puzzle = file.read().split('\n')
with open('Day15-Test01.txt', 'r') as file:
test01 = file.read().split('\n')
# Main Code ----------
# Memoize the lowest risk found so far for each point
memory = defaultdict(lambda: float('inf'))
# Possible movements
movements = [[0, 1], [1, 0], [0, -1], [-1, 0]]
# Check if a step is valid
def is_valid(next_step: tuple, end: tuple):
# If it's out of bounds, it's not valid
if next_step[0] < 0 or next_step[1] < 0:
return False
# If it's after the end, it's not valid
if next_step[0] > end[0] or next_step[1] > end[1]:
return False
return True
# Recursively test every path
def recursive_pathing(cavern_map: list, start: tuple, end: tuple, current_path: set, current_risk: int):
global memory, movements
# If we can get here faster by another route, abandon this path
if current_risk >= memory[start]:
return
# If we can't get to the end fast enough, abandon this path
if current_risk + abs(start[0] - end[0]) + abs(start[1] - end[1]) > memory[end]:
return
# This is the shortest path here, save it
memory[start] = current_risk
if start == end:
return
# Try every step from here
for delta in movements:
next_step = (start[0] + delta[0], start[1] + delta[1])
# If we have already been in next_step or it isn't valid, skip it
if next_step in current_path:
continue
if not is_valid(next_step, end):
continue
# Recursively search the next step
next_step_risk = cavern_map[next_step[1]][next_step[0]]
        recursive_pathing(cavern_map, next_step, end, current_path | {next_step}, current_risk + next_step_risk)
# Find the lowest risk path
def lowest_risk_path(data: list):
global memory
# Reset the memory for the different inputs
memory = defaultdict(lambda: float('inf'))
# Parse the cavern
cavern_map = list(map(lambda x: list(map(int, x)), data))
cavern_size = len(cavern_map)
    # Get the start and end tiles
start = (0, 0)
end = (cavern_size - 1, cavern_size - 1)
# Maximum risk for the end point
memory[end] = 9 * 2 * (cavern_size - 1)
# Find and return the lowest risk path
recursive_pathing(cavern_map, start, end, set(), 0)
return memory[end]
# Tests and Solution ----------
print(lowest_risk_path(test01))
print(lowest_risk_path(puzzle))
|