from handwrite.sheettopng import SHEETtoPNG
from handwrite.pngtosvg import PNGtoSVG
from handwrite.svgtottf import SVGtoTTF
from handwrite.cli import converters
|
# $Id$
#
# @rocks@
# Copyright (c) 2000 - 2010 The Regents of the University of California
# All rights reserved. Rocks(r) v5.4 www.rocksclusters.org
# https://github.com/Teradata/stacki/blob/master/LICENSE-ROCKS.txt
# @rocks@
#
# $Log$
# Revision 1.2 2010/09/07 23:53:03 bruno
# star power for gb
#
# Revision 1.1 2010/06/07 23:50:12 bruno
# added a command to swap two interfaces
#
#
from stack.argument_processors.host import HostArgProcessor
import stack.commands
class command(HostArgProcessor, stack.commands.swap.command):
pass
|
import os
import argparse
from utils.transaction import Crawl
from utils.aws import Resource
from utils.mysql import MySQL
from utils.catch_exception import *
from selenium.common.exceptions import NoSuchElementException
def selenium_test():
# 3-second delay (delay=3), no headless mode (gui=True)
crawl = Crawl(delay=3, gui=True)
# Google search results page for 'python'
crawl.fetch('https://www.google.co.kr/search?q=python')
# List of Selenium WebElement objects for <a> tags
a_tags_elements = crawl.chrome.browser.find_elements_by_css_selector('#rso div.rc h3.r a')
# Extract the href attributes from the <a> tags
a_tags = [a.get_attribute('href') for a in a_tags_elements]
for link in a_tags:
crawl.fetch(link)
# Destroy the web driver
crawl.destroy_webdriver()
class MainExample:
def __init__(self):
# Selenium with GUI (not headless)
self.crawl = Crawl(delay=20, telegram_notify=False, gui=True)
self.aws = Resource()
self.mysql = MySQL()
@context_manager
def transaction(self, keyword, control_instances):
# Base page; search including omitted results (filter=0)
self.crawl.fetch('https://www.google.co.kr/search?q={}&filter=0'.format(keyword))
# Navigate through the result pages
self.navigate_page(5)
# Start the AWS 'Auxiliary' instances
self.aws.ec2_on_off(control_instances)
def navigate_page(self, max_page):
# paginate pattern
# nav > tbody > tr > td.cur > span
# nav > tbody > tr > td:nth-child([next]) > a > span
cur_page = None
for _ in range(max_page):
# List of Selenium WebElement objects for <a> tags
a_tags_elements = self.crawl.chrome.browser.find_elements_by_css_selector('#rso div.rc h3.r a')
# Extract the href attributes from the <a> tags
a_tags = [a.get_attribute('href') for a in a_tags_elements]
# Save the current page's links to the database
self.mysql.upload_page_lists(a_tags)
# Find the current page in the pagination bar
paginate = self.crawl.chrome.browser.find_elements_by_css_selector('#nav > tbody > tr > td')
for i, e in enumerate(paginate):
if e.get_attribute('class') == 'cur':
cur_page = i
break
# No next page
try:
next_element = paginate[cur_page + 1].find_element_by_css_selector('a')
next_page = next_element.get_attribute('href')
except NoSuchElementException:
break
# Request the next page
self.crawl.fetch(next_page)
class AuxExample:
def __init__(self):
# Selenium headless
self.crawl = Crawl(delay=20, telegram_notify=False, gui=False)
self.mysql = MySQL()
@context_manager
def transaction(self):
while True:
# Fetch a URL from the database
url = self.mysql.select_one_for_update()
if not url:
break
# Request that URL
html = self.crawl.fetch(url)
# Upload to the raw_html table
self.mysql.upload_html(url, html)
# Shut down the instance automatically on successful completion
os.system('poweroff')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--query')
parser.add_argument('--instance_name')
parser.add_argument('--aux', action='store_true')
args = parser.parse_args()
if args.aux:
example = AuxExample()
example.transaction()
else:
if args.query is not None:
example = MainExample()
# The search keyword can be changed
example.transaction(args.query, args.instance_name)
else:
print('A search keyword is required!')
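# Illustrative invocation sketch (hypothetical file name and argument values):
#   python crawler.py --query python --instance_name aux-crawler   # main crawl
#   python crawler.py --aux                                        # auxiliary worker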
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Configuration file for sphinxmark documentation."""
import os
import sys
from datetime import datetime
try:
import sphinx_rtd_theme
except ImportError:
sphinx_rtd_theme = None
try:
from sphinxcontrib import spelling
except ImportError as e:
print(e)
spelling = None
sys.path.insert(0, os.path.abspath("../sphinxmark"))
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.doctest",
"sphinx.ext.viewcode",
]
if spelling is not None:
extensions.append("sphinxcontrib.spelling")
source_suffix = ".rst"
master_doc = "index"
project = "sphinxmark"
copyright = f"{datetime.now().year}, Brian Moss"
author = "Brian Moss"
version = "1.0.0"
language = None
exclude_patterns = ["_build", "README.rst"]
pygments_style = "sphinx"
if sphinx_rtd_theme:
html_theme = "sphinx_rtd_theme"
else:
html_theme = "default"
html_logo = "_static/sphinx.png"
html_favicon = "_static/sphinx.ico"
html_use_smartypants = False
htmlhelp_basename = "doc"
html_permalinks = True
html_permalinks_icon = "#"
|
# Copyright (c) 2020 Daniel Pietz
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import owl
import csv
import sys
import os
import datetime
import threading
import time as tm
SERVER = "172.19.34.203"
BodyCount = 4
def recordThread():
global shouldStop
global frameIsReady
global markerWriter
global bodiesWriter
global BodyCount
global MarkerCount
i = 0
# instantiate context
o = owl.Context()
# connect to server with timeout of 10000000 microseconds
o.open(SERVER, "timeout=10000000")
# initialize session
o.initialize("streaming=1")
BodyFiles = BodyCount * [None]
BodyWriters = BodyCount * [None]
for i in range(0, BodyCount):
#BodyFiles[i] = open(os.path.join(SESSIONPATH,"Body " + str(i)) + ".csv", 'a+', newline='')
#BodyWriters[i] = csv.writer(BodyFiles[i], delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
pass
# main loop
evt = None
while (evt or (o.isOpen() and o.property("initialized"))) and (shouldStop == False):
#while (frameIsReady == True):
#pass
# poll for events with a timeout (microseconds)
evt = o.nextEvent(1000000)
# nothing received, keep waiting
if not evt: continue
# process event
if evt.type_id == owl.Type.FRAME:
# print rigids
if "rigids" in evt:
for r in evt.rigids:
BodyEventArr = [getTime(), r.pose]
for P in r.pose:
BodyEventArr.append(P)
pass
print(BodyEventArr)
#BodyWriters[r.id].writerow(BodyEventArr)
pass
elif evt.type_id == owl.Type.ERROR:
# handle errors
print(evt.name, evt.data)
if evt.name == "fatal":
break
elif evt.name == "done":
# done event is sent when master connection stops session
print("done")
break
#frameIsReady = True
# end main loop
# end session
o.done()
# close socket
o.close()
pass
def getTime():
return tm.time() * 1000000 - startTime
def userInputThread():
global shouldStop
while(True):
inputStr = input()
if inputStr == "stop":
print("STOPPING")
shouldStop = True
break
def main():
global shouldStop
global file
shouldStop = False
pThread = threading.Thread(target = userInputThread)
RecordThread = threading.Thread(target = recordThread)
pThread.start()
RecordThread.start()
print("Python Ready")
print(file)
pThread.join()
if __name__ == "__main__":
global startTime, file
startTime = int(sys.argv[1:][0])
file = sys.argv[1:][1]
SESSIONPATH = os.path.join(file, "MoCap.csv")
main()
|
from flask_jwt_extended import JWTManager
from dawdle.utils.errors import (build_400_error_response,
build_401_error_response)
jwt = JWTManager()
@jwt.expired_token_loader
def expired_token_loader(_):
return build_400_error_response(messages={
"token": [
"Token expired.",
],
})
@jwt.invalid_token_loader
def invalid_token_loader(_):
return build_400_error_response(messages={
"token": [
"Invalid token.",
],
})
@jwt.needs_fresh_token_loader
def needs_fresh_token_loader():
return build_400_error_response(messages={
"token": [
"Needs fresh token.",
],
})
@jwt.unauthorized_loader
def unauthorized_loader(_):
return build_401_error_response()
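# Illustrative wiring sketch (not part of this module): with Flask's application
# factory pattern, this JWTManager instance would typically be bound to the app
# elsewhere via `jwt.init_app(app)`, after which the loaders above are invoked
# automatically for expired, invalid, non-fresh, or missing tokens.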
|
from model.contact import Contact
from random import randrange
def test_contact_firstname(app, db, check_ui):
if app.contact.count() == 0:
app.contact.contact_create(
Contact(firstname="Сергей", middlename="Сергеевич", lastname="Сергеев", nickname="Серега",
address="г. Казань",
homephone="11111", mobile="22222",
work="333333", email="[email protected]", email2=111, email3=222, bday="", bmounth="May", byear="1975"))
old_contacts = db.get_contact_list()
index = randrange(len(old_contacts))
contact = Contact(firstname="aaaaaa", middlename="aaaaaaa", lastname="aaaaaaa", nickname="aaaaa", address="aaaaa",
homephone="777777", mobile="77777",
work="77777", email="[email protected]", email2=33, email3=44, bday="", bmounth="", byear="1111")
contact.id = old_contacts[index].id
app.contact.contact_update_by_index(index, contact)
new_contacts = db.get_contact_list()
assert len(old_contacts) == app.contact.count()
old_contacts[index] = contact
if check_ui:
assert sorted(new_contacts, key=Contact.id_or_max) == sorted(app.group.get_group_list(), key=Contact.id_or_max)
|
from django.urls import path
from . import views
urlpatterns = [
# 127.0.0.1:8000/bookstore/all_book
path('all_book',views.all_book),
# 127.0.0.1:8000/bookstore/add_book
path('add_book',views.add_book),
# 127.0.0.1:8000/bookstore/update_book/1
path('update_book/<int:bid>',views.update_book),
# 127.0.0.1:8000/bookstore/delete_book
path('delete_book', views.delete_book),
]
|
from generate_gerrit_jenkins_project.generate_gerrit_jenkins_project import generate_gerrit_jenkins_project
def main():
generate_gerrit_jenkins_project()
|
import numpy as np
from .simulator_model_datagen import ModelAnimDataGenerator
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle, FancyBboxPatch
import matplotlib.animation as animation
class ModelSimulator():
def __init__(self, datapacket, modelname="S_I_R"):
# Initialise the Figure
plt.rcParams['figure.dpi'] = 50
self.sim_fig, self.sim_ax = plt.subplots(figsize=(19.5,8))
self.sim_fig.tight_layout()
_,_,self.scaleFactor = datapacket
# Initialise Datageneator
self.dataGenerator = ModelAnimDataGenerator(modelname=modelname)
self.modelDataGenerator = self.dataGenerator.dataGen(_dp = datapacket)
self.sim_ani = animation.FuncAnimation(self.sim_fig, self.update_canvas, interval=100,
init_func = self.setup_canvas, blit=True)
plt.show()
def setup_canvas(self):
# Check the dates
dates, data = next(self.modelDataGenerator)
self.sim_ax.tick_params(left=False, labelleft=False,)
self.sim_ax.set_facecolor('k')
self.sim_ax.grid(which='both',linestyle='-.')
self.sim_ax.set_ylim(0,1)
# Add Pandemic Name Label
prop = dict(facecolor='none', edgecolor='lightgray', lw=4, boxstyle='round, pad=0.2')
self.sim_ax.text(0.015,0.87, 'COVID-19',
transform=self.sim_ax.transAxes, color='w',
fontweight='heavy', fontsize=60, bbox = prop)
# Add population and succeptible
prop = dict(facecolor='#5c5c5c', edgecolor='#ffe680', lw=2, boxstyle='round')
self.pop_text = '1,00,00,000'
self.sim_ax.text(0.015,0.77, 'Population',
transform=self.sim_ax.transAxes, color='w',
fontweight='heavy', fontsize=10, bbox = prop)
self.poptext = self.sim_ax.text(0.015,0.735, self.pop_text,
transform=self.sim_ax.transAxes, color='w',
fontweight='heavy', fontsize=10)
prop = dict(facecolor='#5c5c5c', edgecolor='#ffe680', lw=2, boxstyle='round')
self.succep_text = '1,00,000'
self.sim_ax.text(0.075,0.77, 'Succeptible',
transform=self.sim_ax.transAxes, color='w',
fontweight='heavy', fontsize=10, bbox = prop)
self.succeptext = self.sim_ax.text(0.075,0.735, self.succep_text,
transform=self.sim_ax.transAxes, color='w',
fontweight='heavy', fontsize=10)
# Add Infected
prop = dict(facecolor='#ff7575', edgecolor='#ffe680', lw=2, boxstyle='round')
self.sim_fig.patches.extend([Rectangle((0.035,0.55), 0.1, 0.152, fill=True,
alpha=1, facecolor='none', edgecolor='w', lw=1,
transform=self.sim_fig.transFigure, figure=self.sim_fig, zorder=1)])
self.sim_fig.patches.extend([Rectangle((0.08,0.55), 0.055, 0.07, fill=True,
alpha=1, facecolor='none', edgecolor='w', lw=1,
transform=self.sim_fig.transFigure, figure=self.sim_fig, zorder=1)])
self.sim_fig.patches.extend([Rectangle((0.08,0.63), 0.055, 0.07, fill=True,
alpha=1, facecolor='none', edgecolor='w', lw=1,
transform=self.sim_fig.transFigure, figure=self.sim_fig, zorder=1)])
prop = dict(facecolor='#a34d4d', edgecolor='w', lw=2)
self.infected_text = '{0}\n %'.format('00.0')
self.sim_ax.text(0.0153,0.558,'I\nN\nF\nE\nC\nT\nE\nD',
transform=self.sim_ax.transAxes, color='w',
fontweight='light', fontsize=8.2, bbox = prop)
self.infectedtext = self.sim_ax.text(0.026,0.615, self.infected_text,
transform=self.sim_ax.transAxes, color='w',
fontweight='heavy', fontsize=13)
prop = dict(facecolor='#a34d4d', edgecolor='w', lw=1)
self.testing_text = '{0}%'.format('00.0')
self.sim_ax.text(0.062,0.68,' Tested ',
transform=self.sim_ax.transAxes, color='w',
fontweight='heavy', fontsize=10, bbox = prop)
self.testingtext = self.sim_ax.text(0.067,0.64,self.testing_text,
transform=self.sim_ax.transAxes, color='w',
fontweight='heavy', fontsize=15)
prop = dict(facecolor='#a34d4d', edgecolor='w', lw=1)
self.asympto_text = '{0}%'.format('00.0')
self.sim_ax.text(0.061,0.6,' Asympto ',
transform=self.sim_ax.transAxes, color='w',
fontweight='heavy', fontsize=10, bbox = prop)
self.asymptotext = self.sim_ax.text(0.067,0.56,self.asympto_text,
transform=self.sim_ax.transAxes, color='w',
fontweight='heavy', fontsize=15)
# Add Removed
prop = dict(facecolor='#5c5c5c', edgecolor='#ffe680', lw=2, boxstyle='round')
self.sim_fig.patches.extend([Rectangle((0.035,0.35), 0.1, 0.152, fill=True,
alpha=1, facecolor='none', edgecolor='w', lw=1,
transform=self.sim_fig.transFigure, figure=self.sim_fig, zorder=1)])
self.sim_fig.patches.extend([Rectangle((0.08,0.35), 0.055, 0.07, fill=True,
alpha=1, facecolor='none', edgecolor='w', lw=1,
transform=self.sim_fig.transFigure, figure=self.sim_fig, zorder=1)])
self.sim_fig.patches.extend([Rectangle((0.08,0.43), 0.055, 0.07, fill=True,
alpha=1, facecolor='none', edgecolor='w', lw=1,
transform=self.sim_fig.transFigure, figure=self.sim_fig, zorder=1)])
prop = dict(facecolor='#5c5c5c', edgecolor='w', lw=2)
self.removed_text = '{0}\n %'.format('00.0')
self.sim_ax.text(0.015,0.342,'R\nE\nM\nO\nV\nE\nD\n',
transform=self.sim_ax.transAxes, color='w',
fontweight='light', fontsize=8.2, bbox = prop)
self.removedtext = self.sim_ax.text(0.025, 0.4, self.removed_text,
transform=self.sim_ax.transAxes, color='w',
fontweight='heavy', fontsize=13)
prop = dict(facecolor='#5c5c5c', edgecolor='w', lw=2)
self.recov_text = '{0}%'.format('00.0')
self.sim_ax.text(0.0625, 0.47,' Recovered ',
transform=self.sim_ax.transAxes, color='w',
fontweight='heavy', fontsize=10, bbox = prop)
self.recovtext = self.sim_ax.text(0.067,0.43,self.recov_text,
transform=self.sim_ax.transAxes, color='w',
fontweight='heavy', fontsize=15)
prop = dict(facecolor='#5c5c5c', edgecolor='w', lw=2)
self.death_text = '{0}%'.format('00.0')
self.sim_ax.text(0.062,0.38,' Death ',
transform=self.sim_ax.transAxes, color='w',
fontweight='heavy', fontsize=10, bbox = prop)
self.deathtext = self.sim_ax.text(0.066, 0.34,self.death_text,
transform=self.sim_ax.transAxes, color='w',
fontweight='heavy', fontsize=15)
self.infectedfill = self.sim_ax.fill_between(dates, data[:,1], interpolate=True, alpha=1.0,
facecolor='#f5653d', edgecolor='k')
self.removedfill = self.sim_ax.fill_between(dates, 1-data[:,2], y2=1, interpolate=True, alpha=0.5,
facecolor='#999999', edgecolor='k')
infec_val = data[:,1][-1]
self.infeclbl_text = 'Infected \n{0}%'
self.infeclbltext = self.sim_ax.text(0.96, infec_val*0.5,
self.infeclbl_text.format(str(np.round(infec_val*100,2))),
transform=self.sim_ax.transAxes, color='r',
fontweight='heavy', fontsize=10)
succep_val = data[:,0][-1]
self.succeclbl_text ='Succeptible \n{0}%'
self.succeclbltext = self.sim_ax.text(0.955, infec_val+succep_val*0.5,
self.succeclbl_text.format(str(np.round(succep_val*100,2))),
transform=self.sim_ax.transAxes, color='w',
fontweight='heavy', fontsize=10)
recov_val = data[:, 2][-1]
self.recovlbl_text = 'Removed \n{0}%'
self.recovlbltext = self.sim_ax.text(0.955,1 - 0.5*recov_val - 0.04,
self.recovlbl_text.format(str(np.round(recov_val*100,2))),
transform=self.sim_ax.transAxes, color='lightgray',
fontweight='heavy', fontsize=10)
return self.sim_fig,
def update_canvas(self, i):
dates, data = next(self.modelDataGenerator)
self.sim_ax.set_xlim(dates[0], dates[-1])
succep_pct_lst = data[:,0][-1]
infec_pct_lst = data[:,1][-1]
remov_pct_lst = data[:,2][-1]
self.succeptext.set_text(str(int(np.round(succep_pct_lst*self.scaleFactor))))
self.infected_text = '{0}%'.format(np.round(infec_pct_lst*100, 1))
self.infectedtext.set_text(self.infected_text)
self.testingtext.set_text('--.-')
self.asymptotext.set_text('--.-')
self.removed_text = '{0}%'.format(np.round(remov_pct_lst*100, 1))
self.removedtext.set_text(self.removed_text)
self.recovtext.set_text('--.-')
self.deathtext.set_text('--.-')
_infectext = self.infeclbl_text.format(str(np.round(infec_pct_lst*100,2)))
self.infeclbltext.set_text(_infectext)
self.infeclbltext.set_position((0.96, infec_pct_lst*0.5))
_succectext = self.succeclbl_text.format(str(np.round(succep_pct_lst*100,2)))
self.succeclbltext.set_text(_succectext)
self.succeclbltext.set_position((0.955, infec_pct_lst+succep_pct_lst*0.5))
_removtext = self.recovlbl_text.format(str(np.round(remov_pct_lst*100,2)))
self.recovlbltext.set_text(_removtext)
self.recovlbltext.set_position((0.955,1 - 0.5*remov_pct_lst - 0.04))
self.infectedfill.remove()
self.infectedfill = self.sim_ax.fill_between(dates, data[:,1], interpolate=True, alpha=1.0,
facecolor='#f5653d', edgecolor='k')
self.removedfill.remove()
self.removedfill = self.sim_ax.fill_between(dates, 1-data[:,2], y2=1, interpolate=True, alpha=0.5,
facecolor='#999999', edgecolor='k')
return self.sim_fig,
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 16 15:49:54 2020
@author: sdesnoo
"""
import logging
from pulse_lib.schedule.hardware_schedule import HardwareSchedule
from .hvi2_schedule_extensions import add_extensions
from hvi2_script.system import HviSystem
from hvi2_script.sequencer import HviSequencer
import keysightSD1 as SD1
import uuid
class Hvi2Schedule(HardwareSchedule):
verbose = False
def __init__(self, hardware, script):
self.hardware = hardware
self.script = script
self.extensions = add_extensions
self.hvi_system = None
self.hvi_sequence = None
self.hvi_exec = None
self._is_loaded = False
self._might_be_loaded = False
self.schedule_parms = {}
self.hvi_id = uuid.uuid4()
def set_schedule_parameters(self, **kwargs):
for key,value in kwargs.items():
self.schedule_parms[key] = value
def configure_modules(self):
for awg in self.hardware.awgs:
for ch in range(1, 5):
awg.awg_stop(ch)
awg.set_channel_wave_shape(SD1.SD_Waveshapes.AOU_AWG, ch)
awg.awg_queue_config(ch, SD1.SD_QueueMode.CYCLIC)
for dig in self.hardware.digitizers:
dig.daq_stop_multiple(0b1111)
dig.daq_flush_multiple(0b1111)
def reconfigure_modules(self):
for awg in self.hardware.awgs:
for ch in range(1, 5):
awg.awg_stop(ch)
# rewrite amplitude and offset bypassing cache.
amplitude = awg._settings_cache['amplitude'][ch]
if amplitude is not None:
awg._settings_cache['amplitude'][ch] = None
awg.set_channel_amplitude(amplitude, ch)
offset = awg._settings_cache['offset'][ch]
if offset is not None:
awg._settings_cache['offset'][ch] = None
awg.set_channel_offset(offset, ch)
awg.set_channel_wave_shape(SD1.SD_Waveshapes.AOU_AWG, ch)
awg.awg_queue_config(ch, SD1.SD_QueueMode.CYCLIC)
for dig in self.hardware.digitizers:
dig.daq_stop_multiple(0b1111)
dig.daq_flush_multiple(0b1111)
def compile(self):
logging.info(f"Build HVI2 schedule with script '{self.script.name}'")
hvi_system = HviSystem()
for awg in self.hardware.awgs:
sd_aou = awg.awg
hvi_system.add_awg(sd_aou, awg.name)
for dig in self.hardware.digitizers:
sd_ain = dig.SD_AIN
hvi_system.add_digitizer(sd_ain, dig.name)
self.hvi_system = hvi_system
if self.extensions is not None:
self.extensions(hvi_system)
sequencer = HviSequencer(hvi_system)
self.sequencer = sequencer
self.script.sequence(sequencer, self.hardware)
if self.verbose:
logging.debug(f"Script '{self.script.name}':\n" + self.sequencer.describe())
try:
self.hvi_exec = self.sequencer.compile()
except:
logging.error(f"Exception in compilation of '{self.script.name}'", exc_info=True)
raise
def is_loaded(self):
return self._is_loaded
def load(self):
if self._is_loaded:
logging.info(f'HVI2 schedule already loaded')
return
self.hardware.release_schedule()
self.configure_modules()
if self.hvi_exec is None:
self.compile()
logging.info(f"Load HVI2 schedule with script '{self.script.name}' (id:{self.hvi_id})")
self.hardware.set_schedule(self)
self._might_be_loaded = True
self.hvi_exec.load()
if self.hvi_exec.is_running():
logging.warning(f'HVI running after load; attempting to stop HVI and modules')
self.hvi_exec.stop()
self.reconfigure_modules()
if self.hvi_exec.is_running():
logging.error('Still running after stop')
self._is_loaded = True
def unload(self):
if not self.hvi_exec:
return
if self._is_loaded:
self.script.stop(self.hvi_exec)
self._is_loaded = False
if self._might_be_loaded:
logging.info(f"Unload HVI2 schedule with script'{self.script.name}' (id:{self.hvi_id})")
self.hvi_exec.unload()
self._might_be_loaded = False
self.hardware.release_schedule()
def is_running(self):
return self.hvi_exec.is_running()
def start(self, waveform_duration, n_repetitions, sequence_variables):
hvi_params = {**self.schedule_parms, **sequence_variables}
if self.verbose:
logging.debug(f'start: {hvi_params}')
self.script.start(self.hvi_exec, waveform_duration, n_repetitions, hvi_params)
def stop(self):
self.script.stop(self.hvi_exec)
def close(self):
self.unload()
self.hvi_exec = None
def __del__(self):
if self._is_loaded:
try:
logging.warning(f'Automatic close of Hvi2Schedule in __del__()')
# self.unload()
except:
logging.error(f'Exception unloading HVI', exc_info=True)
|
import os
import sys
import numpy as np
# Root directory of the project
from PIL import Image
ROOT_DIR = os.path.abspath(".")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
sys.path.append("/content/backlash") # To find local version of the library on Google Colab
from mrcnn import utils
import mrcnn.model as modellib
from mrcnn import visualize
import skimage.io
# Directory to save logs and trained model
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
# Directory of images to run detection on
IMAGE_DIR = os.path.join(ROOT_DIR, "images")
# web server with tasks to process
WEB_SERVER = "http://backlash.graycake.com"
import samples.coco.coco as coco
import samples.backlash.backlash as backlash
import samples.backlash2.backlash2 as backlash2
class MaskRCNNModel():
MODEL_FILE_NAME = "mask_rcnn_coco.h5"
CLASSES = ['BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
'bus', 'train', 'truck', 'boat', 'traffic light',
'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird',
'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',
'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',
'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
'kite', 'baseball bat', 'baseball glove', 'skateboard',
'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',
'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',
'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
'keyboard', 'cell phone', 'microwave', 'oven', 'toaster',
'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors',
'teddy bear', 'hair drier', 'toothbrush']
CONFIG_CLASS = coco.CocoConfig
def __init__(self, model_file_name=None):
# Local path to trained weights file
COCO_MODEL_PATH = model_file_name if model_file_name else os.path.join(ROOT_DIR, self.MODEL_FILE_NAME)
class InferenceConfig(self.CONFIG_CLASS):
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
GPU_COUNT = 1
IMAGES_PER_GPU = 1
NUM_CLASSES = len(self.CLASSES) # COCO has 80 classes
self.config = InferenceConfig()
# config.display()
# Create model object in inference mode.
model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=self.config)
# Load weights
model.load_weights(COCO_MODEL_PATH, by_name=True)
# COCO Class names
# Index of the class in the list is its ID. For example, to get ID of
# the teddy bear class, use: class_names.index('teddy bear')
class_names = self.CLASSES
self.model = model
class BacklashMaskRCNNModel(MaskRCNNModel):
CLASSES = ['BG', 'policeman']
MODEL_FILE_NAME = "logs/police220211106T1204/mask_rcnn_police2_0019.h5"
CONFIG_CLASS = backlash.PoliceConfig
class Backlash2MaskRCNNModel(MaskRCNNModel):
CLASSES = ['BG', 'policeman', 'protester']
MODEL_FILE_NAME = "logs/police220211106T1204/mask_rcnn_police2_0019.h5"
CONFIG_CLASS = backlash2.PoliceConfig
# IMAGE_DIR = os.path.join(ROOT_DIR, "datasets/police/val")
# file_names = next(os.walk(IMAGE_DIR))[2]
# file_names = list(filter(lambda x: not x.endswith("json"), file_names))
# image = skimage.io.imread(os.path.join(IMAGE_DIR, file_names[0]))
# The two detectors are created lazily on first use; initialise the module-level
# handles here so the `is None` check below does not raise a NameError.
model_full = None
model = None
def process_image(image, color=(1.0, 1.0, 0.0)):
# Run detection
global model_full, model
if model_full is None:
model = BacklashMaskRCNNModel()
model_full = MaskRCNNModel()
results = model_full.model.detect([image], verbose=1)
results_policeman = model.model.detect([image], verbose=1)
mask_other = np.logical_or.reduce(results[0]['masks'][:,:,results[0]['class_ids'] == 1], axis=2)
mask_policeman = np.logical_or.reduce(results_policeman[0]['masks'][:,:,results_policeman[0]['scores'] > 0.5], axis=2)
# mask = np.logical_or(mask_policeman, mask_other)
# plt.imshow(mask.astype(np.uint8))
# masked_image = image.astype(np.uint32).copy()
# masked_image = visualize.apply_mask(masked_image, mask, visualize.random_colors(2)[0], alpha=1)
# # plt.imshow(masked_image.astype(np.uint8))
mask = np.logical_and(mask_other, np.logical_not(mask_policeman))
masked_image = image.astype(np.uint32).copy()
masked_image = visualize.apply_mask(masked_image, mask, color, alpha=1)
# plt.imshow(masked_image.astype(np.uint8))
return masked_image
if __name__ == '__main__':
image = process_image(skimage.io.imread("images/no-cops-test.jpg"))
image = Image.fromarray(image.astype(np.uint8))
image.save("test.jpg")
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 12 02:57:59 2020
@author: eduardo
"""
import warnings
from .country import *
from .dataset import *
from .functions import *
from .region import *
from .models import *
from .stat import *
# neglecting warnings
warnings.filterwarnings('ignore')
|
import os
import pytest
from dvc.compat import fspath, fspath_py35
from dvc.dvcfile import Dvcfile
@pytest.mark.parametrize("cached", [True, False])
def test_update_import(tmp_dir, dvc, erepo_dir, cached):
gen = erepo_dir.dvc_gen if cached else erepo_dir.scm_gen
with erepo_dir.branch("branch", new=True), erepo_dir.chdir():
gen("version", "branch", "add version file")
old_rev = erepo_dir.scm.get_rev()
stage = dvc.imp(fspath(erepo_dir), "version", "version", rev="branch")
assert (tmp_dir / "version").read_text() == "branch"
assert stage.deps[0].def_repo["rev_lock"] == old_rev
# Update version file
with erepo_dir.branch("branch", new=False), erepo_dir.chdir():
gen("version", "updated", "update version content")
new_rev = erepo_dir.scm.get_rev()
assert old_rev != new_rev
dvc.update([stage.path])
assert (tmp_dir / "version").read_text() == "updated"
stage = Dvcfile(dvc, stage.path).stage
assert stage.deps[0].def_repo["rev_lock"] == new_rev
def test_update_import_after_remote_updates_to_dvc(tmp_dir, dvc, erepo_dir):
old_rev = None
with erepo_dir.branch("branch", new=True), erepo_dir.chdir():
erepo_dir.scm_gen("version", "branch", commit="add version file")
old_rev = erepo_dir.scm.get_rev()
stage = dvc.imp(fspath(erepo_dir), "version", "version", rev="branch")
imported = tmp_dir / "version"
assert imported.is_file()
assert imported.read_text() == "branch"
assert stage.deps[0].def_repo == {
"url": fspath(erepo_dir),
"rev": "branch",
"rev_lock": old_rev,
}
new_rev = None
with erepo_dir.branch("branch", new=False), erepo_dir.chdir():
erepo_dir.scm.repo.index.remove(["version"])
erepo_dir.dvc_gen("version", "updated")
erepo_dir.scm.add(["version", "version.dvc"])
erepo_dir.scm.commit("upgrade to DVC tracking")
new_rev = erepo_dir.scm.get_rev()
assert old_rev != new_rev
(status,) = dvc.status([stage.path])["version.dvc"]
(changed_dep,) = list(status["changed deps"].items())
assert changed_dep[0].startswith("version ")
assert changed_dep[1] == "update available"
dvc.update([stage.path])
assert dvc.status([stage.path]) == {}
assert imported.is_file()
assert imported.read_text() == "updated"
stage = Dvcfile(dvc, stage.path).stage
assert stage.deps[0].def_repo == {
"url": fspath(erepo_dir),
"rev": "branch",
"rev_lock": new_rev,
}
def test_update_before_and_after_dvc_init(tmp_dir, dvc, git_dir):
with git_dir.chdir():
git_dir.scm_gen("file", "first version", commit="first version")
old_rev = git_dir.scm.get_rev()
stage = dvc.imp(fspath(git_dir), "file", "file")
with git_dir.chdir():
git_dir.init(dvc=True)
git_dir.scm.repo.index.remove(["file"])
os.remove("file")
git_dir.dvc_gen("file", "second version", commit="with dvc")
new_rev = git_dir.scm.get_rev()
assert old_rev != new_rev
assert dvc.status([stage.path]) == {
"file.dvc": [
{
"changed deps": {
"file ({})".format(fspath(git_dir)): "update available"
}
}
]
}
dvc.update([stage.path])
assert (tmp_dir / "file").read_text() == "second version"
assert dvc.status([stage.path]) == {}
def test_update_import_url(tmp_dir, dvc, tmp_path_factory):
import_src = tmp_path_factory.mktemp("import_url_source")
src = import_src / "file"
src.write_text("file content")
dst = tmp_dir / "imported_file"
stage = dvc.imp_url(fspath(src), fspath(dst))
assert dst.is_file()
assert dst.read_text() == "file content"
# update data
src.write_text("updated file content")
assert dvc.status([stage.path]) == {}
dvc.update([stage.path])
assert dvc.status([stage.path]) == {}
assert dst.is_file()
assert dst.read_text() == "updated file content"
def test_update_rev(tmp_dir, dvc, scm, git_dir):
with git_dir.chdir():
git_dir.scm_gen({"foo": "foo"}, commit="first")
dvc.imp(fspath(git_dir), "foo")
assert (tmp_dir / "foo.dvc").exists()
with git_dir.chdir(), git_dir.branch("branch1", new=True):
git_dir.scm_gen({"foo": "foobar"}, commit="branch1 commit")
branch1_head = git_dir.scm.get_rev()
with git_dir.chdir(), git_dir.branch("branch2", new=True):
git_dir.scm_gen({"foo": "foobar foo"}, commit="branch2 commit")
branch2_head = git_dir.scm.get_rev()
stage = dvc.update(["foo.dvc"], rev="branch1")[0]
assert stage.deps[0].def_repo == {
"url": fspath(git_dir),
"rev": "branch1",
"rev_lock": branch1_head,
}
with open(fspath_py35(tmp_dir / "foo")) as f:
assert "foobar" == f.read()
stage = dvc.update(["foo.dvc"], rev="branch2")[0]
assert stage.deps[0].def_repo == {
"url": fspath(git_dir),
"rev": "branch2",
"rev_lock": branch2_head,
}
with open(fspath_py35(tmp_dir / "foo")) as f:
assert "foobar foo" == f.read()
def test_update_recursive(tmp_dir, dvc, erepo_dir):
with erepo_dir.branch("branch", new=True), erepo_dir.chdir():
erepo_dir.scm_gen(
{"foo1": "text1", "foo2": "text2", "foo3": "text3"},
commit="add foo files",
)
old_rev = erepo_dir.scm.get_rev()
tmp_dir.gen({"dir": {"subdir": {}}})
stage1 = dvc.imp(
fspath(erepo_dir), "foo1", os.path.join("dir", "foo1"), rev="branch",
)
stage2 = dvc.imp(
fspath(erepo_dir),
"foo2",
os.path.join("dir", "subdir", "foo2"),
rev="branch",
)
stage3 = dvc.imp(
fspath(erepo_dir),
"foo3",
os.path.join("dir", "subdir", "foo3"),
rev="branch",
)
assert (tmp_dir / os.path.join("dir", "foo1")).read_text() == "text1"
assert (
tmp_dir / os.path.join("dir", "subdir", "foo2")
).read_text() == "text2"
assert (
tmp_dir / os.path.join("dir", "subdir", "foo3")
).read_text() == "text3"
assert stage1.deps[0].def_repo["rev_lock"] == old_rev
assert stage2.deps[0].def_repo["rev_lock"] == old_rev
assert stage3.deps[0].def_repo["rev_lock"] == old_rev
with erepo_dir.branch("branch", new=False), erepo_dir.chdir():
erepo_dir.scm_gen(
{"foo1": "updated1", "foo2": "updated2", "foo3": "updated3"},
"",
"update foo content",
)
new_rev = erepo_dir.scm.get_rev()
assert old_rev != new_rev
dvc.update(["dir"], recursive=True)
stage1 = Dvcfile(dvc, stage1.path).stage
stage2 = Dvcfile(dvc, stage2.path).stage
stage3 = Dvcfile(dvc, stage3.path).stage
assert stage1.deps[0].def_repo["rev_lock"] == new_rev
assert stage2.deps[0].def_repo["rev_lock"] == new_rev
assert stage3.deps[0].def_repo["rev_lock"] == new_rev
|
class Solution(object):
def search(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
# Runtime: 188 ms
# Memory: 14.5 MB
# Binary search
start = 0
end = len(nums) - 1
while start <= end:
mid = (start + end) // 2
if nums[mid] == target:
return mid
elif nums[mid] > target:
end = mid - 1
else:
start = mid + 1
return -1
class Solution(object):
def search(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
# Runtime: 196 ms
# Memory: 14.7 MB
# Linear search
try:
return nums.index(target)
except ValueError:
return -1
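# Illustrative usage sketch (assumed inputs; `nums` must be sorted ascending for
# the binary-search variant above):
#   Solution().search([-1, 0, 3, 5, 9, 12], 9)   # -> 4
#   Solution().search([-1, 0, 3, 5, 9, 12], 2)   # -> -1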
|
"""Support for a ScreenLogic 'circuit' switch."""
import logging
from screenlogicpy.const import ON_OFF
from homeassistant.components.switch import SwitchEntity
from . import ScreenlogicEntity
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up entry."""
entities = []
data = hass.data[DOMAIN][config_entry.entry_id]
coordinator = data["coordinator"]
for switch in data["devices"]["switch"]:
entities.append(ScreenLogicSwitch(coordinator, switch))
async_add_entities(entities, True)
class ScreenLogicSwitch(ScreenlogicEntity, SwitchEntity):
"""ScreenLogic switch entity."""
@property
def name(self):
"""Get the name of the switch."""
return f"{self.gateway_name} {self.circuit['name']}"
@property
def is_on(self) -> bool:
"""Get whether the switch is in on state."""
return self.circuit["value"] == 1
async def async_turn_on(self, **kwargs) -> None:
"""Send the ON command."""
return await self._async_set_circuit(ON_OFF.ON)
async def async_turn_off(self, **kwargs) -> None:
"""Send the OFF command."""
return await self._async_set_circuit(ON_OFF.OFF)
async def _async_set_circuit(self, circuit_value) -> None:
if await self.hass.async_add_executor_job(
self.gateway.set_circuit, self._data_key, circuit_value
):
_LOGGER.debug("Screenlogic turn %s %s", circuit_value, self._data_key)
await self.coordinator.async_request_refresh()
else:
_LOGGER.info("Screenlogic turn %s %s error", circuit_value, self._data_key)
@property
def circuit(self):
"""Shortcut to access the circuit."""
return self.circuits_data[self._data_key]
@property
def circuits_data(self):
"""Shortcut to access the circuits data."""
return self.coordinator.data["circuits"]
|
import django_filters
from .models import Organization, Application
class ApplicationFilter(django_filters.FilterSet):
name = django_filters.CharFilter(lookup_expr='icontains')
organization = django_filters.ModelMultipleChoiceFilter(queryset=Organization.objects.all())
business_criticality = django_filters.MultipleChoiceFilter(choices=Application.BUSINESS_CRITICALITY_CHOICES)
platform = django_filters.MultipleChoiceFilter(choices=Application.PLATFORM_CHOICES)
lifecycle = django_filters.MultipleChoiceFilter(choices=Application.LIFECYCLE_CHOICES)
origin = django_filters.MultipleChoiceFilter(choices=Application.ORIGIN_CHOICES)
asvs_level = django_filters.MultipleChoiceFilter(choices=Application.ASVS_CHOICES)
class Meta:
model = Application
fields = [
'name', 'organization', 'business_criticality', 'platform', 'lifecycle', 'origin', 'external_audience',
'internet_accessible', 'technologies', 'regulations', 'service_level_agreements', 'tags', 'asvs_level'
]
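# Illustrative usage sketch (hypothetical view code, not part of this module):
# a FilterSet is typically bound to the request's query parameters and a base
# queryset, and the filtered results are read from its `.qs` property:
#   app_filter = ApplicationFilter(request.GET, queryset=Application.objects.all())
#   filtered_apps = app_filter.qs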
|
"""Define abstract base classes of how the local chemical stock database should
behave. This interface will be implemented in one of two ways: using sqlalchemy,
and using puchdb.
"""
import typing
import hashlib
import json
import datetime
import serverlib.timelib as timelib
import serverlib.qai_helper as qai_helper
class BaseLocMutation:
VALID_OPS = frozenset(['missing', 'found', 'moved'])
def do_hash(dat: typing.Any) -> str:
"""Calculate a hash function of a data structure.
This routine works by converting a data structure to a json string,
then applying the SHA1 algorithm.
Finally, the hexdigest is returned.
Attention:
Because conversion to json is involved, only serialisable data structures
can be input.
Note:
In this routine, crucially, keys are sorted in dicts when converting
to json. This ensures that identical dicts created
differently (adding elements in a different order)
produce the same hash.
Args:
dat: A serialisable python data structure to calculate the hash of.
Returns:
A string representing a hash function.
"""
return hashlib.sha1(json.dumps(dat, sort_keys=True).encode('utf-8')).hexdigest()
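# Illustrative sketch (not part of the original module): because keys are sorted
# when dumping to json, dict insertion order does not affect the digest:
#   do_hash({'a': 1, 'b': 2}) == do_hash({'b': 2, 'a': 1})   # -> True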
class LocNode:
"""An internal helper class used to sort the hierarchical location names."""
def __init__(self, name: str) -> None:
self.name = name
self.child_dct: typing.Dict[str, "LocNode"] = {}
self.val: typing.Optional[dict] = None
def addtree(self, dct) -> None:
"""Add a location dictionary to a leaf in the tree based on its
hierarchical name."""
namelst = dct['name'].split('\\')
# print('nlst {}'.format(namelst))
n_n = self
for curname in namelst:
nextlevel = n_n.child_dct.get(curname, None)
if nextlevel is None:
nextlevel = n_n.child_dct[curname] = LocNode(curname)
n_n = nextlevel
n_n.setval(dct)
def setval(self, newval) -> None:
"""Set the value of the LocNode exactly once"""
if self.val is None:
self.val = newval
else:
raise RuntimeError('LocNode value set twice!')
def getval(self) -> typing.Optional[dict]:
"""Return this LocNode's value"""
return self.val
def dfslst(self, topname, lst) -> None:
"""Perform a DFS traversal starting from this node."""
fullname = "{}.{}".format(topname, self.name)
val = self.getval()
if val is not None:
lst.append(val)
for child in sorted(self.child_dct.values(), key=lambda a: a.name):
child.dfslst(fullname, lst)
def sortloclist(orglst: typing.List[dict]) -> typing.List[dict]:
"""Sort the list of location dicts in a hierarchically sensible order.
The method applied is the following: Nodes are added to a tree based on name,
then an in-order DFS traversal is performed (children are sorted alphabetically)
to sort the list.
Args:
orglst: the original, unsorted list of location dicts
Returns:
A copy of the input list sorted according to hierarchical location.
"""
root = LocNode('')
for dct in orglst:
root.addtree(dct)
rlst: typing.List[dict] = []
root.dfslst("", rlst)
return rlst
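# Illustrative sketch (hypothetical location dicts, not part of the original module):
# sortloclist() orders records by their backslash-separated hierarchical names, so
# parents precede children and siblings are sorted alphabetically:
#   sortloclist([{'name': 'SPH\\638'}, {'name': 'SPH'}, {'name': 'SPH\\605'}])
#   # -> [{'name': 'SPH'}, {'name': 'SPH\\605'}, {'name': 'SPH\\638'}]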
DBRecList = typing.List[typing.Dict[str, typing.Any]]
LocChangeTup = typing.Tuple[int, str]
LocChangeList = typing.List[LocChangeTup]
class BaseDB:
"""Define some common operations between databases. This includes
how to interact with QAI via HTTP requests.
"""
def __init__(self,
qaisession: typing.Optional[qai_helper.QAISession],
tz_name: str) -> None:
"""
This database is accessed by the stocky web server.
It is passed a :class:`qai_helper.QAISession` instance which it
uses to access the QAI database via an HTTP API.
This stock information is stored to a local file locQAIfname as an sqlite3 database
in the server state directory if a name is provided. Otherwise it is stored in memory.
Args:
qaisession: the session instance used to access the QAI server.
tz_name: the name of the local timezone.
"""
self.qaisession = qaisession
timelib.set_local_timezone(tz_name)
self._current_date = timelib.loc_nowtime().date()
self._db_has_changed = True
self._cachedct: typing.Optional[dict] = None
def has_changed(self) -> bool:
"""Return : the database has changed since the last time
data for the webclient was extracted from it.
"""
return self._db_has_changed
def get_ts_data(self) -> qai_helper.QAIChangedct:
"""Retrieve the current timestamp data from the database.
For each database table, we keep a timestamp indicating the last
time it was updated. The dict of these timestamps is returned.
"""
raise NotImplementedError("not implemented")
def update_from_qai(self) -> dict:
"""Update the local ChemStock database using the qaisession.
Returns:
A dict describing what happened (success, error messages)
"""
qaisession = self.qaisession
if qaisession is None or not qaisession.is_logged_in():
return dict(ok=False, msg="User not logged in")
# get the locally stored timestamp data from our database
cur_tsdata = self.get_ts_data()
try:
newds = qai_helper.QAIDataset(None, cur_tsdata)
except RuntimeError as err:
return dict(ok=False, msg="QAI access error: {}".format(str(err)))
# load those parts from QAI that are out of date
update_dct = qaisession.clever_update_qai_dump(newds)
# if any value is True, then we did get something from QAI...
num_updated = sum(update_dct.values())
if num_updated > 0:
try:
self._db_has_changed = self.load_qai_data(newds, update_dct)
except TypeError as err:
return dict(ok=False, msg="database error: {}".format(str(err)))
return dict(ok=True, msg="Successfully updated {} tables for QAI".format(num_updated))
def load_qai_data(self,
qai_ds: qai_helper.QAIDataset,
update_dct: typing.Optional[qai_helper.QAIUpdatedct] = None) -> bool:
"""Replace the complete database contents with the data contained in qai_ds.
If update_dct is provided, only update those tables for which
update_dct[idname] is True.
Args:
qai_ds: the dataset provided by from QAI.
update_dct: indicate those tables that need updating.
Returns:
'the update was successful'.
"""
raise NotImplementedError('override this in subclasses')
def calc_final_state(self, slst: typing.List[dict]) -> typing.Tuple[dict, bool, bool]:
""" Calculate the final state from this list of states.
We return the nominal state record and two booleans:
ismissing, hasexpired.
Strategy: we assign values to the various possible states and sort according
to these values.
Any missing record will be the first one.
The exp record is the last one (should exist, check the date with current date)
The nominal state is the second to last in the list.
"""
if len(slst) < 2:
# raise RuntimeError("state list is too short {}".format(slst))
# the list may also contain a single EXPIRED record
# or, in legacy cases, a single IN_USE record.
nom_state = exp_dict = slst[0]
ismissing = False
# if exp_dict['status'] != 'EXPIRED':
# raise RuntimeError('exp_dict is not expired {}'.format(exp_dict))
else:
odct = dict(MISSING=-1, MADE=0, VALIDATED=1, IN_USE=2,
USED_UP=5, EXPIRED=6, RUO_EXPIRED=7, DISPOSED=8)
# create tuples of input dicts with scores from odct.
try:
plst = [(d, odct.get(d['status'], None)) for d in slst]
except KeyError:
raise RuntimeError("status field missing in state record {}".format(slst))
qlst = [tt for tt in plst if tt[1] is not None]
qlst.sort(key=lambda a: a[1])
exp_dict = qlst[-1][0]
nom_state = qlst[-2][0]
ismissing = qlst[0][0]['status'] == 'MISSING'
# we could have no expired record, but a used up record instead.
exp_state = exp_dict.get('status', None)
if exp_state is None:
raise RuntimeError("status field missing in state record {}".format(exp_dict))
elif exp_state == 'EXPIRED':
# Cannot use fromisoformat in 3.6...
# expiry_date = datetime.date.fromisoformat(exp_dict['occurred'])
# the string is of the form '2011-04-20'
expiry_date = datetime.date(*[int(s) for s in exp_dict['occurred'].split('-')])
has_expired = expiry_date < self._current_date
else:
has_expired = False
# print("FFF {}".format(slst))
rtup = (nom_state, ismissing, has_expired)
# print("GGG {}".format(rtup))
return rtup
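# Illustrative sketch (hypothetical state records, not part of the original module):
# for states MADE -> IN_USE -> EXPIRED (occurred '2030-01-01'), the score ordering
# makes the EXPIRED record the expiry candidate and IN_USE the nominal state, so
# calc_final_state() returns (the IN_USE record, False, False) until the expiry
# date has passed.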
def get_location_list(self) -> DBRecList:
"""Return a list of all defined locations."""
raise NotImplementedError('not implemented')
def get_reagent_item_list(self) -> DBRecList:
"""Return a list of all reagent items."""
raise NotImplementedError('not implemented')
def get_reagent_item_status_list(self) -> DBRecList:
"""Return a list of all reagent item statuses."""
raise NotImplementedError('not implemented')
def get_reagent_list(self) -> DBRecList:
"""Return a list of all reagents."""
raise NotImplementedError('not implemented')
def _do_generate_webclient_stocklist(self) -> dict:
"""Generate the stock list in a form required by the web client.
Returns:
The dict returned has the following entries:
loclst: a list of dicts containing the stock locations, e.g.
[{'id': 10000, 'name': 'SPH'}, {'id': 10001, 'name': 'SPH\\638'}, ... ]
The itemlst is a list of dicts containing:
{'id': 18478, 'last_seen': None, 'lot_num': '2019AD3EB',
'notes': '8 bottles of spare reagents',
'qcs_location_id': 10010,
'qcs_reag_id': 6297, 'rfid': 'REPLACE ME'},
{'id': 18479, 'last_seen': None, 'lot_num': 'INT.BP.17.02',
'notes': None, 'qcs_location_id': 10016,
'qcs_reag_id': 6217, 'rfid': 'REPLACE ME'}
The itmstatlst is a list of dicts containing:
{'id': 41418, 'occurred': '2021-04-30T07:00:00Z',
'qcs_reag_item_id': 18512, 'qcs_user_id': 113, 'status': 'EXPIRED'},
{'id': 41419, 'occurred': '2018-06-01T22:54:26Z',
'qcs_reag_item_id': 18513, 'qcs_user_id': 112, 'status': 'MADE'},
{'id': 41420, 'occurred': '2020-04-03T00:00:00Z',
'qcs_reag_item_id': 18513, 'qcs_user_id': 112, 'status': 'EXPIRED'}
The reagentlst is a list of dicts containing:
{'id': 8912, 'name': 'Atazanavir-bisulfate', 'basetype': 'reagent',
'catalog_number': None, 'category': 'TDM', 'date_msds_expires': None,
'disposed': None, 'expiry_time': None,
'hazards': 'Avoid inhalation, skin and eye contact. Wear PPE.',
'location': None, 'msds_filename': 'ATV_BS_A790050MSDS.pdf',
'needs_validation': None, 'notes': None, 'qcs_document_id': None,
'storage': '-20 C', 'supplier': None},
{'id': 8932, 'name': 'Triton X-100', 'basetype': 'stockchem',
'catalog_number': 'T8787', 'category': 'Other Chemicals',
'date_msds_expires': '2020-02-28T00:00:00Z', 'disposed': None,
'expiry_time': 2555, 'hazards': None, 'location': None,
'msds_filename': None, 'needs_validation': None,
'notes': None, 'qcs_document_id': None, 'storage': 'Room Temperature',
'supplier': 'Sigma Aldrich'},
{'id': 8952, 'name': 'Proviral V3 Primary 1st PCR mix', 'basetype': 'reagent',
'catalog_number': None, 'category': 'PCR',
'date_msds_expires': None, 'disposed': None,
'expiry_time': None, 'hazards': None,
'location': '604', 'msds_filename': None,
'needs_validation': None,
'notes': None, 'qcs_document_id': None,
'storage': '-20 C', 'supplier': None}
"""
# NOTE: as we want dicts and not Location instances, we go directly to
# the 'SQL level' (session.execute() and not the 'ORM level' (session.query())
# of sqlquery.
loclst = self.get_location_list()
itmlst = self.get_reagent_item_list()
itmstat = self.get_reagent_item_status_list()
# create a Dict[locationid, List[reagentitem]] and a Dict[RFID, reagentitem]
d_d: typing.Dict[typing.Optional[int], typing.List[dict]] = {}
# rfid_reagitem_dct = ff = {}
f_f: typing.Dict[str, dict] = {}
for reag_item in itmlst:
loc_id = reag_item.get('qcs_location_id', None)
# we will keep a list of items with None locations... should not happen, but does
# then we add these to the UNKNOWN list later on
d_d.setdefault(loc_id, []).append(reag_item)
# if loc_id is not None:
# else:
# raise RuntimeError("found None location {}".format(reag_item))
#
rfidstr = reag_item.get('rfid', None)
if rfidstr is not None:
if rfidstr != 'REPLACE ME':
f_f.setdefault(rfidstr, reag_item)
else:
raise RuntimeError("found None location {}".format(reag_item))
# unmangling for None...
# find loc_id for 'UNKNOWN'...
if None in d_d:
none_lst = d_d[None]
del d_d[None]
flst = [loc for loc in loclst if loc['name'] == 'UNKNOWN']
assert len(flst) == 1, "cannot determine 'UNKNOWN' location"
unknown_lst = d_d.setdefault(flst[0]['id'], [])
unknown_lst.extend(none_lst)
#
# NOW, create a Dict[locationid, Tuple[locrecord, List[reagentitem]]]
# which we send to the client
r_r: typing.Dict[int, typing.Tuple[dict, typing.List[dict]]] = {}
locid_reagitem_dct = r_r
for location in loclst:
loc_id = location.get('id', None)
r_r[loc_id] = (location, d_d.get(loc_id, []))
assert len(r_r) == len(loclst), "problem with location ids!"
#
# collect the state records for each reagent item...
z_z: typing.Dict[int, list] = {}
for state in itmstat:
reag_item_id = state['qcs_reag_item_id']
# we want to replace the occurred timedate entry with a simple date
# to present to the user, i.e.
# 'occurred': '2011-04-20T00:00:00Z' -> '2011-04-20'
dstr = state['occurred']
state['occurred'] = dstr.split('T')[0]
z_z.setdefault(reag_item_id, []).append(state)
# and evaluate the 'final state' for each reagent item
ritemdct = {}
for reag_item in itmlst:
reag_item_id = reag_item['id']
state_lst = z_z.get(reag_item_id, None)
if state_lst is None:
state_info = None
else:
state_info = self.calc_final_state(state_lst)
# print("BLAAA {} {}".format(reag_item_id, state_info))
# we eliminate any reagent item that has a state of 'USED_UP'.
dct, ismissing, hasexpired = state_info
state_info = None if dct['status'] == 'USED_UP' else state_info
if state_info is not None:
ritemdct[reag_item_id] = (reag_item, state_info)
# else:
# print("skipping {}".format(reag_item))
# create a Dict[reagentid, reagent]
rl = self.get_reagent_list()
rg = {}
for reagent in rl:
# delete the legacy location field in reagents...
reagent.pop('location', None)
reagent_id = reagent.get('id', None)
if reagent_id is not None:
rg[reagent_id] = reagent
else:
raise RuntimeError("reagent ID is None")
assert len(rg) == len(rl), "problem with reagent ids!"
# "itmstatlst": itmstat,
# finally, sort the loclst according to a hierarchy
loclst = sortloclist(loclst)
# , "rfiddct": rfid_reagitem_dct}
return {"loclst": loclst, "locdct": locid_reagitem_dct,
"ritemdct": ritemdct, "reagentdct": rg}
def generate_webclient_stocklist(self) -> dict:
"""Generate the chemical stock list in a form suitable for the webclient.
Returns:
The dict of stock items for the webclient.
Raises:
RuntimeError: if the update from QAI failed.
"""
if self._db_has_changed:
self._cachedct = self._do_generate_webclient_stocklist()
self._db_has_changed = False
if self._cachedct is None:
raise RuntimeError('Internal error')
return self._cachedct
# location changes ---
def reset_loc_changes(self) -> None:
"""Remove all location changes in the database.
A location change occurs when, during stock taking, a reagent item was
found in a location that does not agree with the database.
The user enters a location change for that item to be uploaded
to QAI at a later date.
"""
raise NotImplementedError('not implemented')
def number_of_loc_changes(self) -> int:
"""Return the number of location changes currently in the database"""
raise NotImplementedError('not implemented')
def _verify_loc_changes(self, locid: int, locdat: LocChangeList) -> None:
"""Perform a sanity check on the location changes.
This routine should be called in add_loc_changes() before any changes
are actually committed to the database."""
# NOTE: type check all records before adding any records. In this way,
# any type exception leaves the database unchanged.
if not isinstance(locid, int):
raise ValueError("locid must be an int")
print("ADDLOCCHANGES : {}".format(locdat))
for reag_itm_id, opstring in locdat:
if not isinstance(reag_itm_id, int):
raise ValueError("reag_itm_id must be an int")
if not isinstance(opstring, str):
raise ValueError("opstring must be a string")
if opstring not in BaseLocMutation.VALID_OPS:
raise ValueError("unknown opstring '{}', valid ops: {}".format(opstring,
BaseLocMutation.VALID_OPS))
def add_loc_changes(self, locid: int, locdat: LocChangeList) -> None:
"""Add a location change to the database.
Any existing location mutation for the same reagent_item id will be silently
overwritten with the new location id and opstring. In addition, do_ignore will be set to False.
Args:
locid: the id of the new location of the reagent items in locdat
locdat: a list of tuple with an reagent_item id (int) and an opstring (str)
indicating the items to change the location of.
(reagent item ID, string)
For example: (18023, 'missing')
Raises:
ValueError: if the data types are not as expected.
"""
raise NotImplementedError('not implemented')
def set_ignore_flag(self, reag_item_id: int, do_ignore: bool) -> dict:
"""Set/reset the ignore location change flag.
Args:
reag_item_id: the reagent item with a location change
do_ignore: set this to True (the location change is ignored) or False
Returns:
A dict with a response that can be sent back to the webclient for diagnostics.
The dict will have an 'ok' boolean entry, and a 'msg' string entry.
"""
raise NotImplementedError('not implemented')
def get_loc_changes(self, oldhash: typing.Optional[str] = None) -> \
typing.Tuple[str, typing.Optional[typing.Dict[int, LocChangeList]]]:
"""Return all location changes in the database.
Args:
oldhash: an optional hashkey indicating the last retrieved database state.
Returns:
If oldhash does not match our current hash,
return the new hash and a new dictionary.
The dictionary keys are location id's, and the values are a list of tuples.
The tuples are of the form (reagent item id, operation string,
row id, row ignore boolean)
If the hash does match, return the newhash and None. In this case, the stocky server
will know that the webclient already has an up-to-date version of the location changes.
"""
raise NotImplementedError('not implemented')
def perform_loc_changes(self, move_dct: dict) -> dict:
"""
* Report the required changes from the list provided to QAI.
* Update the local Locmutation table accordingly
* Purge successfully recorded locmutations
* Replenish our DB from QAI.
* Return a dict in response (success/failure)
"""
raise NotImplementedError('not implemented')
|
#
# Copyright (c) 2009-2012 Joshua Hughes <[email protected]>
#
import urllib
import webbrowser
import qmk
class BeolingusCommand(qmk.Command):
'''Look up a German word using Beolingus. A new tab will be opened
in the default web browser with the search results.'''
def __init__(self):
self._name = 'beolingus'
self._help = self.__doc__
self.__baseURL = 'http://dict.tu-chemnitz.de/?query=%s' \
'&service=deen&mini=1'
@qmk.Command.actionRequiresArgument
def action(self, arg):
webbrowser.open_new_tab(
self.__baseURL % urllib.quote_plus(arg.encode('latin_1')))
def commands(): return [ BeolingusCommand() ]
|
def much(x):
return x+1
|
#!/usr/bin/env python
from __future__ import print_function
import setuptools
import os, sys, os.path, glob, \
tempfile, subprocess, shutil
def check_for_openmp():
# Create a temporary directory
tmpdir = tempfile.mkdtemp()
curdir = os.getcwd()
exit_code = 1
if os.name == 'nt': return False
try:
os.chdir(tmpdir)
# Get compiler invocation
compiler = os.getenv('CC', 'cc')
# Attempt to compile a test script.
# See http://openmp.org/wp/openmp-compilers/
filename = r'test.c'
file = open(filename,'wt', 1)
file.write(
"#include <omp.h>\n"
"#include <stdio.h>\n"
"int main() {\n"
"#pragma omp parallel\n"
"printf(\"Hello from thread %d, nthreads %d\\n\", omp_get_thread_num(), omp_get_num_threads());\n"
"}"
)
file.flush()
with open(os.devnull, 'w') as fnull:
exit_code = subprocess.call([compiler, '-fopenmp', filename],
stdout=fnull, stderr=fnull)
# Clean up
file.close()
finally:
os.chdir(curdir)
shutil.rmtree(tmpdir)
return exit_code == 0
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('lib',parent_package,top_path)
if check_for_openmp():
omp_args = ['-fopenmp']
else:
omp_args = None
# Because setjmp.h is included by lots of things, and because libpng hasn't
# always properly checked its header files (see
# https://bugzilla.redhat.com/show_bug.cgi?id=494579 ) we simply disable
# support for setjmp.
config.add_extension("bitarray",
["yt/utilities/lib/bitarray.pyx"],
libraries=["m"], depends=["yt/utilities/lib/bitarray.pxd"])
config.add_extension("CICDeposit",
["yt/utilities/lib/CICDeposit.pyx"],
libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])
config.add_extension("ContourFinding",
["yt/utilities/lib/ContourFinding.pyx"],
include_dirs=["yt/utilities/lib/",
"yt/geometry/"],
libraries=["m"],
depends=["yt/utilities/lib/fp_utils.pxd",
"yt/utilities/lib/amr_kdtools.pxd",
"yt/utilities/lib/ContourFinding.pxd",
"yt/geometry/oct_container.pxd"])
config.add_extension("DepthFirstOctree",
["yt/utilities/lib/DepthFirstOctree.pyx"],
libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])
config.add_extension("fortran_reader",
["yt/utilities/lib/fortran_reader.pyx"],
include_dirs=["yt/utilities/lib/"],
libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])
config.add_extension("geometry_utils",
["yt/utilities/lib/geometry_utils.pyx"],
extra_compile_args=omp_args,
extra_link_args=omp_args,
libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])
config.add_extension("Interpolators",
["yt/utilities/lib/Interpolators.pyx"],
libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])
config.add_extension("alt_ray_tracers",
["yt/utilities/lib/alt_ray_tracers.pyx"],
libraries=["m"], depends=[])
config.add_extension("marching_cubes",
["yt/utilities/lib/marching_cubes.pyx",
"yt/utilities/lib/FixedInterpolator.c"],
include_dirs=["yt/utilities/lib/"],
libraries=["m"],
depends=["yt/utilities/lib/fp_utils.pxd",
"yt/utilities/lib/fixed_interpolator.pxd",
"yt/utilities/lib/FixedInterpolator.h",
])
config.add_extension("misc_utilities",
["yt/utilities/lib/misc_utilities.pyx"],
libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])
config.add_extension("pixelization_routines",
["yt/utilities/lib/pixelization_routines.pyx",
"yt/utilities/lib/pixelization_constants.c"],
include_dirs=["yt/utilities/lib/"],
libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd",
"yt/utilities/lib/pixelization_constants.h"])
config.add_extension("Octree",
["yt/utilities/lib/Octree.pyx"],
libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])
config.add_extension("origami",
["yt/utilities/lib/origami.pyx",
"yt/utilities/lib/origami_tags.c"],
include_dirs=["yt/utilities/lib/"],
depends=["yt/utilities/lib/origami_tags.h"])
config.add_extension("image_utilities",
["yt/utilities/lib/image_utilities.pyx"],
libraries=["m"],
depends=["yt/utilities/lib/fp_utils.pxd"]),
config.add_extension("PointsInVolume",
["yt/utilities/lib/PointsInVolume.pyx"],
libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])
config.add_extension("QuadTree",
["yt/utilities/lib/QuadTree.pyx"],
libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])
config.add_extension("RayIntegrators",
["yt/utilities/lib/RayIntegrators.pyx"],
libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])
config.add_extension("mesh_utilities",
["yt/utilities/lib/mesh_utilities.pyx"],
include_dirs=["yt/utilities/lib/"],
libraries=["m"],
depends = ["yt/utilities/lib/fp_utils.pxd",
],
)
config.add_extension("grid_traversal",
["yt/utilities/lib/grid_traversal.pyx",
"yt/utilities/lib/FixedInterpolator.c",
"yt/utilities/lib/kdtree.c"],
include_dirs=["yt/utilities/lib/"],
libraries=["m"],
extra_compile_args=omp_args,
extra_link_args=omp_args,
depends = ["yt/utilities/lib/fp_utils.pxd",
"yt/utilities/lib/kdtree.h",
"yt/utilities/lib/FixedInterpolator.h",
"yt/utilities/lib/fixed_interpolator.pxd",
"yt/utilities/lib/field_interpolation_tables.pxd",
]
)
config.add_extension("write_array",
["yt/utilities/lib/write_array.pyx"])
config.add_extension("ragged_arrays",
["yt/utilities/lib/ragged_arrays.pyx"])
config.add_extension("amr_kdtools",
["yt/utilities/lib/amr_kdtools.pyx"],
libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])
config.add_subpackage("tests")
if os.environ.get("GPERFTOOLS", "no").upper() != "NO":
gpd = os.environ["GPERFTOOLS"]
idir = os.path.join(gpd, "include")
ldir = os.path.join(gpd, "lib")
print(("INCLUDE AND LIB DIRS", idir, ldir))
config.add_extension("perftools_wrap",
["yt/utilities/lib/perftools_wrap.pyx"],
libraries=["profiler"],
library_dirs = [ldir],
include_dirs = [idir],
)
config.make_config_py() # installs __config__.py
return config
|
#!/usr/bin/env python
import numpy as np
# ------ HELPER FUNCTIONS ------ #
def sq(x):
    # despite its name, sq() is used as a shorthand for square root throughout this file
    return np.sqrt(x)
def calc_l0(A,Nv):
return sq(A*4./(2*Nv-4)/sq(3))
def calc_kp(l0,lm,ks,m):
return (6*ks*pow(l0,(m+1))*pow(lm,2) - 9*ks*pow(l0,(m+2))*lm + 4*ks*pow(l0,(m+3))) / (4*pow(lm,3)-8*l0*pow(lm,2)+4*pow(l0,2)*lm)
def calc_mu0(x0,l0,ks,kp,m):
return sq(3)*ks/(4.*l0) * (x0/(2.*pow((1-x0),3)) - 1./(4.*pow((1-x0),2)) + 1./4) + sq(3)*kp*(m+1)/(4.*pow(l0,(m+1)))
# -------- COMPUTE QoIs -------- #
def compute_mu(m, x0, ks, A, Nv):
l0 = calc_l0(A,Nv)
lm = l0/x0
kp = calc_kp(l0, lm, ks, m)
return calc_mu0(x0, l0, ks, kp, m)
def compute_mu_over_ks(m, x0, A, Nv):
return compute_mu(m, x0, 1.0, A, Nv)
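# Hedged usage sketch of the helpers above; the area A, vertex count Nv, x0 and ks values
# are arbitrary illustrative numbers, not fitted parameters.
def _demo_compute_mu():
    A_demo, Nv_demo = 135.0, 1000
    mu = compute_mu(m=2.0, x0=0.48, ks=22.7, A=A_demo, Nv=Nv_demo)
    mu_over_ks = compute_mu_over_ks(m=2.0, x0=0.48, A=A_demo, Nv=Nv_demo)
    return mu, mu_over_ks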
# from optimal UQ results
def set_rbc_params(mesh, x0, ks):
m = 2.0
Nv = len(mesh.vertices)
A = mesh.area
V = mesh.volume
# max likelihood estimate from Athena UQ, stretching
    # x0 = 0.484972142576
    # ks = 22.6814565515
kb = 1.0
prms = {
"tot_area" : A,
"tot_volume" : V,
"ka_tot" : 4900.0,
"kv_tot" : 7500.0,
"kBT" : 0.0,
"gammaC" : 52.0,
"gammaT" : 0.0,
"shear_desc": "wlc",
"ka" : 5000,
"x0" : x0,
"mpow" : m,
"ks" : ks,
"bending_desc" : "Kantor",
"theta" : 0.0,
"kb" : kb
}
mu = compute_mu(m, x0, ks, A, Nv)
return prms
# from Fedosov params
def set_rbc_params0(prms, prms_bending, mesh):
m = 2.0
Nv = len(mesh.vertices)
A = mesh.area
V = mesh.volume
# max likelihood estimate from Athena UQ, stretching
x0 = 1.0/2.2
ks = 35.429323407939094
kb = 27.105156961709344
prms = {
"tot_area" : A,
"tot_volume" : V,
"ka_tot" : 4900.0,
"kv_tot" : 7500.0,
"kBT" : 0.0,
"gammaC" : 52.0,
"gammaT" : 0.0,
"shear_desc": "wlc",
"ka" : 5000,
"x0" : x0,
"mpow" : m,
"ks" : ks,
"bending_desc" : "Kantor",
"theta" : 0.0,
"kb" : kb
}
mu = compute_mu(m, x0, ks, A, Nv)
return prms
def print_rbc_params(p):
print("A = {}".format(p.totArea))
print("V = {}".format(p.totVolume))
print("x0 = {}".format(p.x0))
print("kb = {}".format(pb.kb))
print("ks = {}".format(p.ks))
print("m = {}".format(p.mpow))
|
from hackerrank.HackerRankAPI import HackerRankAPI
key = ""
compiler = HackerRankAPI(api_key = key)
source = """
for i in range(5):
print i
"""
sample_msg = """:compile cpp
```
#include <iostream>
int main() {
std::cout << "hello world" << std::endl;
return 0;
}
```
"""
# if sample_msg.startswith(":compile"):
# arr = sample_msg.split('```')
# lang = arr[0].split(' ')[1]
# source = arr[1]
# result = compiler.run({
# 'source': source,
# 'lang':'cpp'
# })
# out = result.output
# if out != None:
# print(str(out[0]))
# else:
# print(result.message)
s = compiler.supportedlanguages()
print(s)
|
import numpy as np
x = np.c_[np.random.normal(size=10000),
          np.random.normal(scale=4, size=10000)]
from nipy.neurospin.utils.emp_null import ENN
enn = ENN(x)
enn.threshold(verbose=True)
|
import cv2
import os
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Conv2D,Conv2DTranspose,MaxPool2D
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.layers import Concatenate
from utils import *
class UNET:
def __init__(self, classes):
"""
        :param classes: number of output classes
"""
self.classes = classes
self.ops = ops(self.classes)
def down_conv_block(self, x, filters):
"""
:param x:
:param filters:
:return:
"""
s1 = self.ops.down_conv_(x, filters, filter_size= 3, stride= 1, padding= "SAME")
s2 = self.ops.down_conv_(s1, filters, filter_size= 3, stride= 1, padding= "SAME")
return s2
def up_conv_block(self, x, filters, skip_connection):
"""
:param x: input
:param filters: no. of filters
:param skip_connection:
"""
e1 = self.ops.up_conv_(x, filters, filter_size= 2, stride= 2, padding= "SAME")
concat = tf.concat([e1, skip_connection], axis= -1)
#layer 2
conv1 = self.ops.down_conv_(concat,filters,filter_size = 3,stride = 1, padding= "SAME")
#layer3
conv2 = self.ops.down_conv_(conv1,filters,filter_size = 3,stride = 1, padding = "SAME")
return conv2
def UNet(self, x):
"""
:param x: input
:return:
Output of the U-Net
"""
#encoder
d1 = self.down_conv_block(x,32)
m1 = self.ops.max_pool_(d1,filter_size=2,stride= 2,padding= "SAME")
d2 = self.down_conv_block(m1,64)
m2 = self.ops.max_pool_(d2,filter_size =2,stride=2,padding="SAME")
d3 = self.down_conv_block(m2,128)
m3 = self.ops.max_pool_(d3,filter_size=2,stride= 2,padding = "SAME")
d4 =self.down_conv_block(m3,256)
m4 = self.ops.max_pool_(d4,filter_size=2,stride= 2,padding = "SAME")
#bottleneck
bridge = self.ops.down_conv_(m4,1024,3,1,"SAME")
bridge = self.ops.down_conv_(bridge,1024,3,1,"SAME")
#decoder
u1 = self.up_conv_block(bridge,256,d4)
u2 = self.up_conv_block(u1,128,d3)
u3 = self.up_conv_block(u2,64,d2)
u4 = self.up_conv_block(u3,32,d1)
#1x1 output
logits = tf.keras.layers.Conv2D(self.classes,kernel_size=1,strides=1,padding="SAME")(u4)
logits = tf.nn.sigmoid(logits)
return logits
def mini_batches_(self, X, Y, batch_size=64):
"""
function to produce minibatches for training
:param X: input placeholder
:param Y: mask placeholder
:param batch_size: size of each batch
:return:
minibatches for training
"""
train_length = len(X)
num_batches = int(np.floor(train_length / batch_size))
batches = []
for i in range(num_batches):
batch_x = X[i * batch_size: i * batch_size + batch_size, :, :, :]
batch_y = Y[i * batch_size:i * batch_size + batch_size, :, :]
batches.append([batch_x, batch_y])
        return batches
|
from bs4 import BeautifulSoup
from scrapper.article_scrapper import ArticleScrapper
class VoxArticleScrapper(ArticleScrapper):
def scrap_header(self, dom: BeautifulSoup) -> str:
return self.find_child_h1(dom, attrs={'class': 'c-page-title'}).text
def scrap_content(self, dom: BeautifulSoup) -> str:
return self.find_child_div(dom, attrs={'class': 'c-entry-content'}).text
@staticmethod
def domain():
return "vox.com"
|
from typing import Dict
from botocore.paginate import Paginator
class ListDetectors(Paginator):
def paginate(self, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`GuardDuty.Client.list_detectors`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/ListDetectors>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'DetectorIds': [
'string',
],
}
**Response Structure**
- *(dict) --* 200 response
- **DetectorIds** *(list) --* A list of detector Ids.
- *(string) --* The unique identifier for a detector.
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
class ListFilters(Paginator):
def paginate(self, DetectorId: str, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`GuardDuty.Client.list_filters`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/ListFilters>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
DetectorId='string',
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'FilterNames': [
'string',
],
}
**Response Structure**
- *(dict) --* 200 response
- **FilterNames** *(list) --* A list of filter names
- *(string) --* The unique identifier for a filter
:type DetectorId: string
:param DetectorId: **[REQUIRED]** The ID of the detector that specifies the GuardDuty service where you want to list filters.
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
class ListFindings(Paginator):
def paginate(self, DetectorId: str, FindingCriteria: Dict = None, SortCriteria: Dict = None, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`GuardDuty.Client.list_findings`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/ListFindings>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
DetectorId='string',
FindingCriteria={
'Criterion': {
'string': {
'Eq': [
'string',
],
'Gt': 123,
'Gte': 123,
'Lt': 123,
'Lte': 123,
'Neq': [
'string',
]
}
}
},
SortCriteria={
'AttributeName': 'string',
'OrderBy': 'ASC'|'DESC'
},
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'FindingIds': [
'string',
],
}
**Response Structure**
- *(dict) --* 200 response
- **FindingIds** *(list) --* The list of the Findings.
- *(string) --* The unique identifier for the Finding
:type DetectorId: string
:param DetectorId: **[REQUIRED]** The ID of the detector that specifies the GuardDuty service whose findings you want to list.
:type FindingCriteria: dict
:param FindingCriteria: Represents the criteria used for querying findings.
- **Criterion** *(dict) --* Represents a map of finding properties that match specified conditions and values when querying findings.
- *(string) --*
- *(dict) --* Finding attribute (for example, accountId) for which conditions and values must be specified when querying findings.
- **Eq** *(list) --* Represents the equal condition to be applied to a single field when querying for findings.
- *(string) --*
- **Gt** *(integer) --* Represents the greater than condition to be applied to a single field when querying for findings.
- **Gte** *(integer) --* Represents the greater than equal condition to be applied to a single field when querying for findings.
- **Lt** *(integer) --* Represents the less than condition to be applied to a single field when querying for findings.
- **Lte** *(integer) --* Represents the less than equal condition to be applied to a single field when querying for findings.
- **Neq** *(list) --* Represents the not equal condition to be applied to a single field when querying for findings.
- *(string) --*
:type SortCriteria: dict
:param SortCriteria: Represents the criteria used for sorting findings.
- **AttributeName** *(string) --* Represents the finding attribute (for example, accountId) by which to sort findings.
- **OrderBy** *(string) --* Order by which the sorted findings are to be displayed.
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
class ListIPSets(Paginator):
def paginate(self, DetectorId: str, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`GuardDuty.Client.list_ip_sets`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/ListIPSets>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
DetectorId='string',
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'IpSetIds': [
'string',
],
}
**Response Structure**
- *(dict) --* 200 response
- **IpSetIds** *(list) --* A list of the IP set IDs
- *(string) --* The unique identifier for an IP Set
:type DetectorId: string
:param DetectorId: **[REQUIRED]** The unique ID of the detector that you want to retrieve.
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
class ListInvitations(Paginator):
def paginate(self, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`GuardDuty.Client.list_invitations`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/ListInvitations>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'Invitations': [
{
'AccountId': 'string',
'InvitationId': 'string',
'InvitedAt': 'string',
'RelationshipStatus': 'string'
},
],
}
**Response Structure**
- *(dict) --* 200 response
- **Invitations** *(list) --* A list of invitation descriptions.
- *(dict) --* Invitation from an AWS account to become the current account's master.
- **AccountId** *(string) --* Inviter account ID
- **InvitationId** *(string) --* This value is used to validate the inviter account to the member account.
- **InvitedAt** *(string) --* Timestamp at which the invitation was sent
- **RelationshipStatus** *(string) --* The status of the relationship between the inviter and invitee accounts.
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
class ListMembers(Paginator):
def paginate(self, DetectorId: str, OnlyAssociated: str = None, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`GuardDuty.Client.list_members`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/ListMembers>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
DetectorId='string',
OnlyAssociated='string',
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'Members': [
{
'AccountId': 'string',
'DetectorId': 'string',
'Email': 'string',
'InvitedAt': 'string',
'MasterId': 'string',
'RelationshipStatus': 'string',
'UpdatedAt': 'string'
},
],
}
**Response Structure**
- *(dict) --* 200 response
- **Members** *(list) --* A list of member descriptions.
- *(dict) --* Contains details about the member account.
- **AccountId** *(string) --* AWS account ID.
- **DetectorId** *(string) --* The unique identifier for a detector.
- **Email** *(string) --* Member account's email address.
- **InvitedAt** *(string) --* Timestamp at which the invitation was sent
- **MasterId** *(string) --* The master account ID.
- **RelationshipStatus** *(string) --* The status of the relationship between the member and the master.
- **UpdatedAt** *(string) --* The first time a resource was created. The format will be ISO-8601.
:type DetectorId: string
:param DetectorId: **[REQUIRED]** The unique ID of the detector of the GuardDuty account whose members you want to list.
:type OnlyAssociated: string
:param OnlyAssociated: Specifies what member accounts the response is to include based on their relationship status with the master account. The default value is TRUE. If onlyAssociated is set to TRUE, the response will include member accounts whose relationship status with the master is set to Enabled, Disabled. If onlyAssociated is set to FALSE, the response will include all existing member accounts.
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
class ListThreatIntelSets(Paginator):
def paginate(self, DetectorId: str, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`GuardDuty.Client.list_threat_intel_sets`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/ListThreatIntelSets>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
DetectorId='string',
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'ThreatIntelSetIds': [
'string',
]
}
**Response Structure**
- *(dict) --* 200 response
- **ThreatIntelSetIds** *(list) --* The list of the threat intel set IDs
              - *(string) --* The unique identifier for a threat intel set
:type DetectorId: string
:param DetectorId: **[REQUIRED]** The detectorID that specifies the GuardDuty service whose ThreatIntelSets you want to list.
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
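# Hedged usage sketch: the classes above only document the paginator interface; a real
# paginator object is obtained from boto3's get_paginator(). Assumes boto3 is installed
# and AWS credentials/region are configured in the environment.
def _demo_list_detectors():
    import boto3
    client = boto3.client("guardduty")
    paginator = client.get_paginator("list_detectors")
    detector_ids = []
    for page in paginator.paginate(PaginationConfig={"PageSize": 50}):
        detector_ids.extend(page["DetectorIds"])
    return detector_ids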
|
# Copyright (c) by it's authors.
# Some rights reserved. See LICENSE, AUTHORS.
from wallaby.pf.room import *
from wallaby.pf.peer.viewer import *
from wallaby.pf.peer.editor import *
class Invoice(Room):
def __init__(self, name):
Room.__init__(self, name)
self._count = None
self._price = None
self._document = None
def customPeers(self):
Viewer(self._name, self._countChanged, 'articles.*.count', raw=True)
Viewer(self._name, self._priceChanged, 'articles.*.price', raw=True)
self._subTotal = Editor(self._name, path='articles.*.total', raw=True)
self._netto = Editor(self._name, path='netto', raw=True)
self._vat = Editor(self._name, path='vat', raw=True)
self._brutto = Editor(self._name, path='brutto', raw=True)
self.catch(Viewer.In.Document, self._setDocument)
def _setDocument(self, action, doc):
self._document = doc
def _countChanged(self, value):
try:
self._count = float(value)
        except (TypeError, ValueError):
self._count = 0.0
self.changeTotal()
def _priceChanged(self, value):
try:
self._price = float(value)
        except (TypeError, ValueError):
self._price = 0.0
self.changeTotal()
def changeTotal(self):
        if self._document is None or self._count is None or self._price is None: return
if self._subTotal.isReadOnly(): return
self._subTotal.changeValue(self._count * self._price)
articles = self._document.get('articles')
if not articles: return
total = 0.0
for article in articles:
if 'total' in article:
try:
total = total + float(article['total'])
                except (TypeError, ValueError):
pass
vat = total*0.19
self._vat.changeValue(vat)
self._netto.changeValue(total)
self._brutto.changeValue(total + vat)
|
import math
from typing import NoReturn, Tuple
from .general import Distribution
class Binomial(Distribution):
def __init__(self, prob=.5, size=20):
"""Binomial distribution class for calculating and
visualizing a Binomial distribution.
Attributes:
mean (float): Representing the mean value of the distribution.
stdev (float): Representing the standard deviation
of the distribution.
data_list (list of floats): A list of floats to be extracted
from the data file.
p (float): Representing the probability of an event occurring.
n (int): The total number of trials.
"""
self.p = None
self.set_p(prob)
self.n = None
self.set_n(size)
self.calculate_mean()
self.calculate_stdev()
def set_p(self, new_value: float) -> NoReturn:
"""Mutator method realizes encapsulation of p attribute.
'p' stands for 'probability'.
Args:
new_value (float): New value of p attribute.
Returns:
NoReturn
"""
self.p = new_value
def get_p(self) -> float:
"""Accessor method realizes encapsulation of p attribute.
'p' stands for 'probability'.
Returns:
float: The probability of an event occurring.
"""
return self.p
def set_n(self, new_value: int) -> NoReturn:
"""Mutator method realizes encapsulation of n attribute.
'n' stands for total number of trials.
Args:
            new_value (int): New value of n attribute.
Returns:
NoReturn
"""
self.n = new_value
def get_n(self) -> int:
"""Accessor method realizes encapsulation of n attribute.
'n' stands for total number of trials.
Returns:
int: The total number of trials.
"""
return self.n
def calculate_mean(self) -> float:
"""Function to calculate the mean from p and n.
Args:
None
Returns:
float: mean of the data set.
"""
mean = self.get_p() * self.get_n()
self.set_mean(mean)
return mean
def calculate_stdev(self) -> float:
"""Function to calculate the standard deviation from p and n.
Args:
None
Returns:
float: standard deviation of the data set.
"""
variance = self.get_n() * self.get_p() * (1 - self.get_p())
stdev = math.sqrt(variance)
self.set_stdev(stdev)
return stdev
def replace_stats_with_data(self) -> Tuple[float, int]:
"""Function to calculate p and n from the data set
Args:
None
Returns:
float: the p value
            int: the n value
"""
if len(self.get_data()) > 0:
self.set_n(len(self.get_data()))
self.set_p(len([k for k in self.get_data() if k > 0]) /
len(self.get_data()))
self.calculate_mean()
self.calculate_stdev()
return self.get_p(), self.get_n()
else:
raise Exception("The data set is empty.")
def plot(self) -> NoReturn: # TODO: implement with matplotlib barplot
"""Function to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
raise NotImplementedError
def pdf(self, k) -> float:
"""Probability density function calculator
for the gaussian distribution.
Args:
k (float): point for calculating the probability density function.
Returns:
float: probability density function output.
"""
likelihood = (math.factorial(int(self.get_n())) /
(math.factorial(int(k))
* math.factorial(int(self.get_n() - k)))) \
* (self.get_p()**k * (1-self.get_p())**(self.get_n() - k))
return likelihood
def plot_pdf(self) -> NoReturn: # TODO: implement with matplotlib barplot
"""Function to plot the pdf of the binomial distribution
Args:
None
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
raise NotImplementedError
def __add__(self, other: 'Binomial') -> 'Binomial':
"""Function to add together two Binomial distributions with equal p.
Args:
other (Binomial): Binomial instance.
Returns:
Binomial: Binomial distribution.
"""
try:
assert self.get_p() == other.get_p(), 'p values are not equal'
except AssertionError as error:
raise error
total = Binomial()
total.set_p(self.get_p())
total.set_n(self.get_n() + other.get_n())
total.calculate_mean()
total.calculate_stdev()
return total
def __repr__(self) -> str:
"""Function to output the characteristics of the Binomial instance
Args:
None
Returns:
            string: characteristics of the Binomial
"""
return f"""mean {self.get_mean()},
standard deviation {self.get_stdev()},
p {self.get_p()}, n {self.get_n()}"""
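# Hedged usage sketch (assumes the Distribution base class provides set_mean/set_stdev,
# which the methods above already rely on); the p and n values are illustrative only.
def _demo_binomial():
    fair = Binomial(prob=0.5, size=20)
    larger = Binomial(prob=0.5, size=30)
    combined = fair + larger          # allowed because both p values are equal
    return fair.pdf(10), combined.get_n()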
|
adressbuch = {"JONAS": 123456,
"PETER": 8765435}
gesuchter_kontakt = input("Bitte geben Sie den Kontakt ein, den Sie suchen: ").upper() # JONAS
if gesuchter_kontakt in adressbuch:
print("Die Telefonnummer von", gesuchter_kontakt ,"ist:", adressbuch[gesuchter_kontakt])
else:
print("Dieser Eintrag ist nicht vorhanden.")
for zaehler in range(11):
    print(zaehler)
|
"""
File name: plot_results.py
Author: Esra Zihni
Date created: 27.07.2018
This script creates plots for performance assessment and feature importance
assessment. It reads the implementation and path options from the config.yml script.
It saves the created plots as .png files.
"""
import itertools
import os
import pickle
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import yaml
from scipy.stats import iqr
from utils.helper_functions import plot_features_rating, plot_performance
########################################################################################
###### ASSIGN CONFIGURATION VARIABLES ##################################################
########################################################################################
# A custom constructor must be registered in order to be able to use the !join
# tag in the yaml file
def join(loader, node):
seq = loader.construct_sequence(node)
return "".join(str(i) for i in seq)
yaml.add_constructor("!join", join)
# Read the config file
cfg = yaml.load(open("config.yml", "r"), Loader=yaml.Loader)
# Assign variables to use
models_to_use = cfg["models to use"]
subsampling_types = cfg["subsampling to use"]
performance_measures = cfg["final performance measures"]
scores_folder = cfg["scores folder path"]
importance_folder = cfg["importance folder path"]
figures_folder = cfg["figures folder path"]
feature_dict = cfg["features"]
########################################################################################
###### CREATE PLOTS OF PERFORMANCE AND FEATURE RATING ##################################
########################################################################################
# Check if the feature rating figures folder path to save already exists. If not,
# create folder.
if not os.path.exists(f"{figures_folder}/feature_ratings"):
os.makedirs(f"{figures_folder}/feature_ratings")
# Check if the performance figures folder path to save already exists. If not,
# create folder.
if not os.path.exists(f"{figures_folder}/final_performance_scores"):
os.makedirs(f"{figures_folder}/final_performance_scores")
# PLOT PERFORMANCE
# Iterate over subsampling types
for subs in subsampling_types:
all_scores = dict(zip(performance_measures, [None] * len(performance_measures)))
for perf in performance_measures:
scores = dict()
for mdl in models_to_use:
tmp_score = {
mdl: pd.read_csv(
f"{scores_folder}/{mdl}_{perf}_scores_{subs}_subsampling.csv",
index_col=0,
)
}
scores.update(tmp_score)
all_scores[perf] = scores
plot_performance(
scores=all_scores,
model_names=models_to_use,
sub_type=subs,
path=f"{figures_folder}/final_performance_scores",
)
# PLOT FEATURE IMPORTANCE
# Iterate over subsampling types
for subs in subsampling_types:
values = dict()
for mdl in models_to_use:
if mdl not in ["Catboost", "MLP"]:
tmp_weights = {
mdl: pd.read_csv(
f"{importance_folder}/{mdl}_weights_{subs}_subsampling.csv",
index_col=0,
)
}
tmp_weights[mdl] = tmp_weights[mdl].rename(columns=feature_dict)
values.update(tmp_weights)
elif mdl == "Catboost":
shaps = {
mdl: pd.read_csv(
f"{importance_folder}/{mdl}_shap_values_{subs}_subsampling.csv",
index_col=0,
)
}
shaps[mdl] = shaps[mdl].rename(columns=feature_dict)
values.update(shaps)
elif mdl == "MLP":
dts = {
mdl: pd.read_csv(
f"{importance_folder}/{mdl}_score_based_averaged_dt_values_{subs}_subsampling.csv",
index_col=0,
)
}
dts[mdl] = dts[mdl].rename(columns=feature_dict)
values.update(dts)
plot_features_rating(
values=values, sub_type=subs, path=f"{figures_folder}/feature_ratings"
)
|
import pkgutil
from collections import defaultdict
from importlib import import_module
from types import ModuleType
from typing import Any, Dict, Iterable, List, Optional, Tuple, Union
import click
from click.core import Command, Context
from click.formatting import HelpFormatter
from valohai_cli.utils import match_prefix
class PluginCLI(click.MultiCommand):
aliases = {
'new': 'create',
'start': 'run',
}
def __init__(self, **kwargs: Any) -> None:
self._commands_module = kwargs.pop('commands_module')
self._command_modules: List[str] = []
self._command_to_canonical_map: Dict[str, str] = {}
self.aliases = dict(self.aliases, **kwargs.get('aliases', {})) # instance level copy
super().__init__(**kwargs)
@property
def commands_module(self) -> ModuleType:
if isinstance(self._commands_module, str):
self._commands_module = import_module(self._commands_module)
return self._commands_module # type: ignore
@property
def command_modules(self) -> List[str]:
if not self._command_modules:
mod_path = self.commands_module.__path__ # type: ignore[attr-defined]
self._command_modules = sorted(c[1] for c in pkgutil.iter_modules(mod_path))
return self._command_modules
@property
def command_to_canonical_map(self) -> Dict[str, str]:
if not self._command_to_canonical_map:
command_map = {command: command for command in self.command_modules}
for alias_from, alias_to in self.aliases.items():
if alias_to in command_map:
command_map[alias_from] = command_map.get(alias_to, alias_to) # resolve aliases
self._command_to_canonical_map = command_map
return self._command_to_canonical_map
def list_commands(self, ctx: Context) -> List[str]: # noqa: U100
return self.command_modules
def get_command(self, ctx: Context, name: str) -> Optional[Union[Command, 'PluginCLI']]:
# Dashes aren't valid in Python identifiers, so let's just replace them here.
name = name.replace('-', '_')
command_map: Dict[str, str] = self.command_to_canonical_map
if name in command_map:
return self._get_command(command_map[name])
matches = match_prefix(command_map.keys(), name, return_unique=False)
if matches is None:
matches = []
if len(matches) == 1:
match = command_map[matches[0]]
return self._get_command(match)
if ' ' not in name:
cmd = self._try_suffix_match(ctx, name)
if cmd:
return cmd
if len(matches) > 1:
ctx.fail('"{name}" matches {matches}; be more specific?'.format(
name=name,
matches=', '.join(click.style(match, bold=True) for match in sorted(matches))
))
return None
def _try_suffix_match(self, ctx: Context, name: str) -> Optional[Command]:
# Try word suffix matching if possible.
# That is, if the user attempts `vh link` but we know about `vh proj link`, do that.
command_map: Dict[str, Command] = {
' '.join(trail): cmd
for (trail, cmd)
in self._get_all_commands(ctx)
}
s_matches = [key for key in command_map.keys() if ' ' in key and key.endswith(' ' + name)]
if len(s_matches) == 1:
match = s_matches[0]
click.echo('(Resolved {name} to {match}.)'.format(
name=click.style(name, bold=True),
match=click.style(match, bold=True),
), err=True)
return command_map[match]
return None
def resolve_command(self, ctx: Context, args: List[str]) -> Tuple[Optional[str], Optional[Command], List[str]]:
cmd_name, cmd, rest_args = super().resolve_command(ctx, args)
return (
getattr(cmd, "name", cmd_name), # Always use the canonical name of the command
cmd,
rest_args,
)
def _get_command(self, name: str) -> Command:
module = import_module(f'{self.commands_module.__name__}.{name}')
obj = getattr(module, name)
assert isinstance(obj, Command)
return obj
def _get_all_commands(self, ctx: Context) -> Iterable[Tuple[Tuple[str, ...], Command]]:
yield from walk_commands(ctx, self)
def walk_commands(
ctx: click.Context,
multicommand: click.MultiCommand,
name_trail: Tuple[str, ...] = (),
) -> Iterable[Tuple[Tuple[str, ...], Command]]:
for subcommand in multicommand.list_commands(ctx):
cmd = multicommand.get_command(ctx, subcommand)
if not (cmd and cmd.name):
continue
new_name_trail = name_trail + (cmd.name,)
yield (new_name_trail, cmd)
if isinstance(cmd, click.MultiCommand):
yield from walk_commands(ctx, cmd, new_name_trail)
class RecursiveHelpPluginCLI(PluginCLI):
def format_commands(self, ctx: Context, formatter: HelpFormatter) -> None:
rows_by_prefix = defaultdict(list)
for trail, command in self._get_all_commands(ctx):
prefix = (' '.join(trail[:1]) if len(trail) > 1 else '')
help = (command.short_help or command.help or '').partition('\n')[0]
rows_by_prefix[prefix.strip()].append((' '.join(trail).strip(), help))
for prefix, rows in sorted(rows_by_prefix.items()):
title = (
f'Commands ({prefix} ...)'
if prefix
else 'Commands'
)
with formatter.section(title):
formatter.write_dl(rows)
|
from helpers.common_imports import *
import clean.matrix_builder as mb
import clean.schuster as sch
class Restorer(object):
"""restores clean spectrum, algorithm steps 18 to 21 ref 2"""
def __init__(self, iterations, super_resultion_vector, number_of_freq_estimations, time_grid, max_freq):
self.__iterations = iterations
if iterations != 0:
self.__super_resultion_vector = super_resultion_vector
self.__number_of_freq_estimations = number_of_freq_estimations
self.__max_freq = max_freq
self.__uniform_time_grid = self.__build_uniform_time_grid(time_grid)
self.__index_vector = mb.generate_index_vector(
mb.size_of_spectrum_vector(self.__number_of_freq_estimations)
)
self.__freq_vector = mb.generate_freq_vector(
self.__index_vector, self.__max_freq, self.__number_of_freq_estimations
)
self.__clean_window_vector = self.__build_clean_window_vector()
def restore(self):
"""restores spectrum and series"""
# if nothing was detected:
if self.__iterations == 0:
return None
else:
ccs_restoration_result = self.__restore_ccs()
fap_restoration_result = self.__restore_fap()
result = {
'freq_vector': self.__freq_vector,
'uniform_time_grid': self.__uniform_time_grid
}
result.update(ccs_restoration_result)
result.update(fap_restoration_result)
return result
def __restore_ccs(self):
"""restores clean spectrum, algorithm steps 18 to 21 ref 2"""
clean_spectrum = self.__build_clean_spectrum()
correlogram = self.__build_correlogram(clean_spectrum)
uniform_series = self.__build_uniform_series(clean_spectrum)
result = {
'clean_spectrum': clean_spectrum,
'correlogram': correlogram,
'uniform_series': uniform_series
}
return result
def __restore_fap(self):
"""req 143 and 144 ref 2"""
non_zeroes = np.extract(np.abs(self.__super_resultion_vector) != 0, self.__super_resultion_vector)
freqs = np.extract(np.abs(self.__super_resultion_vector) != 0, self.__freq_vector)
amplitudes = 2*np.abs(non_zeroes)
phases = np.arctan2(np.imag(non_zeroes),np.real(non_zeroes))
result = {
'frequencies': freqs.reshape(-1,1),
'amplitudes': amplitudes.reshape(-1,1),
'phases': phases.reshape(-1,1)
}
return result
def __build_clean_window_vector(self):
"""eq 157 ref 2"""
clean_window_vector = mb.calculate_window_vector(
self.__uniform_time_grid, self.__number_of_freq_estimations, self.__max_freq
)
return clean_window_vector
def __build_uniform_time_grid(self, time_grid):
"""eq 158 ref 2"""
step_size = (time_grid[-1][0] - time_grid[0][0])/(time_grid.shape[0] - 1)
start = 0
stop = step_size*time_grid.shape[0]
result = np.arange(start,stop,step_size).reshape(-1,1)
return result
def __build_clean_spectrum(self):
"""eq 159 ref 2"""
number_of_rows = self.__super_resultion_vector.shape[0]
array = []
for index in range(0, number_of_rows):
max_index = index + 2*self.__number_of_freq_estimations
min_index = max_index - 2*self.__number_of_freq_estimations
subvector = self.__clean_window_vector[min_index:max_index+1]
# subvector is flipped, because in eq 159 index of vector C (k) is substracted from index for vector S (j)
array.append(np.matmul(self.__super_resultion_vector.T, np.flip(subvector, axis=0))[0][0])
result = np.array(array).reshape(-1,1)
return result
def __build_correlogram(self, clean_spectrum):
"""eq 160 ref 2"""
values = sch.squared_abs(clean_spectrum)
result = self.__build_correlogram_or_uniform_series(values)
return result
def __build_uniform_series(self, clean_spectrum):
"""eq 161 ref 2"""
result = self.__build_correlogram_or_uniform_series(clean_spectrum)
return result
def __build_correlogram_or_uniform_series(self, values):
"""eq 160 and 161 ref 2"""
result = mb.run_ft(
self.__uniform_time_grid, values, self.__freq_vector, self.__number_of_freq_estimations, 'inverse'
)
        return result
|
#!/usr/bin/env python3
import argparse
import sys
from pathlib import Path
from lib.reddit import Redditor
from lib.db_helper import sqliteHelper
from lib.youtube_helper import YoutubeDL
def main():
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('-s', '--subreddit', required=True, help='The subreddit you want to scan')
arg_parser.add_argument('-d', '--dest', help='The path you want to save downloaded files to')
arg_parser.add_argument('--scan-only', dest='scan_only', action='store_true', default=False,
help="Don't download new tracks only scan and update the database")
arg_parser.add_argument('--download-only', dest='download_only', action='store_true', default=False,
help="Don't scan for new tracks only fetch undownloaded content")
arg_parser.add_argument('-m', '--del-failed', dest='del_failed', action='store_true', default=False,
help="Mark tracks that fail to download as downloaded so we no longer try to download them")
arg_parser.add_argument('-l', '--list', action='store_true', default=False,
help="List the current contents of the database cache and exit")
args = arg_parser.parse_args()
sql = sqliteHelper()
if args.list:
sql.display_content_table()
sys.exit()
reddit = Redditor(sql)
youtube = YoutubeDL(sql)
# Update our database with latest youtube content
if not args.download_only:
reddit.update_cache(args.subreddit)
# Collect youtube videos, extract audio, and move created mp3s to the requested destination path
if not args.scan_only:
youtube.fetch_undownloaded(args.dest, mark_failed_downloaded=args.del_failed)
if __name__ == "__main__":
main()
|
import random
def GetChar(st):
s = []
i = 0
while i < len(st):
s.append(st[i])
i = i + 1
# print(s)
return s
def CountR(st):
sout = []
cnt1 = 0
for x in st:
sout = GetChar(x)
i, j, start, cnt = 0, 0, 0, 0
for z in range(len(sout)):
if sout[z] == 'K':
if cnt == 0:
start, cnt = z, cnt + 1
else:
cnt = cnt + 1
else:
i, j, start, cnt = start, cnt, 0, 0
# i=random.randint(0,len(sout)/2)
# j=random.randint(len(sout)/2,len(sout)-1)
print(i, j)
for m in range(i, j + 1):
if sout[m] == 'R':
sout[m] = 'K'
else:
sout[m] = 'R'
print(sout)
for n in sout:
if n == 'R':
cnt1 = cnt1 + 1
print(cnt1)
cnt1 = 0
n = int(input())
st = []
if n <= 10:
for i in range(n):
st.insert(i, input())
CountR(st)
|
idade = int(input('What is your age? '))
preço = float(input('Enter the price: '))
desconto = 7.0
if preço >= 23 and preço <= idade:
    print('You will get a discount of: R${:.2f}'.format(desconto))
else:
    print('no discount')
#https://pt.stackoverflow.com/q/446362/101
|
import datetime
import logging
# LOGGING ----------------------------------------------------------------
filename = "logs/logfile {}.log".format(datetime.date.today())
handler = logging.FileHandler(filename, "a")
frm = logging.Formatter("%(asctime)s [%(levelname)-8s] [%(funcName)-20s] [%(lineno)-4s] %(message)s",
"%d.%m.%Y %H:%M:%S")
handler.setFormatter(frm)
logger = logging.getLogger()
logger.addHandler(handler)
"""
+----------+---------------+
| Level | Numeric value |
+----------+---------------+
| CRITICAL | 50 |
| ERROR | 40 |
| WARNING | 30 |
| INFO | 20 |
| DEBUG | 10 |
| NOTSET | 0 |
+----------+---------------+
"""
logger.setLevel(logging.DEBUG)
# LOGGING ----------------------------------------------------------------
logging.debug("Test: Debug")
logging.info("Test: Info")
logging.warning("Test: Warning")
logging.error("Test: Error")
logging.critical("Test: Critical")
try:
1 / 0
except ZeroDivisionError:
logging.exception("Test: Exception.")
|
"""Looks after the data set, trimming it when necessary etc"""
import logging
from google.appengine.ext import ndb
import models
from flask import Flask
import flask
app = Flask(__name__)
MAX_RECORDS = 5000
@app.route('/_maintain')
def datastore_maintenance():
"""Performs necessary checks on the datastore"""
    logging.info('Beginning datastore maintenance...')
num_records = models.Message.query().count()
if num_records > MAX_RECORDS:
logging.info(' %d records, deleting %d', num_records,
num_records-MAX_RECORDS)
trim_records(num_records - MAX_RECORDS)
# what else should we check?
return '<h1>done!</h1>'
def trim_records(num_records):
"""Trims records from the datastore until there are only
`max_records`. Should remove oldest first.
Args:
num_records (int) - the number of the oldest records to delete.
Returns:
None
"""
to_go = models.Message.query().order(-models.Message.timestamp)
ndb.delete_multi(to_go.fetch(num_records, keys_only=True))
|
import re
import csv
import urllib
import datetime
from django.utils.encoding import smart_unicode, force_unicode
from django.db.models import Avg, Sum, Min, Max, Count
from time import strptime, strftime
import time
from urlparse import urljoin
from BeautifulSoup import BeautifulSoup
from fumblerooski.college.models import *
from fumblerooski.rankings.models import *
"""
The functions here are a collection of utilities that help with data loading
or otherwise populate records that are not part of the scraping process.
"""
def create_missing_collegeyears(year):
"""
Create collegeyears where they are missing (legacy data only).
>>> create_missing_collegeyears(2009)
"""
games = Game.objects.filter(season=year)
for game in games:
try:
game.team1
except CollegeYear.DoesNotExist:
try:
c = College.objects.get(pk=game.team1_id)
cy, created = CollegeYear.objects.get_or_create(college=c, season=year)
if created:
print "created CollegeYear for %s in %s" % (c, year)
except:
print "Could not find a college for %s" % game.team1_id
def opposing_coaches(coach):
coach_list = Coach.objects.raw("SELECT college_coach.id, college_coach.slug, count(college_game.*) as games from college_coach inner join college_game on college_coach.id = college_game.coach2_id where coach1_id = %s group by 1,2 order by 3 desc", [coach.id])
return coach_list
def calculate_team_year(year, month):
if int(month) < 8:
team_year = int(year)-1
else:
team_year = int(year)
return team_year
def calculate_record(totals):
"""
Given a dictionary of game results, calculates the W-L-T record from those games.
Used to calculate records for team vs opponent and coach vs coach views.
"""
d = {}
for i in range(len(totals)):
d[totals[i]['t1_result']] = totals[i]['count']
try:
wins = d['W']
except KeyError:
wins = 0
try:
        losses = d['L']
except KeyError:
losses = 0
try:
ties = d['T']
except KeyError:
ties = 0
return wins, losses, ties
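def demo_calculate_record():
    """
    Hedged usage sketch for calculate_record(); the result counts below are made-up
    numbers, not real season totals.
    >>> demo_calculate_record()
    (8, 3, 0)
    """
    totals = [{'t1_result': 'W', 'count': 8}, {'t1_result': 'L', 'count': 3}]
    return calculate_record(totals)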
def last_home_loss_road_win(games):
"""
Given a list of games, returns the most recent home loss and road win.
"""
try:
last_home_loss = games.filter(t1_game_type='H', t1_result='L')[0]
except:
last_home_loss = None
try:
last_road_win = games.filter(t1_game_type='A', t1_result='W')[0]
except:
last_road_win = None
return last_home_loss, last_road_win
def set_head_coaches():
"""
One-time utility to add a boolean value to college coach records. Used to prepare
the populate_head_coaches function for games.
"""
cc = CollegeCoach.objects.select_related().filter(jobs__name='Head Coach').update(is_head_coach=True)
def populate_head_coaches(game):
"""
Given a game, tries to find and save the head coaches for that game.
If it cannot, it leaves the head coaching fields as 0. Can be run on
an entire season or as part of the game loader. As college coach data
grows, will need to be run periodically on games without head coaches:
>>> games = Game.objects.filter(coach1__isnull=True, coach2__isnull=True)
>>> for game in games:
... populate_head_coaches(game)
...
"""
try:
hc = game.team1.collegecoach_set.filter(is_head_coach=True).order_by('-start_date')
if hc.count() > 0:
if hc.count() == 1:
game.coach1 = hc[0].coach
else:
coach1, coach2 = [c for c in hc]
if coach1.end_date:
if game.date < coach1.end_date:
game.coach1 = coach1.coach
elif game.date >= coach2.start_date:
game.coach1 = coach2.coach
else:
game.coach1_id = 0
else:
game.coach1_id = 0
except:
game.coach1_id = 0
game.save()
try:
hc2 = game.team2.collegecoach_set.filter(is_head_coach=True).order_by('-start_date')
if hc2.count() > 0:
if hc2.count() == 1:
game.coach2 = hc2[0].coach
else:
coach1, coach2 = [c for c in hc2]
if coach1.end_date:
if game.date < coach1.end_date:
game.coach2 = coach1.coach
elif game.date >= coach2.start_date:
game.coach2 = coach2.coach
else:
game.coach2_id = 0
else:
game.coach2_id = 0
except:
game.coach2_id = 0
game.save()
def next_coach_id():
"""
Generates the next id for newly added coaches, since their slugs (which combine the id and name fields)
are added post-commit.
"""
c = Coach.objects.aggregate(Max("id"))
return c['id__max']+1
def update_conference_membership(year):
# check prev year conference and update current year with it, then mark conf games.
previous_year = year-1
teams = CollegeYear.objects.filter(season=previous_year, conference__isnull=False)
for team in teams:
cy = CollegeYear.objects.get(season=year, college=team.college)
cy.conference = team.conference
cy.save()
def update_conf_games(year):
"""
Marks a game as being a conference game if teams are both in the same conference.
"""
games = Game.objects.filter(season=year, team1__college__updated=True, team2__college__updated=True)
for game in games:
try:
if game.team1.conference == game.team2.conference:
game.is_conference_game = True
game.save()
except:
pass
def update_quarter_scores(game):
"""
Utility to update quarter scores for existing games. New games handled via ncaa_loader.
"""
doc = urllib.urlopen(game.get_ncaa_xml_url()).read()
soup = BeautifulSoup(doc)
quarters = len(soup.findAll('score')[1:])/2
t2_quarters = soup.findAll('score')[1:quarters+1] #visiting team
t1_quarters = soup.findAll('score')[quarters+1:] #home team
for i in range(quarters):
vqs, created = QuarterScore.objects.get_or_create(game = game, team = game.team2, season=game.season, quarter = i+1, points = int(t2_quarters[i].contents[0]))
hqs, created = QuarterScore.objects.get_or_create(game = game, team = game.team1, season=game.season, quarter = i+1, points = int(t1_quarters[i].contents[0]))
def update_college_year(year):
"""
Updates season and conference records for teams. Run at the end of a game loader.
"""
teams = CollegeYear.objects.select_related().filter(season=year, college__updated=True).order_by('college_college.id')
for team in teams:
games = Game.objects.filter(team1=team, season=year, t1_result__isnull=False).values("t1_result").annotate(count=Count("id")).order_by('t1_result')
d = {}
for i in range(games.count()):
d[games[i]['t1_result']] = games[i]['count']
try:
wins = d['W']
except KeyError:
wins = 0
try:
losses = d['L']
except KeyError:
losses = 0
try:
ties = d['T']
except KeyError:
ties = 0
if team.conference:
conf_games = Game.objects.select_related().filter(team1=team, season=year, is_conference_game=True, t1_result__isnull=False).values("t1_result").annotate(count=Count("id")).order_by('t1_result')
if conf_games:
c = {}
for i in range(conf_games.count()):
c[conf_games[i]['t1_result']] = conf_games[i]['count']
try:
conf_wins = c['W']
except KeyError:
conf_wins = 0
try:
conf_losses = c['L']
except KeyError:
conf_losses = 0
try:
conf_ties = c['T']
except KeyError:
conf_ties = 0
team.conference_wins=conf_wins
team.conference_losses=conf_losses
team.conference_ties=conf_ties
team.wins=wins
team.losses=losses
team.ties=ties
team.save()
def add_college_years(year):
"""
Creates college years for teams. Used at the beginning of a new season or to backfill.
"""
teams = College.objects.all().order_by('id')
for team in teams:
cy, created = CollegeYear.objects.get_or_create(season=year, college=team)
def create_weeks(year):
"""
Given a year with games in the db, creates weeks for that year.
"""
min = Game.objects.filter(season=year).aggregate(Min('date'))['date__min']
max = Game.objects.filter(season=year).aggregate(Max('date'))['date__max']
date = min
week = 1
while date <= max:
if date.weekday() < 5:
dd = 5 - date.weekday()
end_date = date + datetime.timedelta(days=dd)
else:
end_date = date
new_week, created = Week.objects.get_or_create(season=min.year, week_num = week, end_date = end_date)
date += datetime.timedelta(days=7)
week += 1
def game_weeks(year):
"""
Populates week foreign key for games.
"""
weeks = Week.objects.filter(season=year).order_by('week_num')
for week in weeks:
games = Game.objects.filter(season=year, date__lte=week.end_date, week__isnull=True)
for game in games:
game.week = week
game.save()
def advance_coaching_staff(team, year):
"""
Takes an existing coaching staff, minus any who have an end_date value,
and creates new CollegeCoach records for them in the provided year.
Usage:
>>> from fumblerooski.utils import advance_coaching_staff
>>> from fumblerooski.college.models import *
>>> team = College.objects.get(id = 8)
>>> advance_coaching_staff(team, 2010)
"""
previous_year = int(year)-1
college = College.objects.get(id=team.id)
    old_cy = CollegeYear.objects.get(college=college, season=previous_year)
    new_cy = CollegeYear.objects.get(college=college, season=year)
old_staff = CollegeCoach.objects.filter(collegeyear=old_cy, end_date__isnull=True)
for coach in old_staff:
cc, created = CollegeCoach.objects.get_or_create(collegeyear=new_cy, coach=coach.coach)
for job in coach.jobs.all():
cc.jobs.add(job)
|
import pandas as pd
import matplotlib.pyplot as plt
from pathlib import Path
import numpy as np
import os
def calc_top_percent(betweenness_list, percent):
height = int(len(betweenness_list) * percent)
sum_top_betweenness = 0
sum_total_betweenness = sum(betweenness_list)
for i in range(height):
sum_top_betweenness += betweenness_list[i]
top_percent_betweenness = sum_top_betweenness / sum_total_betweenness
return top_percent_betweenness
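# Hedged usage sketch: calc_top_percent() assumes the betweenness list is already sorted
# in descending order; the values below are illustrative only.
def _demo_calc_top_percent():
    ranked = [0.5, 0.3, 0.1, 0.05, 0.03, 0.01, 0.005, 0.003, 0.001, 0.001]
    return calc_top_percent(ranked, 0.1)   # share of total centrality held by the top 10%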
def plot_line(betweenness_percentages, percentage, dates):
plt.figure()
df_plot = pd.DataFrame({'bt_percentage': betweenness_percentages}, index=dates)
ax = df_plot.plot.area(ylim=(0.65, 1), rot=20, alpha=0.65, stacked=False, color=['red'])
# Annotate
for i in range(len(dates)):
ax.annotate('{:.1f}'.format(betweenness_percentages[i] * 100) + '%', xytext=(i - 0.2, betweenness_percentages[i] + 0.02),
xy=(i, betweenness_percentages[i]), arrowprops=dict(arrowstyle='-'),fontsize=20, color='black',
rotation=90)
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
ax.set_xlabel('Timestamps', fontsize=20)
ax.set_ylabel('Share of Centrality in %', fontsize=20)
ax.get_legend().remove()
Path("plots/top").mkdir(parents=True, exist_ok=True)
filePath = cwd + '/plots/top/top_line_' + str(percentage) + '.png'
# plt.savefig(filePath, bbox_inches='tight', dpi=400)
plt.show()
timestamps = [
1554112800,
1564653600,
1572606000,
1585735200,
1596276000,
1606820400,
1609498800
]
top_betweenness_percentages = list()
percentage = 0.1
dates = ['01 Apr. 2019', '01 Aug. 2019', '01 Nov. 2019', '01 Apr. 2020', '01 Aug. 2020', '01 Dec. 2020', '01 Jan. 2021']
for timestamp in timestamps:
baseAmount = [10000000, 1000000000, 10000000000]
cwd = str(Path().resolve())
filepath = cwd + '/' + str(timestamp) + '/' + str(baseAmount[0])
filenames = next(os.walk(filepath), (None, None, []))[2] # [] if no file
df = pd.read_csv(cwd + '/' + str(timestamp) + '/' + str(baseAmount[0]) + '/' + filenames[0])
df_2 = pd.read_csv(cwd + '/' + str(timestamp) + '/' + str(baseAmount[1]) + '/' + filenames[0])
df_3 = pd.read_csv(cwd + '/' + str(timestamp) + '/' + str(baseAmount[2]) + '/' + filenames[0])
betweenness = (list(filter(lambda a: a != 0.0, df['betweenness'])))
betweenness_2 = (list(filter(lambda a: a != 0.0, df_2['betweenness'])))
betweenness_3 = (list(filter(lambda a: a != 0.0, df_3['betweenness'])))
avg_betwenness = list()
for b1, b2, b3 in zip(betweenness, betweenness_2, betweenness_3):
avg_betwenness.append(np.average([b1, b2, b3]))
top_betweenness_percentages.append(calc_top_percent(avg_betwenness, percentage))
plot_line(top_betweenness_percentages, percentage, dates)
|
#!/usr/bin/env python
# Copyright 2019 Banco Santander S.A.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
from tkinter import *
from tkinter import ttk
from PIL import ImageTk, Image
from tkinter.filedialog import askopenfilename, asksaveasfilename
import pandas as pd
import json
import uuid
import worker
import signature
import signingAlgorithm
import encryptionAlg
users = []
transactions = []
country = ""
numberUsers = 0
numberTransactions = 0
class App():
def __init__(self):
global root
global users
global country
global numberUsers
global transactions
global numberTransactions
global preprocess
global e0, e1, e2, e4, e5, e7
preprocess = ""
root = Tk()
root.geometry('700x520')
root.configure(bg='white')
root.title('AML client')
# Setting the countries we are using
ttk.Label(
root, text="Countries participating").grid(
row=0, padx=(100, 10), pady=(30, 7))
e0 = ttk.Entry(root)
b0 = ttk.Button(root, text='Initialize', command=self.loadCountries)
e0.grid(row=0, column=1, pady=(30, 7))
b0.grid(row=0, column=2, pady=(30, 7))
# Creating the jsons from CSVs
ttk.Label(root, text="Users CSV").grid(
row=1, padx=(100, 10), pady=(30, 4))
ttk.Label(
root, text="Transactions CSV").grid(
row=2, padx=(100, 10), pady=(0, 7))
e1 = ttk.Entry(root)
e2 = ttk.Entry(root)
b1 = ttk.Button(root, text='Select', command=self.selectUsers)
b2 = ttk.Button(root, text='Select', command=self.selectTransactions)
b3 = ttk.Button(root, text='Create Json from CSVs',
command=self.createJsonFile)
e1.grid(row=1, column=1, pady=(30, 4))
e2.grid(row=2, column=1, pady=(0, 7))
b1.grid(row=1, column=2, pady=(30, 4))
b2.grid(row=2, column=2, pady=(0, 7))
b3.grid(row=3, column=1)
# Cont
# Initializing encryption information
ttk.Label(
root, text="Worker info").grid(
row=4, padx=(100, 10), pady=(30, 4))
ttk.Label(
root, text="EC private key").grid(
row=5, padx=(100, 10), pady=(0, 7))
e4 = ttk.Entry(root)
e5 = ttk.Entry(root)
b4 = ttk.Button(root, text='Select', command=self.selectWorkerObj)
b5 = ttk.Button(root, text='Select', command=self.selecECPrivK)
b6 = ttk.Button(root, text='Init cipher info',
command=self.initCipherInfo)
e4.grid(row=4, column=1, pady=(30, 4))
e5.grid(row=5, column=1, pady=(0, 7))
b4.grid(row=4, column=2, pady=(30, 4))
b5.grid(row=5, column=2, pady=(0, 7))
b6.grid(row=6, column=1)
# Encrypt JSON
ttk.Label(
root, text="Workload's JSON").grid(
row=7, padx=(100, 10), pady=(30, 4))
e7 = ttk.Entry(root)
b7 = ttk.Button(root, text='Select', command=self.selectWorkloadJson)
b8 = ttk.Button(
root, text="Encrypt Workload's JSON",
command=self.encryptJsonInData)
e7.grid(row=7, column=1, pady=(30, 4))
b7.grid(row=7, column=2, pady=(30, 4))
b8.grid(row=8, column=1)
# Logo
img_logo = ImageTk.PhotoImage(Image.open(
"./images/santander_logo.png"))
panel_logo = Label(root, image=img_logo)
panel_logo.grid(row=10, column=1, sticky=S, pady=(40, 0))
root.mainloop()
def loadCountries(self):
global preprocess
countries = e0.get()
preprocess = countries.replace(",", "_")
def selectUsers(self):
global users_filename
global e1
users_filename = askopenfilename()
e1.delete(0, END)
e1.insert(0, users_filename.rsplit('/', 1)[-1])
def selectTransactions(self):
global transactions_filename
global e2
transactions_filename = askopenfilename()
e2.delete(0, END)
e2.insert(0, transactions_filename.rsplit('/', 1)[-1])
def createJsonFile(self):
global preprocess
global users_filename, transactions_filename
global save_json_filename
save_json_filename = asksaveasfilename()
users = pd.read_csv(users_filename)
country = users['userId'][0][0:2]
numberUsers = len(users)
transactions = pd.read_csv(transactions_filename)
numberTransactions = len(transactions)
index = 0
base_json_file = open("./json_building_blocks/base.json", "r")
inData_json_file = open("./json_building_blocks/inData.json", "r")
output_json_file = open(save_json_filename, "w")
base_json_str = base_json_file.read()
inData_json_str = inData_json_file.read()
base_json = json.loads(base_json_str)
inData_json = json.loads(inData_json_str)
inData_str = "["
if preprocess != "":
inData_json_aux = inData_json
inData_json_aux["index"] = index
index += 1
inData_aux = (
preprocess + "|" + (
country + "," + str(numberUsers)
+ "," + str(numberTransactions)))
for i in range(0, numberUsers):
inData_aux = (
inData_aux + "|" + users['userId'][i] + "," + users['name'][i])
for j in range(0, numberTransactions):
inData_aux = (
inData_aux + "|" + transactions['from'][j]
+ "," + transactions['to'][j]
+ "," + str(transactions['amount'][j])
+ "," + transactions['currency'][j]
+ "," + transactions['date'][j])
inData_json_aux["data"] = inData_aux
inData_str = inData_str + json.dumps(inData_json_aux) + "\n]"
inData_full_json = json.loads(inData_str)
workOrderId = "0x" + uuid.uuid4().hex[:16].upper()
base_json["params"]["workOrderId"] = workOrderId
base_json["params"]["inData"] = inData_full_json
workloadId = base_json["params"]["workloadId"]
workloadIdHex = workloadId.encode("UTF-8").hex()
base_json["params"]["workloadId"] = workloadIdHex
resultString = json.dumps(base_json)
output_json_file.write(resultString)
output_json_file.close()
base_json_file.close()
inData_json_file.close()
def selectWorkerObj(self):
global workerObj_filename
global e4
workerObj_filename = askopenfilename()
e4.delete(0, END)
e4.insert(0, workerObj_filename.rsplit('/', 1)[-1])
def selecECPrivK(self):
global ECPrivk_filename
global e5
ECPrivk_filename = askopenfilename()
e5.delete(0, END)
e5.insert(0, ECPrivk_filename.rsplit('/', 1)[-1])
def initCipherInfo(self):
global workerObj_filename, ECPrivk_filename
global EC_key
global worker_obj
ec_file = open(ECPrivk_filename, "r")
EC_key = ec_file.read()
worker_file = open(workerObj_filename, "r")
worker_str = worker_file.read().rstrip('\n')
worker_json = json.loads(worker_str)
worker_obj = worker.SGXWorkerDetails()
worker_obj.load_worker(worker_json)
worker_file.close()
ec_file.close()
def selectWorkloadJson(self):
global WorkloadJson_filename
global e7
global save_json_filename
WorkloadJson_filename = askopenfilename(
initialdir="/".join(save_json_filename.split("/")[:-1]))
e7.delete(0, END)
e7.insert(0, WorkloadJson_filename.rsplit('/', 1)[-1])
def encryptJsonInData(self):
global EC_key
global WorkloadJson_filename
global worker_obj
enc_json_file = asksaveasfilename()
save_key_file = asksaveasfilename()
json_file = open(WorkloadJson_filename, "r")
workload_str = json_file.read()
workload_json = json.loads(workload_str)
workload_json["params"]["workerId"] = worker_obj.worker_id
workload_str = json.dumps(workload_json)
sig_obj = signature.ClientSignature()
signing_key = signingAlgorithm.signAlgorithm()
signing_key.loadKey(EC_key)
session_iv = sig_obj.generate_sessioniv()
enc_obj = encryptionAlg.encAlgorithm()
session_key = enc_obj.generateKey()
enc_session_key = sig_obj.generate_encrypted_key(
session_key, worker_obj.encryption_key)
request_json = sig_obj.generate_client_signature(
workload_str, worker_obj, signing_key, session_key, session_iv,
enc_session_key)
enc_json_file = open(enc_json_file, "w")
enc_json_file.write(request_json)
enc_session_json = '{"key": ' + str(list(session_key)) + ',"iv": ' \
+ str(list(session_iv)) + '}'
enc_session_key_file = open(save_key_file, "w")
enc_session_key_file.write(enc_session_json)
json_file.close()
enc_json_file.close()
enc_session_key_file.close()
def main():
App()
return 0
if __name__ == '__main__':
main()
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle_fl.paddle_fl.core.trainer.fl_trainer import FLTrainerFactory
from paddle_fl.paddle_fl.core.master.fl_job import FLRunTimeJob
import paddle_fl.paddle_fl.dataset.femnist as femnist
import numpy
import sys
import paddle
import paddle.fluid as fluid
import logging
import math
import time
logging.basicConfig(
filename="test.log",
filemode="w",
format="%(asctime)s %(name)s:%(levelname)s:%(message)s",
    datefmt="%d-%m-%Y %H:%M:%S",
level=logging.DEBUG)
trainer_id = int(sys.argv[1]) # trainer id for each guest
job_path = "fl_job_config"
job = FLRunTimeJob()
job.load_trainer_job(job_path, trainer_id)
job._scheduler_ep = "127.0.0.1:9091" # Inform the scheduler IP to trainer
print(job._target_names)
trainer = FLTrainerFactory().create_fl_trainer(job)
trainer._current_ep = "127.0.0.1:{}".format(9000 + trainer_id)
place = fluid.CPUPlace()
trainer.start(place)
print(trainer._step)
test_program = trainer._main_program.clone(for_test=True)
img = fluid.layers.data(name='img', shape=[1, 28, 28], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
feeder = fluid.DataFeeder(feed_list=[img, label], place=fluid.CPUPlace())
def train_test(train_test_program, train_test_feed, train_test_reader):
acc_set = []
for test_data in train_test_reader():
acc_np = trainer.exe.run(program=train_test_program,
feed=train_test_feed.feed(test_data),
fetch_list=["accuracy_0.tmp_0"])
acc_set.append(float(acc_np[0]))
acc_val_mean = numpy.array(acc_set).mean()
return acc_val_mean
epoch_id = 0
step = 0
epoch = 10
count_by_step = False
if count_by_step:
output_folder = "model_node%d" % trainer_id
else:
output_folder = "model_node%d_epoch" % trainer_id
while not trainer.stop():
count = 0
epoch_id += 1
if epoch_id > epoch:
break
print("{} Epoch {} start train".format(time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time())), epoch_id))
#train_data,test_data= data_generater(trainer_id,inner_step=trainer._step,batch_size=64,count_by_step=count_by_step)
train_reader = paddle.batch(
paddle.reader.shuffle(
femnist.train(
trainer_id,
inner_step=trainer._step,
batch_size=64,
count_by_step=count_by_step),
buf_size=500),
batch_size=64)
test_reader = paddle.batch(
femnist.test(
trainer_id,
inner_step=trainer._step,
batch_size=64,
count_by_step=count_by_step),
batch_size=64)
if count_by_step:
for step_id, data in enumerate(train_reader()):
acc = trainer.run(feeder.feed(data), fetch=["accuracy_0.tmp_0"])
step += 1
count += 1
if count % trainer._step == 0:
break
# print("acc:%.3f" % (acc[0]))
else:
trainer.run_with_epoch(
train_reader, feeder, fetch=["accuracy_0.tmp_0"], num_epoch=1)
acc_val = train_test(
train_test_program=test_program,
train_test_reader=test_reader,
train_test_feed=feeder)
print("Test with epoch %d, accuracy: %s" % (epoch_id, acc_val))
    if trainer_id == 0:
        # save a per-epoch snapshot of the inference program
        save_dir = (output_folder + "/epoch_%d") % epoch_id
        trainer.save_inference_program(save_dir)
|
# https://www.acmicpc.net/problem/2468
import copy
import sys
sys.setrecursionlimit(100000)
def DFS(x, y, tempMap):
tempMap[x][y] = 0
for i in range(4):
nx = x + dx[i]
ny = y + dy[i]
if (0 <= nx < N) and (0 <= ny < N):
if tempMap[nx][ny]!=0:
DFS(nx, ny, tempMap)
N = int(input())
height_map = [list(map(int, input().split())) for _ in range(N)]
dx = [1, -1, 0, 0]
dy = [0, 0, 1, -1]
maxlist = []
maxvalue = 0
answer = 1
for i in range(len(height_map)):
    maxlist.append(max(height_map[i]))
maxvalue = max(maxlist)
for k in range(1, maxvalue+1):
    tempMap = copy.deepcopy(height_map)
for i in range(len(tempMap)):
for j in range(len(tempMap[i])):
if tempMap[i][j] <= k:
tempMap[i][j] = 0
count = 0
for x in range(len(tempMap)):
for y in range(len(tempMap[x])):
if tempMap[x][y] != 0:
DFS(x, y, tempMap)
count += 1
answer = max(answer, count)
print(answer) |
'''Write a program that reads any integer n and prints the first n elements of the Fibonacci sequence.
Ex: 0 1 1 2 3 5 8'''
n = int(input('Informe um número: '))
t1 = 0
t2 = 1
print('{} > {}'.format(t1, t2), end=' ')
cont = 3
while cont <= n:
t3 = t1 + t2
print(' > {}'.format(t3), end=' ')
t1 = t2
t2 = t3
cont += 1
print('> FIM!') |
import os
from tests.config import redis_cache_server
EXTENSIONS = (
'lux.ext.base',
'lux.ext.rest',
'lux.ext.content',
'lux.ext.admin',
'lux.ext.auth',
'lux.ext.odm'
)
APP_NAME = COPYRIGHT = HTML_TITLE = 'website.com'
SECRET_KEY = '01W0o2gOG6S1Ldz0ig8qtmdkEick04QlN84fS6OrAqsMMs64Wh'
SESSION_STORE = redis_cache_server
EMAIL_DEFAULT_FROM = '[email protected]'
EMAIL_BACKEND = 'lux.core.mail.LocalMemory'
SESSION_COOKIE_NAME = 'test-website'
SESSION_STORE = 'redis://127.0.0.1:6379/13'  # overrides the redis_cache_server value assigned above
DATASTORE = 'postgresql+green://lux:[email protected]:5432/luxtests'
SERVE_STATIC_FILES = os.path.join(os.path.dirname(__file__), 'media')
CONTENT_REPO = os.path.dirname(__file__)
CONTENT_LOCATION = 'content'
CONTENT_GROUPS = {
"articles": {
"path": "articles",
"body_template": "home.html",
"meta": {
"priority": 1
}
},
"site": {
"path": "*",
"body_template": "home.html",
"meta": {
"priority": 1,
"image": "/media/lux/see.jpg"
}
}
}
DEFAULT_POLICY = [
{
"resource": "contents:*",
"action": "read"
}
]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Project : tql-Python.
# @File : weather
# @Time         : 2020/8/24 9:01 PM
# @Author : yuanjie
# @Email : [email protected]
# @Software : PyCharm
# @Description :
import time
import json
import requests
def get_data(year='2020', month='08'):
url = f'http://d1.weather.com.cn/calendar_new/{year}/101010100_{year}{month}.html?_={int(time.time())}'
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.110 Safari/537.36',
'Referer': 'http://www.weather.com.cn/weather40d/101010100.shtml'
}
r = requests.get(url, headers=headers)
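    # The response body is a JavaScript assignment (assumed to be of the form
    # "var fc40 = [...]"), so skip the 11-character prefix before parsing JSON.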
return json.loads(r.content[11:]) # pd.DataFrame(data)
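if __name__ == '__main__':
    # Usage sketch (performs a live HTTP request; assumes the payload parses to
    # a list of daily records, as the trailing comment above suggests).
    import pandas as pd
    print(pd.DataFrame(get_data('2020', '08')).head())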
|
# coding=utf-8
from OTLMOW.OTLModel.BaseClasses.OTLAttribuut import OTLAttribuut
from abc import abstractmethod
from OTLMOW.OTLModel.Classes.BijlageVoertuigkering import BijlageVoertuigkering
from OTLMOW.OTLModel.Classes.LijnvormigElement import LijnvormigElement
from OTLMOW.OTLModel.Datatypes.BooleanField import BooleanField
from OTLMOW.OTLModel.Datatypes.DtcDocument import DtcDocument
from OTLMOW.OTLModel.Datatypes.DtcProductidentificatiecode import DtcProductidentificatiecode
from OTLMOW.OTLModel.Datatypes.KlLEACMateriaal import KlLEACMateriaal
from OTLMOW.OTLModel.Datatypes.StringField import StringField
# Generated with OTLClassCreator. To modify: extend, do not edit
class AfschermendeConstructie(BijlageVoertuigkering, LijnvormigElement):
"""Abstracte die een lijn- of puntvormige constructie,geïnstalleerd langs de weg om een kerend vermogen te bieden aan een dwalend voertuig,samenvat."""
typeURI = 'https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#AfschermendeConstructie'
"""De URI van het object volgens https://www.w3.org/2001/XMLSchema#anyURI."""
@abstractmethod
def __init__(self):
BijlageVoertuigkering.__init__(self)
LijnvormigElement.__init__(self)
self._certificaathouder = OTLAttribuut(field=StringField,
naam='certificaathouder',
label='certificaathouder',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#AfschermendeConstructie.certificaathouder',
definition='De houder van het uitvoeringscertificaat.',
owner=self)
self._isPermanent = OTLAttribuut(field=BooleanField,
naam='isPermanent',
label='is permanent',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#AfschermendeConstructie.isPermanent',
definition='Vermelding of de afschermende constructie al dan niet van permanente aard is.',
owner=self)
self._materiaal = OTLAttribuut(field=KlLEACMateriaal,
naam='materiaal',
label='materiaal',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#AfschermendeConstructie.materiaal',
definition='Het gebruikte materiaal voor de afschermende constructie.',
owner=self)
self._metTandGroef = OTLAttribuut(field=BooleanField,
naam='metTandGroef',
label='met tand-groef',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#AfschermendeConstructie.metTandGroef',
definition='Geeft aan of de afschermende constructie bevestigd is aan de onderliggende laag door middel van een tand-groef aansluiting.',
owner=self)
self._productidentificatiecode = OTLAttribuut(field=DtcProductidentificatiecode,
naam='productidentificatiecode',
label='productidentificatiecode',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#AfschermendeConstructie.productidentificatiecode',
definition='De productidentificatiecode voor het bepalen van de code van het gebruikte product (bv. COPRO/BENOR).',
owner=self)
self._productnaam = OTLAttribuut(field=StringField,
naam='productnaam',
label='productnaam',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#AfschermendeConstructie.productnaam',
definition='Dit is de commerciële naam van de afschermende constructie.',
owner=self)
self._testrapport = OTLAttribuut(field=DtcDocument,
naam='testrapport',
label='testrapport',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#AfschermendeConstructie.testrapport',
usagenote='Attribuut uit gebruik sinds versie 2.0.0 ',
deprecated_version='2.0.0',
kardinaliteit_max='*',
definition='De testresultaten van een afschermende constructie.',
owner=self)
self._uitvoeringscertificatie = OTLAttribuut(field=DtcDocument,
naam='uitvoeringscertificatie',
label='uitvoeringscertificatie',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#AfschermendeConstructie.uitvoeringscertificatie',
usagenote='Bestanden van het type xlsx of pdf.',
definition='Documentatie van het certificaat.',
owner=self)
self._video = OTLAttribuut(field=DtcDocument,
naam='video',
label='video',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#AfschermendeConstructie.video',
usagenote='Attribuut uit gebruik sinds versie 2.0.0 ',
deprecated_version='2.0.0',
kardinaliteit_max='*',
definition='Video van de testen op afschermende constructies.',
owner=self)
@property
def certificaathouder(self):
"""De houder van het uitvoeringscertificaat."""
return self._certificaathouder.get_waarde()
@certificaathouder.setter
def certificaathouder(self, value):
self._certificaathouder.set_waarde(value, owner=self)
@property
def isPermanent(self):
"""Vermelding of de afschermende constructie al dan niet van permanente aard is."""
return self._isPermanent.get_waarde()
@isPermanent.setter
def isPermanent(self, value):
self._isPermanent.set_waarde(value, owner=self)
@property
def materiaal(self):
"""Het gebruikte materiaal voor de afschermende constructie."""
return self._materiaal.get_waarde()
@materiaal.setter
def materiaal(self, value):
self._materiaal.set_waarde(value, owner=self)
@property
def metTandGroef(self):
"""Geeft aan of de afschermende constructie bevestigd is aan de onderliggende laag door middel van een tand-groef aansluiting."""
return self._metTandGroef.get_waarde()
@metTandGroef.setter
def metTandGroef(self, value):
self._metTandGroef.set_waarde(value, owner=self)
@property
def productidentificatiecode(self):
"""De productidentificatiecode voor het bepalen van de code van het gebruikte product (bv. COPRO/BENOR)."""
return self._productidentificatiecode.get_waarde()
@productidentificatiecode.setter
def productidentificatiecode(self, value):
self._productidentificatiecode.set_waarde(value, owner=self)
@property
def productnaam(self):
"""Dit is de commerciële naam van de afschermende constructie."""
return self._productnaam.get_waarde()
@productnaam.setter
def productnaam(self, value):
self._productnaam.set_waarde(value, owner=self)
@property
def testrapport(self):
"""De testresultaten van een afschermende constructie."""
return self._testrapport.get_waarde()
@testrapport.setter
def testrapport(self, value):
self._testrapport.set_waarde(value, owner=self)
@property
def uitvoeringscertificatie(self):
"""Documentatie van het certificaat."""
return self._uitvoeringscertificatie.get_waarde()
@uitvoeringscertificatie.setter
def uitvoeringscertificatie(self, value):
self._uitvoeringscertificatie.set_waarde(value, owner=self)
@property
def video(self):
"""Video van de testen op afschermende constructies."""
return self._video.get_waarde()
@video.setter
def video(self, value):
self._video.set_waarde(value, owner=self)
|
import os
from solartf.core.generator import KerasGeneratorBase
from solartf.data.image.processor import ImageInput
class ImageDirectoryGenerator(KerasGeneratorBase):
def __init__(self,
image_dir,
image_type,
image_shape,
image_format=None):
if image_format is None:
image_format = ('.png', '.jpg', '.jpeg')
self.image_dir = image_dir
self.image_type = image_type
self.image_shape = image_shape
self.image_format = image_format
self.image_path_list = [os.path.join(root, fname) for root, _, fnames in os.walk(self.image_dir)
for fname in fnames if fname.endswith(image_format)]
def __len__(self):
return len(self.image_path_list)
def on_epoch_end(self):
pass
def __getitem__(self, index):
input_image_path = self.image_path_list[index]
image_input = ImageInput(input_image_path,
image_type=self.image_type,
image_shape=self.image_shape)
return image_input
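# Usage sketch (directory, type and shape values are illustrative; valid
# image_type values depend on solartf.data.image.processor.ImageInput):
#   generator = ImageDirectoryGenerator(image_dir='./images', image_type='bgr',
#                                       image_shape=(480, 640, 3))
#   image_input = generator[0]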
|
"""
This program generates a gene/cell counts table for the mutations
found in a given set of vcf files
"""
import numpy as np
import vcf
import os
import csv
import pandas as pd
import sys
import multiprocessing as mp
import warnings
import click
warnings.simplefilter(action='ignore', category=FutureWarning)
def get_filenames_test():
""" get file names, for the test condition"""
files = []
for file in os.listdir(cwd + "artificalVCF/"):
PATH = cwd + 'artificalVCF/' + file
files.append(PATH)
return files
def get_filenames():
""" get file names based on specified path """
files = []
for file in os.listdir(cwd + "scVCF_filtered_all/"):
PATH = cwd + 'scVCF_filtered_all/' + file
files.append(PATH)
return files
def get_laud_db():
""" returns the COSMIC database after lung adeno filter """
print('setting up LAUD filtered database...')
#pHistList = database.index[database['Primary histology'] == 'carcinoma'].tolist()
pSiteList = database.index[database['Primary site'] == 'lung'].tolist()
#shared = list(set(pHistList) & set(pSiteList))
database_filter = database.iloc[pSiteList]
return database_filter
def get_genome_pos(sample):
""" returns the genome position string that will match against the
ones in COSMIC db """
try:
chr = str(sample[0])
chr = chr.replace("chr", "")
pos = int(sample[1])
ref = str(sample[3])
alt = str(sample[4])
if (len(ref) == 1) & (len(alt) == 1): # most basic case
secondPos = pos
genomePos = chr + ':' + str(pos) + '-' + str(secondPos)
elif (len(ref) > 1) & (len(alt) == 1):
secondPos = pos + len(ref)
genomePos = chr + ':' + str(pos) + '-' + str(secondPos)
elif (len(alt) > 1) & (len(ref) == 1):
secondPos = pos + len(alt)
genomePos = chr + ':' + str(pos) + '-' + str(secondPos)
else: # multibase-for-multibase substitution
secondPos = '1'
genomePos = chr + ':' + str(pos) + '-' + str(secondPos)
except:
# NOTE: Should probably throw here.
genomePos = 'ERROR'
return genomePos
def parse_genome_pos(pos_str):
    chrom, pos_range = pos_str.split(':')[0:2]
    start_pos, end_pos = pos_range.split('-')[0:2]
    # Cast positions to int so these tuples compare equal to the integer
    # positions produced by make_genome_pos.
    return (chrom, int(start_pos), int(end_pos))
def make_genome_pos(record):
# Although including `chr` in the CHR column constitutes malformed VCF, it
# may be present, so it should be removed.
CHROM = record.CHROM.replace("chr", "")
POS = record.POS
ref_len = len(record.REF)
alt_len = len(record.ALT)
if ref_len == 1 and alt_len == 1:
return (CHROM, POS, POS)
elif ref_len > 1 and alt_len == 1:
return (CHROM, POS, POS + ref_len)
elif alt_len > 1 and ref_len == 1:
return (CHROM, POS, POS + alt_len)
else: # multibase-for-multibase substitution
return (CHROM, POS, 1)
def find_gene_name(genome_pos):
""" return the gene name from a given genome position string
(ie. '1:21890111-21890111'), by querying the hg38-plus.gtf """
chrom, pos_start, pos_end = genome_pos
# work on hg38_gtf
chromStr = 'chr' + str(chrom)
hg38_gtf_filt = hg38_gtf.where(hg38_gtf[0] == chromStr).dropna()
hg38_gtf_filt = hg38_gtf_filt.where(hg38_gtf_filt[3] <= int(pos_start)).dropna() # lPos good
hg38_gtf_filt = hg38_gtf_filt.where(hg38_gtf_filt[4] >= int(pos_end)).dropna() # rPos good
try:
returnStr = str(hg38_gtf_filt.iloc[0][8]) # keep just the gene name / metadata col
returnStr = returnStr.split(';')[1]
returnStr = returnStr.strip(' gene_name')
returnStr = returnStr.strip(' ')
returnStr = returnStr.strip('"')
except IndexError:
returnStr = ''
return returnStr
def find_cell_mut_gene_names(filename):
""" creates a dictionary obj where each key is a cell and each value
is a list of the genes we found mutations for in that cell """
tup = []
cell = filename.replace(cwd, "")
cell = cell.replace('scVCF_filtered_all/', "")
cell = cell.replace(".vcf", "")
# PyVCF documentation claims that it automatically infers compression type
# from the file extension.
vcf_reader = vcf.Reader(filename=filename)
filtered_gene_names = []
for record in vcf_reader:
genome_pos = make_genome_pos(record)
# TODO: Filter out duplicates?
# And is it better to filter out positional duplicates or gene name
# duplicates?
# No COSMIC filter in test mode
if not test_bool:
# Skip this gene if it isn't in the laud_db
            if genome_pos not in genome_pos_laud_db:
                continue
gene_name = find_gene_name(genome_pos)
filtered_gene_names.append(gene_name)
filtered_series = pd.Series(filtered_gene_names)
tup = (cell, filtered_series)
return tup
def format_dataframe(raw_df):
""" creates the cell/mutation counts table from the raw output that
get_gene_cell_muts_counts provides """
cellNames = list(raw_df.index)
genesList = []
for i in range(0, raw_df.shape[0]):
currList = list(raw_df.iloc[i].unique()) # unique genes for curr_cell
for elm in currList:
if elm not in genesList:
genesList.append(elm)
genesList1 = pd.Series(genesList)
df = pd.DataFrame(columns=genesList1, index=cellNames) # initialize blank dataframe
for col in df.columns: # set everybody to zero
df[col] = 0
for i in range(0,raw_df.shape[0]):
currCell = raw_df.index[i]
currRow = raw_df.iloc[i]
for currGene in currRow:
df[currGene][currCell] += 1
return df
""" get cmdline input """
@click.command()
@click.option('--nthread', default = 64, prompt='number of threads', required=True, type=int)
@click.option('--test', default = False)
@click.option('--wrkdir', default = '/home/ubuntu/cerebra/cerebra/wrkdir/', prompt='s3 import directory', required=True)
def get_mutationcounts_table(nthread, test, wrkdir):
""" generate a cell x gene mutation counts table from a set of germline filtered vcfs """
global database
global database_laud
global hg38_gtf
global genome_pos_laud_db
global cwd
global test_bool
cwd = wrkdir
test_bool = test
database = pd.read_csv(cwd + "CosmicGenomeScreensMutantExport.tsv", delimiter = '\t')
database_laud = get_laud_db()
genome_pos_laud_db = set(map(parse_genome_pos, database_laud['Mutation genome position']))
hg38_gtf = pd.read_csv(cwd + 'hg38-plus.gtf', delimiter = '\t', header = None)
if test:
fNames = get_filenames_test()
else:
fNames = get_filenames()
print('creating pool')
p = mp.Pool(processes=nthread)
print('running...')
try:
cell_genes_pairs = p.map(find_cell_mut_gene_names, fNames, chunksize=1) # default chunksize=1
finally:
p.close()
p.join()
# convert to dictionary
cells_dict = {}
naSeries = pd.Series([np.nan])
for cell, gene_names in cell_genes_pairs:
if len(gene_names.index) == 0:
toAdd = {cell:naSeries}
else:
toAdd = {cell:gene_names}
cells_dict.update(toAdd)
print('writing file')
filterDict_pd = pd.DataFrame.from_dict(cells_dict, orient="index") # orient refers to row/col orientation
filterDict_format = format_dataframe(filterDict_pd)
filterDict_format.to_csv(cwd + "intermediate.csv")
intermediate = pd.read_csv(cwd + 'intermediate.csv')
# rename 0th col
intermediate.rename(columns={'Unnamed: 0' :'cellName'}, inplace=True)
cols = intermediate.columns
dropList = []
for col in cols:
if 'Unnamed' in col:
dropList.append(col)
# want to drop the cols that contain 'Unnamed'
intermediate = intermediate.drop(dropList, axis=1)
if test_bool:
intermediate.to_csv(cwd + "test/mutationcounts_table/geneCellMutationCounts_artifical.csv", index=False)
else:
intermediate.to_csv(cwd + "geneCellMutationCounts.csv", index=False)
cmd = 'rm ' + cwd + 'intermediate.csv'
os.system(cmd)
|
import unittest.mock
import freezegun
import pandas as pd
import pytest
import functions
import src.getraenkeKasse.getraenkeapp
columns_purchases = ['timestamp', 'user', 'code', 'paid']
columns_products = ['nr', 'code', 'desc', 'price', 'stock']
TEST_PRODUCTS_FILE = 'test_file_1'
TEST_PURCHASES_FILE = 'test_file_2'
TEST_USERS_FILE = 'test_file_3'
def get_stock_for_product(gk: src.getraenkeKasse.getraenkeapp.GetraenkeApp, code: str) -> int:
return gk.file_contents.products.loc[gk.file_contents.products['code'] == code, 'stock'].item()
def get_purchases_for_user(gk: src.getraenkeKasse.getraenkeapp.GetraenkeApp, user: str) -> pd.DataFrame:
return gk.file_contents.purchases[gk.file_contents.purchases['user'] == user]
@pytest.fixture(autouse=False)
def mock_functions(request, monkeypatch):
mock_git_pull = unittest.mock.Mock(return_value=request.param['status_git_pull'])
monkeypatch.setattr(target=functions, name='git_pull', value=mock_git_pull)
mock_git_push = unittest.mock.Mock(return_value=True)
monkeypatch.setattr(target=functions, name='git_push', value=mock_git_push)
mock_write_csv = unittest.mock.Mock(return_value=None)
monkeypatch.setattr(target=functions, name='write_csv_file', value=mock_write_csv)
return mock_git_pull, mock_git_push, mock_write_csv
@pytest.fixture(autouse=False)
def mock_getraenkekasse():
with unittest.mock.patch('wx.App', autospec=True), \
unittest.mock.patch('wx.SystemSettings', autospec=True), \
unittest.mock.patch('wx.Font', autospec=True):
gk = src.getraenkeKasse.getraenkeapp.GetraenkeApp(button_height=1,
button_width=1,
font_size=1,
offset=1,
file_names={'products': TEST_PRODUCTS_FILE,
'purchases': TEST_PURCHASES_FILE,
'users': TEST_USERS_FILE})
products = [[1, '1111111111111', 'xxxx', 0.60, 20], [2, '2222222222222', 'yyyy', 0.80, 0]]
gk.file_contents.products = pd.DataFrame(products, columns=columns_products)
purchases = [['2019-12-10T12:20:00', 'aaa', '1111111111111', False],
['2019-12-10T16:30:00', 'bbb', '2222222222222', False],
['2019-12-10T16:35:00', 'bbb', '2222222222222', False]]
gk.file_contents.purchases = pd.DataFrame(purchases, columns=columns_purchases)
gk.file_contents.users = None
return gk
def test_set_stock_for_product(mock_getraenkekasse):
mock_getraenkekasse._set_stock_for_product(nr=1, stock=23)
assert get_stock_for_product(mock_getraenkekasse, '1111111111111') == 23
@pytest.mark.parametrize('mock_functions', [dict(status_git_pull=True)], indirect=True)
def test_replenish_stock(mock_getraenkekasse, mock_functions):
mock_git_pull, mock_git_push, mock_write_csv = mock_functions
mock_getraenkekasse.replenish_stock([[1, 20, 23], [2, 0, 0]])
assert get_stock_for_product(mock_getraenkekasse, '1111111111111') == 23
assert get_stock_for_product(mock_getraenkekasse, '2222222222222') == 0
mock_git_pull.assert_called_once()
mock_git_push.assert_called_once()
mock_write_csv.assert_called_once_with(file=TEST_PRODUCTS_FILE,
df=mock_getraenkekasse.file_contents.products)
def test_decrease_stock_for_product0(mock_getraenkekasse):
status = mock_getraenkekasse._decrease_stock_for_product('1111111111111')
assert status is True
assert get_stock_for_product(mock_getraenkekasse, '1111111111111') == 19
def test_decrease_stock_for_product1(mock_getraenkekasse):
status = mock_getraenkekasse._decrease_stock_for_product('2222222222222')
assert status is False
assert get_stock_for_product(mock_getraenkekasse, '2222222222222') == 0
def test_set_paid_for_user(mock_getraenkekasse):
mock_getraenkekasse._set_paid_for_user(user='bbb')
assert mock_getraenkekasse.file_contents.purchases['paid'].tolist() == [False, True, True]
@pytest.mark.parametrize('mock_functions', [dict(status_git_pull=True)], indirect=True)
def test_pay_for_user(mock_getraenkekasse, mock_functions):
mock_git_pull, mock_git_push, mock_write_csv = mock_functions
mock_getraenkekasse.pay_for_user('aaa')
assert mock_getraenkekasse.file_contents.purchases['paid'].tolist() == [True, False, False]
mock_git_pull.assert_called_once_with(path_repo='./.')
mock_git_push.assert_called_once_with(path_repo='./.', files=[TEST_PURCHASES_FILE],
commit_message='pay for user aaa via getraenkeKasse.py')
mock_write_csv.assert_called_once_with(file=TEST_PURCHASES_FILE,
df=mock_getraenkekasse.file_contents.purchases)
@freezegun.freeze_time('2019-12-10 17:00:00')
@pytest.mark.parametrize('mock_functions', [dict(status_git_pull=True)], indirect=True)
def test_make_purchase_stock_available(mock_getraenkekasse, mock_functions):
mock_git_pull, mock_git_push, mock_write_csv = mock_functions
expected_df = pd.DataFrame([['2019-12-10T12:20:00', 'aaa', '1111111111111', False],
['2019-12-10T17:00:00', 'aaa', '1111111111111', False]],
columns=columns_purchases, index=[0, 3])
mock_getraenkekasse.make_purchase(user='aaa', code='1111111111111')
pd.testing.assert_frame_equal(get_purchases_for_user(gk=mock_getraenkekasse, user='aaa'), expected_df)
assert get_stock_for_product(mock_getraenkekasse, '1111111111111') == 19
mock_git_pull.assert_called_once_with(path_repo='./.')
mock_git_push.assert_called_once_with(path_repo='./.', files=[TEST_PURCHASES_FILE,
TEST_PRODUCTS_FILE],
commit_message='purchase via getraenkeKasse.py')
mock_write_csv.assert_has_calls([unittest.mock.call(file=TEST_PURCHASES_FILE,
df=mock_getraenkekasse.file_contents.purchases),
unittest.mock.call(file=TEST_PRODUCTS_FILE,
df=mock_getraenkekasse.file_contents.products)])
@freezegun.freeze_time('2019-12-10 18:00:00')
@pytest.mark.parametrize('mock_functions', [dict(status_git_pull=True)], indirect=True)
def test_make_purchase_stock_empty(mock_getraenkekasse, mock_functions):
mock_git_pull, mock_git_push, mock_write_csv = mock_functions
expected_df = pd.DataFrame([['2019-12-10T12:20:00', 'aaa', '1111111111111', False],
['2019-12-10T18:00:00', 'aaa', '2222222222222', False]],
columns=columns_purchases, index=[0, 3])
mock_getraenkekasse.make_purchase(user='aaa', code='2222222222222')
pd.testing.assert_frame_equal(get_purchases_for_user(gk=mock_getraenkekasse, user='aaa'), expected_df)
assert get_stock_for_product(mock_getraenkekasse, '2222222222222') == 0
mock_git_pull.assert_called_once_with(path_repo='./.')
mock_git_push.assert_called_once_with(path_repo='./.', files=[TEST_PURCHASES_FILE],
commit_message='purchase via getraenkeKasse.py')
mock_write_csv.assert_called_once_with(file=TEST_PURCHASES_FILE,
df=mock_getraenkekasse.file_contents.purchases)
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import json
import logging
import os
from argparse import Namespace
import torch
from torch.utils.tensorboard import SummaryWriter
import torch.nn as nn
import numpy as np
import rouge_score
from rouge_score import rouge_scorer
from fairseq import metrics, options, utils
from fairseq.data import (
AppendTokenDataset,
ConcatDataset,
LanguagePairDataset,
PrependTokenDataset,
StripTokenDataset,
TruncateDataset,
data_utils,
encoders,
indexed_dataset,
)
from fairseq.tasks import LegacyFairseqTask, register_task
from fairseq.tasks.translation import TranslationTask
EVAL_BLEU_ORDER = 4
logger = logging.getLogger(__name__)
@register_task("simple_vae_translation")
class SimpleVaeTranslationTask(TranslationTask):
def __init__(self, args, src_dict, tgt_dict):
super().__init__(args, src_dict, tgt_dict)
self.src_dict = src_dict
self.tgt_dict = tgt_dict
self.period = int(args.period)
self.store_gradient = False
self.mse_loss = nn.MSELoss()
self.rouge_scorer = rouge_scorer.RougeScorer(['rouge1'], use_stemmer=False)
@staticmethod
def add_args(parser):
TranslationTask.add_args(parser)
parser.add_argument('--period',
help='use pretrained model when training [True, ...]')
parser.add_argument("--grad")
def calculate_rouge(self, tensor1, tensor2):
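        # Computes a per-example ROUGE-1 F-measure, treating the integer token
        # ids as whitespace-separated "words".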
rouges = []
for i in range(tensor1.size(0)):
s1, s2 = tensor1[i,:], tensor2[i,:]
s1, s2 = s1.tolist(), s2.tolist()
s1, s2 = " ".join(list(map(str, s1))), " ".join(list(map(str, s2)))
rouge = self.rouge_scorer.score(s1,s2)['rouge1'][2]
rouges.append(rouge)
return torch.tensor(rouges).cuda().unsqueeze(dim=1)
    def optimize_rouge(self, sample, model, num_updates, ignore_grad=False):
optimizer = torch.optim.Adam(model.rouge_predict_net.parameters(), lr=1e-4)
net_input = sample['net_input']['src_tokens'] # [B, T1]
src_lengths = sample['net_input']['src_lengths']
target = sample['target'] # [B, T2]
# --- predict ROUGE score
encoder_out = model.encoder.forward(net_input, src_lengths)
alpha = model.rouge_predict_net(encoder_out['encoder_out'][0])
# --- calculate ROUGE score
rouges = self.calculate_rouge(net_input, target)
loss = self.mse_loss(alpha, rouges)
# --- optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
return loss
def train_step(
self, sample, model, criterion, optimizer, update_num, ignore_grad=False
):
"""
Do forward and backward, and return the loss as computed by *criterion*
for the given *model* and *sample*.
Args:
sample (dict): the mini-batch. The format is defined by the
:class:`~fairseq.data.FairseqDataset`.
model (~fairseq.models.BaseFairseqModel): the model
criterion (~fairseq.criterions.FairseqCriterion): the criterion
optimizer (~fairseq.optim.FairseqOptimizer): the optimizer
update_num (int): the current update
ignore_grad (bool): multiply loss by 0 if this is set to True
Returns:
tuple:
- the loss
- the sample size, which is used as the denominator for the
gradient
- logging outputs to display while training
"""
model.train()
model.set_num_updates(update_num)
rouge_loss = self.optimize_rouge(sample, model, update_num)
with torch.autograd.profiler.record_function("forward"):
loss, sample_size, logging_output = criterion(model, sample)
logging_output['rouge_loss'] = rouge_loss.item()
if ignore_grad:
loss *= 0
with torch.autograd.profiler.record_function("backward"):
optimizer.backward(loss)
if self.store_gradient and update_num%50==0:
target_names = [ "XZtoV.weight", "XtoV.weight", "ZtoV.weight"]
dic = {}
for name, param in model.named_parameters():
if name in target_names:
#dic[name]=param.grad.data.norm(2).item()
dic[name]=param.grad.data.abs().mean().item()
#torch.save(param.grad.data.norm(2), f"{self.gradient_dir}/{name}_{update_num}.pt")
# self.writer.add_scalars("Grad/Norm", dic, update_num)
# if update_num==200:
# for param in model.encoder.parameters():
# param.requires_grad = False
return loss, sample_size, logging_output
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
import numpy as np
import tensorflow as tf # TF2
from tensorflow_examples.lite.model_maker.core.data_util import image_dataloader
def _fill_image(rgb, image_size):
r, g, b = rgb
return np.broadcast_to(
np.array([[[r, g, b]]], dtype=np.uint8),
shape=(image_size, image_size, 3))
def _write_filled_jpeg_file(path, rgb, image_size):
tf.keras.preprocessing.image.save_img(path, _fill_image(rgb, image_size),
'channels_last', 'jpeg')
class ImageDataLoaderTest(tf.test.TestCase):
def setUp(self):
super(ImageDataLoaderTest, self).setUp()
self.image_path = os.path.join(self.get_temp_dir(), 'random_image_dir')
if os.path.exists(self.image_path):
return
os.mkdir(self.image_path)
for class_name in ('daisy', 'tulips'):
class_subdir = os.path.join(self.image_path, class_name)
os.mkdir(class_subdir)
_write_filled_jpeg_file(
os.path.join(class_subdir, '0.jpeg'),
[random.uniform(0, 255) for _ in range(3)], 224)
def test_split(self):
ds = tf.data.Dataset.from_tensor_slices([[0, 1], [1, 1], [0, 0], [1, 0]])
data = image_dataloader.ImageClassifierDataLoader(ds, 4, 2, ['pos', 'neg'])
train_data, test_data = data.split(0.5, shuffle=False)
self.assertEqual(train_data.size, 2)
for i, elem in enumerate(train_data.dataset):
self.assertTrue((elem.numpy() == np.array([i, 1])).all())
self.assertEqual(train_data.num_classes, 2)
self.assertEqual(train_data.index_to_label, ['pos', 'neg'])
self.assertEqual(test_data.size, 2)
for i, elem in enumerate(test_data.dataset):
self.assertTrue((elem.numpy() == np.array([i, 0])).all())
self.assertEqual(test_data.num_classes, 2)
self.assertEqual(test_data.index_to_label, ['pos', 'neg'])
def test_from_folder(self):
data = image_dataloader.ImageClassifierDataLoader.from_folder(
self.image_path)
self.assertEqual(data.size, 2)
self.assertEqual(data.num_classes, 2)
self.assertEqual(data.index_to_label, ['daisy', 'tulips'])
for image, label in data.dataset:
self.assertTrue(label.numpy() == 1 or label.numpy() == 0)
if label.numpy() == 0:
raw_image_tensor = image_dataloader.load_image(
os.path.join(self.image_path, 'daisy', '0.jpeg'))
else:
raw_image_tensor = image_dataloader.load_image(
os.path.join(self.image_path, 'tulips', '0.jpeg'))
self.assertTrue((image.numpy() == raw_image_tensor.numpy()).all())
if __name__ == '__main__':
assert tf.__version__.startswith('2')
tf.test.main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Test with MuJoCo.
"""
import os
import time
import numpy as np
from itertools import count
# from pyrobolearn.simulators.bullet import Bullet
from pyrobolearn.simulators.mujoco import Mujoco
import mujoco_py as mujoco
# sim = Bullet(render=True)
sim = Mujoco(render=True, update_dynamically=True)
print("Gravity: {}".format(sim.get_gravity()))
print("Timestep: {}".format(sim.get_time_step()))
# sim.set_gravity(np.zeros(3))
# load floor
# floor = sim.load_floor(dimension=20)
print("qpos (before loading): ", sim.sim.data.qpos)
# create box
# box = sim.create_primitive_object(sim.GEOM_BOX, position=(0, 0, 2), mass=1, rgba_color=(1, 0, 0, 1))
# sphere = sim.create_primitive_object(sim.GEOM_SPHERE, position=[0.5, 0., 1.], mass=0, radius=0.05,
# rgba_color=(1, 0, 0, 0.5))
# cylinder = sim.create_primitive_object(sim.GEOM_CYLINDER, position=(0, 2, 2), mass=1)
# capsule = sim.create_primitive_object(sim.GEOM_CAPSULE, position=(0, -2, 2), mass=1, rgba_color=(0, 0, 1, 1))
print("qpos (after loading sphere): ", sim.sim.data.qpos)
# print("Sphere id: ", sphere)
# print("Num bodies before loading robot: ", sim.num_bodies())
# load robot
path = os.path.dirname(os.path.abspath(__file__)) + '/../robots/urdfs/rrbot/pendulum.urdf'
# path = os.path.dirname(os.path.abspath(__file__)) + '/../robots/urdfs/rrbot/rrbot.urdf'
path = os.path.dirname(os.path.abspath(__file__)) + '/../robots/urdfs/franka/franka.urdf'
# path = os.path.dirname(os.path.abspath(__file__)) + '/../robots/urdfs/kuka/kuka_iiwa/iiwa14.urdf'
path = os.path.dirname(os.path.abspath(__file__)) + '/../robots/urdfs/hyq2max/hyq2max.urdf'
# path = os.path.dirname(os.path.abspath(__file__)) + '/../robots/urdfs/anymal/anymal.urdf'
# path = os.path.dirname(os.path.abspath(__file__)) + '/../robots/urdfs/centauro/centauro_stick.urdf'
robot_name = path.split('/')[-1].split('.')[0]
robot = sim.load_urdf(path, position=(0, 0, 0.8), use_fixed_base=True)
print("qpos (after loading robot): ", sim.sim.data.qpos)
print("Base position: ", sim.get_base_position(robot))
print("Num bodies after loading robot: ", sim.num_bodies())
# print("Robot")
# print("base name: ", sim.get_body_info(robot))
# print("mass: ", sim.get_base_mass(body_id=robot))
# sim.remove_body(sphere)
print(sim.sim.data.qpos)
# sim.step()
model = sim.model
mjc_sim = sim.sim
data = mjc_sim.data
# The ones that appear in the following are because of the floor
print("nbody", model.nbody - 1) # total number of links
# print("nuser_body", model.nuser_body)
print("njnt", model.njnt) # total number of joints
print("nq", model.nq) # total number of generalized coordinates (=num_actuated_joints); for free joints 7
print("nv", model.nv) # generalized velocities (nq - 1)
print("na", model.na)
print("nu", model.nu)
print("qpos", data.qpos)
print("qvel", data.qvel)
print("act", data.act)
# print("qpos", data.qpos, len(data.qpos)) # nqx1
print("body_dofnum: ", model.body_dofnum)
print("body_mass: ", model.body_mass)
print("body_subtreemass", model.body_subtreemass)
print("subtree_com", data.subtree_com)
print("body_xpos: ", data.body_xpos)
print("body pos: ", model.body_pos)
print("body_xquat: ", data.body_xquat)
# print("get_xpos: ", data.get_body_xpos(sim._bodies[sphere].tag_name))
print("xfrc_applied: ", data.xfrc_applied)
# joints
# print("jnt_type: ", [["free", "ball", "slide", "hinge"][idx] for idx in model.jnt_type])
# print("jnt_qposadr: ", model.jnt_qposadr)
data.body_xpos[1] = np.array(range(3))
num_joints = sim.num_joints(robot)
num_actuated_joints = sim.num_actuated_joints(robot)
num_links = sim.num_links(robot)
joint_ids = sim.get_joint_type_ids(robot, list(range(num_joints)))
joint_ids = np.array([i for i in range(num_joints) if joint_ids[i] != sim.JOINT_FIXED])
# define amplitude and angular velocity when moving the sphere
w = 0.01/2
r = 0.2
print("\nncam: ", model.ncam)
print("cam_xpos: ", data.cam_xpos)
print("cam_xmat: ", data.cam_xmat)
print("cam_fovy: ", model.cam_fovy)
print("Masses: ", sim.get_link_masses(robot))
print("Names: ", sim.get_link_names(robot))
print("Num links: ", num_links)
print("Num joints: ", num_joints)
print("Num actuated joints: ", num_actuated_joints)
print("Contacts: ", data.contact)
print("Sim state: ", mjc_sim.get_state())
print("time: ", data.time)
for i in range(num_joints):
print(sim.get_joint_info(robot, i))
for i in range(num_links):
print(sim.get_link_state(robot, i))
print("Jacobian: ", sim.calculate_jacobian(robot, num_actuated_joints))
data.qpos[:] = np.zeros(model.nq)
viewer = sim.viewer
print(viewer.cam)
print(dir(viewer.cam))
# sim.reset_joint_states(robot, positions=[8.84305270e-05, 7.11378917e-02, -1.68059886e-04, -9.71690439e-01,
# 1.68308810e-05, 3.71467111e-01, 5.62890805e-05])
print(sim.print_xml())
# TODO: the angles are reversed when setting qpos0
# TODO: the robots
if robot_name == 'franka':
positions = np.array([0.0277854, -0.97229678, -0.028778385, -2.427800237, -0.086976557, 1.442695354, -0.711514286,
0., 0.])
sim.reset_joint_states(robot, joint_ids=joint_ids, positions=positions)
elif robot_name == 'iiwa14':
sim.reset_joint_state(robot, joint_id=3, position=-np.pi/2)
elif robot_name == 'pendulum':
sim.reset_joint_state(robot, joint_id=1, position=np.pi/8)
# perform step
for t in count():
# print("nbody", model.nbody)
# print("njnt", model.njnt)
# print("nq", model.nq)
# print("nv", model.nv)
# print("na", model.na)
# print("nu", model.nu)
# print("qpos", mjc_sim.data.qpos) # nqx1
# print("body_dofnum: ", model.body_dofnum)
# print("body_mass: ", model.body_mass)
# print("body_subtreemass", model.body_subtreemass)
# if (t % 200) == 0:
# print("Resetting position")
# model.body_pos[1] = range(3)
# # data.qpos[:3] = range(3)
# print(model.body_pos)
# print(mjc_sim.data.body_xpos[1])
# if t % 200 == 0:
# # print(mjc_sim.data.subtree_com)
# pos = np.zeros(3)
# jacp = np.zeros(3 * model.nv)
# jacr = np.zeros(3 * model.nv)
# mujoco.functions.mj_jac(model, data, jacp, jacr, pos, 4)
# print(jacp)
# print(jacr)
# # model.body_pos[1] = range(3)
# # sim.reset_base_position(sphere, [2, -1, 3])
# position = np.array([0.5, r * np.cos(w * t + np.pi / 2), (1. - r) + r * np.sin(w * t + np.pi / 2)])
# sim.reset_base_position(sphere, position)
# data.qpos[:] = np.zeros(model.nq)
# print joint positions
# print(sim.get_joint_positions(robot))
# sim.set_joint_positions(robot, joint_ids, 0 * np.ones(num_actuated_joints))
# sim.reset_joint_states(robot, joint_ids=joint_ids, positions=positions)
# sim.set_joint_positions(robot, joint_ids, positions)
# sim.set_joint_positions(robot, joint_ids, [0., 0., 0., np.pi/2, 0., 0., 0.])
sim.set_joint_positions(robot, joint_ids, np.zeros(num_actuated_joints), kps=50, kds=1)
# sim.set_joint_positions(robot, joint_ids, np.pi/2 * np.ones(num_actuated_joints), kps=100, kds=10)
# sim.set_joint_positions(robot, joint_ids=1, positions=np.pi/2, kps=100, kds=10)
# sim.set_joint_velocities(robot, joint_ids, np.zeros(num_actuated_joints))
# sim.set_joint_velocities(robot, joint_ids, velocities=5 * np.ones(num_actuated_joints))
# sim.set_joint_torques(robot, joint_ids, torques=np.zeros(num_actuated_joints))
# sim.set_joint_torques(robot, joint_ids, torques=5 * np.ones(num_actuated_joints))
# if t == 500:
# cylinder = sim.create_primitive_object(sim.GEOM_CYLINDER, position=(0, 2, 2), mass=1)
# if t == 2000:
# sim.remove_body(cylinder)
# qpos = data.qpos
# print("time: ", data.time)
# # print(qpos)
# data.qpos[:] = np.zeros(len(qpos))
# print(data.xfrc_applied.shape)
# print(data.mocap_quat)
# print(mjc_sim.data.contact)
sim.step(sleep_time=sim.dt)
|
import sqlite3
class PipelineDatabase:
def __init__(self, name):
self.connection = sqlite3.connect(name)
self.cursor = self.connection.cursor()
def create_if_not_exists(self):
self.cursor.execute('''create table if not exists files
(file int, name text, suff text, format text, primary key (file))''')
self.cursor.execute('''create table if not exists programs
(program int, status int, name text, log text, out text, cmd text,
primary key(program))''')
self.cursor.execute('''create table if not exists files_programs
(parent int, child int)''')
self.cursor.execute('''create table if not exists programs_programs
(parent int, child int, label text)''')
self.cursor.execute('''create table if not exists programs_files
(parent int, child int)''')
def truncate_all(self):
self.cursor.execute('delete from files')
self.cursor.execute('delete from programs')
self.cursor.execute('delete from files_programs')
self.cursor.execute('delete from programs_programs')
self.cursor.execute('delete from programs_files')
def close(self):
self.connection.close()
def execute(self, sql):
self.cursor.execute(sql)
def fetchall(self):
return self.cursor.fetchall()
def commit(self):
self.connection.commit()
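if __name__ == '__main__':
    # Minimal usage sketch: build the schema in an in-memory SQLite database
    # and list the tables that were created.
    db = PipelineDatabase(':memory:')
    db.create_if_not_exists()
    db.execute("select name from sqlite_master where type='table'")
    print(db.fetchall())
    db.close()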
|
#!/usr/bin/env python3
from django.core.management.base import BaseCommand
from seqauto.models import JobScript, JobScriptStatus
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('--job_script_id', type=int, required=True)
parser.add_argument('--job_id')
def handle(self, *args, **options):
job_script_id = options["job_script_id"]
job_id = options.get("job_id")
job_script = JobScript.objects.get(id=job_script_id)
job_script.job_status = JobScriptStatus.SUBMITTED
if job_id:
job_script.job_id = job_id
job_script.save()
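# Example invocation (the command name is the file name under management/commands/,
# which is not visible here; the id values are illustrative):
#   python manage.py <command_name> --job_script_id 42 --job_id 12345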
|
import tkinter as tk
from tkinter import messagebox
import random
import numpy as np
window = tk.Tk()
window.title('sudoku')
window.geometry('600x600')
sudoku = np.array([[1, 9, 6, 4, 3, 8, 7, 5, 2],
[3, 8, 4, 5, 7, 2, 1, 6, 9],
[7, 2, 5, 6, 1, 9, 3, 4, 8],
[5, 7, 2, 1, 6, 3, 9, 8, 4],
[6, 3, 1, 8, 9, 4, 5, 2, 7],
[9, 4, 8, 2, 5, 7, 6, 1, 3],
[2, 5, 7, 9, 4, 1, 8, 3, 6],
[4, 1, 9, 3, 8, 6, 2, 7, 5],
[8, 6, 3, 7, 2, 5, 4, 9, 1]])
def change_root(shudu):
""" Change sudoku map"""
numofexchange = 1
temp_list = [1, 2, 3, 4, 5, 6, 7, 8, 9]
while numofexchange < 10:
random.shuffle(temp_list)
random_num = temp_list[1]
if random_num == 9:
for i in range(9):
for j in range(9):
if shudu[i][j] == random_num:
shudu[i][j] = 1
elif shudu[i][j] == 1:
shudu[i][j] = 9
else:
for i in range(9):
for j in range(9):
if shudu[i][j] == random_num:
shudu[i][j] = random_num+1
elif shudu[i][j] == random_num+1:
shudu[i][j] = random_num
numofexchange += 1
return shudu
def rowcolumn_exchange(shudu):
""" Row and column transformation"""
numofexchange = 1
temp_list = [1, 2, 3, 4, 5, 6, 7, 8]
while numofexchange < 4:
random.shuffle(temp_list)
rownum = temp_list[1]
if (rownum % 3) == 2:
shudu[[rownum, rownum-1], :] = shudu[[rownum-1, rownum], :]
else:
shudu[[rownum, rownum+1], :] = shudu[[rownum+1, rownum], :]
shudu = shudu.T
numofexchange += 1
return shudu
EntryList = []
s = set()
# tk.Text(window,height=1, width=5).grid(row = 15)
def newproblem(shudu):
temp_list1 = [4, 5, 6]
temp_list2 = [0, 1, 2, 3, 4, 5, 6, 7, 8]
for row in range(9):
random.shuffle(temp_list1)
num1 = temp_list1[1]
random.shuffle(temp_list2)
for col in range(num1):
shudu[row][temp_list2[col]] = 0
return shudu
def solving_sudoku():
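    # Validity check: start each row, column and 3x3 block with the full set
    # {1..9}, remove every filled value once, and report success (tag == 1)
    # only if all sets end up empty.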
col_num = []
row_num = []
sqr_num = [[0]*3 for _ in range(3)]
for i in range(9):
col_num.append(set(tuple(range(1, 10))))
row_num.append(set(tuple(range(1, 10))))
for i in range(3):
for j in range(3):
sqr_num[i][j] = set(tuple(range(1, 10)))
for i in range(9):
for j in range(9):
if sudoku[i][j] != 0:
if sudoku[i][j] in row_num[i]:
row_num[i].remove(sudoku[i][j])
if sudoku[i][j] in col_num[j]:
col_num[j].remove(sudoku[i][j])
if sudoku[i][j] in sqr_num[i//3][j//3]:
sqr_num[i//3][j//3].remove(sudoku[i][j])
tag = 1
for i in range(9):
if len(row_num[i]) != 0:
tag = 0
if len(col_num[i]) != 0:
tag = 0
for i in range(3):
for j in range(3):
if len(sqr_num[i][j]):
tag = 0
return tag
def insert_end():
try:
for (row, col) in s:
sudoku[row][col] = EntryList[row*9+col].get()
tag = solving_sudoku()
if tag == 1:
tk.messagebox.showinfo(title='恭喜', message='正确')
else:
tk.messagebox.showinfo(title='糟糕', message='检查一下哦,有些地方好像不对!')
except Exception:
tk.messagebox.showinfo(title='未完成数独', message='还有空格没有填入数字哦!')
def show(shudu):
for i in range(9):
for j in range(9):
if shudu[i][j] == 0:
EntryList.append(tk.Entry(window, font=("Calibri", 12), justify="center", width=6, fg="black",
highlightbackground="black", highlightcolor="red", highlightthickness=1, bd=0))
EntryList[i*9+j].grid(row=i, column=j, ipady=14)
s.add((i, j))
else:
EntryList.append(
tk.Label(window, text=shudu[i][j], font=("Calibri", 12), width=6))
EntryList[i*9+j].grid(row=i, column=j, ipady=14)
sudoku = change_root(sudoku)
sudoku = rowcolumn_exchange(sudoku)
print(sudoku)
sudoku = newproblem(sudoku)
show(sudoku)
b2 = tk.Button(window, text="提交", command=insert_end)
b2.grid(row=12, column=5)
window.mainloop()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
TODO
"""
import mir_eval
import bss_eval_base
class BSSEvalImages(bss_eval_base.BSSEvalBase):
"""
"""
def __init__(self, true_sources_list, estimated_sources_list, source_labels=None, algorithm_name=None,
do_mono=False, compute_permutation=True):
super(BSSEvalImages, self).__init__(true_sources_list=true_sources_list,
estimated_sources_list=estimated_sources_list,
source_labels=source_labels, do_mono=do_mono,
compute_permutation=compute_permutation)
self._mir_eval_func = mir_eval.separation.bss_eval_images
def _preprocess_sources(self):
reference, estimated = super(BSSEvalImages, self)._preprocess_sources()
if self.num_channels == 1:
raise Exception("Can't run bss_eval_images on mono audio signals!")
mir_eval.separation.validate(reference, estimated)
return reference, estimated
def _populate_scores_dict(self, bss_output):
sdr_list, isr_list, sir_list, sar_list, perm = bss_output # Unpack
assert len(sdr_list) == len(sir_list) \
== len(sar_list) == len(isr_list) == len(self.true_sources_list) * self.num_channels
self.scores[self.RAW_VALUES] = {self.SDR: sdr_list, self.ISR: isr_list, self.SIR: sir_list, self.SAR: sar_list,
self.PERMUTATION: perm}
idx = 0
for i, label in enumerate(self.source_labels):
self.scores[label] = {}
for ch in range(self.num_channels):
chan = 'Ch {}'.format(ch)
self.scores[label][chan] = {}
self.scores[label][chan][self.SDR] = sdr_list[perm[idx]]
self.scores[label][chan][self.ISR] = isr_list[perm[idx]]
self.scores[label][chan][self.SIR] = sir_list[perm[idx]]
self.scores[label][chan][self.SAR] = sar_list[perm[idx]]
idx += 1
self.scores[self.PERMUTATION] = perm
|
#! /usr/bin/env python
"""Post-install / configuration script for Iromlab"""
import os
import sys
import imp
import site
import sysconfig
from shutil import copyfile
import threading
import logging
import pythoncom
from win32com.client import Dispatch
try:
import tkinter as tk # Python 3.x
import tkinter.scrolledtext as ScrolledText
import tkinter.messagebox as tkMessageBox
except ImportError:
import Tkinter as tk # Python 2.x
import ScrolledText
import tkMessageBox
def errorExit(error):
"""Show error message in messagebox and then exit after userv presses OK"""
tkMessageBox.showerror("Error", error)
os._exit(0)
def get_reg(name, path):
"""Read variable from Windows Registry"""
import winreg
# From http://stackoverflow.com/a/35286642
try:
registry_key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, path, 0,
winreg.KEY_READ)
value, regtype = winreg.QueryValueEx(registry_key, name)
winreg.CloseKey(registry_key)
return value
except WindowsError:
return None
def main_is_frozen():
return (hasattr(sys, "frozen") or # new py2exe
hasattr(sys, "importers") # old py2exe
or imp.is_frozen("__main__")) # tools/freeze
def get_main_dir():
if main_is_frozen():
return os.path.dirname(sys.executable)
return os.path.dirname(sys.argv[0])
def post_install():
"""Install config file + pre-packaged tools to user dir +
Create a Desktop shortcut to the installed software
"""
# This is needed to avoid 'CoInitialize has not been called'
# error with Dispatch. See: https://stackoverflow.com/a/26753031
pythoncom.CoInitialize()
# Package name
packageName = 'iromlab'
# Scripts directory (location of launcher script)
scriptsDir = get_main_dir()
# Package directory (parent of scriptsDir)
packageDir = os.path.abspath(os.path.join(scriptsDir, os.pardir))
# Part 1: install config file
# Locate Windows user directory
userDir = os.path.expanduser('~')
# Config directory
configDirUser = os.path.join(userDir, packageName)
logging.info("User configuration directory: " + configDirUser)
# Create config directory if it doesn't exist
if not os.path.isdir(configDirUser):
logging.info("Creating user configuration directory ...")
try:
os.makedirs(configDirUser)
logging.info("Done!")
except IOError:
msg = 'could not create configuration directory'
errorExit(msg)
# Config file name
configFileUser = os.path.join(configDirUser, 'config.xml')
if not os.path.isfile(configFileUser):
# No config file in user dir, so copy it from location in package.
# Location is /iromlab/conf/config.xml in 'site-packages' directory
        # (if installed with pip)
logging.info("Copying configuration file to user directory ...")
# Locate global site-packages dir (this returns multiple entries)
sitePackageDirsGlobal = site.getsitepackages()
# Assumptions: site package dir is called 'site-packages' and is
# unique (?)
for directory in sitePackageDirsGlobal:
if 'site-packages' in directory:
sitePackageDirGlobal = directory
try:
logging.info("Global site package directory: " + sitePackageDirGlobal)
except:
pass
# Locate user site-packages dir
sitePackageDirUser = site.getusersitepackages()
logging.info("User site package directory: " + sitePackageDirUser)
# Determine which site package dir to use
if packageDir in sitePackageDirGlobal:
sitePackageDir = sitePackageDirGlobal
elif packageDir in sitePackageDirUser:
sitePackageDir = sitePackageDirUser
else:
msg = 'could not establish package dir to use'
errorExit(msg)
logging.info("Site package directory: " + sitePackageDir)
# Construct path to config file
configFilePackage = os.path.join(sitePackageDir, packageName,
'conf', 'config.xml')
if os.path.isfile(configFilePackage):
try:
copyfile(configFilePackage, configFileUser)
logging.info("Done!")
except IOError:
msg = 'could not copy configuration file to ' + configFileUser
errorExit(msg)
# This should never happen but who knows ...
else:
msg = 'no configuration file found in package'
errorExit(msg)
# Part 2: create Desktop shortcut
logging.info("Creating desktop shortcut ...")
try:
# Target of shortcut
target = os.path.join(scriptsDir, packageName + '.exe')
# Name of link file
linkName = packageName + '.lnk'
# Read location of Windows desktop folder from registry
regName = 'Desktop'
regPath = r'Software\Microsoft\Windows\CurrentVersion\Explorer\User Shell Folders'
desktopFolder = os.path.normpath(get_reg(regName, regPath))
logging.info("Desktop directory: " + desktopFolder)
# Path to location of link file
pathLink = os.path.join(desktopFolder, linkName)
shell = Dispatch('WScript.Shell')
shortcut = shell.CreateShortCut(pathLink)
shortcut.Targetpath = target
shortcut.WorkingDirectory = scriptsDir
shortcut.IconLocation = target
shortcut.save()
logging.info("Done!")
except Exception:
msg = 'Failed to create desktop shortcut'
errorExit(msg)
msg = 'Iromlab configuration completed successfully, click OK to exit!'
tkMessageBox.showinfo("Info", msg)
os._exit(0)
class TextHandler(logging.Handler):
"""This class allows you to log to a Tkinter Text or ScrolledText widget
Adapted from Moshe Kaplan:
https://gist.github.com/moshekaplan/c425f861de7bbf28ef06
"""
def __init__(self, text):
# run the regular Handler __init__
logging.Handler.__init__(self)
# Store a reference to the Text it will log to
self.text = text
def emit(self, record):
msg = self.format(record)
def append():
self.text.configure(state='normal')
self.text.insert(tk.END, msg + '\n')
self.text.configure(state='disabled')
# Autoscroll to the bottom
self.text.yview(tk.END)
# This is necessary because we can't modify the Text from other threads
self.text.after(0, append)
class myGUI(tk.Frame):
"""This class defines the graphical user interface"""
def __init__(self, parent, *args, **kwargs):
tk.Frame.__init__(self, parent, *args, **kwargs)
self.root = parent
self.build_gui()
def build_gui(self):
# Build GUI
self.root.title('Iromlab Configuration Tool')
self.root.option_add('*tearOff', 'FALSE')
self.grid(column=0, row=0, sticky='ew')
self.grid_columnconfigure(0, weight=1, uniform='a')
self.grid_columnconfigure(1, weight=1, uniform='a')
self.grid_columnconfigure(2, weight=1, uniform='a')
self.grid_columnconfigure(3, weight=1, uniform='a')
# Add text widget to display logging info
st = ScrolledText.ScrolledText(self, state='disabled')
st.configure(font='TkFixedFont')
st.grid(column=0, row=1, sticky='w', columnspan=4)
# Create textLogger
text_handler = TextHandler(st)
# Logging configuration
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s')
# Add the handler to logger
logger = logging.getLogger()
logger.addHandler(text_handler)
def main():
"""Main function"""
root = tk.Tk()
myGUI(root)
t1 = threading.Thread(target=post_install, args=[])
t1.start()
root.mainloop()
t1.join()
if __name__ == "__main__":
main()
|
import h5py
import numpy as np
import pandas as pd
import os
from argparse import ArgumentParser
def dataframe_to_deepsurv_ds(df, event_col='Event', time_col='Time'):
# Extract the event and time columns as numpy arrays
e = df[event_col].values.astype(np.int32)
t = df[time_col].values.astype(np.float32)
# Extract the patient's covariates as a numpy array
x_df = df.drop([event_col, time_col], axis=1)
x = x_df.values.astype(np.float32)
    # Return the DeepSurv dataset as a dict of numpy arrays
return {
'x': x,
'e': e,
't': t
}
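# Illustrative sketch (not part of the original script): what
# dataframe_to_deepsurv_ds returns for a tiny frame using the default
# 'Event'/'Time' column names (the 'Age' covariate is hypothetical).
#
#   example = pd.DataFrame({'Age': [61.0, 54.0],
#                           'Event': [1, 0],
#                           'Time': [2.5, 4.1]})
#   ds = dataframe_to_deepsurv_ds(example)
#   # ds['x'] -> array([[61.], [54.]], dtype=float32)
#   # ds['e'] -> array([1, 0], dtype=int32)
#   # ds['t'] -> array([2.5, 4.1], dtype=float32)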
def dataframes_to_hd5(df, ofile, event_col, time_col):
with h5py.File(ofile, 'w') as h:
for k in df:
ds = dataframe_to_deepsurv_ds(df[k], event_col, time_col)
group = h.create_group(k)
group.create_dataset('x', data=ds['x'])
group.create_dataset('e', data=ds['e'])
group.create_dataset('t', data=ds['t'])
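# Illustrative sketch (not part of the original script): for the
# {'train': ..., 'test': ...} dicts built below, dataframes_to_hd5 writes one
# HDF5 group per key with the three DeepSurv datasets inside each group:
#
#   /train/x   float32, shape (n_train, n_covariates)
#   /train/e   int32,   shape (n_train,)
#   /train/t   float32, shape (n_train,)
#   /test/x, /test/e, /test/t   (same layout for the test split)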
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('ifile_os')
parser.add_argument('ifile_pfs')
# parser.add_argument('-e', '--event_col', default='OSEvent')
# parser.add_argument('-t', '--time_col', default='TTDy')
# parser.add_argument('--txcol', type=str, default='SBRT')
# parser.add_argument('--drop', help='drop columns', nargs='+', type=str)
# parser.add_argument('--droprows', help='drop rows where [cols] have value --droprowsval', nargs='+', type=str)
# parser.add_argument(
# '--droprowsval', help='value at which to drop the rows from --droprows, default 1', type=int, default=1)
# parser.add_argument('--droprows2', help='drop rows where [cols] have value --droprowsval2', nargs='+', type=str)
# parser.add_argument(
# '--droprowsval2', help='value at which to drop the rows from --droprows2, default 0', type=int, default=0)
args = parser.parse_args()
print(args)
df = pd.read_csv(args.ifile_os)
# print(df)
drop_sbrtVS = ['Treatment', 'RFA', 'SBRT_OR_RFA']
drop_rfaVS = ['Treatment', 'SBRT', 'SBRT_OR_RFA']
drop_sbrtORrfa = ['Treatment', 'SBRT', 'RFA']
#
# THIS IS FOR OS FIRST
frac = 0.5
ds = {
'SBRT_train': df[df.SBRT == 1].sample(frac=frac),
'RFA_train': df[df.RFA == 1].sample(frac=frac),
'NONE_train': df[df.SBRT_OR_RFA == 0].sample(frac=frac)
}
ds |= {
'SBRT_test': df.loc[df[df.SBRT == 1].index.symmetric_difference(ds['SBRT_train'].index)],
'RFA_test': df.loc[df[df.RFA == 1].index.symmetric_difference(ds['RFA_train'].index)],
'NONE_test': df.loc[df[df.SBRT_OR_RFA == 0].index.symmetric_difference(ds['NONE_train'].index)],
}
df_sbrtVSnone = {
'train': pd.concat([ds['SBRT_train'], ds['NONE_train']]).drop(columns=drop_sbrtVS),
'test': pd.concat([ds['SBRT_test'], ds['NONE_test']]).drop(columns=drop_sbrtVS)
}
df_rfaVSnone = {
'train': pd.concat([ds['RFA_train'], ds['NONE_train']]).drop(columns=drop_rfaVS),
'test': pd.concat([ds['RFA_test'], ds['NONE_test']]).drop(columns=drop_rfaVS)
}
df_sbrtVSrfa = {
'train': pd.concat([ds['SBRT_train'], ds['RFA_train']]).drop(columns=drop_sbrtVS),
'test': pd.concat([ds['SBRT_test'], ds['RFA_test']]).drop(columns=drop_sbrtVS)
}
df_sbrtORrfa = {
'train': pd.concat([ds['SBRT_train'], ds['RFA_train'], ds['NONE_train']]).drop(columns=drop_sbrtORrfa),
'test': pd.concat([ds['SBRT_test'], ds['RFA_test'], ds['NONE_test']]).drop(columns=drop_sbrtORrfa)
}
ofile_os = os.path.join(os.path.dirname(args.ifile_os), 'liver_os_sbrtVSnone.hd5')
dataframes_to_hd5(df_sbrtVSnone, ofile_os, 'OSEvent', 'TTDy')
ofile_os = os.path.join(os.path.dirname(args.ifile_os), 'liver_os_rfaVSnone.hd5')
dataframes_to_hd5(df_rfaVSnone, ofile_os, 'OSEvent', 'TTDy')
ofile_os = os.path.join(os.path.dirname(args.ifile_os), 'liver_os_sbrtVSrfa.hd5')
dataframes_to_hd5(df_sbrtVSrfa, ofile_os, 'OSEvent', 'TTDy')
ofile_os = os.path.join(os.path.dirname(args.ifile_os), 'liver_os_sbrtORrfa.hd5')
dataframes_to_hd5(df_sbrtORrfa, ofile_os, 'OSEvent', 'TTDy')
#
# USE INDICES FROM OS FOR PFS
df_PFS = pd.read_csv(args.ifile_pfs)
df_sbrtVSnone_pfs = {
'train': df_PFS.loc[df_sbrtVSnone['train'].index].drop(columns=drop_sbrtVS),
'test': df_PFS.loc[df_sbrtVSnone['test'].index].drop(columns=drop_sbrtVS)
}
df_rfaVSnone_pfs = {
'train': df_PFS.loc[df_rfaVSnone['train'].index].drop(columns=drop_rfaVS),
'test': df_PFS.loc[df_rfaVSnone['test'].index].drop(columns=drop_rfaVS)
}
df_sbrtVSrfa_pfs = {
'train': df_PFS.loc[df_sbrtVSrfa['train'].index].drop(columns=drop_sbrtVS),
'test': df_PFS.loc[df_sbrtVSrfa['test'].index].drop(columns=drop_sbrtVS)
}
df_sbrtORrfa_pfs = {
'train': df_PFS.loc[df_sbrtORrfa['train'].index].drop(columns=drop_sbrtORrfa),
'test': df_PFS.loc[df_sbrtORrfa['test'].index].drop(columns=drop_sbrtORrfa)
}
ofile_pfs = os.path.join(os.path.dirname(args.ifile_os), 'liver_pfs_sbrtVSnone.hd5')
dataframes_to_hd5(df_sbrtVSnone_pfs, ofile_pfs, 'PFSEvent', 'TTPy')
ofile_pfs = os.path.join(os.path.dirname(args.ifile_os), 'liver_pfs_rfaVSnone.hd5')
dataframes_to_hd5(df_rfaVSnone_pfs, ofile_pfs, 'PFSEvent', 'TTPy')
ofile_pfs = os.path.join(os.path.dirname(args.ifile_os), 'liver_pfs_sbrtVSrfa.hd5')
dataframes_to_hd5(df_sbrtVSrfa_pfs, ofile_pfs, 'PFSEvent', 'TTPy')
ofile_pfs = os.path.join(os.path.dirname(args.ifile_os), 'liver_pfs_sbrtORrfa.hd5')
dataframes_to_hd5(df_sbrtORrfa_pfs, ofile_pfs, 'PFSEvent', 'TTPy')
|
from icevision.models.ultralytics.yolov5.dataloaders import *
from icevision.models.ultralytics.yolov5.model import *
from icevision.models.ultralytics.yolov5.prediction import *
from icevision.models.ultralytics.yolov5.show_results import *
from icevision.models.ultralytics.yolov5.utils import *
from icevision.models.ultralytics.yolov5.backbones import *
from icevision.models.ultralytics.yolov5.show_batch import *
# Soft dependencies
from icevision.soft_dependencies import SoftDependencies
if SoftDependencies.fastai:
from icevision.models.ultralytics.yolov5 import fastai
if SoftDependencies.pytorch_lightning:
from icevision.models.ultralytics.yolov5 import lightning
|
from dbs.apis.dbsClient import *
url="https://cmsweb.cern.ch/dbs/prod/global/DBSReader/"
#url="https://dbs3-test2.cern.ch/dbs/dev/global/DBSReader/"
dbs3api = DbsApi(url=url)
dataset = '/JetHT/Run2018A-v1/RAW'
print(dbs3api.listFileSummaries(dataset=dataset, validFileOnly=1))
print("\n")
print(dbs3api.listFileSummaries(dataset=dataset))
print("\n")
block='/JetHT/Run2018A-v1/RAW#e347a08b-e10d-49a5-b704-f4a2bdae190a'
print(dbs3api.listFileSummaries(block_name=block, validFileOnly=1))
print("\n")
block='/JetHT/Run2018A-v1/RAW#e347a08b-e10d-49a5-b704-f4a2bdae190a'
print(dbs3api.listFileSummaries(block_name=block))
print("\n")
dataset = '/DoubleEG/CMSSW_9_2_3_patch2-2017_07_11_19_22_PRref_92X_dataRun2_Prompt_RefGT_week28_2017-v1/RECO'
print(dbs3api.listFileSummaries(dataset=dataset))
print("\n")
dataset = '/DoubleEG/CMSSW_9_2_3_patch2-2017_07_11_19_22_PRref_92X_dataRun2_Prompt_RefGT_week28_2017-v1/RECO'
print(dbs3api.listFileSummaries(dataset=dataset, sumOverLumi=1))
print("\n")
dataset = '/DoubleEG/CMSSW_9_2_3_patch2-2017_07_11_19_22_PRref_92X_dataRun2_Prompt_RefGT_week28_2017-v1/RECO'
print(dbs3api.listFileSummaries(dataset=dataset, run_num=297723))
print("\n")
dataset = '/DoubleEG/CMSSW_9_2_3_patch2-2017_07_11_19_22_PRref_92X_dataRun2_Prompt_RefGT_week28_2017-v1/RECO'
print(dbs3api.listFileSummaries(dataset=dataset, sumOverLumi=1, run_num=297723))
print("\n")
print("All Done")
|
import unittest
from chalmers.event_dispatcher import EventDispatcher
from chalmers import config, errors
from os.path import join
import tempfile
import os
class TestEventDispatcher(EventDispatcher):
@property
def name(self):
return 'test'
def dispatch_foo(self, value):
self.foo = value
def dispatch_error(self, value):
raise Exception("Expected dispatch error")
class Test(unittest.TestCase):
def setUp(self):
self.root_config = join(tempfile.gettempdir(), 'chalmers_tests')
config.set_relative_dirs(self.root_config)
unittest.TestCase.setUp(self)
def test_init(self):
dispatcher = TestEventDispatcher()
self.assertFalse(dispatcher.is_listening)
def test_listen(self):
d = TestEventDispatcher()
if os.path.exists(d.addr):
os.unlink(d.addr)
self.assertFalse(d.is_listening)
d.start_listener()
self.assertTrue(d.is_listening)
if os.name != 'nt':
self.assertTrue(os.path.exists(d.addr))
d.send('foo', 1)
d.send('exitloop')
d._listener_thread.join()
self.assertFalse(d.is_listening)
self.assertEqual(d.foo, 1)
def test_exception(self):
d = TestEventDispatcher()
if os.path.exists(d.addr):
os.unlink(d.addr)
self.assertFalse(d.is_listening)
d.start_listener()
with self.assertRaises(errors.ChalmersError):
d.send('error', 1)
self.assertTrue(d.is_listening)
d.send('exitloop')
d._listener_thread.join()
self.assertFalse(d.is_listening)
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
from __future__ import absolute_import, division, print_function
from libtbx.test_utils import approx_equal
from cctbx import dmtbx
from cctbx import maptbx
from cctbx import miller
from scitbx import fftpack
from libtbx import complex_math
import scitbx.math
from cctbx.array_family import flex
from cctbx.development import random_structure
from cctbx.development import debug_utils
import random
import math
import sys
from six.moves import range
def direct_space_squaring(start, selection_fixed):
map_gridding = miller.index_span(
miller.set.expand_to_p1(start).indices()).map_grid()
if (selection_fixed is None):
fixed = start
var = start
else:
fixed = start.select(selection_fixed)
var = start.select(~selection_fixed)
rfft = fftpack.real_to_complex_3d([n*3//2 for n in map_gridding])
conjugate_flag = True
structure_factor_map = maptbx.structure_factors.to_map(
space_group=fixed.space_group(),
anomalous_flag=fixed.anomalous_flag(),
miller_indices=fixed.indices(),
structure_factors=fixed.data(),
n_real=rfft.n_real(),
map_grid=flex.grid(rfft.n_complex()),
conjugate_flag=conjugate_flag)
real_map = rfft.backward(structure_factor_map.complex_map())
squared_map = flex.pow2(real_map)
squared_sf_map = rfft.forward(squared_map)
allow_miller_indices_outside_map = False
from_map = maptbx.structure_factors.from_map(
anomalous_flag=var.anomalous_flag(),
miller_indices=var.indices(),
complex_map=squared_sf_map,
conjugate_flag=conjugate_flag,
allow_miller_indices_outside_map=allow_miller_indices_outside_map)
if (selection_fixed is None):
return from_map.data()
result = start.data().deep_copy()
result.set_selected(~selection_fixed, from_map.data())
assert result.select(selection_fixed).all_eq(fixed.data())
return result
def reciprocal_space_squaring(start, selection_fixed, verbose):
tprs = dmtbx.triplet_generator(miller_set=start)
if (0 or verbose):
for ih in range(start.indices()[:1].size()):
for relation in tprs.relations_for(ih):
print(relation.format(start.indices(), ih), end=' ')
if (not relation.is_sigma_2(ih)):
print("not sigma-2", end=' ')
print()
amplitudes = abs(start).data()
if (selection_fixed is not None):
amplitudes.set_selected(~selection_fixed, 0)
input_phases = flex.arg(start.data())
result = tprs.apply_tangent_formula(
amplitudes=amplitudes,
phases_rad=input_phases,
selection_fixed=selection_fixed,
use_fixed_only=selection_fixed is not None)
if (selection_fixed is not None):
assert result.select(selection_fixed).all_eq(
input_phases.select(selection_fixed))
return result
def exercise_truncate(q_large):
tprs_full = dmtbx.triplet_generator(
miller_set=q_large,
discard_weights=True)
tprs = dmtbx.triplet_generator(
miller_set=q_large,
amplitudes=q_large.data(),
max_relations_per_reflection=0,
discard_weights=True)
assert tprs.n_relations().all_eq(tprs_full.n_relations())
for n in (1,10,100,1000):
tprs = dmtbx.triplet_generator(
miller_set=q_large,
amplitudes=q_large.data(),
max_relations_per_reflection=n,
discard_weights=True)
assert (tprs.n_relations() >= n).all_eq(tprs.n_relations() == n)
n = 3
tprs = dmtbx.triplet_generator(
miller_set=q_large,
amplitudes=q_large.data(),
max_relations_per_reflection=n,
discard_weights=True)
n_rel_full = tprs_full.n_relations()
n_rel = tprs.n_relations()
amp = q_large.data()
for ih in range(q_large.indices().size()):
if (n_rel[ih] == n_rel_full[ih]): continue
aa_full = flex.double()
for relation in tprs_full.relations_for(ih):
aa_full.append(amp[relation.ik()] * amp[relation.ihmk()])
aa = flex.double()
for relation in tprs.relations_for(ih):
aa.append(amp[relation.ik()] * amp[relation.ihmk()])
aa_full = aa_full.select(flex.sort_permutation(data=aa_full, reverse=True))
assert approx_equal(aa_full[:n], aa)
def exercise(space_group_info, n_scatterers=8, d_min=2, verbose=0,
e_min=1.5):
structure = random_structure.xray_structure(
space_group_info,
elements=["const"]*n_scatterers,
volume_per_atom=200,
min_distance=3.,
general_positions_only=True,
u_iso=0.0)
if (0 or verbose):
structure.show_summary().show_scatterers()
f_calc = structure.structure_factors(
d_min=d_min, anomalous_flag=False).f_calc()
f_obs = abs(f_calc)
q_obs = miller.array(
miller_set=f_obs,
data=f_obs.data()
/ math.sqrt(f_obs.space_group().order_p() * n_scatterers)
/ f_obs.space_group().n_ltr())
q_obs = q_obs.sort(by_value="abs")
q_obs.setup_binner(auto_binning=True)
n_obs = q_obs.quasi_normalize_structure_factors()
r = flex.linear_regression(q_obs.data(), n_obs.data())
if (0 or verbose):
r.show_summary()
assert r.is_well_defined()
assert abs(r.y_intercept()) < 0.1
assert abs(r.slope() - 1) < 0.2
q_large = q_obs.select(
q_obs.quasi_normalized_as_normalized().data() > e_min)
if (0 or verbose):
print("Number of e-values > %.6g: %d" % (e_min, q_large.size()))
other_structure = random_structure.xray_structure(
space_group_info,
elements=["const"]*n_scatterers,
volume_per_atom=200,
min_distance=3.,
general_positions_only=True,
u_iso=0.0)
assert other_structure.unit_cell().is_similar_to(structure.unit_cell())
q_calc = q_large.structure_factors_from_scatterers(
other_structure, algorithm="direct").f_calc()
start = q_large.phase_transfer(q_calc.data())
for selection_fixed in (
None,
flex.double([random.random() for i in range(start.size())]) < 0.4):
from_map_data = direct_space_squaring(start, selection_fixed)
direct_space_result = start.phase_transfer(phase_source=from_map_data)
new_phases = reciprocal_space_squaring(start, selection_fixed, verbose)
reciprocal_space_result = start.phase_transfer(
phase_source=flex.polar(1,new_phases))
mwpe = direct_space_result.mean_weighted_phase_error(
reciprocal_space_result)
if (0 or verbose):
print("mwpe: %.2f" % mwpe, start.space_group_info())
for i,h in enumerate(direct_space_result.indices()):
amp_d,phi_d = complex_math.abs_arg(
direct_space_result.data()[i], deg=True)
amp_r,phi_r = complex_math.abs_arg(
reciprocal_space_result.data()[i],deg=True)
phase_err = scitbx.math.phase_error(phi_d, phi_r, deg=True)
assert phase_err < 1.0 or abs(from_map_data[i]) < 1.e-6
exercise_truncate(q_large)
def run_call_back(flags, space_group_info):
exercise(space_group_info, verbose=flags.Verbose)
def run():
debug_utils.parse_options_loop_space_groups(sys.argv[1:], run_call_back)
if (__name__ == "__main__"):
run()
|
#!/usr/bin/env ipython
import numpy as np
import re
import datetime as dt
from GDSII import GDSII
from GDSII_ARef import GDSII_ARef
from GDSII_SRef import GDSII_SRef
from GDSII_Boundary import GDSII_Boundary
from GDSII_Text import GDSII_Text
from GDSII_Path import GDSII_Path
from GDSII_Box import GDSII_Box
from GDSII_Node import GDSII_Node
class GDSII_Structure(GDSII):
'''
GDSII_Structure class : subclass of GDSII
GDSII Stream file format release 6.0
Structure record
    The structure record is a container for all element records. A structure
    is usually called a cell in the CAD/EDA community. Once a cell is defined,
    it can be referenced in the layout. Structure references can be nested up
    to 32 levels. Structures and referencing are important because they enable
    data compression. For example, in the GDSII format, replacing each
    repeated polygon (boundary element) with a cell containing the polygon and
    multiple references to that cell can compress the file 7x. The
    compression is even greater if the polygons are arranged in an array.
The functions of this class are:
setName = Set the name of the cell
addBoundary = Adds a boundary element
addSRef = Adds a cell reference element
addARef = Adds an array of cell reference element
addPath = Adds a path element
addText = Adds a text element
addNode = Adds a node element
genRecord = Generate the record binary
readRecord = Reads a structure record
Long Chang, UH, May 2013
'''
def __init__(self, structureName='UHNano'):
super(GDSII_Structure,self).__init__()
self._dom = self.getDate()
self._doa = self.getDate()
self._structureName = structureName
self._aref = []
self._sref = []
self._boundary = []
self._path = []
self._box = []
self._node = []
self._text = []
self._cStructure = 0x0502 #Structure begin
self._cStructureName = 0x0606 #Structure name
self._cStructureEnd = 0x0700 #Structure end
self._cBoundary = 0x0800 #Boundary element begin
self._cSRef = 0x0A00 #Structure reference element begin
self._cARef = 0x0B00 #Array reference element begin
self._cText = 0x0C00 #Text element begin
self._cBox = 0x2D00 #Box element begin
self._cNode = 0x1500 #Node element begin
self._cPath = 0x0900 #Path element begin
self._cElementEnd = 0x1100 #Element end
def __repr__(self):
print 'Structure record'
print 'structureName: ' , self.structureName
print 'sref: ' , len(self.sref)
print 'aref: ' , len(self.aref)
print 'boundary: ' , len(self.boundary)
print 'path: ' , len(self.path)
print 'text: ' , len(self.text)
print 'box: ' , len(self.box)
print 'node: ' , len(self.node)
return ''
@property
def dom(self):
'''
dom : list of 6 integers
Date of modification
'''
return self._dom
@dom.setter
def dom(self, val):
if not isinstance(val,list):
raise TypeError('GDSII_Structure.dom : This parameter must be a list of integers')
if not len(val) == 6:
raise TypeError('GDSII_Structure.dom : This parameter must have 6 elements')
self._dom = val
@property
def doa(self):
'''
doa : list of 6 integers
Date of access
'''
return self._doa
@doa.setter
def doa(self, val):
if not isinstance(val,list):
raise TypeError('GDSII_Structure.doa : This parameter must be a list of integers')
if not len(val) == 6:
raise TypeError('GDSII_Structure.doa : This parameter must have 6 elements')
self._doa = val
@property
def structureName(self):
'''
structureName : string
Name of the cell to reference
Up to 32 characters
Characters must be from the set [A-Z,a-z,0-9,_,?,$]
'''
return self._structureName
@structureName.setter
def structureName(self, val):
if not isinstance(val,str):
raise TypeError('GDSII_Structure.structureName : This parameter must be of type str')
if len(val) > 32:
raise ValueError('GDSII_Structure.structureName : This parameter cannot be longer than 32 characters')
        regex = re.compile(r'[^\w?$]')  # flag any character outside [A-Za-z0-9_?$]
if not regex.search(val) == None:
raise ValueError('GDSII_Structure.structureName : This parameter must contain only the following characters: A-Z, a-z, 0-9, _, ? and $')
self._structureName = val
@property
def aref(self):
'''
aref : list of GDSII_ARef objects
A list of array of structure references
'''
return self._aref
@aref.setter
def aref(self,val):
if not isinstance(val,GDSII_ARef):
            raise TypeError('GDSII_Structure.aref : This parameter must be an instance of GDSII_ARef')
self._aref.append(val)
@property
def sref(self):
'''
sref : list of GDSII_SRef objects
A list of structure references
'''
return self._sref
@sref.setter
def sref(self,val):
if not isinstance(val,GDSII_SRef):
            raise TypeError('GDSII_Structure.sref : This parameter must be an instance of GDSII_SRef')
self._sref.append(val)
@property
def boundary(self):
'''
boundary : list of GDSII_Boundary objects
A list of array of boundary elements
'''
return self._boundary
@boundary.setter
def boundary(self,val):
if not isinstance(val,GDSII_Boundary):
            raise TypeError('GDSII_Structure.boundary : This parameter must be an instance of GDSII_Boundary')
self._boundary.append(val)
@property
def text(self):
'''
text : list of GDSII_Text objects
A list of array of structure references
'''
return self._text
@text.setter
def text(self,val):
if not isinstance(val,GDSII_Text):
            raise TypeError('GDSII_Structure.text : This parameter must be an instance of GDSII_Text')
self._text.append(val)
@property
def path(self):
'''
path : list of GDSII_Path objects
A list of path elements
'''
return self._path
@path.setter
def path(self,val):
if not isinstance(val,GDSII_Path):
            raise TypeError('GDSII_Structure.path : This parameter must be an instance of GDSII_Path')
self._path.append(val)
@property
def box(self):
'''
box : list of GDSII_Box objects
A list of box elements
'''
return self._box
@box.setter
def box(self,val):
if not isinstance(val,GDSII_Box):
            raise TypeError('GDSII_Structure.box : This parameter must be an instance of GDSII_Box')
self._box.append(val)
@property
def node(self):
'''
node : list of GDSII_Node objects
A list of node elements
'''
return self._node
@node.setter
def node(self,val):
if not isinstance(val,GDSII_Node):
            raise TypeError('GDSII_Structure.node : This parameter must be an instance of GDSII_Node')
self._node.append(val)
@property
def cStructure(self):
'''
cStructure : 0x0502
Command code for structure begin
'''
return self._cStructure
@property
def cStructureName(self):
'''
cStructureName : 0x0606
Command code for structure name
'''
return self._cStructureName
@property
def cStructureEnd(self):
'''
cStructureEnd : 0x0700
Command code for structure end
'''
return self._cStructureEnd
@property
def cBoundary(self):
'''
cBoundary : 0x0800
Command code for boundary element begin
'''
return self._cBoundary
@property
def cSRef(self):
'''
cSRef : 0x0A00
Command code for structure reference element begin
'''
return self._cSRef
@property
def cARef(self):
'''
cARef : 0x0B00
Command code for array reference element begin
'''
return self._cARef
@property
def cText(self):
'''
cText : 0x0C00
Command code for text element begin
'''
return self._cText
@property
def cBox(self):
'''
cBox : 0x2D00
Command code for box element begin
'''
return self._cBox
@property
def cNode(self):
'''
cNode : 0x1500
Command code for node element begin
'''
return self._cNode
@property
def cPath(self):
'''
cPath : 0x0900
Command code for path element begin
'''
return self._cPath
@property
def cElementEnd(self):
'''
cElementEnd : 0x1100
Command code for element end
'''
return self._cElementEnd
def getDate(self):
'''
getDate()
Returns the time and date as a list
Returns
-------
out : list of integers
The current date and time in the form:
[year month day hour minute second]
'''
tmp = dt.datetime.now()
return [tmp.year,tmp.month,tmp.day,tmp.hour,tmp.minute,tmp.second]
def addARef(self, structureName, xy, pitchX, pitchY, nX, nY, xRot = 0, yRot = 0, reflection = 0, mag = 1, angle = 0):
'''
        addARef(structureName, xy, pitchX, pitchY, nX, nY, xRot = 0, yRot = 0, reflection = 0, mag = 1, angle = 0)
Adds an array reference element
Parameters
----------
structureName : string
Name of the cell to reference
Up to 32 characters
Characters must be from the set [A-Z,a-z,0-9,_,?,$]
xy : numpy.ndarray of type numpy.int32 with 2 elements or list of 2 integer elements
The origin, [x y], of the array reference
pitchX : integer
Array pitch or step along X
pitchY : integer
Array pitch or step along Y
nX : integer
Array repeats along X
nY : integer
Array repeats along Y
xRot : float
Array x angle in units of [degrees]
yRot : float
Array y angle in units of [degrees]
reflection : integer from [0,1]
Reflection enable for reflection about the X axis
mag : float
            Magnification factor used to scale the referenced structure
angle : float
Angle in units of [degrees] used to rotate the referenced structure
counterclockwise about the origin
'''
tmp = GDSII_ARef()
tmp.setARef(structureName, xy, pitchX, pitchY, nX, nY, xRot, yRot, reflection, mag, angle)
self.aref = tmp
def addSRef(self, structureName, xy, reflection = 0, mag = 1, angle = 0):
'''
        addSRef(structureName, xy, reflection = 0, mag = 1, angle = 0)
        Adds a structure reference element
Parameters
----------
structureName : string
Name of the cell to reference
Up to 32 characters
Characters must be from the set [A-Z,a-z,0-9,_,?,$]
xy : numpy.ndarray of type numpy.int32 with 2 elements or list of 2 integer elements
The origin, [x y], of the structure reference
reflection : integer from [0,1]
Reflection enable for reflection about the X axis
mag : float
            Magnification factor used to scale the referenced structure
angle : float
Angle in units of [degrees] used to rotate the referenced structure
counterclockwise about the origin
'''
tmp = GDSII_SRef()
tmp.setSRef(structureName, xy, reflection, mag, angle)
self.sref = tmp
def addBoundary(self, xy, layer=0, datatype=0):
'''
addBoundary(xy, layer=0, datatype=0)
Adds a boundary element
Parameters
----------
xy : numpy.ndarray of type numpy.int32 or a list of integers
            An array containing the vertices of a polygon in the form
[x1 y1 x2 y2 ... xn yn x1 y1]
layer : integer from 0 to 255
The layer number
datatype : integer from 0 to 255
The datatype number
'''
tmp = GDSII_Boundary()
tmp.setBoundary(xy, layer, datatype)
self.boundary = tmp
def addText(self, text, xy, layer=0, texttype=0, presentation = None, pathtype = None, width = None, reflection = 0, mag = 1, angle = 0):
'''
        addText(text, xy, layer=0, texttype=0, presentation=None, pathtype=None, width=None, reflection=0, mag=1, angle=0)
Adds a text element
Parameters
----------
text : string
A text string
xy : numpy.ndarray of type numpy.int32 or a list of integers
            An array containing the vertices of a polygon in the form
[x1 y1 x2 y2 ... xn yn x1 y1]
layer : integer from 0 to 255
The layer number
texttype : integer from 0 to 255
The texttype number
presentation : integer
Specifies the font in bits
Bit Number (0-15)
10-11 Specify Font
12-13 Vertical presentation
0 Top
1 Middle
2 Bottom
14-15 Horizontal presentation
                        0   Left
                        1   Center
                        2   Right
pathtype : integer from the set [0,1,2]
Describe the nature of the text segment ends
0 Square ends at text terminal
1 Rounded ends at text terminal
2 Square ends that overlap terminals by one-half the width
width : integer
Defines the width of the text. If width is negative, it will be
independent of any structure scaling
reflection : integer from [0,1]
Reflection enable for reflection about the X axis
mag : float
            Magnification factor used to scale the referenced structure
angle : float
Angle in degrees counterclockwise used to rotate the referenced
structure about the origin
'''
tmp = GDSII_Text()
tmp.setText(text, xy, layer, texttype)
self.text = tmp
def addPath(self, xy, layer=0, datatype=0, width=None, pathtype=None):
'''
addPath(xy, layer=0, datatype=0, width=None, pathtype=None)
Adds a path element
Parameters
----------
xy : numpy.ndarray of type numpy.int32 or a list of integers
            An array containing the vertices of a polygon in the form
[x1 y1 x2 y2 ... xn yn x1 y1]
layer : integer from 0 to 255
The layer number
datatype : integer from 0 to 255
The datatype number
width : integer (nonzero)
Width of the path
pathtype : integer from the set [0,1,2]
Describe the nature of the path segment ends
0 Square ends at path terminal
1 Rounded ends at path terminal
2 Square ends that overlap terminals by one-half the width
'''
tmp = GDSII_Path()
tmp.setPath(xy, layer, datatype, width, pathtype)
self.path = tmp
def addBox(self, xy, layer=0, boxtype=0):
'''
addBox(xy, layer=0, boxtype=0)
        Adds a box element
Parameters
----------
xy : numpy.ndarray of type numpy.int32 or a list of integers
            An array containing the vertices of a box in the form
[x1 y1 x2 y2 x3 y3 x4 y4 x1 y1]
layer : integer from 0 to 255
The layer number
boxtype : integer from 0 to 255
The boxtype number
'''
tmp = GDSII_Box()
tmp.setBox(xy, layer, boxtype)
self.box = tmp
def addNode(self, xy, layer=0, nodetype=0):
'''
addNode(xy, layer=0, nodetype=0)
Adds a node element
Parameters
----------
xy : numpy.ndarray of type numpy.int32 or a list of integers
            An array containing the vertices of an electrical net in the form
[x1 y1 x2 y2 ... x50 y50]
layer : integer from 0 to 255
The layer number
nodetype : integer from 0 to 255
The nodetype number
'''
tmp = GDSII_Node()
tmp.setNode(xy, layer, nodetype)
self.node = tmp
def genRecord(self):
'''
genRecord()
Generates the structure record binary
Description
-----------
The structure record is specified by records in the following order:
Structure
StructureName
Boundary Element (optional)
SRef element (optional)
ARef element (optional)
Path element (optional)
Text element (optional)
Box element (optional)
Node element (optional)
'''
self.recordClear()
#Structure start
self.record = self.dec2byte(28)
self.record = self.dec2byte(self.cStructure)
for i in self.dom:
self.record = self.dec2byte(i)
for i in self.doa:
self.record = self.dec2byte(i)
#Define structure name
if len(self.structureName)%2 == 1:
self.record = self.dec2byte(len(self.structureName)+5)
else:
self.record = self.dec2byte(len(self.structureName)+4)
self.record = self.dec2byte(self.cStructureName)
self.record = np.array([ord(i) for i in self.structureName],dtype=np.uint8)
if len(self.structureName)%2 == 1:
self.record = np.zeros(1,dtype=np.uint8)
#Add boundary elements
for i in self.boundary:
i.genRecord()
self.record = i.record
#Add sref elements
for i in self.sref:
i.genRecord()
self.record = i.record
#Add aref elements
for i in self.aref:
i.genRecord()
self.record = i.record
#Add path elements
for i in self.path:
i.genRecord()
self.record = i.record
#Add text elements
for i in self.text:
i.genRecord()
self.record = i.record
#Add box elements
for i in self.box:
i.genRecord()
self.record = i.record
#Add node elements
for i in self.node:
i.genRecord()
self.record = i.record
#Structure end
self.record = self.dec2byte(4)
self.record = self.dec2byte(self.cStructureEnd)
self.recordClip()
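    # Illustrative sketch (not part of the original class): the record stream
    # produced by genRecord above, assuming the even-length structure name
    # 'ABCD' (each bracketed item is one two-byte word from dec2byte):
    #
    #   [28] [0x0502] <12 date words>          # structure begin + dom/doa
    #   [8]  [0x0606] 'A' 'B' 'C' 'D'          # structure name
    #   ... element records (boundary, sref, aref, path, text, box, node) ...
    #   [4]  [0x0700]                          # structure end
    #
    # An odd-length name gets one extra zero pad byte, which is why the length
    # field uses len(name)+5 in that branch.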
def readRecord(self, record):
'''
readRecord(record)
Reads the boundary record and updates the boundary element parameters
'''
self.pointer = 0
#Check if record is a structure record
if self.byte2dec(record[self.opCodePointer]) == self.cStructure:
self.dom[0] = self.byte2dec(record[self.pointer+4:self.pointer+6])
self.dom[1] = self.byte2dec(record[self.pointer+6:self.pointer+8])
self.dom[2] = self.byte2dec(record[self.pointer+8:self.pointer+10])
self.dom[3] = self.byte2dec(record[self.pointer+10:self.pointer+12])
self.dom[4] = self.byte2dec(record[self.pointer+12:self.pointer+14])
self.dom[5] = self.byte2dec(record[self.pointer+14:self.pointer+16])
self.doa[0] = self.byte2dec(record[self.pointer+16:self.pointer+18])
self.doa[1] = self.byte2dec(record[self.pointer+18:self.pointer+20])
self.doa[2] = self.byte2dec(record[self.pointer+20:self.pointer+22])
self.doa[3] = self.byte2dec(record[self.pointer+22:self.pointer+24])
self.doa[4] = self.byte2dec(record[self.pointer+24:self.pointer+26])
self.doa[5] = self.byte2dec(record[self.pointer+26:self.pointer+28])
self.pointer += 28
else:
raise ValueError('GDSII_Structure.readRecord() : The record is not a structure record')
#Structure name
if self.byte2dec(record[self.opCodePointer]) == self.cStructureName:
length = self.byte2dec(record[self.pointer:self.pointer+2])
if record[self.pointer+length-1] == 0:
self.structureName = ''.join([chr(i) for i in record[self.pointer+4:self.pointer+length-1]])
else:
self.structureName = ''.join([chr(i) for i in record[self.pointer+4:self.pointer+length]])
self.pointer += length
else:
raise ValueError('GDSII_Structure.readRecord() : The structure name is not defined')
#Elements
while not self.byte2dec(record[self.opCodePointer]) == self.cStructureEnd:
#Retrieve one element record
tp = self.pointer
tc = [tp+2,tp+3]
while not self.byte2dec(record[tc]) == self.cElementEnd:
tp += self.byte2dec(record[tp:tp+2])
tc = [tp+2,tp+3]
tp += 4
elementType = self.byte2dec(record[self.opCodePointer])
elementRecord = record[self.pointer:tp]
#Read the element record
if elementType == self.cBoundary:
E = GDSII_Boundary()
E.readRecord(elementRecord)
self.boundary = E
elif elementType == self.cSRef:
E = GDSII_SRef()
E.readRecord(elementRecord)
self.sref = E
elif elementType == self.cARef:
E = GDSII_ARef()
E.readRecord(elementRecord)
self.aref = E
elif elementType == self.cPath:
E = GDSII_Path()
E.readRecord(elementRecord)
self.path = E
elif elementType == self.cText:
E = GDSII_Text()
E.readRecord(elementRecord)
self.text = E
elif elementType == self.cBox:
E = GDSII_Box()
E.readRecord(elementRecord)
self.box = E
elif elementType == self.cNode:
E = GDSII_Node()
E.readRecord(elementRecord)
self.node = E
#Point to next element
self.pointer = tp
def test():
a = GDSII_Structure('doseArray');
a.addBoundary([0,0,0,5,5,5,5,0],2,1);
a.addBoundary([10,0,10,5,15,5,15,0],2,2);
a.addBoundary([20,0,20,5,25,5,25,0],2,3);
a.addBoundary([0,10,0,15,5,15,5,10],2,4);
a.addBoundary([10,10,10,15,15,15,15,10],2,5);
a.addBoundary([20,10,20,15,25,15,25,10],2,6);
a.addBoundary([0,20,0,25,5,25,5,20],2,7);
a.addBoundary([10,20,10,25,15,25,15,20],2,8);
a.addBoundary([20,20,20,25,25,25,25,20],2,9);
a.addText('Hello',xy=[0,0])
a.addPath([0,0,1,1,2,2],0,0)
a.addBox([0,0,0,10,10,10,10,0,0,0],0,0)
a.addNode([0,0,20,20],255,255)
a.addSRef('sref',[15,15])
a.addARef('aref',[30,30],100,100,10,10)
a.genRecord()
b = GDSII_Structure()
b.readRecord(a.record)
print a
print b
if __name__ == '__main__':
test()
|
X = int(input())
Y = int(input())
sum = 0
if X < Y:
for i in range(X, Y + 1):
if i % 13 != 0:
sum += i
print(sum)
elif Y < X:
for i in range(Y, X + 1):
if i % 13 != 0:
sum += i
print(sum) |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import requests
import gspread
import config
from oauth2client.service_account import ServiceAccountCredentials as Account
api_url = 'https://api.leaseweb.com/invoices/v1/invoices'
def api_request(url, headers, params=None):
try:
conn = requests.get(url=url, headers=headers, params=params)
conn.raise_for_status()
except requests.exceptions.HTTPError as http_error:
raise SystemExit(http_error)
except requests.exceptions.RequestException as req_error:
raise SystemExit(req_error)
except Exception as error:
raise SystemExit(error)
else:
return conn.json()
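# Note (illustrative, not confirmed by this script): main() passes each value
# of config.lw_accounts straight to requests as the headers dict, so
# lw_accounts is presumably shaped along the lines of
#
#   lw_accounts = {
#       'some-account': {'X-LSW-Auth': '<api key>'},
#   }
#
# with the exact header name depending on the LeaseWeb API authentication.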
def main(header):
hosts = []
for item in api_request(api_url, header)['invoices']:
host = {
'ContractId': item['id'],
'Date': item['date'],
'DueDate': item['dueDate'],
'TaxAmount': item['taxAmount'],
'Total': item['total'],
'OpenAmount': item['openAmount'],
'Currency': item['currency'],
'Status': item['status'],
}
hosts.append(host)
return hosts
# Google sheet
scope = ['https://spreadsheets.google.com/feeds',
'https://www.googleapis.com/auth/drive']
creds = Account.from_json_keyfile_name('google_sheet_secret.json', scope)
client = gspread.authorize(creds)
def update_google_table(parameter_list):
# Google spreadsheet
spreadsheet = client.open("Leaseweb invoices")
    # Select the worksheet tab
    worksheet = spreadsheet.worksheet('All invoices')
    # Build the table header
header = [
'ContractId',
'Date',
'DueDate',
'TaxAmount',
'Total',
'OpenAmount',
'Currency',
'Status',
]
worksheet.update('A1', [header])
start_cell = 'A2'
end_cell = 'H' + str(len(parameter_list) + 1)
cell_range = worksheet.range('{}:{}'.format(start_cell, end_cell))
simplyfied_data = []
for row in parameter_list:
for column in header:
simplyfied_data.append(row[column])
for i, cell in enumerate(cell_range):
cell.value = simplyfied_data[i]
worksheet.update_cells(cell_range)
if __name__ == '__main__':
invoices_list = []
for auth_key in config.lw_accounts:
for invoice in main(config.lw_accounts[auth_key]):
invoices_list.append(invoice)
update_google_table(invoices_list)
|
from pymongo import MongoClient
import os, sys
from loguru import logger
try:
conn = MongoClient(os.environ["DATABASE_ADDRESS"], int(os.environ["DATABASE_PORT"])) #host.docker.internal, 27017
logger.info("Successfully connected to Holder MongoDB")
except Exception as error:
logger.error(error)
sys.exit()
# database name: provider
db = conn.provider
# Created or Switched to collection names: offers
collection = db.offers
# Collection for: stakeholder
stakeholder_col = db.stakeholder |
# -*- coding: utf-8 -*-
import sys
import numpy as np
from gensim.models import Word2Vec
model_file = sys.argv[1]
(pos1, pos2, neg) = sys.argv[2:]
# Load the trained model
model = Word2Vec.load(model_file)
# Normalize the word vectors
model.init_sims(replace=True)
# Compute the query vector
vec = model[pos1] + model[pos2] - model[neg]
# Get the matrix of normalized vectors for the whole vocabulary
emb = model.wv.vectors_norm
# Compute the similarity of the query vector against every word
sims = np.dot(emb, vec)
# Pick the most similar word and print it as the prediction
for index in np.argsort(-sims):
    word = model.wv.index2word[index]
    if word not in (pos1, pos2, neg):
        print('Prediction:', word)
break
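# Illustrative invocation sketch (not part of the original script; file and
# word arguments are hypothetical):
#
#   python predict_analogy.py word2vec.model king woman man
#
# i.e. the query vector is vec('king') + vec('woman') - vec('man'), and the
# nearest remaining vocabulary word is printed as the prediction.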
|
#!/usr/bin/env python
# -*- coding: iso-8859-15 -*-
import time,os,pygame,json,httplib,datetime,urllib,random
import RPi.GPIO as GPIO  # GPIO is used below; assumed to be RPi.GPIO on a Raspberry Pi
from keys import PARSE_API_KEY,PARSE_APP_ID
class Panpin:
    # stores the GPIO pins of all puppets' mouths
gpio_pinak = []
def __init__(self,izena,parse_api_class,textu_fitxategia,audio_fitxategia,gpio_pin):
        # izena: the puppet's name, e.g. 'Olentzero'
        # parse_api_class: URL of the Parse API class, e.g. '/1/classes/Olentzero'
        # textu_fitxategia: name of this puppet's text file, e.g. 'olentzero.txt'
        # audio_fitxategia: name of this puppet's audio file, e.g. 'olentzero.wav'
        # gpio_pin: GPIO pin driving this puppet's mouth, e.g. 17
self.izena = izena
self.parse_api_class = parse_api_class
self.textu_fitxategia = textu_fitxategia
self.audio_fitxategia = audio_fitxategia
self.gpio_pin = gpio_pin
Panpin.gpio_pinak.append(gpio_pin)
#self.esaldi_berriak()
def hitzegin(self):
        # the puppet plays its audio file and lights the
        # corresponding LED while it is speaking
for pin in Panpin.gpio_pinak:
GPIO.output(pin, False)
pygame.mixer.init(17000)
pygame.mixer.music.set_volume(1.0)
pygame.mixer.music.load(self.audio_fitxategia)
pygame.mixer.music.play()
GPIO.output(self.gpio_pin, True)
while pygame.mixer.music.get_busy() == True:
continue
return
def pinak(self):
        # returns the GPIO pins of all puppets
return Panpin.gpio_pinak
def esaldi_berriak(self):
        # refreshes this puppet's phrases from Parse
connection = httplib.HTTPSConnection('api.parse.com', 443)
params = urllib.urlencode({"where":json.dumps({"noizarte": {"$gt": {"__type": "Date", "iso": datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%fZ') }}},{"noiztik": {"$lt": {"__type": "Date", "iso": datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%fZ') }}})})
connection.connect()
connection.request('GET', self.parse_api_class+'?%s' % params, '', {
"X-Parse-Application-Id": PARSE_APP_ID,
"X-Parse-REST-API-Key": PARSE_API_KEY
})
emaitza = connection.getresponse()
        print emaitza.status # 200 means OK, anything else is an error
result = json.loads(emaitza.read())
data = result["results"]
        random.shuffle(data)  # shuffle in place; random.shuffle returns None
self.esaldiak = data
self.esaldien_luzeera = len(data)
self.ind = 0
return data
def get_esaldia(self):
        # returns one phrase from the phrase list
nire_esaldia = self.esaldiak[self.ind]
if self.ind < self.esaldien_luzeera-1:
self.ind = self.ind+1
else:
self.esaldi_berriak()
return nire_esaldia
    def idatzi(self, esaldia):
        # writes a phrase to the text file
with open(self.textu_fitxategia, 'w') as outfile:
json.dump(esaldia, outfile)
return
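# Illustrative sketch (not part of the original module), using the example
# values given in the constructor comments above:
#
#   olentzero = Panpin('Olentzero', '/1/classes/Olentzero',
#                      'olentzero.txt', 'olentzero.wav', 17)
#   olentzero.esaldi_berriak()   # fetch fresh phrases from Parse
#   esaldia = olentzero.get_esaldia()
#   olentzero.hitzegin()         # play the audio, lighting GPIO pin 17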
|
"""This module stores all the REST API of ImageButler."""
from .apis import api
from .ping import Ping
from .image import Image
from .images import Images
api.add_resource(Ping, '/ping')
api.add_resource(Image, '/image')
api.add_resource(Images, '/images')
|
# Program: marker_a4.py
# Project: encuadro - Facultad de Ingeniería - UDELAR
# Author: Martin Etchart - [email protected].
#
# Description:
# Python script to render blender model providing pose (rotation, translation), fov
# and image dimensions.
#
# Hosted on:
# http://code.google.com/p/encuadro/
#
# Usage:
# blender -b marker_a4.blend --python "marker_a4.py" -o //<filename> -F JPEG -x 1 -f 1
#
# More info:
# http://code.google.com/p/encuadro/wiki/Rendering
import bpy
import sys
import math
####################################
# OPTIONAL ARGUMENT PARSING WITH LIBRARY. DOESN'T WORK
#
#import argparse
#
#parser = argparse.ArgumentParser(description='Python script to render blender model #providing pose (rotation, translation), fov and image dimensions.')
#
#parser.add_argument('-rot', action="store", dest="r")
#parser.add_argument('-tra', action="store", dest="t")
#parser.add_argument('-fov', action="store", dest="fov", type=float)
####################################
####################################
# OPTIONAL ARGUMENT PARSING BY ME. DOESN'T WORK
#
#i = 4
#while (i<len(sys.argv)):
# if (sys.argv[i]=="-tra"):
# i+=1
# tx = float(sys.argv[i])
# i+=1
# ty = float(sys.argv[i])
# i+=1
# tz = float(sys.argv[i])
# print "TRA"
# elif (sys.argv[i]=="-rot"):
# i+=1
# rx = float(sys.argv[i])
# i+=1
# ry = float(sys.argv[i])
# i+=1
# rz = float(sys.argv[i])
# print "ERRE"
# elif (sys.argv[i]=="-fov"):
# i+=1
# fov = float(sys.argv[i])
# elif (sys.argv[i]=="-o"):
# print "OOO"
# break
# i+=1
####################################
print ( sys.argv )
# find the requested angle
camAngle = None
for ag in sys.argv:
if ag[:12] == "--cam-angle=":
camAngle = float ( ag[12:] )
if camAngle is None:
print ( "Cam angle not set" )
sys.exit(0)
else:
print ( "Cam angle is " + str( camAngle ) )
pi = math.pi  # use the math module's value rather than a truncated constant
scene = bpy.data.scenes["Scene"]
#camera = bpy.data.scenes["Camera"]
# Set render resolution
#scene.render.resolution_x = width
#scene.render.resolution_y = height
# Set camera fov in degrees
#scene.camera.data.angle = fov*(pi/180.0)
# Set camera rotation in euler angles
scene.camera.rotation_mode = 'XYZ'
#scene.camera.rotation_euler[0] = rx*(pi/180.0)
#scene.camera.rotation_euler[1] = ry*(pi/180.0)
# this is the angle
rotationInRad = ( camAngle/180.0) * pi
scene.camera.rotation_euler[2] = rotationInRad
print ( "Initial cam location ( " + str( scene.camera.location.x) + "|" +
str( scene.camera.location.y) + "|" +
str( scene.camera.location.z) + ")" )
distanceToCenter = scene.camera.location.y
print ( "Using " + str ( distanceToCenter ) + " as distance to center ")
yPart = math.cos ( - rotationInRad ) * distanceToCenter
xPart = math.sin ( - rotationInRad ) * distanceToCenter
# Set camera translation
scene.camera.location.x = xPart
scene.camera.location.y = yPart
#scene.camera.location.z = tz
print ( "New location (" + str( xPart ) + " | " + str( yPart ) + ")")
|
import threadcount as tc
from threadcount.procedures import fit_line, open_cube_and_deredshift, set_rcParams
def update_settings(s):
# "de-redshift" instrument dispersion:
s.instrument_dispersion_rest = s.instrument_dispersion / (1 + s.z_set)
if s.always_manually_choose is None:
s.always_manually_choose = []
# create a kernel image to use for region averaging.
region_pixels = tc.fit.get_region(s.region_averaging_radius)
k = tc.fit.get_reg_image(region_pixels)
s.kernel = k
# add to the comments string for saving.
if s.comment and not s.comment.endswith("\n"):
s.comment += "\n"
comment_keys = [
"instrument_dispersion",
"region_averaging_radius",
"snr_lower_limit",
"mc_snr",
"mc_n_iterations",
"lmfit_kwargs",
"d_aic",
]
s.comment += "\n".join(
["{}: {}".format(x, s.__dict__.get(x, None)) for x in comment_keys]
)
s.comment += "\nunits: " + s.cube.unit.to_string()
def fit_lines(s):
num_lines = len(s.lines)
for i in range(num_lines):
s._i = i
fit_line.run(s)
def run(user_settings):
default_settings = {
# If setup_parameters is True, only the monitor_pixels will be fit. (faster)
"setup_parameters": False,
"monitor_pixels": [ # the pixels to always save.
# (40, 40),
# (28, 62),
# (48, 58),
# (52, 56),
],
#
# output options
"output_filename": "example_output", # saved files will begin with this.
"save_plots": False, # If True, can take up to 1 hour to save each line's plots.
#
# Prep image, and global fit settings.
"region_averaging_radius": 1.5, # smooth image by this many PIXELS.
"instrument_dispersion": 0.8, # in Angstroms. Will set the minimum sigma for gaussian fits.
"lmfit_kwargs": {
"method": "least_squares"
}, # arguments to pass to lmfit. Should not need to change this.
"snr_lower_limit": 10, # spaxels with SNR below this for the given line will not be fit.
#
# Which emission lines to fit, and which models to fit them.
#
# "lines" must be a list of threadcount.lines.Line class objects.
# There are some preset ones, or you can also define your own.
"lines": [
tc.lines.L_OIII5007,
# tc.lines.L_Hb4861,
],
#
# "models" is a list of lists. The line lines[i] will be fit with the lmfit
# models contained in the list at models[i].
# This means the simplest entry here is: "models": [[tc.models.Const_1GaussModel()]]
# which corresponds to one model being fit to one line.
"models": [ # This is a list of lists, 1 list for each of the above lines.
# 5007 models
[
tc.models.Const_1GaussModel(),
# tc.models.Const_2GaussModel(),
# tc.models.Const_3GaussModel(),
],
# hb models
# [tc.models.Const_1GaussModel()],
],
#
# If any models list has more than one entry, a "best" model is going to be
# chosen. This has the options to choose automatically, or to include an
# interactive portion.
"d_aic": -150, # starting point for choosing models based on delta aic.
# Plot fits and have the user choose which fit is best in the terminal
"interactively_choose_fits": False,
# Selection of pixels to always view and choose, but only if the above line is True.
"always_manually_choose": [
# (28, 62),
# (29, 63),
], # interactively_choose_fits must also be True for these to register.
# Options to include Monte Carlo iterations on the "best" fit.
"mc_snr": 25, # SNR below which the monte carlo is run.
"mc_n_iterations": 20, # number of monte carlo fits for each spaxel.
}
# test if cube has been opened. if not, open cube and deredshift.
if "cube" not in user_settings.keys():
user_settings = open_cube_and_deredshift.run(user_settings)
set_rcParams.set_params({"image.aspect": user_settings["image_aspect"]})
s = tc.fit.process_settings_dict(default_settings, user_settings) # s for settings.
update_settings(s)
fit_lines(s)
return s.__dict__
|
# -*- coding: utf-8 -*-
# @Time : 2019/10/9 20:36
# @Author : Ziqi Wang
# @FileName: read_video.py
# @Email: [email protected]
import cv2
base_path = "D:\Pyproject\image_process\ets_dataset_concordia1_v1"
file_name = "period1-1-1-gray.avi"
data_path = "D:\Pyproject\image_process\data"
def video_to_img(base_path="D:\Pyproject\image_process\ets_dataset_concordia1_v1", file_name="period1-1-1-gray.avi",
img_path=None):
file_path = base_path + "\\" + file_name
video_capture = cv2.VideoCapture()
video_capture.open(file_path)
fps = video_capture.get(cv2.CAP_PROP_FPS)
frames = video_capture.get(cv2.CAP_PROP_FRAME_COUNT)
    # fps is the frame rate; frames is the total number of frames in the video
print("fps=", fps, "frames=", frames)
    file_name = file_name.split(".")[0]
    for i in range(int(frames)):
        ret, frame = video_capture.read()
        if not ret:  # stop once no more frames can be read
            break
        cv2.imwrite(img_path + "\\" + str(i) + ".png", frame)
# video_to_img()
|
import json
import os
import pathlib
import time
from typing import Optional, List, Set, Dict, Union
import git
from lab import logger, monit
from lab.internal.configs import Configs, ConfigProcessor
from lab.internal.experiment import experiment_run  # module needed for get_last_run_checkpoint below
from lab.internal.experiment.experiment_run import Run
from lab.internal.lab import lab_singleton
from lab.internal.logger import logger_singleton as logger_internal
from lab.internal.util import is_ipynb
from lab.logger import Text
from lab.utils import get_caller_file
class CheckpointSaver:
def save(self, global_step):
raise NotImplementedError()
def load(self, checkpoint_path):
raise NotImplementedError()
class Checkpoint(CheckpointSaver):
_models: Dict[str, any]
def __init__(self, path: pathlib.PurePath):
self.path = path
self._models = {}
def add_models(self, models: Dict[str, any]):
"""
## Set variable for saving and loading
"""
self._models.update(models)
def save_model(self,
name: str,
model: any,
checkpoint_path: pathlib.Path) -> any:
raise NotImplementedError()
def save(self, global_step):
"""
## Save model as a set of numpy arrays
"""
checkpoints_path = pathlib.Path(self.path)
if not checkpoints_path.exists():
checkpoints_path.mkdir()
checkpoint_path = checkpoints_path / str(global_step)
assert not checkpoint_path.exists()
checkpoint_path.mkdir()
files = {}
for name, model in self._models.items():
files[name] = self.save_model(name, model, checkpoint_path)
# Save header
with open(str(checkpoint_path / "info.json"), "w") as f:
f.write(json.dumps(files))
def load_model(self,
name: str,
model: any,
checkpoint_path: pathlib.Path,
info: any):
raise NotImplementedError()
def load(self, checkpoint_path):
"""
## Load model as a set of numpy arrays
"""
with open(str(checkpoint_path / "info.json"), "r") as f:
files = json.loads(f.readline())
# Load each model
for name, model in self._models.items():
self.load_model(name, model, checkpoint_path, files[name])
return True
class Experiment:
r"""
Each experiment has different configurations or algorithms.
An experiment can have multiple runs.
Keyword Arguments:
name (str, optional): name of the experiment
python_file (str, optional): path of the Python file that
created the experiment
comment (str, optional): a short description of the experiment
writers (Set[str], optional): list of writers to write stat to
ignore_callers: (Set[str], optional): list of files to ignore when
automatically determining ``python_file``
tags (Set[str], optional): Set of tags for experiment
"""
run: Run
configs_processor: Optional[ConfigProcessor]
    # whether to refuse to start the experiment if there are uncommitted changes.
check_repo_dirty: bool
checkpoint_saver: CheckpointSaver
def __init__(self, *,
name: Optional[str],
python_file: Optional[str],
comment: Optional[str],
writers: Set[str],
ignore_callers: Set[str],
tags: Optional[Set[str]]):
if python_file is None:
python_file = get_caller_file(ignore_callers)
if python_file.startswith('<ipython'):
assert is_ipynb()
if name is None:
raise ValueError("You must specify python_file or experiment name"
" when creating an experiment from a python notebook.")
lab_singleton().set_path(os.getcwd())
python_file = 'notebook.ipynb'
else:
lab_singleton().set_path(python_file)
if name is None:
file_path = pathlib.PurePath(python_file)
name = file_path.stem
if comment is None:
comment = ''
self.name = name
self.experiment_path = lab_singleton().experiments / name
self.check_repo_dirty = lab_singleton().check_repo_dirty
self.configs_processor = None
experiment_path = pathlib.Path(self.experiment_path)
if not experiment_path.exists():
experiment_path.mkdir(parents=True)
if tags is None:
tags = set(name.split('_'))
self.run = Run.create(
experiment_path=self.experiment_path,
python_file=python_file,
trial_time=time.localtime(),
comment=comment,
tags=list(tags))
repo = git.Repo(lab_singleton().path)
self.run.commit = repo.head.commit.hexsha
self.run.commit_message = repo.head.commit.message.strip()
self.run.is_dirty = repo.is_dirty()
self.run.diff = repo.git.diff()
logger_internal().reset_writers()
if 'sqlite' in writers:
from lab.internal.logger.writers import sqlite
logger_internal().add_writer(sqlite.Writer(self.run.sqlite_path))
if 'tensorboard' in writers:
from lab.internal.logger.writers import tensorboard
logger_internal().add_writer(tensorboard.Writer(self.run.tensorboard_log_path))
self.checkpoint_saver = None
def __print_info_and_check_repo(self):
"""
🖨 Print the experiment info and check git repo status
"""
logger.log()
logger.log([
(self.name, Text.title),
': ',
(str(self.run.uuid), Text.meta)
])
if self.run.comment != '':
logger.log(['\t', (self.run.comment, Text.highlight)])
logger.log([
"\t"
"[dirty]" if self.run.is_dirty else "[clean]",
": ",
(f"\"{self.run.commit_message.strip()}\"", Text.highlight)
])
if self.run.load_run is not None:
logger.log([
"\t"
"loaded from",
": ",
(f"{self.run.load_run}", Text.meta2),
])
# Exit if git repository is dirty
if self.check_repo_dirty and self.run.is_dirty:
logger.log([("[FAIL]", Text.danger),
" Cannot trial an experiment with uncommitted changes."])
exit(1)
def _load_checkpoint(self, checkpoint_path: pathlib.PurePath):
if self.checkpoint_saver is not None:
self.checkpoint_saver.load(checkpoint_path)
def save_checkpoint(self):
if self.checkpoint_saver is not None:
self.checkpoint_saver.save(logger_internal().global_step)
def calc_configs(self,
configs: Optional[Configs],
configs_dict: Dict[str, any],
run_order: Optional[List[Union[List[str], str]]]):
self.configs_processor = ConfigProcessor(configs, configs_dict)
self.configs_processor(run_order)
logger.log()
def __start_from_checkpoint(self, run_uuid: str, checkpoint: Optional[int]):
checkpoint_path, global_step = experiment_run.get_last_run_checkpoint(
self.experiment_path,
run_uuid,
checkpoint)
if global_step is None:
return 0
else:
with monit.section("Loading checkpoint"):
self._load_checkpoint(checkpoint_path)
self.run.load_run = run_uuid
return global_step
def start(self, *,
run_uuid: Optional[str] = None,
checkpoint: Optional[int] = None):
if run_uuid is not None:
if checkpoint is None:
checkpoint = -1
global_step = self.__start_from_checkpoint(run_uuid, checkpoint)
else:
global_step = 0
self.run.start_step = global_step
logger_internal().set_start_global_step(global_step)
self.__print_info_and_check_repo()
if self.configs_processor is not None:
self.configs_processor.print()
self.run.save_info()
if self.configs_processor is not None:
self.configs_processor.save(self.run.configs_path)
logger_internal().save_indicators(self.run.indicators_path)
logger_internal().save_artifacts(self.run.artifacts_path)
if self.configs_processor:
logger_internal().write_h_parameters(self.configs_processor.get_hyperparams())
_internal: Optional[Experiment] = None
def experiment_singleton() -> Experiment:
global _internal
assert _internal is not None
return _internal
def create_experiment(*,
name: Optional[str],
python_file: Optional[str],
comment: Optional[str],
writers: Set[str],
ignore_callers: Set[str],
tags: Optional[Set[str]]):
global _internal
_internal = Experiment(name=name,
python_file=python_file,
comment=comment,
writers=writers,
ignore_callers=ignore_callers,
tags=tags)
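# --- Hedged usage sketch (not part of the original module) --------------------
# How the module-level helpers above are meant to be wired together; the
# argument values are illustrative only, and a lab configuration plus a git
# repository are assumed to be present.
if __name__ == '__main__':
    create_experiment(name='usage_sketch',
                      python_file=None,
                      comment='minimal sketch',
                      writers={'sqlite'},
                      ignore_callers=set(),
                      tags=None)
    experiment_singleton().start()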
|
#################################################################################
# Copyright (C) 2009-2011 Vladimir "Farcaller" Pouzanov <[email protected]> #
# #
# Permission is hereby granted, free of charge, to any person obtaining a copy #
# of this software and associated documentation files (the "Software"), to deal #
# in the Software without restriction, including without limitation the rights #
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #
# copies of the Software, and to permit persons to whom the Software is #
# furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in #
# all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN #
# THE SOFTWARE. #
#################################################################################
import struct
from datetime import datetime, timedelta
class BPListWriter(object):
def __init__(self, objects):
self.bplist = ""
self.objects = objects
def binary(self):
'''binary -> string
Generates bplist
'''
self.data = 'bplist00'
# TODO: flatten objects and count max length size
# TODO: write objects and save offsets
# TODO: write offsets
# TODO: write metadata
return self.data
def write(self, filename):
'''
Writes bplist to file
'''
if self.bplist != "":
pass
# TODO: save self.bplist to file
else:
raise Exception('BPlist not yet generated')
class BPListReader(object):
def __init__(self, s):
self.data = s
self.objects = []
self.resolved = {}
def __unpackIntStruct(self, sz, s):
'''__unpackIntStruct(size, string) -> int
Unpacks the integer of given size (1, 2 or 4 bytes) from string
'''
if sz == 1:
ot = '!B'
elif sz == 2:
ot = '!H'
elif sz == 4:
ot = '!I'
elif sz == 8:
ot = '!Q'
else:
raise Exception('int unpack size '+str(sz)+' unsupported')
return struct.unpack(ot, s)[0]
def __unpackInt(self, offset):
'''__unpackInt(offset) -> int
Unpacks int field from plist at given offset
'''
return self.__unpackIntMeta(offset)[1]
def __unpackIntMeta(self, offset):
'''__unpackIntMeta(offset) -> (size, int)
Unpacks int field from plist at given offset and returns its size and value
'''
obj_header = struct.unpack('!B', self.data[offset])[0]
obj_type, obj_info = (obj_header & 0xF0), (obj_header & 0x0F)
int_sz = 2**obj_info
return int_sz, self.__unpackIntStruct(int_sz, self.data[offset+1:offset+1+int_sz])
def __resolveIntSize(self, obj_info, offset):
'''__resolveIntSize(obj_info, offset) -> (count, offset)
Calculates count of objref* array entries and returns count and offset to first element
'''
if obj_info == 0x0F:
ofs, obj_count = self.__unpackIntMeta(offset+1)
objref = offset+2+ofs
else:
obj_count = obj_info
objref = offset+1
return obj_count, objref
def __unpackFloatStruct(self, sz, s):
'''__unpackFloatStruct(size, string) -> float
Unpacks the float of given size (4 or 8 bytes) from string
'''
if sz == 4:
ot = '!f'
elif sz == 8:
ot = '!d'
else:
raise Exception('float unpack size '+str(sz)+' unsupported')
return struct.unpack(ot, s)[0]
    def __unpackFloat(self, offset):
        '''__unpackFloat(offset) -> float
        Unpacks float field from plist at given offset
        '''
        obj_header = struct.unpack('!B', self.data[offset])[0]
        obj_type, obj_info = (obj_header & 0xF0), (obj_header & 0x0F)
        int_sz = 2**obj_info
        # return only the value; callers such as __unpackItem expect a float
        return self.__unpackFloatStruct(int_sz, self.data[offset+1:offset+1+int_sz])
def __unpackDate(self, offset):
td = int(struct.unpack(">d", self.data[offset+1:offset+9])[0])
return datetime(year=2001,month=1,day=1) + timedelta(seconds=td)
def __unpackItem(self, offset):
'''__unpackItem(offset)
Unpacks and returns an item from plist
'''
obj_header = struct.unpack('!B', self.data[offset])[0]
obj_type, obj_info = (obj_header & 0xF0), (obj_header & 0x0F)
if obj_type == 0x00:
if obj_info == 0x00: # null 0000 0000
return None
elif obj_info == 0x08: # bool 0000 1000 // false
return False
elif obj_info == 0x09: # bool 0000 1001 // true
return True
elif obj_info == 0x0F: # fill 0000 1111 // fill byte
raise Exception("0x0F Not Implemented") # this is really pad byte, FIXME
else:
                raise Exception('unpack item type '+str(obj_header)+' at '+str(offset)+' failed')
elif obj_type == 0x10: # int 0001 nnnn ... // # of bytes is 2^nnnn, big-endian bytes
return self.__unpackInt(offset)
elif obj_type == 0x20: # real 0010 nnnn ... // # of bytes is 2^nnnn, big-endian bytes
return self.__unpackFloat(offset)
elif obj_type == 0x30: # date 0011 0011 ... // 8 byte float follows, big-endian bytes
return self.__unpackDate(offset)
elif obj_type == 0x40: # data 0100 nnnn [int] ... // nnnn is number of bytes unless 1111 then int count follows, followed by bytes
obj_count, objref = self.__resolveIntSize(obj_info, offset)
return self.data[objref:objref+obj_count] # XXX: we return data as str
elif obj_type == 0x50: # string 0101 nnnn [int] ... // ASCII string, nnnn is # of chars, else 1111 then int count, then bytes
obj_count, objref = self.__resolveIntSize(obj_info, offset)
return self.data[objref:objref+obj_count]
elif obj_type == 0x60: # string 0110 nnnn [int] ... // Unicode string, nnnn is # of chars, else 1111 then int count, then big-endian 2-byte uint16_t
obj_count, objref = self.__resolveIntSize(obj_info, offset)
return self.data[objref:objref+obj_count*2].decode('utf-16be')
elif obj_type == 0x80: # uid 1000 nnnn ... // nnnn+1 is # of bytes
# FIXME: Accept as a string for now
obj_count, objref = self.__resolveIntSize(obj_info, offset)
return self.data[objref:objref+obj_count]
elif obj_type == 0xA0: # array 1010 nnnn [int] objref* // nnnn is count, unless '1111', then int count follows
obj_count, objref = self.__resolveIntSize(obj_info, offset)
arr = []
for i in range(obj_count):
arr.append(self.__unpackIntStruct(self.object_ref_size, self.data[objref+i*self.object_ref_size:objref+i*self.object_ref_size+self.object_ref_size]))
return arr
elif obj_type == 0xC0: # set 1100 nnnn [int] objref* // nnnn is count, unless '1111', then int count follows
# XXX: not serializable via apple implementation
raise Exception("0xC0 Not Implemented") # FIXME: implement
elif obj_type == 0xD0: # dict 1101 nnnn [int] keyref* objref* // nnnn is count, unless '1111', then int count follows
obj_count, objref = self.__resolveIntSize(obj_info, offset)
keys = []
for i in range(obj_count):
keys.append(self.__unpackIntStruct(self.object_ref_size, self.data[objref+i*self.object_ref_size:objref+i*self.object_ref_size+self.object_ref_size]))
values = []
objref += obj_count*self.object_ref_size
for i in range(obj_count):
values.append(self.__unpackIntStruct(self.object_ref_size, self.data[objref+i*self.object_ref_size:objref+i*self.object_ref_size+self.object_ref_size]))
dic = {}
for i in range(obj_count):
dic[keys[i]] = values[i]
return dic
else:
raise Exception('don\'t know how to unpack obj type '+hex(obj_type)+' at '+str(offset))
def __resolveObject(self, idx):
try:
return self.resolved[idx]
except KeyError:
obj = self.objects[idx]
if type(obj) == list:
newArr = []
for i in obj:
newArr.append(self.__resolveObject(i))
self.resolved[idx] = newArr
return newArr
if type(obj) == dict:
newDic = {}
for k,v in obj.iteritems():
rk = self.__resolveObject(k)
rv = self.__resolveObject(v)
newDic[rk] = rv
self.resolved[idx] = newDic
return newDic
else:
self.resolved[idx] = obj
return obj
def parse(self):
# read header
if self.data[:8] != 'bplist00':
raise Exception('Bad magic')
# read trailer
self.offset_size, self.object_ref_size, self.number_of_objects, self.top_object, self.table_offset = struct.unpack('!6xBB4xI4xI4xI', self.data[-32:])
#print "** plist offset_size:",self.offset_size,"objref_size:",self.object_ref_size,"num_objs:",self.number_of_objects,"top:",self.top_object,"table_ofs:",self.table_offset
# read offset table
self.offset_table = self.data[self.table_offset:-32]
self.offsets = []
ot = self.offset_table
for i in range(self.number_of_objects):
offset_entry = ot[:self.offset_size]
ot = ot[self.offset_size:]
self.offsets.append(self.__unpackIntStruct(self.offset_size, offset_entry))
#print "** plist offsets:",self.offsets
# read object table
self.objects = []
k = 0
for i in self.offsets:
obj = self.__unpackItem(i)
#print "** plist unpacked",k,type(obj),obj,"at",i
k += 1
self.objects.append(obj)
# rebuild object tree
#for i in range(len(self.objects)):
# self.__resolveObject(i)
# return root object
return self.__resolveObject(self.top_object)
@classmethod
def plistWithString(cls, s):
parser = cls(s)
return parser.parse()
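# --- Hedged helper sketch (not part of the original module) -------------------
# Convenience wrapper around plistWithString for reading a binary plist from
# disk; like the reader above, it assumes Python 2 semantics, where reading an
# 'rb' file handle returns a str that can be indexed byte by byte.
def read_bplist_file(path):
    '''read_bplist_file(path) -> parsed root object of a binary plist file'''
    with open(path, 'rb') as f:
        return BPListReader.plistWithString(f.read())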
# helpers for testing
def plist(obj):
from Foundation import NSPropertyListSerialization, NSPropertyListBinaryFormat_v1_0
b = NSPropertyListSerialization.dataWithPropertyList_format_options_error_(obj, NSPropertyListBinaryFormat_v1_0, 0, None)
return str(b.bytes())
def unplist(s):
from Foundation import NSData, NSPropertyListSerialization
d = NSData.dataWithBytes_length_(s, len(s))
return NSPropertyListSerialization.propertyListWithData_options_format_error_(d, 0, None, None)
|
# Fairly direct approach: the first idea is to recurse from the back of both strings.
# However, when expanding the possibilities of '*' the same sub-results are recomputed many times.
# A global 2-D table indexed by (substring of s, sub-pattern of p) could memoize whether they match.
class Solution:
    def isMatch(self, s: str, p: str) -> bool:
        if len(s) == 0 and len(p) == 0:
            return True
        if len(s) != 0 and len(p) == 0:
            return False
        if len(s) == 0 and len(p) != 0:
            return len(p) >= 2 and p[-1] == '*' and self.isMatch(s, p[:-2])
        last_p = p[-1]
        if last_p.isalpha():
            if last_p != s[-1]:
                return False
            return self.isMatch(s[:-1], p[:-1])
        elif last_p == '.':
            return self.isMatch(s[:-1], p[:-1])
        elif last_p == '*':
            if len(p) == 1:
                return False
            second_p = p[-2]
            if second_p == s[-1] or second_p == '.':
                # '*' matches exactly one char (consume both), '*' matches one or more (keep using it),
                # or '*' matches zero chars (drop the trailing "x*")
                return self.isMatch(s[:-1], p[:-2]) or self.isMatch(s[:-1], p) or self.isMatch(s, p[:-2])
            else:
                # here '*' can only match zero occurrences
                return self.isMatch(s, p[:-2])
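# --- Hedged sketch (not part of the original solution) ------------------------
# The memoized variant that the comment at the top suggests: cache results per
# (remaining length of s, remaining length of p) so each substring/sub-pattern
# pair is evaluated once instead of exponentially many times.
from functools import lru_cache

class MemoSolution:
    def isMatch(self, s: str, p: str) -> bool:
        @lru_cache(maxsize=None)
        def match(i: int, j: int) -> bool:
            # i and j are the lengths of the prefixes of s and p still to match
            if j == 0:
                return i == 0
            if i == 0:
                return j >= 2 and p[j - 1] == '*' and match(i, j - 2)
            if p[j - 1] == '*':
                if j == 1:
                    return False
                if p[j - 2] == s[i - 1] or p[j - 2] == '.':
                    return match(i - 1, j - 2) or match(i - 1, j) or match(i, j - 2)
                return match(i, j - 2)
            if p[j - 1] == '.' or p[j - 1] == s[i - 1]:
                return match(i - 1, j - 1)
            return False

        return match(len(s), len(p))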
if __name__ == "__main__":
s = Solution()
print(s.isMatch("a","ab*"))
|
# Copyright 2017 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import collections
import six
class CheckResult(collections.namedtuple('CheckResult', ['healthy', 'message'])):
"""Result for the health check
healthy - boolean
message - string
"""
@six.add_metaclass(abc.ABCMeta)
class BaseHealthCheck(object):
"""Abstract class implemented by the monasca-api healthcheck classes"""
@abc.abstractmethod
def health_check(self):
"""Evaluate health of given service"""
raise NotImplementedError # pragma: no cover
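# --- Hedged example (not part of the original module) -------------------------
# Minimal concrete check, shown only to illustrate the contract above:
# subclasses implement health_check() and report a CheckResult.
class AlwaysHealthyCheck(BaseHealthCheck):
    """Trivial health check that always reports the service as healthy."""

    def health_check(self):
        return CheckResult(healthy=True, message='OK')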
|
from django import forms
from .models import Restaurante, Review
class BusquedaPorNombre(forms.Form):
nombre = forms.CharField(label="Nombre del Restaurante")
class BuscaTituloCuerpo(forms.Form):
contenido = forms.CharField(label="Contenido en cuerpo o titulo")
|
import os
import re
def _create_path(base: str, tail: str) -> str:
"""
Creates a safe path according to which OS the program is running on
:param base: The current path
:param tail: The last part of the path that is to be added
:return: A new OS safe path
"""
return base + ('\\' if os.name == 'nt' else '/') + __safe_file_name(tail)
def __safe_file_name(name: str) -> str:
"""
This helper is responsible for removing forbidden OS characters from a certain string.
:param name: String to be converted
:return: Safe string
"""
return re.sub(r'<|>|/|:|\"|\\|\||\?|\*', '', name) |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from astropy import wcs
from . helper import SimModelTAB
def test_2d_spatial_tab_roundtrip(tab_wcs_2di):
nx, ny = tab_wcs_2di.pixel_shape
# generate "random" test coordinates:
np.random.seed(1)
xy = 0.51 + [nx + 0.99, ny + 0.99] * np.random.random((100, 2))
rd = tab_wcs_2di.wcs_pix2world(xy, 1)
xy_roundtripped = tab_wcs_2di.wcs_world2pix(rd, 1)
m = np.logical_and(*(np.isfinite(xy_roundtripped).T))
assert np.allclose(xy[m], xy_roundtripped[m], rtol=0, atol=1e-7)
def test_2d_spatial_tab_vs_model():
nx = 150
ny = 200
model = SimModelTAB(nx=nx, ny=ny)
# generate FITS HDU list:
hdulist = model.hdulist
# create WCS object:
w = wcs.WCS(hdulist[0].header, hdulist)
# generate "random" test coordinates:
np.random.seed(1)
xy = 0.51 + [nx + 0.99, ny + 0.99] * np.random.random((100, 2))
rd = w.wcs_pix2world(xy, 1)
rd_model = model.fwd_eval(xy)
assert np.allclose(rd, rd_model, rtol=0, atol=1e-7)
|
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import fcl
class World:
def __init__(self, x_min, x_max, v_min, v_max, Pset):
self.x_min = x_min
self.x_max = x_max
self.v_min = v_min
self.v_max = v_max
self.Pset = Pset
self.fcl_manager = fcl.DynamicAABBTreeCollisionManager()
objs = []
for p in self.Pset:
objs.append(self.rec2fcl(p))
self.fcl_manager.registerObjects(objs)
self.fcl_manager.setup()
def rec2fcl(self, rec):
box = fcl.Box(rec[2], rec[3], 1.0)
tf = fcl.Transform([rec[0], rec[1], 0])
return fcl.CollisionObject(box, tf)
def point2fcl(self, p):
point = fcl.Cylinder(0.01, 1)
tf = fcl.Transform([p[0], p[1], 0])
return fcl.CollisionObject(point, tf)
def isValidPoint(self, s_q):
        # check if the sampled point is inside the world
if not (self.x_min[0] < s_q[0] < self.x_max[0]
and self.x_min[1] < s_q[1] < self.x_max[1]
and self.v_min[0] < s_q[2] < self.v_max[0]
and self.v_min[1] < s_q[3] < self.v_max[1]):
return False
        # one-to-many check: the sampled point against all registered obstacles
fclPoint = self.point2fcl(s_q)
req = fcl.CollisionRequest(num_max_contacts=100, enable_contact=True)
rdata = fcl.CollisionData(request=req)
self.fcl_manager.collide(fclPoint, rdata, fcl.defaultCollisionCallback)
# print('Collision between manager 1 and Mesh?: {}'.format(rdata.result.is_collision))
# print('Contacts:')
# for c in rdata.result.contacts:
# print('\tO1: {}, O2: {}'.format(c.o1, c.o2))
return not rdata.result.is_collision
def isValid(self, q_set):
        # check validity for multiple points.
        # will be used for a piecewise path consisting of multiple points
# for q in q_set:
# if not self.isValidPoint(q):
# return False
# return True
if len(q_set.shape) < 2:
return self.isValidPoint(q_set)
manager_q = fcl.DynamicAABBTreeCollisionManager()
qs = []
for q in q_set:
qs.append(self.point2fcl(q))
manager_q.registerObjects(qs)
req = fcl.CollisionRequest(num_max_contacts=100, enable_contact=True)
rdata = fcl.CollisionData(request=req)
self.fcl_manager.collide(manager_q, rdata, fcl.defaultCollisionCallback)
# print('Collision between manager 1 and Mesh?: {}'.format(rdata.result.is_collision))
# print('Contacts:')
# for c in rdata.result.contacts:
# print('\tO1: {}, O2: {}'.format(c.o1, c.o2))
return not rdata.result.is_collision
def drawRec(self, p, ax):
rect = mpl.patches.Rectangle((p[0]-p[2]/2, p[1]-p[3]/2), p[2], p[3])
ax.add_patch(rect)
def show(self, ax):
p1 = [self.x_min[0], self.x_min[1]]
p2 = [self.x_min[0], self.x_max[1]]
p3 = [self.x_max[0], self.x_max[1]]
p4 = [self.x_max[0], self.x_min[1]]
ax.plot([p1[0], p2[0]], [p1[1], p2[1]], "k-")
ax.plot([p2[0], p3[0]], [p2[1], p3[1]], "k-")
ax.plot([p3[0], p4[0]], [p3[1], p4[1]], "k-")
ax.plot([p4[0], p1[0]], [p4[1], p1[1]], "k-")
for P in self.Pset:
self.drawRec(P, ax)
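# --- Hedged usage sketch (not part of the original module) --------------------
# Illustrative values only: obstacles are given as [x_center, y_center, w, h]
# rectangles, matching how rec2fcl and drawRec read them above; python-fcl and
# matplotlib must be installed.
if __name__ == '__main__':
    obstacles = [[0.5, 0.5, 0.2, 0.2], [-0.3, 0.1, 0.3, 0.1]]
    world = World(x_min=[-1.0, -1.0], x_max=[1.0, 1.0],
                  v_min=[-0.5, -0.5], v_max=[0.5, 0.5], Pset=obstacles)
    print(world.isValidPoint(np.array([0.0, 0.0, 0.0, 0.0])))   # free space -> True
    print(world.isValidPoint(np.array([0.5, 0.5, 0.0, 0.0])))   # inside an obstacle -> False
    fig, ax = plt.subplots()
    world.show(ax)
    plt.show()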
|
from app.db.error.non_existent_error import NonExistentError
from fastapi.responses import JSONResponse
from fastapi import status
from starlette.exceptions import HTTPException
async def custom_http_exception_handler(request, exc: HTTPException):
return JSONResponse(status_code=exc.status_code, content=exc.detail)
async def custom_value_error_handler(request, exc: ValueError):
return JSONResponse(status_code=status.HTTP_400_BAD_REQUEST, content=str(exc))
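# --- Hedged usage sketch (not part of the original module) --------------------
# How these handlers would typically be registered on a FastAPI app; the app
# object below is illustrative only (the NonExistentError handler defined next
# is registered the same way).
#
# from fastapi import FastAPI
# app = FastAPI()
# app.add_exception_handler(HTTPException, custom_http_exception_handler)
# app.add_exception_handler(ValueError, custom_value_error_handler)
# app.add_exception_handler(NonExistentError, custom_non_existent_error_handler)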
async def custom_non_existent_error_handler(request, exc: NonExistentError):
return JSONResponse(status_code=status.HTTP_404_NOT_FOUND, content=str(exc)) |
# Reciprocal of the sum of reciprocals (e.g. the combined resistance of N resistors in parallel)
N = int(input())
A = list(map(int, input().split()))
v = 0
for i in range(N):
v += 1 / A[i]
print(1 / v)
|
class Solution:
def uniquePathsWithObstacles(self, obstacleGrid: List[List[int]]) -> int:
m = len(obstacleGrid)
n = len(obstacleGrid[0])
dp = [[0 for i in range(n)] for j in range(m)]
if obstacleGrid[0][0] == 1:
return 0
dp[0][0] = 1
for col in range(1, n):
if obstacleGrid[0][col] == 1:
dp[0][col] = 0
else:
dp[0][col] = dp[0][col - 1]
for row in range(1, m):
if obstacleGrid[row][0] == 1:
dp[row][0] = 0
else:
dp[row][0] = dp[row - 1][0]
for row in range(1, m):
for col in range(1, n):
if obstacleGrid[row][col] == 1:
dp[row][col] = 0
else:
dp[row][col] = dp[row][col - 1] + dp[row - 1][col]
return dp[-1][-1]
|
"""Defines Falcon hooks
Copyright 2013 by Rackspace Hosting, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from functools import wraps
import six
from falcon import HTTP_METHODS
from falcon import api_helpers
def before(action):
"""Decorator to execute the given action function *before* the responder.
Args:
action: A function with a similar signature to a resource responder
method, taking (req, resp, params), where params includes values for
URI template field names, if any. Hooks may also add pseudo-params
of their own. For example:
def do_something(req, resp, params):
try:
params['id'] = int(params['id'])
except ValueError:
raise falcon.HTTPBadRequest('Invalid ID',
'ID was not valid.')
params['answer'] = 42
"""
def _before(responder_or_resource):
if isinstance(responder_or_resource, six.class_types):
resource = responder_or_resource
for method in HTTP_METHODS:
responder_name = 'on_' + method.lower()
try:
responder = getattr(resource, responder_name)
except AttributeError:
# resource does not implement this method
pass
else:
# Usually expect a method, but any callable will do
if hasattr(responder, '__call__'):
# This pattern is necessary to capture the current
# value of responder in the do_before_all closure;
# otherwise, they will capture the same responder
# variable that is shared between iterations of the
# for loop, above.
def let(responder=responder):
@wraps(responder)
def do_before_all(self, req, resp, **kwargs):
action(req, resp, kwargs)
responder(self, req, resp, **kwargs)
api_helpers._propagate_argspec(
do_before_all,
responder)
setattr(resource, responder_name, do_before_all)
let()
return resource
else:
responder = responder_or_resource
@wraps(responder)
def do_before_one(self, req, resp, **kwargs):
action(req, resp, kwargs)
responder(self, req, resp, **kwargs)
api_helpers._propagate_argspec(do_before_one, responder)
return do_before_one
return _before
def after(action):
"""Decorator to execute the given action function *after* the responder.
Args:
action: A function with a similar signature to a resource responder
method, taking (req, resp).
"""
def _after(responder_or_resource):
if isinstance(responder_or_resource, six.class_types):
resource = responder_or_resource
for method in HTTP_METHODS:
responder_name = 'on_' + method.lower()
try:
responder = getattr(resource, responder_name)
except AttributeError:
# resource does not implement this method
pass
else:
# Usually expect a method, but any callable will do
if hasattr(responder, '__call__'):
def let(responder=responder):
@wraps(responder)
def do_after_all(self, req, resp, **kwargs):
responder(self, req, resp, **kwargs)
action(req, resp)
api_helpers._propagate_argspec(
do_after_all,
responder)
setattr(resource, responder_name, do_after_all)
let()
return resource
else:
responder = responder_or_resource
@wraps(responder)
def do_after_one(self, req, resp, **kwargs):
responder(self, req, resp, **kwargs)
action(req, resp)
api_helpers._propagate_argspec(do_after_one, responder)
return do_after_one
return _after
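# --- Hedged usage sketch (not part of the original module) --------------------
# Minimal illustration of the decorators above; the hook and resource are
# invented for this example and are not part of Falcon itself.
#
# import falcon
#
# def validate_id(req, resp, params):
#     try:
#         params['id'] = int(params['id'])
#     except (KeyError, ValueError):
#         raise falcon.HTTPBadRequest('Invalid ID', 'ID was not valid.')
#
# @before(validate_id)
# class ThingResource(object):
#     def on_get(self, req, resp, id):
#         resp.body = 'thing %d' % id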
|
import curses
import py2048
def incurses(screen, game):
curses.noecho()
curses.cbreak()
screen.keypad(True)
try:
while not game.isover:
screen.addstr(0, 0, repr(game))
ch = screen.getch()
if ch == curses.KEY_RIGHT:
game.moveright()
elif ch == curses.KEY_LEFT:
game.moveleft()
elif ch == curses.KEY_UP:
game.moveup()
elif ch == curses.KEY_DOWN:
game.movedown()
elif ch == ord('r'):
game.reset()
elif ch == ord('q'):
break
finally:
curses.nocbreak()
        screen.keypad(False)
curses.echo()
def terminal_run(size):
game = py2048.Game(size)
while True:
curses.wrapper(incurses, game)
if not game.isover:
break
print(game)
while True:
print("Press 'q' to quit\n"
"Press 'r' to start a new game")
ch = input()
if ch == 'r':
game.reset()
break
elif ch == 'q':
return
|
import functools
import time
import logging
logger = logging.getLogger("wrapper")
#------------------------------------------------------------------------------#
# TIME
def time_measurement(func):
"""Timestamp decorator for dedicated functions"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
startTime = time.time()
result = func(*args, **kwargs)
execTime = time.time() - startTime
mlsec = repr(execTime).split('.')[1][:3]
readable = time.strftime("%H:%M:%S.{}".format(mlsec), time.gmtime(execTime))
logger.info('----> Function "{}" took : {} sec'.format(func.__name__, readable))
return result
return wrapper
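# --- Hedged usage sketch (not part of the original module) --------------------
# Illustrative only: any function can be wrapped, and the elapsed time is
# reported through the "wrapper" logger configured above.
@time_measurement
def _example_sleep(seconds=0.2):
    """Sleep briefly so the decorator has something to measure."""
    time.sleep(seconds)
    return seconds

if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    _example_sleep()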
|
# (C) Copyright IBM Corp. 2016
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of DyBMs.
.. seealso:: Takayuki Osogami and Makoto Otsuka, "Seven neurons \
memorizing sequences of alphabetical images via spike-timing dependent \
plasticity," Scientific Repeports, 5, 14149; doi: 10.1038/srep14149 \
(2015). http://www.nature.com/articles/srep14149
.. seealso:: Takayuki Osogami and Makoto Otsuka, Learning dynamic \
Boltzmann machines with spike-timing dependent plasticity, Technical \
Report RT0967, IBM Research, 2015. https://arxiv.org/abs/1509.08634
"""
__author__ = "Takayuki Osogami, Rudy Raymond"
import numpy as np
from six.moves import xrange, zip
from copy import deepcopy
from itertools import product
from .. import arraymath as amath
from ..base.generator import ListGenerator, ElementGenerator, SequenceGenerator
from ..time_series.time_series_model import StochasticTimeSeriesModel
from ..time_series.vector_regression import VectorRegression, MultiTargetVectorRegression, \
VectorRegressionWithHidden, sigmoid
from ..base.sgd import AdaGrad
from ..base.metrics import RMSE, baseline_RMSE
DEBUG = False
class LinearDyBM(VectorRegression):
"""LinearDyBM is a minimal DyBM for real-valued time-series. It
extends a vector auto-regressive (VAR) model by incorporating
eligibility traces (and an Echo State Network or ESN).
Specifically, a pattern, :math:`\mathbf{x}^{[t]}`, at time
:math:`t` is predicted with
.. math::
\mathbf{b}
+ \sum_{\ell=1}^{L} \mathbf{W}^{[\ell]} \, \mathbf{x}^{[t-\ell]}
+ \sum_{k=1}^{K} \mathbf{V}^{[k]} \, \mathbf{e}^{[k]}
+ \\Phi^{[t-1]}
Here, :math:`\mathbf{x}^{[t-\ell]}` is the pattern at time
:math:`t-\ell`, and :math:`\mathbf{e}^{[k]}` is a vector of
eligibility traces, which are updated as follows after receiving a
pattern at each time :math:`t`:
.. math::
\mathbf{e}^{[k]} \leftarrow \lambda^{[k]} \, \mathbf{e}^{[k]}
+ \mathbf{x}^{[t]}
where :math:`\lambda^{[k]}` is the :math:`k`-th decay rate.
Optionally, LinearDyBM can also take into account the
supplementary bias, :math:`\\Phi^{[t]} = \mathbf{A} \\Psi^{[t]}`,
created by an ESN from the (non-linear) features :math:`\\Psi^{[t]}`,
as follows (Notice that :math:`\mathbf{A}` is learned by the ESN):
.. math::
\\Psi^{[t]}
= (1-\\rho) \, \\Psi^{[t-1]}
+ \\rho \, \mathcal{F}(\mathbf{W}_{rec} \, \\Psi^{[t-1]}
+ \mathbf{W}_{in} \, \mathbf{x}^{[t]})
where :math:`\mathbf{W}_{rec}` is the matrix of the internal
weight in the ESN, :math:`\mathbf{W}_{in}` is the matrix of
the weight from the input to the ESN, :math:`\rho` is a leak parameter,
and :math:`\mathcal{F}` is a non-linear function (specifically, hyperbolic tangent). These
:math:`\mathbf{W}_{rec}` and :math:`\mathbf{W}_{in}` are randomly
initialized and fixed throughout learning.
LinearDyBM learns the values of :math:`\mathbf{V}^{[\cdot]}`,
:math:`\mathbf{W}^{[\cdot]}` from given
training time-series. The ESN is used differently in Dasgupta &
Osogami (2017). See RNNGaussianDyBM.py for the implementation
that follows Dasgupta & Osogami (2017). The LinearDyBM without an
ESN closely follows Osogami (2016).
.. seealso:: Takayuki Osogami, "Learning binary or real-valued \
time-series via spike-timing dependent plasticity," presented at \
Computing with Spikes NIPS 2016 Workshop, Barcelona, Spain, December \
2016. https://arxiv.org/abs/1612.04897
.. seealso:: Sakyasingha Dasgupta and Takayuki Osogami, "Nonlinear \
Dynamic Boltzmann Machines for Time-series Prediction," in \
Proceedings of the 31st AAAI Conference on Artificial Intelligence \
(AAAI-17), pages 1833-1839, San Francisco, CA, January 2017. \
http://aaai.org/ocs/index.php/AAAI/AAAI17/paper/view/14350/14343
LinearDyBM allows the target time-series to be different from the input
time-series. Namely, LinearDyBM can be trained to predict a target
pattern, :math:`\mathbf{y}^{[t]}`, at time :math:`t` from input time-series
    :math:`\mathbf{x}^{[:t]}` (i.e., :math:`\mathbf{x}^{[0]}`, ..., :math:`\mathbf{x}^{[t-1]}`) \
with
.. math::
\mathbf{b}
+ \sum_{\ell=1}^{L} \mathbf{W}^{[\ell]} \, \mathbf{x}^{[t-\ell]}
+ \sum_{k=1}^{K} \mathbf{V}^{[k]} \, \mathbf{e}^{[k]}
+ \\Phi^{[t-1]}
    Note that :math:`\mathbf{x}^{[t]}` is not used to predict :math:`\mathbf{y}^{[t]}`.
Parameters
----------
in_dim : int
Dimension of input time-series
out_dim : int, optional
Dimension of target time-series
delay : int, optional
length of the FIFO queue plus 1
decay_rates : list, optional
Decay rates of eligibility traces
SGD : instance of SGD.SGD, optional
Instance of a stochastic gradient method
L1 : float, optional
Strength of L1 regularization
L2 : float, optional
Strength of L2 regularization
use_bias : boolean, optional
Whether to use bias parameters
sigma : float, optional
Standard deviation of initial values of weight parameters
insert_to_etrace : str, {"w_delay", "wo_delay"}, optional
"w_delay" : Insert pattern observed d-1 time steps ago into
eligibility traces.
"wo_delay" : Insert the latest pattern into eligibility traces
esn : ESN, optional
Echo state network
random : arraymath.random
random number generator
Attributes
----------
decay_rates : array, shape (n_etrace, 1)
Decay rates of eligibility traces
e_trace : array, shape (n_etrace, in_dim)
e_trace[k, :] corresponds to the k-th eligibility trace.
esn : ESN
esn
fifo : deque
FIFO queue storing L in_patterns, each in_pattern has shape (in_dim,).
insert_to_etrace : str
insert_to_etrace
n_etrace : int
The number of eligibility traces
len_fifo : int
The length of FIFO queues (delay - 1)
L1 : dict
Dictionary of the strength of L1 regularization
L1[x] : float
Strength of L1 regularization for variable x for x in ["b","V","W"]
L2 : dict
Dictionary of the strength of L2 regularization
L2[x] : float
Strength of L2 regularization for variable x for x in ["b","V","W"]
in_dim : int
in_dim
out_dim : int
out_dim
SGD : SGD
Optimizer used in the stochastic gradient method
variables : dict
Dictionary of model parameters
variables["W"] : array, shape (len_fifo, in_dim, out_dim)
variables["W"][l] corresponds to the weight
from the input observed at time step t - l - 1
to the mean at time step t (current time).
variables["b"] : array, shape (out_dim,)
variables["b"] corresponds to the bias to out_pattern.
variables["V"] : array, shape (n_etrace, in_dim, out_dim)
variables["V"][k] corresponds to the weight
from the k-th eligibility trace to the mean.
"""
def __init__(self, in_dim, out_dim=None, delay=2, decay_rates=[0.5],
SGD=None, L1=0, L2=0, use_bias=True, sigma=0,
insert_to_etrace="wo_delay", esn=None, random=None):
if insert_to_etrace not in ["w_delay", "wo_delay"]:
raise ValueError("insert_to_etrace should be either 'w_delay' "
"or 'wo_delay'.")
self.n_etrace = len(decay_rates)
self.decay_rates = amath.array(decay_rates).reshape((self.n_etrace, 1))
order = delay - 1
# Echo state network
self.esn = esn
if SGD is None:
SGD = AdaGrad()
if random is None:
random = amath.random.RandomState(0)
VectorRegression.__init__(self, in_dim, out_dim, order, SGD, L1, L2,
use_bias, sigma, random)
if self.n_etrace > 0 and self.in_dim > 0 and self.out_dim > 0:
if sigma <= 0:
self.variables["V"] \
= amath.zeros((self.n_etrace, self.in_dim, self.out_dim),
dtype=float)
else:
self.variables["V"] \
= random.normal(0, sigma,
(self.n_etrace, self.in_dim, self.out_dim))
if SGD is None:
SGD = AdaGrad()
self.SGD = SGD.set_shape(self.variables) # resetting SGD
self.L1["V"] = L1
self.L2["V"] = L2
self.insert_to_etrace = insert_to_etrace
def init_state(self):
"""Initializing FIFO queue and eligibility traces
"""
VectorRegression.init_state(self)
# eligibility trace
self.e_trace = amath.zeros((self.n_etrace, self.in_dim))
# Echo state network
if self.esn is not None:
self.esn.init_state()
def _update_state(self, in_pattern):
"""Updating FIFO queues and eligibility traces by appending in_pattern
Parameters
----------
in_pattern : array, shape (in_dim,)
in_pattern to be appended to fifo.
"""
assert in_pattern.shape == (self.in_dim,), "in_pattern must have shape (in_dim,)"
popped_in_pattern = VectorRegression._update_state(self, in_pattern)
if self.insert_to_etrace == "wo_delay" and self.in_dim > 0:
self.e_trace = amath.op.update_e_trace(self.e_trace,
self.decay_rates,
in_pattern)
elif self.insert_to_etrace == "w_delay" and self.in_dim > 0:
self.e_trace = amath.op.update_e_trace(self.e_trace,
self.decay_rates,
popped_in_pattern)
elif self.in_dim > 0:
raise NotImplementedError("_update_state not implemented for ",
self.insert_to_etrace, self.in_dim)
else:
# no need to do anything when in_dim == 0
pass
# Echo state network
if self.esn is not None:
self.esn._update_state(in_pattern)
def _get_gradient(self, out_pattern, expected=None):
"""Computing the gradient of log likelihood
Parameters
----------
out_pattern : array, shape (out_dim,)
out_pattern observed at the current time
expected : array, shape (out_dim,), optional
out_pattern expected by the current model.
To be computed if not given.
Returns
-------
dict
dictionary of gradients with name of a variable as a key
"""
assert out_pattern.shape == (self.out_dim,), \
"out_pattern must have shape (out_dim,)"
if expected is not None:
assert expected.shape == (self.out_dim,), "expected must have shape (out_dim,)"
if expected is None:
expected = self._get_mean().reshape((self.out_dim,))
gradient = VectorRegression._get_gradient(self, out_pattern, expected)
if "V" in self.variables:
# TODO:
# dx in the following has been computed
# in VectorRegression._get_gradient
dx = out_pattern - expected
gradient["V"] = amath.op.mult_2d_1d_to_3d(self.e_trace, dx)
if DEBUG:
grad_V_naive \
= np.array([self.e_trace[k, :].reshape((self.in_dim, 1)) * dx
for k in range(self.n_etrace)])
assert np.allclose(grad_V_naive, gradient["V"]), \
"gradient[\"V\"] has a bug."
self.SGD.apply_L2_regularization(gradient, self.variables, self.L2)
return gradient
def learn_one_step(self, out_pattern):
"""Learning a pattern and updating parameters
Parameters
----------
out_pattern : array, or list of arrays
pattern whose log likelihood is to be increased
"""
delta_this = self._get_delta(out_pattern)
if self.esn is not None:
delta_esn = self.esn._get_delta(out_pattern)
if delta_this is not None:
self._update_parameters(delta_this)
if self.esn is not None and delta_esn is not None:
self.esn._update_parameters(delta_esn)
def _get_mean(self):
"""Computing estimated mean
Returns
-------
mu : array, shape (out_dim,)
estimated mean
"""
mu = self._get_conditional_negative_energy()
return mu
def _get_conditional_negative_energy(self):
"""Computing the fundamental output
Returns
-------
array, shape (out_dim,)
fundamental output
"""
mu = VectorRegression._get_conditional_negative_energy(self)
if "V" in self.variables:
mu += amath.tensordot(self.e_trace, self.variables["V"], axes=2)
if DEBUG:
mu_naive = deepcopy(mu)
for k in range(self.n_etrace):
mu_naive = mu_naive \
+ self.e_trace[k, :].dot(self.variables["V"][k])
assert amath.allclose(mu, mu_naive), "ERROR: mu has a bug."
# Echo state network
if self.esn is not None:
mu += self.esn._get_mean()
return mu
def _get_sample(self):
"""Returning mean as a sample, shape (out_dim,).
LinearDyBM should be used as a deterministic model
Returns
-------
array, shape (out_dim,)
mu, estimated mean (deterministic)
"""
return self._get_mean()
def _time_reversal(self):
"""Making an approximately time-reversed LinearDyBM by transposing
matrices.
For discriminative learning, where in_dim != out_dim, time reversal
would also implies that input and target is reversed
"""
if self.esn is not None:
# TODO: implement _time_reversal with ESN
raise NotImplementedError("_time_reversal is not implemented with ESN")
self._transpose_matrices()
self._exchange_dimensions()
def _transpose_matrices(self):
"""Making an approximately time-reversed LinearDyBM by transposing
matrices. Dimensions should be exchanged with _exchange_dimensions()
"""
for i in xrange(self.n_etrace):
self.variables["V"][i] = self.variables["V"][i].transpose()
self.SGD.first["V"][i] = self.SGD.first["V"][i].transpose()
self.SGD.second["V"][i] = self.SGD.second["V"][i].transpose()
for i in xrange(self.len_fifo):
self.variables["W"][i] = self.variables["W"][i].transpose()
self.SGD.first["W"][i] = self.SGD.first["W"][i].transpose()
self.SGD.second["W"][i] = self.SGD.second["W"][i].transpose()
def _exchange_dimensions(self):
"""Exchanging in_dim and out_dim
"""
out_dim = self.in_dim
self.in_dim = self.out_dim
self.out_dim = out_dim
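# --- Hedged illustration (not part of the original module) --------------------
# Plain-numpy sketch of the eligibility-trace update described in the LinearDyBM
# docstring: e[k] <- decay_rates[k] * e[k] + x at every time step.  This mirrors
# what amath.op.update_e_trace is expected to compute; it is not the library call.
def _etrace_update_sketch(patterns, decay_rates):
    """Return the final eligibility traces for a sequence of input patterns.

    patterns : array, shape (T, in_dim)
    decay_rates : array, shape (n_etrace, 1)
    """
    n_etrace = decay_rates.shape[0]
    in_dim = patterns.shape[1]
    e_trace = np.zeros((n_etrace, in_dim))
    for x in patterns:
        # broadcast: each of the n_etrace traces decays and absorbs the new pattern
        e_trace = decay_rates * e_trace + x
    return e_trace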
class MultiTargetLinearDyBM(MultiTargetVectorRegression):
"""MultiTargetLinearDyBM is a building block for ComplexDyBM.
MultiTargetLinearDyBM is similar to LinearDyBM but accepts
multiple targets. Namely, MultiTargetLinearDyBM can be trained to predict
target patterns, :math:`(\mathbf{y}_1^{[t]}, \mathbf{y}_2^{[t]}, \ldots)`,
at time :math:`t` from input time-series :math:`\mathbf{x}^{[:t-1]}` with
.. math::
\mathbf{b}_1
+ \sum_{\ell=1}^{L} \mathbf{W}_1^{[\ell]} \, \mathbf{x}^{[t-\ell]}
+ \sum_{k=1}^{K} \mathbf{V}_1^{[k]} \, \mathbf{e}^{[k]}
\mathbf{b}_2
+ \sum_{\ell=1}^{L} \mathbf{W}_2^{[\ell]} \, \mathbf{x}^{[t-\ell]}
+ \sum_{k=1}^{K} \mathbf{V}_2^{[k]} \, \mathbf{e}^{[k]}
\ldots
.. todo:: Support the Echo State Network
Parameters
----------
in_dim : int
Dimension of input time-series
out_dims : list,
List of the dimension of target time-series
SGDs : list of the instances of SGD.SGD, optional
List of the optimizer for the stochastic gradient method
delay : int, optional
length of the FIFO queue plus 1
decay_rates : list, optional
Decay rates of eligibility traces
L1 : float, optional
Strength of L1 regularization
L2 : float, optional
Strength of L2 regularization
use_biases : list of boolean, optional
Whether to use bias parameters
sigma : float, optional
Standard deviation of initial values of weight parameters
insert_to_etrace : str, {"w_delay", "wo_delay"}, optional
"w_delay" : Insert pattern observed d-1 time steps ago into
eligibility traces.
"wo_delay" : Insert the latest pattern into eligibility traces.
random : arraymath.random
random number generator
"""
def __init__(self, in_dim, out_dims, SGDs=None, delay=2, decay_rates=[0.5],
L1=0, L2=0, use_biases=None, sigma=0,
insert_to_etrace="wo_delay", random=None):
if SGDs is None:
SGDs = [AdaGrad() for i in range(len(out_dims))]
if len(out_dims) != len(SGDs):
raise ValueError("out_dims and SGDs must have a common length")
if use_biases is None:
use_biases = [True] * len(out_dims)
if random is None:
random = amath.random.RandomState(0)
self.layers = [LinearDyBM(in_dim, out_dim, delay, decay_rates, SGD,
L1, L2, use_bias, sigma, insert_to_etrace,
random=random)
for (out_dim, SGD, use_bias)
in zip(out_dims, SGDs, use_biases)]
# Only layer 0 has internal states, which are shared among all layers
for i in xrange(1, len(self.layers)):
self.layers[i].fifo = self.layers[0].fifo
self.layers[i].e_trace = self.layers[0].e_trace
StochasticTimeSeriesModel.__init__(self)
def _time_reversal(self):
"""
Making an approximately time-reversed MultiTargetLinearDyBM by
transposing matrices
        For discriminative learning, where in_dim != out_dim, time reversal
        also implies that the input and the target are exchanged
"""
for layer in self.layers:
layer._time_reversal()
class ComplexDyBM(StochasticTimeSeriesModel):
"""
Complex DyBM with multiple visible layers
Layers can have different delays, decay rates, and activations
Parameters
----------
delays : list of integers, length n_layers
delays[l] corresponds to the delay of the l-th layer.
decay_rates : list of lists of floats, length n_layers
decay_rates[l] corresponds to the decay rates of eligibility traces
of the l-th layer.
activations : list of strings, 'linear' or 'sigmoid', length n_layers
activations[l] corresponds to the activation unit of the l-th layer.
in_dims : list of integers, length n_layers
in_dims[l] corresponds to the dimension of input time-series
of the l-th layer.
out_dims : list of integers, length n_layers, optional
out_dims[l] corresponds to the dimension of target time-series
of the l-th layer.
If None, set out_dims = in_dims.
SGD : instance of SGD.SGD or list of instances of SGD.SGD, lengh n_layers if \
list, optional.
If list, SGD[l] corresponds to SGD for the l-th layer.
L1 : float, optional
Strength of L1 regularization.
L2 : float, optional
Strength of L2 regularization.
use_biases : list of booleans, length n_layers
Whether to use bias parameters in each layer.
sigma : float, optional
Standard deviation of initial values of parameters.
insert_to_etrace : str, {"w_delay", "wo_delay"}, optional
"w_delay" : Pattern observed d-1 time steps ago will be inserted
into eligibility traces.
"wo_delay" : The newest pattern will be inserted into
eligibility traces.
random : arraymath.random
random number generator
Attributes
----------
activations : list of strings, 'linear' or 'sigmoid', length n_layers
Activations
out_dim : int
The sum of all the out_dims
n_layers : int
The number of layers in complex DyBM.
layers : list of LinearDyBM objects, length n_layers
layers[l] corresponds to the l-th LinearDyBM.
"""
def __init__(self, delays, decay_rates, activations, in_dims,
out_dims=None, SGD=None, L1=0., L2=0., use_biases=None,
sigma=0, insert_to_etrace="wo_delay", random=None):
if insert_to_etrace not in ["w_delay", "wo_delay"]:
raise ValueError("insert_to_etrace should be either 'w_delay' "
"or 'wo_delay'.")
n_layers = len(delays)
if out_dims is None:
out_dims = in_dims
if use_biases is None:
use_biases = [[True] * n_layers] * n_layers
if SGD is None:
SGD = [AdaGrad() for _ in range(n_layers)]
if SGD.__class__ is not list:
SGD = [deepcopy(SGD) for _ in range(n_layers)]
if not len(delays) == len(decay_rates) == len(activations) \
== len(in_dims) == len(out_dims) == len(SGD) == len(use_biases):
raise ValueError("delays, decay_rates, activations, in_dims, "
"out_dims, SGD, and use_biases must have the"
"same number of elements, each of which"
"corresponds to the parameters of each layer.")
if random is None:
random = amath.random.RandomState(0)
self.activations = activations
# out_dim: Total target dimension of this complex DyBM
self.out_dim = sum(out_dims)
self.layers = [MultiTargetLinearDyBM(dim, out_dims, deepcopy(SGD), delay,
decay_rate, L1=L1, L2=L2,
use_biases=use_bias, sigma=sigma,
insert_to_etrace=insert_to_etrace,
random=random)
for (delay, decay_rate, dim, sgd, use_bias)
in zip(delays, decay_rates, in_dims, SGD, use_biases)]
self.n_layers = len(self.layers)
self.insert_to_etrace = insert_to_etrace
StochasticTimeSeriesModel.__init__(self)
def init_state(self):
"""
Initializing the state of each layer
"""
for layer in self.layers:
layer.init_state()
def _update_state(self, in_patterns):
"""
Updating the state of each layer
Parameters
----------
in_patterns : list of arrays, length n_layers
in_patterns[l] : array, length in_dims[l].
This corresponds to in_pattern for updating states of the \
l-th layer.
"""
for (layer, in_pattern) in zip(self.layers, in_patterns):
layer._update_state(in_pattern)
def _get_delta(self, out_patterns, expected=None, weightLLs=None):
"""
Getting delta, how much we change parameters for each layer
Parameters
----------
out_patterns : list of arrays, length n_layers.
out_patterns[l] : array, length out_dims[l]
This corresponds to out_pattern of the l-th layer.
expected : array, shape (out_dim,), optional
Expected next output pattern
weightLLs : list of boolean, length 2, optional
weight[l] denotes whether to weigh the gradient with log likelihood
Returns
-------
list of dicts, length n_layers.
the l-th element corresponds to a dictionary of deltas with name \
of a variable as a key.
"""
if expected is None:
# expected pattern with current parameters
expected = self.predict_next()
deltas = list()
for layer in self.layers:
d = layer._get_delta(out_patterns, expected, weightLLs)
deltas.append(d)
return deltas
def _update_parameters(self, deltas):
"""
Updating the parameters for each layer
Parameters
----------
deltas : list of dicts, length n_layers.
deltas[l] corresponds to a dictionary of deltas of the l-th layer
with name of a variable as a key.
"""
for (layer, d) in zip(self.layers, deltas):
layer._update_parameters(d)
def get_LL(self, out_patterns):
"""
Getting log likelihood of given out_patterns
Parameters
----------
out_patterns : list of arrays, length n_layers.
out_patterns[l] : array, length out_dims[l]
This corresponds to out_pattern of the l-th layer.
Returns
-------
list
The l-th element corresponds to the log likelihood
of the l-th layer.
"""
mu_list = self._get_conditional_negative_energy()
LL = list()
for layer, activation, mu, out_pattern in zip(self.layers,
self.activations,
mu_list,
out_patterns):
if activation == "linear":
# real valued prediction
# same as prediction of Gaussian DyBM
loglikelihood = None
elif activation == "sigmoid":
# Prediction in [0, 1]
# For binary input, predicted value should be interpreted
# as probability.
# Same as the original binary DyBM
loglikelihood = (- mu * out_pattern
- amath.log(1. + amath.exp(-mu)))
loglikelihood = amath.sum(loglikelihood)
else:
# TODO: implement other activations
raise NotImplementedError("get_LL not implemented for " + activation)
LL.append(loglikelihood)
return LL
def predict_next(self):
"""
Predicting next pattern with the expected values
Returns
-------
        list of arrays, length n_layers
            predicted out_pattern for each layer
"""
return self._get_mean()
def _get_mean(self):
"""
Getting expected values
Returns
-------
mean : list of arrays, length n_layers.
mean[l] corresponds to the expected out_pattern for the l-th layer
"""
mu_list = self._get_conditional_negative_energy()
mean = list()
for layer, activation, mu in zip(self.layers,
self.activations,
mu_list):
if activation == "linear":
# real valued prediction
# same as prediction of Gaussian DyBM
pred = mu
elif activation in ["sigmoid", "sigmoid_deterministic"]:
# prediction in [0, 1]
# For binary input, predicted value should be interpreted
# as probability.
# Same as the original binary DyBM
pred = sigmoid(mu)
else:
# TODO: implement other activations
raise NotImplementedError("_get_mean not defined for activation:" + activation)
mean.append(pred)
return mean
def _get_conditional_negative_energy(self):
"""
Computing the conditional negative energy, summed up over all layers
Returns
-------
list of array, length len(layers)
list of conditional negative energy
"""
mu_all = [amath.concatenate(layer._get_conditional_negative_energy())
for layer in self.layers]
mu_all = amath.concatenate(mu_all).reshape((self.n_layers, self.out_dim))
mu_sum = mu_all.sum(axis=0)
start = 0
mu_list = list()
for layer in self.layers:
in_dim = layer.get_input_dimension()
mu = mu_sum[start:start + in_dim]
mu_list.append(mu)
start += in_dim
return mu_list
def _get_sample(self):
"""
Sampling next values
Returns
-------
list of arrays, length n_layers
the l-th element corresponds to samples for the l-th layer.
"""
means = self._get_mean()
samples = list()
for mean, activation in zip(means, self.activations):
if activation in ["linear", "sigmoid_deterministic"]:
# for linear activation, mean is always sampled
sample = mean
elif activation == "sigmoid":
u = self.random.random_sample(mean.shape)
sample = u < mean
samples.append(sample)
return samples
def set_learning_rate(self, rates):
"""
Setting the learning rates
Parameters
----------
rate : list of float, length n_layers
learning rates
"""
for layer in self.layers:
layer.set_learning_rate(rates)
class BinaryDyBM(ComplexDyBM):
"""
Binary (Bernoulli) DyBM, consisting of a visible Bernoulli layer
Parameters
----------
in_dim : integer
The dimension of input time-series
out_dim : integer, optional
The dimension of target time-series
delay : integer (>0), optional
length of fifo queue plus one
decay_rates : list of floats, optional
Decay rates of eligibility traces of the l-th layer.
SGD : object of SGD.SGD, optional
Object of a stochastic gradient method
L1 : float, optional
Strength of L1 regularization
L2 : float, optional
Strength of L2 regularization
sigma : float, optional
Standard deviation of initial values of parameters
insert_to_etrace : str, {"w_delay", "wo_delay"}, optional
"w_delay" : Pattern observed d-1 time steps ago will be inserted
into eligibility traces.
"wo_delay" : The newest pattern will be inserted
into eligibility traces.
Attributes
----------
n_etrace : int
the number of eligibility traces
len_fifo : int
the length of FIFO queues (delay - 1)
in_dim : int
in_dim
out_dim : int
out_dim
"""
def __init__(self, in_dim, out_dim=None, delay=2, decay_rates=[0.5],
SGD=None, L1=0, L2=0, sigma=0,
insert_to_etrace="wo_delay"):
if not delay > 0:
raise ValueError("delay must be a positive integer")
if insert_to_etrace not in ["w_delay", "wo_delay"]:
raise ValueError("insert_to_etrace should be either `w_delay` or "
"`wo_delay`.")
if out_dim is None:
out_dim = in_dim
self.n_etrace = len(decay_rates)
self.len_fifo = delay - 1
self.in_dim = in_dim
self.out_dim = out_dim
ComplexDyBM.__init__(self, [delay], [decay_rates], ["sigmoid"],
[in_dim], SGD=SGD, L1=L1, L2=L2,
use_biases=[[True]], sigma=sigma,
insert_to_etrace=insert_to_etrace)
def get_input_dimension(self):
"""
Getting the dimension of input sequence
Returns
-------
in_dim : int
dimension of input sequence
"""
return self.in_dim
def get_target_dimension(self):
"""
Getting the dimension of target sequence
Returns
-------
out_dim : int
dimension of target sequence
"""
return self.out_dim
def set_learning_rate(self, rate):
"""
Setting the learning rate
Parameters
----------
rate : float
learning rate
"""
ComplexDyBM.set_learning_rate(self, [rate])
def _update_state(self, in_pattern):
ComplexDyBM._update_state(self, [in_pattern])
def _get_delta(self, out_pattern, expected=None):
return ComplexDyBM._get_delta(self, [out_pattern])
def get_predictions(self, in_seq):
"""
        Getting predictions corresponding to a given input sequence
Parameters
----------
in_seq : sequence or generator
input sequence
Returns
-------
list of arrays
list of predictions
"""
predictions = ComplexDyBM.get_predictions(self, in_seq)
return [p[0] for p in predictions]
class GaussianBernoulliDyBM(ComplexDyBM):
"""
Gaussian-Bernoulli DyBM, consisting of a visible Gaussian layer,
and a hidden Bernoulli layer
.. seealso:: Takayuki Osogami, Hiroshi Kajino, and Taro Sekiyama, \
"Bidirectional learning for time-series models with hidden units", \
ICML 2017.
Parameters
----------
delays : list of integers (>0), length 2
delays[l] corresponds to the delay of the l-th layer.
decay_rates : list of lists of floats, length 2
decay_rates[l] corresponds to the decay rates of eligibility traces
of the l-th layer.
in_dims : list of integers, length 2
in_dims[l] corresponds to the dimension of input time-series
of the l-th layer.
SGD : instance of SGD.SGD or list of instances of SGD.SGD, optional
Instance of a stochastic gradient method
L1 : float, optional
Strength of L1 regularization
L2 : float, optional
Strength of L2 regularization
sigma : float, optional
Standard deviation of initial values of parameters
insert_to_etrace : str, {"w_delay", "wo_delay"}, optional
"w_delay" : Pattern observed d-1 time steps ago will be inserted
into eligibility traces
"wo_delay" : The newest pattern will be inserted
into eligibility traces
Attributes
----------
out_dim : int
the sum of all the out_dims
n_etrace : int
the number of eligibility traces
len_fifo : int
the length of FIFO queues (delay - 1)
n_layers : int
2, the number of layers
layers : list of LinearDyBM objects, length 2
layers[l] corresponds to the l-th LinearDyBM.
visibility : list
[True,False], visibility of layers
"""
def __init__(self, delays, decay_rates, in_dims, SGD=None, L1=0.,
L2=0., sigma=0, insert_to_etrace="wo_delay"):
if not len(delays) == len(decay_rates) == len(in_dims):
raise ValueError("GaussianBernoulliDyBM only allows two layers")
if not delays[0] == delays[1]:
raise NotImplementedError("Current implementation only allows "
"constant delay")
if not len(decay_rates[0]) == len(decay_rates[1]):
raise NotImplementedError("Current implementation only allows "
"constant number of decay rates")
if not delays[0] > 0:
raise ValueError("delay must be a positive integer")
if insert_to_etrace not in ["w_delay", "wo_delay"]:
raise ValueError("insert_to_etrace should be either 'w_delay' or "
"'wo_delay'.")
if SGD is None:
SGD = [AdaGrad(), AdaGrad()]
self.n_etrace = len(decay_rates[0])
self.len_fifo = delays[0] - 1
self.visibility = [True, False]
# activations = ["linear", "sigmoid"]
activations = ["linear", "sigmoid_deterministic"]
use_bias = [[True, False], [False, False]]
self.min_step_size = 1e-6 # to be updated adaptively
self.max_step_size = 1. # to be updated adaptively
ComplexDyBM.__init__(self, delays, decay_rates, activations, in_dims,
in_dims, SGD, L1, L2, use_bias, sigma,
insert_to_etrace)
def learn(self, in_generator, get_result=True):
"""
Learning generator
Parameters
----------
in_generator : generator
generator of input data for the visible Gaussian layer
get_result : boolean, optional
whether the accuracy of prediction during learning is yielded
Returns
----------
dict
dictionary of
"prediction": list of arraymath array, shape (out_dim, 1)
"actual": list of arraymath array, shape (out_dim, 1)
"error": list of float
(squared error for each predicted pattern)
"""
# [data, hidden samples]
in_list_generator \
= ListGenerator([in_generator, ElementGenerator(self, 1)])
result = ComplexDyBM.learn(self, in_list_generator,
get_result=get_result)
if get_result:
for key in result:
result[key] = [x[0] for x in result[key]]
return result
def _get_delta(self, out_patterns, expected=None, weightLLs=None):
"""
Getting delta, how much we change parameters for each layer
Parameters
----------
out_patterns : list of arrays, length 2.
out_patterns[l] : array, length out_dims[l]
This corresponds to out_pattern of the l-th layer.
expected : list of arrays, length 2, optional
expected[l] : array, length out_dims[l]
out_pattern of the l-th layer expected by the current model
to be computed if not given.
weightLLs : list of boolean, length 2, optional.
weight[l] denotes whether to weigh the gradient with log likelihood
Returns
-------
list of dicts, length n_layers.
The l-th element corresponds to a dictionary of deltas
with name of a variable as a key.
"""
return ComplexDyBM._get_delta(self, out_patterns, expected,
[False, True])
def get_predictions(self, in_generator):
"""
Getting prediction of the visible layer, corresponding to a given input
generator
Parameters
----------
in_generator : Generator
input generator for the visible layer
Returns
-------
list of arrays
the i-th array is the i-th predicted pattern
"""
in_generator = ListGenerator([in_generator, ElementGenerator(self, 1)])
predictions_all_layers \
= ComplexDyBM.get_predictions(self, in_generator)
predictions = [p[0] for p in predictions_all_layers]
return predictions
def _time_reversal(self):
"""
Making an approximately time-reversed GaussianBernoulliDyBM
by transposing matrices
"""
for v, length in zip(["V", "W"], [self.n_etrace, self.len_fifo]):
for i in xrange(length):
for j in xrange(2):
if v not in self.layers[j].layers[j].variables:
continue
# transposing visible to visible, hidden to hidden
self.layers[j].layers[j].variables[v][i] \
= self.layers[j].layers[j].variables[v][i].transpose()
self.layers[j].layers[j].SGD.first[v][i] \
= self.layers[j].layers[j].SGD.first[v][i].transpose()
self.layers[j].layers[j].SGD.second[v][i] \
= self.layers[j].layers[j].SGD.second[v][i].transpose()
if v not in self.layers[1].layers[0].variables:
continue
# transposing visible to hidden, hidden to visible
tmp = deepcopy(
self.layers[1].layers[0].variables[v][i].transpose())
self.layers[1].layers[0].variables[v][i] \
= self.layers[0].layers[1].variables[v][i].transpose()
self.layers[0].layers[1].variables[v][i] = tmp
tmp = deepcopy(
self.layers[1].layers[0].SGD.first[v][i].transpose())
self.layers[1].layers[0].SGD.first[v][i] \
= self.layers[0].layers[1].SGD.first[v][i].transpose()
self.layers[0].layers[1].SGD.first[v][i] = tmp
tmp = deepcopy(
self.layers[1].layers[0].SGD.second[v][i].transpose())
self.layers[1].layers[0].SGD.second[v][i] \
= self.layers[0].layers[1].SGD.second[v][i].transpose()
self.layers[0].layers[1].SGD.second[v][i] = tmp
def get_input_dimension(self):
"""
Getting input dimension (size of the visible layer)
Returns
-------
integer : input dimension
"""
return self.layers[0].get_input_dimension()
def get_target_dimension(self):
"""
Getting target dimension (size of the visible layer)
Returns
-------
integer : target dimension, which equals input dimension
"""
return self.layers[0].get_input_dimension()
def get_LL_sequence(self, in_seq, out_seq=None):
# This is computationally hard
raise NotImplementedError("get_LL_sequence not implemented for GaussianBernoulliDyBM")
def _get_gradient_for_layer(self, out_pattern, target_layer):
"""
        Getting the gradient, given that out_pattern is observed in target_layer
Parameters
----------
out_pattern : array
target pattern
target_layer : int
index of target layer
Returns
-------
list of array, length n_layers
list of gradients for all layers
"""
expected = self._get_mean()[target_layer]
return [layer._get_gradient_for_layer(out_pattern, target_layer,
expected=expected)
for layer in self.layers]
def _get_total_gradient(self, in_gen, out_gen=None):
"""
Getting the gradient (sum of all stochastic gradients)
Parameters
----------
in_gen : generator
input generator
out_gen : generator, optional
output generator
Returns
-------
total_gradient : array
gradient
"""
self._reset_generators()
total_gradient = None
for in_pattern in in_gen:
if out_gen is None:
out_pattern = in_pattern
else:
out_pattern = out_gen.next()
# get gradient
gradient = self._get_gradient_for_layer(out_pattern, 0)
# add the gradient into total_gradient
if total_gradient is None:
total_gradient = gradient
else:
for i in xrange(len(self.layers)):
for key in gradient[i]:
total_gradient[i][key] = total_gradient[i][key] + gradient[i][key]
# update internal state
sample = self._get_sample()[1] # sample hidden activation
self._update_state([in_pattern, sample])
return total_gradient
def _store_fifo(self):
"""
Deepcopy fifo queues
Returns
-------
original_fifo : list of fifo
deepcopied fifo queues
"""
original_fifo = [None, None]
for i in range(2):
original_fifo[i] = deepcopy(self.layers[i].layers[0].fifo)
return original_fifo
def _restore_fifo(self, original_fifo):
"""
Set fifo queues
Parameters
----------
original_fifo : list of fifo
new fifo queues
"""
for i in range(2):
self.layers[i].layers[0].fifo = original_fifo[i]
def _apply_gradient(self, in_seq, out_seq=None, bestError=None):
"""
        Apply the gradient of a whole time-series using forward learning only
Parameters
----------
in_seq : list
input sequence
out_seq : list, optional
target sequence
        bestError : float, optional
RMSE with the current model
Returns
-------
bestError : float
RMSE after applying the gradient
"""
if out_seq is None:
out_seq = in_seq
in_gen = SequenceGenerator(in_seq)
total_gradient = self._get_total_gradient(in_gen)
best_variables = [deepcopy(layer.layers[0].variables) for layer in self.layers]
if bestError is None:
in_gen.reset()
bestError = self._get_RMSE(in_gen, out_seq)
# A simple line search for step size
step_size = self.max_step_size
while step_size > self.min_step_size:
# tentatively apply gradient with a step size
for i in xrange(len(self.layers)):
for key in self.layers[i].layers[0].variables:
self.layers[i].layers[0].variables[key] \
= best_variables[i][key] + step_size * total_gradient[i][key]
# evaluate error
in_gen.reset()
error = self._get_RMSE(in_gen, out_seq)
# stop the line search if non-negligible improvement
if error < bestError:
best_variables = [deepcopy(layer.layers[0].variables)
for layer in self.layers]
bestError = error
# increase the max step size if improves with the max step size
if step_size == self.max_step_size:
self.max_step_size *= 2
break
step_size = step_size / 2.
if step_size > self.min_step_size:
return bestError
else:
for i in xrange(len(self.layers)):
for key in self.layers[i].layers[0].variables:
self.layers[i].layers[0].variables[key] = best_variables[i][key]
return None
def _fit_by_GD(self, in_seq, out_seq=None, max_iteration=1000):
"""
        Fitting with forward learning only
Parameters
----------
in_seq : list
input sequence
out_seq : list
target sequence
max_iteration : int, optional
maximum number of iterations
Returns
-------
bestError : float
RMSE after learning
"""
if out_seq is None:
out_seq = in_seq
in_gen = SequenceGenerator(in_seq)
bestError = self._get_RMSE(in_gen, out_seq)
for i in xrange(max_iteration):
error = self._apply_gradient(in_seq, out_seq, bestError=bestError)
if error is None:
break
else:
bestError = error
return bestError
def _read(self, generator):
"""
        Read a sequence from a generator, together with hidden activations,
to update the state
Parameters
----------
generator : Generator
Generator of a sequence
"""
list_generator = ListGenerator([generator, ElementGenerator(self, 1)])
for patterns in list_generator:
self._update_state(patterns)
def _get_bidirectional_gradient(self):
"""
Getting the gradient for forward sequence and backward sequence
Returns
-------
fwd_gradient : list
gradient for forward sequence
bwd_gradient : list
gradient for backward sequence
"""
self._reset_generators()
self._read(self._fwd_warmup)
fwd_gradient = self._get_total_gradient(self._fwd_train)
self._time_reversal()
self._read(self._bwd_warmup)
bwd_gradient = self._get_total_gradient(self._bwd_train)
self._time_reversal()
return fwd_gradient, bwd_gradient
def _get_RMSE(self, in_gen, target_seq):
"""
Getting RMSE
Parameters
----------
in_gen : generator
input generator
target_seq : list
target patterns
Returns
-------
float
RMSE
"""
original_fifo = self._store_fifo()
predictions = self.get_predictions(in_gen)
self._restore_fifo(original_fifo)
return RMSE(target_seq, predictions)
def _reset_generators(self):
"""
Resetting generators for training
"""
self._fwd_warmup.reset()
self._fwd_train.reset()
self._bwd_warmup.reset()
self._bwd_train.reset()
def _get_bidirectional_error(self):
"""
Getting weighted RMSE for forward sequence and backward sequence
Returns
-------
error : float
weighted RMSE
"""
self._reset_generators()
self._read(self._fwd_warmup)
fwdError = self._get_RMSE(self._fwd_train, self._fwd_train_seq)
if self._bwd_weight != 0:
self._time_reversal()
self._read(self._bwd_warmup)
bwdError = self._get_RMSE(self._bwd_train, self._bwd_train_seq)
self._time_reversal()
else:
bwdError = 0
error = fwdError + self._bwd_weight * bwdError
return error
def _get_step_size_for_bidirectional(self, fwd_gradient, bwd_gradient):
"""
Getting step size with line search
Parameters
----------
fwd_gradient: list
            gradient for the forward sequence
bwd_gradient : list
gradient for backward sequence
Returns
-------
step_size : float
step size
best_variables : dict
variables after applying the gradient with the step size
best_error : float
RMSE after applying the gradient with the step size
"""
# A simple line search for step size
step_size = self.max_step_size
while step_size > self.min_step_size:
# tentatively apply gradient with a step size to the best variables
self._restore_variables(self._best_variables)
for i in xrange(len(self.layers)):
for key in self.layers[i].layers[0].variables:
self.layers[i].layers[0].variables[key] \
= self.layers[i].layers[0].variables[key] \
+ step_size * fwd_gradient[i][key]
if self._bwd_weight != 0:
self._time_reversal()
for i in xrange(len(self.layers)):
for key in self.layers[i].layers[0].variables:
self.layers[i].layers[0].variables[key] \
= self.layers[i].layers[0].variables[key] \
+ step_size * self._bwd_weight * bwd_gradient[i][key]
self._time_reversal()
# evaluate error
error = self._get_bidirectional_error()
# stop the line search if non-negligible improvement
if error < self._best_error:
self._best_variables = self._store_variables()
self._best_error = error
# update max_step_size
if step_size == self.max_step_size:
# increase the max step size if improves with the max step size
self.max_step_size *= 10
elif self.max_step_size > 10 * step_size:
# decrease max_step_size if much larger than improving step size
self.max_step_size = 10 * step_size
break
step_size = step_size / 2.
# update min_step_size
if step_size <= self.min_step_size and error > self._best_error * 1.001:
self.min_step_size = self.min_step_size / 10
print("reduced min_step_size to", self.min_step_size)
if self.min_step_size < 1e-16:
print("Too small a min_step_size", self.min_step_size)
print("error", error)
print("best_error", self._best_error)
break
if step_size < self.min_step_size:
print("no improvement with minimum step size")
error = self._get_bidirectional_error()
return step_size, self._best_variables, self._best_error
def _store_variables(self):
"""
Deep copy variables
Returns
-------
variables: list
deepcopied variables
"""
variables = [[None, None], [None, None]]
for i, j in product(range(2), range(2)):
variables[i][j] = deepcopy(self.layers[i].layers[j].variables)
return variables
def _restore_variables(self, variables):
"""
Set variables
Parameters
----------
variables : list
new variables
"""
for i, j in product(range(2), range(2)):
self.layers[i].layers[j].variables = deepcopy(variables[i][j])
def _fit_bidirectional_by_GD(self, dataset, len_train, len_test, len_warm,
bwd_weight, bwd_end, max_iteration=1000):
"""
        Fitting with bidirectional learning using gradient descent
Parameters
----------
dataset : list
dataset
len_train : int
length of training dataset
len_test : int
length of test dataset
len_warm : int
length of warmup period
bwd_weight : float
weight of backward learning
        bwd_end : float
            fraction of max_iteration after which backward learning is turned off
        max_iteration : int
            maximum number of iterations
Returns
-------
        train_RMSE : list of float
            training RMSE per iteration
        test_RMSE : list of float
            test RMSE per iteration
"""
# Prepare dataset
t0 = 0
t1 = t0 + len_warm
t2 = t1 + len_train
t3 = t2 + len_warm
t4 = t3 + len_test
self._fwd_warmup = SequenceGenerator(dataset[t0:t1])
self._fwd_train = SequenceGenerator(dataset[t1:t3])
self._bwd_warmup = SequenceGenerator(dataset[t2:t3])
self._bwd_train = SequenceGenerator(dataset[t0:t2])
self._bwd_warmup.reverse()
self._bwd_train.reverse()
self._fwd_train_seq = self._fwd_train.to_list()
self._bwd_train_seq = self._bwd_train.to_list()
self._test_warmup = SequenceGenerator(dataset[t0:t3])
self._test = SequenceGenerator(dataset[t3:t4])
self._test_seq = self._test.to_list()
self._bwd_weight = bwd_weight
test_RMSE = list()
train_RMSE = list()
# Evaluate error before training
baseline = baseline_RMSE(self._test_warmup.to_list()[-1], self._test_seq)
self._test_warmup.reset()
self._test.reset()
self._read(self._test_warmup)
predictions = self.get_predictions(self._test)
rmse = RMSE(self._test_seq, predictions)
test_RMSE.append(rmse)
self._best_variables = self._store_variables()
self._best_error = self._get_bidirectional_error()
train_RMSE.append(self._best_error)
for i in xrange(max_iteration):
if i > max_iteration * bwd_end:
self._bwd_weight = 0
# get gradient
fwd_gradient, bwd_gradient = self._get_bidirectional_gradient()
for g in fwd_gradient:
for key in g:
g[key] = g[key] / (len_train + len_warm)
for g in bwd_gradient:
for key in g:
g[key] = g[key] / (len_train + len_warm)
# A simple line search for step size
step_size, self._best_variables, self._best_error \
= self._get_step_size_for_bidirectional(fwd_gradient, bwd_gradient)
train_RMSE.append(self._best_error)
self._test_warmup.reset()
self._test.reset()
self._read(self._test_warmup)
predictions = self.get_predictions(self._test)
rmse = RMSE(self._test_seq, predictions)
test_RMSE.append(rmse)
if step_size < self.min_step_size:
break
self._restore_variables(self._best_variables)
return train_RMSE, test_RMSE
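# --- Usage sketch (editor's addition, not part of the original module) ---
# Illustrative only: it assumes a GaussianBernoulliDyBM instance `dybm` has
# been constructed via the __init__ defined earlier in this file (its full
# signature is not repeated here) and that `dataset` is a list of arraymath
# arrays, one pattern per time step. Shown as comments because the
# constructor arguments are elided:
#
#     dybm = GaussianBernoulliDyBM(...)   # constructor args elided
#     train_rmse, test_rmse = dybm._fit_bidirectional_by_GD(
#         dataset, len_train=200, len_test=50, len_warm=20,
#         bwd_weight=0.5, bwd_end=0.8, max_iteration=100)
#     predictions = dybm.get_predictions(SequenceGenerator(dataset[-50:]))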
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .UrlShortener import UrlShorter
from .lib.BitLinkUrlShortener import BitLinkUrlShorter
from .lib.GoogleUrlShortener import GoogleUrlShorter
__all__ = ['UrlShorter', 'GoogleUrlShorter', 'BitLinkUrlShorter']
|
# Copyright 2017-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import tarfile
import botocore.exceptions
import os
import pytest
import sagemaker
import sagemaker.predictor
import sagemaker.utils
import tests.integ
import tests.integ.timeout
from sagemaker.tensorflow.serving import Model, Predictor
@pytest.fixture(scope='session', params=[
'ml.c5.xlarge',
pytest.param('ml.p3.2xlarge',
marks=pytest.mark.skipif(
tests.integ.test_region() in tests.integ.HOSTING_NO_P3_REGIONS,
reason='no ml.p3 instances in this region'))])
def instance_type(request):
return request.param
@pytest.fixture(scope='module')
def tfs_predictor(instance_type, sagemaker_session, tf_full_version):
endpoint_name = sagemaker.utils.unique_name_from_base('sagemaker-tensorflow-serving')
model_data = sagemaker_session.upload_data(
path=os.path.join(tests.integ.DATA_DIR, 'tensorflow-serving-test-model.tar.gz'),
key_prefix='tensorflow-serving/models')
with tests.integ.timeout.timeout_and_delete_endpoint_by_name(endpoint_name,
sagemaker_session):
model = Model(model_data=model_data, role='SageMakerRole',
framework_version=tf_full_version,
sagemaker_session=sagemaker_session)
predictor = model.deploy(1, instance_type, endpoint_name=endpoint_name)
yield predictor
def tar_dir(directory, tmpdir):
target = os.path.join(str(tmpdir), 'model.tar.gz')
with tarfile.open(target, mode='w:gz') as t:
t.add(directory, arcname=os.path.sep)
return target
@pytest.fixture
def tfs_predictor_with_model_and_entry_point_same_tar(instance_type,
sagemaker_session,
tf_full_version,
tmpdir):
endpoint_name = sagemaker.utils.unique_name_from_base('sagemaker-tensorflow-serving')
model_tar = tar_dir(os.path.join(tests.integ.DATA_DIR, 'tfs/tfs-test-model-with-inference'),
tmpdir)
model_data = sagemaker_session.upload_data(
path=model_tar,
key_prefix='tensorflow-serving/models')
with tests.integ.timeout.timeout_and_delete_endpoint_by_name(endpoint_name,
sagemaker_session):
model = Model(model_data=model_data,
role='SageMakerRole',
framework_version=tf_full_version,
sagemaker_session=sagemaker_session)
predictor = model.deploy(1, instance_type, endpoint_name=endpoint_name)
yield predictor
@pytest.fixture(scope='module')
def tfs_predictor_with_model_and_entry_point_and_dependencies(instance_type,
sagemaker_session, tf_full_version):
endpoint_name = sagemaker.utils.unique_name_from_base('sagemaker-tensorflow-serving')
model_data = sagemaker_session.upload_data(
path=os.path.join(tests.integ.DATA_DIR,
'tensorflow-serving-test-model.tar.gz'),
key_prefix='tensorflow-serving/models')
with tests.integ.timeout.timeout_and_delete_endpoint_by_name(endpoint_name,
sagemaker_session):
entry_point = os.path.join(tests.integ.DATA_DIR,
'tfs/tfs-test-entrypoint-and-dependencies/inference.py')
dependencies = [os.path.join(tests.integ.DATA_DIR,
'tfs/tfs-test-entrypoint-and-dependencies/dependency.py')]
model = Model(entry_point=entry_point,
model_data=model_data,
role='SageMakerRole',
dependencies=dependencies,
framework_version=tf_full_version,
sagemaker_session=sagemaker_session)
predictor = model.deploy(1, instance_type, endpoint_name=endpoint_name)
yield predictor
@pytest.fixture(scope='module')
def tfs_predictor_with_accelerator(sagemaker_session, tf_full_version):
endpoint_name = sagemaker.utils.unique_name_from_base("sagemaker-tensorflow-serving")
instance_type = 'ml.c4.large'
accelerator_type = 'ml.eia1.medium'
model_data = sagemaker_session.upload_data(
path=os.path.join(tests.integ.DATA_DIR, 'tensorflow-serving-test-model.tar.gz'),
key_prefix='tensorflow-serving/models')
with tests.integ.timeout.timeout_and_delete_endpoint_by_name(endpoint_name,
sagemaker_session):
model = Model(model_data=model_data, role='SageMakerRole',
framework_version=tf_full_version,
sagemaker_session=sagemaker_session)
predictor = model.deploy(1, instance_type, endpoint_name=endpoint_name,
accelerator_type=accelerator_type)
yield predictor
@pytest.mark.canary_quick
def test_predict(tfs_predictor, instance_type): # pylint: disable=W0613
input_data = {'instances': [1.0, 2.0, 5.0]}
expected_result = {'predictions': [3.5, 4.0, 5.5]}
result = tfs_predictor.predict(input_data)
assert expected_result == result
@pytest.mark.skipif(tests.integ.test_region() not in tests.integ.EI_SUPPORTED_REGIONS,
reason='EI is not supported in region {}'.format(tests.integ.test_region()))
@pytest.mark.canary_quick
def test_predict_with_accelerator(tfs_predictor_with_accelerator):
input_data = {'instances': [1.0, 2.0, 5.0]}
expected_result = {'predictions': [3.5, 4.0, 5.5]}
result = tfs_predictor_with_accelerator.predict(input_data)
assert expected_result == result
def test_predict_with_entry_point(tfs_predictor_with_model_and_entry_point_same_tar):
input_data = {'instances': [1.0, 2.0, 5.0]}
expected_result = {'predictions': [4.0, 4.5, 6.0]}
result = tfs_predictor_with_model_and_entry_point_same_tar.predict(input_data)
assert expected_result == result
def test_predict_with_model_and_entry_point_and_dependencies_separated(
tfs_predictor_with_model_and_entry_point_and_dependencies):
input_data = {'instances': [1.0, 2.0, 5.0]}
expected_result = {'predictions': [4.0, 4.5, 6.0]}
result = tfs_predictor_with_model_and_entry_point_and_dependencies.predict(input_data)
assert expected_result == result
def test_predict_generic_json(tfs_predictor):
input_data = [[1.0, 2.0, 5.0], [1.0, 2.0, 5.0]]
expected_result = {'predictions': [[3.5, 4.0, 5.5], [3.5, 4.0, 5.5]]}
result = tfs_predictor.predict(input_data)
assert expected_result == result
def test_predict_jsons_json_content_type(tfs_predictor):
input_data = '[1.0, 2.0, 5.0]\n[1.0, 2.0, 5.0]'
expected_result = {'predictions': [[3.5, 4.0, 5.5], [3.5, 4.0, 5.5]]}
predictor = sagemaker.RealTimePredictor(tfs_predictor.endpoint,
tfs_predictor.sagemaker_session, serializer=None,
deserializer=sagemaker.predictor.json_deserializer,
content_type='application/json',
accept='application/json')
result = predictor.predict(input_data)
assert expected_result == result
def test_predict_jsons(tfs_predictor):
input_data = '[1.0, 2.0, 5.0]\n[1.0, 2.0, 5.0]'
expected_result = {'predictions': [[3.5, 4.0, 5.5], [3.5, 4.0, 5.5]]}
predictor = sagemaker.RealTimePredictor(tfs_predictor.endpoint,
tfs_predictor.sagemaker_session, serializer=None,
deserializer=sagemaker.predictor.json_deserializer,
content_type='application/jsons',
accept='application/jsons')
result = predictor.predict(input_data)
assert expected_result == result
def test_predict_jsonlines(tfs_predictor):
input_data = '[1.0, 2.0, 5.0]\n[1.0, 2.0, 5.0]'
expected_result = {'predictions': [[3.5, 4.0, 5.5], [3.5, 4.0, 5.5]]}
predictor = sagemaker.RealTimePredictor(tfs_predictor.endpoint,
tfs_predictor.sagemaker_session, serializer=None,
deserializer=sagemaker.predictor.json_deserializer,
content_type='application/jsonlines',
accept='application/jsonlines')
result = predictor.predict(input_data)
assert expected_result == result
def test_predict_csv(tfs_predictor):
input_data = '1.0,2.0,5.0\n1.0,2.0,5.0'
expected_result = {'predictions': [[3.5, 4.0, 5.5], [3.5, 4.0, 5.5]]}
predictor = Predictor(tfs_predictor.endpoint, tfs_predictor.sagemaker_session,
serializer=sagemaker.predictor.csv_serializer)
result = predictor.predict(input_data)
assert expected_result == result
def test_predict_bad_input(tfs_predictor):
input_data = {'junk': 'data'}
with pytest.raises(botocore.exceptions.ClientError):
tfs_predictor.predict(input_data)
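# --- Usage sketch (editor's addition, not part of the original tests) ---
# Outside of pytest, the deploy-and-predict flow exercised by the fixtures
# above looks roughly like this; the S3 path, role name, framework version and
# `sagemaker_session` are illustrative assumptions:
#
#     model = Model(model_data='s3://my-bucket/tensorflow-serving-test-model.tar.gz',
#                   role='SageMakerRole', framework_version='1.12',
#                   sagemaker_session=sagemaker_session)
#     predictor = model.deploy(1, 'ml.c5.xlarge')
#     predictor.predict({'instances': [1.0, 2.0, 5.0]})
#     predictor.delete_endpoint()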
|
from __future__ import annotations
import os
from typing import Tuple, List, Dict, Set, Callable, Any, TYPE_CHECKING
from qtpy import QtGui
from qtpy.QtWidgets import QAction, QToolBar, QMenu
if TYPE_CHECKING:
from cpylog import SimpleLogger
from qtpy.QtWidgets import QMainWindow
def build_actions(self: QMainWindow,
base_actions: Dict[str, QAction],
icon_path: str,
tools_list: List[Tuple[str, str, str, str, str, Callable[Any]]],
checkables_set: Set[str],
log: SimpleLogger) -> Dict[str, Any]:
    actions = base_actions
for tool in tools_list:
(name, txt, icon, shortcut, tip, func) = tool
if name in actions:
log.error('trying to create a duplicate action %r' % name)
continue
if icon is None:
msg = f'missing_icon = {name!r}!!!'
print(msg)
self.log.warning(msg)
ico = None
else:
ico = QtGui.QIcon()
pth = os.path.join(icon_path, icon)
if icon.endswith('.png') and not os.path.exists(pth):
self.log.warning(str((name, pth)))
ico.addPixmap(QtGui.QPixmap(pth), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        if name in checkables_set:
            # checkable actions start unchecked; callers toggle them later
            actions[name] = QAction(ico, txt, self, checkable=True)
else:
actions[name] = QAction(ico, txt, self)
if shortcut:
actions[name].setShortcut(shortcut)
#actions[name].setShortcutContext(QtCore.Qt.WidgetShortcut)
if tip:
actions[name].setStatusTip(tip)
if func:
actions[name].triggered.connect(func)
return actions
def fill_menus(self: QMainWindow,
menus_list: List[Tuple[str, str, List[str]]],
actions: Dict[str, QAction],
               allow_missing_actions: bool=False) -> Dict[str, QMenu]:
assert len(self.actions) > 0, self.actions
menus = {}
for name, header, actions_to_add in menus_list:
        # skip any leading '' separators in the action list
        for i, item in enumerate(actions_to_add):
            if item != '':
                break
        actions_to_add = actions_to_add[i:]
if len(actions_to_add) == 0:
continue
#file_menu = self.menubar.addMenu('&Help')
if isinstance(header, str):
menu = self.menubar.addMenu(header)
assert isinstance(menu, QMenu), menu
elif isinstance(header, QToolBar):
menu = header
else:
raise TypeError(header)
for action_name in actions_to_add:
if action_name == '':
menu.addSeparator()
continue
try:
action = self.actions[action_name]
except KeyError:
if not allow_missing_actions:
raise
                self.log.warning(f'missing action {action_name}')
                continue
            menu.addAction(action)
menus[name] = menu
return menus
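# --- Usage sketch (editor's addition, not part of the original module) ---
# Illustrative only, assuming a QMainWindow subclass that defines `menubar`,
# `log`, and `actions` attributes as the helpers above expect; `icon_path`
# and the callbacks are placeholders.
#
#     tools_list = [
#         # (name, text, icon, shortcut, tip, func)
#         ('open', '&Open...', 'open.png', 'Ctrl+O', 'Open a file', self.on_open),
#         ('exit', 'E&xit', 'exit.png', 'Ctrl+Q', 'Quit the application', self.close),
#     ]
#     self.actions = build_actions(self, {}, icon_path, tools_list,
#                                  checkables_set=set(), log=self.log)
#     menus = fill_menus(self, [('file', '&File', ['open', '', 'exit'])],
#                        self.actions)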
|
# pip install streamlit streamlit_folium
# run with: streamlit run playground/st_view.py
import streamlit as st
from streamlit_folium import folium_static
import matplotlib.pyplot as plt
from slither.core.visualization import make_map, plot_speed_heartrate, plot_velocity_histogram
from slither.service import Service
from slither.core.ui_text import d
s = Service(base_path="tmp/data_import")
activities = s.list_activities()
page = st.sidebar.selectbox(
"Select page",
("Overview", "Activity")
)
n = st.sidebar.slider("Show first", 0, len(activities) - 1)
view_activity = st.sidebar.selectbox(
"Select view",
("Map", "Speed and Heartrate", "Velocities")
)
if page == "Overview":
header = f"""
| ID | Start | Sport | Distance | Duration | Calories | Heartrate |
| -- | ----- | ----- | -------- | -------- | -------- | --------- |"""
rows = [
f"| {i + 1} | {d.display_datetime(a.start_time)} | {d.display_sport(a.sport)} | {d.display_distance(a.distance)} | {d.display_time(a.time)} | {d.display_calories(a.calories)} | {d.display_heartrate(a.heartrate)} |"
for i, a in enumerate(activities)]
st.write("\n".join([header] + rows))
if page == "Activity":
a = activities[n]
st.write(f"""
| Start | Sport | Distance | Duration | Calories | Heartrate |
| ----- | ----- | -------- | -------- | -------- | --------- |
| {d.display_datetime(a.start_time)} | {d.display_sport(a.sport)} | {d.display_distance(a.distance)} | {d.display_time(a.time)} | {d.display_calories(a.calories)} | {d.display_heartrate(a.heartrate)} |
---
""")
if view_activity == "Map":
if a.has_path:
m = make_map(a.get_path())
folium_static(m)
elif view_activity == "Speed and Heartrate":
fig = plt.figure()
ax = plt.subplot(111)
twin_ax = ax.twinx()
if a.has_path:
lines, labels = plot_speed_heartrate(ax, twin_ax, a.get_path())
fig.legend(handles=lines, labels=labels, loc="upper center")
st.write(fig)
elif view_activity == "Velocities":
fig = plt.figure()
ax = plt.subplot(111)
if a.has_path:
plot_velocity_histogram(a.get_path(), ax)
st.write(fig)
|
# -*- coding: UTF-8 -*-
import win32api
import win32gui
import win32con
import os
def setwallpaper(pic):
    # open the registry key that controls the desktop wallpaper
regkey = win32api.RegOpenKeyEx(win32con.HKEY_CURRENT_USER, "Control Panel\\Desktop", 0, win32con.KEY_SET_VALUE)
win32api.RegSetValueEx(regkey, "WallpaperStyle", 0, win32con.REG_SZ, "0")
win32api.RegSetValueEx(regkey, "TileWallpaper", 0, win32con.REG_SZ, "0")
    # apply the new wallpaper and broadcast the change to the system
win32gui.SystemParametersInfo(win32con.SPI_SETDESKWALLPAPER, pic, win32con.SPIF_SENDWININICHANGE)
setwallpaper(os.path.dirname(os.getcwd()) + "\\cache\\cache.jpg")
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import functools
from hermes_python.hermes import Hermes
#from hermes_python.ontology import *
import datetime, time
from threading import Timer
THRESHOLD_INTENT_CONFSCORE_DROP = 0.3
THRESHOLD_INTENT_CONFSCORE_TAKE = 0.6
CREATOR = "maremoto:"
INTENT_HELP_ME = CREATOR + "helpMe"
INTENT_CALL_SOMEONE = CREATOR + "callSomeone"
INTENT_RAISE_ALARM = CREATOR + "raiseAlarm"
INTENT_YES = CREATOR + "yes"
INTENT_NO = CREATOR + "No"
INTENT_END = CREATOR + "everythingIsOk"
# fake intents to communicate to satellites
INTENT_CALL_END = CREATOR + "callEnd"
INTENT_CLEAR_ALARM = CREATOR + "clearAlarm"
INTENT_FILTER_INCLUDE_ALL = [
INTENT_HELP_ME,
INTENT_CALL_SOMEONE,
INTENT_RAISE_ALARM,
INTENT_YES,
INTENT_NO,
INTENT_END
]
"""
INTENT_FILTER_GET_NAME = [
INTENT_CALL_SOMEONE,
INTENT_RAISE_ALARM,
INTENT_END
]
INTENT_FILTER_YESNO = [
INTENT_YES,
INTENT_NO,
INTENT_CALL_SOMEONE,
INTENT_RAISE_ALARM,
INTENT_END
]
"""
# Seconds to wait before raising the alarm, so the final assistance message can be spoken
SECONDS_LOCUTION_TO_SOUND = 6
def ahora():
then = datetime.datetime.now()
return (time.mktime(then.timetuple())*1e3 + then.microsecond/1e3)/1000
class SnipsMPU(object):
'''
Client for MQTT protocol at BASE
'''
def __init__(self, i18n, mqtt_addr, site_id, timeout_failure_seconds,
assistances_manager, alarm, pixels):
self.__i18n = i18n
self.__mqtt_addr = mqtt_addr
self.__base_site_id = site_id
self.__timeout_failure_seconds = timeout_failure_seconds
self.__assistances_manager = assistances_manager
self.__alarm = alarm
self.__pixels = pixels
self.__hermes = None
self.__check_timer = None
# ============
# Checking decorators and helpers
# ============
def _append_text(self, sentence, text):
if len(sentence.strip()) > 1:
return sentence + " . " + text # para que haga espacio
else:
return text
def _check_site_id(handler):
@functools.wraps(handler)
def wrapper(self, hermes, intent_message):
if intent_message.site_id != self.__base_site_id:
print("SATTELLITE SESSION:",intent_message.site_id,"!=",self.__base_site_id)
return handler(self, hermes, intent_message)
else:
return handler(self, hermes, intent_message)
return wrapper
def _check_confidence_score(handler):
@functools.wraps(handler)
def wrapper(self, hermes, intent_message):
session_id = intent_message.session_id
if handler is None:
return None
'''
if intent_message.intent.confidence_score < THRESHOLD_INTENT_CONFSCORE_DROP:
hermes.publish_end_session(session_id, "")
return None
'''
if intent_message.intent.confidence_score <= THRESHOLD_INTENT_CONFSCORE_TAKE:
hermes.publish_continue_session(session_id, self.__i18n.get('error.doNotUnderstand'), INTENT_FILTER_INCLUDE_ALL)
return None
return handler(self, hermes, intent_message)
return wrapper
# ============
# Session helpers
# ============
def _trace_session(self, msg):
session_id = msg.session_id
site_id = msg.site_id
custom_data = msg.custom_data
print(" sessionID:", session_id)
print(" session site ID:",site_id)
print(" customData:",custom_data)
return session_id, site_id, custom_data
def _session_started(self, hermes, session_started_message):
print("# Session Started")
session_id, site_id, custom_data = self._trace_session(session_started_message)
# Satellite session button pushed, help requested
if site_id != self.__base_site_id:
if custom_data is not None and (custom_data.split(",")[0] in [ INTENT_HELP_ME , INTENT_CALL_END, INTENT_CLEAR_ALARM ]):
return
# The actions will be executed when the session is finished, to avoid sessions to mix
assistance = self.__assistances_manager.get_assistance(site_id, hermes)
assistance.update_session(hermes, session_id, self.__base_site_id)
def _session_ended(self, hermes, session_ended_message):
print("# Session Ended")
session_id, site_id, custom_data = self._trace_session(session_ended_message)
if site_id == self.__base_site_id or custom_data is None or custom_data == "":
assistance = self.__assistances_manager.get_assistance(site_id, hermes)
assistance.update_session(hermes, None, self.__base_site_id)
# Internal messaging satellite->base
else:
time.sleep(0.5) # avoid race conditions
action = custom_data.split(",")[0]
# Satellite request for help
if action == INTENT_HELP_ME:
client_name=custom_data.split(",")[1]
if self.__alarm.is_on():
self.__alarm.off()
assistance = self.__assistances_manager.get_assistance(site_id, hermes, client_name)
assistance.alarm_off(hermes)
else:
self.handler_user_request_help(hermes, None, site_id, client_name=client_name)
# Satellite informs of call result
elif action == INTENT_CALL_END:
assistance = self.__assistances_manager.get_assistance(site_id, hermes)
assistance.remote_call_result(hermes, int(custom_data.split(",")[1]))
# Satellite informs of alarm clear
elif action == INTENT_CLEAR_ALARM:
if self.__alarm.is_on():
self.__alarm.off()
assistance = self.__assistances_manager.get_assistance(site_id, hermes)
assistance.alarm_off(hermes)
# No message to base, act normal
else:
assistance = self.__assistances_manager.get_assistance(site_id, hermes)
assistance.update_session(hermes, None, self.__base_site_id)
# ============
# Intent handlers
# ============
#@_check_confidence_score
def handler_user_request_help(self, hermes, intent_message, site_id=None, client_name=None):
print("User is asking for help")
if site_id is None:
site_id = intent_message.site_id
assistance = self.__assistances_manager.get_assistance(site_id, hermes, client_name)
assistance.start(hermes)
@_check_confidence_score
def handler_user_gives_name(self, hermes, intent_message):
print("User is calling to someone")
site_id = intent_message.site_id
name = None
if intent_message.slots.callee_name:
name = intent_message.slots.callee_name.first().value
assistance = self.__assistances_manager.get_assistance(site_id, hermes)
assistance.call_to_contact(hermes, name)
@_check_confidence_score
def handler_user_says_yes(self, hermes, intent_message):
print("User says yes")
site_id = intent_message.site_id
assistance = self.__assistances_manager.get_assistance(site_id, hermes)
assistance.yesno_answer(hermes, is_yes=True)
@_check_confidence_score
def handler_user_says_no(self, hermes, intent_message):
print("User says no")
site_id = intent_message.site_id
assistance = self.__assistances_manager.get_assistance(site_id, hermes)
assistance.yesno_answer(hermes, is_yes=False)
@_check_confidence_score
def handler_user_quits(self, hermes, intent_message):
print("User wants to quit")
site_id = intent_message.site_id
assistance = self.__assistances_manager.get_assistance(site_id, hermes)
assistance.quit_assistance(hermes)
@_check_confidence_score
def handler_raise_alarm(self, hermes, intent_message):
print("User wants to raise the alarm")
site_id = intent_message.site_id
assistance = self.__assistances_manager.get_assistance(site_id, hermes)
self.fatal(assistance, hermes)
# ============
# Check failed assistances every second
# ============
def _check_failed_assistances(self):
#print(" ...check failed assistances\n", self.__assistances_manager)
failed_assistances = self.__assistances_manager.get_failed_assistances(self.__timeout_failure_seconds)
for assistance in failed_assistances:
if assistance.immediate_alarm():
sentence = self.__i18n.get('error.automaticAlarmOn')
self.fatal(assistance, assistance.get_hermes(), sentence=sentence, call_to_default=False)
else:
sentence = self.__i18n.get('error.silenceAlarmOn', {"timeout": self.__timeout_failure_seconds})
self.fatal(assistance, assistance.get_hermes(), sentence=sentence, call_to_default=True)
break # no more than one alarm
self.__check_timer = Timer(1, self._check_failed_assistances)
self.__check_timer.start()
# ============
# Exported procedures
# ============
def pushed_button(self):
'''
Hardware hotword or cancel alarm
'''
print("User pushes the button")
site_id = self.__base_site_id # action in the base
hermes = self.__hermes
assistance = self.__assistances_manager.get_assistance(site_id, hermes)
self.__pixels.wakeup()
time.sleep(0.1)
self.__pixels.off()
if self.__alarm.is_on():
self.__alarm.off()
assistance.alarm_off(hermes)
elif assistance.is_active():
if assistance.is_calling():
print("Hang up the call")
assistance.hang_up()
else:
print("The last assistance is on progress, nothing to be done")
                #TODO add BOUNCE_TIME for the button
else:
self.handler_user_request_help(hermes, None, site_id)
def fatal(self, assistance, hermes, sentence="", call_to_default=False):
'''
Fatal error action
'''
if not self.__alarm.is_on():
def alarm_off_callback():
assistance.alarm_off(hermes)
# delay alarm on with a callback to allow emergency call
if call_to_default:
def call_to_default_callback():
self.__alarm.on(delay=SECONDS_LOCUTION_TO_SOUND, off_callback=alarm_off_callback)
assistance.alarm_on(hermes, sentence=sentence, call_to_default_callback=call_to_default_callback)
# alarm on now
else:
assistance.alarm_on(hermes, sentence=sentence)
self.__alarm.on(delay=SECONDS_LOCUTION_TO_SOUND, off_callback=alarm_off_callback)
else:
print("[Alarm] already on")
def start_block(self):
'''
Protocol start
'''
# Start check timer
self._check_failed_assistances()
# Subscribe to voice intents
with Hermes(self.__mqtt_addr
                    ,rust_logs_enabled=True  #TODO remove
) as h:
self.__hermes = h
h.subscribe_intent(INTENT_HELP_ME, self.handler_user_request_help) \
.subscribe_intent(INTENT_CALL_SOMEONE, self.handler_user_gives_name) \
.subscribe_intent(INTENT_YES, self.handler_user_says_yes) \
.subscribe_intent(INTENT_NO, self.handler_user_says_no) \
.subscribe_intent(INTENT_RAISE_ALARM, self.handler_raise_alarm) \
.subscribe_intent(INTENT_END, self.handler_user_quits) \
.subscribe_session_ended(self._session_ended) \
.subscribe_session_started(self._session_started) \
.loop_forever()
'''
.loop_start()
while True:
time.sleep(1)
            #TODO check whether this works and whether monitoring of failed assistances can be placed here
'''
def stop(self):
'''
Stop working
'''
# Cancel check timer
if self.__check_timer is not None:
self.__check_timer.cancel()
self.__check_timer = None
# Alarm stop
self.__alarm.off(dismiss_callback=True)
# Stop client
self.__hermes.loop_stop()
self.__hermes = None
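# --- Wiring sketch (editor's addition, not part of the original file) ---
# Illustrative only: i18n, assistances_manager, alarm and pixels are
# project-specific collaborators whose construction is not shown here, and the
# MQTT address, site id and timeout are placeholders.
#
#     mpu = SnipsMPU(i18n, "localhost:1883", "base", 120,
#                    assistances_manager, alarm, pixels)
#     try:
#         mpu.start_block()   # subscribes to the intents and loops forever
#     except KeyboardInterrupt:
#         mpu.stop()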
|
import logging # DEBUG INFO WARNING ERROR
import tkinter.ttk as TTK # use for Combobox
import win_main2
from function import *
from logging.handlers import QueueHandler
import queue
import configparser
from matplotlib.backends.backend_tkagg import (
FigureCanvasTkAgg, NavigationToolbar2Tk)
import matplotlib.pyplot as plt
import tkinter as tk
from tkinter import scrolledtext # use to display logger
import matplotlib
import numpy as np
import os
import sys
def GUI_start():
"""This will initialise the loggers
After all imports it will start the main window
"""
print("-_____GUI start____-")
logging.basicConfig(filename="logging.log", level=logging.DEBUG, # <- set logging level
format="%(asctime)s:%(levelname)s:%(message)s") # set level
    # Set up multiple log handlers
loggerGUI = logging.getLogger(__name__)
loggerGUI.setLevel(logging.DEBUG) # <- set logging level
logging_handler = logging.FileHandler("log_GUI_file.log")
formatter = logging.Formatter("%(asctime)s:%(levelname)s:%(message)s")
logging_handler.setFormatter(formatter)
loggerGUI.addHandler(logging_handler)
loggerGUI.info("set upp logger in puls_win.py")
logger_gui = logging.getLogger('GUI')
logger_gui.addHandler(logging.StreamHandler())
logger_gui.info("logging from GUI start up")
# windows
# from puls_win import *
# from pre_expsetup import *
print("-_____GUI init of logging end____-")
print("___start GUI analys")
    # show window, wait for user input
win_main = win_main2.window_main()
win_main.mainloop()
# end
print("_____end from GUI_analyzer___")
|
"""Run in command line mode."""
import fire
import morfeus.buried_volume
import morfeus.cone_angle
import morfeus.conformer
import morfeus.dispersion
import morfeus.local_force
import morfeus.pyramidalization
import morfeus.sasa
import morfeus.sterimol
import morfeus.visible_volume
import morfeus.xtb
def main() -> None:
"""Call Fire to access command line scripts."""
fire.Fire(
{
"buried_volume": morfeus.buried_volume.cli,
"cone_angle": morfeus.cone_angle.cli,
"conformer": morfeus.conformer.cli,
"dispersion": morfeus.dispersion.cli,
"local_force": morfeus.local_force.cli,
"pyramidalization": morfeus.pyramidalization.cli,
"sasa": morfeus.sasa.cli,
"sterimol": morfeus.sterimol.cli,
"visible_volume": morfeus.visible_volume.cli,
"xtb": morfeus.xtb.cli,
}
)
if __name__ == "__main__":
main()
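# Example invocation (editor's note): with Python Fire each key above becomes a
# subcommand, so assuming this module is installed as the `morfeus` console
# script, something like `morfeus sterimol -- --help` prints the arguments that
# the corresponding `cli` function accepts.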
|
'''
Plane defined by vectors and point.
'''
# - Imports
from ..exceptions import VectorsNotSameSize
from typing import TypeVar
# - Globals
# Typing for Vectors and Points
Point = TypeVar("Point")
Vector = TypeVar("Vector")
class Plane:
"""
Defines a plane
Parameters
----------
point
Point on the plane.
vector1
Direction vector.
vector2
Direction vector.
Raises
------
VectorsNotSameSize
If the vectors are not the same size this error will be raised.
"""
def __init__(self, point: Point, vector1: Vector, vector2: Vector):
self.point = point
self.vectors = [vector1, vector2]
# Throw error if vectors different sizes
        if not (len(point) == len(vector1) == len(vector2)):
raise VectorsNotSameSize()
def __len__(self):
# Size of point vector
        return len(self.point)
|
import pytest
from aioworkers.core.config import Config, MergeDict
from aioworkers.core.context import (
Context,
EntityContextProcessor,
GroupResolver,
Octopus,
Signal,
)
def test_octopus():
f = Octopus()
f.r = 1
assert f['r'] == 1
f['g'] = 2
assert f.g == 2
f['y.t'] = 3
assert f.y.t == 3
f['d.w.f'] = 4
assert dir(f)
assert repr(f)
assert f.__repr__(header=True)
assert f.items()
f.s = 'w'
assert callable(f['s.upper'])
assert f['s.upper']() == 'W'
f[None] = True
assert not f[None]
def test_octopus_iter():
f = Octopus()
f.r = 1
assert f['r'] == 1
f['g'] = 2
assert f.g == 2
f['y.t'] = 3
assert f.y.t == 3
f['d.w.f'] = 4
assert list(f.find_iter(int))
f['d.f'] = f
assert list(f.find_iter(int))
async def test_context_items(loop):
f = Context({}, loop=loop)
f.r = 1
assert f['r'] == 1
f['g'] = 2
assert f.g == 2
f['y.t'] = 3
assert f.y.t == 3
f['d.w.f'] = 4
assert dir(f)
assert repr(f)
await f.stop()
async def test_context_create(loop):
conf = Config()
conf.update(MergeDict({
'q.cls': 'aioworkers.queue.timeout.TimestampQueue',
'f.e': 1,
}))
c = Context(conf, loop=loop)
await c.init()
await c.start()
assert c.config.f.e == 1
with pytest.raises(AttributeError):
c.r
with pytest.raises(KeyError):
c['r']
async def handler(context):
pass
c.on_stop.append(handler)
async def handler():
raise ValueError
c.on_stop.append(handler)
c.on_stop.append(handler())
c.on_stop.append(1)
await c.stop()
def test_group_resolver():
gr = GroupResolver()
assert not gr.match(['1'])
assert gr.match(None)
gr = GroupResolver(all_groups=True)
assert gr.match(['1'])
assert gr.match(None)
gr = GroupResolver(default=False)
assert not gr.match(['1'])
assert not gr.match(None)
gr = GroupResolver(exclude=['1'])
assert not gr.match(['1'])
assert not gr.match(['1', '2'])
assert gr.match(None)
gr = GroupResolver(exclude=['1'], all_groups=True)
assert not gr.match(['1'])
assert not gr.match(['1', '2'])
assert gr.match(None)
gr = GroupResolver(include=['1'])
assert gr.match(['1'])
assert gr.match(None)
async def test_signal(loop):
gr = GroupResolver()
context = Context({}, loop=loop, group_resolver=gr)
s = Signal(context)
s.append(1, ('1',))
s.append(1)
await s.send(gr)
await s.send(GroupResolver(all_groups=True))
async def test_func(loop):
config = MergeDict(
now={
'func': 'time.monotonic',
}
)
context = Context(config, loop=loop)
async with context:
assert isinstance(context.now, float)
def test_create_entity():
with pytest.raises((ValueError, TypeError)):
EntityContextProcessor(None, 'x', {'cls': 'time.time'})
with pytest.raises(TypeError):
EntityContextProcessor(None, 'x', {'cls': 'aioworkers.humanize.size'})
|
# 21/11/02 = Tue
# 54. Spiral Matrix [Medium]
# Given an m x n matrix, return all elements of the matrix in spiral order.
# Example 1:
# Input: matrix = [[1,2,3],[4,5,6],[7,8,9]]
# Output: [1,2,3,6,9,8,7,4,5]
# Example 2:
# Input: matrix = [[1,2,3,4],[5,6,7,8],[9,10,11,12]]
# Output: [1,2,3,4,8,12,11,10,9,5,6,7]
# Constraints:
# m == matrix.length
# n == matrix[i].length
# 1 <= m, n <= 10
# -100 <= matrix[i][j] <= 100
from typing import List
class Solution:
def spiralOrder(self, matrix: List[List[int]]) -> List[int]:
ret = []
m = len(matrix)
n = len(matrix[0])
base = 0
while m > 0 and n > 0:
for c in range(0, n):
ret.append(matrix[base][base+c])
for r in range(1, m):
ret.append(matrix[base+r][base+n-1])
if m > 1:
for c in reversed(range(0, n-1)):
ret.append(matrix[base+m-1][base+c])
if n > 1:
for r in reversed(range(1, m-1)):
ret.append(matrix[base+r][base])
m -= 2
n -= 2
base += 1
return ret
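# A minimal sanity check (editor's addition, not part of the original
# solution), using the two examples from the problem statement above.
if __name__ == "__main__":
    sol = Solution()
    assert sol.spiralOrder([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) == \
        [1, 2, 3, 6, 9, 8, 7, 4, 5]
    assert sol.spiralOrder([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]) == \
        [1, 2, 3, 4, 8, 12, 11, 10, 9, 5, 6, 7]
    print("spiralOrder examples OK")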
|
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for swift.common.request_helpers"""
import unittest
from swift.common.swob import Request, HTTPException, HeaderKeyDict
from swift.common.storage_policy import POLICIES, EC_POLICY, REPL_POLICY
from swift.common import request_helpers as rh
from swift.common.constraints import AUTO_CREATE_ACCOUNT_PREFIX
from test.unit import patch_policies
from test.unit.common.test_utils import FakeResponse
server_types = ['account', 'container', 'object']
class TestRequestHelpers(unittest.TestCase):
def test_constrain_req_limit(self):
req = Request.blank('')
self.assertEqual(10, rh.constrain_req_limit(req, 10))
req = Request.blank('', query_string='limit=1')
self.assertEqual(1, rh.constrain_req_limit(req, 10))
req = Request.blank('', query_string='limit=1.0')
self.assertEqual(10, rh.constrain_req_limit(req, 10))
req = Request.blank('', query_string='limit=11')
with self.assertRaises(HTTPException) as raised:
rh.constrain_req_limit(req, 10)
self.assertEqual(raised.exception.status_int, 412)
def test_validate_params(self):
req = Request.blank('')
actual = rh.validate_params(req, ('limit', 'marker', 'end_marker'))
self.assertEqual({}, actual)
req = Request.blank('', query_string='limit=1&junk=here&marker=foo')
actual = rh.validate_params(req, ())
self.assertEqual({}, actual)
req = Request.blank('', query_string='limit=1&junk=here&marker=foo')
actual = rh.validate_params(req, ('limit', 'marker', 'end_marker'))
expected = {'limit': '1', 'marker': 'foo'}
self.assertEqual(expected, actual)
req = Request.blank('', query_string='limit=1&junk=here&marker=')
actual = rh.validate_params(req, ('limit', 'marker', 'end_marker'))
expected = {'limit': '1', 'marker': ''}
self.assertEqual(expected, actual)
# ignore bad junk
req = Request.blank('', query_string='limit=1&junk=%ff&marker=foo')
actual = rh.validate_params(req, ('limit', 'marker', 'end_marker'))
expected = {'limit': '1', 'marker': 'foo'}
self.assertEqual(expected, actual)
# error on bad wanted parameter
req = Request.blank('', query_string='limit=1&junk=here&marker=%ff')
with self.assertRaises(HTTPException) as raised:
rh.validate_params(req, ('limit', 'marker', 'end_marker'))
self.assertEqual(raised.exception.status_int, 400)
def test_validate_container_params(self):
req = Request.blank('')
actual = rh.validate_container_params(req)
self.assertEqual({'limit': 10000}, actual)
req = Request.blank('', query_string='limit=1&junk=here&marker=foo')
actual = rh.validate_container_params(req)
expected = {'limit': 1, 'marker': 'foo'}
self.assertEqual(expected, actual)
req = Request.blank('', query_string='limit=1&junk=here&marker=')
actual = rh.validate_container_params(req)
expected = {'limit': 1, 'marker': ''}
self.assertEqual(expected, actual)
# ignore bad junk
req = Request.blank('', query_string='limit=1&junk=%ff&marker=foo')
actual = rh.validate_container_params(req)
expected = {'limit': 1, 'marker': 'foo'}
self.assertEqual(expected, actual)
# error on bad wanted parameter
req = Request.blank('', query_string='limit=1&junk=here&marker=%ff')
with self.assertRaises(HTTPException) as raised:
rh.validate_container_params(req)
self.assertEqual(raised.exception.status_int, 400)
# error on bad limit
req = Request.blank('', query_string='limit=10001')
with self.assertRaises(HTTPException) as raised:
rh.validate_container_params(req)
self.assertEqual(raised.exception.status_int, 412)
def test_is_user_meta(self):
m_type = 'meta'
for st in server_types:
self.assertTrue(rh.is_user_meta(st, 'x-%s-%s-foo' % (st, m_type)))
self.assertFalse(rh.is_user_meta(st, 'x-%s-%s-' % (st, m_type)))
self.assertFalse(rh.is_user_meta(st, 'x-%s-%sfoo' % (st, m_type)))
def test_is_sys_meta(self):
m_type = 'sysmeta'
for st in server_types:
self.assertTrue(rh.is_sys_meta(st, 'x-%s-%s-foo' % (st, m_type)))
self.assertFalse(rh.is_sys_meta(st, 'x-%s-%s-' % (st, m_type)))
self.assertFalse(rh.is_sys_meta(st, 'x-%s-%sfoo' % (st, m_type)))
def test_is_sys_or_user_meta(self):
m_types = ['sysmeta', 'meta']
for mt in m_types:
for st in server_types:
self.assertTrue(rh.is_sys_or_user_meta(
st, 'x-%s-%s-foo' % (st, mt)))
self.assertFalse(rh.is_sys_or_user_meta(
st, 'x-%s-%s-' % (st, mt)))
self.assertFalse(rh.is_sys_or_user_meta(
st, 'x-%s-%sfoo' % (st, mt)))
def test_strip_sys_meta_prefix(self):
mt = 'sysmeta'
for st in server_types:
self.assertEqual(rh.strip_sys_meta_prefix(
st, 'x-%s-%s-a' % (st, mt)), 'a')
mt = 'not-sysmeta'
for st in server_types:
with self.assertRaises(ValueError):
rh.strip_sys_meta_prefix(st, 'x-%s-%s-a' % (st, mt))
def test_strip_user_meta_prefix(self):
mt = 'meta'
for st in server_types:
self.assertEqual(rh.strip_user_meta_prefix(
st, 'x-%s-%s-a' % (st, mt)), 'a')
mt = 'not-meta'
for st in server_types:
with self.assertRaises(ValueError):
rh.strip_sys_meta_prefix(st, 'x-%s-%s-a' % (st, mt))
def test_is_object_transient_sysmeta(self):
self.assertTrue(rh.is_object_transient_sysmeta(
'x-object-transient-sysmeta-foo'))
self.assertFalse(rh.is_object_transient_sysmeta(
'x-object-transient-sysmeta-'))
self.assertFalse(rh.is_object_transient_sysmeta(
'x-object-meatmeta-foo'))
def test_strip_object_transient_sysmeta_prefix(self):
mt = 'object-transient-sysmeta'
self.assertEqual(rh.strip_object_transient_sysmeta_prefix(
'x-%s-a' % mt), 'a')
mt = 'object-sysmeta-transient'
with self.assertRaises(ValueError):
rh.strip_object_transient_sysmeta_prefix('x-%s-a' % mt)
def test_remove_items(self):
src = {'a': 'b',
'c': 'd'}
test = lambda x: x == 'a'
rem = rh.remove_items(src, test)
self.assertEqual(src, {'c': 'd'})
self.assertEqual(rem, {'a': 'b'})
def test_copy_header_subset(self):
src = {'a': 'b',
'c': 'd'}
from_req = Request.blank('/path', environ={}, headers=src)
to_req = Request.blank('/path', {})
test = lambda x: x.lower() == 'a'
rh.copy_header_subset(from_req, to_req, test)
self.assertTrue('A' in to_req.headers)
self.assertEqual(to_req.headers['A'], 'b')
self.assertFalse('c' in to_req.headers)
self.assertFalse('C' in to_req.headers)
def test_get_ip_port(self):
node = {
'ip': '1.2.3.4',
'port': 6000,
'replication_ip': '5.6.7.8',
'replication_port': 7000,
}
self.assertEqual(('1.2.3.4', 6000), rh.get_ip_port(node, {}))
self.assertEqual(('5.6.7.8', 7000), rh.get_ip_port(node, {
rh.USE_REPLICATION_NETWORK_HEADER: 'true'}))
self.assertEqual(('1.2.3.4', 6000), rh.get_ip_port(node, {
rh.USE_REPLICATION_NETWORK_HEADER: 'false'}))
@patch_policies(with_ec_default=True)
def test_get_name_and_placement_object_req(self):
path = '/device/part/account/container/object'
req = Request.blank(path, headers={
'X-Backend-Storage-Policy-Index': '0'})
device, part, account, container, obj, policy = \
rh.get_name_and_placement(req, 5, 5, True)
self.assertEqual(device, 'device')
self.assertEqual(part, 'part')
self.assertEqual(account, 'account')
self.assertEqual(container, 'container')
self.assertEqual(obj, 'object')
self.assertEqual(policy, POLICIES[0])
self.assertEqual(policy.policy_type, EC_POLICY)
req.headers['X-Backend-Storage-Policy-Index'] = 1
device, part, account, container, obj, policy = \
rh.get_name_and_placement(req, 5, 5, True)
self.assertEqual(device, 'device')
self.assertEqual(part, 'part')
self.assertEqual(account, 'account')
self.assertEqual(container, 'container')
self.assertEqual(obj, 'object')
self.assertEqual(policy, POLICIES[1])
self.assertEqual(policy.policy_type, REPL_POLICY)
req.headers['X-Backend-Storage-Policy-Index'] = 'foo'
with self.assertRaises(HTTPException) as raised:
device, part, account, container, obj, policy = \
rh.get_name_and_placement(req, 5, 5, True)
e = raised.exception
self.assertEqual(e.status_int, 503)
self.assertEqual(str(e), '503 Service Unavailable')
self.assertEqual(e.body, b"No policy with index foo")
@patch_policies(with_ec_default=True)
def test_get_name_and_placement_object_replication(self):
# yup, suffixes are sent '-'.joined in the path
path = '/device/part/012-345-678-9ab-cde'
req = Request.blank(path, headers={
'X-Backend-Storage-Policy-Index': '0'})
device, partition, suffix_parts, policy = \
rh.get_name_and_placement(req, 2, 3, True)
self.assertEqual(device, 'device')
self.assertEqual(partition, 'part')
self.assertEqual(suffix_parts, '012-345-678-9ab-cde')
self.assertEqual(policy, POLICIES[0])
self.assertEqual(policy.policy_type, EC_POLICY)
path = '/device/part'
req = Request.blank(path, headers={
'X-Backend-Storage-Policy-Index': '1'})
device, partition, suffix_parts, policy = \
rh.get_name_and_placement(req, 2, 3, True)
self.assertEqual(device, 'device')
self.assertEqual(partition, 'part')
self.assertIsNone(suffix_parts) # false-y
self.assertEqual(policy, POLICIES[1])
self.assertEqual(policy.policy_type, REPL_POLICY)
path = '/device/part/' # with a trailing slash
req = Request.blank(path, headers={
'X-Backend-Storage-Policy-Index': '1'})
device, partition, suffix_parts, policy = \
rh.get_name_and_placement(req, 2, 3, True)
self.assertEqual(device, 'device')
self.assertEqual(partition, 'part')
self.assertEqual(suffix_parts, '') # still false-y
self.assertEqual(policy, POLICIES[1])
self.assertEqual(policy.policy_type, REPL_POLICY)
def test_validate_internal_name(self):
self.assertIsNone(rh._validate_internal_name('foo'))
self.assertIsNone(rh._validate_internal_name(
rh.get_reserved_name('foo')))
self.assertIsNone(rh._validate_internal_name(
rh.get_reserved_name('foo', 'bar')))
self.assertIsNone(rh._validate_internal_name(''))
self.assertIsNone(rh._validate_internal_name(rh.RESERVED))
def test_invalid_reserved_name(self):
with self.assertRaises(HTTPException) as raised:
rh._validate_internal_name('foo' + rh.RESERVED)
e = raised.exception
self.assertEqual(e.status_int, 400)
self.assertEqual(str(e), '400 Bad Request')
self.assertEqual(e.body, b"Invalid reserved-namespace name")
def test_validate_internal_account(self):
self.assertIsNone(rh.validate_internal_account('AUTH_foo'))
self.assertIsNone(rh.validate_internal_account(
rh.get_reserved_name('AUTH_foo')))
with self.assertRaises(HTTPException) as raised:
rh.validate_internal_account('AUTH_foo' + rh.RESERVED)
e = raised.exception
self.assertEqual(e.status_int, 400)
self.assertEqual(str(e), '400 Bad Request')
self.assertEqual(e.body, b"Invalid reserved-namespace account")
def test_validate_internal_container(self):
self.assertIsNone(rh.validate_internal_container('AUTH_foo', 'bar'))
self.assertIsNone(rh.validate_internal_container(
rh.get_reserved_name('AUTH_foo'), 'bar'))
self.assertIsNone(rh.validate_internal_container(
'foo', rh.get_reserved_name('bar')))
self.assertIsNone(rh.validate_internal_container(
rh.get_reserved_name('AUTH_foo'), rh.get_reserved_name('bar')))
with self.assertRaises(HTTPException) as raised:
rh.validate_internal_container('AUTH_foo' + rh.RESERVED, 'bar')
e = raised.exception
self.assertEqual(e.status_int, 400)
self.assertEqual(str(e), '400 Bad Request')
self.assertEqual(e.body, b"Invalid reserved-namespace account")
with self.assertRaises(HTTPException) as raised:
rh.validate_internal_container('AUTH_foo', 'bar' + rh.RESERVED)
e = raised.exception
self.assertEqual(e.status_int, 400)
self.assertEqual(str(e), '400 Bad Request')
self.assertEqual(e.body, b"Invalid reserved-namespace container")
# These should always be operating on split_path outputs so this
# shouldn't really be an issue, but just in case...
for acct in ('', None):
with self.assertRaises(ValueError) as raised:
rh.validate_internal_container(
acct, 'bar')
self.assertEqual(raised.exception.args[0], 'Account is required')
def test_validate_internal_object(self):
self.assertIsNone(rh.validate_internal_obj('AUTH_foo', 'bar', 'baz'))
self.assertIsNone(rh.validate_internal_obj(
rh.get_reserved_name('AUTH_foo'), 'bar', 'baz'))
for acct in ('AUTH_foo', rh.get_reserved_name('AUTH_foo')):
self.assertIsNone(rh.validate_internal_obj(
acct,
rh.get_reserved_name('bar'),
rh.get_reserved_name('baz')))
for acct in ('AUTH_foo', rh.get_reserved_name('AUTH_foo')):
with self.assertRaises(HTTPException) as raised:
rh.validate_internal_obj(
acct, 'bar', rh.get_reserved_name('baz'))
e = raised.exception
self.assertEqual(e.status_int, 400)
self.assertEqual(str(e), '400 Bad Request')
self.assertEqual(e.body, b"Invalid reserved-namespace object "
b"in user-namespace container")
for acct in ('AUTH_foo', rh.get_reserved_name('AUTH_foo')):
with self.assertRaises(HTTPException) as raised:
rh.validate_internal_obj(
acct, rh.get_reserved_name('bar'), 'baz')
e = raised.exception
self.assertEqual(e.status_int, 400)
self.assertEqual(str(e), '400 Bad Request')
self.assertEqual(e.body, b"Invalid user-namespace object "
b"in reserved-namespace container")
# These should always be operating on split_path outputs so this
# shouldn't really be an issue, but just in case...
for acct in ('', None):
with self.assertRaises(ValueError) as raised:
rh.validate_internal_obj(
acct, 'bar', 'baz')
self.assertEqual(raised.exception.args[0], 'Account is required')
for cont in ('', None):
with self.assertRaises(ValueError) as raised:
rh.validate_internal_obj(
'AUTH_foo', cont, 'baz')
self.assertEqual(raised.exception.args[0], 'Container is required')
def test_invalid_names_in_system_accounts(self):
self.assertIsNone(rh.validate_internal_obj(
AUTO_CREATE_ACCOUNT_PREFIX + 'system_account', 'foo',
'crazy%stown' % rh.RESERVED))
def test_invalid_reserved_names(self):
with self.assertRaises(HTTPException) as raised:
rh.validate_internal_obj('AUTH_foo' + rh.RESERVED, 'bar', 'baz')
e = raised.exception
self.assertEqual(e.status_int, 400)
self.assertEqual(str(e), '400 Bad Request')
self.assertEqual(e.body, b"Invalid reserved-namespace account")
with self.assertRaises(HTTPException) as raised:
rh.validate_internal_obj('AUTH_foo', 'bar' + rh.RESERVED, 'baz')
e = raised.exception
self.assertEqual(e.status_int, 400)
self.assertEqual(str(e), '400 Bad Request')
self.assertEqual(e.body, b"Invalid reserved-namespace container")
with self.assertRaises(HTTPException) as raised:
rh.validate_internal_obj('AUTH_foo', 'bar', 'baz' + rh.RESERVED)
e = raised.exception
self.assertEqual(e.status_int, 400)
self.assertEqual(str(e), '400 Bad Request')
self.assertEqual(e.body, b"Invalid reserved-namespace object")
def test_get_reserved_name(self):
expectations = {
tuple(): rh.RESERVED,
('',): rh.RESERVED,
('foo',): rh.RESERVED + 'foo',
('foo', 'bar'): rh.RESERVED + 'foo' + rh.RESERVED + 'bar',
('foo', ''): rh.RESERVED + 'foo' + rh.RESERVED,
('', ''): rh.RESERVED * 2,
}
failures = []
for parts, expected in expectations.items():
name = rh.get_reserved_name(*parts)
if name != expected:
failures.append('get given %r expected %r != %r' % (
parts, expected, name))
if failures:
            self.fail('Unexpected results:\n' + '\n'.join(failures))
def test_invalid_get_reserved_name(self):
self.assertRaises(ValueError)
with self.assertRaises(ValueError) as ctx:
rh.get_reserved_name('foo', rh.RESERVED + 'bar', 'baz')
self.assertEqual(str(ctx.exception),
'Invalid reserved part in components')

    def test_split_reserved_name(self):
expectations = {
rh.RESERVED: ('',),
rh.RESERVED + 'foo': ('foo',),
rh.RESERVED + 'foo' + rh.RESERVED + 'bar': ('foo', 'bar'),
rh.RESERVED + 'foo' + rh.RESERVED: ('foo', ''),
rh.RESERVED * 2: ('', ''),
}
failures = []
for name, expected in expectations.items():
parts = rh.split_reserved_name(name)
if tuple(parts) != expected:
failures.append('split given %r expected %r != %r' % (
name, expected, parts))
if failures:
            self.fail('Unexpected results:\n' + '\n'.join(failures))

    def test_invalid_split_reserved_name(self):
with self.assertRaises(ValueError) as ctx:
rh.split_reserved_name('foo')
self.assertEqual(str(ctx.exception),
'Invalid reserved name')
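
    def test_reserved_name_round_trip_sketch(self):
        # A minimal additional sketch, not part of the original suite: it
        # only assumes that splitting a reserved name built from the parts
        # already listed in the expectation tables above recovers those
        # parts, i.e. split_reserved_name inverts get_reserved_name for
        # these cases.
        for parts in (('foo',), ('foo', 'bar'), ('foo', '')):
            name = rh.get_reserved_name(*parts)
            self.assertEqual(tuple(rh.split_reserved_name(name)), parts)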


class TestHTTPResponseToDocumentIters(unittest.TestCase):
def test_200(self):
fr = FakeResponse(
200,
{'Content-Length': '10', 'Content-Type': 'application/lunch'},
b'sandwiches')
doc_iters = rh.http_response_to_document_iters(fr)
first_byte, last_byte, length, headers, body = next(doc_iters)
self.assertEqual(first_byte, 0)
self.assertEqual(last_byte, 9)
self.assertEqual(length, 10)
header_dict = HeaderKeyDict(headers)
self.assertEqual(header_dict.get('Content-Length'), '10')
self.assertEqual(header_dict.get('Content-Type'), 'application/lunch')
self.assertEqual(body.read(), b'sandwiches')
self.assertRaises(StopIteration, next, doc_iters)
fr = FakeResponse(
200,
{'Transfer-Encoding': 'chunked',
'Content-Type': 'application/lunch'},
b'sandwiches')
doc_iters = rh.http_response_to_document_iters(fr)
first_byte, last_byte, length, headers, body = next(doc_iters)
self.assertEqual(first_byte, 0)
self.assertIsNone(last_byte)
self.assertIsNone(length)
header_dict = HeaderKeyDict(headers)
self.assertEqual(header_dict.get('Transfer-Encoding'), 'chunked')
self.assertEqual(header_dict.get('Content-Type'), 'application/lunch')
self.assertEqual(body.read(), b'sandwiches')
self.assertRaises(StopIteration, next, doc_iters)

    def test_206_single_range(self):
fr = FakeResponse(
206,
{'Content-Length': '8', 'Content-Type': 'application/lunch',
'Content-Range': 'bytes 1-8/10'},
b'andwiche')
doc_iters = rh.http_response_to_document_iters(fr)
first_byte, last_byte, length, headers, body = next(doc_iters)
self.assertEqual(first_byte, 1)
self.assertEqual(last_byte, 8)
self.assertEqual(length, 10)
header_dict = HeaderKeyDict(headers)
self.assertEqual(header_dict.get('Content-Length'), '8')
self.assertEqual(header_dict.get('Content-Type'), 'application/lunch')
self.assertEqual(body.read(), b'andwiche')
self.assertRaises(StopIteration, next, doc_iters)
        # A chunked response should be treated in the same way as a
        # non-chunked one
fr = FakeResponse(
206,
{'Transfer-Encoding': 'chunked',
'Content-Type': 'application/lunch',
'Content-Range': 'bytes 1-8/10'},
b'andwiche')
doc_iters = rh.http_response_to_document_iters(fr)
first_byte, last_byte, length, headers, body = next(doc_iters)
self.assertEqual(first_byte, 1)
self.assertEqual(last_byte, 8)
self.assertEqual(length, 10)
header_dict = HeaderKeyDict(headers)
self.assertEqual(header_dict.get('Content-Type'), 'application/lunch')
self.assertEqual(body.read(), b'andwiche')
self.assertRaises(StopIteration, next, doc_iters)

    def test_206_multiple_ranges(self):
fr = FakeResponse(
206,
{'Content-Type': 'multipart/byteranges; boundary=asdfasdfasdf'},
(b"--asdfasdfasdf\r\n"
b"Content-Type: application/lunch\r\n"
b"Content-Range: bytes 0-3/10\r\n"
b"\r\n"
b"sand\r\n"
b"--asdfasdfasdf\r\n"
b"Content-Type: application/lunch\r\n"
b"Content-Range: bytes 6-9/10\r\n"
b"\r\n"
b"ches\r\n"
b"--asdfasdfasdf--"))
doc_iters = rh.http_response_to_document_iters(fr)
first_byte, last_byte, length, headers, body = next(doc_iters)
self.assertEqual(first_byte, 0)
self.assertEqual(last_byte, 3)
self.assertEqual(length, 10)
header_dict = HeaderKeyDict(headers)
self.assertEqual(header_dict.get('Content-Type'), 'application/lunch')
self.assertEqual(body.read(), b'sand')
first_byte, last_byte, length, headers, body = next(doc_iters)
self.assertEqual(first_byte, 6)
self.assertEqual(last_byte, 9)
self.assertEqual(length, 10)
header_dict = HeaderKeyDict(headers)
self.assertEqual(header_dict.get('Content-Type'), 'application/lunch')
self.assertEqual(body.read(), b'ches')
self.assertRaises(StopIteration, next, doc_iters)
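
    def test_206_multiple_ranges_loop_sketch(self):
        # A minimal usage sketch, not part of the original suite: the
        # document iterator can also be drained with a plain loop instead
        # of repeated next() calls. The multipart payload below mirrors the
        # fixture used in test_206_multiple_ranges, and each body is read
        # before advancing to the next document.
        fr = FakeResponse(
            206,
            {'Content-Type': 'multipart/byteranges; boundary=asdfasdfasdf'},
            (b"--asdfasdfasdf\r\n"
             b"Content-Type: application/lunch\r\n"
             b"Content-Range: bytes 0-3/10\r\n"
             b"\r\n"
             b"sand\r\n"
             b"--asdfasdfasdf\r\n"
             b"Content-Type: application/lunch\r\n"
             b"Content-Range: bytes 6-9/10\r\n"
             b"\r\n"
             b"ches\r\n"
             b"--asdfasdfasdf--"))
        docs = []
        for first_byte, last_byte, length, headers, body in \
                rh.http_response_to_document_iters(fr):
            docs.append((first_byte, last_byte, length, body.read()))
        self.assertEqual(docs, [(0, 3, 10, b'sand'), (6, 9, 10, b'ches')])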

    def test_update_etag_is_at_header(self):
# start with no existing X-Backend-Etag-Is-At
req = Request.blank('/v/a/c/o')
rh.update_etag_is_at_header(req, 'X-Object-Sysmeta-My-Etag')
self.assertEqual('X-Object-Sysmeta-My-Etag',
req.headers['X-Backend-Etag-Is-At'])
# add another alternate
rh.update_etag_is_at_header(req, 'X-Object-Sysmeta-Ec-Etag')
self.assertEqual('X-Object-Sysmeta-My-Etag,X-Object-Sysmeta-Ec-Etag',
req.headers['X-Backend-Etag-Is-At'])
with self.assertRaises(ValueError) as cm:
rh.update_etag_is_at_header(req, 'X-Object-Sysmeta-,-Bad')
self.assertEqual('Header name must not contain commas',
cm.exception.args[0])

    def test_resolve_etag_is_at_header(self):
def do_test():
req = Request.blank('/v/a/c/o')
# ok to have no X-Backend-Etag-Is-At
self.assertIsNone(rh.resolve_etag_is_at_header(req, metadata))
# ok to have no matching metadata
req.headers['X-Backend-Etag-Is-At'] = 'X-Not-There'
self.assertIsNone(rh.resolve_etag_is_at_header(req, metadata))
# selects from metadata
req.headers['X-Backend-Etag-Is-At'] = 'X-Object-Sysmeta-Ec-Etag'
self.assertEqual('an etag value',
rh.resolve_etag_is_at_header(req, metadata))
req.headers['X-Backend-Etag-Is-At'] = 'X-Object-Sysmeta-My-Etag'
self.assertEqual('another etag value',
rh.resolve_etag_is_at_header(req, metadata))
# first in list takes precedence
req.headers['X-Backend-Etag-Is-At'] = \
'X-Object-Sysmeta-My-Etag,X-Object-Sysmeta-Ec-Etag'
self.assertEqual('another etag value',
rh.resolve_etag_is_at_header(req, metadata))
# non-existent alternates are passed over
req.headers['X-Backend-Etag-Is-At'] = \
'X-Bogus,X-Object-Sysmeta-My-Etag,X-Object-Sysmeta-Ec-Etag'
self.assertEqual('another etag value',
rh.resolve_etag_is_at_header(req, metadata))
# spaces in list are ok
alts = 'X-Foo, X-Object-Sysmeta-My-Etag , X-Object-Sysmeta-Ec-Etag'
req.headers['X-Backend-Etag-Is-At'] = alts
self.assertEqual('another etag value',
rh.resolve_etag_is_at_header(req, metadata))
# lower case in list is ok
alts = alts.lower()
req.headers['X-Backend-Etag-Is-At'] = alts
self.assertEqual('another etag value',
rh.resolve_etag_is_at_header(req, metadata))
# upper case in list is ok
alts = alts.upper()
req.headers['X-Backend-Etag-Is-At'] = alts
self.assertEqual('another etag value',
rh.resolve_etag_is_at_header(req, metadata))
metadata = {'X-Object-Sysmeta-Ec-Etag': 'an etag value',
'X-Object-Sysmeta-My-Etag': 'another etag value'}
do_test()
metadata = dict((k.lower(), v) for k, v in metadata.items())
do_test()
metadata = dict((k.upper(), v) for k, v in metadata.items())
do_test()
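
    def test_update_and_resolve_round_trip_sketch(self):
        # A minimal combined sketch, not part of the original suite. It
        # assumes only the behaviour exercised above: names registered via
        # update_etag_is_at_header are appended comma-separated, and
        # resolve_etag_is_at_header returns the value of the first
        # registered alternate that is present in the metadata, skipping
        # missing ones. 'X-Object-Sysmeta-Missing' is a hypothetical header
        # name used here purely for illustration.
        req = Request.blank('/v/a/c/o')
        rh.update_etag_is_at_header(req, 'X-Object-Sysmeta-Missing')
        rh.update_etag_is_at_header(req, 'X-Object-Sysmeta-My-Etag')
        rh.update_etag_is_at_header(req, 'X-Object-Sysmeta-Ec-Etag')
        metadata = {'X-Object-Sysmeta-Ec-Etag': 'an etag value',
                    'X-Object-Sysmeta-My-Etag': 'another etag value'}
        # the missing alternate is skipped, so the first resolvable one
        # (My-Etag) wins
        self.assertEqual('another etag value',
                         rh.resolve_etag_is_at_header(req, metadata))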