from qtpy import QtCore
from qtpy import QtWidgets
class ToolBar(QtWidgets.QToolBar):
def __init__(self, title):
super(ToolBar, self).__init__(title)
layout = self.layout()
m = (0, 0, 0, 0)
layout.setSpacing(0)
layout.setContentsMargins(*m)
self.setContentsMargins(*m)
self.setWindowFlags(self.windowFlags() | QtCore.Qt.FramelessWindowHint)
def addAction(self, action):
if isinstance(action, QtWidgets.QWidgetAction):
return super(ToolBar, self).addAction(action)
btn = QtWidgets.QToolButton()
btn.setDefaultAction(action)
btn.setToolButtonStyle(self.toolButtonStyle())
self.addWidget(btn)
# center align
for i in range(self.layout().count()):
if isinstance(
self.layout().itemAt(i).widget(), QtWidgets.QToolButton
):
self.layout().itemAt(i).setAlignment(QtCore.Qt.AlignCenter)
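# A minimal usage sketch (an illustrative assumption, not part of the original
# module; assumes a Qt5-style binding where QAction lives in QtWidgets):
#
# if __name__ == "__main__":
#     import sys
#     app = QtWidgets.QApplication(sys.argv)
#     window = QtWidgets.QMainWindow()
#     toolbar = ToolBar("Tools")
#     toolbar.addAction(QtWidgets.QAction("Open", window))
#     window.addToolBar(toolbar)
#     window.show()
#     sys.exit(app.exec_())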
|
# A001 test case - New user registration with user name, email and password
# User login data should be provided in the data/users.txt file. An arbitrary number of users can be tested.
import data.data_tcA001 as da01
import func.func_01 as fu01
from selenium import webdriver
from selenium.webdriver.common.by import By
import time
from selenium.webdriver.chrome.options import Options
from webdriver_manager.chrome import ChromeDriverManager
options = Options()
options.headless = True
driver = webdriver.Chrome(executable_path=ChromeDriverManager().install(), options=options)
driver.get("http://localhost:1667")
# Wait for loading
fu01.wait(driver, By.ID, "app", 1)
# *** TC-A001 **************************************
def test_A001(users):
usern_text = []
for user in users:
fu01.cookie_ok(driver)
fu01.sign_up(driver, user[0], user[1], user[2])
usn_text = fu01.registr_check(driver)
usern_text.append(usn_text)
fu01.close_driver(driver)
return usern_text
user_menu_text = test_A001(da01.users)
list_username = []
for user in da01.users:
list_username.append(user[0])
print(list_username)
# ***************************************************
# Normal run
if __name__ == "__main__":
print(user_menu_text)
try:
assert list_username == user_menu_text
except AssertionError:
print("Error: the values in the verification check do not match.")
|
# Generated by Django 2.0 on 2018-01-12 12:21
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content', models.TextField(max_length=10000)),
('date_posted', models.DateTimeField(auto_now_add=True)),
('creator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='RoomChat',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('admin', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, related_name='owned_rooms', to=settings.AUTH_USER_MODEL)),
('participants', models.ManyToManyField(related_name='chatrooms', to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='message',
name='roomchat',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='roomchat.RoomChat'),
),
]
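# For reference, a models.py sketch that would produce a migration like this
# (an illustrative reconstruction; the real app's models may differ):
#
# from django.conf import settings
# from django.db import models
#
# class RoomChat(models.Model):
#     title = models.CharField(max_length=100)
#     admin = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.DO_NOTHING,
#                               related_name='owned_rooms')
#     participants = models.ManyToManyField(settings.AUTH_USER_MODEL, related_name='chatrooms')
#
# class Message(models.Model):
#     content = models.TextField(max_length=10000)
#     date_posted = models.DateTimeField(auto_now_add=True)
#     creator = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
#     roomchat = models.ForeignKey(RoomChat, on_delete=models.CASCADE)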
|
import functools
import inspect
from operator import methodcaller
import sys
from contextlib import contextmanager
from html import escape as escape_html
from types import MappingProxyType as mappingproxy
from math import ceil
def exc_clear():
# exc_clear was removed in Python 3. The except statement automatically
# clears the exception.
pass
def consistent_round(val):
if (val % 1) >= 0.5:
return ceil(val)
else:
return round(val)
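# Example: Python 3's round() uses banker's rounding, so round(2.5) == 2,
# while consistent_round(2.5) == 3 (halves always round up, as in Python 2).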
update_wrapper = functools.update_wrapper
wraps = functools.wraps
def values_as_list(dictionary):
"""Return the dictionary values as a list without forcing a copy
in Python 2.
"""
return list(dictionary.values())
def getargspec(f):
full_argspec = inspect.getfullargspec(f)
return inspect.ArgSpec(
args=full_argspec.args,
varargs=full_argspec.varargs,
keywords=full_argspec.varkw,
defaults=full_argspec.defaults,
)
unicode = type(u'')
__all__ = [
'consistent_round',
'contextmanager',
'escape_html',
'exc_clear',
'mappingproxy',
'unicode',
'update_wrapper',
'values_as_list',
'wraps',
]
|
from system import System_25D
import os
import util.fill_space
import subprocess
class PassiveInterposer(System_25D):
"""docstring for Passive"""
def __init__(self):
self.chiplet_count = 0
self.width = [None] * self.chiplet_count
self.height = [None] * self.chiplet_count
self.hubump = [None] * self.chiplet_count
self.power = [None] * self.chiplet_count
self.rotation = [None] * self.chiplet_count
self.x = [None] * self.chiplet_count
self.y = [None] * self.chiplet_count
self.link_type = 'nppl'
self.length_threshold = 9
def gen_flp(self, filename):
# material properties
UnderFill = "\t2.32E+06\t0.625\n"
Copper = "\t3494400\t0.0025\n"
Silicon = "\t1.75E+06\t0.01\n"
resistivity_Cu = 0.0025
resistivity_UF = 0.625
resistivity_Si = 0.01
specHeat_Cu = 3494400
specHeat_UF = 2320000
specHeat_Si = 1750000
C4_diameter = 0.000250 #250um
C4_edge = 0.000600 #600um
TSV_diameter = 0.000010 #10um
TSV_edge = 0.000050 #50um
ubump_diameter = 0.000025 #25um
ubump_edge = 0.000045 #45um
Aratio_C4 = (C4_edge/C4_diameter)*(C4_edge/C4_diameter)-1 # ratio of white area and C4 area
Aratio_TSV= (TSV_edge/TSV_diameter)*(TSV_edge/TSV_diameter)-1
Aratio_ubump=(ubump_edge/ubump_diameter)*(ubump_edge/ubump_diameter)-1
resistivity_C4=(1+Aratio_C4)*resistivity_Cu*resistivity_UF/(resistivity_UF+Aratio_C4*resistivity_Cu)
resistivity_TSV=(1+Aratio_TSV)*resistivity_Cu*resistivity_Si/(resistivity_Si+Aratio_TSV*resistivity_Cu)
resistivity_ubump=(1+Aratio_ubump)*resistivity_Cu*resistivity_UF/(resistivity_UF+Aratio_ubump*resistivity_Cu)
specHeat_C4=(specHeat_Cu+Aratio_C4*specHeat_UF)/(1+Aratio_C4)
specHeat_TSV=(specHeat_Cu+Aratio_TSV*specHeat_Si)/(1+Aratio_TSV)
specHeat_ubump=(specHeat_Cu+Aratio_ubump*specHeat_UF)/(1+Aratio_ubump)
mat_C4 = "\t"+str(specHeat_C4)+"\t"+str(resistivity_C4)+"\n"
mat_TSV = "\t"+str(specHeat_TSV)+"\t"+str(resistivity_TSV)+"\n"
mat_ubump = "\t"+str(specHeat_ubump)+"\t"+str(resistivity_ubump)+"\n"
with open(self.path + filename+ 'L0_Substrate.flp','w') as L0_Substrate:
L0_Substrate.write("# Floorplan for Substrate Layer with size "+str(self.intp_size/1000)+"x"+str(self.intp_size/1000)+" m\n")
L0_Substrate.write("# Line Format: <unit-name>\\t<width>\\t<height>\\t<left-x>\\t<bottom-y>\\t[<specific-heat>]\\t[<resistivity>]\n")
L0_Substrate.write("# all dimensions are in meters\n")
L0_Substrate.write("# comment lines begin with a '#' \n")
L0_Substrate.write("# comments and empty lines are ignored\n\n")
L0_Substrate.write("Substrate\t"+str(self.intp_size/1000)+"\t"+str(self.intp_size/1000)+"\t0.0\t0.0\n")
# os.system("perl util/tofig.pl -f 20 "+self.path+filename+"L0_Substrate.flp | fig2dev -L ps | ps2pdf - "+self.path+filename+"L0_Substrate.pdf")
with open(self.path+filename +'L1_C4Layer.flp','w') as L1_C4Layer:
L1_C4Layer.write("# Floorplan for C4 Layer \n")
L1_C4Layer.write("# Line Format: <unit-name>\\t<width>\\t<height>\\t<left-x>\\t<bottom-y>\\t[<specific-heat>]\\t[<resistivity>]\n")
L1_C4Layer.write("# all dimensions are in meters\n")
L1_C4Layer.write("# comment lines begin with a '#' \n")
L1_C4Layer.write("# comments and empty lines are ignored\n\n")
L1_C4Layer.write("C4Layer\t"+str(self.intp_size / 1000)+"\t"+str(self.intp_size / 1000)+"\t0.0\t0.0"+mat_C4)
# os.system("perl util/tofig.pl -f 20 "+self.path+filename+"L1_C4Layer.flp | fig2dev -L ps | ps2pdf - "+self.path+filename+"L1_C4Layer.pdf")
with open(self.path+filename +'L2_Interposer.flp','w') as L2_Interposer:
L2_Interposer.write("# Floorplan for Silicon Interposer Layer\n")
L2_Interposer.write("# Line Format: <unit-name>\\t<width>\\t<height>\\t<left-x>\\t<bottom-y>\\t[<specific-heat>]\\t[<resistivity>]\n")
L2_Interposer.write("# all dimensions are in meters\n")
L2_Interposer.write("# comment lines begin with a '#' \n")
L2_Interposer.write("# comments and empty lines are ignored\n\n")
L2_Interposer.write("Interposer\t"+str(self.intp_size / 1000)+"\t"+str(self.intp_size / 1000)+"\t0.0\t0.0"+mat_TSV)
# os.system("perl util/tofig.pl -f 20 "+self.path+filename+"L2_Interposer.flp | fig2dev -L ps | ps2pdf - "+self.path+filename+"L2_Interposer.pdf")
with open(self.path+filename + 'sim.flp','w') as SIMP:
with open(self.path + filename + 'L3.flp', 'w') as L3_UbumpLayer:
with open(self.path + filename + 'L4.flp', 'w') as L4_ChipLayer:
L3_UbumpLayer.write("# Floorplan for Microbump Layer \n")
L3_UbumpLayer.write("# Line Format: <unit-name>\\t<width>\\t<height>\\t<left-x>\\t<bottom-y>\\t[<specific-heat>]\\t[<resistivity>]\n")
L3_UbumpLayer.write("# all dimensions are in meters\n")
L3_UbumpLayer.write("# comment lines begin with a '#' \n")
L3_UbumpLayer.write("# comments and empty lines are ignored\n\n")
L4_ChipLayer.write("# Floorplan for Chip Layer\n")
L4_ChipLayer.write("# Line Format: <unit-name>\\t<width>\\t<height>\\t<left-x>\\t<bottom-y>\\t[<specific-heat>]\\t[<resistivity>]\n")
L4_ChipLayer.write("# all dimensions are in meters\n")
L4_ChipLayer.write("# comment lines begin with a '#' \n")
L4_ChipLayer.write("# comments and empty lines are ignored\n\n")
L3_UbumpLayer.write('Edge_0\t' + str(self.intp_size / 1000 - self.granularity / 1000) + '\t' + str(self.granularity / 2 / 1000) + '\t'+str(self.granularity/2/1000)+'\t0\t' + mat_ubump)
L3_UbumpLayer.write('Edge_1\t' + str(self.intp_size / 1000 - self.granularity / 1000) + '\t' + str(self.granularity / 2 / 1000) + '\t'+str(self.granularity/2/1000)+'\t'+ str(self.intp_size / 1000 - self.granularity / 2 / 1000)+'\t' + mat_ubump)
L3_UbumpLayer.write('Edge_2\t' + str(self.granularity / 2 / 1000) + '\t' + str(self.intp_size / 1000) + '\t0\t0\t' + mat_ubump)
L3_UbumpLayer.write('Edge_3\t' + str(self.granularity / 2 / 1000) + '\t' + str(self.intp_size / 1000) + '\t'+str(self.intp_size/1000-self.granularity / 2/1000)+'\t0\t' + mat_ubump)
L4_ChipLayer.write('Edge_0\t' + str(self.intp_size / 1000 - self.granularity / 1000) + '\t' + str(self.granularity / 2 / 1000) + '\t'+str(self.granularity/2/1000)+'\t0\t' + mat_ubump)
L4_ChipLayer.write('Edge_1\t' + str(self.intp_size / 1000 - self.granularity / 1000) + '\t' + str(self.granularity / 2 / 1000) + '\t'+str(self.granularity/2/1000)+'\t'+ str(self.intp_size / 1000 - self.granularity / 2 / 1000)+'\t' + mat_ubump)
L4_ChipLayer.write('Edge_2\t' + str(self.granularity / 2 / 1000) + '\t' + str(self.intp_size / 1000) + '\t0\t0\t' + mat_ubump)
L4_ChipLayer.write('Edge_3\t' + str(self.granularity / 2 / 1000) + '\t' + str(self.intp_size / 1000) + '\t'+str(self.intp_size/1000-self.granularity / 2/1000)+'\t0\t' + mat_ubump)
x_offset0, y_offset0 = self.granularity / 2 / 1000, self.granularity / 2 / 1000
index_ubump = 0
for i in range(0, self.chiplet_count):
x_offset1 = self.x[i] / 1000 - self.width[i] / 1000 * 0.5 - self.hubump[i] / 1000
y_offset1 = self.y[i] / 1000 - self.height[i] / 1000 * 0.5 - self.hubump[i] / 1000
if self.hubump[i] > 0:
L3_UbumpLayer.write("Ubump_"+str(index_ubump)+"\t"+str(self.width[i] / 1000 + self.hubump[i] / 1000)+"\t"+str(self.hubump[i] / 1000)+"\t"+str(x_offset1)+"\t"+str(y_offset1)+mat_ubump)
L3_UbumpLayer.write("Ubump_"+str(index_ubump+1)+"\t"+str(self.hubump[i] / 1000)+"\t"+str(self.height[i] / 1000 + self.hubump[i] / 1000)+"\t"+str(x_offset1)+"\t"+str(y_offset1+self.hubump[i] / 1000)+mat_ubump)
L3_UbumpLayer.write("Ubump_"+str(index_ubump+2)+"\t"+str(self.hubump[i] / 1000)+"\t"+str(self.height[i] / 1000 + self.hubump[i] / 1000)+"\t"+str(x_offset1+self.width[i] / 1000 + self.hubump[i] / 1000)+"\t"+str(y_offset1)+mat_ubump)
L3_UbumpLayer.write("Ubump_"+str(index_ubump+3)+"\t"+str(self.width[i] / 1000 + self.hubump[i] / 1000)+"\t"+str(self.hubump[i] / 1000)+"\t"+str(x_offset1+self.hubump[i] / 1000)+"\t"+str(y_offset1+self.height[i] / 1000 + self.hubump[i] / 1000)+mat_ubump)
L4_ChipLayer.write("Ubump_"+str(index_ubump)+"\t"+str(self.width[i] / 1000 + self.hubump[i] / 1000)+"\t"+str(self.hubump[i] / 1000)+"\t"+str(x_offset1)+"\t"+str(y_offset1)+Silicon)
L4_ChipLayer.write("Ubump_"+str(index_ubump+1)+"\t"+str(self.hubump[i] / 1000)+"\t"+str(self.height[i] / 1000 + self.hubump[i] / 1000)+"\t"+str(x_offset1)+"\t"+str(y_offset1+self.hubump[i] / 1000)+Silicon)
L4_ChipLayer.write("Ubump_"+str(index_ubump+2)+"\t"+str(self.hubump[i] / 1000)+"\t"+str(self.height[i] / 1000 + self.hubump[i] / 1000)+"\t"+str(x_offset1+self.width[i] / 1000 + self.hubump[i] / 1000)+"\t"+str(y_offset1)+Silicon)
L4_ChipLayer.write("Ubump_"+str(index_ubump+3)+"\t"+str(self.width[i] / 1000 + self.hubump[i] / 1000)+"\t"+str(self.hubump[i] / 1000)+"\t"+str(x_offset1+self.hubump[i] / 1000)+"\t"+str(y_offset1+self.height[i] / 1000 + self.hubump[i] / 1000)+Silicon)
index_ubump += 4
# The microbump density for the center region is unknown; assume it matches the edge area for now. Update if the microbump pitch for center power/gnd/clk is found.
L3_UbumpLayer.write("Chiplet_"+str(i)+"\t"+str(self.width[i] / 1000)+"\t"+str(self.height[i] / 1000)+"\t"+str(x_offset1 + self.hubump[i] / 1000)+"\t"+str(y_offset1+self.hubump[i] / 1000)+mat_ubump)
L4_ChipLayer.write("Chiplet_"+str(i)+"\t"+str(self.width[i] / 1000)+"\t"+str(self.height[i] / 1000)+"\t"+str(x_offset1 + self.hubump[i] / 1000)+"\t"+str(y_offset1+self.hubump[i] / 1000)+Silicon)
SIMP.write("Unit_"+str(i)+"\t"+str(self.width[i] / 1000 + 2 * self.hubump[i] / 1000)+"\t"+str(self.height[i] / 1000 + 2 * self.hubump[i] / 1000)+"\t"+str(x_offset1)+"\t"+str(y_offset1)+"\n")
# os.system("perl util/tofig.pl -f 20 "+self.path+filename+"L3.flp | fig2dev -L ps | ps2pdf - "+self.path+filename+"L3.pdf")
# os.system("perl util/tofig.pl -f 20 "+self.path+filename+"L4.flp | fig2dev -L ps | ps2pdf - "+self.path+filename+"L4.pdf")
util.fill_space.fill_space(x_offset0, self.intp_size / 1000 - x_offset0, y_offset0, self.intp_size / 1000 - y_offset0, self.path+filename+'sim', self.path+filename+'L3', self.path+filename+'L3_UbumpLayer')
util.fill_space.fill_space(x_offset0, self.intp_size / 1000 - x_offset0, y_offset0, self.intp_size / 1000 - y_offset0, self.path+filename+'sim', self.path+filename+'L4', self.path+filename+'L4_ChipLayer')
# os.system("perl util/tofig.pl -f 20 "+self.path+filename+"L3_UbumpLayer.flp | fig2dev -L ps | ps2pdf - "+self.path+filename+"L3_UbumpLayer.pdf")
# os.system("perl util/tofig.pl -f 20 "+self.path+filename+"L4_ChipLayer.flp | fig2dev -L ps | ps2pdf - "+self.path+filename+"L4_ChipLayer.pdf")
with open(self.path+filename +'L5_TIM.flp','w') as L5_TIM:
L5_TIM.write("# Floorplan for TIM Layer \n")
L5_TIM.write("# Line Format: <unit-name>\\t<width>\\t<height>\\t<left-x>\\t<bottom-y>\\t[<specific-heat>]\\t[<resistivity>]\n")
L5_TIM.write("# all dimensions are in meters\n")
L5_TIM.write("# comment lines begin with a '#' \n")
L5_TIM.write("# comments and empty lines are ignored\n\n")
L5_TIM.write("TIM\t"+str(self.intp_size / 1000)+"\t"+str(self.intp_size / 1000)+"\t0.0\t0.0\n")
# os.system("perl util/tofig.pl -f 20 "+self.path+filename+"L5_TIM.flp | fig2dev -L ps | ps2pdf - "+self.path+filename+"L5_TIM.pdf")
with open(self.path+filename + 'layers.lcf','w') as LCF:
LCF.write("# File Format:\n")
LCF.write("#<Layer Number>\n")
LCF.write("#<Lateral heat flow Y/N?>\n")
LCF.write("#<Power Dissipation Y/N?>\n")
LCF.write("#<Specific heat capacity in J/(m^3K)>\n")
LCF.write("#<Resistivity in (m-K)/W>\n")
LCF.write("#<Thickness in m>\n")
LCF.write("#<floorplan file>\n")
LCF.write("\n# Layer 0: substrate\n0\nY\nN\n1.06E+06\n3.33\n0.0002\n"+self.path+filename+"L0_Substrate.flp\n")
LCF.write("\n# Layer 1: Epoxy SiO2 underfill with C4 copper pillar\n1\nY\nN\n2.32E+06\n0.625\n0.00007\n"+self.path+filename+"L1_C4Layer.flp\n")
LCF.write("\n# Layer 2: silicon interposer\n2\nY\nN\n1.75E+06\n0.01\n0.00011\n"+self.path+filename+"L2_Interposer.flp\n")
LCF.write("\n# Layer 3: Underfill with ubump\n3\nY\nN\n2.32E+06\n0.625\n1.00E-05\n"+self.path+filename+"L3_UbumpLayer.flp\n")
LCF.write("\n# Layer 4: Chip layer\n4\nY\nY\n1.75E+06\n0.01\n0.00015\n"+self.path+filename+"L4_ChipLayer.flp\n")
LCF.write("\n# Layer 5: TIM\n5\nY\nN\n4.00E+06\n0.25\n2.00E-05\n"+self.path+filename+"L5_TIM.flp\n")
if not os.path.isfile(self.path + 'new_hotspot.config'):
with open('util/hotspot.config','r') as Config_in:
with open(self.path + 'new_hotspot.config','w') as Config_out:
size_spreader = 2 * self.intp_size / 1000
size_heatsink = 2 * size_spreader
r_convec = 0.1 * 0.06 * 0.06 / size_heatsink / size_heatsink #0.1*0.06*0.06 are from default hotspot.config file
for line in Config_in:
if line == ' -s_sink 0.06\n':
Config_out.write(line.replace('0.06',str(size_heatsink)))
elif line == ' -s_spreader 0.03\n':
Config_out.write(line.replace('0.03',str(size_spreader)))
elif line == ' -r_convec 0.1\n':
Config_out.write(line.replace('0.1',str(r_convec)))
else:
Config_out.write(line)
def gen_ptrace(self, filename):
num_component = 0
component, component_name, component_index = [], [], []
# Read components from flp file
with open (self.path + filename + 'L4_ChipLayer.flp','r') as FLP:
for line in FLP:
line_sp = line.split()
if line_sp:
if line_sp[0] != '#':
component.append(line_sp[0])
comp = component[num_component].split('_')
component_name.append(comp[0])
component_index.append(int(comp[1]))
num_component+=1
with open (self.path + filename + '.ptrace','w') as Ptrace:
# Write ptrace header
for i in range(0,num_component):
# if component_name[i] == 'Core':
Ptrace.write(component[i]+'\t')
Ptrace.write('\n')
for i in range(0,num_component):
if component_name[i] == 'Chiplet':
Ptrace.write(str(self.power[component_index[i]])+'\t')
# elif component_name[i] == 'Ubump':
# Ptrace.write('0.3\t')
else:
Ptrace.write('0\t')
Ptrace.write('\n')
def run_hotspot(self, filename):
proc = subprocess.Popen(["./util/hotspot",
"-c",self.path+"new_hotspot.config",
"-f",self.path+filename+"L4_ChipLayer.flp",
"-p",self.path+filename+".ptrace",
"-steady_file",self.path+filename+".steady",
"-grid_steady_file",self.path+filename+".grid.steady",
"-model_type","grid",
"-detailed_3D","on",
"-grid_layer_file",self.path+filename+"layers.lcf"],
stdout=subprocess.PIPE, stderr = subprocess.PIPE)
stdout, stderr = proc.communicate()
outlist = stdout.split()
return (max(list(map(float,outlist[3::2])))-273.15)
def clean_hotspot(self, filename):
os.system('rm ' + self.path + filename + '{*.flp,*.lcf,*.ptrace,*.steady}')
def compute_ubump_overhead(self):
# print (self.link_type)
connection = self.connection_matrix
for i in range(self.chiplet_count):
assert connection[i][i] == 0, 'a link from and to the same chiplet is not allowed'
s = 0
for j in range(self.chiplet_count):
s += connection[i][j] + connection[j][i]
if self.link_type == 'ppl':
s *= 2
h = 1
w_stretch = 0.045 * h
while ((self.width[i] + self.height[i]) * 2 * w_stretch + 4 * w_stretch * w_stretch) / 0.045 / 0.045 < s:
h += 1
w_stretch = 0.045 * h
if h > 1000:
print ('microbump is too high to be a feasible case')
exit()
# print (i, s, self.width[i], self.height[i], h, w_stretch)
self.hubump[i] = w_stretch
# print (self.hubump)
def set_link_type(self, link_type):
self.link_type = link_type |
"""
Experiment for NN4(RI)
Aim: To find the best max_epochs for NN4(*, 1024, 1024, 1024) + RI(k = 3, m = 200)
max_epochs: [22, 24, ... ,98, 140]
Averaging 20 models
Summary
epochs 88 , loss 0.421860471364
Time:3:40:30 on i7-4790k 32G MEM GTX660
I got a different result, epochs 112 loss 0.422868, before I reinstalled ubuntu 14.04 LTS.
So I chose max_epochs = 112.
"""
import numpy as np
import scipy as sp
import pandas as pd
from pylearn2.models import mlp
from pylearn2.models.mlp import RectifiedLinear, Softmax, MLP
from pylearn2.costs.mlp.dropout import Dropout
from pylearn2.training_algorithms import sgd, learning_rule
from pylearn2.termination_criteria import EpochCounter
from pylearn2.datasets import DenseDesignMatrix
from pylearn2.train import Train
from theano.compat.python2x import OrderedDict
import theano.tensor as T
from theano import function
import pickle
import sklearn.preprocessing as pp
from sklearn import cross_validation
from sklearn.cross_validation import StratifiedKFold
from sklearn.preprocessing import scale
from sklearn.metrics import log_loss
from sklearn.grid_search import ParameterGrid
from datetime import datetime
import os
from utility import *
from predict import predict
import pylab
path = os.getcwd() + '/'
path_log = path + 'logs/'
file_train = path + 'train.csv'
training = pd.read_csv(file_train, index_col = 0)
num_train = training.shape[0]
y = training['target'].values
yMat = pd.get_dummies(training['target']).values
X = training.iloc[:,:93].values
scaler = pp.StandardScaler()
X2 = scaler.fit_transform(X ** .6)
kf = cross_validation.StratifiedKFold(y, n_folds=5, shuffle = True, random_state = 345)
for train_idx, valid_idx in kf:
break
y_train = yMat[train_idx]
y_valid = yMat[valid_idx]
training = DenseDesignMatrix(X = X2[train_idx], y = y_train)
valid = DenseDesignMatrix(X = X2[valid_idx], y = y_valid)
# [l1, l2, l3, l4, output]
nIter = 20
# Params for RI
m = 200
k = 3
# Params for NN
epochs = 20
epochs_add = 2
n_add = 60
bs = 64
mm = .97
lr = .01
dim2 = 1024
ir1 = .01
ir2 = .05
ip = .8
ir_out = .05
mcn_out = 2.5
scores = []
t0 = datetime.now()
predAll = [np.zeros(y_valid.shape) for s in range(n_add)]
for i in range(nIter):
seed = i + 3819
R = RImatrix(X.shape[1], m, k, rm_dup_cols = True, seed = seed)
R = np.abs(R.todense().astype(np.float32))
dim1 = R.shape[1]
l1 = RectifiedLinear(layer_name='l1', irange = ir1, dim = dim1, mask_weights = R)
l2 = RectifiedLinear(layer_name='l2', irange = ir2, dim = dim2, max_col_norm = 1.)
l3 = RectifiedLinear(layer_name='l3', irange = ir2, dim = dim2, max_col_norm = 1.)
l4 = RectifiedLinear(layer_name='l4', irange = ir2, dim = dim2, max_col_norm = 1.)
output = Softmax(layer_name='y', n_classes = 9, irange = ir_out,
max_col_norm = mcn_out)
mdl = MLP([l1, l2, l3, l4, output], nvis = X2.shape[1])
trainer = sgd.SGD(learning_rate=lr,
batch_size=bs,
learning_rule=learning_rule.Momentum(mm),
cost=Dropout(input_include_probs = {'l1':1.},
input_scales = {'l1':1.},
default_input_include_prob=ip,
default_input_scale=1/ip),
termination_criterion=EpochCounter(epochs),seed = seed)
decay = sgd.LinearDecayOverEpoch(start=2, saturate=20, decay_factor= .1)
experiment = Train(dataset = training, model=mdl, algorithm=trainer, extensions=[decay])
experiment.main_loop()
epochs_current = epochs
for s in range(n_add):
del mdl.monitor
trainer = sgd.SGD(learning_rate=lr * .1,
batch_size=bs,
learning_rule=learning_rule.Momentum(mm),
cost=Dropout(input_include_probs = {'l1':1.},
input_scales = {'l1':1.},
default_input_include_prob=ip,
default_input_scale=1/ip),
termination_criterion=EpochCounter(epochs_add),seed = seed)
experiment = Train(dataset = training, model=mdl, algorithm=trainer)
experiment.main_loop()
epochs_current += epochs_add
pred_train = predict(mdl, X2[train_idx].astype(np.float32))
pred_valid = predict(mdl, X2[valid_idx].astype(np.float32))
predAll[s] += pred_valid
scores.append({'epochs':epochs_current, 'nModels':i + 1, 'seed':seed,
'train':log_loss(y_train, pred_train),
'valid':log_loss(y_valid, pred_valid),
'valid_avg':log_loss(y_valid, predAll[s] / (i + 1))})
print(scores[-1], datetime.now() - t0)
df = pd.DataFrame(scores)
if not os.path.exists(path_log):
print('mkdir', path_log)
os.mkdir(path_log)
df.to_csv(path_log + 'exp_NN4_RI_max_epochs.csv')
keys = ['epochs']
grouped = df.groupby(keys)
print('epochs',grouped['valid_avg'].last().idxmin(),', loss',grouped['valid_avg'].last().min())
# epochs 88 , loss 0.421860471364
g = grouped[['train', 'valid']].mean()
g['valid_avg'] = grouped['valid_avg'].last()
print(g.iloc[[0,1,32,33,34,58,59],:])
# train valid valid_avg
# epochs
# 22 0.319737 0.468458 0.436766
# 24 0.313538 0.468300 0.435694
# 86 0.193640 0.486078 0.422321
# 88 0.190694 0.487625 0.421860
# 90 0.187374 0.487897 0.421998
# 138 0.134388 0.512527 0.423662
# 140 0.132642 0.514666 0.425003
ax = g.plot()
ax.set_title('NN4(RI) m=200, k=3')
ax.set_ylabel('Logloss')
fig = ax.get_figure()
fig.savefig(path_log + 'exp_NN4_RI_max_epochs.png')
|
"""
SECS utils
"""
import numpy as np
d2r = np.pi/180
MU0 = 4 * np.pi * 1e-7
RE = 6371.2 * 1e3
def dpclip(x, delta = 1e-7):
"""
dot product clip:
clip x to values between -1 + delta and 1 - delta
"""
return np.clip(x, -1 + delta, 1 - delta)
def get_theta(lat, lon, lat_secs, lon_secs, return_degrees = False):
"""" calculate theta angle - the angle between data point and secs node.
Parameters
----------
lat: array-like
Array of latitudes of evaluation points [deg]
Flattened array must have same size as lon
lon: array-like
Array of longitudes of evaluation points [deg].
Flattened array must have same size as lat
lat_secs: array-like
Array of SECS pole latitudes [deg]
Flattened array must have same size as lon_secs
lon_secs: array-like
Array of SECS pole longitudes [deg]
Flattened array must have the same size as lat_secs
Output will be a 2D array with shape (lat.size, lat_secs.size)
return_degrees: bool, optional
Set to True if you want output in degrees. Default is False (radians)
Returns
-------
theta: 2D array (lat.size, lat_secs.size)
Array of polar angles, angular distances between the points
described by (lat, lon) and the points described by
(lat_secs, lon_secs). Unit in radians unless return_degrees is set
to True
"""
# reshape angles and convert to radians:
la = np.array(lat).flatten()[:, np.newaxis] * d2r
lo = np.array(lon).flatten()[:, np.newaxis] * d2r
la_s = np.array(lat_secs).flatten()[np.newaxis, :] * d2r
lo_s = np.array(lon_secs).flatten()[np.newaxis, :] * d2r
# ECEF position vectors of data points - should be N by 3, where N is number of data points
ecef_r_data = np.hstack((np.cos(la ) * np.cos(lo ), np.cos(la ) * np.sin(lo ), np.sin(la )))
# position vectors SECS poles - should be 3 by M, where M is number of SECS - these are the z axes of each SECS
ecef_r_secs = np.vstack((np.cos(la_s) * np.cos(lo_s), np.cos(la_s) * np.sin(lo_s), np.sin(la_s))).T
# the polar angles (N, M):
theta = np.arccos(dpclip(np.einsum('ij, kj -> ik', ecef_r_data, ecef_r_secs)))
if return_degrees:
theta = theta / d2r
return theta
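# A minimal usage sketch (illustrative coordinates, not part of the original module):
# theta = get_theta(lat=[60.0, 65.0], lon=[10.0, 20.0],
#                   lat_secs=[62.0, 64.0, 66.0], lon_secs=[15.0, 15.0, 15.0])
# theta.shape == (2, 3); each entry is the angular distance, in radians,
# between an evaluation point and a SECS pole.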
def get_SECS_J_G_matrices(lat, lon, lat_secs, lon_secs,
current_type = 'divergence_free', constant = 1./(4*np.pi),
RI = RE + 110 * 1e3,
singularity_limit = 0):
""" Calculate matrices Ge and Gn which relate SECS amplitudes to current density
vector components.
Parameters
----------
lat: array-like
Array of latitudes of evaluation points [deg]
Flattened array must have same size as lon
lon: array-like
Array of longitudes of evaluation points [deg].
Flattened array must have same size as lat
lat_secs: array-like
Array of SECS pole latitudes [deg]
Flattened array must have same size as lon_secs
lon_secs: array-like
Array of SECS pole longitudes [deg]
Flattened array must have the same size as lat_secs
current_type: string, optional
The type of SECS function. This must be either
'divergence_free' (default): divergence-free basis functions
'curl_free': curl-free basis functions
'potential': scalar field whose negative gradient is curl-free SECS
'scalar': the scalar part of the SECS function, constant / tan(theta/2)
constant: float, optional
The SECS functions are scaled by the factor 1/(4pi), which is
the default value of 'constant'. Change if you want something
different.
RI: float (optional)
Radius of SECS poles. Default is Earth radius + 110,000 m
singularity_limit: float (optional)
A modified version of the SECS functions will be used at
points that are closer than singularity_limit. The modification
is given by equations 2.43 (CF) and 2.44 (DF) in Vanhamaki and
Juusola (2020), and singularity_limit / RI is equal to theta0
in these equations. Default is 0, which means that the original
version of the SECS functions are used (with singularities).
singularity_limit is ignored if current_type is 'potential' or 'scalar'
Returns
-------
If current_type is 'divergence_free' or 'curl_free':
Ge: 2D array
2D array with shape (lat.size, lat_secs.size), relating SECS amplitudes
m to the eastward current densities at (lat, lon) via 'je = Ge.dot(m)'
Gn: 2D array
2D array with shape (lat.size, lat_secs.size), relating SECS amplitudes
m to the northward current densities at (lat, lon) via 'jn = Gn.dot(m)'
If current_type is 'potential' or 'scalar':
G: 2D array
2D array with shape (lat.size, lat_secs.size), relating amplitudes m
to scalar field magnitude at (lat, lon) via 'z = G.dot(m)'
"""
# reshape angles and convert to radians:
la = np.array(lat).flatten()[:, np.newaxis] * d2r
lo = np.array(lon).flatten()[:, np.newaxis] * d2r
la_s = np.array(lat_secs).flatten()[np.newaxis, :] * d2r
lo_s = np.array(lon_secs).flatten()[np.newaxis, :] * d2r
# ECEF position vectors of data points - should be N by 3, where N is number of data points
ecef_r_data = np.hstack((np.cos(la ) * np.cos(lo ), np.cos(la ) * np.sin(lo ), np.sin(la )))
# position vectors SECS poles - should be 3 by M, where M is number of SECS - these are the z axes of each SECS
ecef_r_secs = np.vstack((np.cos(la_s) * np.cos(lo_s), np.cos(la_s) * np.sin(lo_s), np.sin(la_s))).T
# unit vector pointing from SECS to data points - (M, N, 3)
ecef_t = ecef_r_secs[np.newaxis, :, :] - ecef_r_data[:, np.newaxis, :] # difference vector - not tangential yet
ecef_t = ecef_t - np.einsum('ijk,ik->ij', ecef_t, ecef_r_data)[:, :, np.newaxis] * ecef_r_data[:, np.newaxis, :] # subtract radial part of the vector to make it tangential
ecef_t = ecef_t/np.linalg.norm(ecef_t, axis = 2)[:, :, np.newaxis] # normalize the result
# make N rotation matrices to rotate ecef_t to enu_t - one rotation matrix per SECS:
R = np.hstack( (np.dstack((-np.sin(lo) , np.cos(lo) , np.zeros_like(la) )),
np.dstack((-np.cos(lo) * np.sin(la), -np.sin(lo) * np.sin(la), np.cos( la) )),
np.dstack(( np.cos(lo) * np.cos(la), np.sin(lo) * np.cos(la), np.sin( la) ))) )
# apply rotation matrices to make enu vectors pointing from data points to SECS
enu_t = np.einsum('lij, lkj->lki', R, ecef_t)[:, :, :-1] # remove last component (up), which should deviate from zero only by machine precision
if current_type == 'divergence_free':
# rotate these vectors to get vectors pointing eastward with respect to SECS systems at each data point
enu_vec = np.dstack((enu_t[:, :, 1], -enu_t[:, :, 0])) # north -> east and east -> south
elif current_type == 'curl_free':
enu_vec = -enu_t # outward from SECS
elif current_type in ['potential', 'scalar']:
enu_vec = 1
else:
raise Exception('type must be "divergence_free", "curl_free", "potential", or "scalar"')
# get the scalar part of Amm's divergence-free SECS:
theta = np.arccos(dpclip(np.einsum('ij,kj->ik', ecef_r_secs, ecef_r_data)))
if current_type in ['divergence_free', 'curl_free']:
coeff = constant /np.tan(theta/2)/ RI
# apply modifications to handle singularities:
theta0 = singularity_limit / RI
if theta0 > 0:
alpha = 1 / np.tan(theta0/2)**2
coeff[theta < theta0] = constant * alpha * np.tan(theta[theta < theta0]/2) / RI
# G matrices
Ge = coeff * enu_vec[:, :, 0].T
Gn = coeff * enu_vec[:, :, 1].T
return Ge.T, Gn.T
else: # current_type is 'potential' or 'scalar'
if current_type == 'potential':
return -2*constant*np.log(np.sin(theta/2)).T
elif current_type == 'scalar':
return constant / np.tan(theta/2).T
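# A minimal usage sketch (illustrative values, not part of the original module):
# Ge, Gn = get_SECS_J_G_matrices(lat=[70.0], lon=[20.0],
#                                lat_secs=[69.0, 71.0], lon_secs=[18.0, 22.0])
# m = np.array([1e4, -5e3])        # hypothetical SECS amplitudes
# je, jn = Ge.dot(m), Gn.dot(m)    # eastward / northward current densities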
def get_SECS_B_G_matrices(lat, lon, r, lat_secs, lon_secs,
current_type = 'divergence_free', constant = 1./(4*np.pi),
RI = RE + 110 * 1e3,
singularity_limit = 0,
induction_nullification_radius = None):
""" Calculate matrices Ge, Gn, and Gr which relate SECS amplitudes to magnetic field
Based on equations (9) and (10) of Amm and Viljanen 1999, or (2.13)-(2.14) in Vanhamaki
and Juusola 2020.
If singularity_limit > 0, the magnetic field of curl-free currents is modified, but
not the magnetic field of divergence-free currents (!). See Section 2.10.2 and
equation (2.46) in Vanhamaki and Juusola 2020.
Parameters
----------
lat: array-like
Array of latitudes of evaluation points [deg]
Flattened array must have same size as lon
lon: array-like
Array of longitudes of evaluation points [deg].
Flattened array must have same size as lat
r: array-like
Array of radii of evaluation points. Flattened
array must either have size 1, in which case one
radius is used for all points, or have same size as
lat. Unit should be the same as RI
lat_secs: array-like
Array of SECS pole latitudes [deg]
Flattened array must have same size as lon_secs
lon_secs: array-like
Array of SECS pole longitudes [deg]
Flattened array must have the same size as lat_secs
current_type: string, optional
The type of SECS function. This must be either
'divergence_free' (default): divergence-free basis functions
'curl_free': curl-free basis functions
constant: float, optional
The SECS functions are scaled by the factor 1/(4pi), which is
the default value of 'constant'. Change if you want something
different.
RI: float (optional)
Radius of SECS poles. Default is Earth radius + 110,000 m
singularity_limit: float (optional)
A modified version of the SECS functions will be used at
points that are closer than singularity_limit. The modification
is given by equations 2.43 (CF) and 2.44 (DF) in Vanhamaki and
Juusola (2020), and singularity_limit / RI is equal to theta0
in these equations. Default is 0, which means that the original
version of the SECS functions are used (with singularities).
induction_nullification_radius: float or None, optional
The radius at which ground induced image currents cancel the radial
magnetic field. Default in None, in which case there are no
induced image currents. This part is based on equations of Appendix A
in "Juusola, L., Kauristie, K., Vanhamäki, H., Aikio, A., and
van de Kamp, M. (2016), Comparison of auroral ionospheric and field‐
aligned currents derived from Swarm and ground magnetic field measurements,
J. Geophys. Res. Space Physics, 121, 9256– 9283, doi:10.1002/2016JA022961."
Returns
-------
Ge: 2D array
2D array with shape (lat.size, lat_secs.size), relating SECS amplitudes
m to the eastward magnetic field at (lat, lon) via 'Be = Ge.dot(m)'
Gn: 2D array
2D array with shape (lat.size, lat_secs.size), relating SECS amplitudes
m to the northward magnetic field at (lat, lon) via 'Bn = Gn.dot(m)'
Gr: 2D array
2D array with shape (lat.size, lat_secs.size), relating SECS amplitudes
m to the radial magnetic field at (lat, lon) via 'Br = Gr.dot(m)'
"""
# reshape angles and convert to radians:
la = np.array(lat).flatten()[:, np.newaxis] * d2r
lo = np.array(lon).flatten()[:, np.newaxis] * d2r
la_s = np.array(lat_secs).flatten()[np.newaxis, :] * d2r
lo_s = np.array(lon_secs).flatten()[np.newaxis, :] * d2r
# reshape r:
if np.array(r).size == 1:
r = np.ones_like(la) * r
else:
r = np.array(r).flatten()[:, np.newaxis]
# ECEF position vectors of data points - should be N by 3, where N is number of data points
ecef_r_data = np.hstack((np.cos(la ) * np.cos(lo ), np.cos(la ) * np.sin(lo ), np.sin(la )))
# position vectors SECS poles - should be 3 by M, where M is number of SECS - these are the z axes of each SECS
ecef_r_secs = np.vstack((np.cos(la_s) * np.cos(lo_s), np.cos(la_s) * np.sin(lo_s), np.sin(la_s))).T
# unit vector pointing from SECS to data points - (M, N, 3)
ecef_t = ecef_r_secs[np.newaxis, :, :] - ecef_r_data[:, np.newaxis, :] # difference vector - not tangential yet
ecef_t = ecef_t - np.einsum('ijk,ik->ij', ecef_t, ecef_r_data)[:, :, np.newaxis] * ecef_r_data[:, np.newaxis, :] # subtract radial part of the vector to make it tangential
ecef_t = ecef_t/np.linalg.norm(ecef_t, axis = 2)[:, :, np.newaxis] # normalize the result
# make N rotation matrices to rotate ecef_t to enu_t - one rotation matrix per SECS:
R = np.hstack( (np.dstack((-np.sin(lo) , np.cos(lo) , np.zeros_like(la) )),
np.dstack((-np.cos(lo) * np.sin(la), -np.sin(lo) * np.sin(la), np.cos( la) )),
np.dstack(( np.cos(lo) * np.cos(la), np.sin(lo) * np.cos(la), np.sin( la) ))) )
# apply rotation matrices to make enu vectors pointing from data points to SECS
enu_t = np.einsum('lij, lkj->lki', R, ecef_t)[:, :, :-1] # remove last component (up), which should deviate from zero only by machine precision
# the polar angles (N, M):
theta = np.arccos(dpclip(np.einsum('ij, kj -> ik', ecef_r_data, ecef_r_secs)))
# indices of data points that are below and above current sheet:
below = r.flatten() <= RI
above = r.flatten() > RI
# G matrix scale factors
if current_type == 'divergence_free':
s = np.minimum(r, RI) / np.maximum(r, RI)
sa = s[above]
sb = s[below]
Ar = MU0 * constant / r # common factor radial direction
Sr = np.zeros_like(theta)
Sr[below] = 1 / np.sqrt(1 + sb**2 - 2 * sb * np.cos(theta[below])) - 1
Sr[above] = sa / np.sqrt(1 + sa**2 - 2 * sa * np.cos(theta[above])) - sa
Gr = Ar * Sr
An_ = MU0 * constant / (r * np.sin(theta)) # common factor local northward (note sign difference wrt theta) direction
Sn_ = np.zeros_like(theta)
Sn_[below] = (sb - np.cos(theta[below])) / np.sqrt(1 + sb**2 - 2 * sb * np.cos(theta[below])) + np.cos(theta[below])
Sn_[above] = (1 - sa * np.cos(theta[above])) / np.sqrt(1 + sa**2 - 2 * sa * np.cos(theta[above])) - 1
Gn_ = An_ * Sn_
# calculate geo east, north:
Ge = Gn_ * enu_t[:, :, 0]
Gn = Gn_ * enu_t[:, :, 1]
elif current_type == 'curl_free':
# G matrix for local eastward component
Ge_ = -MU0 * constant / np.tan(theta/2) / r
# apply modifications to handle singularities:
theta0 = singularity_limit / RI
if theta0 > 0:
alpha = 1 / np.tan(theta0/2)**2
rr = np.tile(r, (1, theta.shape[1])) # need one r for every element in matrix
Ge_[theta < theta0] = -MU0 * constant * alpha * np.tan(theta[theta < theta0]/2) / rr[theta < theta0]
# zero below current sheet:
Ge_[below] *= 0
# calculate geo east, north, radial:
Ge = Ge_ * enu_t[:, :, 1] # eastward component of enu_t is northward component of enu_e (unit vector in local east direction)
Gn = -Ge_ * enu_t[:, :, 0] # northward component of enu_t is eastward component of enu_e
Gr = Ge_ * 0 # no radial component
if induction_nullification_radius is not None and current_type == 'divergence_free':
# include the effect of telluric image currents
radius = induction_nullification_radius**2 / RI
amplitude_factor = -RI / induction_nullification_radius
Ge_, Gn_, Gr_ = get_SECS_B_G_matrices(lat, lon, r, lat_secs, lon_secs,
current_type = 'divergence_free',
RI = radius)
Ge = Ge + amplitude_factor * Ge_
Gn = Gn + amplitude_factor * Gn_
Gr = Gr + amplitude_factor * Gr_
return Ge, Gn, Gr
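# A minimal usage sketch (an assumption about typical use, not part of the
# original module): given measured field components Be, Bn, Br at points with
# radius r = RE, the SECS amplitudes m can be estimated by least squares:
# Ge, Gn, Gr = get_SECS_B_G_matrices(lat, lon, RE, lat_secs, lon_secs)
# G = np.vstack((Ge, Gn, Gr))
# d = np.concatenate((Be, Bn, Br))
# m = np.linalg.lstsq(G, d, rcond=None)[0]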
|
import itertools
words = []
with open('words.italian.txt') as f:
words = f.read().split('\n')
def charCount(word):
counts = {}
for char in word:
counts[char] = counts.get(char, 0) + 1
return counts
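# Example: charCount('pizza') -> {'p': 1, 'i': 1, 'z': 2, 'a': 1}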
def possible_words(lwords, charSet):
l = []
for word in lwords:
flag = 1
chars = charCount(word)
for key in chars:
if key not in charSet:
flag = 0
else:
if charSet.count(key) != chars[key]:
flag = 0
if flag == 1:
l.append(word)
return l
def check(s1, s2):
if len(s1) != len(s2):
return False
if (sorted(s1) == sorted(s2)):
return True
return False
if __name__ == "__main__":
word = input('Enter the word to anagram: ').lower().strip()
print('')
word = ''.join([i for i in word if i.isalpha()])
minwordlen = int(input('Enter the minimum word length: '))
print('')
charSet = [char for char in word]
pw = possible_words(words, charSet)
pw = [x for x in pw if len(x)>=minwordlen]
print(f'List of all possible words made from: {word}')
print(pw)
print('Ok, checking whether there are any anagrams...')
for i in range(2, 10):
for l in itertools.permutations(pw, r=i):
aw = ''.join(l)
# print(f'Check: {aw}')
x = check(word, aw)
if x:
print(l)
|
from bs4 import BeautifulSoup as bs
import pyperclip as pc
import requests
import re
import search
def t1337x_search(query):
'''
Search for torrents and return results if anything is found; otherwise report that no results were found and restart the process.
'''
url='https://1337x.to/search/'+query+'/1/'
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.2840.71 Safari/539.36'}
source=requests.get(url,headers=headers)
soup=bs(source.text,'html.parser')
if(soup.find_all('td',class_='coll-1 name')): #check if the search returned anything (case of a bad search query)
results=soup.find_all('td',class_='coll-1 name')
size=soup.find_all('td',class_="coll-4 size mob-vip")
search_result(soup,results,size) #call for getting results
else:
print("No results were returned. Please refine your search.")
search.menu() #start program again
def search_result(soup,results,size):
'''
Print the search results (torrent names and sizes) and ask the user to select the desired one
'''
links=[] #list for storing all url links
i=1
for r,s in zip(results,size): #print search results
print(i,re.sub('[\W_]+', ' ', r.text[:49]),s.text.replace("B","B, seedrs-"))
print()
i=i+1
for link in soup.find_all('a'):
#getting link for all 15 torrents
b=link.get('href')
if re.match("^/torrent",b):
links.append("https://1337x.to"+b)
print("select a torrent")
choice=int(input())
ch_url=links[choice-1] #return url of desired torrent
getTorrent(ch_url)
def getTorrent(ch_url):
'''
Fetch and return the magnet link of the torrent the user selected
'''
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.2840.71 Safari/539.36'}
source=requests.get(ch_url,headers=headers)
soup=bs(source.text,'html.parser')
magnet=soup.find_all('a')
#searching for magnet link
for link in magnet:
b=link.get('href')
if re.match("^magnet:",b):
magnet_link=b
break
print("\nHere is your magnet link for the torrent \n")
print(magnet_link)
pc.copy(str(magnet_link))
print ("\nWe make your work more easy\nYour magnet link is now in your clipboard\nGo to seedr.cc or open Torrent Downloader application and paste the link your download will start.")
print("\nThanks for using Torrent Finder \nCreated with " + "\u2764"+ " by Rishabh Sharma")
search.menu()
|
# -*- coding: utf-8 -*-
"""
Demonstrate use of GLLinePlotItem to draw cross-sections of a surface.
"""
## Add path to library (just for examples; you do not need this)
import initExample
from pyqtgraph.Qt import QtCore, QtGui
import pyqtgraph.opengl as gl
import pyqtgraph as pg
import numpy as np
app = QtGui.QApplication([])
w = gl.GLViewWidget()
w.opts['distance'] = 40
w.show()
w.setWindowTitle('pyqtgraph example: GLLinePlotItem')
gx = gl.GLGridItem()
gx.rotate(90, 0, 1, 0)
gx.translate(-10, 0, 0)
w.addItem(gx)
gy = gl.GLGridItem()
gy.rotate(90, 1, 0, 0)
gy.translate(0, -10, 0)
w.addItem(gy)
gz = gl.GLGridItem()
gz.translate(0, 0, -10)
w.addItem(gz)
def fn(x, y):
return np.cos((x**2 + y**2)**0.5)
n = 51
y = np.linspace(-10,10,n)
x = np.linspace(-10,10,100)
for i in range(n):
yi = np.array([y[i]]*100)
d = (x**2 + yi**2)**0.5
z = 10 * np.cos(d) / (d+1)
pts = np.vstack([x,yi,z]).transpose()
plt = gl.GLLinePlotItem(pos=pts, color=pg.glColor((i,n*1.3)), width=(i+1)/10., antialias=True)
w.addItem(plt)
## Start Qt event loop unless running in interactive mode.
if __name__ == '__main__':
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from flask import Flask, request, render_template, Response
from flask_restplus import Api, Resource, fields, reqparse, abort, fields, Model
import logging
import json
import time
from argparse import ArgumentParser
from importlib import import_module
flask_app = Flask(__name__)
app = Api(app=flask_app)
name_space = app.namespace('app', description='Flaskification of Fn')
logging.basicConfig(level=logging.DEBUG)
fn_cache = {}
@name_space.route("/fn/<modulefn>")
class ModuleRunner(Resource):
@name_space.expect([fields.String], validate=False)
def post(self, modulefn):
if not modulefn:
abort(404, "no module specified")
runner_args = request.json # the args to be passed to the function, a single value is first promoted to a list
logging.info(f" * GET ModuleRunner:: module.fn: {modulefn} args: {runner_args}")
try:
start_time = time.time()
logging.info(f" * GET ModuleRunner:: module cached: {modulefn in fn_cache}")
if modulefn not in fn_cache:
p, m = modulefn.rsplit('.', 1)
mod = import_module(p)
if not mod:
abort(404, f"no module found: {modulefn}")
if getattr(mod, m, None):
fn_cache[modulefn] = getattr(mod, m)
logging.info(f" * GET ModuleRunner:: cached(key: {modulefn}): {mod}")
else:
abort(404, f"no funtion {m} found: {mod}")
if modulefn in fn_cache:
fn = fn_cache[modulefn]
result = fn(*runner_args)
logging.info(f" * GET ModuleRunner:: module.fn: {modulefn} args: {runner_args}, result: {result}")
duration = time.time() - start_time
return {
"modulefn": modulefn,
"args": runner_args,
"duration_ms": duration/1000.0,
"result": result
}
except Exception as e:
return {
"modulefn": modulefn,
"args": runner_args,
"error": str(e)
}
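# A minimal client-side sketch (an illustrative assumption: the server runs
# locally on Flask's default port 5000, and 'math.pow' is just an importable
# example function):
#
#   import requests
#   r = requests.post("http://localhost:5000/app/fn/math.pow", json=[2.0, 10.0])
#   print(r.json())  # e.g. {"modulefn": "math.pow", "args": [2.0, 10.0], ..., "result": 1024.0}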
if __name__ == '__main__':
logging.info(f" * server starting")
flask_app.run(host="0.0.0.0", debug=True)
|
import os
import sys
sys.path.append(os.path.realpath(os.path.join(os.path.split(__file__)[0], '..', '..')))
import numpy as np
def cents_repr(amount_in_cents, upper_case=True):
if amount_in_cents < 100:
res = f"{amount_in_cents} USD CENTS"
else:
res = f"{amount_in_cents/100} USD"
if upper_case:
return res.upper()
else:
return res
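# Examples: cents_repr(50) -> "50 USD CENTS"; cents_repr(150) -> "1.5 USD"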
def randn_skew_fast(shape, alpha=0.0, loc=0.0, scale=1.0):
# Taken from: https://stackoverflow.com/questions/36200913/generate-n-random-numbers-from-a-skew-normal-distribution-using-numpy
if not isinstance(shape, (list, tuple)):
shape = (shape,)
sigma = alpha / np.sqrt(1.0 + alpha**2)
u0 = np.random.randn(*shape)
v = np.random.randn(*shape)
u1 = (sigma*u0 + np.sqrt(1.0 - sigma**2)*v) * scale
u1[u0 < 0] *= -1
u1 = u1 + loc
return u1 |
from django.db import connection, transaction
from django.db.models import get_model
ProductRecord = get_model('analytics', 'ProductRecord')
Product = get_model('catalogue', 'Product')
class Calculator(object):
# Map of field name to weight
weights = {'num_views': 1,
'num_basket_additions': 3,
'num_purchases': 5}
def __init__(self, logger):
self.logger = logger
self.cursor = connection.cursor()
def run(self):
self.calculate_scores()
self.update_product_models()
def calculate_scores(self):
self.logger.info("Calculating product scores")
# Build the "SET ..." part of the SQL statement
weighted_sum = " + ".join(
["%s*`%s`" % (weight, field) for field, weight
in self.weights.items()])
ctx = {'table': ProductRecord._meta.db_table,
'weighted_total': weighted_sum,
'total_weight': sum(self.weights.values())}
sql = '''UPDATE `%(table)s`
SET score = (%(weighted_total)s) / %(total_weight)s''' % ctx
self.logger.debug(sql)
self.cursor.execute(sql)
transaction.commit_unless_managed()
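# For illustration, with the weights above this emits SQL roughly like
# (the table name depends on the ProductRecord model's db_table, and the term
# order follows dict iteration order):
# UPDATE `analytics_productrecord`
# SET score = (1*`num_views` + 3*`num_basket_additions` + 5*`num_purchases`) / 9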
def update_product_models(self):
self.logger.info("Updating product records")
qs = ProductRecord.objects.all()
for record in qs:
record.product.score = record.score
record.product.save()
self.logger.info("Updated scores for %d products" % qs.count())
|
from resources import * |
'''
Created on Apr 19, 2016
@author: Rohan Achar
'''
import json
from time import sleep
from pcc.recursive_dictionary import RecursiveDictionary
from common.wire_formats import FORMATS
from datamodel.all import DATAMODEL_TYPES
from pcc.dataframe.dataframe_threading import dataframe_wrapper as dataframe
from pcc.dataframe.application_queue import ApplicationQueue
from common.modes import Modes
from common.converter import create_jsondict, create_complex_obj
FETCHING_MODES = set([Modes.Getter,
Modes.GetterSetter,
Modes.Taker])
TRACKING_MODES = set([Modes.Tracker])
PUSHING_MODES = set([Modes.Deleter,
Modes.GetterSetter,
Modes.Setter,
Modes.TakerSetter,
Modes.Producing])
ALL_MODES = set([Modes.Deleter,
Modes.GetterSetter,
Modes.Setter,
Modes.TakerSetter,
Modes.Producing,
Modes.Tracker,
Modes.Getter])
class dataframe_stores(object):
def __init__(self, name2class):
self.master_dataframe = dataframe()
self.app_to_df = {}
self.name2class = name2class
self.pause_servers = False
self.app_wire_format = {}
def __pause(self):
while self.pause_servers:
sleep(0.1)
def add_new_dataframe(self, name, df):
self.__pause()
self.app_to_df[name] = df
def delete_app(self, app):
self.__pause()
del self.app_to_df[app]
def register_app(self, app, type_map, wire_format = "json"):
self.__pause()
types_to_get = set()
for mode in FETCHING_MODES:
types_to_get.update(set(type_map.setdefault(mode, set())))
types_to_track = set()
for mode in TRACKING_MODES:
types_to_track.update(set(type_map.setdefault(mode, set())))
types_to_track = types_to_track.difference(types_to_get)
real_types_to_get = [self.name2class[tpstr] for tpstr in types_to_get]
real_types_to_track = [self.name2class[tpstr] for tpstr in types_to_track]
self.master_dataframe.add_types(real_types_to_get + real_types_to_track)
df = ApplicationQueue(app, real_types_to_get + real_types_to_track, self.master_dataframe)
self.add_new_dataframe(app, df)
# Add all types to master.
types_to_add_to_master = set()
for mode in ALL_MODES:
types_to_add_to_master.update(set(type_map.setdefault(mode, set())))
self.master_dataframe.add_types([self.name2class[tpstr] for tpstr in types_to_add_to_master])
self.app_wire_format[app] = wire_format
def disconnect(self, app):
self.__pause()
if app in self.app_to_df:
self.delete_app(app)
def reload_dms(self, datamodel_types):
self.__pause()
pass
def update(self, app, changes):
#print json.dumps(changes, sort_keys = True, separators = (',', ': '), indent = 4)
self.__pause()
dfc_type, content_type = FORMATS[self.app_wire_format[app]]
dfc = dfc_type()
dfc.ParseFromString(changes)
if app in self.app_to_df:
self.master_dataframe.apply_changes(dfc, except_app = app)
def getupdates(self, app):
self.__pause()
dfc_type, content_type = FORMATS[self.app_wire_format[app]]
final_updates = dfc_type()
if app in self.app_to_df:
final_updates = dfc_type(self.app_to_df[app].get_record())
self.app_to_df[app].clear_record()
return final_updates.SerializeToString(), content_type
def get_app_list(self):
return self.app_to_df.keys()
def clear(self, tp = None):
if not tp:
self.__init__(self.name2class)
else:
if tp in self.master_dataframe.object_map:
del self.master_dataframe.object_map[tp]
if tp in self.master_dataframe.current_state:
del self.master_dataframe.current_state[tp]
def pause(self):
self.pause_servers = True
def unpause(self):
self.pause_servers = False
def gc(self, sim):
# For now not clearing contents
self.delete_app(sim)
def get(self, tp):
return [create_jsondict(o) for o in self.master_dataframe.get(tp)]
def put(self, tp, objs):
real_objs = [create_complex_obj(tp, obj, self.master_dataframe.object_map) for obj in objs.values()]
tpname = tp.__realname__
gkey = self.master_dataframe.member_to_group[tpname]
if gkey == tpname:
self.master_dataframe.extend(tp, real_objs)
else:
for obj in real_objs:
oid = obj.__primarykey__
if oid in self.master_dataframe.object_map[gkey]:
# do this only if the object is already there.
# cannot add an object of a subset (or any other pcc) type if it doesn't already exist.
for dim in obj.__dimensions__:
# setting attribute to the original object, so that changes cascade
setattr(self.master_dataframe.object_map[gkey][oid], dim._name, getattr(obj, dim._name))
|
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.13.7
# kernelspec:
# display_name: Python [conda env:bandit_38]
# language: python
# name: conda-env-bandit_38-py
# ---
# %% language="javascript"
# IPython.notebook.kernel.restart()
# %% [markdown]
# ## Metaclass example
# %%
class LittleMeta(type):
def __new__(cls, clsname, superclasses, attributedict):
print('clsname: ', clsname)
print('superclasses: ', superclasses)
print('attributedict: ', attributedict)
return type.__new__(cls, clsname, superclasses, attributedict)
# %%
class S:
pass
class A(S, metaclass=LittleMeta):
pass
# %%
a = A()
# %%
print(a)
# %%
a.__class__.__bases__
# %%
a.__class__
# %%
type(S)
# %% [markdown]
# ## Factory Method #1
# %%
# %%
class ObjectFactory:
def __init__(self):
self._builders = {}
def register_builder(self, key, builder):
self._builders[key] = builder
print(builder)
def create(self, key, **kwargs):
builder = self._builders.get(key)
if not builder:
raise ValueError(key)
return builder(**kwargs)
# %%
class ServiceHYDAT:
def __init__(self, db_hdl):
self._db_hdl = db_hdl
print(f'ServiceHYDAT.__init__(): dbl_hdl = {self._db_hdl}')
def get_obs(self):
print('Getting some data')
pass
# %%
class ServiceHYDATBuilder:
def __init__(self):
print('ServiceHYDATBuilder.__init__()')
self._instance = None
def __call__(self, HYDAT_db_filename, **_ignored):
print('ServiceHYDATBuilder.__call__()')
if not self._instance:
print(' *new* ServiceHYDAT instance')
db_hdl = self.connect(HYDAT_db_filename)
self._instance = ServiceHYDAT(db_hdl)
print(self._instance)
return self._instance
def connect(self, db_filename):
# This would connect to the sqlite3 db and return the handle
print(f'Connecting to {db_filename}')
return 'mydb_hdl'
# %%
factory = ObjectFactory()
factory.register_builder('HYDAT', ServiceHYDATBuilder())
# %%
config = {'HYDAT_db_filename': 'somefile.sqlite'}
hydat = factory.create('HYDAT', **config)
# %%
print(hydat)
# %%
hydat.get_obs()
# %%
id(hydat)
# %%
id(factory.create('HYDAT', **config))
# %%
# %% [markdown]
# ## Factory method approach #2
# %% language="javascript"
# IPython.notebook.kernel.restart()
# %%
from abc import ABCMeta, abstractmethod
from typing import Callable
# %%
class DataFactory:
registry = {}
# @classmethod
# def register(cls, name: str) -> Callable:
# def inner_wrapper(wrapped_class) -> Callable:
# cls.registry[name] = wrapped_class
# print(f'DataFactory: registered service {name}')
# return wrapped_class
# return inner_wrapper
def register(self, name, connector):
self.registry[name] = connector
print(connector)
# @classmethod
def create_service(cls, name:str, **kwargs):
svc_class = cls.registry.get(name)
if not svc_class:
raise ValueError(name)
print(f'DataFactory.create_service(): {name} service retrieved')
print(svc_class)
return svc_class(**kwargs)
# %%
class ServiceBase(metaclass=ABCMeta):
def __init__(self, **kwargs):
pass
@abstractmethod
def get_obs(self):
# print('Getting some data')
pass
# %%
# @DataFactory.register('HYDAT')
class ServiceHYDAT(ServiceBase):
def __init__(self, db_hdl):
self._db_hdl = db_hdl
print(f'ServiceHYDAT.__init__(): dbl_hdl = {self._db_hdl}')
def get_obs(self):
print('Get HYDAT data')
# %%
# @DataFactory.register('HYDAT')
class ServiceHYDAT_connector():
def __init__(self, **kwargs):
print('ServiceHYDAT_connector.__init__()')
self._instance = None
def __call__(self, HYDAT_db_filename, **_ignored):
print('ServiceHYDAT_connector.__call__()')
if not self._instance:
print(' *new* ServiceHYDAT instance')
db_hdl = self.connect(HYDAT_db_filename)
self._instance = ServiceHYDAT(db_hdl)
return self._instance
def connect(self, db_filename):
# This would connect to the sqlite3 db and return the handle
print(f'Connecting to {db_filename}')
return 'mydb_hdl'
# %%
dsources = DataFactory()
dsources.register('HYDAT', ServiceHYDAT_connector())
config = {'HYDAT_db_filename': 'somefile.sqlite'}
ds_hydat = dsources.create_service('HYDAT', **config)
# %%
ds_hydat
# %%
ds_hydat.get_obs()
# %%
DataFactory.registry
# %%
bb = ServiceHYDAT()
# %%
bb = ServiceHYDAT_connector('blah')
# %%
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyYtopt(PythonPackage):
"""Ytopt package implements search using Random Forest (SuRF), an autotuning
search method developed within Y-Tune ECP project."""
homepage = "https://xgitlab.cels.anl.gov/pbalapra/ytopt"
url = "https://xgitlab.cels.anl.gov/pbalapra/ytopt/raw/release/dist/ytopt-0.1.0.tar.gz"
version('0.1.0', sha256='c7081fe3585a5b7a25bcb84733cd2326b72de3bfc4f84d6ad110341f24c3e612')
depends_on('py-scikit-learn', type=('build', 'run'))
depends_on('py-scikit-optimize', type=('build', 'run'))
def build_args(self, spec, prefix):
args = []
return args
|
# Copyright 2018 Autodesk, Inc. All rights reserved.
#
# Use of this software is subject to the terms of the Autodesk license agreement
# provided at the time of installation or download, or which otherwise accompanies
# this software in either electronic or hard copy form.
#
from __future__ import print_function
import re
import six
import argparse
from six.moves.urllib import parse
from six.moves import BaseHTTPServer
import json
import ssl
import logging
import subprocess
from six.moves.socketserver import ThreadingMixIn
import sg_jira
DESCRIPTION = """
A simple web app frontend to the SG Jira bridge.
"""
CSS_TEMPLATE = """
<style>
body {
margin: 0;
background-color: #eee;
font-family: Arial, Helvetica, sans-serif;
}
h1 {
background-color: whitesmoke;
color: #00BAFF;
border-radius: 5px;
padding: 5 5 5 15px;
border-bottom: 1px solid #ddd;
}
.content { margin: 0 0 15px 15px; }
.error { margin: 0 0 15px 15px; }
.details { margin: 40px 0 15px 15px; }
h2 { margin-bottom: 10px; }
p { margin-top: 10px; }
</style>
"""
HTML_TEMPLATE = """
<head>
<title>SG Jira Bridge: %s</title>
{style}
</head>
<body>
<h1>SG Jira Bridge</h1>
<div class="content">
<h2>%s</h2>
<p>%s</p>
</div>
</body>
</html>
""".format(
style=CSS_TEMPLATE
)
# We override the default HTML error template to render errors to the user.
# This template *requires* the following format tokens:
# - %(code)d - for the response code
# - %(explain)s - for the short explanation of the response code
# - %(message)s - for a detailed message about the error
HTML_ERROR_TEMPLATE = """
<head>
<title>SG Jira Bridge Error %(code)d: %(message)s</title>
{style}
</head>
<body>
<h1>SG Jira Bridge</h1>
<div class="error">
<h2>Error %(code)d</h2>
<p>%(explain)s</p>
</div>
<div class="details">
<p><strong>Details: </strong> <pre>%(message)s</pre></p>
</div>
</body>
</html>
""".format(
style=CSS_TEMPLATE
)
# Please note that we can't use __name__ here as it would be __main__
logger = logging.getLogger("webapp")
def get_sg_jira_bridge_version():
"""
Helper to extract a version number for the sg-jira-bridge module.
    This will attempt to extract the version number from git if installed from
    a cloned repo. If a version cannot be determined, or the process fails for
    any reason, we return "dev".
:returns: A major.minor.patch[.sub] version string or "dev".
"""
# Note: if you install from a cloned git repository
# (e.g. pip install ./tk-core), the version number
# will be picked up from the most recently added tag.
try:
version_git = subprocess.check_output(
["git", "describe", "--abbrev=0"]
).rstrip()
return version_git
except Exception:
# Blindly ignore problems. Git might be not available, or the user may
# have installed via a zip archive, etc...
pass
return "dev"
class SgJiraBridgeBadRequestError(Exception):
"""
Custom exception so we can differentiate between errors we raise that
should return 4xx error codes and errors in the application which should
return 500 error codes.
"""
pass
class Server(ThreadingMixIn, BaseHTTPServer.HTTPServer):
"""
Basic server with threading functionality mixed in. This will help the server
keep up with a high volume of throughput from ShotGrid and Jira.
"""
def __init__(self, settings, *args, **kwargs):
# Note: BaseHTTPServer.HTTPServer is not a new style class so we can't use
# super here.
BaseHTTPServer.HTTPServer.__init__(self, *args, **kwargs)
self._sg_jira = sg_jira.Bridge.get_bridge(settings)
def sync_in_jira(self, *args, **kwargs):
"""
        Just pass the given parameters to the SG Jira Bridge method.
"""
return self._sg_jira.sync_in_jira(*args, **kwargs)
def sync_in_shotgun(self, *args, **kwargs):
"""
        Just pass the given parameters to the SG Jira Bridge method.
"""
return self._sg_jira.sync_in_shotgun(*args, **kwargs)
def admin_reset(self, *args, **kwargs):
"""
Just pass the given parameters to the SG Jira Bridge method.
"""
return self._sg_jira.reset(*args, **kwargs)
@property
def sync_settings_names(self):
"""
Return the list of sync settings this server handles.
"""
return self._sg_jira.sync_settings_names
class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
# On Python3, in socketserver.StreamRequestHandler, if this is
# set it will use makefile() to produce the output stream. Otherwise,
# it will use socketserver._SocketWriter, and we won't be able to get
# to the data.
# taken from https://stackoverflow.com/a/53163148/4223964
wbufsize = 1
protocol_version = "HTTP/1.1"
# Inject the version of sg-jira-bridge into server_version for the headers.
server_version = "sg-jira-bridge/%s %s" % (
get_sg_jira_bridge_version(),
BaseHTTPServer.BaseHTTPRequestHandler.server_version,
)
# BaseHTTPServer Class variable that stores the HTML template for error
# pages. Override the default error page template with our own.
error_message_format = HTML_ERROR_TEMPLATE
def post_response(self, response_code, message, content=None):
"""
Convenience method for handling the response
Handles sending the response, setting headers, and writing any
content in the expected order. Sets appropriate headers including
content length which is required by HTTP/1.1.
:param int response_code: Standard HTTP response code sent in headers.
:param str message: Message to accompany response code in headers.
:param str content: Optional content to return as content in the
response. This is typically html displayed in a browser.
"""
# NOTE: All responses must:
# - send the response first.
# - then, if there is some data, call end_headers to add a blank line.
# - then write the data, if any, with self.wfile.write
self.send_response(response_code, message)
content_len = 0
if content:
content_len = len(content)
self.send_header("Content-Type", "text/html; charset=utf-8")
self.send_header("Content-Length", content_len)
# TODO: Ideally we use the default functionality of HTTP/1.1 where
# keep-alive is True (no header needed). However, for some reason,
# this currently blocks new connections for 60 seconds (likely the
# default keep-alive timeout). So for now we explicitly close the
# connection with the header below to ensure things run smoothly.
# Once the issue has been resolved, we can remove this header.
self.send_header("Connection", "close")
self.end_headers()
if content:
self.wfile.write(content)
def do_GET(self):
"""
Handle a GET request.
"""
# Extract path components from the path, ignore leading '/' and
# discard empty values coming from '/' at the end or multiple
# contiguous '/'.
path_parts = [x for x in self.path[1:].split("/") if x]
if not path_parts:
self.post_response(
200,
"The server is alive",
                HTML_TEMPLATE % ("The server is alive", "The server is alive", ""),
)
return
# Return a correct error for browser favicon requests in order to
# reduce confusing log messages that look bad but aren't.
if len(path_parts) == 1 and path_parts[0] == "favicon.ico":
self.send_error(404)
return
if path_parts[0] == "sg2jira":
title = "Shotgun to Jira"
elif path_parts[0] == "jira2sg":
title = "Jira to Shotgun"
else:
self.send_error(400, "Invalid request path %s" % self.path)
return
        if len(path_parts) < 2:
            self.send_error(400, "Invalid request path %s" % self.path)
            return
        settings_name = path_parts[1]
if six.ensure_text(settings_name) not in self.server.sync_settings_names:
self.send_error(400, "Invalid settings name %s" % settings_name)
return
# Success, send a basic html page.
self.post_response(
200,
six.ensure_binary("Syncing with %s settings." % settings_name),
six.ensure_binary(
                HTML_TEMPLATE
% (title, title, "Syncing with %s settings." % settings_name)
),
)
def do_POST(self):
"""
Handle a POST request.
Post url paths need to have the form::
sg2jira/<settings_name>[/<sg_entity_type>/<sg_entity_id>]
jira2sg/<settings_name>/<jira_resource_type>/<jira_resource_key>
admin/reset
If the SG Entity is not specified in the path, it must be specified in
the provided payload.
"""
# /sg2jira/default[/Task/123]
# /jira2sg/default/Issue/KEY-123
# /admin/reset
try:
parsed = parse.urlparse(self.path)
# Extract additional query parameters.
# What they could be is still TBD, may be things like `dry_run=1`?
parameters = {}
if parsed.query:
parameters = parse.parse_qs(parsed.query, True, True)
# Extract path components from the path, ignore leading '/' and
# discard empty values coming from '/' at the end or multiple
# contiguous '/'.
path_parts = [x for x in parsed.path[1:].split("/") if x]
if not path_parts:
self.send_error(400, "Invalid request path %s" % self.path)
# Treat the command
if path_parts[0] == "admin":
self._handle_admin_request(path_parts, parameters)
elif path_parts[0] in ["sg2jira", "jira2sg"]:
self._handle_sync_request(path_parts, parameters)
else:
self.send_error(
400,
"Invalid request path %s: unknown command %s"
% (self.path, path_parts[0]),
)
return
self.post_response(200, "POST request successful")
except SgJiraBridgeBadRequestError as e:
self.send_error(400, str(e))
except Exception as e:
self.send_error(500, str(e))
logger.debug(e, exc_info=True)
def _read_payload(self):
"""
Read the body of a request to get the payload.
:returns: payload as a dictionary or empty dict if there was no payload
"""
content_type = self.headers.get("content-type")
# Check the content type, if not set we assume json.
# We can have a charset just after the content type, e.g.
# application/json; charset=UTF-8.
if content_type and not re.search(r"\s*application/json\s*;?", content_type):
raise SgJiraBridgeBadRequestError(
"Invalid content-type %s, it must be 'application/json'" % content_type
)
content_len = int(self.headers.get("content-length", 0))
body = self.rfile.read(content_len)
payload = {}
if body:
payload = json.loads(body)
return payload
def _handle_sync_request(self, path_parts, parameters):
"""
Handle a request to sync between ShotGrid and Jira in either direction.
At this point, only the action (the first path_part) from the request
path has been validated. The rest of the path_parts still need to be
        validated before we proceed. We expect the path for this request to
be one of the following:
sg2jira/<settings_name>[/<sg_entity_type>/<sg_entity_id>]
jira2sg/<settings_name>/<jira_resource_type>/<jira_resource_key>
If the SG Entity is not specified in the path, it must be present in
the loaded payload.
:param list path_parts: List of strings representing each part of the
URL path that this request accessed. For example,
``["sg2jira", "default", "Task", "123"]``.
:param dict parameters: Optional additional parameters that were extracted
from the url.
:raises SgJiraBridgeBadRequestError: If there is any problem we detect with the
path, or payload.
"""
entity_type = None
entity_key = None
if len(path_parts) == 4:
direction, settings_name, entity_type, entity_key = path_parts
elif len(path_parts) == 2:
direction, settings_name = path_parts
else:
raise SgJiraBridgeBadRequestError("Invalid request path %s" % self.path)
if six.ensure_text(settings_name) not in self.server.sync_settings_names:
raise SgJiraBridgeBadRequestError(
"Invalid settings name %s" % settings_name
)
payload = self._read_payload()
if direction == "sg2jira":
# Ensure we get a valid entity_type and entity_id
if not entity_type or not entity_key:
# We need to retrieve this from the payload.
entity_type = payload.get("entity_type")
entity_key = payload.get("entity_id")
if not entity_type or not entity_key:
raise SgJiraBridgeBadRequestError(
"Invalid request payload %s, unable to retrieve a Shotgun Entity type and its id."
% payload
)
# We could have a str or int here depending on how it was sent.
try:
entity_key = int(entity_key)
except ValueError as e:
# log the original exception before we obfuscate it
logger.debug(e, exc_info=True)
raise SgJiraBridgeBadRequestError(
"Invalid Shotgun %s id %s, it must be a number."
% (entity_type, entity_key,)
)
self.server.sync_in_jira(
settings_name, entity_type, int(entity_key), event=payload, **parameters
)
elif direction == "jira2sg":
if not entity_type or not entity_key:
# We can't retrieve this easily from the webhook payload without
# hard coding a list of supported resource types, so we require
# it to be specified in the path for the time being.
raise SgJiraBridgeBadRequestError(
"Invalid request path %s, it must include a Jira resource "
"type and its key" % self.path
)
self.server.sync_in_shotgun(
settings_name, entity_type, entity_key, event=payload, **parameters
)
def _handle_admin_request(self, path_parts, parameters):
"""
Handle admin request to the server.
Currently handles a single action, ``reset`` which resets the Bridge
in order to clear out the ShotGrid schema cache.
At this point, only the action (the first path_part) from the request
path has been validated. The rest of the path_parts still need to be
validated before we proceed.
admin/reset
:param list path_parts: List of strings representing each part of the
URL path that this request accessed. For example,
``["admin", "reset"]``.
:param dict parameters: Optional additional parameters that were extracted
from the url.
:raises SgJiraBridgeBadRequestError: If there is any problem we detect with the
path, or payload.
"""
# The only function we respond to now is reset
if len(path_parts) != 2 or path_parts[1] != "reset":
raise SgJiraBridgeBadRequestError(
"Invalid admin path '%s'. Action is not set or unsupported." % self.path
)
self.server.admin_reset(**parameters)
def log_message(self, format, *args):
"""
Override :class:`BaseHTTPServer.BaseHTTPRequestHandler` method to use a
standard logger.
:param str format: A format string, e.g. '%s %s'.
:param args: Arbitrary list of arguments to use with the format string.
"""
message = "%s - %s - %s" % (self.client_address[0], self.path, format % args)
logger.info(message)
def log_error(self, format, *args):
"""
Override :class:`BaseHTTPServer.BaseHTTPRequestHandler` method to use a
standard logger.
:param str format: A format string, e.g. '%s %s'.
:param args: Arbitrary list of arguments to use with the format string.
"""
message = "%s - %s - %s" % (self.client_address[0], self.path, format % args)
logger.error(message)
def create_server(port, settings, keyfile=None, certfile=None):
"""
Create the server.
:param int port: A port number to listen to.
:param str settings: Path to settings file.
:param str keyfile: Optional path to a PEM key file to run in HTTPS mode.
:param str certfile: Optional path to a PEM certificate file to run in HTTPS mode.
:returns: The HTTP Server
    :type: :class:`BaseHTTPServer.HTTPServer`
"""
httpd = Server(settings, ('', port), RequestHandler)
if keyfile and certfile:
# Activate HTTPS.
httpd.socket = ssl.wrap_socket(
httpd.socket, keyfile=keyfile, certfile=certfile, server_side=True
)
return httpd
def run_server(port, settings, keyfile=None, certfile=None):
"""
Run the server until a shutdown is requested.
:param int port: A port number to listen to.
:param str settings: Path to settings file.
:param str keyfile: Optional path to a PEM key file to run in https mode.
:param str certfile: Optional path to a PEM certificate file to run in https mode.
"""
create_server(port, settings, keyfile, certfile).serve_forever()
def main():
"""
Retrieve command line arguments and start the server.
"""
parser = argparse.ArgumentParser(description=DESCRIPTION)
parser.add_argument(
"--port", type=int, default=9090, help="The port number to listen to.",
)
parser.add_argument("--settings", help="Full path to settings file.", required=True)
parser.add_argument(
"--ssl_context",
help="A key and certificate file pair to run the server in HTTPS mode.",
nargs=2,
)
args = parser.parse_args()
keyfile = None
certfile = None
if args.ssl_context:
keyfile, certfile = args.ssl_context
run_server(
port=args.port, settings=args.settings, keyfile=keyfile, certfile=certfile,
)
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
print("Shutting down...")
|
from typing import TYPE_CHECKING, Generator
from .base import EXEC_NAMESPACE, EXPS_NAMESPACE, EXPS_STASH, ExpRefInfo
if TYPE_CHECKING:
from dvc.scm.git import Git
def exp_refs(scm: "Git") -> Generator["ExpRefInfo", None, None]:
"""Iterate over all experiment refs."""
for ref in scm.iter_refs(base=EXPS_NAMESPACE):
if ref.startswith(EXEC_NAMESPACE) or ref == EXPS_STASH:
continue
yield ExpRefInfo.from_ref(ref)
def exp_refs_by_rev(
scm: "Git", rev: str
) -> Generator["ExpRefInfo", None, None]:
"""Iterate over all experiment refs pointing to the specified revision."""
for ref in scm.get_refs_containing(rev, EXPS_NAMESPACE):
if not (ref.startswith(EXEC_NAMESPACE) or ref == EXPS_STASH):
yield ExpRefInfo.from_ref(ref)
def exp_refs_by_name(
scm: "Git", name: str
) -> Generator["ExpRefInfo", None, None]:
"""Iterate over all experiment refs matching the specified name."""
for ref_info in exp_refs(scm):
if ref_info.name == name:
yield ref_info
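
# Usage sketch (illustrative, not part of this module): list the names of all
# experiment refs for a repository, given a `Git` SCM instance.
#
#     from dvc.scm.git import Git
#
#     scm = Git(".")
#     for ref_info in exp_refs(scm):
#         print(ref_info.name)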
|
import requests
import json
from time import sleep
def main():
while True:
search_user_name = input('What user do you want to search?\n > ')
github = requests.get(f'https://api.github.com/users/{search_user_name}')
response = json.loads(github.text)
if 'message' in response:
print('User not found!')
sleep(10)
break
        bio = (response['bio'] or '').replace('\r\n', '')
name = response['name']
username = response['login']
followers = response['followers']
        following = response['following']
site = response['blog']
email = response['email']
        if email is None:
            email = "Don't have a public email"
        hireable = response['hireable']
        if hireable is None:
            hireable = 'Not hireable'
twitter_username = response['twitter_username']
location = response['location']
company = response['company']
        print(f'\n----------------------------------------------\n{username}\n----------------------------------------------\nname: {name}\ndescription: {bio}\nemail: {email}\nwebsite: {site}\nfollowers: {str(followers)}\nfollowing: {str(following)}\ntwitter username: {twitter_username}\ncompany: {company}\nhireable: {hireable}\nlocation: {location}\nprofile link: https://github.com/{username}\n----------------------------------------------')
if __name__ == '__main__':
main() |
from django.contrib import admin
from django.urls import path, include
from webdev.home_view import home
urlpatterns = [
path('admin/', admin.site.urls),
path('', home),
path('tarefas/', include('webdev.tarefas.urls'))
]
|
from django.contrib.auth.models import User
from django.test import Client, TestCase
from django.urls import reverse
from apps.ldap.test_helpers import LDAPTestCase
from .models import (
Officer,
Officership,
Person,
Politburo,
PolitburoMembership,
Semester,
)
from .staff_views import update_or_create_officer
class TestPages(LDAPTestCase):
def setUp(self):
super().setUp()
pnunez_user = User.objects.create_user(
first_name="Phillip",
last_name="Nunez",
username="pnunez",
email="[email protected]",
password="passwurd",
is_staff=True,
)
pnunez_person = Person(pnunez_user)
pnunez_person.save()
pnunez_officer = Officer(person=pnunez_person)
pnunez_officer.save()
semester = Semester(id="fa72", name="Fall 1972", current=True)
semester.save()
officership = Officership(
officer=pnunez_officer,
semester=semester,
blurb="I love the CSUA",
office_hours="Fri 6-7 PM",
)
officership.save()
president = Politburo(
position="president",
title="Hoser President",
description="The president is a cool person.",
contact="Reach out to [name] to be epic.",
)
president.save()
pnunez_prez = PolitburoMembership(
politburo=president, semester=semester, person=pnunez_person
)
pnunez_prez.save()
def test_officers(self):
response = self.client.get("/officers/")
self.assertEqual(response.status_code, 200)
self.assertContains(response, "Phillip")
def test_pb(self):
response = self.client.get("/politburo/")
self.assertEqual(response.status_code, 200)
self.assertContains(response, "Phillip")
def test_sponsors(self):
response = self.client.get("/sponsors/")
self.assertEqual(response.status_code, 200)
def test_tutoring(self):
response = self.client.get("/tutoring/")
self.assertEqual(response.status_code, 200)
def test_events(self):
response = self.client.get("/events/")
self.assertEqual(response.status_code, 200)
def test_update_or_create_officer_staff_only(self):
url = reverse("add-officer")
response = self.client.get(url)
self.assertEqual(response.status_code, 302)
self.client.login(username="pnunez", password="passwurd")
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_update_or_create_officer_failure(self):
url = reverse("add-officer")
self.client.login(username="pnunez", password="passwurd")
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
response = self.client.post(url, data={"username": "pnunez"}, follow=True)
self.assertFormError(response, "form", None, ["User pnunez is not in LDAP"])
response = self.client.post(url, data={"username": "cnunez"}, follow=True)
self.assertContains(response, "User cnunez created")
self.assertContains(response, "Person cnunez created")
self.assertContains(response, "Officer cnunez created")
self.assertContains(response, "Added cnunez to officers LDAP group")
|
import requests
BASE_URL = 'https://covid-193.p.rapidapi.com/'
ENDPOINT = 'history'
API_KEY = '687dbfe6b7msh0e5814a43c930b7p11d7cdjsn493dc356cfb6'
def history(day, country):
params = {
'country': country,
'day': day
}
headers = {
'x-rapidapi-key': API_KEY,
}
response = requests.get(BASE_URL + ENDPOINT, params=params, headers=headers)
result = dict()
json_res = response.json()
try:
result['code'] = 200
result["day"] = json_res["parameters"]["day"]
result["country"] = json_res["parameters"]["country"]
cases_list = dict()
for res in json_res["response"]:
cases_list["new"] = res["cases"]["new"]
cases_list["active"] = res["cases"]["active"]
cases_list["death"] = res["deaths"]["new"]
result["cases"] = cases_list
    except Exception:
result['code'] = 400
return result
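
# Minimal usage sketch (illustrative): the day and country below are example
# values only; a real call needs network access and a working API key.
if __name__ == '__main__':
    print(history('2021-01-01', 'italy'))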
|
'''
2014314433
lee young suk
'''
userinputa=input("Enter the first integer : ")
userinputb=input("Enter the second integer : ")
if int(userinputa)%2==0 and int(userinputb)%2==0:
print("Both a and b are even numbers")
elif int(userinputa)%2==0 or int(userinputb)%2==0:
print("Either a or b is an even number")
else:
print("Both a and b are odd numbers")
|
import numpy
import sys
def in_fault_window(line):
line = line.strip()
timestamp = line.split()[1]
time_segments = timestamp.split(':')
hour = int(time_segments[0])
minute = int(time_segments[1])
return hour % 3 == 0 and minute >= 30 and minute <= 45
def print_summary(data):
print 'Data points:', len(data)
print 'Min:', min(data)
print 'Max:', max(data)
print 'Mean:', numpy.mean(data)
print 'Std Dev:', numpy.std(data)
print '95th Percentile:', numpy.percentile(data, 95.0)
print '99th Percentile:', numpy.percentile(data, 99.0)
if __name__ == '__main__':
fp = open(sys.argv[1], 'r')
lines = fp.readlines()
fp.close()
data = []
output = open('vector.tmp', 'w')
for line in lines:
line = line.strip()
if 'Benchmark result' in line:
if not in_fault_window(line):
segments = line.strip().split()
value = int(segments[-2])
data.append(value)
output.write(str(value) + '\n')
output.flush()
output.close()
print 'All data points summary'
print '======================='
print_summary(data)
print
    print 'Filtered (< 1000) data points summary'
    print '====================================='
print_summary(filter(lambda x: x < 1000, data))
mean = numpy.mean(data)
sd = numpy.std(data)
print
print 'Filtered (< mean + 2sd) data points summary'
print '==========================================='
print_summary(filter(lambda x: x < mean + 2 * sd, data))
|
# -*- coding: utf-8 -*-
from math import atan2, degrees
from ..Qt import QtCore, QtGui
from ..Point import Point
from .. import functions as fn
from .GraphicsObject import GraphicsObject
__all__ = ['TextItem']
class TextItem(GraphicsObject):
"""
GraphicsItem displaying unscaled text (the text will always appear normal even inside a scaled ViewBox).
"""
def __init__(self, text='', color=(200,200,200), html=None, anchor=(0,0),
border=None, fill=None, angle=0, rotateAxis=None):
"""
============== =================================================================================
**Arguments:**
*text* The text to display
*color* The color of the text (any format accepted by pg.mkColor)
*html* If specified, this overrides both *text* and *color*
*anchor* A QPointF or (x,y) sequence indicating what region of the text box will
be anchored to the item's position. A value of (0,0) sets the upper-left corner
of the text box to be at the position specified by setPos(), while a value of (1,1)
sets the lower-right corner.
*border* A pen to use when drawing the border
*fill* A brush to use when filling within the border
*angle* Angle in degrees to rotate text. Default is 0; text will be displayed upright.
*rotateAxis* If None, then a text angle of 0 always points along the +x axis of the scene.
If a QPointF or (x,y) sequence is given, then it represents a vector direction
in the parent's coordinate system that the 0-degree line will be aligned to. This
Allows text to follow both the position and orientation of its parent while still
discarding any scale and shear factors.
============== =================================================================================
The effects of the `rotateAxis` and `angle` arguments are added independently. So for example:
* rotateAxis=None, angle=0 -> normal horizontal text
* rotateAxis=None, angle=90 -> normal vertical text
* rotateAxis=(1, 0), angle=0 -> text aligned with x axis of its parent
* rotateAxis=(0, 1), angle=0 -> text aligned with y axis of its parent
* rotateAxis=(1, 0), angle=90 -> text orthogonal to x axis of its parent
"""
self.anchor = Point(anchor)
self.rotateAxis = None if rotateAxis is None else Point(rotateAxis)
#self.angle = 0
GraphicsObject.__init__(self)
self.textItem = QtGui.QGraphicsTextItem()
self.textItem.setParentItem(self)
self._lastTransform = None
self._lastScene = None
self._bounds = QtCore.QRectF()
if html is None:
self.setColor(color)
self.setText(text)
else:
self.setHtml(html)
self.fill = fn.mkBrush(fill)
self.border = fn.mkPen(border)
self.setAngle(angle)
def setText(self, text, color=None):
"""
Set the text of this item.
This method sets the plain text of the item; see also setHtml().
"""
if color is not None:
self.setColor(color)
self.setPlainText(text)
def setPlainText(self, text):
"""
Set the plain text to be rendered by this item.
See QtGui.QGraphicsTextItem.setPlainText().
"""
if text != self.toPlainText():
self.textItem.setPlainText(text)
self.updateTextPos()
def toPlainText(self):
return self.textItem.toPlainText()
def setHtml(self, html):
"""
Set the HTML code to be rendered by this item.
See QtGui.QGraphicsTextItem.setHtml().
"""
if self.toHtml() != html:
self.textItem.setHtml(html)
self.updateTextPos()
def toHtml(self):
return self.textItem.toHtml()
def setTextWidth(self, *args):
"""
Set the width of the text.
If the text requires more space than the width limit, then it will be
wrapped into multiple lines.
See QtGui.QGraphicsTextItem.setTextWidth().
"""
self.textItem.setTextWidth(*args)
self.updateTextPos()
def setFont(self, *args):
"""
Set the font for this text.
See QtGui.QGraphicsTextItem.setFont().
"""
self.textItem.setFont(*args)
self.updateTextPos()
def setAngle(self, angle):
"""
Set the angle of the text in degrees.
This sets the rotation angle of the text as a whole, measured
counter-clockwise from the x axis of the parent. Note that this rotation
angle does not depend on horizontal/vertical scaling of the parent.
"""
self.angle = angle
self.updateTransform(force=True)
def setAnchor(self, anchor):
self.anchor = Point(anchor)
self.updateTextPos()
def setColor(self, color):
"""
Set the color for this text.
See QtGui.QGraphicsItem.setDefaultTextColor().
"""
self.color = fn.mkColor(color)
self.textItem.setDefaultTextColor(self.color)
def updateTextPos(self):
# update text position to obey anchor
r = self.textItem.boundingRect()
tl = self.textItem.mapToParent(r.topLeft())
br = self.textItem.mapToParent(r.bottomRight())
offset = (br - tl) * self.anchor
self.textItem.setPos(-offset)
def boundingRect(self):
return self.textItem.mapRectToParent(self.textItem.boundingRect())
def viewTransformChanged(self):
# called whenever view transform has changed.
# Do this here to avoid double-updates when view changes.
self.updateTransform()
def paint(self, p, *args):
# this is not ideal because it requires the transform to be updated at every draw.
# ideally, we would have a sceneTransformChanged event to react to..
s = self.scene()
ls = self._lastScene
if s is not ls:
if ls is not None:
ls.sigPrepareForPaint.disconnect(self.updateTransform)
self._lastScene = s
if s is not None:
s.sigPrepareForPaint.connect(self.updateTransform)
self.updateTransform()
p.setTransform(self.sceneTransform())
if self.border.style() != QtCore.Qt.PenStyle.NoPen or self.fill.style() != QtCore.Qt.BrushStyle.NoBrush:
p.setPen(self.border)
p.setBrush(self.fill)
p.setRenderHint(p.RenderHint.Antialiasing, True)
p.drawPolygon(self.textItem.mapToParent(self.textItem.boundingRect()))
def setVisible(self, v):
GraphicsObject.setVisible(self, v)
if v:
self.updateTransform()
def updateTransform(self, force=False):
if not self.isVisible():
return
# update transform such that this item has the correct orientation
# and scaling relative to the scene, but inherits its position from its
# parent.
# This is similar to setting ItemIgnoresTransformations = True, but
# does not break mouse interaction and collision detection.
p = self.parentItem()
if p is None:
pt = QtGui.QTransform()
else:
pt = p.sceneTransform()
if not force and pt == self._lastTransform:
return
t = pt.inverted()[0]
# reset translation
t.setMatrix(t.m11(), t.m12(), t.m13(), t.m21(), t.m22(), t.m23(), 0, 0, t.m33())
# apply rotation
angle = -self.angle
if self.rotateAxis is not None:
d = pt.map(self.rotateAxis) - pt.map(Point(0, 0))
a = degrees(atan2(d.y(), d.x()))
angle += a
t.rotate(angle)
self.setTransform(t)
self._lastTransform = pt
self.updateTextPos()
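
# Usage sketch (illustrative, not part of this module): anchor a label by its
# bottom-centre and keep its baseline aligned with the parent's x axis.
#
#     import pyqtgraph as pg
#
#     plot = pg.plot(list(range(10)))
#     label = pg.TextItem(text='peak', anchor=(0.5, 1.0), rotateAxis=(1, 0))
#     plot.addItem(label)
#     label.setPos(5, 5)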
|
"""Test config flow."""
from unittest.mock import patch
from aiomusiccast import MusicCastConnectionException
import pytest
from homeassistant import config_entries, data_entry_flow
from homeassistant.components import ssdp
from homeassistant.components.yamaha_musiccast.const import DOMAIN
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_HOST
from tests.common import MockConfigEntry
@pytest.fixture(autouse=True)
async def silent_ssdp_scanner(hass):
"""Start SSDP component and get Scanner, prevent actual SSDP traffic."""
with patch(
"homeassistant.components.ssdp.Scanner._async_start_ssdp_listeners"
), patch("homeassistant.components.ssdp.Scanner._async_stop_ssdp_listeners"), patch(
"homeassistant.components.ssdp.Scanner.async_scan"
):
yield
@pytest.fixture(autouse=True)
def mock_setup_entry():
"""Mock setting up a config entry."""
with patch(
"homeassistant.components.yamaha_musiccast.async_setup_entry", return_value=True
):
yield
@pytest.fixture
def mock_get_device_info_valid():
"""Mock getting valid device info from musiccast API."""
with patch(
"aiomusiccast.MusicCastDevice.get_device_info",
return_value={"system_id": "1234567890", "model_name": "MC20"},
):
yield
@pytest.fixture
def mock_get_device_info_invalid():
"""Mock getting invalid device info from musiccast API."""
with patch(
"aiomusiccast.MusicCastDevice.get_device_info",
return_value={"type": "no_yamaha"},
):
yield
@pytest.fixture
def mock_get_device_info_exception():
"""Mock raising an unexpected Exception."""
with patch(
"aiomusiccast.MusicCastDevice.get_device_info",
side_effect=Exception("mocked error"),
):
yield
@pytest.fixture
def mock_get_device_info_mc_exception():
"""Mock raising an unexpected Exception."""
with patch(
"aiomusiccast.MusicCastDevice.get_device_info",
side_effect=MusicCastConnectionException("mocked error"),
):
yield
@pytest.fixture
def mock_ssdp_yamaha():
"""Mock that the SSDP detected device is a musiccast device."""
with patch("aiomusiccast.MusicCastDevice.check_yamaha_ssdp", return_value=True):
yield
@pytest.fixture
def mock_ssdp_no_yamaha():
"""Mock that the SSDP detected device is not a musiccast device."""
with patch("aiomusiccast.MusicCastDevice.check_yamaha_ssdp", return_value=False):
yield
@pytest.fixture
def mock_valid_discovery_information():
"""Mock that the ssdp scanner returns a useful upnp description."""
with patch(
"homeassistant.components.ssdp.async_get_discovery_info_by_st",
return_value=[
ssdp.SsdpServiceInfo(
ssdp_usn="mock_usn",
ssdp_st="mock_st",
ssdp_location="http://127.0.0.1:9000/MediaRenderer/desc.xml",
ssdp_headers={
"_host": "127.0.0.1",
},
upnp={},
)
],
):
yield
@pytest.fixture
def mock_empty_discovery_information():
"""Mock that the ssdp scanner returns no upnp description."""
with patch(
"homeassistant.components.ssdp.async_get_discovery_info_by_st", return_value=[]
):
yield
# User Flows
async def test_user_input_device_not_found(
hass, mock_get_device_info_mc_exception, mock_get_source_ip
):
"""Test when user specifies a non-existing device."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"host": "none"},
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["errors"] == {"base": "cannot_connect"}
async def test_user_input_non_yamaha_device_found(
hass, mock_get_device_info_invalid, mock_get_source_ip
):
"""Test when user specifies an existing device, which does not provide the musiccast API."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"host": "127.0.0.1"},
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["errors"] == {"base": "no_musiccast_device"}
async def test_user_input_device_already_existing(
hass, mock_get_device_info_valid, mock_get_source_ip
):
"""Test when user specifies an existing device."""
mock_entry = MockConfigEntry(
domain=DOMAIN,
unique_id="1234567890",
data={CONF_HOST: "192.168.188.18", "model": "MC20", "serial": "1234567890"},
)
mock_entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"host": "192.168.188.18"},
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result2["reason"] == "already_configured"
async def test_user_input_unknown_error(
hass, mock_get_device_info_exception, mock_get_source_ip
):
"""Test when user specifies an existing device, which does not provide the musiccast API."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"host": "127.0.0.1"},
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["errors"] == {"base": "unknown"}
async def test_user_input_device_found(
hass,
mock_get_device_info_valid,
mock_valid_discovery_information,
mock_get_source_ip,
):
"""Test when user specifies an existing device."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"host": "127.0.0.1"},
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert isinstance(result2["result"], ConfigEntry)
assert result2["data"] == {
"host": "127.0.0.1",
"serial": "1234567890",
"upnp_description": "http://127.0.0.1:9000/MediaRenderer/desc.xml",
}
async def test_user_input_device_found_no_ssdp(
hass,
mock_get_device_info_valid,
mock_empty_discovery_information,
mock_get_source_ip,
):
"""Test when user specifies an existing device, which no discovery data are present for."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"host": "127.0.0.1"},
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert isinstance(result2["result"], ConfigEntry)
assert result2["data"] == {
"host": "127.0.0.1",
"serial": "1234567890",
"upnp_description": "http://127.0.0.1:49154/MediaRenderer/desc.xml",
}
# SSDP Flows
async def test_ssdp_discovery_failed(hass, mock_ssdp_no_yamaha, mock_get_source_ip):
"""Test when an SSDP discovered device is not a musiccast device."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_SSDP},
data=ssdp.SsdpServiceInfo(
ssdp_usn="mock_usn",
ssdp_st="mock_st",
ssdp_location="http://127.0.0.1/desc.xml",
upnp={
ssdp.ATTR_UPNP_MODEL_NAME: "MC20",
ssdp.ATTR_UPNP_SERIAL: "123456789",
},
),
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "yxc_control_url_missing"
async def test_ssdp_discovery_successful_add_device(
hass, mock_ssdp_yamaha, mock_get_source_ip
):
"""Test when the SSDP discovered device is a musiccast device and the user confirms it."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_SSDP},
data=ssdp.SsdpServiceInfo(
ssdp_usn="mock_usn",
ssdp_st="mock_st",
ssdp_location="http://127.0.0.1/desc.xml",
upnp={
ssdp.ATTR_UPNP_MODEL_NAME: "MC20",
ssdp.ATTR_UPNP_SERIAL: "1234567890",
},
),
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] is None
assert result["step_id"] == "confirm"
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{},
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert isinstance(result2["result"], ConfigEntry)
assert result2["data"] == {
"host": "127.0.0.1",
"serial": "1234567890",
"upnp_description": "http://127.0.0.1/desc.xml",
}
async def test_ssdp_discovery_existing_device_update(
hass, mock_ssdp_yamaha, mock_get_source_ip
):
"""Test when the SSDP discovered device is a musiccast device, but it already exists with another IP."""
mock_entry = MockConfigEntry(
domain=DOMAIN,
unique_id="1234567890",
data={CONF_HOST: "192.168.188.18", "model": "MC20", "serial": "1234567890"},
)
mock_entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_SSDP},
data=ssdp.SsdpServiceInfo(
ssdp_usn="mock_usn",
ssdp_st="mock_st",
ssdp_location="http://127.0.0.1/desc.xml",
upnp={
ssdp.ATTR_UPNP_MODEL_NAME: "MC20",
ssdp.ATTR_UPNP_SERIAL: "1234567890",
},
),
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
assert mock_entry.data[CONF_HOST] == "127.0.0.1"
assert mock_entry.data["upnp_description"] == "http://127.0.0.1/desc.xml"
|
"""Alarm App."""
import json
from pathlib import Path
import dash_auth
import dash_bootstrap_components as dbc
import dash_html_components as html
from dash_charts.utils_app import AppBase
from dash_charts.utils_fig import map_args, map_outputs
class AppAlarm(AppBase):
"""PiAlarm UI."""
name = 'PiAlarm'
"""Application name"""
secret_filename = Path(__file__).parent / 'secret.json'
"""Path to json file with username and passwords. Example: `{'username': 'password'}`."""
external_stylesheets = [dbc.themes.FLATLY]
"""List of external stylesheets."""
id_button = 'test-button'
"""Button ID."""
id_status = 'string-status'
"""Status ID."""
def initialization(self):
"""Initialize ids with `self.register_uniq_ids([...])` and other one-time actions."""
super().initialization()
self.register_uniq_ids([self.id_button, self.id_status])
if not self.secret_filename.is_file():
raise FileNotFoundError(f'Expected: {self.secret_filename}')
user_pass_pairs = json.loads(self.secret_filename.read_text())
dash_auth.BasicAuth(self.app, user_pass_pairs)
def create_elements(self):
"""Initialize charts and tables."""
pass
def return_layout(self):
"""Return Dash application layout.
Returns:
obj: Dash HTML object. Default is simple HTML text
"""
return dbc.Container([
dbc.Col([
html.H1('PiAlarm'),
dbc.Button('Test Button', color='secondary', id=self.ids[self.id_button]),
html.P('', id=self.ids[self.id_status]),
]),
])
def create_callbacks(self):
"""Create Dash callbacks."""
outputs = [(self.id_status, 'children')]
inputs = [(self.id_button, 'n_clicks')]
states = []
@self.callback(outputs, inputs, states)
def update_table(*raw_args):
args_in, args_state = map_args(raw_args, inputs, states)
n_clicks = args_in[self.id_button]['n_clicks']
return map_outputs(outputs, [
(self.id_status, 'children', f'Clicked: {n_clicks}'),
])
|
#!/usr/bin/sudo python
from scapy.all import *
from scapy.layers.inet import TCP, IP
from filter import TrafficFilter, CallBackFilter
src = '192.168.1.63'
dst = '192.168.1.94'
verbose = True
def log(data: str):
if verbose:
print(data)
def throttle_data_from_destination(pkt: Packet):
ip, tcp = pkt[IP], pkt[TCP]
# verify that the DST sent packet to SRC
if ip.src != dst or ip.dst != src:
return
throttle(pkt)
def throttle(pkt: Packet):
try:
ip, tcp = pkt[IP], pkt[TCP]
for i in range(0, 3):
new_ip = IP(src=ip.dst, dst=ip.src)
new_tcp = TCP(dport=tcp.sport,
sport=tcp.dport,
seq=tcp.ack,
ack=tcp.seq + len(tcp.payload),
flags='A')
send(new_ip / new_tcp, verbose=False)
log(f'> {format_packet((ip / tcp))}')
except Exception as ex:
log(f'Exception during packet sending: {ex}')
def format_packet(pkt: Packet, print_payload: bool = False) -> str:
ip, tcp = pkt[IP], pkt[TCP]
result = f'{pkt.summary()} --> FLAG {tcp.flags}, SEQ {tcp.seq}, ACK {tcp.ack}, PAY: {len(tcp.payload)}'
if tcp.payload and print_payload:
result += f'---\n{tcp.payload}\n---'
return result
def custom_callback(pkt: Packet):
ip, tcp = pkt[IP], pkt[TCP]
log(f'< {format_packet(pkt, False)}')
for ignored_flag in {'S', 'SA', 'R'}:
        # compare flags one at a time; membership testing with `in` against a
        # set of strings does not work reliably for scapy's FlagValue objects
if tcp.flags == ignored_flag:
return
throttle_data_from_destination(pkt)
if __name__ == '__main__':
# 1st argument is the source
# 2nd argument is the destination
if len(sys.argv) >= 3:
src = sys.argv[1]
dst = sys.argv[2]
if len(sys.argv) >= 4:
verbose = True
else:
print('1st argument for source, 2nd for destination IP')
print(sys.argv)
exit(1)
tf = TrafficFilter(src=src, dst=dst)
cb = CallBackFilter(tf, callback=custom_callback)
print(f'Executing sniffing between SRC: {src} and DST {dst}')
sniff(prn=cb.callback, filter="tcp")
|
#!/usr/bin/env python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import argparse
import os
import logging
import sys
import subprocess
logging.getLogger().setLevel(logging.INFO)
root_dir = os.path.abspath(os.path.join(os.path.abspath(__file__), "..", "..", ".."))
sdk_dir = os.path.join(root_dir, "sdk")
def run_black(service_dir):
logging.info("Running black for {}".format(service_dir))
out = subprocess.Popen([sys.executable, "-m", "black", "-l", "120", "sdk/{}".format(service_dir)],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd = root_dir
)
    stdout, stderr = out.communicate()
    if stderr:
        raise RuntimeError("black ran into some trouble during its invocation: " + stderr.decode('utf-8'))
if stdout:
if "reformatted" in stdout.decode('utf-8'):
return False
return True
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Run black to verify formatted code."
)
parser.add_argument(
"--service_directory", help="Directory of the package being tested"
)
parser.add_argument(
"--validate", help=("Flag that enables formatting validation.")
)
args = parser.parse_args()
if args.validate != "False":
if not run_black(args.service_directory):
raise ValueError("Found difference between formatted code and current commit. Please re-generate with the latest autorest.")
else:
print("Skipping formatting validation") |
#!/usr/bin/env python3
# SPDX-License-Identifier: Apache-2.0
import sys
import django
django.setup()
# pylint: disable=wrong-import-position
import cavedb.utils
import cavedb.models
from cavedb.generate_docs import write_global_bulletin_files
from cavedb.generate_docs import write_bulletin_files
from cavedb.generate_docs import run_buildscript
def do_build_bulletin(bulletin_id):
if bulletin_id == cavedb.utils.GLOBAL_BULLETIN_ID:
write_global_bulletin_files()
else:
        bulletin = cavedb.models.Bulletin.objects.filter(pk=bulletin_id).first()
        if bulletin is None:
            print('Bulletin %s not found' % (bulletin_id))
            sys.exit(1)
write_bulletin_files(bulletin)
run_buildscript(bulletin_id)
if __name__ == '__main__':
if len(sys.argv) != 2:
print('usage: generate_single_doc.py <bulletin ID>')
sys.exit(1)
do_build_bulletin(sys.argv[1])
|
import parsy
def between(p_open, p_close, p):
"""
-- | @'between' open close p@ parses @open@, followed by @p@ and @close@.
-- Returns the value returned by @p@.
--
-- > braces = between (symbol "{") (symbol "}")
between_brackets = partial(between, symbol("["), symbol("]"))
"""
return p_open >> p << p_close
def nested(p_open, p_close, p_token, p_sep_by):
@parsy.generate
def group():
return (
yield between(
p_open,
p_close,
expr.sep_by(p_sep_by),
)
)
expr = p_token | group
return expr
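
# Minimal usage sketch (illustrative, not part of the original module): parse
# arbitrarily nested, comma-separated bracket groups such as "[1,[2,[3,4]]]".
# The token parsers below are assumptions built from parsy primitives.
if __name__ == "__main__":
    lbrack = parsy.string("[")
    rbrack = parsy.string("]")
    comma = parsy.string(",")
    number = parsy.regex(r"\d+").map(int)

    parser = nested(lbrack, rbrack, number, comma)
    assert parser.parse("[1,[2,[3,4]]]") == [1, [2, [3, 4]]]
    print("nested parse OK")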
|
import numpy
from stl import mesh
# Find the min/max extents so we know the bounding box; the width, length and
# height are later used as the step size when translating copies.
def find_mins_maxs(obj):
minx = obj.x.min()
maxx = obj.x.max()
miny = obj.y.min()
maxy = obj.y.max()
minz = obj.z.min()
maxz = obj.z.max()
return minx, maxx, miny, maxy, minz, maxz
def translate(_solid, step, padding, multiplier, axis):
if 'x' == axis:
items = 0, 3, 6
elif 'y' == axis:
items = 1, 4, 7
elif 'z' == axis:
items = 2, 5, 8
else:
raise RuntimeError('Unknown axis %r, expected x, y or z' % axis)
# _solid.points.shape == [:, ((x, y, z), (x, y, z), (x, y, z))]
_solid.points[:, items] += (step * multiplier) + (padding * multiplier)
def copy_obj(obj, dims, num_rows, num_cols, num_layers):
w, l, h = dims
copies = []
for layer in range(num_layers):
for row in range(num_rows):
for col in range(num_cols):
                # skip the position occupied by the original being copied
if row == 0 and col == 0 and layer == 0:
continue
_copy = mesh.Mesh(obj.data.copy())
# pad the space between objects by 10% of the dimension being
# translated
if col != 0:
translate(_copy, w, w / 10., col, 'x')
if row != 0:
translate(_copy, l, l / 10., row, 'y')
if layer != 0:
translate(_copy, h, h / 10., layer, 'z')
copies.append(_copy)
return copies
class Combine:
def __init__(self, files):
self.files = files
def combine(self):
bodies = [mesh.Mesh.from_file(file) for file in self.files]
axes = ['x', 'y', 'z']
for i in range(1, len(bodies)):
minx, maxx, miny, maxy, minz, maxz = find_mins_maxs(bodies[i-1])
w1 = maxx - minx
l1 = maxy - miny
h1 = maxz - minz
translate(bodies[i], w1, w1 / 10., 1, axes[i%3])
return mesh.Mesh(numpy.concatenate([body.data for body in bodies]))
def combine_two(self):
# Using an existing stl file:
main_body = mesh.Mesh.from_file(self.files[0])
# rotate along Y
# main_body.rotate([0.0, 0.5, 0.0], math.radians(90))
minx, maxx, miny, maxy, minz, maxz = find_mins_maxs(main_body)
w1 = maxx - minx
l1 = maxy - miny
h1 = maxz - minz
copies = copy_obj(main_body, (w1, l1, h1), 2, 2, 1)
# I wanted to add another related STL to the final STL
twist_lock = mesh.Mesh.from_file(self.files[1])
minx, maxx, miny, maxy, minz, maxz = find_mins_maxs(twist_lock)
w2 = maxx - minx
l2 = maxy - miny
h2 = maxz - minz
translate(twist_lock, w1, w1 / 100., 3, 'x')
copies2 = copy_obj(twist_lock, (w2, l2, h2), 2, 2, 1)
# combined = mesh.Mesh(numpy.concatenate([main_body.data, twist_lock.data] +
# [copy.data for copy in copies] +
# [copy.data for copy in copies2]))
combined = mesh.Mesh(numpy.concatenate([main_body.data, twist_lock.data]))
return combined
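
# Usage sketch (illustrative): the STL file names below are placeholders.
#
#     combined = Combine(['part_a.stl', 'part_b.stl']).combine()
#     combined.save('combined.stl')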
|
# input
n = int(input())
# brute force
cnt = 0  # number of claps
for num in range(1, n+1):
for i in str(num):
if i == '3' or i == '6' or i == '9':
cnt += 1
print(cnt)
|
# -*- test-case-name: axiom.test.test_scheduler -*-
import warnings
from zope.interface import implements
from twisted.internet import reactor
from twisted.application.service import IService, Service
from twisted.python import log, failure
from epsilon.extime import Time
from axiom.iaxiom import IScheduler
from axiom.item import Item, declareLegacyItem
from axiom.attributes import AND, timestamp, reference, integer, inmemory, bytes
from axiom.dependency import uninstallFrom
from axiom.upgrade import registerUpgrader
from axiom.substore import SubStore
VERBOSE = False
class TimedEventFailureLog(Item):
typeName = 'timed_event_failure_log'
schemaVersion = 1
desiredTime = timestamp()
actualTime = timestamp()
runnable = reference()
traceback = bytes()
class TimedEvent(Item):
typeName = 'timed_event'
schemaVersion = 1
time = timestamp(indexed=True)
runnable = reference()
running = inmemory(doc='True if this event is currently running.')
def activate(self):
self.running = False
def _rescheduleFromRun(self, newTime):
"""
Schedule this event to be run at the indicated time, or if the
indicated time is None, delete this event.
"""
if newTime is None:
self.deleteFromStore()
else:
self.time = newTime
def invokeRunnable(self):
"""
Run my runnable, and reschedule or delete myself based on its result.
Must be run in a transaction.
"""
runnable = self.runnable
if runnable is None:
self.deleteFromStore()
else:
try:
self.running = True
newTime = runnable.run()
finally:
self.running = False
self._rescheduleFromRun(newTime)
def handleError(self, now, failureObj):
""" An error occurred running my runnable. Check my runnable for an
error-handling method called 'timedEventErrorHandler' that will take
the given failure as an argument, and execute that if available:
otherwise, create a TimedEventFailureLog with information about what
happened to this event.
Must be run in a transaction.
"""
errorHandler = getattr(self.runnable, 'timedEventErrorHandler', None)
if errorHandler is not None:
self._rescheduleFromRun(errorHandler(self, failureObj))
else:
self._defaultErrorHandler(now, failureObj)
def _defaultErrorHandler(self, now, failureObj):
TimedEventFailureLog(store=self.store,
desiredTime=self.time,
actualTime=now,
runnable=self.runnable,
traceback=failureObj.getTraceback())
self.deleteFromStore()
class _WackyControlFlow(Exception):
def __init__(self, eventObject, failureObject):
Exception.__init__(self, "User code failed during timed event")
self.eventObject = eventObject
self.failureObject = failureObject
MAX_WORK_PER_TICK = 10
class SchedulerMixin:
def _oneTick(self, now):
theEvent = self._getNextEvent(now)
if theEvent is None:
return False
try:
theEvent.invokeRunnable()
except:
raise _WackyControlFlow(theEvent, failure.Failure())
self.lastEventAt = now
return True
def _getNextEvent(self, now):
# o/` gonna party like it's 1984 o/`
theEventL = list(self.store.query(TimedEvent,
TimedEvent.time <= now,
sort=TimedEvent.time.ascending,
limit=1))
if theEventL:
return theEventL[0]
def tick(self):
now = self.now()
self.nextEventAt = None
workBeingDone = True
workUnitsPerformed = 0
errors = 0
while workBeingDone and workUnitsPerformed < MAX_WORK_PER_TICK:
try:
workBeingDone = self.store.transact(self._oneTick, now)
except _WackyControlFlow, wcf:
self.store.transact(wcf.eventObject.handleError, now, wcf.failureObject)
log.err(wcf.failureObject)
errors += 1
workBeingDone = True
if workBeingDone:
workUnitsPerformed += 1
x = list(self.store.query(TimedEvent, sort=TimedEvent.time.ascending, limit=1))
if x:
self._transientSchedule(x[0].time, now)
if errors or VERBOSE:
log.msg("The scheduler ran %(eventCount)s events%(errors)s." % dict(
eventCount=workUnitsPerformed,
errors=(errors and (" (with %d errors)" % (errors,))) or ''))
def schedule(self, runnable, when):
TimedEvent(store=self.store, time=when, runnable=runnable)
self._transientSchedule(when, self.now())
def reschedule(self, runnable, fromWhen, toWhen):
for evt in self.store.query(TimedEvent,
AND(TimedEvent.time == fromWhen,
TimedEvent.runnable == runnable)):
evt.time = toWhen
self._transientSchedule(toWhen, self.now())
break
else:
raise ValueError("%r is not scheduled to run at %r" % (runnable, fromWhen))
def unscheduleFirst(self, runnable):
"""
Remove from given item from the schedule.
If runnable is scheduled to run multiple times, only the temporally first
is removed.
"""
for evt in self.store.query(TimedEvent, TimedEvent.runnable == runnable, sort=TimedEvent.time.ascending):
evt.deleteFromStore()
break
def unscheduleAll(self, runnable):
for evt in self.store.query(TimedEvent, TimedEvent.runnable == runnable):
evt.deleteFromStore()
def scheduledTimes(self, runnable):
"""
Return an iterable of the times at which the given item is scheduled to
run.
"""
events = self.store.query(
TimedEvent, TimedEvent.runnable == runnable)
return (event.time for event in events if not event.running)
_EPSILON = 1e-20 # A very small amount of time.
class _SiteScheduler(object, Service, SchedulerMixin):
"""
Adapter from a site store to L{IScheduler}.
"""
implements(IScheduler)
timer = None
callLater = reactor.callLater
now = Time
def __init__(self, store):
self.store = store
def startService(self):
"""
Start calling persistent timed events whose time has come.
"""
super(_SiteScheduler, self).startService()
self._transientSchedule(self.now(), self.now())
def stopService(self):
"""
Stop calling persistent timed events.
"""
super(_SiteScheduler, self).stopService()
if self.timer is not None:
self.timer.cancel()
self.timer = None
def tick(self):
self.timer = None
return super(_SiteScheduler, self).tick()
def _transientSchedule(self, when, now):
"""
If the service is currently running, schedule a tick to happen no
later than C{when}.
@param when: The time at which to tick.
@type when: L{epsilon.extime.Time}
@param now: The current time.
@type now: L{epsilon.extime.Time}
"""
if not self.running:
return
if self.timer is not None:
if self.timer.getTime() < when.asPOSIXTimestamp():
return
self.timer.cancel()
delay = when.asPOSIXTimestamp() - now.asPOSIXTimestamp()
# reactor.callLater allows only positive delay values. The scheduler
# may want to have scheduled things in the past and that's OK, since we
# are dealing with Time() instances it's impossible to predict what
# they are relative to the current time from user code anyway.
delay = max(_EPSILON, delay)
self.timer = self.callLater(delay, self.tick)
self.nextEventAt = when
class _UserScheduler(object, Service, SchedulerMixin):
"""
Adapter from a non-site store to L{IScheduler}.
"""
implements(IScheduler)
def __init__(self, store):
self.store = store
def now(self):
"""
Report the current time, as reported by the parent's scheduler.
"""
return IScheduler(self.store.parent).now()
def _transientSchedule(self, when, now):
"""
If this service's store is attached to its parent, ask the parent to
schedule this substore to tick at the given time.
@param when: The time at which to tick.
@type when: L{epsilon.extime.Time}
@param now: Present for signature compatibility with
L{_SiteScheduler._transientSchedule}, but ignored otherwise.
"""
if self.store.parent is not None:
subStore = self.store.parent.getItemByID(self.store.idInParent)
hook = self.store.parent.findOrCreate(
_SubSchedulerParentHook,
subStore=subStore)
hook._schedule(when)
def migrateDown(self):
"""
Remove the components in the site store for this SubScheduler.
"""
subStore = self.store.parent.getItemByID(self.store.idInParent)
ssph = self.store.parent.findUnique(
_SubSchedulerParentHook,
_SubSchedulerParentHook.subStore == subStore,
default=None)
if ssph is not None:
te = self.store.parent.findUnique(TimedEvent,
TimedEvent.runnable == ssph,
default=None)
if te is not None:
te.deleteFromStore()
ssph.deleteFromStore()
def migrateUp(self):
"""
Recreate the hooks in the site store to trigger this SubScheduler.
"""
te = self.store.findFirst(TimedEvent, sort=TimedEvent.time.descending)
if te is not None:
self._transientSchedule(te.time, None)
class _SchedulerCompatMixin(object):
"""
Backwards compatibility helper for L{Scheduler} and L{SubScheduler}.
This mixin provides all the attributes from L{IScheduler}, but provides
them by adapting the L{Store} the item is in to L{IScheduler} and
getting them from the resulting object. Primarily in support of test
code, it also supports rebinding those attributes by rebinding them on
the L{IScheduler} powerup.
@see: L{IScheduler}
"""
implements(IScheduler)
def forwardToReal(name):
def get(self):
return getattr(IScheduler(self.store), name)
def set(self, value):
setattr(IScheduler(self.store), name, value)
return property(get, set)
now = forwardToReal("now")
tick = forwardToReal("tick")
schedule = forwardToReal("schedule")
reschedule = forwardToReal("reschedule")
unschedule = forwardToReal("unschedule")
unscheduleAll = forwardToReal("unscheduleAll")
scheduledTimes = forwardToReal("scheduledTimes")
def activate(self):
"""
Whenever L{Scheduler} or L{SubScheduler} is created, either newly or
when loaded from a database, emit a deprecation warning referring
people to L{IScheduler}.
"""
# This is unfortunate. Perhaps it is the best thing which works (it is
# the first I found). -exarkun
if '_axiom_memory_dummy' in vars(self):
stacklevel = 7
else:
stacklevel = 5
warnings.warn(
self.__class__.__name__ + " is deprecated since Axiom 0.5.32. "
"Just adapt stores to IScheduler.",
category=PendingDeprecationWarning,
stacklevel=stacklevel)
class Scheduler(Item, _SchedulerCompatMixin):
"""
Track and execute persistent timed events for a I{site} store.
This is deprecated and present only for backwards compatibility. Adapt
the store to L{IScheduler} instead.
"""
implements(IService)
typeName = 'axiom_scheduler'
schemaVersion = 2
dummy = integer()
def activate(self):
_SchedulerCompatMixin.activate(self)
def setServiceParent(self, parent):
"""
L{Scheduler} is no longer an L{IService}, but still provides this
method as a no-op in case an instance which was still an L{IService}
powerup is loaded (in which case it will be used like a service
once).
"""
declareLegacyItem(
Scheduler.typeName, 1,
dict(eventsRun=integer(default=0),
lastEventAt=timestamp(),
nextEventAt=timestamp()))
def scheduler1to2(old):
new = old.upgradeVersion(Scheduler.typeName, 1, 2)
new.store.powerDown(new, IService)
new.store.powerDown(new, IScheduler)
return new
registerUpgrader(scheduler1to2, Scheduler.typeName, 1, 2)
class _SubSchedulerParentHook(Item):
schemaVersion = 4
typeName = 'axiom_subscheduler_parent_hook'
subStore = reference(
doc="""
The L{SubStore} for which this scheduling hook exists.
""", reftype=SubStore)
def run(self):
"""
Tick our C{subStore}'s L{SubScheduler}.
"""
IScheduler(self.subStore).tick()
def _schedule(self, when):
"""
Ensure that this hook is scheduled to run at or before C{when}.
"""
sched = IScheduler(self.store)
for scheduledAt in sched.scheduledTimes(self):
if when < scheduledAt:
sched.reschedule(self, scheduledAt, when)
break
else:
sched.schedule(self, when)
def upgradeParentHook1to2(oldHook):
"""
Add the scheduler attribute to the given L{_SubSchedulerParentHook}.
"""
newHook = oldHook.upgradeVersion(
oldHook.typeName, 1, 2,
loginAccount=oldHook.loginAccount,
scheduledAt=oldHook.scheduledAt,
scheduler=oldHook.store.findFirst(Scheduler))
return newHook
registerUpgrader(upgradeParentHook1to2, _SubSchedulerParentHook.typeName, 1, 2)
declareLegacyItem(
_SubSchedulerParentHook.typeName, 2,
dict(loginAccount=reference(),
scheduledAt=timestamp(default=None),
scheduler=reference()))
def upgradeParentHook2to3(old):
"""
Copy the C{loginAccount} attribute, but drop the others.
"""
return old.upgradeVersion(
old.typeName, 2, 3,
loginAccount=old.loginAccount)
registerUpgrader(upgradeParentHook2to3, _SubSchedulerParentHook.typeName, 2, 3)
declareLegacyItem(
_SubSchedulerParentHook.typeName, 3,
dict(loginAccount=reference(),
scheduler=reference()))
def upgradeParentHook3to4(old):
"""
Copy C{loginAccount} to C{subStore} and remove the installation marker.
"""
new = old.upgradeVersion(
old.typeName, 3, 4, subStore=old.loginAccount)
uninstallFrom(new, new.store)
return new
registerUpgrader(upgradeParentHook3to4, _SubSchedulerParentHook.typeName, 3, 4)
class SubScheduler(Item, _SchedulerCompatMixin):
"""
Track and execute persistent timed events for a substore.
This is deprecated and present only for backwards compatibility. Adapt
the store to L{IScheduler} instead.
"""
schemaVersion = 2
typeName = 'axiom_subscheduler'
dummy = integer()
def activate(self):
_SchedulerCompatMixin.activate(self)
def subscheduler1to2(old):
new = old.upgradeVersion(SubScheduler.typeName, 1, 2)
try:
new.store.powerDown(new, IScheduler)
except ValueError:
# Someone might have created a SubScheduler but failed to power it
# up. Fine.
pass
return new
registerUpgrader(subscheduler1to2, SubScheduler.typeName, 1, 2)
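# --- Hedged usage note (added; not part of the original module) ---
# The deprecation warnings above describe the intended modern usage: rather
# than creating Scheduler/SubScheduler items, adapt the store itself to
# IScheduler and use the interface methods forwarded by _SchedulerCompatMixin
# (schedule, reschedule, unschedule, ...). A sketch, assuming a `store` and a
# runnable item already exist:
#
#   IScheduler(store).schedule(runnable, extime.Time())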
|
import math
def my_abs(x):
if not isinstance(x, (int, float)):
raise TypeError('Bad operand type')
if x >= 0:
return x
else:
return -x
def move(x, y, step, angle=0):
nx = x + step * math.cos(angle)
ny = y - step * math.sin(angle)
return nx, ny
n = my_abs(-20)
print(n)
x, y = move(100, 100, 60, math.pi / 6)
print(x, y)
#TypeError
my_abs('123')
|
import tensorflow as tf
def smooth_l1_loss(bbox_prediction, bbox_target, sigma=3.0):
"""
Return Smooth L1 Loss for bounding box prediction.
Args:
bbox_prediction: shape (1, H, W, num_anchors * 4)
bbox_target: shape (1, H, W, num_anchors * 4)
    Smooth L1 loss (with the sigma parametrization used below) is defined as:
        0.5 * sigma^2 * x^2    if |x| < 1 / sigma^2
        |x| - 0.5 / sigma^2    otherwise
    where x = prediction - target. The classic form (d = 1) is recovered with sigma = 1.
"""
sigma2 = sigma ** 2
diff = bbox_prediction - bbox_target
abs_diff = tf.abs(diff)
abs_diff_lt_sigma2 = tf.less(abs_diff, 1.0 / sigma2)
bbox_loss = tf.reduce_sum(
tf.where(
abs_diff_lt_sigma2,
0.5 * sigma2 * tf.square(abs_diff),
abs_diff - 0.5 / sigma2
), [1]
)
return bbox_loss
if __name__ == '__main__':
bbox_prediction_tf = tf.placeholder(tf.float32)
bbox_target_tf = tf.placeholder(tf.float32)
loss_tf = smooth_l1_loss(bbox_prediction_tf, bbox_target_tf)
with tf.Session() as sess:
loss = sess.run(
loss_tf,
feed_dict={
bbox_prediction_tf: [
[0.47450006, -0.80413032, -0.26595005, 0.17124325]
],
bbox_target_tf: [
[0.10058594, 0.07910156, 0.10555581, -0.1224325]
],
})
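# --- Hedged illustration (added; not part of the original module) ---
# A minimal NumPy cross-check of the same piecewise Smooth L1 formula used
# above, assuming the default sigma = 3.0. The helper name is made up.
def _numpy_smooth_l1(pred, target, sigma=3.0):
    import numpy as np
    sigma2 = sigma ** 2
    x = np.abs(np.asarray(pred, dtype=np.float32) - np.asarray(target, dtype=np.float32))
    return np.sum(np.where(x < 1.0 / sigma2,
                           0.5 * sigma2 * x ** 2,
                           x - 0.5 / sigma2), axis=-1)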
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'regenerateaddresses.ui'
#
# Created: Sun Sep 15 23:50:23 2013
# by: PyQt4 UI code generator 4.10.2
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_regenerateAddressesDialog(object):
def setupUi(self, regenerateAddressesDialog):
regenerateAddressesDialog.setObjectName(_fromUtf8("regenerateAddressesDialog"))
regenerateAddressesDialog.resize(532, 332)
self.gridLayout_2 = QtGui.QGridLayout(regenerateAddressesDialog)
self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
self.buttonBox = QtGui.QDialogButtonBox(regenerateAddressesDialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.gridLayout_2.addWidget(self.buttonBox, 1, 0, 1, 1)
self.groupBox = QtGui.QGroupBox(regenerateAddressesDialog)
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.gridLayout = QtGui.QGridLayout(self.groupBox)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.label_6 = QtGui.QLabel(self.groupBox)
self.label_6.setObjectName(_fromUtf8("label_6"))
self.gridLayout.addWidget(self.label_6, 1, 0, 1, 1)
self.lineEditPassphrase = QtGui.QLineEdit(self.groupBox)
self.lineEditPassphrase.setInputMethodHints(QtCore.Qt.ImhHiddenText|QtCore.Qt.ImhNoAutoUppercase|QtCore.Qt.ImhNoPredictiveText)
self.lineEditPassphrase.setEchoMode(QtGui.QLineEdit.Password)
self.lineEditPassphrase.setObjectName(_fromUtf8("lineEditPassphrase"))
self.gridLayout.addWidget(self.lineEditPassphrase, 2, 0, 1, 5)
self.label_11 = QtGui.QLabel(self.groupBox)
self.label_11.setObjectName(_fromUtf8("label_11"))
self.gridLayout.addWidget(self.label_11, 3, 0, 1, 3)
self.spinBoxNumberOfAddressesToMake = QtGui.QSpinBox(self.groupBox)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.spinBoxNumberOfAddressesToMake.sizePolicy().hasHeightForWidth())
self.spinBoxNumberOfAddressesToMake.setSizePolicy(sizePolicy)
self.spinBoxNumberOfAddressesToMake.setMinimum(1)
self.spinBoxNumberOfAddressesToMake.setProperty("value", 8)
self.spinBoxNumberOfAddressesToMake.setObjectName(_fromUtf8("spinBoxNumberOfAddressesToMake"))
self.gridLayout.addWidget(self.spinBoxNumberOfAddressesToMake, 3, 3, 1, 1)
spacerItem = QtGui.QSpacerItem(132, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem, 3, 4, 1, 1)
self.label_2 = QtGui.QLabel(self.groupBox)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.gridLayout.addWidget(self.label_2, 4, 0, 1, 1)
self.lineEditAddressVersionNumber = QtGui.QLineEdit(self.groupBox)
self.lineEditAddressVersionNumber.setEnabled(True)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lineEditAddressVersionNumber.sizePolicy().hasHeightForWidth())
self.lineEditAddressVersionNumber.setSizePolicy(sizePolicy)
self.lineEditAddressVersionNumber.setMaximumSize(QtCore.QSize(31, 16777215))
self.lineEditAddressVersionNumber.setText(_fromUtf8(""))
self.lineEditAddressVersionNumber.setObjectName(_fromUtf8("lineEditAddressVersionNumber"))
self.gridLayout.addWidget(self.lineEditAddressVersionNumber, 4, 1, 1, 1)
spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem1, 4, 2, 1, 1)
self.label_3 = QtGui.QLabel(self.groupBox)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.gridLayout.addWidget(self.label_3, 5, 0, 1, 1)
self.lineEditStreamNumber = QtGui.QLineEdit(self.groupBox)
self.lineEditStreamNumber.setEnabled(False)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Ignored, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lineEditStreamNumber.sizePolicy().hasHeightForWidth())
self.lineEditStreamNumber.setSizePolicy(sizePolicy)
self.lineEditStreamNumber.setMaximumSize(QtCore.QSize(31, 16777215))
self.lineEditStreamNumber.setObjectName(_fromUtf8("lineEditStreamNumber"))
self.gridLayout.addWidget(self.lineEditStreamNumber, 5, 1, 1, 1)
spacerItem2 = QtGui.QSpacerItem(325, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem2, 5, 2, 1, 3)
self.checkBoxEighteenByteRipe = QtGui.QCheckBox(self.groupBox)
self.checkBoxEighteenByteRipe.setObjectName(_fromUtf8("checkBoxEighteenByteRipe"))
self.gridLayout.addWidget(self.checkBoxEighteenByteRipe, 6, 0, 1, 5)
self.label_4 = QtGui.QLabel(self.groupBox)
self.label_4.setWordWrap(True)
self.label_4.setObjectName(_fromUtf8("label_4"))
self.gridLayout.addWidget(self.label_4, 7, 0, 1, 5)
self.label = QtGui.QLabel(self.groupBox)
self.label.setWordWrap(True)
self.label.setObjectName(_fromUtf8("label"))
self.gridLayout.addWidget(self.label, 0, 0, 1, 5)
self.gridLayout_2.addWidget(self.groupBox, 0, 0, 1, 1)
self.retranslateUi(regenerateAddressesDialog)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), regenerateAddressesDialog.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), regenerateAddressesDialog.reject)
QtCore.QMetaObject.connectSlotsByName(regenerateAddressesDialog)
def retranslateUi(self, regenerateAddressesDialog):
regenerateAddressesDialog.setWindowTitle(_translate("regenerateAddressesDialog", "Regenerate Existing Addresses", None))
self.groupBox.setTitle(_translate("regenerateAddressesDialog", "Regenerate existing addresses", None))
self.label_6.setText(_translate("regenerateAddressesDialog", "Passphrase", None))
self.label_11.setText(_translate("regenerateAddressesDialog", "Number of addresses to make based on your passphrase:", None))
self.label_2.setText(_translate("regenerateAddressesDialog", "Address version number:", None))
self.label_3.setText(_translate("regenerateAddressesDialog", "Stream number:", None))
self.lineEditStreamNumber.setText(_translate("regenerateAddressesDialog", "1", None))
self.checkBoxEighteenByteRipe.setText(_translate("regenerateAddressesDialog", "Spend several minutes of extra computing time to make the address(es) 1 or 2 characters shorter", None))
self.label_4.setText(_translate("regenerateAddressesDialog", "You must check (or not check) this box just like you did (or didn\'t) when you made your addresses the first time.", None))
self.label.setText(_translate("regenerateAddressesDialog", "If you have previously made deterministic addresses but lost them due to an accident (like hard drive failure), you can regenerate them here. If you used the random number generator to make your addresses then this form will be of no use to you.", None))
|
#!/usr/bin/python3
import schedule
class VKBot:
def __init__(self):
self.session = None
def on_message(self, message, session) -> bool:
'''
Do any needed actions on this message, possibly using the session object.
If the session object is used, it is not cached.
Return a boolean: if it is False, it is assumed that this bot doesn't
care about this message, and it should be sent to the next bot in the
list; if True, the message will not be shown to lower-priority bots.
'''
return False
def send_message(self,*args,**kwargs):
'''
Send a message on my session.
This should be replaced with the actual function when the manager connects.
'''
pass
def send_debug_message(self, *args, **kwargs):
'''
Send a message on my session.
This message may not be actually sent, if the manager doesn't want to
send debug messages. Therefore, this should only be used for debug info.
This should be replaced with the actual function when the manager connects.
'''
pass
def create_jobs(self):
'''
Register any repeating actions with the `schedule` module.
The jobs are instance-identified, so they can be destroyed later.
        To make a job destroyable, set the `bot_id` field of every scheduled
        callable to `self` (destroy_jobs looks for that attribute).
This should be run when the bot is connected to a manager.
'''
pass
def destroy_jobs(self):
'''
Delete any jobs created by me in the 'schedule' module.
Used if the bot needs to be torn down.
This should be run when the bot is disconnected from a manager.
'''
for i in schedule.jobs:
if i.job_func.bot_id==self:
schedule.cancel_job(i)
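# --- Hedged usage sketch (added; not part of the original module) ---
# A hypothetical subclass showing how create_jobs()/destroy_jobs() are meant
# to cooperate: tag every scheduled callable with a `bot_id` attribute so
# destroy_jobs() can later find and cancel this bot's jobs. The subclass name
# and the 10-minute interval are made up.
class EchoBot(VKBot):
    def on_message(self, message, session) -> bool:
        self.send_debug_message('saw message: {}'.format(message))
        return False

    def create_jobs(self):
        def ping():
            self.send_debug_message('ping')
        ping.bot_id = self
        schedule.every(10).minutes.do(ping)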
|
import numpy as np
import pandas as pd
import torch
import os
import argparse
from torch.utils.data import DataLoader, sampler, TensorDataset, ConcatDataset
from attack_functions import *
from trojai_utils import *
from boundary_geometry import *
parser = argparse.ArgumentParser(description="TrojAI Round 5 script for boundary thickness and tilting")
parser.add_argument('--N', type=int, help="number of embeddings of each class to use")
parser.add_argument('--embedding-type', type=str,
choices = ['GPT-2', 'BERT', 'DistilBERT'],
help='use which embedding')
parser.add_argument('--architecture-type', type=str,
choices = ['GruLinear', 'LstmLinear'],
help='use which architecture')
parser.add_argument('--batch-size', type=int,
help='Batch size for the adversarial attacks')
parser.add_argument('--eps', type=float,
help='PGD attack strength')
parser.add_argument('--iters', type=int,
help='PGD attack iterations')
args = parser.parse_args()
# For Round 5 (change as needed based on your file system's structure)
THICK_NAMES = ["clean", "adv+to-", "adv-to+", "uap+to-", "uap-to+"]
TILT_NAMES = ["adv_adv+to-", "adv_adv-to+", "uap_uap+to-", "uap_uap-to+"]
BASE_EMBEDDINGS_PATH = "your embedding path"
RESULTS_PATH_TRAIN = "your train results path"
RESULTS_PATH_TEST = "your test results path"
RESULTS_PATH_HOLDOUT = "your holdout results path"
METADATA_TRAIN = pd.read_csv("place where training set's METADATA.csv is")
METADATA_TEST = pd.read_csv("place where test set's METADATA.csv is")
METADATA_HOLDOUT = pd.read_csv("place where holdout set's METADATA.csv is")
TRAIN_BASE_PATH = "point me to round5-train-dataset"
TEST_BASE_PATH = "point me to round5-test-dataset"
HOLDOUT_BASE_PATH = "point me to round5-holdout-dataset"
# Round 5 reference models (50 per (embedding, architecture) type)
REF_IDS = {
"BERT": {"LstmLinear": [14, 68, 73, 74, 98, 110, 123, 138, 163, 168, 196, 234, 240, 256, 263, 274, 299, 303, 318, 320, 349, 364, 389, 395, 405, 422, 446, 450, 463, 503, 512, 517, 524, 526, 533, 542, 563, 576, 599, 605, 617, 643, 646, 706, 707, 709, 710, 716, 719, 720],
"GruLinear": [20, 22, 30, 47, 67, 69, 79, 87, 92, 93, 97, 109, 112, 122, 152, 157, 165, 171, 175, 178, 181, 183, 185, 187, 190, 220, 230, 266, 273, 279, 294, 315, 322, 334, 336, 342, 354, 404, 415, 421, 431, 474, 477, 491, 497, 499, 502, 506, 511, 519]},
"DistilBERT": {"LstmLinear": [2, 12, 83, 86, 104, 105, 127, 131, 134, 135, 141, 156, 159, 201, 243, 244, 254, 272, 288, 310, 321, 332, 374, 377, 387, 398, 399, 416, 427, 445, 449, 460, 464, 483, 510, 523, 532, 537, 541, 543, 551, 570, 583, 588, 631, 648, 669, 670, 673, 678],
"GruLinear": [8, 17, 39, 41, 42, 45, 49, 55, 63, 76, 90, 96, 103, 149, 153, 176, 177, 179, 184, 193, 204, 208, 213, 231, 239, 245, 265, 270, 306, 347, 348, 350, 365, 371, 384, 391, 396, 419, 423, 425, 467, 468, 476, 487, 500, 516, 527, 529, 531, 548]},
"GPT-2": {"LstmLinear": [13, 18, 29, 48, 61, 72, 80, 88, 95, 100, 108, 114, 121, 132, 151, 158, 161, 162, 197, 198, 226, 228, 258, 264, 285, 304, 312, 317, 325, 333, 337, 345, 351, 368, 373, 386, 401, 403, 418, 426, 433, 461, 466, 472, 479, 493, 507, 508, 514, 530],
"GruLinear": [3, 7, 28, 32, 36, 52, 59, 71, 82, 89, 124, 126, 128, 148, 154, 191, 205, 206, 207, 224, 236, 237, 241, 246, 251, 253, 259, 260, 278, 284, 287, 289, 301, 335, 356, 360, 362, 366, 367, 378, 409, 411, 438, 471, 478, 485, 509, 513, 546, 547]}
}
UAP_MIN_SUCCESS_RATE = .80
dtype = torch.float32
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
def filter_dataset(models, ds):
    # Keep all datapoints that are classified correctly by at least 70% of the models
filtered_ds = []
for _, (x, y) in enumerate(DataLoader(ds, batch_size=args.batch_size)):
successes = torch.zeros(len(y), device=device)
for model in models:
pred = torch.argmax(model(x), dim=-1)
successes += (pred == y)
correct_pred_ids = successes >= (0.7 * len(models))
filtered_ds.append(TensorDataset(x[correct_pred_ids], y[correct_pred_ids]))
return ConcatDataset(filtered_ds)
def make_perturbed_datasets(models, pos_ds, neg_ds, batch_size, attack_type, eps, iters, step_size):
# Run the attack
attack = PGDAdversarialDataset(models, eps=eps, step_size=step_size, iters=iters, p=2, universal=(attack_type=="uap"))
attacked_pos_ds, pos_loss_final = make_adversarial_dataset(pos_ds, attack, batch_size)
attacked_neg_ds, neg_loss_final = make_adversarial_dataset(neg_ds, attack, batch_size)
# Verify success
mean_psr, mean_nsr = 0, 0
for model in models:
psr = flip_success(attacked_pos_ds, 0, model) # + == 1, so want it to flip to 0
nsr = flip_success(attacked_neg_ds, 1, model) # - == 0, so want it to flip to 1
mean_psr, mean_nsr = (mean_psr + psr / len(models)), (mean_nsr + nsr / len(models))
if not (psr > UAP_MIN_SUCCESS_RATE and nsr > UAP_MIN_SUCCESS_RATE):
print("psr {}, nsr {} failed to pass threshold {}".format(psr, nsr, UAP_MIN_SUCCESS_RATE))
raise RuntimeError()
print(mean_psr, mean_nsr)
return attacked_pos_ds, attacked_neg_ds, pos_loss_final, neg_loss_final
def compute_geometry(pos_ds, neg_ds, batch_size, eps, iters, step_size):
# Get reference model's datasets
ref_model_ids = REF_IDS[args.embedding_type][args.architecture_type]
ref_models = [load_model(ref_model_id, TRAIN_BASE_PATH)[0] for ref_model_id in ref_model_ids]
ref_filt_pos_ds, ref_filt_neg_ds = filter_dataset(ref_models, pos_ds), filter_dataset(ref_models, neg_ds)
print("\t ref model filter dataset lengths:", len(ref_filt_pos_ds), len(ref_filt_neg_ds))
ref_adv_pos_ds, ref_adv_neg_ds, _, _ = make_perturbed_datasets(ref_models, ref_filt_pos_ds, ref_filt_neg_ds,
batch_size, "adv", eps, iters, step_size)
ref_uap_pos_ds, ref_uap_neg_ds, _, _ = make_perturbed_datasets(ref_models, ref_filt_pos_ds, ref_filt_neg_ds,
batch_size, "uap", eps, iters, step_size)
# Compute features
for which in ["clean", "poisoned"]:
for metadata, base_path, results_path in zip([METADATA_TRAIN, METADATA_TEST, METADATA_HOLDOUT], [TRAIN_BASE_PATH, TEST_BASE_PATH, HOLDOUT_BASE_PATH], [RESULTS_PATH_TRAIN, RESULTS_PATH_TEST, RESULTS_PATH_HOLDOUT]):
model_ids = metadata.index[(metadata.embedding==args.embedding_type)
& (metadata.model_architecture==args.architecture_type)
& (metadata.poisoned==(which=="poisoned"))].tolist()
# Iterate over models
for i, model_id in enumerate(model_ids):
try:
# Load model and only keep samples it correctly classifies
model, _ = load_model(model_id, base_path)
filt_pos_ds, filt_neg_ds = filter_dataset([model], pos_ds), filter_dataset([model], neg_ds)
print("\t model {} len(filt_pos_ds): {}, len(filt_neg_ds): {}".format(model_id,
len(filt_pos_ds),
len(filt_neg_ds)))
# Make adv and UAP datasets
adv_pos_ds, adv_neg_ds, adv_pos_loss_final, adv_neg_loss_final = make_perturbed_datasets([model],
filt_pos_ds,
filt_neg_ds,
batch_size,
"adv",
eps,
iters,
step_size)
uap_pos_ds, uap_neg_ds, uap_pos_loss_final, uap_neg_loss_final = make_perturbed_datasets([model],
filt_pos_ds,
filt_neg_ds,
batch_size,
"uap",
eps,
iters,
step_size)
# Compute boundary thickness
xr_ds_thick = [filt_pos_ds, filt_pos_ds, filt_neg_ds, filt_pos_ds, filt_neg_ds]
xs_ds_thick = [filt_neg_ds, adv_pos_ds, adv_neg_ds, uap_pos_ds, uap_neg_ds]
for xr_ds, xs_ds, file_suffix in zip(xr_ds_thick, xs_ds_thick, THICK_NAMES):
# NOTE: batch_size in boundary_thickness has no effect on the statistical accuracy of the
# computation, it only affects how many inputs go through the DNN at a time. We have to
# set it to a low value (32, for our TrojAI experiments) since we sample 1000 points along
# the line segment between each pair of inputs, implying 32 * 1000 points are going through
# the DNN at a time; feel free to adjust based on how powerful your GPUs are
thick = boundary_thickness(xr_ds, xs_ds, model, [(0, 0.75), (0, 1)], batch_size=32, num_points=1000)
torch.save(thick, os.path.join(results_path,
args.embedding_type,
args.architecture_type,
which + file_suffix + "_thickness{}.pt".format(model_id)))
# Compute boundary tilting
xr_ds_tilt = [filt_pos_ds, filt_neg_ds, filt_pos_ds, filt_neg_ds]
xr_adv_ds_tilt = [adv_pos_ds, adv_neg_ds, uap_pos_ds, uap_neg_ds]
xs_ds_tilt = [ref_adv_pos_ds, ref_adv_neg_ds, ref_uap_pos_ds, ref_uap_neg_ds]
for xr_ds, xs_ds, xr_adv_ds, file_suffix in zip(xr_ds_tilt, xs_ds_tilt, xr_adv_ds_tilt, TILT_NAMES):
tilt = boundary_tilting(xr_ds, xs_ds, xr_adv_ds, model, batch_size=args.batch_size, reduce_clean=False)
torch.save(tilt, os.path.join(results_path,
args.embedding_type,
args.architecture_type,
which + file_suffix + "_tilting{}.pt".format(model_id)))
except Exception:
print("Failed for model_id {}".format(model_id))
# Print progress
print("{0} of {1} {2} models done".format(i, len(model_ids), which))
def get_dataset(embeddings, labels):
embeddings = embeddings.to("cuda")
labels = labels.to("cuda")
dataset = torch.utils.data.TensorDataset(embeddings, labels)
return dataset
# Load in embeddings to use
pos_embeddings = torch.load(os.path.join(BASE_EMBEDDINGS_PATH, args.embedding_type, "pos_embeddings{}.pt".format(args.N)))
pos_labels = torch.load(os.path.join(BASE_EMBEDDINGS_PATH, args.embedding_type, "pos_labels{}.pt".format(args.N)))
pos_ds = get_dataset(pos_embeddings, pos_labels)
neg_embeddings = torch.load(os.path.join(BASE_EMBEDDINGS_PATH, args.embedding_type, "neg_embeddings{}.pt".format(args.N)))
neg_labels = torch.load(os.path.join(BASE_EMBEDDINGS_PATH, args.embedding_type, "neg_labels{}.pt".format(args.N)))
neg_ds = get_dataset(neg_embeddings, neg_labels)
# Compute and save features
step_size = 2 * args.eps / args.iters
compute_geometry(pos_ds, neg_ds, args.batch_size, args.eps, args.iters, step_size)
|
#
# Low-level object abstraction in cDatabase
#
import os
import json
import yaml
from cdatabase.config import cfg
from cdatabase.cdata import cData
import cdatabase.utils as utils
class cObject(object):
###########################################################################
def __init__(self, path: str):
"""
Initialize cObject in a given path
Args:
            path (str): Path to cObject.
"""
self.path = path
self.entries = {}
###########################################################################
def list(self, load_entries: bool = False,
object_id: str = "",
data_name: str = "",
data_id: str = ""):
"""
List cData objects in a cObject directory
Args:
None
"""
if not os.path.isdir(self.path):
raise RuntimeError("Path to cObject ("+self.path+") doesn't exit")
# List directories
dir_list = utils.dir_list(self.path, data_name, data_id)
for name in dir_list:
# Attempt to load data
p = os.path.join(self.path, name)
data = cData(p)
# Check if valid format
if data.is_valid():
add_data = False
if load_entries:
data.load(ignore_errors=True)
# Check if belongs to a requested object using ID
if data.is_loaded() and (object_id == '' or data.belongs_to_object(object_id)) \
and (data_id == '' or data.belongs_to_id(data_id)):
add_data = True
if add_data:
self.entries[name] = data
return True
###########################################################################
def get_entries(self):
"""
Get cData objects
Args:
None
"""
return self.entries
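# --- Hedged usage sketch (added; not part of the original module) ---
# Hypothetical example of listing a cObject directory; the path below is
# made up and the call assumes a directory laid out as cData entries.
#
#   obj = cObject("/path/to/object_dir")
#   obj.list(load_entries=True)
#   for name, data in obj.get_entries().items():
#       print(name)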
|
import matplotlib.pyplot as plt
import numpy as np
import pickle
from collections import defaultdict
from shapely.geometry import Point, Polygon
# Init NuScenes. Requires the dataset to be stored on disk.
from nuscenes.nuscenes import NuScenes
from nuscenes.map_expansion.map_api import NuScenesMap
nusc = NuScenes(version='v1.0-trainval', \
dataroot='../../../../data/', \
verbose=False)
so_map = NuScenesMap(dataroot='../../../../data/', \
map_name='singapore-onenorth')
bs_map = NuScenesMap(dataroot='../../../../data/', \
map_name='boston-seaport')
sh_map = NuScenesMap(dataroot='../../../../data/', \
map_name='singapore-hollandvillage')
sq_map = NuScenesMap(dataroot='../../../../data/', \
map_name='singapore-queenstown')
# dict mapping map name to map file
map_files = {'singapore-onenorth': so_map,
'boston-seaport': bs_map,
'singapore-hollandvillage': sh_map,
'singapore-queenstown': sq_map}
# dict with person token as key and other features as values
pedestrian_details = dict()
# dict with scene number as key and trajectories and map name as values
scene_info = dict()
# initializing a dict for layer names and number of points in each layer
layer_list = so_map.layer_names
layer_list.append("white_area")
layer_dict = dict.fromkeys(layer_list, 0)
# defining the sensor to extract ego_pose from sample_data,
# we need a sensor to get sample_data
sensor = "LIDAR_TOP"
for n_scene in range(850):
print(n_scene)
# initialize the scene
my_scene = nusc.scene[n_scene]
# getting the map name
cur_map = nusc.get('log', my_scene["log_token"])["location"]
# entering the scene number and map name
scene_info[str(n_scene)] = {"trajectories_x": [], "trajectories_y": [],\
"map_name": cur_map}
# per scene person token database
seen_person_tokens = []
# first sample
first_sample_token = my_scene['first_sample_token']
sample = nusc.get('sample', first_sample_token)
while True:
for ann in sample['anns']:
group_name = nusc.get('sample_annotation', ann)['category_name']
if "human.pedestrian" in group_name and \
nusc.get('sample_annotation', ann)['instance_token'] not in seen_person_tokens:
cur_person_token = nusc.get('sample_annotation', ann)['instance_token']
cur_person_instance = nusc.get("instance", cur_person_token)
nbr_samples = cur_person_instance['nbr_annotations']
# initializing the dict with the new person token
pedestrian_details[cur_person_token] = {"translation":[],
"rotation":[],
"velocity":[],
"ego_translation":[],
"ego_rotation":[],
"ego_time": [],
"d_curb":[],
"height":[]}
first_token = cur_person_instance['first_annotation_token']
current_token = first_token
for i in range(nbr_samples):
current_ann = nusc.get('sample_annotation', current_token)
# getting the sample corresponding to this annotation to retrieve
# ego details
annotation_sample = nusc.get('sample', current_ann['sample_token'])
if current_ann["attribute_tokens"]:
current_attr = nusc.get('attribute', current_ann['attribute_tokens'][0])['name']
if current_attr.split(".")[1] != "sitting_lying_down":
# updating pedestrian details dict
pedestrian_details[cur_person_token]["group"] = group_name.split(".")[-1]
pedestrian_details[cur_person_token]["translation"].append(
current_ann["translation"])
pedestrian_details[cur_person_token]["rotation"].append(
current_ann["rotation"])
pedestrian_details[cur_person_token]["height"].append(
current_ann["size"][2])
pedestrian_details[cur_person_token]["scene_no"] = n_scene
pedestrian_details[cur_person_token]["map_name"] = cur_map
# only takes velocity at a particular time step
pedestrian_details[cur_person_token]["velocity"].append(
list(nusc.box_velocity(current_token)))
# updating ego details
lidar_data = nusc.get('sample_data',
annotation_sample['data'][sensor])
ego_token = lidar_data['ego_pose_token']
ego_pose = nusc.get('ego_pose', ego_token)
pedestrian_details[cur_person_token]["ego_translation"].append(
ego_pose["translation"])
pedestrian_details[cur_person_token]["ego_rotation"].append(
ego_pose["rotation"])
pedestrian_details[cur_person_token]["ego_time"].append(
ego_pose["timestamp"])
# calculating d_curb
cur_ped_x = current_ann["translation"][0]
cur_ped_y = current_ann["translation"][1]
layers_on_point_dict = map_files[cur_map].layers_on_point(
cur_ped_x, cur_ped_y)
# get all the layers in a list
layers_from_dict = [l for l in list(layers_on_point_dict.keys()) \
if layers_on_point_dict[l]]
                            # compute d_curb if the pedestrian is on a walkway or in a white area, else 0
if "walkway" in layers_from_dict or len(layers_from_dict) == 0:
                                # searching for road-type polygons within 25m around the pedestrian
records_patch = map_files[cur_map].get_records_in_patch(
(current_ann["translation"][0]-25,
current_ann["translation"][1]-25,
current_ann["translation"][0]+25,
current_ann["translation"][1]+25),
["lane", "road_block", "road_segment"])
# save the closest distance to any road polygon
d_curb = 30
for l, pol in records_patch.items():
for poli in pol:
poli_token = map_files[cur_map].get(l,poli)["polygon_token"]
cur_poly = map_files[cur_map].extract_polygon(poli_token)
cur_point = Point(cur_ped_x, cur_ped_y)
d_curb = min(d_curb, cur_point.distance(cur_poly))
pedestrian_details[cur_person_token]["d_curb"].append(
d_curb)
else:
# pedestrian on the road has 0 distance to curb
pedestrian_details[cur_person_token]["d_curb"].append(0)
current_token = current_ann["next"]
seen_person_tokens.append(cur_person_token)
if sample['next'] != '':
sample = nusc.get('sample', sample['next'])
else:
#last sample of the scene
break
for k, val in pedestrian_details.items():
velocities_x = [v[0] for v in val["velocity"]]
velocities_y = [v[1] for v in val["velocity"]]
times = val["ego_time"]
del_vx = np.diff(velocities_x)
del_vy = np.diff(velocities_y)
del_time = 1e-6 * (np.diff(times))
acc_x = [dx/dt for dx,dt in zip(del_vx, del_time)]
acc_y = [dy/dt for dy,dt in zip(del_vy, del_time)]
if len(acc_x) > 0:
acc_x.append(acc_x[-1])
acc_y.append(acc_y[-1])
pedestrian_details[k]["acceleration_x"] = acc_x
pedestrian_details[k]["acceleration_y"] = acc_y
pedestrian_details[k]["del_time"] = del_time
new_ped_details = {}
# extracting the ped trajs with more than 16 samples
for k, v in pedestrian_details.items():
if len(v['translation']) > 15:
if not np.any(np.isnan(np.array(v['velocity']))):
cur_diffs = [round(1e-6*(t-s),1) for s, t in zip(v['ego_time'], v['ego_time'][1:])]
if not any(i > 0.9 for i in cur_diffs):
new_ped_details[k] = v
# SAVING all the dict files in details folder
with open('details/pedestrian_details.pkl', 'wb') as handle:
pickle.dump(pedestrian_details, handle, protocol=pickle.HIGHEST_PROTOCOL)
# saving the new ped_details dict
with open('details/new_ped_details.pkl', 'wb') as handle:
pickle.dump(new_ped_details, handle, protocol=pickle.HIGHEST_PROTOCOL)
|
from functools import wraps
def if_arg(func):
@wraps(func)
def run_function_only_if_arg(*args):
if args[0]:
return func(*args)
else:
return {
"valid": None,
"value": None
}
return run_function_only_if_arg
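# --- Hedged usage sketch (added; not part of the original module) ---
# Illustrates the decorator's behaviour: the wrapped function only runs when
# its first positional argument is truthy; otherwise a null result dict is
# returned. The function name and values below are hypothetical.
@if_arg
def _demo_validate(value):
    return {"valid": True, "value": value}

# _demo_validate("GATC") -> {"valid": True, "value": "GATC"}
# _demo_validate("")     -> {"valid": None, "value": None}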
|
"""
Qscattering
===========
"""
# sphinx_gallery_thumbnail_path = '../images/Experiment_QextVSDiameter.png'
def run():
import numpy as np
from PyMieSim.Experiment import SphereSet, SourceSet, Setup
scatSet = SphereSet( Diameter = np.linspace(100e-9, 10000e-9, 400),
Index = [1.4, 1.5],
nMedium = [1] )
sourceSet = SourceSet( Wavelength = 400e-9,
Polarization = 0,
Amplitude = 1)
Experiment = Setup(ScattererSet = scatSet, SourceSet = sourceSet)
Data = Experiment.Get(Input=['Qext'])
Data.Plot(y='Qext', x='Diameter')
if __name__ == '__main__':
run()
|
from django.shortcuts import render
from .models import Subscribe
from .forms import NewsletterForm, SubscribeForm
import sendmail
from markdown import markdown
from django.http import HttpResponseRedirect
def newsletter(request):
if request.user.is_authenticated():
form=NewsletterForm(request.POST, request.FILES)
if "newsletter" in request.POST :
destination=Subscribe.objects.filter(newsletter=request.POST.get("newsletter"))
else :
destination=Subscribe.objects.only("email") #values("email")
addresses=[]
for i in destination:
addresses.append(str(i))
context={'form':form, 'newsletters':Subscribe.objects.values('newsletter').distinct()}
if "envoyer" in request.POST:
email=form.save(commit=False,)
email.save()
try:
sendmail.sendmail(addresses,str(email.subject),markdown(str(email.body)), str(email.attachement.path))
except:
sendmail.sendmail(addresses,str(email.subject),markdown(str(email.body)))
return render(request,'newsletter.html',context)
else:
return HttpResponseRedirect('/')
def subscribe_default(request):
form = SubscribeForm(request.POST or None )
if form.is_valid():
new_subscribe=form.save(commit=False)
ip=get_ip(request)
new_subscribe.save()
return render(request, "subscribed.html")
def subscribe_specific(request):
form = SubscribeForm(request.POST or None )
if form.is_valid():
        if 'subscribe' in request.POST:
new_subscribe=form.save(commit=False)
ip=get_ip(request)
new_subscribe.save()
        if 'unsubscribe' in request.POST:
pass
return render(request, "subscribed.html", context)
def unsubscribe (request):
subscriber=Subscribe.objects.filter(email=request.POST.get("email"))[0]
subscriber.newsletter="deleted"
subscriber.save()
return render(request, "subscribed.html")
def get_ip(request):
try:
x_forward = request.META.get("HTTP_X_FORWARDED_FOR")
if x_forward:
ip=x_forward.split(",")[0]
else:
ip=request.META.get("REMOTE_ADDR")
except:
ip=""
return ip
|
# -*- python -*-
load(
"@drake//tools/workspace:execute.bzl",
"execute_and_return",
)
def setup_new_deb_archive(repo_ctx):
"""Behaves like new_deb_archive, except that (1) this is a macro instead of
a rule and (2) this macro returns an error status instead of fail()ing.
The return value is a struct with a field `error` that will be None on
success or else a detailed message on any failure.
"""
name = repo_ctx.attr.name
filenames = repo_ctx.attr.filenames
mirrors = repo_ctx.attr.mirrors
sha256s = repo_ctx.attr.sha256s
build_file = repo_ctx.attr.build_file
# Download and unpack all of the debs.
for i in range(len(filenames)):
filename = filenames[i]
if i == len(sha256s):
sha256s = sha256s + [""]
sha256 = sha256s[i]
if not sha256:
# We do not permit an empty checksum; empty means "don't care".
sha256 = "0" * 64
repo_ctx.download(
url = [mirror + "/" + filename for mirror in mirrors],
output = filename,
sha256 = sha256,
)
result = execute_and_return(
repo_ctx,
["dpkg-deb", "-x", filename, "."],
)
if result.error:
return result
# Add in the build file.
repo_ctx.symlink(build_file, "BUILD.bazel")
# Success.
return struct(error = None)
def _impl(repo_ctx):
result = setup_new_deb_archive(repo_ctx)
if result.error != None:
fail("Unable to complete setup for @{} repository: {}".format(
# (forced line break)
repo_ctx.name,
result.error,
))
new_deb_archive = repository_rule(
attrs = {
"filenames": attr.string_list(
doc = """
Base filenames of the debs, e.g., ["libfoo-dev_123_amd64.deb"].
When multiple files are listed, they will all be extracted atop
each other (within our sandbox), as is typical for Debian install.
""",
mandatory = True,
allow_empty = False,
),
"mirrors": attr.string_list(
doc = """
List of URLs to download from, without the filename portion, e.g.,
["https://example.com/archives"].
""",
mandatory = True,
allow_empty = False,
),
"sha256s": attr.string_list(
doc = """
Checksums of the files. When unsure, you may set it to an empty
string or list; the checksum error will offer a suggestion. The
sha256s and filenames are matched ordering (i.e., parallel lists).
""",
),
"build_file": attr.label(
doc = """
Label for BUILD.bazel file to add into the repository. This should
contain the rules that expose the archive contents for consumers.
The *.deb file contents will appear at ".", so paths are like,
e.g., `hdrs = glob(["usr/include/foo/**/*.h"]),`.
""",
mandatory = True,
allow_files = True,
),
},
implementation = _impl,
)
"""A repository rule that downloads and unpacks one or more *.deb files.
"""
|
from typing import Any, Dict, Sequence
from coba.exceptions import CobaException
from coba.utilities import PackageChecker
from coba.environments import Context, Action
from coba.pipes import Flatten
from coba.encodings import InteractionsEncoder
from coba.learners.primitives import Probs, Info, Learner
class LinUCBLearner(Learner):
"""A contextual bandit learner that represents expected reward as a
linear function of context and action features. Exploration is carried
out according to upper confidence bound estimates.
This is an implementation of the Chu et al. (2011) LinUCB algorithm using the
`Sherman-Morrison formula`__ to iteratively calculate the inversion matrix. This
implementation's computational complexity is linear with respect to feature count.
Remarks:
The Sherman-Morrsion implementation used below is given in long form `here`__.
References:
Chu, Wei, Lihong Li, Lev Reyzin, and Robert Schapire. "Contextual bandits
with linear payoff functions." In Proceedings of the Fourteenth International
Conference on Artificial Intelligence and Statistics, pp. 208-214. JMLR Workshop
and Conference Proceedings, 2011.
__ https://en.wikipedia.org/wiki/Sherman%E2%80%93Morrison_formula
__ https://research.navigating-the-edge.net/assets/publications/linucb_alternate_formulation.pdf
"""
def __init__(self, alpha: float = 1, features: Sequence[str] = [1, 'a', 'ax']) -> None:
"""Instantiate a LinUCBLearner.
Args:
alpha: This parameter controls the exploration rate of the algorithm. A value of 0 will cause actions
to be selected based on the current best point estimate (i.e., no exploration) while a value of inf
means that actions will be selected based solely on the bounds of the action point estimates (i.e.,
we will always take actions that have the largest bound on their point estimate).
features: Feature set interactions to use when calculating action value estimates. Context features
are indicated by x's while action features are indicated by a's. For example, xaa means to cross the
features between context and actions and actions.
"""
PackageChecker.numpy("LinUCBLearner.__init__")
self._alpha = alpha
self._X = features
self._X_encoder = InteractionsEncoder(features)
self._theta = None
self._A_inv = None
@property
def params(self) -> Dict[str, Any]:
return {'family': 'LinUCB', 'alpha': self._alpha, 'features': self._X}
def predict(self, context: Context, actions: Sequence[Action]) -> Probs:
import numpy as np #type: ignore
if isinstance(actions[0], dict) or isinstance(context, dict):
raise CobaException("Sparse data cannot be handled by this algorithm.")
if not context:
self._X_encoder = InteractionsEncoder(list(set(filter(None,[ f.replace('x','') if isinstance(f,str) else f for f in self._X ]))))
context = list(Flatten().filter([list(context)]))[0] if context else []
features: np.ndarray = np.array([self._X_encoder.encode(x=context,a=action) for action in actions]).T
if(self._A_inv is None):
self._theta = np.zeros(features.shape[0])
self._A_inv = np.identity(features.shape[0])
point_estimate = self._theta @ features
point_bounds = np.diagonal(features.T @ self._A_inv @ features)
action_values = point_estimate + self._alpha*np.sqrt(point_bounds)
max_indexes = np.where(action_values == np.amax(action_values))[0]
return [ int(ind in max_indexes)/len(max_indexes) for ind in range(len(actions))]
def learn(self, context: Context, action: Action, reward: float, probability: float, info: Info) -> None:
import numpy as np
if isinstance(action, dict) or isinstance(context, dict):
raise CobaException("Sparse data cannot be handled by this algorithm.")
if not context:
self._X_encoder = InteractionsEncoder(list(set(filter(None,[ f.replace('x','') if isinstance(f,str) else f for f in self._X ]))))
context = list(Flatten().filter([list(context)]))[0] if context else []
features: np.ndarray = np.array(self._X_encoder.encode(x=context,a=action)).T
if(self._A_inv is None):
self._theta = np.zeros((features.shape[0]))
self._A_inv = np.identity(features.shape[0])
r = self._theta @ features
w = self._A_inv @ features
v = features @ w
self._A_inv = self._A_inv - np.outer(w,w)/(1+v)
self._theta = self._theta + (reward-r)/(1+v) * w
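# --- Hedged illustration (added; not part of the original learner) ---
# A minimal standalone check of the Sherman-Morrison rank-1 update used in
# learn() above: (A + f f^T)^-1 == A^-1 - (A^-1 f)(A^-1 f)^T / (1 + f^T A^-1 f).
# All `_demo_*` names are made up and the example only runs when this module
# is executed directly.
if __name__ == "__main__":
    import numpy as np
    _demo_rng = np.random.default_rng(0)
    _demo_f = _demo_rng.normal(size=3)
    _demo_A = np.identity(3)
    _demo_A_inv = np.identity(3)
    _demo_w = _demo_A_inv @ _demo_f
    _demo_v = _demo_f @ _demo_w
    _demo_updated = _demo_A_inv - np.outer(_demo_w, _demo_w) / (1 + _demo_v)
    assert np.allclose(_demo_updated, np.linalg.inv(_demo_A + np.outer(_demo_f, _demo_f)))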
|
#!/usr/bin/env python
"""
Classes:
Game: The game object which holds utility methods for printing the game board,
calculating winner, making moves
AI: The AI player
"""
from random import choice
from typing import List
import argparse
class Game():
"""
Class for the game itself
"""
# Three in a row combos that win the game
winning_combos = (
[0, 1, 2], [3, 4, 5], [6, 7, 8],
[0, 3, 6], [1, 4, 7], [2, 5, 8],
[0, 4, 8], [2, 4, 6])
def __init__(self):
"""Constructor"""
self.board = [None for i in range(9)]
self.ai_moves_count = 0
self.status = "Running"
self.current_player = "X"
def print_board(self) -> None:
"""Print the game board"""
print('\n\n\n')
for element in [self.board[i:i + 3] for i in range(0, len(self.board), 3)]:
print(element)
def change_player(self) -> None:
"""Change the current player"""
self.current_player = self.get_enemy(self.current_player)
@staticmethod
def get_enemy(player: str) -> str:
"""Return the enemy player for a given player"""
if player == "X":
return "O"
return "X"
def available_cells(self) -> List[int]:
"""Return an array of empty board cells"""
return [cell for cell, cell_value in enumerate(self.board) if cell_value is None]
def get_own_squares(self, player: str) -> List[int]:
"""Return the board cells occupied by given player"""
return [cell for cell, cell_value in enumerate(self.board) if cell_value == player]
def get_winner(self):
"""
Check for a winner
Returns:
Either the winning player or None
"""
for player in ('X', 'O'):
positions = self.get_own_squares(player)
for combo in self.winning_combos:
win = True
for pos in combo:
if pos not in positions:
win = False
if win:
return player
return None
def is_game_over(self) -> bool:
"""Check whether the game ends or not"""
if not self.available_cells():
return True
if self.get_winner() is not None:
return True
return False
def make_move(self, cell: int, player: str) -> None:
"""Make a move on the board"""
self.board[cell] = player
self.change_player()
def validate_move(self, cell: int) -> bool:
"""Validate that the move is legal"""
if cell < 0 or cell > 8:
return False
if self.board[cell] is not None:
return False
return True
class AI(object):
"""The AI class to play against"""
def __init__(self, difficulty, game):
"""
Args:
difficulty: The difficulty level of the AI. Either master (default) or easy
game: The current game being played
"""
self.game = game
self.difficulty = 'master'
if difficulty:
self.difficulty = difficulty
def __minimax(self, game, depth, player) -> int:
"""Recursively calculate the minimax value of a given game state"""
if game.is_game_over():
if game.get_winner() == "X":
return -1
elif game.get_winner() == "Y":
return 1
return 0
if player == "O":
best_value = -1
for move in game.available_cells():
game.make_move(move, player)
move_value = self.__minimax(game, depth-1, game.get_enemy(player))
game.make_move(move, None)
best_value = max(best_value, move_value)
return best_value
best_value = 1
for move in game.available_cells():
game.make_move(move, player)
move_value = self.__minimax(game, depth-1, game.get_enemy(player))
game.make_move(move, None)
best_value = min(best_value, move_value)
return best_value
def __get_best_choice(self, game, depth, player) -> int:
"""
Calculate the best possible move for the given game state
Args:
game: The current game being played
depth: How far along the game is
player: The player who wants to get the best choice
Returns:
The position of the cell to play
"""
neutral_value = 0
choices = []
for move in game.available_cells():
game.make_move(move, player)
move_value = self.__minimax(game, depth-1, game.get_enemy(player))
game.make_move(move, None)
if move_value > neutral_value:
choices = [move]
elif move_value == neutral_value:
choices.append(move)
        if choices:
            return choice(choices)
free_cells = game.available_cells()
        return choice(free_cells)
def __make_easy_move(self) -> int:
"""Make a random move and return the played cell"""
available_moves = self.game.available_cells()
        cell = choice(available_moves)
return cell
def __make_master_move(self) -> int:
"""Make a calculated move"""
turns_left = len(self.game.available_cells())
move = self.__get_best_choice(self.game, turns_left, self.game.current_player)
return move
def play(self) -> int:
"""Call the correct function depending on the selected difficulty"""
if self.difficulty == 'easy':
return self.__make_easy_move()
return self.__make_master_move()
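# --- Added explanatory note (hedged) ---
# Scoring convention used by AI.__minimax above: terminal positions are valued
# from O's point of view (+1 if O wins, -1 if X wins, 0 for a draw), so O
# maximises and X minimises. For example, if X already holds cells 0 and 1 and
# it is X's turn, X wins by playing cell 2, so that position's value is -1.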
def main() -> None:
"""Main game loop"""
parser = argparse.ArgumentParser()
parser.add_argument("--easy", help="Run the AI in easy mode", action="store_true")
args = parser.parse_args()
if args.easy:
difficulty = "easy"
else:
difficulty = "master"
# Create the game and the ai player
game = Game()
ai_player = AI(difficulty, game)
while game.is_game_over() is False:
game.print_board()
if game.current_player == "X":
player_move = None
while player_move is None or game.validate_move(player_move) is False:
try:
player_move = int(input('0-8: '))
except (ValueError, IndexError):
print("Please insert a number")
game.make_move(player_move, game.current_player)
else:
game.make_move(ai_player.play(), game.current_player)
game.print_board()
if __name__ == "__main__":
main()
|
from dtech_instagram.InstagramAPI.src.http.Response.Objects.Location import Location
from .Response import Response
class LocationResponse(Response):
def __init__(self, response):
self.venues = None
if self.STATUS_OK == response['status']:
locations = []
for location in response['venues']:
locations.append(Location(location))
self.venues = locations
else:
self.setMessage(response['message'])
self.setStatus(response['status'])
def getVenues(self):
return self.venues
|
import MonsterBuilder
def create(lb,xpos):
xml = """
<level>
<!-- watchtower -->
<sprite type = 'ZoomTrigger.ZoomTriggerSprite' x='0' y='250' width='100' height='500' zoom_fact='1.0'/>
<sprite type = 'ZoomTrigger.ZoomTriggerSprite' x='165' y='260' width='128' height='100' zoom_fact='0.1666'/>
<sprite type = 'ZoomTrigger.ZoomTriggerSprite' x='330' y='250' width='100' height='500' zoom_fact='1.0'/>
<sprite type = 'WatchtowerVisual.WatchtowerVisualSprite' x='165' y='92' width='128' height='285' angle='0' restitution='0.2' static='true' friction='0.5' density='20' firstframe='watchtower.png' />
</level>
"""
MonsterBuilder.createFromXMLString(lb,xpos,xml)
|
# Note: Be sure to clean up output and dask work-space before running test
import argparse
import os
import time
import cudf
from dask.distributed import Client, performance_report
from dask_cuda import LocalCUDACluster
from nvtabular import Dataset, Workflow
from nvtabular import io as nvt_io
from nvtabular import ops as ops
def setup_rmm_pool(client, pool_size):
client.run(cudf.set_allocator, pool=True, initial_pool_size=pool_size, allocator="default")
return None
def main(args):
# Input
data_path = args.data_path
out_path = args.out_path
freq_limit = args.freq_limit
out_files_per_proc = args.splits
if args.protocol == "ucx":
os.environ["UCX_TLS"] = "tcp,cuda_copy,cuda_ipc,sockcm"
# Use Criteo dataset by default (for now)
cont_names = (
args.cont_names.split(",") if args.cont_names else ["I" + str(x) for x in range(1, 14)]
)
cat_names = (
args.cat_names.split(",") if args.cat_names else ["C" + str(x) for x in range(1, 27)]
)
label_name = ["label"]
if args.cat_splits:
tree_width = {name: int(s) for name, s in zip(cat_names, args.cat_splits.split(","))}
else:
tree_width = {col: 1 for col in cat_names}
if args.cat_names is None:
# Using Criteo... Use more hash partitions for
# known high-cardinality columns
tree_width["C20"] = 8
tree_width["C1"] = 8
tree_width["C22"] = 4
tree_width["C10"] = 4
tree_width["C21"] = 2
tree_width["C11"] = 2
tree_width["C23"] = 2
tree_width["C12"] = 2
# Specify categorical caching location
cat_cache = None
if args.cat_cache:
cat_cache = args.cat_cache.split(",")
if len(cat_cache) == 1:
cat_cache = cat_cache[0]
else:
# If user is specifying a list of options,
# they must specify an option for every cat column
assert len(cat_names) == len(cat_cache)
if isinstance(cat_cache, str):
cat_cache = {col: cat_cache for col in cat_names}
elif isinstance(cat_cache, list):
cat_cache = {name: c for name, c in zip(cat_names, cat_cache)}
else:
# Criteo/DLRM Defaults
cat_cache = {col: "device" for col in cat_names}
if args.cat_names is None:
cat_cache["C20"] = "host"
cat_cache["C1"] = "host"
# Only need to cache the largest two on a dgx-2
if args.n_workers < 16:
cat_cache["C22"] = "host"
cat_cache["C10"] = "host"
# Use total device size to calculate args.device_limit_frac
device_size = nvt_io.device_mem_size(kind="total")
device_limit = int(args.device_limit_frac * device_size)
device_pool_size = int(args.device_pool_frac * device_size)
part_size = int(args.part_mem_frac * device_size)
# Setup LocalCUDACluster
if args.protocol == "tcp":
cluster = LocalCUDACluster(
protocol=args.protocol,
n_workers=args.n_workers,
CUDA_VISIBLE_DEVICES=args.devs,
device_memory_limit=device_limit,
local_directory=args.dask_workspace,
dashboard_address=":3787",
)
else:
cluster = LocalCUDACluster(
protocol=args.protocol,
n_workers=args.n_workers,
CUDA_VISIBLE_DEVICES=args.devs,
enable_nvlink=True,
device_memory_limit=device_limit,
local_directory=args.dask_workspace,
dashboard_address=":3787",
)
client = Client(cluster)
# Setup RMM pool
if not args.no_rmm_pool:
setup_rmm_pool(client, device_pool_size)
# Define Dask NVTabular "Workflow"
processor = Workflow(
cat_names=cat_names, cont_names=cont_names, label_name=label_name, client=client
)
processor.add_feature([ops.ZeroFill(), ops.LogOp()])
processor.add_preprocess(
ops.Categorify(
out_path=out_path,
tree_width=tree_width,
cat_cache=cat_cache,
freq_threshold=freq_limit,
on_host=args.cat_on_host,
)
)
processor.finalize()
dataset = Dataset(data_path, "parquet", part_size=part_size)
# Execute the dask graph
runtime = time.time()
if args.profile is not None:
with performance_report(filename=args.profile):
processor.apply(
dataset,
shuffle=nvt_io.Shuffle.PER_WORKER
if args.worker_shuffle
else nvt_io.Shuffle.PER_PARTITION,
out_files_per_proc=out_files_per_proc,
output_path=out_path,
)
else:
processor.apply(
dataset,
shuffle=nvt_io.Shuffle.PER_WORKER
if args.worker_shuffle
else nvt_io.Shuffle.PER_PARTITION,
out_files_per_proc=out_files_per_proc,
output_path=out_path,
)
runtime = time.time() - runtime
print("\nDask-NVTabular DLRM/Criteo benchmark")
print("--------------------------------------")
print(f"partition size | {part_size}")
print(f"protocol | {args.protocol}")
print(f"device(s) | {args.devs}")
print(f"rmm-pool | {(not args.no_rmm_pool)}")
print(f"out_files_per_proc | {args.splits}")
print(f"worker-shuffle | {args.worker_shuffle}")
print("======================================")
print(f"Runtime[s] | {runtime}")
print("======================================\n")
client.close()
def parse_args():
parser = argparse.ArgumentParser(description="Merge (dask/cudf) on LocalCUDACluster benchmark")
parser.add_argument(
"-d",
"--devs",
default="0,1,2,3",
type=str,
help='GPU devices to use (default "0, 1, 2, 3").',
)
parser.add_argument(
"-p",
"--protocol",
choices=["tcp", "ucx"],
default="tcp",
type=str,
help="The communication protocol to use.",
)
parser.add_argument("--no-rmm-pool", action="store_true", help="Disable the RMM memory pool")
parser.add_argument(
"--profile",
metavar="PATH",
default=None,
type=str,
help="Write dask profile report (E.g. dask-report.html)",
)
parser.add_argument("--data-path", type=str, help="Raw dataset path.")
parser.add_argument("--out-path", type=str, help="Root output path.")
parser.add_argument("--dask-workspace", default=None, type=str, help="Dask workspace path.")
parser.add_argument(
"-s", "--splits", default=24, type=int, help="Number of splits to shuffle each partition"
)
parser.add_argument(
"--part-mem-frac",
default=0.162,
type=float,
help="Fraction of device memory for each partition",
)
parser.add_argument(
"-f", "--freq-limit", default=0, type=int, help="Frequency limit on cat encodings."
)
parser.add_argument(
"--device-limit-frac",
default=0.8,
type=float,
help="Fractional device-memory limit (per worker).",
)
parser.add_argument(
"--device-pool-frac", default=0.8, type=float, help="Fractional rmm pool size (per worker)."
)
parser.add_argument(
"--worker-shuffle", action="store_true", help="Perform followup shuffle on each worker."
)
parser.add_argument(
"--cat-names", default=None, type=str, help="List of categorical column names."
)
parser.add_argument(
"--cat-cache",
default=None,
type=str,
help='Where to cache each category (Ex "device, host, disk").',
)
parser.add_argument(
"--cat-on-host",
action="store_true",
help="Whether to move categorical data to host between tasks.",
)
parser.add_argument(
"--cat-splits",
default=None,
type=str,
help='How many splits to use for each category (Ex "8, 4, 2, 1").',
)
parser.add_argument(
"--cont-names", default=None, type=str, help="List of continuous column names."
)
args = parser.parse_args()
args.n_workers = len(args.devs.split(","))
return args
if __name__ == "__main__":
main(parse_args())
|
# -*- coding: utf-8 -*-
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""Kite install utilities test."""
# Local imports
import os
import re
import sys
# Third-party imports
import pytest
# Local imports
from spyder.plugins.completion.kite.utils.install import (
KiteInstallationThread, DOWNLOADING_INSTALLER, DOWNLOADING_SCRIPT,
INSTALLING, FINISHED)
from spyder.plugins.completion.kite.utils.status import (
check_if_kite_installed, check_if_kite_running)
# Time to wait until the installation finishes
# (6 minutes in milliseconds)
INSTALL_TIMEOUT = 360000
@pytest.mark.slow
@pytest.mark.first
@pytest.mark.skip(reason="Fails on CIs and it's too heavy to run locally")
def test_kite_install(qtbot):
"""Test the correct execution of the installation process of kite."""
install_manager = KiteInstallationThread(None)
installation_statuses = []
def installation_status(status):
installation_statuses.append(status)
def error_msg(error):
# Should not enter here
assert False
def download_progress(progress, total):
assert total != 0
def finished():
if sys.platform.startswith("linux"):
expected_installation_status = [
DOWNLOADING_SCRIPT,
DOWNLOADING_INSTALLER,
INSTALLING,
FINISHED]
else:
expected_installation_status = [
DOWNLOADING_INSTALLER,
INSTALLING,
FINISHED]
# This status can be obtained the second time our tests are run
if not installation_statuses == ['Installation finished']:
assert installation_statuses == expected_installation_status
install_manager.sig_installation_status.connect(installation_status)
install_manager.sig_error_msg.connect(error_msg)
install_manager.sig_download_progress.connect(download_progress)
install_manager.finished.connect(finished)
with qtbot.waitSignal(install_manager.finished, timeout=INSTALL_TIMEOUT):
install_manager.install()
# Check that kite was installed and is running
qtbot.waitUntil(
lambda: check_if_kite_installed() and check_if_kite_running(),
timeout=5000)
if __name__ == "__main__":
pytest.main()
|
__author__ = 'asistente'
from unittest import TestCase
from selenium import webdriver
from selenium.webdriver.common.by import By
class FunctionalTest(TestCase):
def setUp(self):
self.browser = webdriver.Chrome()
self.browser.implicitly_wait(2)
def tearDown(self):
self.browser.quit()
def test_t1(self): # test_title
self.browser.get('http://localhost:8000')
self.assertIn('Busco Ayuda', self.browser.title)
def test_t2(self): # test_registro
self.browser.get('http://localhost:8000')
link = self.browser.find_element_by_id('id_register')
link.click()
nombre = self.browser.find_element_by_id('id_nombre')
nombre.send_keys('Juan Daniel')
apellidos = self.browser.find_element_by_id('id_apellidos')
apellidos.send_keys('Arevalo')
experiencia = self.browser.find_element_by_id('id_aniosExperiencia')
experiencia.send_keys('5')
self.browser.find_element_by_xpath(
"//select[@id='id_tiposDeServicio']/option[text()='Desarrollador Web']").click()
telefono = self.browser.find_element_by_id('id_telefono')
telefono.send_keys('3173024578')
correo = self.browser.find_element_by_id('id_correo')
correo.send_keys('[email protected]')
imagen = self.browser.find_element_by_id('id_imagen')
        imagen.send_keys(r'C:\imagen.png')
nombreUsuario = self.browser.find_element_by_id('id_username')
nombreUsuario.send_keys('juan645')
clave = self.browser.find_element_by_id('id_password')
clave.send_keys('clave123')
botonGrabar = self.browser.find_element_by_id('id_grabar')
botonGrabar.click()
self.browser.implicitly_wait(3)
span = self.browser.find_element(By.XPATH, '//span[text()="Juan Daniel Arevalo"]')
self.assertIn('Juan Daniel Arevalo', span.text)
def test_t3(self): # test_verDetalle
self.browser.get('http://localhost:8000')
span = self.browser.find_element(By.XPATH, '//span[text()="Juan Daniel Arevalo"]')
span.click()
self.browser.implicitly_wait(3)
h2 = self.browser.find_element(By.XPATH, '//h2[text()="Juan Daniel Arevalo"]')
self.assertIn('Juan Daniel Arevalo', h2.text)
def test_t4(self): # test_login
self.browser.get('http://localhost:8000')
link = self.browser.find_element_by_id('id_login')
link.click()
nombreUsuario = self.browser.find_element_by_id('id_username1')
nombreUsuario.send_keys('juan645')
clave = self.browser.find_element_by_id('id_password1')
clave.send_keys('clave123')
botonGrabar = self.browser.find_element_by_id('id_entrar')
botonGrabar.click()
self.browser.implicitly_wait(3)
span = self.browser.find_elements_by_class_name("glyphicon-log-out")
self.assertTrue(len(span) > 0)
    def test_t5(self): # Edit profile
self.browser.get('http://localhost:8000')
link = self.browser.find_element_by_id('id_login')
link.click()
nombreUsuario = self.browser.find_element_by_id('id_username1')
nombreUsuario.send_keys('juan645')
clave = self.browser.find_element_by_id('id_password1')
clave.send_keys('clave123')
botonGrabar = self.browser.find_element_by_id('id_entrar')
botonGrabar.click()
self.browser.implicitly_wait(5)
# link = WebDriverWait( self.browser, 60).until(EC.presence_of_element_located((By.ID, 'id_editar')))
#visibility_of_element_located
self.browser.implicitly_wait(5)
span = self.browser.find_elements_by_class_name('close')
self.browser.implicitly_wait(15)
span[0].click()
link = self.browser.find_element_by_id('id_editar')
self.browser.implicitly_wait(10)
link.click()
self.browser.implicitly_wait(5)
experiencia = self.browser.find_element_by_id('id_aniosExperiencia')
experiencia.clear()
experiencia.send_keys('1')
telefono = self.browser.find_element_by_id('id_telefono')
telefono.clear()
telefono.send_keys('3164177888')
imagen = self.browser.find_element_by_id('id_imagen')
        imagen.send_keys(r'C:\imagen.png')
self.browser.implicitly_wait(5)
botonGrabar = self.browser.find_element_by_id('id_modificar')
botonGrabar.click()
self.browser.implicitly_wait(5)
span = self.browser.find_elements_by_class_name("glyphicon-log-out")
self.assertTrue(len(span) > 0)
    def test_t6(self): # test_comentar (post a comment)
self.browser.get('http://localhost:8000')
span = self.browser.find_element(By.XPATH, '//span[text()="Juan Daniel Arevalo"]')
span.click()
self.browser.implicitly_wait(3)
correo = self.browser.find_element_by_id('correo')
correo.send_keys('[email protected]')
        comentario = self.browser.find_element_by_id('comentario')
comentario.send_keys('Este es un comentario. Le vendi mi alma a satan para terminar este taller')
botonGrabar = self.browser.find_element_by_id('id_submitComentario')
botonGrabar.click()
p = self.browser.find_element(By.XPATH,
'//p[text()="Este es un comentario. Le vendi mi alma a satan para terminar este taller"]')
self.assertIn('Este es un comentario. Le vendi mi alma a satan para terminar este taller', p.text)
|
# Generated by Django 2.0.2 on 2018-02-27 13:49
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Author',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=50, verbose_name='First name')),
('last_name', models.CharField(max_length=50, verbose_name='Last name')),
('age', models.IntegerField(verbose_name='Age')),
],
),
migrations.CreateModel(
name='Book',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=120, verbose_name='Title')),
('form', models.CharField(default='A5', max_length=10, verbose_name='Format')),
('pages', models.IntegerField(verbose_name='Count pages')),
('author', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='testproject.Author', verbose_name='Author')),
],
),
]
|
#pocket/__init__.py
from flask import Flask
app = Flask(__name__)
import pocket.views |
"""
Copyright 2012-2019 Ministerie van Sociale Zaken en Werkgelegenheid
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import datetime
import logging
from hqlib.typing import MetricValue
from ... import metric_source
from ...domain import LowerIsBetterMetric
class LastSecurityTest(LowerIsBetterMetric):
""" Metric for measuring period from last security test. """
name = 'Beveiligingstest frequentie'
unit = 'dag(en)'
norm_template = 'De beveiligingstest wordt minimaal een keer per {target} {unit} uitgevoerd. Als de test langer ' \
'dan {low_target} {unit} geleden is uitgevoerd is deze metriek rood.'
template = 'De beveiligingstest is {value} {unit} geleden uitgevoerd.'
missing_template = 'De datum van de laatste beveiligingstest is niet aangetroffen.'
target_value = 180
low_target_value = 360
metric_source_class = metric_source.FileWithDate
def value(self) -> MetricValue:
""" Return the number of days since the last security test. """
if not (self._metric_source and self._metric_source_id):
return -1
read_date = self._metric_source.get_datetime_from_content(self._metric_source_id)
if read_date == datetime.datetime.min:
return -1
if read_date > datetime.datetime.now() + datetime.timedelta(seconds=60):
logging.warning("%s at %s returned a date and time in the future: %s",
self.metric_source_class.metric_source_name, self._metric_source.url(), read_date)
return -1
return max(0, (datetime.datetime.now() - read_date).days)
|
from .. import ast
from .base import BaseNodeTransformer
from typing import List, Union
def _find_bool(nodes: Union[List[ast.AST], List[ast.expr]]) -> bool:
for node in nodes:
if isinstance(node, ast.Name):
if node.id == '__bool__':
return True
elif isinstance(node, ast.Tuple):
if _find_bool(node.elts):
return True
return False
class ClassBoolMethodTransformer(BaseNodeTransformer):
"""Compiles:
class A:
def __bool__(self):
return False
To:
class A:
def __bool__(self):
return False
__nonzero__ = __bool__
"""
target = (2, 7)
def visit_ClassDef(self, node: ast.ClassDef) -> ast.ClassDef:
has_bool = False
for n in node.body:
if has_bool:
break
if isinstance(n, ast.Assign):
has_bool = _find_bool(n.targets)
elif isinstance(n, ast.FunctionDef):
has_bool = (n.name == '__bool__')
if has_bool:
self._tree_changed = True
nonzero = ast.Name(id='__nonzero__', ctx=ast.Store())
bool_ = ast.Name(id='__bool__', ctx=ast.Load())
node.body.append(ast.Assign(targets=[nonzero], value=bool_))
return self.generic_visit(node) # type: ignore
|
from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class BeltConfig(AppConfig):
"""Django Belt AppConfig."""
name = "belt"
verbose_name = _("Belt")
|
import sys
from textwrap import dedent
import pytest
from IPython.testing import globalipapp
from julia import magic
@pytest.fixture
def julia_magics(julia):
return magic.JuliaMagics(shell=globalipapp.get_ipython())
# fmt: off
@pytest.fixture
def run_cell(julia_magics):
# a more convenient way to run strings (possibly with magic) as if they were
# an IPython cell
def run_cell_impl(cell):
cell = dedent(cell).strip()
if cell.startswith("%%"):
return julia_magics.shell.run_cell_magic("julia","",cell.replace("%%julia","").strip())
else:
exec_result = julia_magics.shell.run_cell(cell)
if exec_result.error_in_exec:
raise exec_result.error_in_exec
else:
return exec_result.result
return run_cell_impl
def test_register_magics(julia):
magic.load_ipython_extension(globalipapp.get_ipython())
def test_success_line(julia_magics):
ans = julia_magics.julia('1')
assert ans == 1
def test_success_cell(julia_magics):
ans = julia_magics.julia(None, '2')
assert ans == 2
def test_failure_line(julia_magics):
with pytest.raises(Exception):
julia_magics.julia('pop!([])')
def test_failure_cell(julia_magics):
with pytest.raises(Exception):
julia_magics.julia(None, '1 += 1')
# In IPython, $x does a string interpolation handled by IPython itself for
# *line* magic, which prior to IPython 7.3 could not be turned off. However,
# even prior to IPython 7.3, *cell* magic never did the string interpolation, so
# below, any time we need to test $x interpolation, do it as cell magic so it
# works on IPython < 7.3
def test_interp_var(run_cell):
run_cell("x=1")
assert run_cell("""
%%julia
$x
""") == 1
def test_interp_expr(run_cell):
assert run_cell("""
x=1
%julia py"x+1"
""") == 2
def test_bad_interp(run_cell):
with pytest.raises(Exception):
assert run_cell("""
%%julia
$(x+1)
""")
def test_string_interp(run_cell):
run_cell("foo='python'")
assert run_cell("""
%%julia
foo="julia"
"$foo", "$($foo)"
""") == ('julia','python')
def test_expr_interp(run_cell):
run_cell("foo='python'")
assert run_cell("""
%%julia
foo="julia"
:($foo), :($($foo))
""") == ('julia','python')
def test_expr_py_interp(run_cell):
assert "baz" in str(run_cell("""
%julia :(py"baz")
"""))
def test_macro_esc(run_cell):
assert run_cell("""
%%julia
x = 1
@eval y = $x
y
""") == 1
def test_type_conversion(run_cell):
assert run_cell("""
%julia py"1" isa Integer && py"1"o isa PyObject
""") == True
def test_local_scope(run_cell):
assert run_cell("""
x = "global"
def f():
x = "local"
ret = %julia py"x"
return ret
f()
""") == "local"
def test_global_scope(run_cell):
assert run_cell("""
x = "global"
def f():
ret = %julia py"x"
return ret
f()
""") == "global"
def test_noretvalue(run_cell):
assert run_cell("""
%%julia
1+2;
""") is None
# fmt: on
def test_revise_error():
from julia.ipy import revise
counter = [0]
def throw():
counter[0] += 1
raise RuntimeError("fake revise error")
revise_wrapper = revise.make_revise_wrapper(throw)
revise.revise_errors = 0
try:
assert revise.revise_errors_limit == 1
with pytest.warns(UserWarning) as record1:
revise_wrapper() # called
assert len(record1) == 2
assert "fake revise error" in record1[0].message.args[0]
assert "Turning off Revise.jl" in record1[1].message.args[0]
revise_wrapper() # not called
assert counter[0] == 1
assert revise.revise_errors == 1
finally:
revise.revise_errors = 0
@pytest.mark.skipif(sys.version_info[0] < 3, reason="Python 2 not supported")
def test_completions(julia):
from IPython.core.completer import provisionalcompleter
from julia.ipy.monkeypatch_completer import JuliaCompleter
jc = JuliaCompleter(julia)
t = "%julia Base.si"
with provisionalcompleter():
completions = jc.julia_completions(t, len(t))
assert {"sin", "sign", "sizehint!"} <= {c.text for c in completions}
|
import numpy as np
import sys
import os
stderr = sys.stderr
sys.stderr = open(os.devnull, 'w')
# Silence TensorFlow's C++ logging; this must be set before TensorFlow is imported.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
from tensorflow.python.keras.models import model_from_yaml
sys.stderr = stderr
name = 'NeuralNet_TF_2L'
# dir = '/Users/sangtruong_2021/Documents/GitHub/GR-Git/GR-TestModel/NN/'
dir = '/home/LDAPdir/struong21/GR/GinRummy/GR-TestModel/NN/'
# load model from yaml
with open(dir + name + '.yaml', 'r') as file:
    model = model_from_yaml(file.read())
# load weights from h5 file to model
model.load_weights(dir + name + '.h5')
import socket
import ast
listensocket = socket.socket()
listensocket.bind(("127.0.0.1", 8888))
listensocket.listen(999999999)
print("Server started at 127.0.0.1 on port " + str(8888))
running = True
while running:
(clientsocket, address) = listensocket.accept()
# print("New connection make!")
    features_list = clientsocket.recv(1024).decode()  # Receive the feature-list string from the Java client
# print("Input: " + features_list)
features_list = np.array(ast.literal_eval(features_list))
features_list = features_list.reshape((1, 178))
# print(features_list.shape)
result = model.predict(features_list)
result = result.reshape((-1))
newMessage = str(result.tolist())
# print("Output: " + newMessage)
    clientsocket.send(newMessage.encode())  # Send the prediction list back to the Java client
# print("Computed and sent!")
clientsocket.close()
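# Illustrative client-side sketch (not part of the original server). It shows
# how a caller could talk to the server above, assuming the same host/port
# (127.0.0.1:8888) and the 178-feature input format the server reshapes to;
# the feature values below are placeholders.
#
#   import socket
#   features = [0.0] * 178                       # assumed feature vector length
#   with socket.create_connection(("127.0.0.1", 8888)) as sock:
#       sock.sendall(str(features).encode())     # server parses this with ast.literal_eval
#       prediction = sock.recv(4096).decode()    # server replies with str(list) of outputs
#   print(prediction)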
|
"""
Python Flight Mechanics Engine (PyFME).
Copyright (c) AeroPython Development Team.
Distributed under the terms of the MIT License.
Acceleration
------------
"""
from abc import abstractmethod
import numpy as np
from pyfme.utils.coordinates import body2hor, hor2body
class Acceleration:
"""Acceleration
Attributes
----------
accel_body : ndarray, shape(3)
(u_dot [m/s²], v_dot [m/s²], w_dot [m/s²])
u_dot
v_dot
w_dot
accel_NED : ndarray, shape(3)
(VN_dot [m/s²], VE_dot [m/s²], VD_dot [m/s²])
VN_dot
VE_dot
VD_dot
"""
def __init__(self):
# Body axis
self._accel_body = np.zeros(3) # m/s²
# Local horizon (NED)
self._accel_NED = np.zeros(3) # m/s²
@abstractmethod
def update(self, coords, attitude):
raise NotImplementedError
@property
def accel_body(self):
return self._accel_body
@property
def u_dot(self):
return self._accel_body[0]
@property
def v_dot(self):
return self._accel_body[1]
@property
def w_dot(self):
return self._accel_body[2]
@property
def accel_NED(self):
return self._accel_NED
@property
def v_north_dot(self):
return self._accel_NED[0]
@property
def v_east_dot(self):
return self._accel_NED[1]
@property
def v_down_dot(self):
return self._accel_NED[2]
@property
def value(self):
"""Only for testing purposes"""
return np.hstack((self.accel_body, self.accel_NED))
class BodyAcceleration(Acceleration):
def __init__(self, u_dot, v_dot, w_dot, attitude):
super().__init__()
self.update(np.array([u_dot, v_dot, w_dot]), attitude)
def update(self, coords, attitude):
self._accel_body[:] = coords
self._accel_NED = body2hor(coords,
attitude.theta,
attitude.phi,
attitude.psi)
def __repr__(self):
        rv = (f"u_dot: {self.u_dot:.2f} m/s², v_dot: {self.v_dot:.2f} m/s², "
              f"w_dot: {self.w_dot:.2f} m/s²")
return rv
class NEDAcceleration(Acceleration):
def __init__(self, vn_dot, ve_dot, vd_dot, attitude):
super().__init__()
self.update(np.array([vn_dot, ve_dot, vd_dot]), attitude)
def update(self, coords, attitude):
self._accel_NED[:] = coords
self._accel_body = hor2body(coords,
attitude.theta,
attitude.phi,
attitude.psi)
def __repr__(self):
rv = (f"V_north_dot: {self.v_north_dot:.2f} m/s², "
f"V_east_dot: {self.v_east_dot:.2f} m/s², "
f"V_down_dot: {self.v_down_dot:.2f} m/s²")
return rv
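# Minimal usage sketch (illustration only, not part of PyFME). It uses a
# stand-in attitude object exposing the theta/phi/psi angles that update()
# expects; in real code these would come from the simulator's attitude state.
if __name__ == '__main__':
    from types import SimpleNamespace
    attitude = SimpleNamespace(theta=0.0, phi=0.0, psi=0.0)  # level attitude
    accel = BodyAcceleration(1.0, 0.0, 0.0, attitude)
    print(accel)            # u_dot, v_dot, w_dot in body axes
    print(accel.accel_NED)  # the same acceleration expressed in NED axes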
|
from keras import Model
from abc import ABCMeta, abstractmethod
class Autoencoder(metaclass=ABCMeta):
    """Abstract interface for autoencoder implementations."""
    @abstractmethod
    def encoder(self) -> Model:
        raise NotImplementedError
    @abstractmethod
    def decoder(self) -> Model:
        raise NotImplementedError
    @abstractmethod
    def autoencoder(self) -> Model:
        raise NotImplementedError
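# Illustrative concrete subclass (a sketch, not part of the original module);
# the dense architecture, layer sizes, and class name below are assumptions
# chosen only to show how the abstract interface could be satisfied.
from keras.layers import Dense, Input
class DenseAutoencoder(Autoencoder):
    """Minimal fully-connected autoencoder implementing the interface above."""
    def __init__(self, input_dim: int = 784, latent_dim: int = 32) -> None:
        inputs = Input(shape=(input_dim,))
        encoded = Dense(latent_dim, activation='relu')(inputs)
        decode_layer = Dense(input_dim, activation='sigmoid')
        # encoder maps inputs to the latent code
        self._encoder = Model(inputs, encoded)
        # autoencoder maps inputs all the way to a reconstruction
        self._autoencoder = Model(inputs, decode_layer(encoded))
        # decoder reuses the output layer on a standalone latent input
        latent = Input(shape=(latent_dim,))
        self._decoder = Model(latent, decode_layer(latent))
    def encoder(self) -> Model:
        return self._encoder
    def decoder(self) -> Model:
        return self._decoder
    def autoencoder(self) -> Model:
        return self._autoencoder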
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['UserArgs', 'User']
@pulumi.input_type
class UserArgs:
def __init__(__self__, *,
account_name: pulumi.Input[str],
admin_user: pulumi.Input[bool],
auth_admin_user: pulumi.Input[bool],
nick_name: pulumi.Input[str],
user_type: pulumi.Input[str],
account_id: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a User resource.
:param pulumi.Input[str] account_name: An Alibaba Cloud account, Alibaba Cloud name.
:param pulumi.Input[bool] admin_user: Whether it is the administrator. Valid values: `true` and `false`.
:param pulumi.Input[bool] auth_admin_user: Whether this is a permissions administrator. Valid values: `false`, `true`.
:param pulumi.Input[str] nick_name: The nickname of the user.
:param pulumi.Input[str] user_type: The members of the organization of the type of role separately. Valid values: `Analyst`, `Developer` and `Visitor`.
:param pulumi.Input[str] account_id: Alibaba Cloud account ID.
"""
pulumi.set(__self__, "account_name", account_name)
pulumi.set(__self__, "admin_user", admin_user)
pulumi.set(__self__, "auth_admin_user", auth_admin_user)
pulumi.set(__self__, "nick_name", nick_name)
pulumi.set(__self__, "user_type", user_type)
if account_id is not None:
pulumi.set(__self__, "account_id", account_id)
@property
@pulumi.getter(name="accountName")
def account_name(self) -> pulumi.Input[str]:
"""
An Alibaba Cloud account, Alibaba Cloud name.
"""
return pulumi.get(self, "account_name")
@account_name.setter
def account_name(self, value: pulumi.Input[str]):
pulumi.set(self, "account_name", value)
@property
@pulumi.getter(name="adminUser")
def admin_user(self) -> pulumi.Input[bool]:
"""
Whether it is the administrator. Valid values: `true` and `false`.
"""
return pulumi.get(self, "admin_user")
@admin_user.setter
def admin_user(self, value: pulumi.Input[bool]):
pulumi.set(self, "admin_user", value)
@property
@pulumi.getter(name="authAdminUser")
def auth_admin_user(self) -> pulumi.Input[bool]:
"""
Whether this is a permissions administrator. Valid values: `false`, `true`.
"""
return pulumi.get(self, "auth_admin_user")
@auth_admin_user.setter
def auth_admin_user(self, value: pulumi.Input[bool]):
pulumi.set(self, "auth_admin_user", value)
@property
@pulumi.getter(name="nickName")
def nick_name(self) -> pulumi.Input[str]:
"""
The nickname of the user.
"""
return pulumi.get(self, "nick_name")
@nick_name.setter
def nick_name(self, value: pulumi.Input[str]):
pulumi.set(self, "nick_name", value)
@property
@pulumi.getter(name="userType")
def user_type(self) -> pulumi.Input[str]:
"""
The members of the organization of the type of role separately. Valid values: `Analyst`, `Developer` and `Visitor`.
"""
return pulumi.get(self, "user_type")
@user_type.setter
def user_type(self, value: pulumi.Input[str]):
pulumi.set(self, "user_type", value)
@property
@pulumi.getter(name="accountId")
def account_id(self) -> Optional[pulumi.Input[str]]:
"""
Alibaba Cloud account ID.
"""
return pulumi.get(self, "account_id")
@account_id.setter
def account_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "account_id", value)
@pulumi.input_type
class _UserState:
def __init__(__self__, *,
account_id: Optional[pulumi.Input[str]] = None,
account_name: Optional[pulumi.Input[str]] = None,
admin_user: Optional[pulumi.Input[bool]] = None,
auth_admin_user: Optional[pulumi.Input[bool]] = None,
nick_name: Optional[pulumi.Input[str]] = None,
user_type: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering User resources.
:param pulumi.Input[str] account_id: Alibaba Cloud account ID.
:param pulumi.Input[str] account_name: An Alibaba Cloud account, Alibaba Cloud name.
:param pulumi.Input[bool] admin_user: Whether it is the administrator. Valid values: `true` and `false`.
:param pulumi.Input[bool] auth_admin_user: Whether this is a permissions administrator. Valid values: `false`, `true`.
:param pulumi.Input[str] nick_name: The nickname of the user.
:param pulumi.Input[str] user_type: The members of the organization of the type of role separately. Valid values: `Analyst`, `Developer` and `Visitor`.
"""
if account_id is not None:
pulumi.set(__self__, "account_id", account_id)
if account_name is not None:
pulumi.set(__self__, "account_name", account_name)
if admin_user is not None:
pulumi.set(__self__, "admin_user", admin_user)
if auth_admin_user is not None:
pulumi.set(__self__, "auth_admin_user", auth_admin_user)
if nick_name is not None:
pulumi.set(__self__, "nick_name", nick_name)
if user_type is not None:
pulumi.set(__self__, "user_type", user_type)
@property
@pulumi.getter(name="accountId")
def account_id(self) -> Optional[pulumi.Input[str]]:
"""
Alibaba Cloud account ID.
"""
return pulumi.get(self, "account_id")
@account_id.setter
def account_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "account_id", value)
@property
@pulumi.getter(name="accountName")
def account_name(self) -> Optional[pulumi.Input[str]]:
"""
An Alibaba Cloud account, Alibaba Cloud name.
"""
return pulumi.get(self, "account_name")
@account_name.setter
def account_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "account_name", value)
@property
@pulumi.getter(name="adminUser")
def admin_user(self) -> Optional[pulumi.Input[bool]]:
"""
Whether it is the administrator. Valid values: `true` and `false`.
"""
return pulumi.get(self, "admin_user")
@admin_user.setter
def admin_user(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "admin_user", value)
@property
@pulumi.getter(name="authAdminUser")
def auth_admin_user(self) -> Optional[pulumi.Input[bool]]:
"""
Whether this is a permissions administrator. Valid values: `false`, `true`.
"""
return pulumi.get(self, "auth_admin_user")
@auth_admin_user.setter
def auth_admin_user(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "auth_admin_user", value)
@property
@pulumi.getter(name="nickName")
def nick_name(self) -> Optional[pulumi.Input[str]]:
"""
The nickname of the user.
"""
return pulumi.get(self, "nick_name")
@nick_name.setter
def nick_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "nick_name", value)
@property
@pulumi.getter(name="userType")
def user_type(self) -> Optional[pulumi.Input[str]]:
"""
The members of the organization of the type of role separately. Valid values: `Analyst`, `Developer` and `Visitor`.
"""
return pulumi.get(self, "user_type")
@user_type.setter
def user_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "user_type", value)
class User(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_id: Optional[pulumi.Input[str]] = None,
account_name: Optional[pulumi.Input[str]] = None,
admin_user: Optional[pulumi.Input[bool]] = None,
auth_admin_user: Optional[pulumi.Input[bool]] = None,
nick_name: Optional[pulumi.Input[str]] = None,
user_type: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Provides a Quick BI User resource.
For information about Quick BI User and how to use it, see [What is User](https://www.alibabacloud.com/help/doc-detail/33813.htm).
> **NOTE:** Available in v1.136.0+.
## Example Usage
Basic Usage
```python
import pulumi
import pulumi_alicloud as alicloud
example = alicloud.quickbi.User("example",
account_name="example_value",
admin_user=False,
auth_admin_user=False,
nick_name="example_value",
user_type="Analyst")
```
## Import
Quick BI User can be imported using the id, e.g.
```sh
$ pulumi import alicloud:quickbi/user:User example <id>
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] account_id: Alibaba Cloud account ID.
:param pulumi.Input[str] account_name: An Alibaba Cloud account, Alibaba Cloud name.
:param pulumi.Input[bool] admin_user: Whether it is the administrator. Valid values: `true` and `false`.
:param pulumi.Input[bool] auth_admin_user: Whether this is a permissions administrator. Valid values: `false`, `true`.
:param pulumi.Input[str] nick_name: The nickname of the user.
:param pulumi.Input[str] user_type: The members of the organization of the type of role separately. Valid values: `Analyst`, `Developer` and `Visitor`.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: UserArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides a Quick BI User resource.
For information about Quick BI User and how to use it, see [What is User](https://www.alibabacloud.com/help/doc-detail/33813.htm).
> **NOTE:** Available in v1.136.0+.
## Example Usage
Basic Usage
```python
import pulumi
import pulumi_alicloud as alicloud
example = alicloud.quickbi.User("example",
account_name="example_value",
admin_user=False,
auth_admin_user=False,
nick_name="example_value",
user_type="Analyst")
```
## Import
Quick BI User can be imported using the id, e.g.
```sh
$ pulumi import alicloud:quickbi/user:User example <id>
```
:param str resource_name: The name of the resource.
:param UserArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(UserArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_id: Optional[pulumi.Input[str]] = None,
account_name: Optional[pulumi.Input[str]] = None,
admin_user: Optional[pulumi.Input[bool]] = None,
auth_admin_user: Optional[pulumi.Input[bool]] = None,
nick_name: Optional[pulumi.Input[str]] = None,
user_type: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = UserArgs.__new__(UserArgs)
__props__.__dict__["account_id"] = account_id
if account_name is None and not opts.urn:
raise TypeError("Missing required property 'account_name'")
__props__.__dict__["account_name"] = account_name
if admin_user is None and not opts.urn:
raise TypeError("Missing required property 'admin_user'")
__props__.__dict__["admin_user"] = admin_user
if auth_admin_user is None and not opts.urn:
raise TypeError("Missing required property 'auth_admin_user'")
__props__.__dict__["auth_admin_user"] = auth_admin_user
if nick_name is None and not opts.urn:
raise TypeError("Missing required property 'nick_name'")
__props__.__dict__["nick_name"] = nick_name
if user_type is None and not opts.urn:
raise TypeError("Missing required property 'user_type'")
__props__.__dict__["user_type"] = user_type
super(User, __self__).__init__(
'alicloud:quickbi/user:User',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
account_id: Optional[pulumi.Input[str]] = None,
account_name: Optional[pulumi.Input[str]] = None,
admin_user: Optional[pulumi.Input[bool]] = None,
auth_admin_user: Optional[pulumi.Input[bool]] = None,
nick_name: Optional[pulumi.Input[str]] = None,
user_type: Optional[pulumi.Input[str]] = None) -> 'User':
"""
Get an existing User resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] account_id: Alibaba Cloud account ID.
:param pulumi.Input[str] account_name: An Alibaba Cloud account, Alibaba Cloud name.
:param pulumi.Input[bool] admin_user: Whether it is the administrator. Valid values: `true` and `false`.
:param pulumi.Input[bool] auth_admin_user: Whether this is a permissions administrator. Valid values: `false`, `true`.
:param pulumi.Input[str] nick_name: The nickname of the user.
:param pulumi.Input[str] user_type: The members of the organization of the type of role separately. Valid values: `Analyst`, `Developer` and `Visitor`.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _UserState.__new__(_UserState)
__props__.__dict__["account_id"] = account_id
__props__.__dict__["account_name"] = account_name
__props__.__dict__["admin_user"] = admin_user
__props__.__dict__["auth_admin_user"] = auth_admin_user
__props__.__dict__["nick_name"] = nick_name
__props__.__dict__["user_type"] = user_type
return User(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="accountId")
def account_id(self) -> pulumi.Output[Optional[str]]:
"""
Alibaba Cloud account ID.
"""
return pulumi.get(self, "account_id")
@property
@pulumi.getter(name="accountName")
def account_name(self) -> pulumi.Output[str]:
"""
An Alibaba Cloud account, Alibaba Cloud name.
"""
return pulumi.get(self, "account_name")
@property
@pulumi.getter(name="adminUser")
def admin_user(self) -> pulumi.Output[bool]:
"""
Whether it is the administrator. Valid values: `true` and `false`.
"""
return pulumi.get(self, "admin_user")
@property
@pulumi.getter(name="authAdminUser")
def auth_admin_user(self) -> pulumi.Output[bool]:
"""
Whether this is a permissions administrator. Valid values: `false`, `true`.
"""
return pulumi.get(self, "auth_admin_user")
@property
@pulumi.getter(name="nickName")
def nick_name(self) -> pulumi.Output[str]:
"""
The nickname of the user.
"""
return pulumi.get(self, "nick_name")
@property
@pulumi.getter(name="userType")
def user_type(self) -> pulumi.Output[str]:
"""
The members of the organization of the type of role separately. Valid values: `Analyst`, `Developer` and `Visitor`.
"""
return pulumi.get(self, "user_type")
|
# -*- coding: utf-8 -*-
"""The BitLocker Drive Encryption (BDE) credentials."""
from __future__ import unicode_literals
from dfvfs.credentials import credentials
from dfvfs.credentials import manager
from dfvfs.lib import definitions
# TODO: add means to set the credentials in the bde_volume using the helper?
class BDECredentials(credentials.Credentials):
"""BitLocker Drive Encryption (BDE) credentials."""
# TODO: add support for key_data credential, needs pybde update.
CREDENTIALS = frozenset([
'password', 'recovery_password', 'startup_key'])
TYPE_INDICATOR = definitions.TYPE_INDICATOR_BDE
manager.CredentialsManager.RegisterCredentials(BDECredentials())
|
#!/usr/bin/env python
# Copyright (c) 2016, wradlib developers.
# Distributed under the MIT License. See LICENSE.txt for more info.
import unittest
import numpy as np
import wradlib.trafo as trafo
class TransformationTest(unittest.TestCase):
def setUp(self):
self.rvp = np.array([0., 128., 255.])
self.dbz = np.array([-32.5, 31.5, 95.0])
self.lin = np.array([1e-4, 1, 1e4])
self.dec = np.array([-40, 0, 40])
self.r = np.array([5., 10., 20.])
self.kdp = np.array([0., 1., 2., 5.])
# speed in m/s
self.speedsi = np.array([0., 1., 50.])
# speed in km/h
self.speedkmh = np.array([0., 3.6, 180.])
# speed in miles/h
self.speedmph = np.array([0., 2.23693629, 111.8468146])
# speed in knots
self.speedkts = np.array([0., 1.94384449, 97.19222462])
def test_rvp2dBZ(self):
self.assertTrue(np.allclose(trafo.rvp2dBZ(self.rvp), self.dbz))
def test_decibel(self):
self.assertTrue(np.allclose(trafo.decibel(self.lin), self.dec))
def test_idecibel(self):
self.assertTrue(np.allclose(trafo.idecibel(self.dec), self.lin))
def test_r2depth(self):
self.assertTrue(
np.allclose(trafo.r2depth(self.r, 720), np.array([1., 2., 4.])))
self.assertTrue(
np.allclose(trafo.r2depth(self.r, 360), np.array([0.5, 1., 2.])))
def test_kdp2r(self):
self.assertTrue(np.allclose(trafo.kdp2r(self.kdp, 9.45), np.array(
[0., 19.11933017, 34.46261032, 75.09260608])))
def test_si2kmh(self):
self.assertTrue(np.allclose(trafo.si2kmh(self.speedsi), self.speedkmh))
def test_si2mph(self):
self.assertTrue(np.allclose(trafo.si2mph(self.speedsi), self.speedmph))
def test_si2kts(self):
self.assertTrue(np.allclose(trafo.si2kts(self.speedsi), self.speedkts))
def test_kmh2si(self):
self.assertTrue(np.allclose(trafo.kmh2si(self.speedkmh), self.speedsi))
def test_mph2si(self):
self.assertTrue(np.allclose(trafo.mph2si(self.speedmph), self.speedsi))
def test_kts2si(self):
self.assertTrue(np.allclose(trafo.kts2si(self.speedkts), self.speedsi))
if __name__ == '__main__':
unittest.main()
|
class DataProcJob(object):
def __init__(self, client, project, region, response):
self.client = client
self.project = project
self.region = region
self.response = response
@classmethod
def from_id(cls, client, project, job_id, region="global"):
return DataProcJob(client, project, region, {'reference': {'jobId': job_id}}).update()
def update(self):
self.response = self.client.execute(
lambda x: x.projects().regions().jobs().get(projectId=self.project,
region=self.region,
jobId=self.get_job_id()))
return self
def get_current_state(self):
self.update()
return self.get_last_state()
def get_last_state(self):
return self.response['status']['state']
def get_details_message(self):
status = self.response['status']
return status['details'] if 'details' in status else None
def get_job_id(self):
return self.response['reference']['jobId']
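# Illustrative usage sketch (not part of the class above). `client`, the
# project name, and the job id are placeholders; the terminal state names
# follow the Dataproc job lifecycle but are assumptions here.
#
#   import time
#   job = DataProcJob.from_id(client, project='my-project', job_id='job-1234')
#   while job.get_current_state() not in ('DONE', 'ERROR', 'CANCELLED'):
#       time.sleep(10)
#   print(job.get_last_state(), job.get_details_message())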
|
market = list()
def getLowestcap():
    """Return the name of the coin with the lowest market cap and remove it from the list."""
    lowest = market[0][1]
    coin = market[0][0]
    for item in market:
        if lowest >= item[1]:
            # keep track of the smallest cap seen so far
            lowest = item[1]
            coin = item[0]
    for index, entry in enumerate(market):
        if entry[0] == coin:
            market.pop(index)
            break
    return coin
x = int(input("enter the number of currencies : "))
for _ in range(x):
    name = input("Enter coin name : ")
    cap = float(input("enter market cap : "))
    market.append((name, cap))
print(market)
rank = 1
for _ in range(x):
    getCoin = getLowestcap()
    print(" #%d %s " % (rank, getCoin.upper()))
    rank += 1 |
import base64
import sys
import warnings
from typing import (
AbstractSet,
Any,
Callable,
Dict,
List,
Mapping,
MutableSequence,
Optional,
Set,
TYPE_CHECKING,
Tuple,
Type,
Union,
cast,
)
try:
import orjson as json
except ImportError: # pragma: no cover
import json # type: ignore
import databases
import pydantic
import sqlalchemy
from pydantic import BaseModel
import ormar # noqa I100
from ormar.exceptions import ModelError, ModelPersistenceError
from ormar.fields import BaseField
from ormar.fields.foreign_key import ForeignKeyField
from ormar.models.helpers import register_relation_in_alias_manager
from ormar.models.helpers.relations import expand_reverse_relationship
from ormar.models.helpers.sqlalchemy import (
populate_meta_sqlalchemy_table_if_required,
update_column_definition,
)
from ormar.models.metaclass import ModelMeta, ModelMetaclass
from ormar.models.modelproxy import ModelTableProxy
from ormar.queryset.utils import translate_list_to_dict
from ormar.relations.alias_manager import AliasManager
from ormar.relations.relation_manager import RelationsManager
if TYPE_CHECKING: # pragma no cover
from ormar.models import Model
from ormar.signals import SignalEmitter
IntStr = Union[int, str]
DictStrAny = Dict[str, Any]
AbstractSetIntStr = AbstractSet[IntStr]
MappingIntStrAny = Mapping[IntStr, Any]
class NewBaseModel(pydantic.BaseModel, ModelTableProxy, metaclass=ModelMetaclass):
"""
Main base class of ormar Model.
Inherits from pydantic BaseModel and has all mixins combined in ModelTableProxy.
Constructed with ModelMetaclass which in turn also inherits pydantic metaclass.
Abstracts away all internals and helper functions, so final Model class has only
    the logic concerned with database connection and data persistence.
"""
__slots__ = ("_orm_id", "_orm_saved", "_orm", "_pk_column", "__pk_only__")
__relations_map__ = None
if TYPE_CHECKING: # pragma no cover
pk: Any
__model_fields__: Dict[str, BaseField]
__table__: sqlalchemy.Table
__fields__: Dict[str, pydantic.fields.ModelField]
__pydantic_model__: Type[BaseModel]
__pkname__: str
__tablename__: str
__metadata__: sqlalchemy.MetaData
__database__: databases.Database
__relation_map__: Optional[List[str]]
_orm_relationship_manager: AliasManager
_orm: RelationsManager
_orm_id: int
_orm_saved: bool
_related_names: Optional[Set]
_through_names: Optional[Set]
_related_names_hash: str
_choices_fields: Optional[Set]
_pydantic_fields: Set
_quick_access_fields: Set
_json_fields: Set
_bytes_fields: Set
Meta: ModelMeta
# noinspection PyMissingConstructor
def __init__(self, *args: Any, **kwargs: Any) -> None: # type: ignore
"""
Initializer that creates a new ormar Model that is also pydantic Model at the
same time.
Passed keyword arguments can be only field names and their corresponding values
as those will be passed to pydantic validation that will complain if extra
params are passed.
        If relations are defined, each relation is expanded and child models are also
        initialized and validated. The relation is registered on both sides, so you can
        access related models from either side.
Json fields are automatically loaded/dumped if needed.
Models marked as abstract=True in internal Meta class cannot be initialized.
        Also accepts a special __pk_only__ flag indicating that the Model is constructed
        with only the primary key value (no other fields; it is a child model of another
        Model). In that case the validation is skipped; it is the only case in which
        validation can be skipped.
        Also accepts a special __excluded__ parameter containing a set of fields that
        should be explicitly set to None, as otherwise pydantic would try to populate
        them with their default values if a default is set.
:raises ModelError: if abstract model is initialized, model has ForwardRefs
that has not been updated or unknown field is passed
:param args: ignored args
:type args: Any
:param kwargs: keyword arguments - all fields values and some special params
:type kwargs: Any
"""
self._verify_model_can_be_initialized()
self._initialize_internal_attributes()
pk_only = kwargs.pop("__pk_only__", False)
object.__setattr__(self, "__pk_only__", pk_only)
new_kwargs, through_tmp_dict = self._process_kwargs(kwargs)
if not pk_only:
values, fields_set, validation_error = pydantic.validate_model(
self, new_kwargs # type: ignore
)
if validation_error:
raise validation_error
else:
fields_set = {self.Meta.pkname}
values = new_kwargs
object.__setattr__(self, "__dict__", values)
object.__setattr__(self, "__fields_set__", fields_set)
# add back through fields
new_kwargs.update(through_tmp_dict)
model_fields = object.__getattribute__(self, "Meta").model_fields
# register the columns models after initialization
for related in self.extract_related_names().union(self.extract_through_names()):
model_fields[related].expand_relationship(
new_kwargs.get(related), self, to_register=True,
)
if hasattr(self, "_init_private_attributes"):
# introduced in pydantic 1.7
self._init_private_attributes()
def __setattr__(self, name: str, value: Any) -> None: # noqa CCR001
"""
Overwrites setattr in pydantic parent as otherwise descriptors are not called.
:param name: name of the attribute to set
:type name: str
:param value: value of the attribute to set
:type value: Any
:return: None
:rtype: None
"""
if hasattr(self, name):
object.__setattr__(self, name, value)
else:
# let pydantic handle errors for unknown fields
super().__setattr__(name, value)
def __getattr__(self, item: str) -> Any:
"""
Used only to silence mypy errors for Through models and reverse relations.
Not used in real life as in practice calls are intercepted
by RelationDescriptors
:param item: name of attribute
:type item: str
:return: Any
:rtype: Any
"""
return super().__getattribute__(item)
def _internal_set(self, name: str, value: Any) -> None:
"""
Delegates call to pydantic.
:param name: name of param
:type name: str
:param value: value to set
:type value: Any
"""
super().__setattr__(name, value)
def _verify_model_can_be_initialized(self) -> None:
"""
Raises exception if model is abstract or has ForwardRefs in relation fields.
:return: None
:rtype: None
"""
if self.Meta.abstract:
raise ModelError(f"You cannot initialize abstract model {self.get_name()}")
if self.Meta.requires_ref_update:
raise ModelError(
f"Model {self.get_name()} has not updated "
f"ForwardRefs. \nBefore using the model you "
f"need to call update_forward_refs()."
)
def _process_kwargs(self, kwargs: Dict) -> Tuple[Dict, Dict]:
"""
Initializes nested models.
Removes property_fields
        Checks if each field is in the model fields or the pydantic fields.
Nullifies fields that should be excluded.
Extracts through models from kwargs into temporary dict.
:param kwargs: passed to init keyword arguments
:type kwargs: Dict
:return: modified kwargs
:rtype: Tuple[Dict, Dict]
"""
property_fields = self.Meta.property_fields
model_fields = self.Meta.model_fields
pydantic_fields = set(self.__fields__.keys())
# remove property fields
        for prop_field in property_fields:
            kwargs.pop(prop_field, None)
excluded: Set[str] = kwargs.pop("__excluded__", set())
if "pk" in kwargs:
kwargs[self.Meta.pkname] = kwargs.pop("pk")
# extract through fields
through_tmp_dict = dict()
for field_name in self.extract_through_names():
through_tmp_dict[field_name] = kwargs.pop(field_name, None)
try:
new_kwargs: Dict[str, Any] = {
k: self._convert_to_bytes(
k,
self._convert_json(
k,
model_fields[k].expand_relationship(v, self, to_register=False,)
if k in model_fields
else (v if k in pydantic_fields else model_fields[k]),
),
)
for k, v in kwargs.items()
}
except KeyError as e:
raise ModelError(
f"Unknown field '{e.args[0]}' for model {self.get_name(lower=False)}"
)
# explicitly set None to excluded fields
# as pydantic populates them with default if set
for field_to_nullify in excluded:
new_kwargs[field_to_nullify] = None
return new_kwargs, through_tmp_dict
def _initialize_internal_attributes(self) -> None:
"""
Initializes internal attributes during __init__()
:rtype: None
"""
# object.__setattr__(self, "_orm_id", uuid.uuid4().hex)
object.__setattr__(self, "_orm_saved", False)
object.__setattr__(self, "_pk_column", None)
object.__setattr__(
self,
"_orm",
RelationsManager(
related_fields=self.extract_related_fields(), owner=cast("Model", self),
),
)
def __eq__(self, other: object) -> bool:
"""
Compares other model to this model. when == is called.
:param other: other model to compare
:type other: object
:return: result of comparison
:rtype: bool
"""
if isinstance(other, NewBaseModel):
return self.__same__(other)
return super().__eq__(other) # pragma no cover
def __same__(self, other: "NewBaseModel") -> bool:
"""
Used by __eq__, compares other model to this model.
Compares:
* _orm_ids,
* primary key values if it's set
* dictionary of own fields (excluding relations)
:param other: model to compare to
:type other: NewBaseModel
:return: result of comparison
:rtype: bool
"""
return (
# self._orm_id == other._orm_id
(self.pk == other.pk and self.pk is not None)
or (
(self.pk is None and other.pk is None)
and {
k: v
for k, v in self.__dict__.items()
if k not in self.extract_related_names()
}
== {
k: v
for k, v in other.__dict__.items()
if k not in other.extract_related_names()
}
)
)
@classmethod
def get_name(cls, lower: bool = True) -> str:
"""
Returns name of the Model class, by default lowercase.
:param lower: flag if name should be set to lowercase
:type lower: bool
:return: name of the model
:rtype: str
"""
name = cls.__name__
if lower:
name = name.lower()
return name
@property
def pk_column(self) -> sqlalchemy.Column:
"""
Retrieves primary key sqlalchemy column from models Meta.table.
Each model has to have primary key.
Only one primary key column is allowed.
:return: primary key sqlalchemy column
:rtype: sqlalchemy.Column
"""
if object.__getattribute__(self, "_pk_column") is not None:
return object.__getattribute__(self, "_pk_column")
pk_columns = self.Meta.table.primary_key.columns.values()
pk_col = pk_columns[0]
object.__setattr__(self, "_pk_column", pk_col)
return pk_col
@property
def saved(self) -> bool:
"""Saved status of the model. Changed by setattr and loading from db"""
return self._orm_saved
@property
def signals(self) -> "SignalEmitter":
"""Exposes signals from model Meta"""
return self.Meta.signals
@classmethod
def pk_type(cls) -> Any:
"""Shortcut to models primary key field type"""
return cls.Meta.model_fields[cls.Meta.pkname].__type__
@classmethod
    def db_backend_name(cls) -> str:
        """Shortcut to the database dialect,
        because some dialects require different treatment"""
return cls.Meta.database._backend._dialect.name
def remove(self, parent: "Model", name: str) -> None:
"""Removes child from relation with given name in RelationshipManager"""
self._orm.remove_parent(self, parent, name)
def set_save_status(self, status: bool) -> None:
"""Sets value of the save status"""
object.__setattr__(self, "_orm_saved", status)
@classmethod
def get_properties(
cls, include: Union[Set, Dict, None], exclude: Union[Set, Dict, None]
) -> Set[str]:
"""
Returns a set of names of functions/fields decorated with
@property_field decorator.
They are added to dictionary when called directly and therefore also are
present in fastapi responses.
:param include: fields to include
:type include: Union[Set, Dict, None]
:param exclude: fields to exclude
:type exclude: Union[Set, Dict, None]
:return: set of property fields names
:rtype: Set[str]
"""
props = cls.Meta.property_fields
if include:
props = {prop for prop in props if prop in include}
if exclude:
props = {prop for prop in props if prop not in exclude}
return props
@classmethod
def update_forward_refs(cls, **localns: Any) -> None:
"""
Processes fields that are ForwardRef and need to be evaluated into actual
models.
        Expands relationships, registers relations in the alias manager and substitutes
sqlalchemy columns with new ones with proper column type (null before).
Populates Meta table of the Model which is left empty before.
Sets self_reference flag on models that links to themselves.
Calls the pydantic method to evaluate pydantic fields.
:param localns: local namespace
:type localns: Any
:return: None
:rtype: None
"""
globalns = sys.modules[cls.__module__].__dict__.copy()
globalns.setdefault(cls.__name__, cls)
fields_to_check = cls.Meta.model_fields.copy()
for field in fields_to_check.values():
if field.has_unresolved_forward_refs():
field = cast(ForeignKeyField, field)
field.evaluate_forward_ref(globalns=globalns, localns=localns)
field.set_self_reference_flag()
if field.is_multi and not field.through:
field = cast(ormar.ManyToManyField, field)
field.create_default_through_model()
expand_reverse_relationship(model_field=field)
register_relation_in_alias_manager(field=field)
update_column_definition(model=cls, field=field)
populate_meta_sqlalchemy_table_if_required(meta=cls.Meta)
super().update_forward_refs(**localns)
cls.Meta.requires_ref_update = False
@staticmethod
def _get_not_excluded_fields(
fields: Union[List, Set], include: Optional[Dict], exclude: Optional[Dict],
) -> List:
"""
Returns related field names applying on them include and exclude set.
:param include: fields to include
:type include: Union[Set, Dict, None]
:param exclude: fields to exclude
:type exclude: Union[Set, Dict, None]
        :return: list of field names with relations that are not excluded
        :rtype: List
"""
fields = [*fields] if not isinstance(fields, list) else fields
if include:
fields = [field for field in fields if field in include]
if exclude:
fields = [
field
for field in fields
if field not in exclude
or (
exclude.get(field) is not Ellipsis
and exclude.get(field) != {"__all__"}
)
]
return fields
@staticmethod
def _extract_nested_models_from_list(
relation_map: Dict,
models: MutableSequence,
include: Union[Set, Dict, None],
exclude: Union[Set, Dict, None],
exclude_primary_keys: bool,
exclude_through_models: bool,
) -> List:
"""
Converts list of models into list of dictionaries.
:param models: List of models
:type models: List
:param include: fields to include
:type include: Union[Set, Dict, None]
:param exclude: fields to exclude
:type exclude: Union[Set, Dict, None]
:return: list of models converted to dictionaries
:rtype: List[Dict]
"""
result = []
for model in models:
try:
result.append(
model.dict(
relation_map=relation_map,
include=include,
exclude=exclude,
exclude_primary_keys=exclude_primary_keys,
exclude_through_models=exclude_through_models,
)
)
except ReferenceError: # pragma no cover
continue
return result
@classmethod
def _skip_ellipsis(
cls, items: Union[Set, Dict, None], key: str, default_return: Any = None
) -> Union[Set, Dict, None]:
"""
Helper to traverse the include/exclude dictionaries.
In dict() Ellipsis should be skipped as it indicates all fields required
and not the actual set/dict with fields names.
:param items: current include/exclude value
:type items: Union[Set, Dict, None]
:param key: key for nested relations to check
:type key: str
:return: nested value of the items
:rtype: Union[Set, Dict, None]
"""
result = cls.get_child(items, key)
return result if result is not Ellipsis else default_return
@staticmethod
def _convert_all(items: Union[Set, Dict, None]) -> Union[Set, Dict, None]:
"""
Helper to convert __all__ pydantic special index to ormar which does not
support index based exclusions.
:param items: current include/exclude value
:type items: Union[Set, Dict, None]
"""
if isinstance(items, dict) and "__all__" in items:
return items.get("__all__")
return items
def _extract_nested_models( # noqa: CCR001, CFQ002
self,
relation_map: Dict,
dict_instance: Dict,
include: Optional[Dict],
exclude: Optional[Dict],
exclude_primary_keys: bool,
exclude_through_models: bool,
) -> Dict:
"""
Traverse nested models and converts them into dictionaries.
Calls itself recursively if needed.
        :param relation_map: map of the relations to follow to avoid circular deps
        :type relation_map: Dict
:param dict_instance: current instance dict
:type dict_instance: Dict
:param include: fields to include
:type include: Optional[Dict]
:param exclude: fields to exclude
:type exclude: Optional[Dict]
:return: current model dict with child models converted to dictionaries
:rtype: Dict
"""
fields = self._get_not_excluded_fields(
fields=self.extract_related_names(), include=include, exclude=exclude
)
for field in fields:
if not relation_map or field not in relation_map:
continue
try:
nested_model = getattr(self, field)
if isinstance(nested_model, MutableSequence):
dict_instance[field] = self._extract_nested_models_from_list(
relation_map=self._skip_ellipsis( # type: ignore
relation_map, field, default_return=dict()
),
models=nested_model,
include=self._convert_all(self._skip_ellipsis(include, field)),
exclude=self._convert_all(self._skip_ellipsis(exclude, field)),
exclude_primary_keys=exclude_primary_keys,
exclude_through_models=exclude_through_models,
)
elif nested_model is not None:
dict_instance[field] = nested_model.dict(
relation_map=self._skip_ellipsis(
relation_map, field, default_return=dict()
),
include=self._convert_all(self._skip_ellipsis(include, field)),
exclude=self._convert_all(self._skip_ellipsis(exclude, field)),
exclude_primary_keys=exclude_primary_keys,
exclude_through_models=exclude_through_models,
)
else:
dict_instance[field] = None
except ReferenceError:
dict_instance[field] = None
return dict_instance
def dict( # type: ignore # noqa A003
self,
*,
include: Union[Set, Dict] = None,
exclude: Union[Set, Dict] = None,
by_alias: bool = False,
skip_defaults: bool = None,
exclude_unset: bool = False,
exclude_defaults: bool = False,
exclude_none: bool = False,
exclude_primary_keys: bool = False,
exclude_through_models: bool = False,
relation_map: Dict = None,
) -> "DictStrAny": # noqa: A003'
"""
Generate a dictionary representation of the model,
optionally specifying which fields to include or exclude.
Nested models are also parsed to dictionaries.
Additionally fields decorated with @property_field are also added.
:param exclude_through_models: flag to exclude through models from dict
:type exclude_through_models: bool
:param exclude_primary_keys: flag to exclude primary keys from dict
:type exclude_primary_keys: bool
:param include: fields to include
:type include: Union[Set, Dict, None]
:param exclude: fields to exclude
:type exclude: Union[Set, Dict, None]
:param by_alias: flag to get values by alias - passed to pydantic
:type by_alias: bool
:param skip_defaults: flag to not set values - passed to pydantic
:type skip_defaults: bool
:param exclude_unset: flag to exclude not set values - passed to pydantic
:type exclude_unset: bool
:param exclude_defaults: flag to exclude default values - passed to pydantic
:type exclude_defaults: bool
:param exclude_none: flag to exclude None values - passed to pydantic
:type exclude_none: bool
        :param relation_map: map of the relations to follow to avoid circular deps
        :type relation_map: Dict
        :return: dictionary representation of the model
        :rtype: Dict
"""
pydantic_exclude = self._update_excluded_with_related(exclude)
pydantic_exclude = self._update_excluded_with_pks_and_through(
exclude=pydantic_exclude,
exclude_primary_keys=exclude_primary_keys,
exclude_through_models=exclude_through_models,
)
dict_instance = super().dict(
include=include,
exclude=pydantic_exclude,
by_alias=by_alias,
skip_defaults=skip_defaults,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
exclude_none=exclude_none,
)
dict_instance = {
k: self._convert_bytes_to_str(column_name=k, value=v)
for k, v in dict_instance.items()
}
if include and isinstance(include, Set):
include = translate_list_to_dict(include)
if exclude and isinstance(exclude, Set):
exclude = translate_list_to_dict(exclude)
relation_map = (
relation_map
if relation_map is not None
else translate_list_to_dict(self._iterate_related_models())
)
pk_only = getattr(self, "__pk_only__", False)
if relation_map and not pk_only:
dict_instance = self._extract_nested_models(
relation_map=relation_map,
dict_instance=dict_instance,
include=include, # type: ignore
exclude=exclude, # type: ignore
exclude_primary_keys=exclude_primary_keys,
exclude_through_models=exclude_through_models,
)
# include model properties as fields in dict
if object.__getattribute__(self, "Meta").property_fields:
props = self.get_properties(include=include, exclude=exclude)
if props:
dict_instance.update({prop: getattr(self, prop) for prop in props})
return dict_instance
def json( # type: ignore # noqa A003
self,
*,
include: Union[Set, Dict] = None,
exclude: Union[Set, Dict] = None,
by_alias: bool = False,
skip_defaults: bool = None,
exclude_unset: bool = False,
exclude_defaults: bool = False,
exclude_none: bool = False,
encoder: Optional[Callable[[Any], Any]] = None,
exclude_primary_keys: bool = False,
exclude_through_models: bool = False,
**dumps_kwargs: Any,
) -> str:
"""
Generate a JSON representation of the model, `include` and `exclude`
arguments as per `dict()`.
`encoder` is an optional function to supply as `default` to json.dumps(),
other arguments as per `json.dumps()`.
"""
if skip_defaults is not None: # pragma: no cover
warnings.warn(
f'{self.__class__.__name__}.json(): "skip_defaults" is deprecated '
f'and replaced by "exclude_unset"',
DeprecationWarning,
)
exclude_unset = skip_defaults
encoder = cast(Callable[[Any], Any], encoder or self.__json_encoder__)
data = self.dict(
include=include,
exclude=exclude,
by_alias=by_alias,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
exclude_none=exclude_none,
exclude_primary_keys=exclude_primary_keys,
exclude_through_models=exclude_through_models,
)
if self.__custom_root_type__: # pragma: no cover
data = data["__root__"]
return self.__config__.json_dumps(data, default=encoder, **dumps_kwargs)
def update_from_dict(self, value_dict: Dict) -> "NewBaseModel":
"""
Updates self with values of fields passed in the dictionary.
:param value_dict: dictionary of fields names and values
:type value_dict: Dict
:return: self
:rtype: NewBaseModel
"""
for key, value in value_dict.items():
setattr(self, key, value)
return self
def _convert_to_bytes(self, column_name: str, value: Any) -> Union[str, Dict]:
"""
        Converts a string value to bytes for bytes fields.
        :param column_name: name of the field
        :type column_name: str
        :param value: value of the field
:type value: Any
:return: converted value if needed, else original value
:rtype: Any
"""
if column_name not in self._bytes_fields:
return value
field = self.Meta.model_fields[column_name]
if not isinstance(value, bytes):
if field.represent_as_base64_str:
value = base64.b64decode(value)
else:
value = value.encode("utf-8")
return value
def _convert_bytes_to_str(self, column_name: str, value: Any) -> Union[str, Dict]:
"""
Converts value to str from bytes for represent_as_base64_str columns.
:param column_name: name of the field
:type column_name: str
        :param value: value of the field
:type value: Any
:return: converted value if needed, else original value
:rtype: Any
"""
if column_name not in self._bytes_fields:
return value
field = self.Meta.model_fields[column_name]
if not isinstance(value, str) and field.represent_as_base64_str:
return base64.b64encode(value).decode()
return value
def _convert_json(self, column_name: str, value: Any) -> Union[str, Dict]:
"""
Converts value to/from json if needed (for Json columns).
:param column_name: name of the field
:type column_name: str
        :param value: value of the field
:type value: Any
:return: converted value if needed, else original value
:rtype: Any
"""
if column_name not in self._json_fields:
return value
if not isinstance(value, str):
try:
value = json.dumps(value)
except TypeError: # pragma no cover
pass
return value.decode("utf-8") if isinstance(value, bytes) else value
def _extract_own_model_fields(self) -> Dict:
"""
Returns a dictionary with field names and values for fields that are not
relations fields (ForeignKey, ManyToMany etc.)
:return: dictionary of fields names and values.
:rtype: Dict
"""
related_names = self.extract_related_names()
self_fields = {k: v for k, v in self.__dict__.items() if k not in related_names}
return self_fields
def _extract_model_db_fields(self) -> Dict:
"""
Returns a dictionary with field names and values for fields that are stored in
current model's table.
        That includes own non-relational fields and foreign key fields.
:return: dictionary of fields names and values.
:rtype: Dict
"""
self_fields = self._extract_own_model_fields()
self_fields = {
k: v
for k, v in self_fields.items()
if self.get_column_alias(k) in self.Meta.table.columns
}
for field in self._extract_db_related_names():
relation_field = self.Meta.model_fields[field]
target_pk_name = relation_field.to.Meta.pkname
target_field = getattr(self, field)
self_fields[field] = getattr(target_field, target_pk_name, None)
if not relation_field.nullable and not self_fields[field]:
raise ModelPersistenceError(
f"You cannot save {relation_field.to.get_name()} "
f"model without pk set!"
)
return self_fields
def get_relation_model_id(self, target_field: "BaseField") -> Optional[int]:
"""
Returns an id of the relation side model to use in prefetch query.
:param target_field: field with relation definition
:type target_field: "BaseField"
:return: value of pk if set
:rtype: Optional[int]
"""
if target_field.virtual or target_field.is_multi:
return self.pk
related_name = target_field.name
related_model = getattr(self, related_name)
return None if not related_model else related_model.pk
|
import unittest
import torch
from joeynmt.data import load_data
from joeynmt.helpers import expand_reverse_index
from joeynmt.model import build_model
from joeynmt.prediction import parse_test_args, validate_on_data
# TODO make sure rnn also returns the nbest list in the resorted order
class TestHelpers(unittest.TestCase):
def test_expand_reverse_index(self):
reverse_index = [1, 0, 2]
n_best = 1
reverse_index_1best = expand_reverse_index(reverse_index, n_best)
self.assertEqual(reverse_index_1best, [1, 0, 2])
n_best = 2
reverse_index_2best = expand_reverse_index(reverse_index, n_best)
self.assertEqual(reverse_index_2best, [2, 3, 0, 1, 4, 5])
n_best = 3
reverse_index_3best = expand_reverse_index(reverse_index, n_best)
self.assertEqual(reverse_index_3best, [3, 4, 5, 0, 1, 2, 6, 7, 8])
class TestPrediction(unittest.TestCase):
def setUp(self):
seed = 42
torch.manual_seed(seed)
self.cfg = {
"data": {
"src": "de",
"trg": "en",
"train": "test/data/toy/train", # needed for vocab
"test": "test/data/toy/test",
"level": "word",
"lowercase": False,
"max_sent_length": 10
},
"testing": {
"bpe_type": None,
"beam_size": 5,
"alpha": 1.0
},
"training": {
"batch_size": 2,
"batch_type": "sentence",
"eval_metric": "bleu"
},
"model": {
"tied_embeddings": False,
"tied_softmax": False,
"encoder": {
"type": "transformer",
"hidden_size": 12,
"ff_size": 24,
"embeddings": {"embedding_dim": 12},
"num_layers": 1,
"num_heads": 4
},
"decoder": {
"type": "transformer",
"hidden_size": 12,
"ff_size": 24,
"embeddings": {"embedding_dim": 12},
"num_layers": 1,
"num_heads": 4
},
}
}
# load data
_, _, test_data, src_vocab, trg_vocab = load_data(
self.cfg["data"], datasets=["train", "test"])
self.test_data = test_data
self.parsed_cfg = parse_test_args(self.cfg, mode="translate")
# build model
        active_layers = self.cfg["testing"].get("active_layers", [])
self.model = build_model(self.cfg["model"], active_layers,
src_vocab=src_vocab, trg_vocab=trg_vocab)
def _translate(self, n_best):
(batch_size, batch_type, use_cuda, device, n_gpu, level, eval_metric,
max_output_length, beam_size, beam_alpha, postprocess, bpe_type,
sacrebleu, _, _) = self.parsed_cfg
(score, loss, ppl, sources, sources_raw, references, hypotheses,
hypotheses_raw, attention_scores) = validate_on_data(
self.model, data=self.test_data, batch_size=batch_size,
batch_type=batch_type, level=level, use_cuda=use_cuda,
max_output_length=max_output_length, eval_metric=None,
compute_loss=False, beam_size=beam_size, beam_alpha=beam_alpha,
postprocess=postprocess, bpe_type=bpe_type, sacrebleu=sacrebleu,
n_gpu=n_gpu, n_best=n_best)
return sources, hypotheses
def test_transformer_nbest(self):
n_best = 1
sources_1best, hypotheses_1best = self._translate(n_best)
self.assertEqual(len(self.test_data), len(hypotheses_1best))
n_best = 5
sources_5best, hypotheses_5best = self._translate(n_best)
self.assertEqual(len(self.test_data) * n_best, len(hypotheses_5best))
for n in range(n_best):
hyp = [hypotheses_5best[i]
for i in range(n, len(hypotheses_5best), n_best)]
self.assertEqual(len(self.test_data), len(hyp)) # unroll
if n == 0:
# hypotheses must match 1best_hypotheses
self.assertEqual(hypotheses_1best, hyp)
n_best = 10
with self.assertRaises(AssertionError) as e:
self._translate(n_best)
self.assertEqual('Can only return 5 best hypotheses.', str(e.exception))
|
from dbaas_zabbix.database_providers import DatabaseZabbixProvider
class Host(object):
def __init__(self, address, dns):
self.address = address
self.hostname = dns
class Engine(object):
def __init__(self, name, version='0.0.0'):
self.engine_type = EngineType(name)
self.version = version
class EngineType(object):
def __init__(self, name):
self.name = name
class Plan(object):
def __init__(self, is_ha):
self.is_ha = is_ha
class Instance(object):
def __init__(self, dns, hostname):
self.address = hostname.address
self.dns = dns
self.hostname = hostname
class Team(object):
def __init__(self, name):
self.name = name
@property
def organization(self):
return None
class Database(object):
def __init__(self, name):
self.name = name
self.team = Team(name)
def first(self):
return self
class Driver(object):
def __init__(self, databaseinfra):
self.databaseinfra = databaseinfra
def get_database_instances(self):
return self.databaseinfra.instances
def get_non_database_instances(self):
return self.databaseinfra.instances
class CloudStackInfra(object):
def all(self):
return []
class DatabaseInfra(object):
def __init__(self, instances, environment, plan, name):
self.instances = instances
self.environment = environment
self.name = name
self.engine = Engine(name)
self.plan = plan
self.cs_dbinfra_attributes = CloudStackInfra()
self.databases = Database(name)
def get_driver(self):
if hasattr(self, 'driver'):
return self.driver
self.driver = Driver(self)
return self.driver
class InstanceList(list):
def all(self):
return self
def set_up_databaseinfra(is_ha=True, name="fake"):
instances = InstanceList()
plan = Plan(is_ha)
for n in range(1, 4):
address = '10.10.10.1{}'.format(n)
dns = 'myhost_{}'.format(n)
host = Host(address, dns + '.com')
instance = Instance(dns + '.database.com', host)
instances.append(instance)
return DatabaseInfra(instances, 'development', plan, name)
class FakeZabbixAPI(object):
def __init__(self, server,):
self.server = server
self.id = id(self)
self.last_call = []
self.triggers = []
def login(self, user, password):
pass
def do_request(self, method, params):
request_json = {
'jsonrpc': '2.0',
'method': method,
'params': params,
'id': self.id,
}
self.last_call.append(request_json)
if method == 'host.get':
request_json = [{'name': 'fake', 'hostid': '3309'}]
elif method == 'hostinterface.get':
request_json = [{'interfaceid': '3310'}]
elif method == 'trigger.get':
request_json = self.triggers
return request_json
def __getattr__(self, attr):
return FakeZabbixAPIObjectClass(attr, self)
def add_trigger(self, status, id):
self.triggers.append({'status': str(status), 'triggerid': str(id)})
class FakeZabbixAPIObjectClass(object):
def __init__(self, name, parent):
self.name = name
self.parent = parent
def __getattr__(self, attr):
def fn(*args, **kwargs):
if args and kwargs:
raise TypeError("Found both args and kwargs")
return self.parent.do_request(
'{0}.{1}'.format(self.name, attr),
args or kwargs
)
return fn
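# Illustrative use of the fake API above (hypothetical values, shown only as a
# sketch): attribute access on FakeZabbixAPI is resolved by __getattr__ into a
# FakeZabbixAPIObjectClass, so a call such as
#   api = FakeZabbixAPI('http://zabbix.example')
#   api.host.get(filter={'name': 'fake'})
# is dispatched as do_request('host.get', {'filter': {'name': 'fake'}}).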
class FakeDatabaseZabbixProvider(DatabaseZabbixProvider):
@property
def secondary_ips(self,):
return []
class FakeCredential(object):
def __init__(self):
self.user = ''
self.password = ''
self.endpoint = ''
@property
def slack_notification(self):
return None
    def get_parameter_by_name(self, name):
        if name == "slack_notification":
return self.slack_notification
return ''
def get_parameters_by_group(self, group_name):
if group_name == "group_host":
return {
"support": "ZabbixSupport",
"grafana": "GrafanaTeam",
"graphite": "GraphiteGroup"
}
if group_name == "group_database":
return {
"dbproduction": "DBA Team",
"grafana": "GrafanaTeam",
"dbaasmetrics": "DBaaS/Metrics"
}
return {}
class FakeCredentialWithSlack(FakeCredential):
@property
def slack_notification(self):
return "@user,#channel"
|
import aioredis
import asyncio
import base64
import concurrent
import json
import logging
import os
import uvloop
from aiohttp import web
import kubernetes_asyncio as kube
from prometheus_async.aio.web import server_stats # type: ignore
from hailtop.config import get_deploy_config
from hailtop.google_storage import GCS
from hailtop.hail_logging import AccessLogger
from hailtop.tls import internal_server_ssl_context
from hailtop.utils import AsyncWorkerPool, retry_transient_errors
from gear import setup_aiohttp_session, rest_authenticated_users_only, monitor_endpoint
uvloop.install()
DEFAULT_NAMESPACE = os.environ['HAIL_DEFAULT_NAMESPACE']
log = logging.getLogger('batch')
routes = web.RouteTableDef()
socket = '/redis/redis.sock'
@routes.get('/healthcheck')
async def healthcheck(request): # pylint: disable=unused-argument
return web.Response()
@routes.get('/api/v1alpha/objects')
@monitor_endpoint
@rest_authenticated_users_only
async def get_object(request, userdata):
filename = request.query.get('q')
etag = request.query.get('etag')
userinfo = await get_or_add_user(request.app, userdata)
username = userdata['username']
log.info(f'memory: request for object {filename} from user {username}')
result = await get_file_or_none(request.app, username, userinfo, filename, etag)
if result is None:
raise web.HTTPNotFound()
etag, body = result
return web.Response(headers={'ETag': etag}, body=body)
async def get_or_add_user(app, userdata):
users = app['users']
username = userdata['username']
if username not in users:
k8s_client = app['k8s_client']
gsa_key_secret = await retry_transient_errors(
k8s_client.read_namespaced_secret,
userdata['gsa_key_secret_name'],
DEFAULT_NAMESPACE,
_request_timeout=5.0)
gsa_key = base64.b64decode(gsa_key_secret.data['key.json']).decode()
users[username] = {'fs': GCS(blocking_pool=app['thread_pool'], key=json.loads(gsa_key))}
return users[username]
def make_redis_key(username, filepath):
return f'{ username }_{ filepath }'
async def get_file_or_none(app, username, userinfo, filepath, etag):
file_key = make_redis_key(username, filepath)
fs = userinfo['fs']
cached_etag, result = await app['redis_pool'].execute('HMGET', file_key, 'etag', 'body')
if cached_etag is not None and cached_etag.decode('ascii') == etag:
        log.info(f"memory: Retrieved file {filepath} for user {username} with etag '{etag}'")
return cached_etag.decode('ascii'), result
log.info(f"memory: Couldn't retrieve file {filepath} for user {username}: current version not in cache (requested '{etag}', found '{cached_etag}').")
if file_key not in app['files_in_progress']:
try:
log.info(f"memory: Loading {filepath} to cache for user {username}")
app['worker_pool'].call_nowait(load_file, app['redis_pool'], app['files_in_progress'], file_key, fs, filepath)
app['files_in_progress'].add(file_key)
except asyncio.QueueFull:
pass
return None
async def load_file(redis, files, file_key, fs, filepath):
try:
log.info(f"memory: {file_key}: reading.")
data = await fs.read_binary_gs_file(filepath)
etag = await fs.get_etag(filepath)
log.info(f"memory: {file_key}: read {filepath} with etag {etag}")
await redis.execute('HMSET', file_key, 'etag', etag.encode('ascii'), 'body', data)
log.info(f"memory: {file_key}: stored {filepath} ('{etag}').")
finally:
files.remove(file_key)
async def on_startup(app):
app['thread_pool'] = concurrent.futures.ThreadPoolExecutor()
app['worker_pool'] = AsyncWorkerPool(parallelism=100, queue_size=10)
app['files_in_progress'] = set()
app['users'] = {}
kube.config.load_incluster_config()
k8s_client = kube.client.CoreV1Api()
app['k8s_client'] = k8s_client
app['redis_pool'] = await aioredis.create_pool(socket)
async def on_cleanup(app):
try:
app['thread_pool'].shutdown()
finally:
try:
app['worker_pool'].shutdown()
finally:
try:
app['redis_pool'].close()
finally:
del app['k8s_client']
await asyncio.gather(*(t for t in asyncio.all_tasks() if t is not asyncio.current_task()))
def run():
app = web.Application()
setup_aiohttp_session(app)
app.add_routes(routes)
app.router.add_get("/metrics", server_stats)
app.on_startup.append(on_startup)
app.on_cleanup.append(on_cleanup)
deploy_config = get_deploy_config()
web.run_app(
deploy_config.prefix_application(app, 'memory'),
host='0.0.0.0',
port=5000,
access_log_class=AccessLogger,
ssl_context=internal_server_ssl_context())
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Hive Appier Framework
# Copyright (c) 2008-2021 Hive Solutions Lda.
#
# This file is part of Hive Appier Framework.
#
# Hive Appier Framework is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by the Apache
# Foundation, either version 2.0 of the License, or (at your option) any
# later version.
#
# Hive Appier Framework is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License along with
# Hive Appier Framework. If not, see <http://www.apache.org/licenses/>.
__author__ = "João Magalhães <[email protected]>"
""" The author(s) of the module """
__version__ = "1.0.0"
""" The version of the module """
__revision__ = "$LastChangedRevision$"
""" The revision number of the module """
__date__ = "$LastChangedDate$"
""" The last change date of the module """
__copyright__ = "Copyright (c) 2008-2021 Hive Solutions Lda."
""" The copyright for the module """
__license__ = "Apache License, Version 2.0"
""" The license for the module """
import copy
from . import common
from . import legacy
from . import exceptions
class ModelAsync(object):
@classmethod
async def get_a(cls, *args, **kwargs):
fields,\
eager,\
eager_l,\
map,\
rules,\
meta,\
build,\
fill,\
resolve_a,\
skip,\
limit,\
sort,\
raise_e = cls._get_attrs(kwargs, (
("fields", None),
("eager", None),
("eager_l", None),
("map", False),
("rules", True),
("meta", False),
("build", True),
("fill", True),
("resolve_a", None),
("skip", 0),
("limit", 0),
("sort", None),
("raise_e", True)
))
if eager_l == None: eager_l = map
if resolve_a == None: resolve_a = map
if eager_l: eager = cls._eager_b(eager)
fields = cls._sniff(fields, rules = rules)
collection = cls._collection_a()
model = await collection.find_one(
kwargs,
fields,
skip = skip,
limit = limit,
sort = sort
)
if not model and raise_e:
is_devel = common.is_devel()
if is_devel: message = "%s not found for %s" % (cls.__name__, str(kwargs))
else: message = "%s not found" % cls.__name__
raise exceptions.NotFoundError(message = message)
if not model and not raise_e: return model
cls.types(model)
if fill: cls.fill(model, safe = True)
if build: cls.build(model, map = map, rules = rules, meta = meta)
if eager: model = cls._eager(model, eager, map = map)
if resolve_a: model = cls._resolve_all(model, resolve = False)
return model if map else cls.old(model = model, safe = False)
@classmethod
async def find_a(cls, *args, **kwargs):
fields,\
eager,\
eager_l,\
map,\
rules,\
meta,\
build,\
fill,\
resolve_a,\
skip,\
limit,\
sort,\
raise_e = cls._get_attrs(kwargs, (
("fields", None),
("eager", None),
("eager_l", False),
("map", False),
("rules", True),
("meta", False),
("build", True),
("fill", True),
("resolve_a", None),
("skip", 0),
("limit", 0),
("sort", None),
("raise_e", False)
))
if resolve_a == None: resolve_a = map
if eager_l: eager = cls._eager_b(eager)
cls._find_s(kwargs)
cls._find_d(kwargs)
fields = cls._sniff(fields, rules = rules)
collection = cls._collection_a()
models = collection.find(
kwargs,
fields,
skip = skip,
limit = limit,
sort = sort
)
if not models and raise_e:
is_devel = common.is_devel()
if is_devel: message = "%s not found for %s" % (cls.__name__, str(kwargs))
else: message = "%s not found" % cls.__name__
raise exceptions.NotFoundError(message = message)
models = [cls.types(model) async for model in models]
if fill: models = [cls.fill(model, safe = True) for model in models]
if build: [cls.build(model, map = map, rules = rules, meta = meta) for model in models]
if eager: models = cls._eager(models, eager, map = map)
if resolve_a: models = [cls._resolve_all(model, resolve = False) for model in models]
models = models if map else [cls.old(model = model, safe = False) for model in models]
return models
@classmethod
async def _increment_a(cls, name):
_name = cls._name() + ":" + name
store = cls._collection_a(name = "counters")
value = await store.find_and_modify(
{
"_id" : _name
},
{
"$inc" : {
"seq" : 1
}
},
new = True,
upsert = True
)
value = value or await store.find_one({
"_id" : _name
})
return value["seq"]
async def save_a(
self,
validate = True,
verify = True,
is_new = None,
increment_a = None,
immutables_a = None,
pre_validate = True,
pre_save = True,
pre_create = True,
pre_update = True,
post_validate = True,
post_save = True,
post_create = True,
post_update = True,
before_callbacks = [],
after_callbacks = []
):
# ensures that the current instance is associated with
# a concrete model, ready to be persisted in database
if verify: self.assert_is_concrete()
# checks if the instance to be saved is a new instance
# or if this is an update operation and then determines
# series of default values taking that into account
if is_new == None: is_new = self.is_new()
if increment_a == None: increment_a = is_new
if immutables_a == None: immutables_a = not is_new
# runs the validation process in the current model, this
# should ensure that the model is ready to be saved in the
# data source, without corruption of it, only run this process
# in case the validate flag is correctly set
validate and self._validate(
pre_validate = pre_validate,
post_validate = post_validate
)
# calls the complete set of event handlers for the current
# save operation, this should trigger changes in the model
pre_save and self.pre_save()
pre_create and is_new and self.pre_create()
pre_update and not is_new and self.pre_update()
# filters the values that are present in the current model
# so that only the valid ones are stored in, invalid values
# are going to be removed, note that if the operation is an
# update operation and the "immutable rules" also apply, the
# returned value is normalized meaning that for instance if
# any relation is loaded the reference value is returned instead
# of the loaded relation values (required for persistence)
model = await self._filter_a(
increment_a = increment_a,
immutables_a = immutables_a,
normalize = True
)
# in case the current model is not new must create a new
# model instance and remove the main identifier from it
if not is_new: _model = copy.copy(model); del _model["_id"]
# calls the complete set of callbacks that should be called
# before the concrete data store save operation
for callback in before_callbacks: callback(self, model)
# retrieves the reference to the store object to be used and
# uses it to store the current model data
store = self._get_store_a()
if is_new:
await store.insert(model)
self.apply(model, safe_a = False)
else:
await store.update({"_id" : model["_id"]}, {"$set" : _model})
# calls the complete set of callbacks that should be called
# after the concrete data store save operation
for callback in after_callbacks: callback(self, model)
# calls the post save event handlers in order to be able to
# execute appropriate post operations
post_save and self.post_save()
post_create and is_new and self.post_create()
post_update and not is_new and self.post_update()
# returns the instance that has just been used for the save
# operation, this may be used for chaining operations
return self
async def delete_a(
self,
verify = True,
pre_delete = True,
post_delete = True,
before_callbacks = [],
after_callbacks = []
):
# ensures that the current instance is associated with
# a concrete model, ready to be persisted in database
if verify: self.assert_is_concrete()
# calls the complete set of event handlers for the current
# delete operation, this should trigger changes in the model
pre_delete and self.pre_delete()
# calls the complete set of callbacks that should be called
# before the concrete data store delete operation
for callback in before_callbacks: callback(self)
# retrieves the reference to the store object to be able to
# execute the removal command for the current model
store = self._get_store_a()
await store.remove({"_id" : self._id})
# calls the underlying delete handler that may be used to extend
# the default delete functionality
self._delete()
# calls the complete set of callbacks that should be called
# after the concrete data store delete operation
for callback in after_callbacks: callback(self)
# calls the complete set of event handlers for the current
# delete operation, this should trigger changes in the model
post_delete and self.post_delete()
async def reload_a(self, *args, **kwargs):
is_new = self.is_new()
if is_new: raise exceptions.OperationalError(
message = "Can't reload a new model entity",
code = 412
)
cls = self.__class__
return await cls.get_a(_id = self._id, *args, **kwargs)
async def _filter_a(
self,
increment_a = True,
immutables_a = False,
normalize = False,
resolve = False,
all = False,
evaluator = "json_v"
):
# creates the model that will hold the "filtered" model
# with all the items that conform with the class specification
model = {}
# retrieves the class associated with the current instance
# to be able to retrieve the correct definition methods
cls = self.__class__
# retrieves the (schema) definition for the current model
# to be "filtered" it's going to be used to retrieve the
# various definitions for the model fields
definition = cls.definition()
# retrieves the complete list of fields that are meant to be
# automatically incremented for every save operation
increments = cls.increments()
# gather the set of elements that are considered immutables and
# that are not meant to be changed if the current operation to
# apply the filter is not a new operation (update operation)
immutables = cls.immutables()
# iterates over all the increment fields and increments their
# fields so that a new value is set on the model, note that if
# the increment apply is unset the increment operation is ignored
for name in increments:
if not increment_a: continue
if name in self.model:
model[name] = cls._ensure_min(name, self.model[name])
else:
model[name] = await cls._increment_a(name)
# iterates over all the model items to filter the ones
# that are not valid for the current class context
for name, value in legacy.eager(self.model.items()):
if not name in definition: continue
if immutables_a and name in immutables: continue
value = self._evaluate(name, value, evaluator = evaluator)
model[name] = value
# in case the normalize flag is set must iterate over all
# items to try to normalize the values by calling the reference
# value this will returns the reference index value instead of
# the normal value that would prevent normalization
if normalize:
for name, value in legacy.eager(self.model.items()):
if not name in definition: continue
if not hasattr(value, "ref_v"): continue
model[name] = value.ref_v()
# in case the resolution flag is set, it means that a recursive
# approach must be performed for the resolution of values that
# implement the map value (recursive resolution) method, this is
# a complex (and possible computational expensive) process that
# may imply access to the base data source
if resolve:
for name, value in legacy.eager(self.model.items()):
if not name in definition: continue
model[name] = cls._resolve(name, value)
# in case the all flag is set the extra fields (not present
# in definition) must also be used to populate the resulting
# (filtered) map so that it contains the complete set of values
# present in the base map of the current instance
if all:
for name, value in legacy.eager(self.model.items()):
if name in model: continue
model[name] = value
# returns the model containing the "filtered" items resulting
# from the validation of the items against the model class
return model
|
import numpy as np
import matplotlib.pyplot as plt
# Note: default_pars_single() and plot_dFdt() are helper functions assumed to be
# defined elsewhere in the accompanying tutorial code; they are not part of this
# snippet.
def dF(x, a, theta):
"""
Population activation function.
Args:
x : the population input
a : the gain of the function
theta : the threshold of the function
Returns:
    dFdx : the derivative of the population activation function F(x) with respect to the input x
"""
# Calculate the population activation
dFdx = a * np.exp(-a * (x - theta)) * (1 + np.exp(-a * (x - theta)))**-2
return dFdx
pars = default_pars_single() # get default parameters
x = np.arange(0, 10, .1) # set the range of input
df = dF(x, pars['a'], pars['theta'])
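# Optional sanity check (not part of the original tutorial code): dF should be
# the analytical derivative of the logistic activation
# F(x) = 1 / (1 + exp(-a * (x - theta))). The sigmoid defined below is an
# assumption introduced only for this finite-difference comparison.
def F_sigmoid(x, a, theta):
    """Logistic activation used only for the finite-difference check."""
    return 1.0 / (1.0 + np.exp(-a * (x - theta)))
h = 1e-5
df_numeric = (F_sigmoid(x + h, pars['a'], pars['theta'])
              - F_sigmoid(x - h, pars['a'], pars['theta'])) / (2 * h)
assert np.allclose(df, df_numeric, atol=1e-6), "dF does not match the numerical derivative"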
with plt.xkcd():
plot_dFdt(x, df) |
import pyrealsense2 as rs
width = 848
height = 480
fps = 30
config = rs.config()
config.enable_stream(rs.stream.color, width, height, rs.format.bgr8, fps)
config.enable_stream(rs.stream.depth, width, height, rs.format.z16, fps)
# Start streaming
pipeline = rs.pipeline()
profile = pipeline.start(config)
depth_intrinsics = rs.video_stream_profile(profile.get_stream(rs.stream.depth)).get_intrinsics()
color_intrinsics = rs.video_stream_profile(profile.get_stream(rs.stream.color)).get_intrinsics()
print("depth_intrinsics")
print(depth_intrinsics)
print()
print("color_intrinsics")
print(color_intrinsics)
print()
|
'''Implementation Vibrator for Android.'''
from jnius import autoclass
from plyer_lach.facades import Vibrator
from plyer_lach.platforms.android import activity
from plyer_lach.platforms.android import SDK_INT
Context = autoclass('android.content.Context')
vibrator = activity.getSystemService(Context.VIBRATOR_SERVICE)
class AndroidVibrator(Vibrator):
'''Android Vibrator class.
Supported features:
* vibrate for some period of time.
* vibrate from given pattern.
* cancel vibration.
* check whether Vibrator exists.
'''
def _vibrate(self, time=None, **kwargs):
if vibrator:
vibrator.vibrate(int(1000 * time))
def _pattern(self, pattern=None, repeat=None, **kwargs):
pattern = [int(1000 * time) for time in pattern]
if vibrator:
vibrator.vibrate(pattern, repeat)
def _exists(self, **kwargs):
if SDK_INT >= 11:
return vibrator.hasVibrator()
elif activity.getSystemService(Context.VIBRATOR_SERVICE) is None:
raise NotImplementedError()
return True
def _cancel(self, **kwargs):
vibrator.cancel()
def instance():
'''Returns Vibrator with android features.
:return: instance of class AndroidVibrator
'''
return AndroidVibrator()
|
import csv
import sys
import re
if len(sys.argv) != 3:
    print("Usage: python dna.py data.csv sequence.txt")
    sys.exit(1)
# Read DNA sequence
with open(sys.argv[2], "r") as txt:
    dna = txt.read()
# Open CSV file
with open(sys.argv[1], "r") as data:
# Open CSV to get the key
read = csv.DictReader(data)
for row in read:
i = list(row)
break
i = i[1:]
s = {}
for STR in i:
    # Find every run of consecutive repeats of this STR in the DNA sequence
    result = re.findall(f"(?:{STR})+", dna)
    if result:
        # The longest run divided by the STR length gives the repeat count
        s[STR] = str(int(len(max(result, key=len)) / len(STR)))
# Search the CSV for a person whose STR counts match the sequence
with open(sys.argv[1], "r") as data:
read = csv.DictReader(data)
for row in read:
a = dict(row)
del a["name"]
if a == s:
print(row["name"])
break
# End :)
else:
print("No match")
sys.exit(0) |
"""Sweep tests"""
import wandb
def test_create_sweep(live_mock_server, test_settings):
live_mock_server.set_ctx({"resume": True})
sweep_config = {
"name": "My Sweep",
"method": "grid",
"parameters": {"parameter1": {"values": [1, 2, 3]}},
}
sweep_id = wandb.sweep(sweep_config)
assert sweep_id == "test"
|
def setup():
size(500, 500)
smooth()
background(255)
strokeWeight(30)
noLoop()
def draw():
    stroke(20)
    i = 0
    while i < 7:
        i = i + 1
        line(i * 50, 200, 150 + (i - 1) * 50, 300)
|
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import webkitpy.thirdparty.unittest2 as unittest
from webkitpy.common import lru_cache
class LRUCacheTest(unittest.TestCase):
def setUp(self):
self.lru = lru_cache.LRUCache(3)
self.lru['key_1'] = 'item_1'
self.lru['key_2'] = 'item_2'
self.lru['key_3'] = 'item_3'
self.lru2 = lru_cache.LRUCache(1)
self.lru2['key_1'] = 'item_1'
def test_items(self):
self.assertEqual(set(self.lru.items()), set([('key_1', 'item_1'), ('key_3', 'item_3'), ('key_2', 'item_2')]))
def test_put(self):
self.lru['key_4'] = 'item_4'
self.assertEqual(set(self.lru.items()), set([('key_4', 'item_4'), ('key_3', 'item_3'), ('key_2', 'item_2')]))
def test_update(self):
self.lru['key_1']
self.lru['key_5'] = 'item_5'
self.assertEqual(set(self.lru.items()), set([('key_1', 'item_1'), ('key_3', 'item_3'), ('key_5', 'item_5')]))
def test_keys(self):
self.assertEqual(set(self.lru.keys()), set(['key_1', 'key_2', 'key_3']))
def test_delete(self):
del self.lru['key_1']
self.assertFalse('key_1' in self.lru)
def test_contain(self):
self.assertTrue('key_1' in self.lru)
self.assertFalse('key_4' in self.lru)
def test_values(self):
self.assertEqual(set(self.lru.values()), set(['item_1', 'item_2', 'item_3']))
def test_len(self):
self.assertEqual(len(self.lru), 3)
def test_size_one_pop(self):
self.lru2['key_2'] = 'item_2'
self.assertEqual(self.lru2.keys(), ['key_2'])
def test_size_one_delete(self):
del self.lru2['key_1']
self.assertFalse('key_1' in self.lru2)
def test_pop_error(self):
self.assertRaises(KeyError, self.lru2.__getitem__, 'key_2')
del self.lru2['key_1']
self.assertRaises(KeyError, self.lru2.__getitem__, 'key_2')
def test_get_middle_item(self):
self.lru['key_2']
self.lru['key_4'] = 'item_4'
self.lru['key_5'] = 'item_5'
self.assertEqual(set(self.lru.keys()), set(['key_2', 'key_4', 'key_5']))
def test_set_again(self):
self.lru['key_1'] = 'item_4'
self.assertEqual(set(self.lru.items()), set([('key_1', 'item_4'), ('key_3', 'item_3'), ('key_2', 'item_2')]))
|
import pytest
from ddht.tools.factories.alexandria import AdvertisementFactory
from ddht.v5_1.alexandria.broadcast_log import BroadcastLog
@pytest.mark.trio
async def test_broadcast_log(alice, bob, conn):
ad_a = AdvertisementFactory(private_key=alice.private_key)
ad_b = AdvertisementFactory(private_key=alice.private_key)
ad_c = AdvertisementFactory(private_key=alice.private_key)
ad_d = AdvertisementFactory(private_key=bob.private_key)
ad_e = AdvertisementFactory(private_key=bob.private_key)
all_ads = (ad_a, ad_b, ad_c, ad_d, ad_e)
broadcast_log = BroadcastLog(conn, max_records=8)
# not logged when empty
for ad in all_ads:
assert not broadcast_log.was_logged(alice.node_id, ad)
assert not broadcast_log.was_logged(bob.node_id, ad)
broadcast_log.log(alice.node_id, ad_a)
assert broadcast_log.was_logged(alice.node_id, ad_a)
assert not broadcast_log.was_logged(bob.node_id, ad_a)
    # we insert 8 more records (the remaining 4 ads logged for both alice and
    # bob), which exceeds max_records=8 and should evict the oldest record:
    # alice's entry for `ad_a`
for ad in all_ads[1:]:
broadcast_log.log(alice.node_id, ad)
broadcast_log.log(bob.node_id, ad)
assert not broadcast_log.was_logged(alice.node_id, ad_a)
|
import py, os, sys
from pytest import mark, raises
from .support import setup_make
noboost = False
if not (os.path.exists(os.path.join(os.path.sep, 'usr', 'include', 'boost')) or \
os.path.exists(os.path.join(os.path.sep, 'usr', 'local', 'include', 'boost'))):
noboost = True
@mark.skipif(noboost == True, reason="boost not found")
class AppTestBOOSTANY:
spaceconfig = dict(usemodules=['_cppyy', '_rawffi', 'itertools'])
def setup_class(cls):
cls.space.appexec([], """():
import ctypes, _cppyy
_cppyy._post_import_startup()
_cppyy.gbl.gInterpreter.Declare('#include "boost/any.hpp"')
""")
def test01_any_class(self):
"""Availability of boost::any"""
import _cppyy as cppyy
assert cppyy.gbl.boost.any
std = cppyy.gbl.std
any = cppyy.gbl.boost.any
assert std.list[any]
def test02_any_usage(self):
"""boost::any assignment and casting"""
import _cppyy as cppyy
assert cppyy.gbl.boost
std = cppyy.gbl.std
boost = cppyy.gbl.boost
val = boost.any()
# test both by-ref and by rvalue
v = std.vector[int]()
val.__assign__(v)
val.__assign__(std.move(std.vector[int](range(100))))
assert val.type() == cppyy.typeid(std.vector[int])
extract = boost.any_cast[std.vector[int]](val)
assert type(extract) is std.vector[int]
assert len(extract) == 100
extract += range(100)
assert len(extract) == 200
val.__assign__(std.move(extract)) # move forced
#assert len(extract) == 0 # not guaranteed by the standard
# TODO: we hit boost::any_cast<int>(boost::any* operand) instead
# of the reference version which raises
boost.any_cast.__useffi__ = False
try:
# raises(Exception, boost.any_cast[int], val)
assert not boost.any_cast[int](val)
except Exception:
# getting here is good, too ...
pass
extract = boost.any_cast[std.vector[int]](val)
assert len(extract) == 200
@mark.skipif(noboost == True, reason="boost not found")
class AppTestBOOSTOPERATORS:
spaceconfig = dict(usemodules=['_cppyy', '_rawffi', 'itertools'])
def setup_class(cls):
cls.space.appexec([], """():
import ctypes, _cppyy
_cppyy._post_import_startup()
_cppyy.gbl.gInterpreter.Declare('#include "boost/operators.hpp"')
""")
def test01_ordered(self):
"""ordered_field_operators as base used to crash"""
import _cppyy as cppyy
cppyy.gbl.gInterpreter.Declare('#include "gmpxx.h"')
cppyy.gbl.gInterpreter.Declare("""
namespace boost_test {
class Derived : boost::ordered_field_operators<Derived>, boost::ordered_field_operators<Derived, mpq_class> {};
}
""")
assert cppyy.gbl.boost_test.Derived
@mark.skipif(noboost == True, reason="boost not found")
class AppTestBOOSTVARIANT:
spaceconfig = dict(usemodules=['_cppyy', '_rawffi', 'itertools'])
def setup_class(cls):
cls.space.appexec([], """():
import ctypes, _cppyy
_cppyy._post_import_startup()
_cppyy.gbl.gInterpreter.Declare('#include "boost/variant/variant.hpp"')
_cppyy.gbl.gInterpreter.Declare('#include "boost/variant/get.hpp"')
""")
def test01_variant_usage(self):
"""boost::variant usage"""
# as posted on stackoverflow as example
import _cppyy as cppyy
try:
cpp = cppyy.gbl
except:
pass
cpp = cppyy.gbl
std = cpp.std
boost = cpp.boost
cppyy.gbl.gInterpreter.Declare("""namespace BV {
class A { };
class B { };
class C { }; } """)
VariantType = boost.variant['BV::A, BV::B, BV::C']
VariantTypeList = std.vector[VariantType]
v = VariantTypeList()
v.push_back(VariantType(cpp.BV.A()))
assert v.back().which() == 0
v.push_back(VariantType(cpp.BV.B()))
assert v.back().which() == 1
v.push_back(VariantType(cpp.BV.C()))
assert v.back().which() == 2
assert type(boost.get['BV::A'](v[0])) == cpp.BV.A
raises(Exception, boost.get['BV::B'], v[0])
assert type(boost.get['BV::B'](v[1])) == cpp.BV.B
assert type(boost.get['BV::C'](v[2])) == cpp.BV.C
@mark.skipif(noboost == True, reason="boost not found")
class AppTestBOOSTERASURE:
spaceconfig = dict(usemodules=['_cppyy', '_rawffi', 'itertools'])
def setup_class(cls):
cls.space.appexec([], """():
import ctypes, _cppyy
_cppyy._post_import_startup()
_cppyy.gbl.gInterpreter.Declare('#include "boost/type_erasure/any.hpp"')
_cppyy.gbl.gInterpreter.Declare('#include "boost/type_erasure/member.hpp"')
""")
def test01_erasure_usage(self):
"""boost::type_erasure usage"""
import _cppyy as cppyy
cppyy.gbl.gInterpreter.Declare("""
BOOST_TYPE_ERASURE_MEMBER((has_member_f), f, 0)
using LengthsInterface = boost::mpl::vector<
boost::type_erasure::copy_constructible<>,
has_member_f<std::vector<int>() const>>;
using Lengths = boost::type_erasure::any<LengthsInterface>;
struct Unerased {
std::vector<int> f() const { return std::vector<int>{}; }
};
Lengths lengths() {
return Unerased{};
}
""")
assert cppyy.gbl.lengths() is not None
|
import operator
import os
import traceback
import warnings
import numpy as np
import pandas as pd
import tables
from pandas.errors import PerformanceWarning
from tables.exceptions import NaturalNameWarning
from tqdm import tqdm
from .._utils import std_out_err_redirect_tqdm
from ..instruments import _get_dataset_instruments_df, _supported
# TODO - debug the following warnings...
warnings.simplefilter(action="ignore", category=NaturalNameWarning)
warnings.simplefilter(action="ignore", category=PerformanceWarning)
def custom_format_warning(msg, *args, **kwargs):
# ignore everything except the message
return "WARNING: " + str(msg) + "\n"
warnings.formatwarning = custom_format_warning
def _metadata_template(calibration_sources=None):
# There are the columns that are required for the metadata file.
meta_df_cols = [
"datetime_utc",
"sample_set",
"scan_type",
"filename",
"description",
"comments",
"collected_by",
"dilution_factor",
]
cal_cols = ["calibration_sample", "prototypical_sample", "test_sample"]
if calibration_sources:
        meta_df_cols = meta_df_cols + list(calibration_sources) + cal_cols
return pd.DataFrame(columns=meta_df_cols)
def create_metadata_template(filepath, calibration_sources=None):
"""Creates an empty metadata template for future data collection.
Args:
filepath (str): A filepath with which the new template will be written to.
calibration_sources (dict of {str : str}, optional): The calibration sources
which will be measured in the future dataset. Defaults to None.
Returns:
pandas.DataFrame: The metadata template which was written to a CSV.
"""
abs_filepath = os.path.abspath(filepath)
meta_df = _metadata_template(calibration_sources=calibration_sources)
meta_df.to_csv(abs_filepath, index=False)
return meta_df
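# Illustrative usage (hypothetical filename and calibration source, shown only
# as a sketch): writes an empty metadata CSV that can then be filled in by hand.
#   _template = create_metadata_template(
#       "metadata_template.csv",
#       calibration_sources={"quinine_sulfate": "ug/L"},
#   )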
class Dataset:
"""An EEM dataset which keeps track of measurement data and metadata."""
def __init__(
self,
data_dir,
raman_instrument,
absorbance_instrument,
eem_instrument,
scan_sets_subdir="raw_sample_sets",
metadata_filename="metadata.csv",
hdf_filename="root.hdf5",
calibration_sources=None,
progress_bar=False,
**kwargs,
):
"""
Args:
data_dir (str): The path for the directory which contains the raw data and metadata.
raman_instrument (str, optional): The type of instrument used to collect Raman scans. Defaults to None.
absorbance_instrument (str, optional): The type of instrument used to collect absorbance scans. Defaults to None.
eem_instrument (str, optional): The type of instrument used to collect EEM scans. Defaults to None.
scan_sets_subdir (str, optional): The path for subdirectory containing the sample sets. Defaults to "raw_sample_sets".
metadata_filename (str, optional): The filename of the metadata file which keeps track of all the sample sets.
Defaults to "metadata.csv".
hdf_filename (str, optional): The filename of the HDF5 file. Defaults to "root.hdf5".
calibration_sources (dict of {str : str}, optional): A dictionary of calibration sources measured in the dataset.
Each source must be specified with its units. Defaults to None.
progress_bar (bool, optional): Determines whether or not a progress bar will be displayed to show progress of
dataset loading. Defaults to False.
"""
self.data_dir = os.path.abspath(data_dir)
self.scan_sets_subdir = os.path.join(self.data_dir, scan_sets_subdir)
self.metadata_path = os.path.join(self.data_dir, metadata_filename)
self.hdf_path = os.path.join(self.data_dir, hdf_filename)
self.hdf = self._get_hdf_store(**kwargs)
self.calibration_sources = calibration_sources
self.progress_bar = progress_bar
self.meta_df = self.load_metadata()
self.instruments_df = _get_dataset_instruments_df(
raman_instrument, absorbance_instrument, eem_instrument
)
self.load_sample_sets()
# In the future, consider using something like the library formencode
# to validate the inputs to this class. There has to be a cleaner way
# to do this.
data_dir = property(operator.attrgetter("_data_dir"))
scan_sets_subdir = property(operator.attrgetter("_scan_sets_subdir"))
metadata_path = property(operator.attrgetter("_metadata_path"))
@data_dir.setter
def data_dir(self, d):
# Ensure the data directory exists.
d = os.path.abspath(d)
if not os.path.isdir(d):
raise FileNotFoundError(d)
self._data_dir = d
@scan_sets_subdir.setter
def scan_sets_subdir(self, s):
# Ensure the scan sets subdirectory exists.
if not os.path.isdir(s):
raise FileNotFoundError(s)
self._scan_sets_subdir = s
@metadata_path.setter
def metadata_path(self, m):
# Ensure the metadata file exists.
if not os.path.exists(m):
raise FileNotFoundError(m)
self._metadata_path = m
def _get_hdf_store(self, **kwargs):
mode = kwargs.get("mode", "a")
if mode == "w":
# Encountering weird issues with file locks on previously opened but never
# closed hdf5 files... So we'll close open HDF5 files somewhat forcefully.
# Please look at this issue on Github for more details:
# https://github.com/pandas-dev/pandas/issues/4409
hdf_store = pd.HDFStore(self.hdf_path)
if hdf_store.is_open:
hdf_store.close()
# .close() does not seem to actually close previously opened files...
# So we'll use the pytables api instead.
# This seems like an ugly hack but if you look at that github issue, it might
# make more sense. One of the maintainers said he usually just avoids opening
# HDF5 files with mode="w". He just manually deletes the file if he ever
# needs a fresh start...
if self.hdf_path in tables.file._open_files.filenames:
tables.file._open_files.close_all()
return pd.HDFStore(
self.hdf_path,
mode=mode,
complevel=kwargs.get("complevel", 0),
complib=kwargs.get("complib", None),
fletcher32=kwargs.get("fletcher32", False),
)
def _calibration_metadata(self, meta_df):
cal_source_names = list(self.calibration_sources)
# Ensure the metadata file contains all of the source columns
if not set(cal_source_names).issubset(meta_df.columns):
raise Exception("Not all calibration source columns exist in metadata.")
# Ensure the metadata file contains prototypical and validation columns
if not set(
("calibration_sample", "prototypical_sample", "test_sample")
).issubset(meta_df.columns):
raise Exception(
"calibration_sample/prototypical_sample/test_sample/ "
"columns do not exist in metadata."
)
# Set NaN values to 0 for the each of the sources columns
meta_df[cal_source_names] = meta_df[cal_source_names].fillna(0)
cal_sample_types = ["calibration_sample", "prototypical_sample", "test_sample"]
# Convert columns to lower case
meta_df[cal_sample_types] = meta_df[cal_sample_types].applymap(
lambda s: s.lower() if type(s) == str else s
)
yes_list = ["y", "yes", "ye"]
# Convert calibration sample type columns to boolean
meta_df[cal_sample_types] = meta_df[cal_sample_types].isin(yes_list)
return meta_df
def _qc_metadata(self, meta_df, meta_df_cols):
# Ensure the metadata file contains all of the required columns
if not set(meta_df_cols).issubset(meta_df.columns):
raise Exception("Not all required columns exist in metadata.")
# Ensure datetime column is in the correct format
try:
pd.to_datetime(
meta_df["datetime_utc"],
                format="%Y-%m-%d %H:%M:%S",
errors="raise",
)
except ValueError:
            warnings.warn(
                (
                    "Incorrect datetime format in the datetime_utc column of metadata.csv, "
                    "requires %Y-%m-%d %H:%M:%S"
                )
            )
# Ensure datetime values are all unique
if not meta_df["datetime_utc"].is_unique:
warnings.warn(
"Non-unique datetime values present in datetime_utc column of metadata.csv."
)
"""
# Ensure no values are NULL in all columns except for the
# description and comments columns.
if meta_df[meta_df.columns.difference(["description", "comments"])
].isnull().values.any():
# raise warning
raise Exception("NULL values found in columns besides "
"description and comments.")
"""
def load_metadata(self):
"""Loads the metadata file which keeps track of all the sample sets.
Returns:
            pandas.DataFrame: The metadata which tracks samples and their associated info.
"""
template = _metadata_template()
meta_df_cols = list(template.columns)
# Load the metadata csv into a dataframe
meta_df = pd.read_csv(self.metadata_path, parse_dates=["datetime_utc"])
meta_df["filepath"] = meta_df.apply(
lambda row: os.path.join(
*[self.scan_sets_subdir, str(row["sample_set"]), row["filename"]]
),
axis=1,
)
meta_df["name"] = meta_df["filename"].str.rsplit(".", n=1, expand=True)[0]
self._qc_metadata(meta_df, meta_df_cols)
# Set NaN values to empty strings for the columns:
# "description", "comments", "collected_by"
nan_str_cols = ["description", "comments", "collected_by"]
meta_df[nan_str_cols] = meta_df[nan_str_cols].fillna("")
# set NaN values to 1.0 for the column dilution
meta_df["dilution_factor"] = meta_df["dilution_factor"].fillna(1)
if self.calibration_sources:
meta_df = self._calibration_metadata(meta_df)
# Add multi-index with sample_set and scan_type
meta_df.set_index(["sample_set", "scan_type"], inplace=True)
meta_df.to_hdf(self.hdf, key=os.path.join("metadata"))
return meta_df
def metadata_summary_info(self):
"""Summary information about the dataset which is stored in the metadata.
Returns:
            pandas.DataFrame: The summary table.
"""
num_sample_sets = self.meta_df.groupby(level="sample_set").ngroups
summary_dict = {
"Start datetime (UTC)": self.meta_df["datetime_utc"].min(),
"End datetime (UTC)": self.meta_df["datetime_utc"].max(),
"Number of sample sets": num_sample_sets,
}
scan_types = {
"blank_eem": {"Number of blank EEMs": 0},
"sample_eem": {"Number of sample EEMs": 0},
"water_raman": {"Number of water raman scans": 0},
"absorb": {"Number of absorbance scans": 0},
}
scan_type_counts = self.meta_df.groupby(level="scan_type").size()
for st, st_dict in scan_types.items():
key = list(scan_types[st].keys())[0]
if st in scan_type_counts:
scan_types[st][key] = scan_type_counts[st]
summary_dict[key] = scan_types[st][key]
summary_df = pd.DataFrame(summary_dict, index=[0])
return summary_df
def _process_scan_type(self, scan_type_row):
try:
sample_set = str(scan_type_row.name[0])
scan_type = scan_type_row.name[1]
name = scan_type_row["name"]
filepath = scan_type_row["filepath"]
if not os.path.isfile(filepath):
raise Exception("The file %s does not exist." % (filepath))
if scan_type == "absorb":
instrument = self.instruments_df["absorbance"].item()
df = instrument.load_absorbance(filepath)
elif scan_type == "water_raman":
instrument = self.instruments_df["water_raman"].item()
df = instrument.load_water_raman(filepath)
elif "eem" in scan_type:
instrument = self.instruments_df["eem"].item()
df = instrument.load_eem(filepath)
else:
raise Exception(
"Invalid scan_type for %s in sample_set %s" % (name, sample_set)
)
hdf_path = os.path.join(*["raw_sample_sets", sample_set, name])
df.to_hdf(self.hdf, key=hdf_path)
except Exception as e:
hdf_path = None
warnings.warn(str(e))
return hdf_path
def _qc_scan_type_group(self, scan_type):
# Check that filenames are monotonically increasing
pass
def _process_scan_type_group(self, scan_type_group):
scan_type_group["hdf_path"] = scan_type_group.apply(
self._process_scan_type, axis=1
)
return scan_type_group
def _check_unique_scan_types(self):
pass
def _check_blank_raman_scans(self):
pass
def _check_sample_absorbance_scans(self):
pass
def _qc_sample_set(self, sample_set):
# This function is UGLY! REFACTOR!
sample_set_name = str(
sample_set.index.get_level_values(level="sample_set").unique().item()
)
# There should only be one blank scan and one water raman scan for
# each scan set. If there are more than one, just use the first one.
mono_types = {"blank_eem": "Blank EEM", "water_raman": "Water Raman"}
for key, value in mono_types.items():
# check to see if any exists first
if key in sample_set.index.get_level_values(level="scan_type"):
nunique = sample_set.xs(key, level="scan_type")["filename"].nunique()
if nunique > 1:
first_scan = sample_set.xs(key, level="scan_type")[
"filename"
].unique()[0]
msg = (
"More than one %s found in sample set %s, only %s will be used going forward."
% (value, sample_set_name, first_scan)
)
warnings.warn(msg)
else:
msg = "No %s scan found in sample set %s." % (value, sample_set_name)
warnings.warn(msg)
# Ensure there are N absorbance scan_types for N sample EEM scan_types
if "sample_eem" in sample_set.index.get_level_values(level="scan_type"):
sample_eem_rows = sample_set.xs("sample_eem", level="scan_type")
if "absorb" in sample_set.index.get_level_values(level="scan_type"):
absorb_rows = sample_set.xs("absorb", level="scan_type")
else:
absorb_rows = pd.DataFrame()
for index, row in sample_eem_rows.iterrows():
absorbance_filename = "absorb" + row["filename"].split("sample_eem")[-1]
if absorb_rows.empty:
pass
elif (
not absorb_rows["filename"].str.contains(absorbance_filename).any()
):
pass
else:
continue
msg = (
"No corresponding absorbance scan for sample EEM %s in sample set %s. There should be an absorbance measurement named %s in this sample set."
% (row["filename"], sample_set_name, absorbance_filename)
)
warnings.warn(msg)
else:
msg = "No Sample EEM scans were found in sample set %s." % (sample_set_name)
warnings.warn(msg)
return sample_set
def _process_sample_set(self, sample_set_group):
sample_set = (
sample_set_group.index.get_level_values(level="sample_set").unique().item()
)
sample_set_group = self._qc_sample_set(sample_set_group)
# Group by scan types
return sample_set_group.groupby(level="scan_type", as_index=False).apply(
self._process_scan_type_group
)
def load_sample_sets(self):
"""Loads all sample sets which are tracked in the metadata from disk and write to the HDF5 file."""
# Group by sample sets
if self.progress_bar:
with std_out_err_redirect_tqdm() as orig_stdout:
tqdm.pandas(
desc="Loading scan sets", file=orig_stdout, dynamic_ncols=True
)
self.meta_df = self.meta_df.groupby(
level="sample_set", as_index=False
).progress_apply(self._process_sample_set)
else:
self.meta_df = self.meta_df.groupby(
level="sample_set", as_index=False
).apply(self._process_sample_set)
|
#!python
from distutils.core import setup
description = 'Generate randomized strings of characters using a template'
with open('README.txt') as file:
long_description = file.read()
setup(name='StringGenerator',
description=description,
url='https://github.com/paul-wolf/strgen',
author='Paul Wolf',
author_email='[email protected]',
version='0.1.9',
#version = module.__version__,
packages=['strgen', ],
license='BSD',
long_description=long_description,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
|
import asyncio
import json
import logging
import random
import time
from json import JSONDecodeError
from struct import pack, unpack
from xml.etree import ElementTree
from xmlrpc.client import ServerProxy
import aiohttp
from others.settings import *
class BilibiliClient:
def __init__(self, url_room_id):
logging.basicConfig(level=logging.INFO, format=LOG_FORMAT)
self.protocol_version = 1
self.reader = None
self.writer = None
self.connected = False
self.cmt_server = ''
self.url_room_id = int(url_room_id)
self.room_id = 0
self.uid = 0
self.living = False
async def connect(self):
url = BASE_ROOM_URL.format(self.url_room_id)
logging.info('entering room: ' + url)
# r = requests.get(url)
# html = r.text
# m = ROOM_ID_RE.findall(html)
# room_id = m[0]
# self.room_id = int(room_id)
# r = requests.get(BASE_CID_URL.format(room_id))
# xml_string = '<root>' + r.text + '</root>'
# t = ElementTree.fromstring(xml_string)
# self.cmt_server = t.findtext('server')
# state = t.findtext('state')
# if state == "LIVE":
# await self.go_living()
        async with aiohttp.ClientSession() as s:
async with s.get(url) as r:
html = await r.text()
m = ROOM_ID_RE.findall(html)
room_id = m[0]
self.room_id = int(room_id)
async with s.get(BASE_CID_URL.format(room_id)) as r:
xml_string = '<root>' + await r.text() + '</root>'
t = ElementTree.fromstring(xml_string)
self.cmt_server = t.findtext('server')
state = t.findtext('state')
if state == "LIVE":
await self.go_living()
self.reader, self.writer = await asyncio.open_connection(self.cmt_server, CMT_PORT)
logging.info('connecting cmt server')
if await self.join_channel(self.room_id):
self.connected = True
logging.info('connected')
asyncio.ensure_future(self.heartbeat_loop())
asyncio.ensure_future(self.message_loop())
async def go_living(self):
if not self.living:
logging.info('living')
self.living = True
await self.send_download()
async def go_preparing(self):
logging.info('preparing')
self.living = False
async def heartbeat_loop(self):
while self.connected:
await self.send_socket_data(2)
logging.debug('sent heartbeat')
await asyncio.sleep(30)
async def join_channel(self, channel_id):
self.uid = random.randrange(100000000000000, 300000000000000)
body = '{"roomid":%s,"uid":%s}' % (channel_id, self.uid)
await self.send_socket_data(7, body)
return True
async def send_socket_data(self, action, body='', magic=16, ver=1, param=1):
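        # Packet layout (inferred from the '!IHHII' pack format used below, an
        # observation rather than official protocol documentation): a 16-byte
        # big-endian header of total packet length (I), header length / "magic"
        # = 16 (H), protocol version (H), action code (I) and a parameter (I),
        # followed by the UTF-8 encoded JSON body.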
body_byte = bytes(body, 'utf-8')
length = len(body_byte) + 16
send_bytes = pack('!IHHII', length, magic, ver, action, param)
send_bytes = send_bytes + body_byte
self.writer.write(send_bytes)
await self.writer.drain()
async def message_loop(self):
while self.connected:
receive_bytes = await self.reader.read(16)
length, _, action, _ = unpack('!IIII', receive_bytes)
body_bytes = await self.reader.read(length - 16)
if action == 3:
logging.debug('online count packet')
elif action == 8:
logging.info('joined')
elif action == 5:
try:
body_str = body_bytes.decode('utf-8')
await self.parse_msg(body_str)
except JSONDecodeError as e:
logging.warning(e)
else:
logging.warning(action)
async def parse_msg(self, messages):
d = json.loads(messages)
cmd = d['cmd']
if cmd == 'LIVE':
await self.go_living()
elif cmd == 'PREPARING':
await self.go_preparing()
else:
logging.debug(cmd)
async def send_download(self):
        async with aiohttp.ClientSession() as s:
async with s.get(BASE_PLAYER_URL.format(self.room_id)) as r:
xml_string = await r.text()
t = ElementTree.fromstring(xml_string)
video_url = t.find(VIDEO_URL_CHOICE).text
logging.info(video_url)
aria2c_server = ServerProxy(ARIA2C_RPC_URL)
gid = aria2c_server.aria2.addUri([video_url], {
"out": BASE_FILENAME.format(self.room_id, time.strftime('%Y-%m-%d %H:%M:%S'))})
logging.info('download request sent with #' + gid) |
from gocept.amqprun.readfiles import FileStoreReader, FileStoreDataManager
from unittest import mock
import gocept.amqprun.interfaces
import gocept.amqprun.testing
import os
import shutil
import tempfile
import unittest
import zope.component
import time
class ReaderTest(unittest.TestCase):
def setUp(self):
super().setUp()
self.sender = mock.Mock()
zope.component.provideUtility(
self.sender, gocept.amqprun.interfaces.ISender)
self.tmpdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tmpdir)
super().tearDown()
def test_empty_new_directory_nothing_happens(self):
reader = FileStoreReader(self.tmpdir, 'route')
reader.scan()
self.assertFalse(self.sender.send.called)
def test_should_move_file_to_cur_on_commit(self):
reader = FileStoreReader(self.tmpdir, 'route')
with open(os.path.join(self.tmpdir, 'new', 'foo'), 'w') as f:
f.write('contents')
reader.send = mock.Mock()
reader.session = mock.Mock()
reader.scan()
self.assertTrue(reader.session.mark_done.called)
def test_exception_in_send_should_not_move_file(self):
reader = FileStoreReader(self.tmpdir, 'route')
with open(os.path.join(self.tmpdir, 'new', 'foo'), 'w') as f:
f.write('contents')
reader.send = mock.Mock()
reader.send.side_effect = RuntimeError('provoked')
reader.session = mock.Mock()
reader.scan()
self.assertFalse(reader.session.mark_done.called)
class FileStoreDataManagerTest(unittest.TestCase):
def setUp(self):
self.session = mock.Mock()
self.dm = FileStoreDataManager(self.session)
def test_committing_transaction_should_commit_and_reset_session(self):
UNUSED_TRANSACTION = None
self.dm.tpc_begin(UNUSED_TRANSACTION)
self.dm.commit(UNUSED_TRANSACTION)
self.dm.tpc_vote(UNUSED_TRANSACTION)
self.dm.tpc_finish(UNUSED_TRANSACTION)
self.assertTrue(self.session.commit.called)
self.assertTrue(self.session.reset.called)
def test_aborting_transaction_should_commit_and_reset_session(self):
UNUSED_TRANSACTION = None
self.dm.abort(UNUSED_TRANSACTION)
self.assertFalse(self.session.commit.called)
self.assertTrue(self.session.reset.called)
class ReaderIntegrationTest(gocept.amqprun.testing.MainTestCase):
def setUp(self):
super().setUp()
self.tmpdir = tempfile.mkdtemp()
def tearDown(self):
super().tearDown()
shutil.rmtree(self.tmpdir)
def wait_for_directory_present(self, path, timeout=10):
wait = 0
while wait < timeout:
if os.path.exists(path):
return
time.sleep(0.25)
wait += 0.25
raise RuntimeError
def test_should_send_message_and_move_file(self):
self.make_config(__name__, 'readfiles')
self.expect_message_on('test.data')
self.start_server_in_subprocess(
self.tmpdir, 'test.data', module='gocept.amqprun.readfiles')
new_path = os.path.join(self.tmpdir, 'new')
self.wait_for_directory_present(new_path)
with open(os.path.join(new_path, 'foo.xml'), 'w') as f:
f.write('contents')
message = self.wait_for_message()
self.assertEqual('contents', message.body)
self.assertEqual(
'foo.xml', message.properties['application_headers']['X-Filename'])
self.assertEqual(0, len(os.listdir(os.path.join(self.tmpdir, 'new'))))
self.assertEqual(1, len(os.listdir(os.path.join(self.tmpdir, 'cur'))))
self.stop_server_in_subprocess()
def test_process_should_exit_on_filesystem_error(self):
self.make_config(__name__, 'readfiles-error')
self.start_server_in_subprocess(
self.tmpdir, 'test.route', module='gocept.amqprun.readfiles')
directory_path = os.path.join(self.tmpdir, 'new')
self.wait_for_directory_present(directory_path)
os.rmdir(directory_path)
status = self.wait_for_subprocess_exit()
self.assertNotEqual(0, status)
self.stdout.seek(0)
self.assertIn('Unhandled exception, terminating.', self.stdout.read())
|
cubos = []
for value in range(1,11):
cubos.append(value**3)
for cubo in cubos:
print(cubo)
|
import datetime
from dateutil.parser import parse
import re
from aws.api import CloudTrail
class Trail():
def __init__(self, cloudTrailDict):
self.isMultiRegionTrail = cloudTrailDict['IsMultiRegionTrail']
self.logFileValidationEnabled = cloudTrailDict['LogFileValidationEnabled']
self.name = cloudTrailDict['Name']
self.s3bucket = cloudTrailDict['S3BucketName']
self.encrypted = ('KmsKeyId' in cloudTrailDict)
if 'CloudWatchLogsLogGroupArn' in cloudTrailDict:
self.cloudwatchLogsIntegrated = True
self.cloudWatchLogGroup = re.search(r':log-group:(.+?):\*', cloudTrailDict['CloudWatchLogsLogGroupArn']).group(1)
else:
self.cloudwatchLogsIntegrated = False
self.cloudWatchLogGroup = None
def cloudWatchUpdated(self, hours):
lastUpdated = str(CloudTrail().getTrailStatus(self.name)['LatestDeliveryTime'])
now = str(datetime.datetime.now()) + "+00:00"
timeSinceLastUpdate = parse(now) - parse(lastUpdated)
return (timeSinceLastUpdate.total_seconds() / 3600) <= hours
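# Minimal usage sketch (hypothetical trail dict; the keys mirror the attributes read in __init__):
# trail = Trail({'IsMultiRegionTrail': True, 'LogFileValidationEnabled': True,
#                'Name': 'main-trail', 'S3BucketName': 'audit-logs-bucket'})
# trail.cloudWatchUpdated(24)  # True if CloudTrail delivered to CloudWatch within the last 24 hours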
|
from .utils.Classes.RegEx import RegEx
from .utils.assert_string import assert_string
def is_full_width(input: str) -> bool:
input = assert_string(input)
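# The character class excludes ASCII printables, half-width katakana and other
# half-width forms, so any match indicates a full-width character is present.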
full_width_pattern = RegEx(r"[^\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]")
return bool(full_width_pattern.match(input))
|
from mongoengine.fields import (
StringField,
ListField,
ReferenceField,
DictField,
)
from .nanopublication import Nanopublication
from .entity_base import EntityBase
class Workflow(EntityBase):
meta = {"collection": "workflows"}
# identify
id = StringField(required=True, primary_key=True)
# protocol uri
protocol = StringField(required=True)
# label
label = StringField(required=True, max_length=250)
# description
description = StringField(required=True, max_length=1000)
# list of nanoPublications with which it was built
nanopubs = ListField(ReferenceField(Nanopublication, dbref=True))
# author of workflow
author = StringField(required=True, max_length=120)
# info about fairworkflows publication
publication_info = DictField()
# local rdf
rdf = StringField(required=True)
@property
def permissions(self) -> dict:
"""
returns the security actions and who can do
"""
return {
"read": [self.author, "any"],
"update": [self.author],
"delete": [self.author],
}
def to_json_map(self):
base = super().to_json_map()
try:
base["nanopubs"] = list(
map((lambda nanopub: nanopub.to_json_map()), self.nanopubs)
)
except Exception as _:
base["nanopubs"] = []
base["permissions"] = self.permissions
return base
|
import numpy as np
from scipy.special import j0 as BesselJ0, j1 as BesselJ1, jn as BesselJ
from scipy.optimize import root
def shoot_S1(central_value: float,
w: float,
R: np.ndarray,
coeffs: np.ndarray,
S_harmonics: np.ndarray = None) -> np.ndarray:
"""
Shoots S1 from the center, starting from a central_value and
zero-derivative.
Parameters
----------
central_value : the value of S1 at r=0
w : the frequency of the oscillon
R : the grid of radii
coeffs : the Fourier coefficients of the potential, normalized
S_harmonics : the in-phase perturbative radiative harmonics
Returns
-------
S1 : the values of S1 over the grid when shooting from the center
"""
S1 = np.empty_like(R)
S1[0], S1[1] = central_value, central_value
dr = R[1] - R[0]
if S_harmonics is None:
def f_(i):
return (
S1[i - 1] * w**2 - 2 *
(coeffs * BesselJ1(S1[i - 1] * np.arange(1,
len(coeffs) + 1)) /
np.arange(1,
len(coeffs) + 1)).sum())
else:
N_harmonics = S_harmonics.shape[0]
def f_(i):
return (
S1[i - 1] * w**2 - 2 *
(coeffs * BesselJ1(S1[i - 1] * np.arange(1,
len(coeffs) + 1)) /
np.arange(1,
len(coeffs) + 1)).sum() -
((coeffs[:, np.newaxis] * BesselJ(
2 * np.arange(0, N_harmonics, 1) + 2, S1[i - 1] *
np.arange(1,
len(coeffs) + 1, 1)[:, np.newaxis])).sum(axis=0)
* S_harmonics[:, i - 1]).sum() +
((coeffs[:, np.newaxis] * BesselJ(
2 * np.arange(0, N_harmonics, 1) + 4, S1[i - 1] *
np.arange(1,
len(coeffs) + 1, 1)[:, np.newaxis])).sum(axis=0)
* S_harmonics[:, i - 1]).sum())
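# Integrate outward with a second-order finite-difference update of the radial
# ODE S1'' + (2/r) S1' + f(S1) = 0, using the two previous grid points.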
for i in range(2, len(S1)):
S1[i] = (
2 * S1[i-1] -
dr**2 * f_(i) +
S1[i-2] * (2 * dr/(2*R[i-1]) - 1)) \
/ (2 * dr/(2*R[i-1]) + 1)
return S1
def initial_S1(w: float,
R: np.ndarray,
coeffs: np.ndarray,
S_harmonics: np.ndarray = None) -> np.ndarray:
"""
Defines the binary search procedure to find the initial condition which
shoots to zero at infinity.
"""
# find the value at the same potential energy as the zero-field. We know
# the true value will be slightly higher due to friction.
c = root(
lambda x: 0.5 * x**2 * w**2 + 2 * (coeffs * (BesselJ0(x * np.arange(
1,
len(coeffs) + 1)) - 1) / np.arange(1,
len(coeffs) + 1)**2).sum(),
10).x[0]
# define the left- and right-boundaries of the search and push
# these values apart until they have the appropriate signs:
left, right = c, c
left_condition = (shoot_S1(left, w, R, coeffs, S_harmonics)[-1] >= 0)
while not left_condition:
left = 0.95 * left
left_condition = (shoot_S1(left, w, R, coeffs, S_harmonics)[-1] >= 0)
right_condition = shoot_S1(right, w, R, coeffs, S_harmonics)[-1] < 0
while not right_condition:
right = 1.1 * right
right_condition = shoot_S1(right, w, R, coeffs, S_harmonics)[-1] < 0
# perform the binary search for 60 steps:
for _ in range(60):
m = (left + right) / 2
S1 = shoot_S1(m, w, R, coeffs, S_harmonics)
if S1[-1] >= 0:
left = m
else:
right = m
# zero-out the far-field:
S1[np.abs(S1).argmin():] = 0.0
return S1
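# Minimal usage sketch (illustrative values only, not taken from the source):
# R = np.linspace(1e-3, 50.0, 5000)              # radial grid
# coeffs = np.array([1.0, -0.25])                # normalized Fourier coefficients of the potential
# S1 = initial_S1(w=0.8, R=R, coeffs=coeffs)     # profile that shoots to zero at infinity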
|
import requests
import structlog
from datetime import datetime
log = structlog.get_logger(__name__)
def register(explorer_endpoint, rsk_address, rns_domain):
try:
if rns_domain is None:
rns_domain = ''
data_registry = {'node_address': rsk_address,
'rns_address': rns_domain,
'last_alive_signal': datetime.utcnow().isoformat()}
response = requests.post(explorer_endpoint + 'luminoNode/', json=data_registry)
if response.status_code == 200:
log.info("Succesfully registered into Lumino Explorer")
else:
log.info("Warning: There was an error registering into Lumino Explorer. Status: " + str(response.status_code))
except requests.exceptions.RequestException as e:
log.info("Warning: Could not connect to Lumino Explorer. Your node will not be registered.")
|
import os
import torch
import torchvision
import cv2
import numpy as np
from torch.utils.data import Dataset
from PIL import Image
from functools import reduce
class PipelineFacesDatasetGenerator(Dataset):
negative_extensions = ['.txt']
def folder_filter(self, _path):
return reduce(
lambda x, y: x and y,
[ext not in _path for ext in self.negative_extensions]
)
def __init__(self,
path_to_dataset_folder,
shape=(224, 224),
augmentations=False):
images_folders = [
os.path.join(path_to_dataset_folder, p)
for p in os.listdir(path_to_dataset_folder)
if self.folder_filter(p)
]
self.num_classes = len(images_folders)
self.images_data = [
{
'img_path': os.path.join(folder_path, image_name),
'class': i
}
for i, folder_path in enumerate(images_folders)
for image_name in os.listdir(folder_path)
]
self.shape = shape
self.preprocessing = torchvision.transforms.Compose(
[
torchvision.transforms.Resize(self.shape, interpolation=2),
torchvision.transforms.ToTensor()
]
)
self.augmentations = torchvision.transforms.Compose(
[
torchvision.transforms.ColorJitter(),
torchvision.transforms.RandomHorizontalFlip(p=0.5),
torchvision.transforms.RandomAffine(
15, translate=None,
scale=None, shear=None,
resample=False, fillcolor=0
),
torchvision.transforms.RandomPerspective(
distortion_scale=0.1, p=0.5, interpolation=Image.NEAREST
),
torchvision.transforms.Resize(
self.shape,
interpolation=Image.NEAREST
),
torchvision.transforms.RandomChoice(
[
torchvision.transforms.CenterCrop(self.shape[0] - k)
for k in range(0, int(
self.shape[0] * 0.05), 1)
]
),
torchvision.transforms.Resize(
self.shape,
interpolation=Image.NEAREST
),
torchvision.transforms.RandomGrayscale(p=0.1),
torchvision.transforms.ToTensor()
]
) if augmentations else None
def __len__(self):
return len(self.images_data)
def apply_augmentations(self, img):
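# The transform pipelines return tensors in [0, 1]; shift and scale to [-1, 1] for the network.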
if self.augmentations is not None:
return (torch.clamp(self.augmentations(img), 0, 1) - 0.5) * 2
return (self.preprocessing(img) - 0.5) * 2
def __getitem__(self, idx):
selected_item = self.images_data[idx]
image = Image.open(selected_item['img_path'])
return self.apply_augmentations(image), selected_item['class']
|
import printer
# Possibly refactor out to a message exception base class
class SymbolNotFound(Exception):
def __init__(self, symbol):
self.symbol = symbol
def __repr__(self):
return "'" + self.symbol + "' not found"
class Env:
def __init__(self, outer=None):
self.outer = outer
self.data = {}
def __repr__(self):
items = []
for key, value in self.data.items():
items.append(key + ": " + repr(value))
result = "\n".join(items)
if (self.outer):
result += "\nOuter:\n"
result += (repr(self.outer))
return result
def set(self, symbol, native_value):
self.data[symbol] = native_value
def find(self, symbol):
try:
return self.data[symbol]
except KeyError:
if self.outer:
return self.outer.find(symbol)
else:
raise SymbolNotFound(symbol)
def get(self, symbol):
return self.find(symbol)
def bind(self, symbols, values):
for i in range(0, len(symbols)):
symbol = symbols[i]
if symbol == "&":
# Set all the following symbol to the rest
# of the argument list
self.set(symbols[i + 1], values[i:])
return
else:
self.set(symbols[i], values[i])
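# Example of variadic binding ("&" collects the remaining values):
# env = Env()
# env.bind(["a", "&", "rest"], [1, 2, 3])  # a -> 1, rest -> [2, 3]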
|
import rospy
from std_msgs.msg import Bool
from dbw_mkz_msgs.msg import ThrottleCmd, SteeringCmd, BrakeCmd, SteeringReport
from geometry_msgs.msg import TwistStamped
import math
from pid import PID
from yaw_controller import YawController
from lowpass import LowPassFilter
GAS_DENSITY = 2.858
ONE_MPH = 0.44704
class Controller(object):
def __init__(self, vehicle_mass, fuel_capacity, brake_deadband, decel_limit, accel_limit, wheel_radius, wheel_base, steer_ratio, max_lat_accel, max_steer_angle):
# TODO: Implement
self.yaw_controller = YawController(wheel_base, steer_ratio, 0.1, max_lat_accel, max_steer_angle)
kp = 0.3 # proportional
ki = 0.1 # integral
kd = 0. # differential
mn = 0. # minimum throttle
mx = 0.2 # maximum throttle
self.throttle_controller = PID(kp, ki, kd, mn, mx)
tau = 0.5 # 1 / (2pi * tau) is cut_off frequency
ts = 0.2 # sampling time
self.vel_lpf = LowPassFilter(tau, ts)
self.vehicle_mass = vehicle_mass
self.fuel_capacity = fuel_capacity
self.brake_deadband = brake_deadband
self.decel_limit = decel_limit
self.accel_limit = accel_limit
self.wheel_radius = wheel_radius
self.last_time = rospy.get_time()
# pass
def control(self, current_vel, dbw_enabled, linear_vel, angular_vel):
# TODO: Change the arg, kwarg list to suit your needs
# drive-by-wire not enabled, just reset the controller
if not dbw_enabled:
self.throttle_controller.reset()
return (0., 0., 0.)
current_vel = self.vel_lpf.filt(current_vel)
steering_value = self.yaw_controller.get_steering(linear_vel, angular_vel, current_vel)
vel_error = linear_vel - current_vel
self.last_vel = current_vel
current_time = rospy.get_time()
sample_time = current_time - self.last_time
self.last_time = current_time
throttle = self.throttle_controller.step(vel_error, sample_time)
brake = 0.
if (linear_vel == 0.) and (current_vel < 0.1):
throttle = 0.
brake = 700 # as per Q&A foot notes 400 would not be enough.
elif (throttle < 0.1) and (vel_error < 0.):
decel = max(vel_error, self.decel_limit)
brake = abs(decel) * (self.vehicle_mass + self.fuel_capacity*GAS_DENSITY) * self.wheel_radius
return throttle, brake, steering_value
|
# Generated by Django 3.1.7 on 2021-03-30 02:51
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('company', '0005_company_description'),
]
operations = [
migrations.AlterField(
model_name='company',
name='administrator',
field=models.ManyToManyField(to=settings.AUTH_USER_MODEL),
),
]
|
# -*- coding: utf-8 -*-
"""Unit test package for ndexnetworkcollapse."""
|
from newim import get_info, get_vtn
import requests
import json
import logging
from sqlalchemy import create_engine
import pytricia
# Assumption: abort() used in setRules below matches flask_restful's abort(status_code, message=...).
from flask_restful import abort
pyt = pytricia.PyTricia()
e = create_engine('sqlite:///database/wim_info.db')
def setRules(cond_name, in_seg,out_seg,ordered_pop,index):
logging.debug("Ordered PoPs are:")
logging.debug(ordered_pop)
logging.info("Calling set_condition method")
flag = set_condition(cond_name,in_seg, out_seg,index)
logging.debug("Flag incoming:" +str(flag))
if flag != 200:
abort(500, message="Set condition could not be completed")
logging.info("Condition set completed")
flag = 200
#TODO FIX incoming traffic
port_in, vbr1 = get_switch(in_seg)
port_out, vbr2 = get_switch(ordered_pop[0][0])
if vbr1 == 'notsure':
port_in = get_exit(vbr2)
if vbr1 != vbr2 :
port_in = get_exit(vbr2)
bridge = vbr2 # Set final bridge
port = port_out
set_redirect(cond_name, vbr2, port_in, port_out,index)
logging.info("Redirect from source to First PoP completed")
# Redirecting through the PoPs now
logging.debug("Redirect traffic through PoPs")
for i in range(1,len(ordered_pop)):
port_1, vbr1 = get_switch(ordered_pop[i-1][0])
logging.debug("port coming is: "+port_in+" with vbridge "+vbr1)
port_2, vbr2 = get_switch(ordered_pop[i][0])
if vbr1 == vbr2:
logging.debug("port to redirect is: "+port_out+" with vbridge "+vbr2)
set_redirect(cond_name, vbr1, port_1, port_2,index)
else:
logging.debug("redirecting through different bridges")
port_ex = get_exit(vbr1)
set_redirect(cond_name, vbr1, port_1, port_ex,index)
port_in = get_exit(vbr2)
set_redirect(cond_name, vbr2, port_in, port_2,index)
bridge = vbr2
port = port_2
logging.debug(" Inter PoP redirections completed ")
port_out, exitbridge = get_switch(out_seg)
if exitbridge == 'notsure':
port_out = get_exit(bridge)
elif exitbridge != bridge :
logging.debug("redirecting through different bridges")
port_ex = get_exit(bridge)
set_redirect(cond_name, bridge, port, port_ex,index)
port = get_exit(exitbridge)
#set_redirect(cond_name, exitbridge, port_in, port_out)
bridge = exitbridge
else:
bridge = exitbridge
set_redirect(cond_name, bridge, port, port_out,index)
# Need to implement (or not) going from last PoP to Outer Segment -- leaving Wan
#Just add to the flow array
logging.info("Posting new flow completed")
return (flag)
def get_switch(seg):
logging.debug("Incoming request for segment: "+seg)
conn = e.connect()
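# pytricia performs longest-prefix matching, so a host address resolves to its covering segment.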
segment = pyt.get(seg)
logging.debug("Segment to look in the database is: "+segment)
query = conn.execute('SELECT port_id, bridge_name FROM connectivity WHERE segment="%s";'%segment)
dt = query.fetchone()
#TODO implement try
port, switch = dt[0],dt[1]
logging.info("get_switch method completed. Returning: "+port+" "+switch+". If segment is 0.0.0.0/0, then it may not be correct")
if segment == '0.0.0.0/0':
switch = 'notsure'
return (port, switch)
def get_exit(vbr):
logging.debug("Incoming request to find exit port of vbridge: "+vbr)
conn = e.connect()
query = conn.execute('SELECT port_id FROM connectivity WHERE segment="0.0.0.0/0" AND bridge_name="%s";'%vbr)
dt = query.fetchone()
port = dt[0]
logging.info("get_exit method completed. Returning: "+port )
return (port )
def set_condition(cond_name, source, dest,index):
logging.debug("Incoming set_condition call")
s_url = 'operations/vtn-flow-condition:set-flow-condition'
username, password, host, url, headers = get_info()
data = {'input': {'name': cond_name, 'vtn-flow-match': [
{'index': index, 'vtn-inet-match': {'source-network': source, 'destination-network': dest}}]}}
'''
this curl --user "username":"pass" -H "Content-type: application/json" -X POST http://localhost:8181/restconf/operations/vtn-flow-condition:set-flow-condition
# -d '{"input":{"name":"cond1", "vtn-flow-match":[{"index":"1",
# "vtn-inet-match":{"source-network":"10.0.0.1/32",
# "destination-network":"10.0.0.3/32"}}]}}'
'''
logging.debug("Sending request to VTN to implement condition "+cond_name)
r = requests.post(url + s_url, headers=headers,
auth=(username, password), json=data)
logging.info("Got this as response: " +str(r) )
if not r.status_code == 200:
logging.error('FLOW COND ERROR ' + str(r.status_code))
return (r.status_code)
def delete_condition(cond_name):
s_url = 'operations/vtn-flow-condition:remove-flow-condition'
username, password, host, url, headers = get_info()
data = {'input': {'name': cond_name}}
logging.debug("Sending request to delete condition "+cond_name)
r = requests.post(url+s_url, headers=headers, auth=(username, password), json=data)
logging.info("Got response:" +str(r))
if not r.status_code == 200:
logging.error("Condition removal ERROR " + str(r.status_code))
return (r.status_code)
def set_redirect(cond_name, vbr, port_id_in, port_id_out,index):
s_url = 'operations/vtn-flow-filter:set-flow-filter'
logging.debug("Incoming set_redirect call")
username, password, host, url, headers = get_info()
vtn_name = get_vtn_name()
data = {"input": {"output": "false", "tenant-name": vtn_name, "bridge-name": vbr, "interface-name": port_id_in, "vtn-flow-filter": [
{"index": index, "condition": cond_name, "vtn-redirect-filter": {"redirect-destination": {"bridge-name": vbr, "interface-name": port_id_out}, "output": "true"}}]}}
'''
this: curl --user "username":"pass" -H "Content-type: application/json" -X POST http://localhost:8181/restconf/operations/vtn-flow-filter:set-flow-filter
-d '{"input":{"output":"false","tenant-name":"vtn1", "bridge-name":"vbr", interface-name":"if5", "vtn-flow-filter":[{"condition":"cond_1","index":"1","vtn-redirect-filter":
{"redirect-destination":{"bridge-name":"vbr1","interface-name":"if3"},"output":"true"}}]}}'
'''
logging.debug("Sending request to set condition: "+str(data))
r = requests.post(url + s_url, headers=headers,
auth=(username, password), json=data)
logging.info("Got response:" +str(r))
if not r.status_code == 200:
logging.error('FLOW FILTER ERROR ' + str(r.status_code))
def get_vtn_name():
name = get_vtn()
return name
def order_pop(pops):
ordered_pop = []
for item in pops:
ordered_pop.append((item["port"],item["order"]))
ordered_pop.sort(key=lambda tup: tup[1])
logging.debug("Ordered the PoP list")
return ordered_pop
def get_locations():
logging.debug("Incoming request for location")
conn = e.connect()
query = conn.execute('SELECT segment, location FROM connectivity;')
dt = query.fetchall()
logging.debug("Show locations: " + str(dt))
locations = []
for d in dt:
dicti = {"segment" : d[0], "location" : d[1]}
locations.append(dicti)
return locations
def pop_nets():
logging.debug("Populating network segments table")
conn = e.connect()
query = conn.execute('SELECT segment FROM connectivity;')
dt = query.fetchall()
logging.debug("Show segments: " + str(dt))
for d in dt:
pyt[d[0]] = d[0]
|
# -*- coding: utf-8 -*-
from unittest import TestCase
import sympy as sp
import numpy as np
class TestParserNonLinearInputData(TestCase):
def test_hyperbox_states(self):
from ETCetera.util.parsing.parser_nonlinear_systems import parse_nonlinear
self.assertTrue(np.allclose(parse_nonlinear('Hyperbox States : [1 2]'),
np.array([[1, 2]], dtype='f'), rtol=1e-05, atol=1e-08))
self.assertTrue(np.allclose(parse_nonlinear('Hyperbox States : [1 2], '),
np.array([[1, 2]], dtype='f'), rtol=1e-05, atol=1e-08))
for item in parse_nonlinear('Hyperbox States : [1 2], [1 2], [1 2]'):
with self.subTest(line=item):
self.assertTrue(np.allclose(item, np.array([[1, 2]], dtype='f'), rtol=1e-05, atol=1e-08))
for item in parse_nonlinear('Hyperbox States : [-1 -2 -3 -4], [-1 -2 -3 -4]'):
with self.subTest(line=item):
self.assertTrue(np.allclose(item, np.array([[-1, -2, -3, -4]], dtype='f'), rtol=1e-05, atol=1e-08))
with self.assertRaises(Exception) as context:
parse_nonlinear('Hyperbox States : [1 2; 3 4]')
self.assertTrue('Non-numerical values found' in str(context.exception))
with self.assertRaises(Exception) as context:
parse_nonlinear('Hyperbox States : 1 2 3 4]')
self.assertTrue('Syntax error for value' in str(context.exception))
with self.assertRaises(Exception) as context:
parse_nonlinear('Hyperbox States : [1 2 3 4], [')
self.assertTrue('Syntax error for value' in str(context.exception))
with self.assertRaises(Exception) as context:
parse_nonlinear('Hyperbox States : asdf')
self.assertTrue('Syntax error for value' in str(context.exception))
with self.assertRaises(Exception) as context:
parse_nonlinear('Hyperbox States : []')
self.assertTrue('Non-numerical values found' in str(context.exception))
with self.assertRaises(Exception) as context:
parse_nonlinear('Hyperbox States :')
self.assertTrue('Syntax error for value' in str(context.exception))
with self.assertRaises(Exception) as context:
parse_nonlinear('Hyperbox States : ')
self.assertTrue('Syntax error for value' in str(context.exception))
with self.assertRaises(Exception) as context:
parse_nonlinear('Hyperbox States ')
self.assertTrue('Syntax error' in str(context.exception))
def test_hyperbox_disturbances(self):
from ETCetera.util.parsing.parser_nonlinear_systems import parse_nonlinear
self.assertTrue(np.allclose(parse_nonlinear('Hyperbox Disturbances : [1 2]'),
np.array([[1, 2]], dtype='f'), rtol=1e-05, atol=1e-08))
self.assertTrue(np.allclose(parse_nonlinear('Hyperbox Disturbances : [1 2], '),
np.array([[1, 2]], dtype='f'), rtol=1e-05, atol=1e-08))
for item in parse_nonlinear('Hyperbox Disturbances : [1 2], [1 2], [1 2]'):
with self.subTest(line=item):
self.assertTrue(np.allclose(item, np.array([[1, 2]], dtype='f'), rtol=1e-05, atol=1e-08))
for item in parse_nonlinear('Hyperbox Disturbances : [-1 -2 -3 -4], [-1 -2 -3 -4]'):
with self.subTest(line=item):
self.assertTrue(np.allclose(item, np.array([[-1, -2, -3, -4]], dtype='f'), rtol=1e-05, atol=1e-08))
with self.assertRaises(Exception) as context:
parse_nonlinear('Hyperbox Disturbances : [1 2; 3 4]')
self.assertTrue('Non-numerical values found' in str(context.exception))
with self.assertRaises(Exception) as context:
parse_nonlinear('Hyperbox Disturbances : 1 2 3 4]')
self.assertTrue('Syntax error for value' in str(context.exception))
with self.assertRaises(Exception) as context:
parse_nonlinear('Hyperbox Disturbances : [1 2 3 4], [')
self.assertTrue('Syntax error for value' in str(context.exception))
with self.assertRaises(Exception) as context:
parse_nonlinear('Hyperbox Disturbances : asdf')
self.assertTrue('Syntax error for value' in str(context.exception))
with self.assertRaises(Exception) as context:
parse_nonlinear('Hyperbox Disturbances : []')
self.assertTrue('Non-numerical values found' in str(context.exception))
with self.assertRaises(Exception) as context:
parse_nonlinear('Hyperbox Disturbances :')
self.assertTrue('Syntax error for value' in str(context.exception))
with self.assertRaises(Exception) as context:
parse_nonlinear('Hyperbox Disturbances : ')
self.assertTrue('Syntax error for value' in str(context.exception))
with self.assertRaises(Exception) as context:
parse_nonlinear('Hyperbox Disturbances ')
self.assertTrue('Syntax error' in str(context.exception))
def test_dynamics(self):
from ETCetera.util.parsing.parser_nonlinear_systems import parse_nonlinear
self.assertEqual(parse_nonlinear('Dynamics : x0**2+u0+d0, x1+x0*x2**2+d1, x2*sin(x0)+u1+d2'), [sp.sympify('x0**2+u0+d0'), sp.sympify('x1+x0*x2**2+d1'), sp.sympify('x2*sin(x0)+u1+d2') ])
self.assertEqual(parse_nonlinear('Dynamics : 1.2, x0**2'), [sp.sympify('1.2'), sp.sympify('x0**2')])
with self.assertRaises(Exception) as context:
parse_nonlinear('Dynamics : a0+x0')
self.assertTrue('Incorrect symbols in expressions' in str(context.exception))
with self.assertRaises(Exception) as context:
parse_nonlinear('Dynamics : 1.2. a')
self.assertTrue('Incorrect symbols in expressions' in str(context.exception))
with self.assertRaises(Exception) as context:
parse_nonlinear('Dynamics : x2*sin()+u1+d2')
self.assertTrue('Incorrect expression' in str(context.exception))
with self.assertRaises(Exception) as context:
parse_nonlinear('Dynamics : x2*sin(x0+u1+d2')
self.assertTrue('Incorrect expression' in str(context.exception))
with self.assertRaises(Exception) as context:
parse_nonlinear('Dynamics : gfjg')
self.assertTrue('Incorrect symbols in expressions' in str(context.exception))
with self.assertRaises(Exception) as context:
parse_nonlinear('Dynamics :')
self.assertTrue('Incorrect expression' in str(context.exception))
with self.assertRaises(Exception) as context:
parse_nonlinear('Dynamics : ')
self.assertTrue('Incorrect expression' in str(context.exception))
with self.assertRaises(Exception) as context:
parse_nonlinear('Dynamics ')
self.assertTrue('Syntax error' in str(context.exception))
def test_controller(self):
from ETCetera.util.parsing.parser_nonlinear_systems import parse_nonlinear
self.assertEqual(parse_nonlinear('Controller : -x0**2 - x0**3, -x2*sin(x0)-x2'), [sp.sympify('-x0**2 - x0**3'), sp.sympify('-x2*sin(x0)-x2')])
self.assertEqual(parse_nonlinear('Controller : sin(x0)+x1'), [sp.sympify('sin(x0)+x1')])
self.assertEqual(parse_nonlinear('Controller : 1.2, x0**2'), [sp.sympify('1.2'), sp.sympify('x0**2')])
with self.assertRaises(Exception) as context:
parse_nonlinear('Controller : x0+e0, x0+e0')
self.assertTrue('Incorrect symbols in expressions' in str(context.exception))
with self.assertRaises(Exception) as context:
parse_nonlinear('Controller : a0+x0')
self.assertTrue('Incorrect symbols in expressions' in str(context.exception))
with self.assertRaises(Exception) as context:
parse_nonlinear('Controller : 1.2. a')
self.assertTrue('Incorrect symbols in expressions' in str(context.exception))
with self.assertRaises(Exception) as context:
parse_nonlinear('Controller :')
self.assertTrue('Incorrect expression' in str(context.exception))
with self.assertRaises(Exception) as context:
parse_nonlinear('Controller : ')
self.assertTrue('Incorrect expression' in str(context.exception))
with self.assertRaises(Exception) as context:
parse_nonlinear('Controller ')
self.assertTrue('Syntax error' in str(context.exception))
def test_triggering_condition(self):
from ETCetera.util.parsing.parser_nonlinear_systems import parse_nonlinear
self.assertEqual(parse_nonlinear('Triggering Condition : x0+e0'), sp.sympify('x0+e0'))
self.assertEqual(parse_nonlinear('Triggering Condition : sin(x0)+e0'), sp.sympify('sin(x0)+e0'))
self.assertEqual(parse_nonlinear('Triggering Condition : x0**e0'), sp.sympify('x0**e0'))
self.assertEqual(parse_nonlinear('Triggering Condition : 1.2'), sp.sympify('1.2'))
with self.assertRaises(Exception) as context:
parse_nonlinear('Triggering Condition : x0+e0, x0+e0')
self.assertTrue('Only one expression expected' in str(context.exception))
with self.assertRaises(Exception) as context:
parse_nonlinear('Triggering Condition : a0+x0')
self.assertTrue('Incorrect symbols in expressions' in str(context.exception))
with self.assertRaises(Exception) as context:
parse_nonlinear('Triggering Condition : 1.2. a')
self.assertTrue('Incorrect symbols in expressions' in str(context.exception))
with self.assertRaises(Exception) as context:
parse_nonlinear('Triggering Condition :')
self.assertTrue('Incorrect expression' in str(context.exception))
with self.assertRaises(Exception) as context:
parse_nonlinear('Triggering Condition : ')
self.assertTrue('Incorrect expression' in str(context.exception))
with self.assertRaises(Exception) as context:
parse_nonlinear('Triggering Condition ')
self.assertTrue('Syntax error' in str(context.exception))
def test_lyapunov_function(self):
from ETCetera.util.parsing.parser_nonlinear_systems import parse_nonlinear
self.assertEqual(parse_nonlinear('Lyapunov Function : x0'), sp.sympify('x0'))
self.assertEqual(parse_nonlinear('Lyapunov Function : sin(x0)'), sp.sympify('sin(x0)'))
self.assertEqual(parse_nonlinear('Lyapunov Function : x0**2'), sp.sympify('x0**2'))
self.assertEqual(parse_nonlinear('Lyapunov Function : 1.2'), sp.sympify('1.2'))
with self.assertRaises(Exception) as context:
parse_nonlinear('Lyapunov Function : x0, x1')
self.assertTrue('Only one expression expected' in str(context.exception))
with self.assertRaises(Exception) as context:
parse_nonlinear('Lyapunov Function : e0+x0')
self.assertTrue('Incorrect symbols in expressions' in str(context.exception))
with self.assertRaises(Exception) as context:
parse_nonlinear('Lyapunov Function : 1.2. a')
self.assertTrue('Incorrect symbols in expressions' in str(context.exception))
with self.assertRaises(Exception) as context:
parse_nonlinear('Lyapunov Function :')
self.assertTrue('Incorrect expression' in str(context.exception))
with self.assertRaises(Exception) as context:
parse_nonlinear('Lyapunov Function : ')
self.assertTrue('Incorrect expression' in str(context.exception))
with self.assertRaises(Exception) as context:
parse_nonlinear('Lyapunov Function ')
self.assertTrue('Syntax error' in str(context.exception))
def test_triggering_times(self):
from ETCetera.util.parsing.parser_nonlinear_systems import parse_nonlinear
self.assertEqual(parse_nonlinear('Triggering Times : 1, 2, 3'), [1.0, 2.0, 3.0])
self.assertEqual(parse_nonlinear('Triggering Times : 1.2, 2.4, 3.7'), [1.2, 2.4, 3.7])
self.assertEqual(parse_nonlinear('Triggering Times : 12., 3.7'), [12.0, 3.7])
with self.assertRaises(Exception) as context:
parse_nonlinear('Triggering Times : 1.2, a, 3.7')
self.assertTrue('Non-numerical values found' in str(context.exception))
with self.assertRaises(Exception) as context:
parse_nonlinear('Triggering Times : 1.2,3.7')
self.assertTrue('Non-numerical values found' in str(context.exception))
with self.assertRaises(Exception) as context:
parse_nonlinear('Triggering Times : 1.2. a, 3.7')
self.assertTrue('Non-numerical values found' in str(context.exception))
with self.assertRaises(Exception) as context:
parse_nonlinear('Triggering Times : 1.2; 3.7')
self.assertTrue('Non-numerical values found' in str(context.exception))
with self.assertRaises(Exception) as context:
parse_nonlinear('Triggering Times : ')
self.assertTrue('Non-numerical values found' in str(context.exception))
with self.assertRaises(Exception) as context:
parse_nonlinear('Triggering Times :')
self.assertTrue('Non-numerical values found' in str(context.exception))
with self.assertRaises(Exception) as context:
parse_nonlinear('Triggering Times ')
self.assertTrue('Syntax error' in str(context.exception))
def test_solver_options(self):
from ETCetera.util.parsing.parser_nonlinear_systems import parse_nonlinear
self.assertEqual(parse_nonlinear('Solver Options : 1, 2, 3'), [1.0, 2.0, 3.0])
self.assertEqual(parse_nonlinear('Solver Options : 1.2, 2.4, 3.7'), [1.2, 2.4, 3.7])
self.assertEqual(parse_nonlinear('Solver Options : 12., 3.7'), [12.0, 3.7])
self.assertEqual(parse_nonlinear('Solver Options :'), [])
self.assertEqual(parse_nonlinear('Solver Options : '), [])
with self.assertRaises(Exception) as context:
parse_nonlinear('Solver Options : 1.2, a, 3.7')
self.assertTrue('Non-numerical values found' in str(context.exception))
with self.assertRaises(Exception) as context:
parse_nonlinear('Solver Options : 1.2,3.7')
self.assertTrue('Non-numerical values found' in str(context.exception))
with self.assertRaises(Exception) as context:
parse_nonlinear('Solver Options : 1.2. a, 3.7')
self.assertTrue('Non-numerical values found' in str(context.exception))
with self.assertRaises(Exception) as context:
parse_nonlinear('Solver Options : 1.2; 3.7')
self.assertTrue('Non-numerical values found' in str(context.exception))
with self.assertRaises(Exception) as context:
parse_nonlinear('Solver Options ')
self.assertTrue('Syntax error' in str(context.exception))
def test_linesearch_options(self):
from ETCetera.util.parsing.parser_nonlinear_systems import parse_nonlinear
self.assertEqual(parse_nonlinear('Linesearch Options : 1, 2, 3'), [1.0, 2.0, 3.0])
self.assertEqual(parse_nonlinear('Linesearch Options : 1.2, 2.4, 3.7'), [1.2, 2.4, 3.7])
self.assertEqual(parse_nonlinear('Linesearch Options : 12., 3.7'), [12.0, 3.7])
self.assertEqual(parse_nonlinear('Linesearch Options :'), [])
self.assertEqual(parse_nonlinear('Linesearch Options : '), [])
with self.assertRaises(Exception) as context:
parse_nonlinear('Linesearch Options : 1.2, a, 3.7')
self.assertTrue('Non-numerical values found' in str(context.exception))
with self.assertRaises(Exception) as context:
parse_nonlinear('Linesearch Options : 1.2,3.7')
self.assertTrue('Non-numerical values found' in str(context.exception))
with self.assertRaises(Exception) as context:
parse_nonlinear('Linesearch Options : 1.2. a, 3.7')
self.assertTrue('Non-numerical values found' in str(context.exception))
with self.assertRaises(Exception) as context:
parse_nonlinear('Linesearch Options : 1.2; 3.7')
self.assertTrue('Non-numerical values found' in str(context.exception))
with self.assertRaises(Exception) as context:
parse_nonlinear('Linesearch Options ')
self.assertTrue('Syntax error' in str(context.exception)) |
import gym
from vel.api.base import LinearBackboneModel, Model, ModelFactory
from vel.rl.api import Evaluator, Rollout
from vel.rl.modules.q_head import QHead
class QModelEvaluator(Evaluator):
""" Evaluate simple q-model """
def __init__(self, model: 'QModel', rollout: Rollout):
super().__init__(rollout)
self.model = model
@Evaluator.provides('model:q')
def model_q(self):
""" Action values for all (discrete) actions """
observations = self.get('rollout:observations')
return self.model(observations)
@Evaluator.provides('model:action:q')
def model_action_q(self):
""" Action values for all (discrete) actions """
q = self.get('model:q')
actions = self.get('rollout:actions')
return q.gather(1, actions.unsqueeze(1)).squeeze(1)
@Evaluator.provides('model:q_next')
def model_q_next(self):
""" Action values for all (discrete) actions """
observations = self.get('rollout:observations_next')
return self.model(observations)
class QModel(Model):
""" Wraps a backbone model into API we need for Deep Q-Learning """
def __init__(self, backbone: LinearBackboneModel, action_space: gym.Space):
super().__init__()
self.backbone = backbone
self.q_head = QHead(input_dim=backbone.output_dim, action_space=action_space)
def forward(self, observations):
""" Model forward pass """
base_output = self.backbone(observations)
q_values = self.q_head(base_output)
return q_values
def reset_weights(self):
""" Initialize weights to reasonable defaults """
self.backbone.reset_weights()
self.q_head.reset_weights()
def step(self, observations):
""" Sample action from an action space for given state """
q_values = self(observations)
return {
'actions': self.q_head.sample(q_values),
'values': q_values
}
def evaluate(self, rollout: Rollout) -> Evaluator:
""" Evaluate model on a rollout """
return QModelEvaluator(self, rollout)
class QModelFactory(ModelFactory):
""" Factory class for q-learning models """
def __init__(self, backbone: ModelFactory):
self.backbone = backbone
def instantiate(self, **extra_args):
""" Instantiate the model """
backbone = self.backbone.instantiate(**extra_args)
return QModel(backbone, extra_args['action_space'])
def create(backbone: ModelFactory):
""" Q-Learning model factory """
return QModelFactory(backbone=backbone)
|
import cv2
from threading import Thread
class Webcam:
def __init__(self):
self.video_capture = cv2.VideoCapture(0)
self.current_frame = self.video_capture.read()[1]
# create thread for capturing images
def start(self):
Thread(target=self._update_frame, args=()).start()
def _update_frame(self):
while True:
self.current_frame = self.video_capture.read()[1]
# get the current frame
def get_current_frame(self):
return self.current_frame |
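# Minimal usage sketch (requires a camera at index 0):
# cam = Webcam()
# cam.start()
# frame = cam.get_current_frame()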