my_name = 'Zed A. Shaw'
my_age = 35 # not a lie
my_height = 74 # inches
my_weight = 180 #lbs
my_eyes = 'blue'
my_teeth = 'white'
my_hair = 'brown'
print(f"Let's talk about {my_name}.")
print(f"He's {my_height} inches tall.")
print(f"He's {my_weight} pounds heavy.")
print("Actually that's not too heavy.")
print(f"He's got {my_eyes} eyes and {my_hair} hair.")
print(f"He's teeth are usually {my_teeth} depending onthe coffee.")
#this line is tricky, try to get it exactly right
total = my_age + my_height + my_weight
print(f"if I add {my_age}, {my_height}, and {my_weight} I get {total}.")
|
#!/usr/bin/env python
from PyQt4 import QtGui
from PyQt4 import QtCore
from PyQt4 import Qt
import PyQt4.Qwt5 as Qwt
import os
class Relay_Frame_Vertical(QtGui.QFrame):
def __init__(self, cfg):
super(Relay_Frame_Vertical, self).__init__()
#self.parent = parent
self.rel_idx = cfg['idx']
#self.value = value
self.name = cfg['name']
self.device = cfg['device']
self.groups = cfg['groups']
self.state = False
self.initUI()
def initUI(self):
self.setFrameShape(QtGui.QFrame.StyledPanel)
self.initWidgets()
def initWidgets(self):
#btn_hbox = QtGui.QHBoxLayout()
self.on_btn = QtGui.QPushButton()
self.on_btn.setCheckable(True)
self.on_btn.setFixedWidth(60)
self.on_btn.setFixedHeight(25)
self.on_btn.setStyleSheet("QPushButton {background-color:rgb(0,0,0);}")
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(os.getcwd() + '/gui/icons/led-green-off-oval.png'))
icon.addPixmap(QtGui.QPixmap(os.getcwd() + '/gui/icons/led-green-on-oval.png'), \
QtGui.QIcon.Normal, \
QtGui.QIcon.On)
self.on_btn.setIcon(icon)
self.on_btn.setIconSize(QtCore.QSize(50,25))
self.on_btn.toggled.connect(self.onButtonClicked)
self.idx_lbl = QtGui.QLabel(self.rel_idx)
self.idx_lbl.setAlignment(QtCore.Qt.AlignCenter|QtCore.Qt.AlignVCenter)
self.idx_lbl.setStyleSheet("QLabel {font-size:12px; color:rgb(255,255,255);}")
self.name_lbl = QtGui.QLabel(self.name)
self.name_lbl.setAlignment(QtCore.Qt.AlignCenter|QtCore.Qt.AlignVCenter)
self.name_lbl.setStyleSheet("QLabel {font-size:12px; color:rgb(255,255,255);}")
self.device_lbl = QtGui.QLabel(self.device)
self.device_lbl.setAlignment(QtCore.Qt.AlignCenter|QtCore.Qt.AlignVCenter)
self.device_lbl.setStyleSheet("QLabel {font-size:12px; color:rgb(255,255,255);}")
str_groups = ""
if (type(self.groups) is list):
str_groups = ",".join(self.groups)
elif type(self.groups) is unicode:
str_groups = self.groups
self.groups_lbl = QtGui.QLabel(str_groups)
self.groups_lbl.setAlignment(QtCore.Qt.AlignCenter|QtCore.Qt.AlignVCenter)
self.groups_lbl.setStyleSheet("QLabel {font-size:12px; color:rgb(255,255,255);}")
self.grid = QtGui.QGridLayout()
self.grid.addWidget(self.name_lbl ,1,0,1,1)
self.grid.addWidget(self.groups_lbl ,2,0,1,1)
self.grid.addWidget(self.on_btn ,0,0,1,1)
#self.grid.setColumnStretch(1,1)
self.grid.setColumnStretch(3,10)
self.grid.setSpacing(1)
self.grid.setContentsMargins(1,1,1,1)
self.setLayout(self.grid)
#btn_hbox.addWidget(self.on_btn)
#btn_hbox.addWidget(self.off_btn)
#btn_hbox.setContentsMargins(0, 0, 0, 0)
#self.setLayout(btn_hbox)
def onButtonClicked(self, state):
print state
self.state = state
def offButtonClicked(self):
self.buttonClicked(False)
class Relay_Frame(QtGui.QFrame):
def __init__(self, cfg):
super(Relay_Frame, self).__init__()
#self.parent = parent
self.rel_idx = cfg['idx']
#self.value = value
self.name = cfg['name']
self.device = cfg['device']
self.groups = cfg['groups']
self.state = False
self.initUI()
def initUI(self):
self.setFrameShape(QtGui.QFrame.StyledPanel)
self.initWidgets()
def initWidgets(self):
#btn_hbox = QtGui.QHBoxLayout()
self.on_btn = QtGui.QPushButton("ON")
self.on_btn.setCheckable(True)
self.on_btn.setFixedWidth(40)
self.on_btn.setStyleSheet("QPushButton {font-size: 14px; \
font-weight:bold; \
background-color:rgb(0,255,0); \
color:rgb(0,0,0);} ")
self.off_btn = QtGui.QPushButton("OFF")
self.off_btn.setCheckable(True)
self.off_btn.setChecked(True)
self.off_btn.setEnabled(False)
self.off_btn.setFixedWidth(40)
self.off_btn.setStyleSheet("QPushButton {font-size: 14px; \
font-weight:bold; \
background-color:rgb(255,0,0); \
color:rgb(0,0,0);} ")
self.on_btn.clicked.connect(self.onButtonClicked)
self.off_btn.clicked.connect(self.offButtonClicked)
self.idx_lbl = QtGui.QLabel(self.rel_idx)
self.idx_lbl.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter)
self.idx_lbl.setStyleSheet("QLabel {font-size:18px; color:rgb(255,255,255);}")
self.name_lbl = QtGui.QLabel(self.name)
self.name_lbl.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter)
self.name_lbl.setStyleSheet("QLabel {font-size:18px; color:rgb(255,255,255);}")
self.device_lbl = QtGui.QLabel(self.device)
self.device_lbl.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter)
self.device_lbl.setStyleSheet("QLabel {font-size:18px; color:rgb(255,255,255);}")
str_groups = ""
if (type(self.groups) is list):
str_groups = ",".join(self.groups)
elif type(self.groups) is unicode:
str_groups = self.groups
self.groups_lbl = QtGui.QLabel(str_groups)
self.groups_lbl.setAlignment(QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.groups_lbl.setStyleSheet("QLabel {font-size:18px; color:rgb(255,255,255);}")
self.grid = QtGui.QGridLayout()
self.grid.addWidget(self.name_lbl ,0,0,1,1)
self.grid.addWidget(self.groups_lbl ,0,1,1,1)
self.grid.addWidget(self.on_btn ,0,2,1,1)
self.grid.addWidget(self.off_btn ,0,3,1,1)
#self.grid.setColumnStretch(1,1)
#self.grid.setColumnStretch(0,10)
#self.grid.setSpacing(0)
self.grid.setContentsMargins(0, 0, 0, 0)
self.setLayout(self.grid)
#btn_hbox.addWidget(self.on_btn)
#btn_hbox.addWidget(self.off_btn)
#btn_hbox.setContentsMargins(0, 0, 0, 0)
#self.setLayout(btn_hbox)
def onButtonClicked(self):
self.buttonClicked(True)
def offButtonClicked(self):
self.buttonClicked(False)
def buttonClicked(self, btn_id):
if btn_id: #ON
#print "on btn clicked"
self.on_btn.setEnabled(False)
self.off_btn.setEnabled(True)
self.off_btn.setChecked(False)
self.state = True
else:#OFF
#print "off btn clicked"
self.on_btn.setEnabled(True)
self.off_btn.setEnabled(False)
self.on_btn.setChecked(False)
self.state = False
print self.name, self.state
|
class WorldObject(object):
def __init__(self):
self.is_player = False
self.is_crate = False
self.is_wall = False
self.is_goal = False
def is_empty(self):
return (not self.is_player) and (not self.is_crate) and (not self.is_wall) and (not self.is_goal)
class World(object):
def __init__(self):
self.height = 0
self.width = 0
self.map = []
def has_player_won(self):
pass
|
# Create a program that reads how much money a person has in their wallet
# and shows how many dollars they can buy.
r = float(input('How many R$ do you have in your wallet? '))
d = r / 3.27
print(f'With R${r:.2f} you can buy US${d:.2f}')
|
import logging
import unittest
from unittest.mock import Mock
from MyPiEye.Storage import S3Storage
from MyPiEye.CLI import load_config
logging.basicConfig(level=logging.INFO)
class S3Tests(unittest.TestCase):
def test_upload(self):
config = load_config(Mock(), Mock(), 'D:\\Data\\projects\\python\\tmp\\mypieye.ini')
config['credential_folder'] = 'D:\\Data\\projects\\python\\tmp'
s3 = S3Storage(config)
ret = s3.upload('test/this', 'data/test_image.jpg')
self.assertTrue(ret)
|
def function():
foo = 1
gen_expr = (bar for bar in xrange(10))
print foo
|
def reverse_string(str1):
return str1[::-1]
def capitalize_string(str1):
return str1.capitalize()
|
from utop.pane import PaneSet, Pane
from utop.views.footer import Footer as FooterView
from utop.views.header import Header as HeaderView
from utop.views.content import Content as ContentView
class DefaultPaneSet(PaneSet):
def set_panes(self):
stdscr = self.model.stdscr
maxx = self.model.maxx
maxy = self.model.maxy
header_size = 8
footer_size = 2
header = Pane(stdscr, maxx, header_size)
header.set_view(HeaderView(self.model))
content = Pane(stdscr, maxx, maxy - (header_size + footer_size), 0, header_size)
content.set_view(ContentView(self.model))
footer = Pane(stdscr, maxx, footer_size, 0, maxy - footer_size)
footer.set_view(FooterView(self.model))
self.panes.append(header)
self.panes.append(content)
self.panes.append(footer)
self.header = header
self.content = content
self.footer = footer
|
#!/usr/bin/env python
# In[11]:
# OBJECTS AS A METAPHOR
from datetime import date
today = date(2011, 9, 2)
str(date(2011, 12, 2) - today)
today.year
today.strftime("%A, %B %d")
# Objects, classes, instances of a class, attributes,
# methods (function-valued attributes), behaviour and information.
# Dot notation: a combined expression in Python
# In[12]:
# NATIVE DATA TYPES
type(today)
# Native data type properties, literals, native numeric types, float vs int
# In[15]:
# DATA ABSTRACTION
# compound structure, compound data value, lat/long regarded as a single
# conceptual unit, technique of isolating how data is represented vs how
# data is manipulated, assumptions about data should be minimal
# Selectors and Constructors, finite binary expansion, # wishful thinking
"""make_rat(n, d) returns the rational number with numerator n and denominator d. #constructor
numer(x) returns the numerator of the rational number x. #selector
denom(x) returns the denominator of the rational number x. #selector"""
def add_rat(x, y):
nx, dx = numer(x), denom(x)
ny, dy = numer(y), denom(y)
return make_rat(nx * dy + ny * dx, dx * dy)
def mul_rat(x, y):
return make_rat(numer(x) * numer(y), denom(x) * denom(y))
def eq_rat(x, y):
return numer(x) * denom(y) == numer(y) * denom(x)
# We have defined operations on rational numbers in terms of numer and
# denom and the constructor fxn make_rat. We still need to glue together numer & denom.
# In[18]:
# Tuples - a compound structure
pair = (1, 2)
x, y = pair
x
y
pair[0]
pair[1]
# indexing from 0 because an index measures how far an element is from the
# beginning of the tuple
from operator import getitem
getitem(pair, 0)
# In[19]:
def make_rat(n, d):
return (n, d)
def numer(x):
return getitem(x, 0)
def denom(x):
return getitem(x, 1)
def str_rat(x):
return "{}/{}".format(numer(x), denom(x))
# In[21]:
half = make_rat(1, 2)
str_rat(half)
# In[22]:
third = make_rat(1, 3)
str_rat(third)
# In[24]:
str_rat(mul_rat(half, third))
# In[25]:
str_rat(add_rat(third, third))
# In[29]:
# The final evaluation above shows our implementation does not reduce rational numbers.
# We need a function to compute the greatest common divisor of two integers.
from math import gcd
def make_rat(n, d):
g = gcd(n, d)
return (n // g, d // g)
# "//" expresses integer division , rounds down to fractional part of result of division.
str_rat(add_rat(third, third))
# In[55]:
# DIVE INTO PYTHON (FURTHER READING)
uid = "sa"
pwd = "secret"
print(pwd, "is not a password for", uid)
usercount = (6,)
print("Users connected: %s" % (usercount))
# In[74]:
def make_pair(x, y):
def dispatch(m):
if m == 0:
return x
elif m == 1:
return y
return dispatch
def getitem_pair(p, i):
return p(i)
p = make_pair(3, 7)
getitem_pair(p, 1)
# In[77]:
k = (1, (2, (3, (4, None))))
type(k)
# In[4]:
empty_rlist = None
def make_rlist(first, rest):
"""Make a recursive list from its first element and the rest."""
return (first, rest)
def first(s):
"""Return the first element of a recursive list s."""
return s[0]
def rest(s):
"""Return the rest of the elements of a recursive list s."""
return s[1]
# In[5]:
counts = make_rlist(1, make_rlist(2, make_rlist(3, make_rlist(4, empty_rlist))))
counts
# In[6]:
def len_rlist(s):
length = 0
while s != empty_rlist:
s, length = rest(s), length + 1
return length
def getitem_rlist(s, i):
while i > 0:
s, i = rest(s), i - 1
return first(s)
# In[16]:
getitem_rlist(counts, 3)
# In[25]:
def count(s, value):
total, index = 0, 0
while index < len(s):
if s[index] == value:
total = total + 1
index = index + 1
return total
digits = (2, 5, 6, 7, 7, 8, 9, 9, 0, 9, 2, 1, 3, 4)
count(digits, 9)
# In[29]:
def count(s, value):
total = 0
for j in s: # s is iterable
if j == value:
total = total + 1
return total
count(digits, 7)
# In[42]:
# sequence unpacking, remember x,y=2,2 works. Similar syntax in For:
mypairs = ((2, 1), (2, 2), (6, 7), (8, 8), (3, 4))
# Count number of same value pairs
def countsame(s):
total = 0
for x, y in s:
if x == y:
total = total + 1
return total
countsame(mypairs)
# In[46]:
tuple(range(1, 10))
# In[69]:
# LISTS
chinese_suits = ["coin", "string", "myriad"]
suits = chinese_suits
alphasuits = suits
betasuits = alphasuits
suits.pop()
suits.remove("string")
suits.append("cup")
suits.extend(("sword", "club"))
suits[2] = "spade"
suits[0:2] = ("heart", "diamond")
nest = list(suits)
nest[0] = suits
suits.insert(2, "joker")
nest
# In[72]:
# nest[0].pop(2)
suits
# In[66]:
suits is nest[0]
# In[78]:
suits is [
"heart",
"diamond",
"spade",
"club",
]  # because `is` checks object identity: suits is a particular list object,
# not just its values, which is what == compares below
# In[76]:
suits is suits
# In[77]:
suits == nest[0]
# In[79]:
suits == ["heart", "diamond", "spade", "club"]
# In[87]:
# LIST IMPLEMENTATION WITH RECURSIVE PROGRAMMING. ACTUAL IMPLEMENTATION IS HIDDEN IN PYTHON
# empty list, unique list
empty_rlist = None
# In[89]:
# first & rest concept of a sequence
def make_rlist(first, rest):
return (first, rest)
def first(s):
return s[0]
def rest(s):
return s[1]
# In[92]:
# now to make a list, use nested function calls:
counts = make_rlist(
1,
make_rlist(
2, make_rlist(7, make_rlist(5, make_rlist(8, make_rlist(12, empty_rlist))))
),
)
counts
# In[97]:
def len_rlist(s):
length = 0
while s != empty_rlist:
s, length = rest(s), length + 1
return length
def getitem_rlist(s, i):
while i > 0:
s, i = rest(s), i - 1
return first(s)
getitem_rlist(counts, 2)
# In[120]:
# length, getitem, push_first, pop_first, str, and a convenience fxn
def make_mutable_rlist():
contents = empty_rlist
def dispatch(message, value=None):
nonlocal contents
if message == "length":
return len_rlist(contents)
elif message == "getitem":
return getitem_rlist(contents, value)
elif message == "push_first":
contents = make_rlist(value, contents)
elif message == "pop_first":
f = first(contents)
contents = rest(contents)
return f
elif message == "str":
return str(contents)
return dispatch
# CONVENIENCE FUNCTION / ADDING ELEMENTS TO THE LIST
def to_mutable_rlist(source):
s = make_mutable_rlist()
for element in reversed(source):
s("push_first", element)
return s
# In[122]:
s = to_mutable_rlist(counts)
type(s)
# In[125]:
s("length")
# In[126]:
len_rlist(counts)
# In[130]:
mylist = ["google", "amazon", "microsoft", "apple", "netflix"]
mys = to_mutable_rlist(mylist)
type(mys)
# In[132]:
mys("length")
# In[146]:
counts = make_rlist(
1,
make_rlist(
2, make_rlist(7, make_rlist(5, make_rlist(8, make_rlist(12, empty_rlist))))
),
)
len(counts)
len_rlist(counts)
s("length")
suits = ["heart", "diamond", "spade", "club"]
# In[149]:
# MYISSUE
counts = make_rlist(
1,
make_rlist(
2, make_rlist(7, make_rlist(5, make_rlist(8, make_rlist(12, empty_rlist))))
),
)
counts
# the built-in len function only sees the pair outside the inner brackets, hence it counts only 2
len(counts)
# this function doesn't stop counting until it reaches the 'empty_rlist' value at the end of the list, hence it counts recursively = 6
def len_rlist(s):
length = 0
while s != empty_rlist:
s, length = rest(s), length + 1
return length
len_rlist(counts)
# built a message-based function that calculates length using 'len_rlist' and fit it into the program below:
def make_mutable_rlist():
contents = empty_rlist
def dispatch(message, value=None):
nonlocal contents
if message == "length":
return len_rlist(contents)
elif message == "getitem":
return getitem_rlist(contents, value)
elif message == "push_first":
contents = make_rlist(value, contents)
elif message == "pop_first":
f = first(contents)
contents = rest(contents)
return f
elif message == "str":
return str(contents)
return dispatch
# to use this program, use the function below
def to_mutable_rlist(source):
s = make_mutable_rlist()
for element in reversed(source):
s("push_first", element)
return s
s = to_mutable_rlist(suits)
# the new function s = ... should give us the length via the 'length' message, which uses the len_rlist function
s("str")
s("length")
# <font color = green>__My issue__</font> was that I was getting different results for **len** and **len_rlist** for the same list. But this was happening when I was using the message function to fetch the length of the list, which uses **len_rlist**. When I was using **len_rlist** directly on the list, it was giving me the correct result. What I didn't realize was that I was applying **to_mutable_rlist**, in order to use the message function built on **len_rlist**, to a list that was already a recursive list, made by *counts=make_rlist(...(..))))*. Using **to_mutable_rlist** on this rlist gave me a new recursive list, which had length 2.
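# A short sketch of the point above, using only definitions already in this notebook:
# `counts` is just a nested pair, so the built-in `len` (and the `reversed` call inside
# `to_mutable_rlist`) sees only two items, while `len_rlist` follows the rest pointers
# all the way down to `empty_rlist`.
# In[ ]:
len(counts)                          # 2: counts is a pair (first element, rest)
len_rlist(counts)                    # 6: walks first/rest until empty_rlist
to_mutable_rlist(counts)("length")   # 2: reversed(counts) iterates only the pair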
# In[152]:
# Dictionaries
# getitem, setitem, # dispatch - keys and values
def make_dict():
records = []
# v is stored according to k; hence, if k == key, return v (remember wishful thinking). The iterable here is records, whose items are (k, v) pairs,
# so return v will return the corresponding value.
def getitem(key):
for k, v in records:
if k == key:
return v
# setitem will take key, value and attach the new value to the key, whether or not the key already exists.
def setitem(key, value):
for item in records:
if item[0] == key:
item[1] = value
return
records.append([key, value])
def dispatch(message, key=None, value=None):
if message == "getitem":
return getitem(key)
elif message == "setitem":
return setitem(key, value)
elif message == "keys":
return tuple(k for k, _ in records)
elif message == "values":
return tuple(v for _, v in records)
elif message == "string":
return str(records)
return dispatch
# In[160]:
mydict = make_dict()
mydict("setitem", 3, 4)
mydict("setitem", ("angel", "misha", "CMD"), ("42", "Rstudio", "Bash"))
mydict("string")
# ### Propagating constraints, Example
#
# <font color = blue>__Keywords:__</font> Constraints, Connectors, nonlocal, dictionaries, general linear method, association, dispatch
# In[ ]:
from operator import add, mul, sub, truediv
def inform_all_except(source, message, constraints):
"""Inform all constraints of the message except source"""
for c in constraints:
if c != source:
c[message]()
def make_ternary_constraints(a, b, c, ab, ca, cb):
"""The constraint that ab(a,b)=c and ca(c,a)=b and cb(c,b)=a"""
def new_value():
av, bv, cv = [connector["has_val"]() for connector in (a, b, c)]
if av and bv:
c["set_val"](constraint, ab(a["val"], b["val"]))
elif av and cv:
b["set_val"](constraint, ac(a["val"], c["val"]))
elif cv and bv:
a["set_val"](constraint, cb(c["val"], b["val"]))
def forget_value():
for connector in (a, b, c):
connector["forget"](constraint)
constraint = {"new_val": new_value, "forget": forget_value}
for connector in (a, b, c):
connector["connect"](constraint)
return constraint
def adder(a, b, c):
"""the constraint that a+b=c"""
return make_ternary_constraints(a, b, c, add, sub, sub)
def multiplier(a, b, c):
"""the constraint that a*b=c"""
return make_ternary_constraints(a, b, c, mul, truediv, truediv)
def constant(connector, value):
"""the constraint that connector=value"""
constraint = {}
connector["set_val"](constraint, value)
return constraint
def make_connector(name=None):
"""A connector between constraints"""
informant = None
constraints = []
def set_value(source, value):
nonlocal informant
val = connector["val"]
if val is None:
informant, connector["val"] = source, value
if name is not None:
print(name, "=", value)
inform_all_except(source, "new_val", constraints)
elif val != value:
print("Contradiction detected", val, "vs", value)
def forget_value(source):
nonlocal informant
if informant == source:
informant, connector["val"] = None, None
if name is not None:
print(name, "is forgotten")
inform_all_except(source, "forget", constraints)
connector = {
"val": None,
"set_val": set_value,
"forget": forget_value,
"has_val": lambda: connector["val"] is not None,
"connect": lambda source: constraints.append(source),
}
return connector
celsius = make_connector("Celsius")
fahrenheit = make_connector("Fahrenheit")
def make_converter(c, f):
"""connect c to f with constraints from celsius to fahrenheit"""
u, v, w, x, y = [make_connector() for _ in range(5)]
multiplier(c, w, u)
multiplier(v, x, u)
adder(v, y, f)
constant(w, 9)
constant(x, 5)
constant(y, 32)
make_converter(celsius, fahrenheit)
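# A quick usage sketch of the constraint network just built: setting a value on the
# Celsius connector propagates through the multiplier/adder constraints and prints
# the corresponding Fahrenheit value ("user" is just an arbitrary source tag).
# In[ ]:
celsius["set_val"]("user", 25)   # prints: Celsius = 25, then Fahrenheit = 77
celsius["forget"]("user")        # prints: Celsius is forgotten, Fahrenheit is forgotten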
# ## Implementing Classes and Objects
#
#
# In[2]:
def make_instance(cls):
"""return a new object instance, which is a dispatch dictionary"""
def get_value(name):
if name in attributes:
return attributes[name]
else:
value = cls["get"](name)
return bind_method(value, instance)
def set_value(name, value):
attributes[name] = value
attributes = {}
instance = {"get": get_value, "set": set_value}
return instance
# In[3]:
def bind_method(value, instance):
"""Return a bound method if value is callable, or value otherwise"""
if callable(value):
def method(*args):
return value(instance, *args)
return method
else:
return value
# In[4]:
def make_class(attributes, base_class=None):
"""Return a new class, which is a dispatch dictionary."""
def get_value(name):
if name in attributes:
return attributes[name]
elif base_class is not None:
return base_class["get"](name)
def set_value(name, value):
attributes[name] = value
def new(*args):
return init_instance(cls, *args)
cls = {"get": get_value, "set": set_value, "new": new}
return cls
# In[5]:
def init_instance(cls, *args):
"""Return a new object with type cls, initialized with args"""
instance = make_instance(cls)
init = cls["get"]("__init__")
if init:
init(instance, *args)
return instance
# In[6]:
def make_account_class():
"""Return the Account class, which has deposit and withdraw methods."""
def __init__(self, account_holder):
self["set"]("holder", account_holder)
self["set"]("balance", 0)
def deposit(self, amount):
"""Increase the account balance by amount and return the new balance."""
new_balance = self["get"]("balance") + amount
self["set"]("balance", new_balance)
return self["get"]("balance")
def withdraw(self, amount):
"""Decrease the account balance by amount and return the new balance."""
balance = self["get"]("balance")
if amount > balance:
return "Insufficient funds"
self["set"]("balance", balance - amount)
return self["get"]("balance")
return make_class(
{
"__init__": __init__,
"deposit": deposit,
"withdraw": withdraw,
"interest": 0.02,
}
)
# In[9]:
def init_instance1(cls, *args):
"""Return a new object with type cls, initialized with args"""
instance = make_instance(cls)
init = cls["get"]("__init__")
# if init:
# init (instance, *args)
return init
# In[ ]:
# In[10]:
Account = make_account_class()
jim_acct = Account["new"]("Jim")
type(jim_acct)
# In[12]:
jim_acct["get"]("holder")
# In[13]:
jim_acct["get"]("interest")
# In[15]:
jim_acct["get"]("deposit")(20)
# In[16]:
jim_acct["get"]("withdraw")(5)
# In[20]:
jim_acct["set"]("interest", 0.04)
Account["get"]("interest")
# In[21]:
jim_acct["get"]("interest")
# ### Inheritance
# In[22]:
def make_checking_account_class():
"""Return the CheckingAccount class, which imposes a $1 withdrawal fee."""
def withdraw(self, amount):
return Account["get"]("withdraw")(self, amount + 1)
return make_class({"withdraw": withdraw, "interest": 0.01}, Account)
# In[23]:
CheckingAcc = make_checking_account_class()
jack_acct = CheckingAcc["new"]("jack")
# In[24]:
print(jack_acct["get"]("interest"))
print(jack_acct["get"]("deposit")(20))
print(jack_acct["get"]("withdraw")(9))
# ### Generic Operations
#
# Combining and manipulating objects of different types is how we build large programs. So far we have used message functions; with Python's built-in objects, we have used dot expressions.
#
# Using message passing, we endowed our abstract data types with behaviour directly. Using the object metaphor, we bundled together the representation of data and the methods used to manipulate that data, to modularize data-driven programs with local state.
#
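# As a small side-by-side sketch of the two styles described above (reusing the
# `jim_acct` dispatch dictionary from earlier; the string example is illustrative):
# In[ ]:
jim_acct["get"]("deposit")(10)   # message passing: look the method up by name, then call it
"hello".upper()                  # dot expression: Python's built-in object system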
# In[1]:
def hexid(obj):
return hex(id(obj))
def make_instance(cls): # good with this
""" Return a new object instance, which is a dispatch dictionary """
def get_value(name):
print("INSTANCE GET_VALUE", name, "from", hexid(attributes))
if name in attributes:
return attributes[name]
else:
value = cls["get"](name)
return bind_method(value, instance)
def set_value(name, value):
attributes[name] = value
attributes = {"test": "Default Test"}
print("Created instance attributes", hexid(attributes))
instance = {"get": get_value, "set": set_value}
return instance
def bind_method(value, instance): # good with this
""" Return a bound method if value is callable, or value otherwise """
if callable(value):
def method(*args):
return value(instance, *args)
return method
else:
return value
def make_class(attributes, base_class=None):
""" Return a new class, which is a dispatch dictionary. """
def get_value(name):
print("\nCLASS GET_VALUE", name, "from", hexid(attributes))
if name in attributes:
return attributes[name]
elif base_class is not None:
return base_class["get"](name)
def set_value(name, value):
attributes[name] = value
def new(*args):
return init_instance(cls, *args)
print("Creating class with attributes", hexid(attributes))
cls = {"get": get_value, "set": set_value, "new": new}
return cls
def init_instance(cls, *args): # problem here
""" Return a new object with type cls, initialized with args """
instance = make_instance(cls)
init = cls["get"]("__init__")
if init:
print("Calling init of", hexid(cls), "on", hexid(instance), "with", args)
init(instance, *args) # No return here
return instance
def make_my_class(): # define a custom class
# Create a simple __init__ for the class
def __init__(inst, *args):
print("INIT", hexid(inst), args)
inst["set"]("data", args)
# return a dict that implements class
return make_class({"__init__": __init__})
# test
# create a class
my_class = make_my_class()
# create some class instances
jim = my_class["new"]("Jim")
jim["set"]("test", "Hello")
fred = my_class["new"]("Fred")
print("CLASS", hexid(my_class))
print("\nINSTANCE", hexid(jim))
print(jim["get"]("data"))
print(jim["get"]("test"))
print("\nINSTANCE", hexid(fred))
print(fred["get"]("data"))
print(fred["get"]("test"))
# In[3]:
help(hex)
# # Super SO References
#
# https://stackoverflow.com/questions/50769327/class-instance-implementation-initializing-instance-from-sicp-python/50871026#50871026
#
#
# https://stackoverflow.com/questions/4020419/why-arent-python-nested-functions-called-closures/20898085#20898085
#
#
# https://stackoverflow.com/questions/12919278/how-to-define-free-variable-in-python
# In[ ]:
|
"""Tests for `osxphotos exiftool` command."""
import glob
import json
import os
import pytest
from click.testing import CliRunner
from osxphotos.cli.exiftool_cli import exiftool
from osxphotos.cli.export import export
from osxphotos.exiftool import ExifTool, get_exiftool_path
from .test_cli import CLI_EXIFTOOL, PHOTOS_DB_15_7
# determine if exiftool is installed so exiftool tests can be skipped
try:
exiftool_path = get_exiftool_path()
except FileNotFoundError:
exiftool_path = None
@pytest.mark.skipif(exiftool_path is None, reason="exiftool not installed")
def test_export_exiftool():
"""Test osxphotos exiftool"""
runner = CliRunner()
cwd = os.getcwd()
with runner.isolated_filesystem() as temp_dir:
uuid_option = []
for uuid in CLI_EXIFTOOL:
uuid_option.extend(("--uuid", uuid))
# first, export without --exiftool
result = runner.invoke(
export,
[
"--db",
os.path.join(cwd, PHOTOS_DB_15_7),
temp_dir,
"-V",
*uuid_option,
],
)
assert result.exit_code == 0
files = glob.glob("*")
assert sorted(files) == sorted(
[CLI_EXIFTOOL[uuid]["File:FileName"] for uuid in CLI_EXIFTOOL]
)
# now, run exiftool command to update exiftool metadata
result = runner.invoke(
exiftool,
["--db", os.path.join(cwd, PHOTOS_DB_15_7), "-V", "--db-config", temp_dir],
)
assert result.exit_code == 0
exif = ExifTool(CLI_EXIFTOOL[uuid]["File:FileName"]).asdict()
for key in CLI_EXIFTOOL[uuid]:
if type(exif[key]) == list:
assert sorted(exif[key]) == sorted(CLI_EXIFTOOL[uuid][key])
else:
assert exif[key] == CLI_EXIFTOOL[uuid][key]
# now, export with --exiftool --update, no files should be updated
result = runner.invoke(
export,
[
"--db",
os.path.join(cwd, PHOTOS_DB_15_7),
temp_dir,
"-V",
"--exiftool",
"--update",
*uuid_option,
],
)
assert result.exit_code == 0
assert f"exported: 0, updated: 0, skipped: {len(CLI_EXIFTOOL)}" in result.output
@pytest.mark.skipif(exiftool_path is None, reason="exiftool not installed")
def test_export_exiftool_album_keyword():
"""Test osxphotos exiftool with --album-template."""
runner = CliRunner()
cwd = os.getcwd()
with runner.isolated_filesystem() as temp_dir:
# first, export without --exiftool
result = runner.invoke(
export,
[
"--db",
os.path.join(cwd, PHOTOS_DB_15_7),
temp_dir,
"-V",
"--album",
"Pumpkin Farm",
],
)
assert result.exit_code == 0
files = glob.glob("*")
assert len(files) == 3
# now, run exiftool command to update exiftool metadata
result = runner.invoke(
exiftool,
[
"--db",
os.path.join(cwd, PHOTOS_DB_15_7),
"-V",
"--db-config",
"--report",
"exiftool.json",
"--album-keyword",
temp_dir,
],
)
assert result.exit_code == 0
report = json.load(open("exiftool.json", "r"))
assert len(report) == 3
# verify exiftool metadata was updated
for file in report:
exif = ExifTool(file["filename"]).asdict()
assert "Pumpkin Farm" in exif["IPTC:Keywords"]
# now, export with --exiftool --update, no files should be updated
result = runner.invoke(
export,
[
"--db",
os.path.join(cwd, PHOTOS_DB_15_7),
temp_dir,
"-V",
"--exiftool",
"--update",
"--album",
"Pumpkin Farm",
"--album-keyword",
],
)
assert result.exit_code == 0
assert f"exported: 0, updated: 0, skipped: 3" in result.output
@pytest.mark.skipif(exiftool_path is None, reason="exiftool not installed")
def test_export_exiftool_keyword_template():
"""Test osxphotos exiftool with --keyword-template."""
runner = CliRunner()
cwd = os.getcwd()
with runner.isolated_filesystem() as temp_dir:
uuid_option = []
for uuid in CLI_EXIFTOOL:
uuid_option.extend(("--uuid", uuid))
# first, export without --exiftool
result = runner.invoke(
export,
[
"--db",
os.path.join(cwd, PHOTOS_DB_15_7),
temp_dir,
"-V",
*uuid_option,
],
)
assert result.exit_code == 0
# now, run exiftool command to update exiftool metadata
result = runner.invoke(
exiftool,
[
"--db",
os.path.join(cwd, PHOTOS_DB_15_7),
"-V",
"--db-config",
"--keyword-template",
"FOO",
temp_dir,
"--report",
"exiftool.json",
],
)
assert result.exit_code == 0
report = json.load(open("exiftool.json", "r"))
for file in report:
exif = ExifTool(file["filename"]).asdict()
assert "FOO" in exif["IPTC:Keywords"]
# now, export with --exiftool --update, no files should be updated
result = runner.invoke(
export,
[
"--db",
os.path.join(cwd, PHOTOS_DB_15_7),
temp_dir,
"-V",
"--exiftool",
"--keyword-template",
"FOO",
"--update",
*uuid_option,
],
)
assert result.exit_code == 0
assert f"exported: 0, updated: 0, skipped: {len(CLI_EXIFTOOL)}" in result.output
@pytest.mark.skipif(exiftool_path is None, reason="exiftool not installed")
def test_export_exiftool_load_config():
"""Test osxphotos exiftool with --load-config"""
runner = CliRunner()
cwd = os.getcwd()
with runner.isolated_filesystem() as temp_dir:
uuid_option = []
for uuid in CLI_EXIFTOOL:
uuid_option.extend(("--uuid", uuid))
# first, export without --exiftool
result = runner.invoke(
export,
[
"--db",
os.path.join(cwd, PHOTOS_DB_15_7),
temp_dir,
"-V",
"--save-config",
"config.toml",
*uuid_option,
],
)
assert result.exit_code == 0
# now, run exiftool command to update exiftool metadata
result = runner.invoke(
exiftool,
["-V", "--load-config", "config.toml", temp_dir],
)
assert result.exit_code == 0
exif = ExifTool(CLI_EXIFTOOL[uuid]["File:FileName"]).asdict()
for key in CLI_EXIFTOOL[uuid]:
if type(exif[key]) == list:
assert sorted(exif[key]) == sorted(CLI_EXIFTOOL[uuid][key])
else:
assert exif[key] == CLI_EXIFTOOL[uuid][key]
# now, export with --exiftool --update, no files should be updated
result = runner.invoke(
export,
[
"--db",
os.path.join(cwd, PHOTOS_DB_15_7),
temp_dir,
"-V",
"--exiftool",
"--update",
*uuid_option,
],
)
assert result.exit_code == 0
assert f"exported: 0, updated: 0, skipped: {len(CLI_EXIFTOOL)}" in result.output
|
from django.core.exceptions import ImproperlyConfigured
from django.test.utils import override_settings
from django.urls import NoReverseMatch
from django_hosts.resolvers import (get_host, get_host_patterns, get_hostconf,
get_hostconf_module, reverse, reverse_host)
from .base import HostsTestCase
from .hosts import simple
class ReverseTest(HostsTestCase):
@override_settings(ROOT_HOSTCONF='tests.hosts.simple')
def test_reverse_host(self):
self.assertRaises(ValueError,
reverse_host, 'with_kwargs', ['spam'], dict(eggs='spam'))
self.assertRaises(NoReverseMatch,
reverse_host, 'with_kwargs', ['spam', 'eggs'])
self.assertRaises(NoReverseMatch,
reverse_host, 'with_kwargs', [], dict(eggs='spam', spam='eggs'))
self.assertEqual('johndoe',
reverse_host('with_kwargs', None, dict(username='johndoe')))
self.assertEqual(reverse_host('with_args', ['johndoe']), 'johndoe')
with self.settings(PARENT_HOST='spam.eggs'):
self.assertEqual(reverse_host('with_args', ['johndoe']),
'johndoe.spam.eggs')
@override_settings(
ROOT_HOSTCONF='tests.hosts.simple',
PARENT_HOST='spam.eggs')
def test_reverse(self):
self.assertEqual(reverse('simple-direct', host='static'),
'//static.spam.eggs/simple/')
@override_settings(
ROOT_HOSTCONF='tests.hosts.simple',
PARENT_HOST='example.com')
def test_reverse_without_www(self):
self.assertEqual(reverse('simple-direct', host='without_www'),
'//example.com/simple/')
@override_settings(
ROOT_HOSTCONF='tests.hosts.blank',
PARENT_HOST='example.com')
def test_reverse_blank(self):
self.assertEqual(reverse('simple-direct', host='blank_or_www'),
'//example.com/simple/')
@override_settings(
ROOT_HOSTCONF='tests.hosts.simple',
PARENT_HOST='spam.eggs')
def test_reverse_custom_scheme(self):
self.assertEqual(reverse('simple-direct', host='scheme'),
'https://scheme.spam.eggs/simple/')
self.assertEqual(reverse('simple-direct', host='scheme', scheme='ftp'),
'ftp://scheme.spam.eggs/simple/')
@override_settings(
ROOT_HOSTCONF='tests.hosts.simple',
PARENT_HOST='spam.eggs')
def test_reverse_custom_port(self):
self.assertEqual(reverse('simple-direct', host='port'),
'//port.spam.eggs:12345/simple/')
self.assertEqual(reverse('simple-direct', host='port', port='1337'),
'//port.spam.eggs:1337/simple/')
class UtilityTests(HostsTestCase):
@override_settings(ROOT_HOSTCONF='tests.hosts.simple')
def test_get_hostconf_module(self):
self.assertEqual(get_hostconf_module(), simple)
def test_get_hostconf_module_no_default(self):
self.assertEqual(
get_hostconf_module('tests.hosts.simple'), simple)
def test_missing_host_patterns(self):
self.assertRaisesMessage(ImproperlyConfigured,
'Missing ROOT_HOSTCONF setting', get_host_patterns)
@override_settings(ROOT_HOSTCONF='tests.hosts')
def test_missing_host_patterns_in_module(self):
self.assertRaisesMessage(ImproperlyConfigured,
"Missing host_patterns in 'tests.hosts'",
get_host_patterns)
@override_settings(ROOT_HOSTCONF='tests.hosts.simple')
def test_get_working_host_patterns(self):
self.assertEqual(get_host_patterns(), simple.host_patterns)
@override_settings(ROOT_HOSTCONF='tests.hosts.simple')
def test_get_host(self):
self.assertEqual(get_host('static').name, 'static')
self.assertRaisesMessage(NoReverseMatch,
"No host called 'non-existent' exists", get_host, 'non-existent')
@override_settings(ROOT_HOSTCONF='tests.hosts.appended')
def test_appended_patterns(self):
self.assertEqual(get_host('special').name, 'special')
@override_settings(
ROOT_HOSTCONF='tests.hosts.simple',
DEFAULT_HOST='www',
)
class SettingChangedClearCacheTests(HostsTestCase):
def test_root_hostconf(self):
self.assertEqual(get_hostconf(), 'tests.hosts.simple')
with self.settings(ROOT_HOSTCONF='tests.hosts.appended'):
self.assertEqual(get_hostconf(), 'tests.hosts.appended')
self.assertEqual(get_hostconf(), 'tests.hosts.simple')
def test_default_host(self):
self.assertEqual(get_host().name, 'www')
with self.settings(DEFAULT_HOST='static'):
self.assertEqual(get_host().name, 'static')
self.assertEqual(get_host().name, 'www')
|
from .simple_pick_and_place import SimplePickAndPlace
from .machine_blocking_process import MachineBlockingProcess
from .close_gripper import CloseGripper
from .initialize import Initialize
from .open_gripper import OpenGripper
|
"""
"""
CASSANDRA_1 = "192.168.59.103"
CASSANDRA_KEYSPACE = "test"
NUMBER_OF_NODES = 1
PROVIDERS = [
'dataone',
'arxiv_oai',
'crossref',
'pubmed',
'figshare',
'scitech',
'clinicaltrials',
'plos',
'mit',
'vtech',
'cmu',
'columbia',
'calpoly',
'opensiuc',
'doepages',
'stcloud',
'spdataverse',
'trinity',
'texasstate',
'valposcholar',
'utaustin',
'uwashington',
'uiucideals',
'ucescholarship',
'upennsylvania',
'utaustin',
'waynestate',
]
|
#!/usr/bin/env python
# coding: utf-8
#
# # Tutorial 1: "What" models
# __Content creators:__ Matt Laporte, Byron Galbraith, Konrad Kording
#
# __Content reviewers:__ Dalin Guo, Aishwarya Balwani, Madineh Sarvestani, Maryam Vaziri-Pashkam, Michael Waskom
#
# We would like to acknowledge [Steinmetz _et al._ (2019)](https://www.nature.com/articles/s41586-019-1787-x) for sharing their data, a subset of which is used here.
#
# ___
# # Tutorial Objectives
# This is tutorial 1 of a 3-part series on different flavors of models used to understand neural data. In this tutorial we will explore 'What' models, used to describe the data. To understand what our data looks like, we will visualize it in different ways. Then we will compare it to simple mathematical models. Specifically, we will:
#
# - Load a dataset with spiking activity from hundreds of neurons and understand how it is organized
# - Make plots to visualize characteristics of the spiking activity across the population
# - Compute the distribution of "inter-spike intervals" (ISIs) for a single neuron
# - Consider several formal models of this distribution's shape and fit them to the data "by hand"
# In[ ]:
#@title Video 1: "What" Models
from IPython.display import YouTubeVideo
video = YouTubeVideo(id='KgqR_jbjMQg', width=854, height=480, fs=1)
print("Video available at https://youtube.com/watch?v=" + video.id)
video
# # Setup
#
#
# Python requires you to explicitly "import" libraries before their functions are available to use. We will always specify our imports at the beginning of each notebook or script.
# In[ ]:
import numpy as np
import matplotlib.pyplot as plt
# Tutorial notebooks typically begin with several set-up steps that are hidden from view by default.
#
# **Important:** Even though the code is hidden, you still need to run it so that the rest of the notebook can work properly. Step through each cell, either by pressing the play button in the upper-left-hand corner or with a keyboard shortcut (`Cmd-Return` on a Mac, `Ctrl-Enter` otherwise). A number will appear inside the brackets (e.g. `[3]`) to tell you that the cell was executed and what order that happened in.
#
# If you are curious to see what is going on inside each cell, you can double click to expand. Once expanded, double-click the white space to the right of the editor to collapse again.
# In[ ]:
#@title Figure Settings
import ipywidgets as widgets #interactive display
get_ipython().run_line_magic('matplotlib', 'inline')
get_ipython().run_line_magic('config', "InlineBackend.figure_format = 'retina'")
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/nma.mplstyle")
# In[ ]:
#@title Helper functions
#@markdown Most of the tutorials make use of helper functions
#@markdown to simplify the code that you need to write. They are defined here.
# Please don't edit these, or worry about understanding them now!
def restrict_spike_times(spike_times, interval):
"""Given a spike_time dataset, restrict to spikes within given interval.
Args:
spike_times (sequence of np.ndarray): List or array of arrays,
each inner array has spike times for a single neuron.
interval (tuple): Min, max time values; keep min <= t < max.
Returns:
np.ndarray: like `spike_times`, but only within `interval`
"""
interval_spike_times = []
for spikes in spike_times:
interval_mask = (spikes >= interval[0]) & (spikes < interval[1])
interval_spike_times.append(spikes[interval_mask])
return np.array(interval_spike_times, object)
# In[ ]:
#@title Data retrieval
#@markdown This cell downloads the example dataset that we will use in this tutorial.
import io
import requests
r = requests.get('https://osf.io/sy5xt/download')
if r.status_code != 200:
print('Failed to download data')
else:
spike_times = np.load(io.BytesIO(r.content), allow_pickle=True)['spike_times']
# ---
#
# # Section 1: Exploring the Steinmetz dataset
#
# In this tutorial we will explore the structure of a neuroscience dataset.
#
# We consider a subset of data from a study of [Steinmetz _et al._ (2019)](https://www.nature.com/articles/s41586-019-1787-x). In this study, Neuropixels probes were implanted in the brains of mice. Electrical potentials were measured by hundreds of electrodes along the length of each probe. Each electrode's measurements captured local variations in the electric field due to nearby spiking neurons. A spike sorting algorithm was used to infer spike times and cluster spikes according to common origin: a single cluster of sorted spikes is causally attributed to a single neuron.
#
# In particular, a single recording session of spike times and neuron assignments was loaded and assigned to `spike_times` in the preceding setup.
#
# Typically a dataset comes with some information about its structure. However, this information may be incomplete. You might also apply some transformations or "pre-processing" to create a working representation of the data of interest, which might go partly undocumented depending on the circumstances. In any case it is important to be able to use the available tools to investigate unfamiliar aspects of a data structure.
#
# Let's see what our data looks like...
# ## Section 1.1: Warming up with `spike_times`
# What is the Python type of our variable?
# In[ ]:
type(spike_times)
# You should see `numpy.ndarray`, which means that it's a normal NumPy array.
#
# If you see an error message, it probably means that you did not execute the set-up cells at the top of the notebook. So go ahead and make sure to do that.
#
# Once everything is running properly, we can ask the next question about the dataset: what's its shape?
# In[ ]:
spike_times.shape
# There are 734 entries in one dimension, and no other dimensions. What is the Python type of the first entry, and what is *its* shape?
# In[ ]:
idx = 0
print(
type(spike_times[idx]),
spike_times[idx].shape,
sep="\n",
)
# It's also a NumPy array with a 1D shape! Why didn't this show up as a second dimension in the shape of `spike_times`? That is, why not `spike_times.shape == (734, 826)`?
#
# To investigate, let's check another entry.
# In[ ]:
idx = 321
print(
type(spike_times[idx]),
spike_times[idx].shape,
sep="\n",
)
# It's also a 1D NumPy array, but it has a different shape. Checking the NumPy types of the values in these arrays, and their first few elements, we see they are composed of floating point numbers (not another level of `np.ndarray`):
# In[ ]:
i_neurons = [0, 321]
i_print = slice(0, 5)
for i in i_neurons:
print(
"Neuron {}:".format(i),
spike_times[i].dtype,
spike_times[i][i_print],
"\n",
sep="\n"
)
# Note that this time we've checked the NumPy `dtype` rather than the Python variable type. These two arrays contain floating point numbers ("floats") with 32 bits of precision.
#
# The basic picture is coming together:
# - `spike_times` is 1D, its entries are NumPy arrays, and its length is the number of neurons (734): by indexing it, we select a subset of neurons.
# - An array in `spike_times` is also 1D and corresponds to a single neuron; its entries are floating point numbers, and its length is the number of spikes attributed to that neuron. By indexing it, we select a subset of spike times for that neuron.
#
# Visually, you can think of the data structure as looking something like this:
#
# ```
# | . . . . . |
# | . . . . . . . . |
# | . . . |
# | . . . . . . . |
# ```
#
# Before moving on, we'll calculate and store the number of neurons in the dataset and the number of spikes per neuron:
# In[ ]:
n_neurons = len(spike_times)
total_spikes_per_neuron = [len(spike_times_i) for spike_times_i in spike_times]
print(f"Number of neurons: {n_neurons}")
print(f"Number of spikes for first five neurons: {total_spikes_per_neuron[:5]}")
# In[ ]:
#@title Video 2: Exploring the dataset
from IPython.display import YouTubeVideo
video = YouTubeVideo(id='oHwYWUI_o1U', width=854, height=480, fs=1)
print("Video available at https://youtube.com/watch?v=" + video.id)
video
# ## Section 1.2: Getting warmer: counting and plotting total spike counts
#
# As we've seen, the number of spikes over the entire recording is variable between neurons. More generally, some neurons tend to spike more than others in a given period. Let's explore what the distribution of spiking looks like across all the neurons in the dataset.
# Are most neurons "loud" or "quiet", compared to the average? To see, we'll define bins of constant width in terms of total spikes and count the neurons that fall in each bin. This is known as a "histogram".
#
# You can plot a histogram with the matplotlib function `plt.hist`. If you just need to compute it, you can use the numpy function `np.histogram` instead.
# In[ ]:
plt.hist(total_spikes_per_neuron, bins=50, histtype="stepfilled")
plt.xlabel("Total spikes per neuron")
plt.ylabel("Number of neurons");
# Let's see what percentage of neurons have a below-average spike count:
# In[ ]:
mean_spike_count = np.mean(total_spikes_per_neuron)
frac_below_mean = (total_spikes_per_neuron < mean_spike_count).mean()
print(f"{frac_below_mean:2.1%} of neurons are below the mean")
# We can also see this by adding the average spike count to the histogram plot:
# In[ ]:
plt.hist(total_spikes_per_neuron, bins=50, histtype="stepfilled")
plt.xlabel("Total spikes per neuron")
plt.ylabel("Number of neurons")
plt.axvline(mean_spike_count, color="orange", label="Mean neuron")
plt.legend();
# This shows that the majority of neurons are relatively "quiet" compared to the mean, while a small number of neurons are exceptionally "loud": they must have spiked more often to reach a large count.
#
# ### Exercise 1: Comparing mean and median neurons
#
# If the mean neuron is more active than 68% of the population, what does that imply about the relationship between the mean neuron and the median neuron?
#
# *Exercise objective:* Reproduce the plot above, but add the median neuron.
#
# In[ ]:
# To complete the exercise, fill in the missing parts (...) and uncomment the code
median_spike_count = ... # Hint: Try the function np.median
# plt.hist(..., bins=50, histtype="stepfilled")
# plt.axvline(..., color="limegreen", label="Median neuron")
# plt.axvline(mean_spike_count, color="orange", label="Mean neuron")
# plt.xlabel("Total spikes per neuron")
# plt.ylabel("Number of neurons")
# plt.legend()
# [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W1D1_ModelTypes/solutions/W1D1_Tutorial1_Solution_b3411d5d.py)
#
# *Example output:*
#
# <img alt='Solution hint' align='left' width=558 height=413 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W1D1_ModelTypes/static/W1D1_Tutorial1_Solution_b3411d5d_0.png>
#
#
#
# *Bonus:* The median is the 50th percentile. What about other percentiles? Can you show the interquartile range on the histogram?
# ---
#
# # Section 2: Visualizing neuronal spiking activity
# ## Section 2.1: Getting a subset of the data
#
# Now we'll visualize trains of spikes. Because the recordings are long, we will first define a short time interval and restrict the visualization to only the spikes in this interval. We defined a utility function, `restrict_spike_times`, to do this for you. If you call `help()` on the function, it will tell you a little bit about itself:
# In[ ]:
help(restrict_spike_times)
# In[ ]:
t_interval = (5, 15) # units are seconds after start of recording
interval_spike_times = restrict_spike_times(spike_times, t_interval)
# Is this a representative interval? What fraction of the total spikes fall in this interval?
# In[ ]:
original_counts = sum([len(spikes) for spikes in spike_times])
interval_counts = sum([len(spikes) for spikes in interval_spike_times])
frac_interval_spikes = interval_counts / original_counts
print(f"{frac_interval_spikes:.2%} of the total spikes are in the interval")
# How does this compare to the ratio between the interval duration and the experiment duration? (What fraction of the total time is in this interval?)
#
# We can approximate the experiment duration by taking the minimum and maximum spike time in the whole dataset. To do that, we "concatenate" all of the neurons into one array and then use `np.ptp` ("peak-to-peak") to get the difference between the maximum and minimum value:
# In[ ]:
spike_times_flat = np.concatenate(spike_times)
experiment_duration = np.ptp(spike_times_flat)
interval_duration = t_interval[1] - t_interval[0]
frac_interval_time = interval_duration / experiment_duration
print(f"{frac_interval_time:.2%} of the total time is in the interval")
# These two values—the fraction of total spikes and the fraction of total time—are similar. This suggests the average spike rate of the neuronal population is not very different in this interval compared to the entire recording.
#
# ## Section 2.2: Plotting spike trains and rasters
#
# Now that we have a representative subset, we're ready to plot the spikes, using the matplotlib `plt.eventplot` function. Let's look at a single neuron first:
# In[ ]:
neuron_idx = 1
plt.eventplot(interval_spike_times[neuron_idx], color=".2")
plt.xlabel("Time (s)")
plt.yticks([]);
# We can also plot multiple neurons. Here are three:
# In[ ]:
neuron_idx = [1, 11, 51]
plt.eventplot(interval_spike_times[neuron_idx], color=".2")
plt.xlabel("Time (s)")
plt.yticks([]);
# This makes a "raster" plot, where the spikes from each neuron appear in a different row.
#
# Plotting a large number of neurons can give you a sense for the characteristics in the population. Let's show every 5th neuron that was recorded:
# In[ ]:
neuron_idx = np.arange(0, len(spike_times), 5)
plt.eventplot(interval_spike_times[neuron_idx], color=".2")
plt.xlabel("Time (s)")
plt.yticks([]);
# *Question*: How does the information in this plot relate to the histogram of total spike counts that you saw above?
# In[ ]:
#@title Video 3: Visualizing activity
from IPython.display import YouTubeVideo
video = YouTubeVideo(id='QGA5FCW7kkA', width=854, height=480, fs=1)
print("Video available at https://youtube.com/watch?v=" + video.id)
video
# ---
#
# # Section 3: Inter-spike intervals and their distributions
# Given the ordered arrays of spike times for each neuron in `spike_times`, which we've just visualized, what can we ask next?
#
# Scientific questions are informed by existing models. So, what knowledge do we already have that can inform questions about this data?
#
# We know that there are physical constraints on neuron spiking. Spiking costs energy, which the neuron's cellular machinery can only obtain at a finite rate. Therefore neurons should have a refractory period: they can only fire as quickly as their metabolic processes can support, and there is a minimum delay between consecutive spikes of the same neuron.
#
# More generally, we can ask "how long does a neuron wait to spike again?" or "what is the longest a neuron will wait?" Can we transform spike times into something else, to address questions like these more directly?
#
# We can consider the inter-spike times (or interspike intervals: ISIs). These are simply the time differences between consecutive spikes of the same neuron.
#
# ### Exercise 2: Plot the distribution of ISIs for a single neuron
#
# *Exercise objective:* make a histogram, like we did for spike counts, to show the distribution of ISIs for one of the neurons in the dataset.
#
# Do this in three steps:
#
# 1. Extract the spike times for one of the neurons
# 2. Compute the ISIs (the amount of time between spikes, or equivalently, the difference between adjacent spike times)
# 3. Plot a histogram with the array of individual ISIs
# In[ ]:
def compute_single_neuron_isis(spike_times, neuron_idx):
"""Compute a vector of ISIs for a single neuron given spike times.
Args:
spike_times (list of 1D arrays): Spike time dataset, with the first
dimension corresponding to different neurons.
neuron_idx (int): Index of the unit to compute ISIs for.
Returns:
isis (1D array): Duration of time between each spike from one neuron.
"""
#############################################################################
# Students: Fill in missing code (...) and comment or remove the next line
raise NotImplementedError("Exercise: compute single neuron ISIs")
#############################################################################
# Extract the spike times for the specified neuron
single_neuron_spikes = ...
# Compute the ISIs for this set of spikes
# Hint: the function np.diff computes discrete differences along an array
isis = ...
return isis
# Uncomment the following lines when you are ready to test your function
# single_neuron_isis = compute_single_neuron_isis(spike_times, neuron_idx=283)
# plt.hist(single_neuron_isis, bins=50, histtype="stepfilled")
# plt.axvline(single_neuron_isis.mean(), color="orange", label="Mean ISI")
# plt.xlabel("ISI duration (s)")
# plt.ylabel("Number of spikes")
# plt.legend()
# [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W1D1_ModelTypes/solutions/W1D1_Tutorial1_Solution_4792dbfa.py)
#
# *Example output:*
#
# <img alt='Solution hint' align='left' width=558 height=414 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W1D1_ModelTypes/static/W1D1_Tutorial1_Solution_4792dbfa_0.png>
#
#
# ---
#
# In general, the shorter ISIs are predominant, with counts decreasing rapidly (and smoothly, more or less) with increasing ISI. However, counts also rapidly decrease to zero with _decreasing_ ISI, below the maximum of the distribution (8-11 ms). The absence of these very low ISIs agrees with the refractory period hypothesis: the neuron cannot fire quickly enough to populate this region of the ISI distribution.
#
# Check the distributions of some other neurons. To resolve various features of the distributions, you might need to play with the value of `n_bins`. Using too few bins might smooth over interesting details, but if you use too many bins, the random variability will start to dominate.
#
# You might also want to restrict the range to see the shape of the distribution when focusing on relatively short or long ISIs. *Hint:* `plt.hist` takes a `range` argument
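# As a minimal sketch of that suggestion (the neuron index, bin count, and range
# below are arbitrary illustrative choices, not part of the original exercise):
# In[ ]:
other_neuron_isis = np.diff(spike_times[100])
plt.hist(other_neuron_isis, bins=100, range=(0, 0.25), histtype="stepfilled")
plt.xlabel("ISI duration (s)")
plt.ylabel("Number of spikes");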
# ---
#
# # Section 4: What is the functional form of an ISI distribution?
# In[ ]:
#@title Video 4: ISI distribution
from IPython.display import YouTubeVideo
video = YouTubeVideo(id='DHhM80MOTe8', width=854, height=480, fs=1)
print("Video available at https://youtube.com/watch?v=" + video.id)
video
# The ISI histograms seem to follow continuous, monotonically decreasing functions above their maxima. The function is clearly non-linear. Could it belong to a single family of functions?
#
# To motivate the idea of using a mathematical function to explain physiological phenomena, let's define a few different function forms that we might expect the relationship to follow: exponential, inverse, and linear.
# In[ ]:
def exponential(xs, scale, rate, x0):
"""A simple parametrized exponential function, applied element-wise.
Args:
xs (np.ndarray or float): Input(s) to the function.
scale (float): Linear scaling factor.
rate (float): Exponential growth (positive) or decay (negative) rate.
x0 (float): Horizontal offset.
"""
ys = scale * np.exp(rate * (xs - x0))
return ys
def inverse(xs, scale, x0):
"""A simple parametrized inverse function (`1/x`), applied element-wise.
Args:
xs (np.ndarray or float): Input(s) to the function.
scale (float): Linear scaling factor.
x0 (float): Horizontal offset.
"""
ys = scale / (xs - x0)
return ys
def linear(xs, slope, y0):
"""A simple linear function, applied element-wise.
Args:
xs (np.ndarray or float): Input(s) to the function.
slope (float): Slope of the line.
y0 (float): y-intercept of the line.
"""
ys = slope * xs + y0
return ys
# ### Interactive Demo: ISI functions explorer
#
# Here is an interactive demo where you can vary the parameters of these functions and see how well the resulting outputs correspond to the data. Adjust the parameters by moving the sliders and see how close you can get the lines to follow the falling curve of the histogram. This will give you a taste of what you're trying to do when you *fit a model* to data.
#
# "Interactive demo" cells have hidden code that defines an interface where you can play with the parameters of some function using sliders. You don't need to worry about how the code works – but you do need to **run the cell** to enable the sliders.
#
# In[ ]:
#@title
#@markdown Be sure to run this cell to enable the demo
# Don't worry about understanding this code! It's to setup an interactive plot.
single_neuron_idx = 283
single_neuron_spikes = spike_times[single_neuron_idx]
single_neuron_isis = np.diff(single_neuron_spikes)
counts, edges = np.histogram(
single_neuron_isis,
bins=50,
range=(0, single_neuron_isis.max())
)
functions = dict(
exponential=exponential,
inverse=inverse,
linear=linear,
)
colors = dict(
exponential="C1",
inverse="C2",
linear="C4",
)
@widgets.interact(
exp_scale=widgets.FloatSlider(1000, min=0, max=20000, step=250),
exp_rate=widgets.FloatSlider(-10, min=-200, max=50, step=1),
exp_x0=widgets.FloatSlider(0.1, min=-0.5, max=0.5, step=0.005),
inv_scale=widgets.FloatSlider(1000, min=0, max=3e2, step=10),
inv_x0=widgets.FloatSlider(0, min=-0.2, max=0.2, step=0.01),
lin_slope=widgets.FloatSlider(-1e5, min=-6e5, max=1e5, step=10000),
lin_y0=widgets.FloatSlider(10000, min=0, max=4e4, step=1000),
)
def fit_plot(
exp_scale=1000, exp_rate=-10, exp_x0=0.1,
inv_scale=1000, inv_x0=0,
lin_slope=-1e5, lin_y0=2000,
):
"""Helper function for plotting function fits with interactive sliders."""
func_params = dict(
exponential=(exp_scale, exp_rate, exp_x0),
inverse=(inv_scale, inv_x0),
linear=(lin_slope, lin_y0),
)
f, ax = plt.subplots()
ax.fill_between(edges[:-1], counts, step="post", alpha=.5)
xs = np.linspace(1e-10, edges.max())
for name, function in functions.items():
ys = function(xs, *func_params[name])
ax.plot(xs, ys, lw=3, color=colors[name], label=name);
ax.set(
xlim=(edges.min(), edges.max()),
ylim=(0, counts.max() * 1.1),
xlabel="ISI (s)",
ylabel="Number of spikes",
)
ax.legend()
# In[ ]:
#@title Video 5: Fitting models by hand
from IPython.display import YouTubeVideo
video = YouTubeVideo(id='uW2HDk_4-wk', width=854, height=480, fs=1)
print("Video available at https://youtube.com/watch?v=" + video.id)
video
# # Summary
#
# In this tutorial, we loaded some neural data and poked at it to understand how the dataset is organized. Then we made some basic plots to visualize (1) the average level of activity across the population and (2) the distribution of ISIs for an individual neuron. In the very last bit, we started to think about using mathematical formalisms to understand or explain some physiological phenomenon. All of this only allowed us to understand "What" the data looks like.
#
# This is the first step towards developing models that can tell us something about the brain. That's what we'll focus on in the next two tutorials.
|
import math
import numpy as np
import string
import json
import os
import sys
import logging
import boto3
import nltk
from dotenv import load_dotenv
from nltk.corpus import stopwords
from fastprogress.fastprogress import progress_bar
from nltk.tokenize import word_tokenize
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import LSTM
from tensorflow.keras.layers import Embedding
nltk.download('punkt')
nltk.download('stopwords')
chars = ''
maxlen = 60
def setup():
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
load_dotenv('.env')
def shutdown(seconds=0, system='linux'):
    # note: the parameter must not be named `os`, otherwise it shadows the os module
    if system == 'linux':
        os.system(f'sudo shutdown -h -t sec {seconds}')
    elif system == 'windows':
        os.system(f'shutdown -s -t {seconds}')
def downloadDataset():
s3 = boto3.client('s3')
bucket = os.getenv('S3_DATASET_BUCKET')
file = os.getenv('DATASET')
s3.download_file(bucket, file, file)
logging.info(f'dataset downloaded')
def prepareData(dataFile):
    with open(dataFile) as f:
        data = json.load(f)
content = list(data[x] for x in data.keys())
text = ''
for c in content:
for i in c:
text += i
logging.info(f'Corpus length: {len(text)}')
tokens = word_tokenize(text)
tokens = [w.lower() for w in tokens]
table = str.maketrans('', '', string.punctuation)
stripped = [w.translate(table) for w in tokens]
words = [word for word in stripped if word.isalpha()]
stop_words = set(stopwords.words('english'))
words = [w for w in words if not w in stop_words]
text = ''
for c in words:
text += c
text += ' '
text = text.strip()
    logging.info('Finished loading file')
return text
def prepareTrainingData(text):
global chars
step = 3
sentences = []
next_chars = []
for i in range(0, len(text) - maxlen, step):
sentences.append(text[i: i + maxlen])
next_chars.append(text[i + maxlen])
logging.info(f'Number of sequences: {len(sentences)}')
chars = sorted(list(set(text)))
logging.info(f'Unique characters: {len(chars)}')
char_indices = dict((char, chars.index(char)) for char in chars)
logging.info(f'Vectorizing text')
    x = np.zeros((len(sentences), maxlen, len(chars)), dtype=bool)
    y = np.zeros((len(sentences), len(chars)), dtype=bool)
for i, sentence in enumerate(sentences):
for t, char in enumerate(sentence):
x[i, t, char_indices[char]] = 1
y[i, char_indices[next_chars[i]]] = 1
    logging.info('Finished preparing data')
return x, y
def prepareTrainModel(x, y):
model = Sequential([
LSTM(len(chars), return_sequences=True,
input_shape=(maxlen, len(chars))),
LSTM(len(chars), return_sequences=True),
LSTM(len(chars)),
Dense(len(chars), activation='relu'),
Dropout(0.2),
Dense(len(chars), activation='softmax')
])
model.compile(loss='categorical_crossentropy',
optimizer='adam', metrics=['accuracy'])
logging.info(f'Starting to train model')
for epoch in progress_bar(range(100)):
model.fit(x, y, batch_size=128, epochs=1)
logging.info(f'Training epoch: {epoch}')
    logging.info('Finished training model')
return model
def saveModel(model):
logging.info(f'Saving model to S3')
s3 = boto3.client('s3')
file = 'wikipedia-nlp.h5'
gen = os.getenv('GENERATION')
bucket = os.getenv('S3_BUCKET')
model.save(file)
s3.upload_file(file, bucket, gen+'/'+file)
return 0
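# A minimal, untested inference sketch (not called anywhere in this script): it assumes
# the globals `chars` and `maxlen` set above and a seed string whose characters all occur
# in the training corpus, and shows how a seed is one-hot encoded with the same scheme as
# prepareTrainingData before asking the model for next-character probabilities.
def predictNextChar(model, seed_text):
    # rebuild the same char -> index lookup used during training
    char_indices = dict((char, chars.index(char)) for char in chars)
    indices_char = dict((index, char) for char, index in char_indices.items())
    x_pred = np.zeros((1, maxlen, len(chars)))
    for t, char in enumerate(seed_text[-maxlen:]):
        x_pred[0, t, char_indices[char]] = 1
    preds = model.predict(x_pred, verbose=0)[0]
    return indices_char[int(np.argmax(preds))]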
def main():
setup()
downloadDataset()
text = prepareData('wikipedia-reduced-content-dataset.json')
x, y = prepareTrainingData(text)
model = prepareTrainModel(x, y)
saveModel(model)
logging.info(f'Model training finished and file saved to s3.')
shutdown()
if __name__ == "__main__":
main()
|
from django.shortcuts import render
from django.conf import settings
import jwt, requests
from .models import User
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status, serializers
from rest_framework import permissions, generics
from .serializers import UserLoginSerializer, UserRegistrationSerializer
# Create your views here.
class UserRegistration(APIView):
permission_classes = (permissions.AllowAny,)
serializer_class = UserRegistrationSerializer
def post(self, request):
regno = request.data['regno']
try:
category = User.objects.get(regno = regno).category
        except User.DoesNotExist:
return Response('Invalid Registration Number Entered', status = 403)
serializer = self.serializer_class(data=request.data)
try:
serializer.is_valid(raise_exception = True)
serializer.save()
return Response({"email" : serializer.data["email"],"token" : serializer.data["token"], "category" : category}, status=status.HTTP_201_CREATED)
        except Exception:
return Response({"error" : "Username already exists"}, status = 403)
class UserLogin(APIView):
permission_classes = (permissions.AllowAny,)
serializer_class = UserLoginSerializer
def post(self, request):
serializer = self.serializer_class(data=request.data)
serializer.is_valid(raise_exception=True)
email = serializer.data['email']
regno = User.objects.get(email = email).regno
category = User.objects.get(regno = regno).category
return Response({"email" : serializer.data["email"],"token" : serializer.data["token"], "category" : category}, status=status.HTTP_200_OK)
|
#!/usr/bin/python3
# coding: utf-8
"""
This module is to download subtitles from Viu
"""
import re
import os
import logging
import shutil
from common.utils import Platform, get_locale, http_request, HTTPMethod, download_files
from common.subtitle import convert_subtitle, merge_subtitle_fragments
from services.service import Service
class Viu(Service):
def __init__(self, args):
super().__init__(args)
self.logger = logging.getLogger(__name__)
self._ = get_locale(__name__, self.locale)
self.subtitle_language = args.subtitle_language
self.language_list = []
self.api = {
'ott': 'https://www.viu.com/ott/{region}/index.php?area_id={area_id}&language_flag_id={language_flag_id}&r=vod/ajax-detail&platform_flag_label=web&area_id={area_id}&language_flag_id={language_flag_id}&product_id={product_id}'
}
def get_language_code(self, lang):
language_code = {
'en': 'en',
'zh': 'zh-Hans',
'zh-Hant': 'zh-Hant',
'ms': 'ms',
'th': 'th',
'id': 'id',
'my': 'my'
}
if language_code.get(lang):
return language_code.get(lang)
def get_language_list(self):
if not self.subtitle_language:
self.subtitle_language = 'zh-Hant'
self.language_list = tuple([
language for language in self.subtitle_language.split(',')])
def get_all_languages(self, data):
available_languages = tuple([self.get_language_code(
sub['code']) for sub in data])
if 'all' in self.language_list:
self.language_list = available_languages
if not set(self.language_list).intersection(set(available_languages)):
self.logger.error(
self._("\nSubtitle available languages: %s"), available_languages)
exit(0)
def download_subtitle(self):
product_id_search = re.search(r'vod\/(\d+)\/', self.url)
product_id = product_id_search.group(1)
response = http_request(
session=self.session, url=self.url, method=HTTPMethod.GET, raw=True)
match = re.search(
r'href=\"\/ott\/(.+)\/index\.php\?r=campaign\/connectwithus\&language_flag_id=(\d+)\&area_id=(\d+)\"', response)
if match:
region = match.group(1)
language_flag_id = match.group(2)
area_id = match.group(3)
else:
# region = 'sg'
# language_flag_id = '3'
# area_id = '2'
region = 'hk'
language_flag_id = '1'
area_id = '1'
meta_url = self.api['ott'].format(
region=region, area_id=area_id, language_flag_id=language_flag_id, product_id=product_id)
self.logger.debug(meta_url)
data = http_request(
session=self.session, url=meta_url, method=HTTPMethod.GET)['data']
title = data['series']['name']
if data['series']['name'].split(' ')[-1].isdecimal():
title = title.replace(
data['series']['name'].split(' ')[-1], '').strip()
season_name = data['series']['name'].split(' ')[-1].zfill(2)
else:
season_name = '01'
self.logger.info(self._("\n%s Season %s"),
title, int(season_name))
folder_path = os.path.join(
self.output, f'{title}.S{season_name}')
if os.path.exists(folder_path):
shutil.rmtree(folder_path)
episode_num = data['series']['product_total']
current_eps = data['current_product']['released_product_total']
episode_list = reversed(data['series']['product'])
if self.last_episode:
episode_list = [list(episode_list)[-1]]
self.logger.info(self._("\nSeason %s total: %s episode(s)\tdownload season %s last episode\n---------------------------------------------------------------"),
int(season_name), current_eps, int(season_name))
else:
if current_eps != episode_num:
self.logger.info(self._("\nSeason %s total: %s episode(s)\tupdate to episode %s\tdownload all episodes\n---------------------------------------------------------------"),
int(season_name), episode_num, current_eps)
else:
self.logger.info(self._("\nSeason %s total: %s episode(s)\tdownload all episodes\n---------------------------------------------------------------"),
int(season_name), current_eps)
languages = set()
subtitles = []
for episode in episode_list:
episode_name = str(episode['number']).zfill(2)
episode_url = re.sub(r'(.+product_id=).+', '\\1',
meta_url) + episode['product_id']
file_name = f'{title}.S{season_name}E{episode_name}.WEB-DL.{Platform.VIU}.vtt'
self.logger.info(self._("Finding %s ..."), file_name)
episode_data = http_request(session=self.session,
url=episode_url, method=HTTPMethod.GET)['data']['current_product']['subtitle']
self.get_all_languages(episode_data)
subs, lang_paths = self.get_subtitle(
episode_data, folder_path, file_name)
subtitles += subs
languages = set.union(languages, lang_paths)
download_files(subtitles)
display = True
for lang_path in sorted(languages):
if 'tmp' in lang_path:
merge_subtitle_fragments(
folder_path=lang_path, file_name=os.path.basename(lang_path.replace('tmp_', '')), lang=self.locale, display=display)
display = False
convert_subtitle(folder_path=lang_path, lang=self.locale)
convert_subtitle(folder_path=folder_path,
platform=Platform.VIU, lang=self.locale)
def get_subtitle(self, data, folder_path, file_name):
lang_paths = set()
subtitles = []
for sub in data:
self.logger.debug(sub['code'])
sub_lang = self.get_language_code(sub['code'])
if sub_lang in self.language_list:
subtitle = dict()
if len(self.language_list) > 1:
lang_folder_path = os.path.join(folder_path, sub_lang)
else:
lang_folder_path = folder_path
subtitle_file_name = file_name.replace(
'.vtt', f'.{sub_lang}.vtt')
subtitle['url'] = sub['subtitle_url'].replace('\\/', '/')
subtitle['segment'] = False
if 'second_subtitle_url' in sub and sub['second_subtitle_url']:
lang_folder_path = os.path.join(
lang_folder_path, f"tmp_{subtitle_file_name.replace('.vtt', '.srt')}")
subtitle['segment'] = True
subtitle['name'] = subtitle_file_name
subtitle['path'] = lang_folder_path
subtitles.append(subtitle)
if 'second_subtitle_url' in sub and sub['second_subtitle_url']:
second_subtitle = dict()
second_subtitle['segment'] = True
second_subtitle['url'] = sub['second_subtitle_url'].replace(
'\\/', '/')
second_subtitle['name'] = subtitle_file_name
second_subtitle['path'] = lang_folder_path
subtitles.append(second_subtitle)
lang_paths.add(lang_folder_path)
os.makedirs(lang_folder_path,
exist_ok=True)
return subtitles, lang_paths
def main(self):
self.get_language_list()
self.download_subtitle()
|
# Copyright 2021 Uday Vidyadharan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import config
from config import KEY_SELF_HOSTING_LIMITS
MAX_VIDEO_RESOLUTION_WIDTH = 3840
MAX_VIDEO_RESOLUTION_HEIGHT = 2160
if config.config[KEY_SELF_HOSTING_LIMITS]:
MAX_DESCRIPTION_LENGTH = 60
MAX_VIDEOS_PER_TEAM = 100
MAX_VIDEO_SIZE_MB = 500
MAX_VIDEO_SIZE_BYTES = MAX_VIDEO_SIZE_MB * 1000 * 1000
MAX_VIDEO_LENGTH_SECONDS = 300
MAX_FRAMES_PER_VIDEO = 5000
MAX_VIDEOS_TRACKING_PER_TEAM = 5
MAX_BOUNDING_BOX_PER_FRAME = 20
MAX_DATASETS_PER_TEAM = 50
else:
MAX_DESCRIPTION_LENGTH = 30
MAX_VIDEOS_PER_TEAM = 50
MAX_VIDEO_SIZE_MB = 100
MAX_VIDEO_SIZE_BYTES = MAX_VIDEO_SIZE_MB * 1000 * 1000
MAX_VIDEO_LENGTH_SECONDS = 120
MAX_FRAMES_PER_VIDEO = 1000
MAX_VIDEOS_TRACKING_PER_TEAM = 3
MAX_BOUNDING_BOX_PER_FRAME = 10
MAX_DATASETS_PER_TEAM = 20
|
# coding=utf-8
"""
Core Commands plugin for DecoraterBot.
"""
import traceback
import time
import discord
from discord.ext import commands
from DecoraterBotUtils.BotErrors import CogUnloadError
from DecoraterBotUtils.utils import *
class CoreCommands:
"""
Core Commands class for DecoraterBot.
"""
def __init__(self):
self.corecommands_text = PluginTextReader(
file='corecommands.json')
@commands.command(name='uptime', pass_context=True, no_pm=False)
async def uptime_command(self, ctx):
"""
Command.
"""
if ctx.message.channel.id in ctx.bot.ignoreslist["channels"]:
return
if ctx.message.author.id in ctx.bot.banlist['Users']:
return
else:
stop = time.time()
seconds = stop - ctx.bot.uptime_count_begin
days = int(((seconds / 60) / 60) / 24)
hours = str(int((seconds / 60) / 60 - (days * 24)))
minutes = str(int((seconds / 60) % 60))
seconds = str(int(seconds % 60))
days = str(days)
time_001 = str(self.corecommands_text['Uptime_command_data'][0]
).format(days, hours, minutes, seconds)
time_parse = time_001
try:
await ctx.bot.send_message(ctx.message.channel,
content=time_parse)
except discord.Forbidden:
return
@commands.command(name='load', pass_context=True, no_pm=True)
async def load_command(self, ctx):
"""
Command.
"""
if ctx.message.author.id == ctx.bot.BotConfig.discord_user_id:
desmod_new = ctx.message.content.lower()[len(
ctx.prefix + 'load '):].strip()
ctx.bot._somebool = False
ret = ""
if desmod_new is not None:
ctx.bot._somebool = True
try:
ret = ctx.bot.load_plugin(desmod_new)
except ImportError:
ret = str(traceback.format_exc())
if ctx.bot._somebool is True:
if ret is not None:
try:
reload_data = str(
self.corecommands_text['reload_command_data'][1]
).format(ret).replace('Reloading', 'Loading Plugin')
await ctx.bot.send_message(ctx.message.channel,
content=reload_data)
except discord.Forbidden:
await ctx.bot.BotPMError.resolve_send_message_error(
ctx)
else:
try:
msgdata = str(
self.corecommands_text['reload_command_data'][0])
message_data = msgdata + ' Loaded ' + desmod_new + '.'
await ctx.bot.send_message(ctx.message.channel,
content=message_data)
except discord.Forbidden:
await ctx.bot.BotPMError.resolve_send_message_error(
ctx)
else:
try:
await ctx.bot.send_message(ctx.message.channel,
content=str(
self.corecommands_text[
'reload_command_data'][
2]))
except discord.Forbidden:
await ctx.bot.BotPMError.resolve_send_message_error(
ctx)
else:
try:
await ctx.bot.send_message(
ctx.message.channel,
content=str(
self.corecommands_text[
'reload_command_data'
][3]).replace('reload', 'load'))
except discord.Forbidden:
await ctx.bot.BotPMError.resolve_send_message_error(
ctx)
@commands.command(name='unload', pass_context=True, no_pm=True)
async def unload_command(self, ctx):
"""
Command.
"""
if ctx.message.author.id == ctx.bot.BotConfig.discord_user_id:
desmod_new = ctx.message.content.lower()[len(
ctx.prefix + 'unload '):].strip()
ctx.bot._somebool = False
ret = ""
if desmod_new is not None:
ctx.bot._somebool = True
try:
ret = ctx.bot.unload_plugin(desmod_new)
except CogUnloadError:
ret = str(traceback.format_exc())
if ctx.bot._somebool is True:
if ret is not None:
try:
reload_data = str(
self.corecommands_text['reload_command_data'][1]
).format(ret).replace('Reloading', 'Unloading Plugin')
await ctx.bot.send_message(ctx.message.channel,
content=reload_data)
except discord.Forbidden:
await ctx.bot.BotPMError.resolve_send_message_error(
ctx)
else:
try:
msgdata = str(
self.corecommands_text['reload_command_data'][0])
message_data = msgdata + ' Unloaded ' + desmod_new +\
'.'
await ctx.bot.send_message(ctx.message.channel,
content=message_data)
except discord.Forbidden:
await ctx.bot.BotPMError.resolve_send_message_error(
ctx)
else:
try:
await ctx.bot.send_message(ctx.message.channel,
content=str(
self.corecommands_text[
'reload_command_data'][
2]))
except discord.Forbidden:
await ctx.bot.BotPMError.resolve_send_message_error(
ctx)
else:
try:
await ctx.bot.send_message(
ctx.message.channel,
content=str(
self.corecommands_text[
'reload_command_data'
][3]).replace('reload', 'unload'))
except discord.Forbidden:
await ctx.bot.BotPMError.resolve_send_message_error(
ctx)
@commands.command(name='reload', pass_context=True, no_pm=True)
async def reload_plugin_command(self, ctx):
"""
Command.
"""
if ctx.message.author.id == ctx.bot.BotConfig.discord_user_id:
desmod_new = ctx.message.content.lower()[len(
ctx.prefix + 'reload '):].strip()
ctx.bot._somebool = False
ret = ""
if desmod_new is not None:
ctx.bot._somebool = True
try:
ret = ctx.bot.reload_plugin(desmod_new)
except ImportError:
ret = str(traceback.format_exc())
if ctx.bot._somebool is True:
if ret is not None:
try:
reload_data = str(
self.corecommands_text['reload_command_data'][1]
).format(ret).replace('Reloading', 'Reloading Plugin')
await ctx.bot.send_message(ctx.message.channel,
content=reload_data)
except discord.Forbidden:
await ctx.bot.BotPMError.resolve_send_message_error(
ctx)
else:
try:
msgdata = str(
self.corecommands_text['reload_command_data'][0])
message_data = msgdata + ' Reloaded ' + desmod_new +\
'.'
await ctx.bot.send_message(ctx.message.channel,
content=message_data)
except discord.Forbidden:
await ctx.bot.BotPMError.resolve_send_message_error(
ctx)
else:
try:
await ctx.bot.send_message(
ctx.message.channel, content=str(
self.corecommands_text[
'reload_command_data'
][2]))
except discord.Forbidden:
await ctx.bot.BotPMError.resolve_send_message_error(
ctx)
else:
try:
await ctx.bot.send_message(ctx.message.channel,
content=str(
self.corecommands_text[
'reload_command_data'][3]))
except discord.Forbidden:
await ctx.bot.BotPMError.resolve_send_message_error(
ctx)
@commands.command(name='install', pass_context=True, no_pm=True)
async def install_command(self, ctx):
# TODO: finish command.
pass
@commands.command(name='uninstall', pass_context=True, no_pm=True)
async def uninstall_command(self, ctx):
# TODO: finish command.
pass
def setup(bot):
"""
DecoraterBot's Core Commands Plugin.
"""
bot.add_cog(CoreCommands())
|
#!/opt/anaconda3/bin/python
# -*- coding: utf-8 -*-
from os import listdir
from os.path import isfile
from posixpath import join
import numpy as np
from scipy.io.wavfile import write
from systems.cubic_map import cubic_map
from systems.cyclostationary import cyclostationary
from systems.freitas import freitas
from systems.gen_henon import gen_henon
from systems.gopy import gopy
from systems.granulocyte import granulocyte
from systems.henon import henon
from systems.ikeda import ikeda
from systems.izhikevich import izhikevich
from systems.logistic import logistic
from systems.lorenz import lorenz
from systems.mackey_glass import mackey_glass
from systems.noise_sine import noise_sine
from systems.random_arma import random_arma
from systems.randomwalk import randomwalk
from systems.rossler import rossler
# all scripts for dataset creation
script_path = 'systems'
files = [f for f in listdir(script_path) if isfile(join(script_path, f))]
print(files)
n = 1024 # number of points to simulate
data = np.zeros([len(files)*10-24, n+1])
# (1) generate cubic map
regimes = ['periodic', 'chaotic', 'Heagy-Hammel', 'S3', '2T', '1T']
for i, regime in enumerate(regimes):
time_series = cubic_map(length=n, regime=regime, level=0, discard=int(n/10))
data[i, :] = np.concatenate(([1], time_series), axis=0)
# (2) generate cyclostationary
m1 = 10 # number of different time series of the same system
t = np.linspace(10, 500, num=m1)
for i, t in zip(range(m1), t):
time_series = cyclostationary(length=n, t=t)
data[i+6, :] = np.concatenate(([2], time_series), axis=0)
# (3) generate freitas
level = np.linspace(0, 0.75, num=m1)
for i, level, in zip(range(m1), level):
time_series = freitas(length=n, level=level, discard=int(n/10))
data[i+16, :] = np.concatenate(([3], time_series), axis=0)
# (4) generate gen. henon
a = np.linspace(1.51, 0.89, num=m1)
b = np.linspace(0.05, 0.15, num=m1)
for i, a, b, in zip(range(m1), a, b):
time_series = gen_henon(length=n, a=a, b=b, level=0, discard=int(n/10))
data[i+26, :] = np.concatenate(([4], time_series), axis=0)
# (5) generate gopy
sigma = np.linspace(0.5, 2, num=m1)
for i, sigma in zip(range(m1), sigma):
    time_series = gopy(length=n, sigma=sigma, level=0, discard=int(n/10))
data[i+36, :] = np.concatenate(([5], time_series), axis=0)
# (6) generate granulocyte
a = np.linspace(0.1, 0.4, num=m1)
b = np.linspace(0.05, 0.2, num=m1)
c = np.linspace(5, 20, num=m1)
s = np.linspace(1, 20, num=m1)
for i, a, b, c, s in zip(range(m1), a, b, c, s):
    time_series = granulocyte(length=n, a=a, b=b,
                              c=c, s=s, level=0, discard=int(n/10))
data[i+46, :] = np.concatenate(([6], time_series), axis=0)
# (7) generate henon
a = np.linspace(1, 1.5, num=m1)
b = np.linspace(0.1, 0.5, num=m1)
for i, a, b in zip(range(5), a, b):
x, y = henon(length=n, a=a, b=b, level=0, discard=int(n/10))
data[i+56, :] = np.concatenate(([7], x), axis=0)
data[i+61, :] = np.concatenate(([7], y), axis=0)
# (8) generate ikeda
mu = np.linspace(0.51, 0.89, num=5)
for i, mu in zip(range(5), mu):
x, y = ikeda(length=n, level=0, mu=mu, discard=int(n/10))
data[i+66, :] = np.concatenate(([8], x), axis=0)
data[i+71, :] = np.concatenate(([8], y), axis=0)
# (9) generate izhikevich
a = np.linspace(0.21, 0.24, num=m1)
b = np.linspace(2, 2.5, num=m1)
c = np.linspace(-55, -57, num=m1)
d = np.linspace(-15, -17, num=m1)
for i, a, b, c, d in zip(range(m1), a, b, c, d):
time_series = izhikevich(length=n, a=a, b=b, c=c,
d=d, level=0, discard=int(n/10))
data[i+76, :] = np.concatenate(([9], time_series), axis=0)
# (10) generate logistic
r = np.linspace(3.5, 3.9, num=m1)
for i, r in zip(range(m1), r):
time_series = logistic(length=n, r=r, level=0, discard=int(n/10))
data[i+86, :] = np.concatenate(([10], time_series), axis=0)
# (11) generate lorenz
sigma = np.linspace(12, 17, num=5)
beta = np.linspace(1.5, 4.5, num=5)
rho = np.linspace(41, 49, num=5)
for i, sigma, beta, rho in zip(range(5), sigma, beta, rho):
time_series = lorenz(length=n, sigma=sigma, beta=beta,
rho=rho, discard=int(n/10))
x, y, z = time_series[:, 0], time_series[:, 1], time_series[:, 2]
data[i+96, :] = np.concatenate(([11], x), axis=0)
data[i+101, :] = np.concatenate(([11], y), axis=0)
# (12) generate mackey glass
a = np.linspace(0.3, 0.5, num=m1)
b = np.linspace(0.1, 0.4, num=m1)
c = np.linspace(10, 15, num=m1)
for i, a, b, c in zip(range(m1), a, b, c):
time_series = mackey_glass(length=n, a=a, b=b, c=c, discard=int(n/10))
data[i+106, :] = np.concatenate(([12], time_series), axis=0)
# (13) generate noise sine
mu = np.linspace(1, 2.5, num=m1)
for i, mu in zip(range(m1), mu):
time_series = noise_sine(length=n, mu=mu, level=0, discard=int(n/10))
data[i+116, :] = np.concatenate(([13], time_series), axis=0)
# (14) generate randomwalk arma
p = np.linspace(0.1, 0.9, num=m1)
theta = np.linspace(0.1, 0.9, num=m1)
for i, p, theta in zip(range(m1), p, theta):
time_series = random_arma(length=n, level=0, p=p, theta=theta, discard=int(n/10))
data[i+126, :] = np.concatenate(([14], time_series), axis=0)
# (15) generate randomwalk
for i in range(m1):
time_series = randomwalk(length=n, level=0, discard=int(n/10))
data[i+136, :] = np.concatenate(([15], time_series), axis=0)
# (16) generate rossler
a = np.linspace(0.1, 0.3, num=5)
b = np.linspace(0.1, 0.3, num=5)
c = np.linspace(4, 7, num=5)
for i, a, b, c in zip(range(5), a, b, c):
time_series = rossler(length=n, a=a, b=b, c=c, discard=int(n/10))
x, y, z = time_series[:, 0], time_series[:, 1], time_series[:, 2]
data[i+146, :] = np.concatenate(([16], x), axis=0)
data[i+151, :] = np.concatenate(([16], y), axis=0)
# save dataset to txt file
np.savetxt('data/data.txt', data, delimiter=',')
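# A quick loading sketch for the saved file (kept commented out so this script only
# writes the dataset): the first column holds the system label, the remaining columns
# the simulated time series.
# loaded = np.loadtxt('data/data.txt', delimiter=',')
# labels, series = loaded[:, 0].astype(int), loaded[:, 1:]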
|
import logging
from django.conf import settings
from django.db import transaction
from django.contrib.auth.models import User, Group, ContentType
from django.core.exceptions import PermissionDenied
from django.http import HttpRequest, HttpResponseRedirect, HttpResponse, \
HttpResponseForbidden, HttpResponseNotFound, JsonResponse, \
HttpResponseNotAllowed, HttpResponseServerError
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth.decorators import user_passes_test
from tardis.tardis_portal.auth import decorators as authz
from tardis.tardis_portal.models import Experiment, ExperimentParameter, \
DatafileParameter, DatasetParameter, ObjectACL, DataFile, \
DatafileParameterSet, ParameterName, GroupAdmin, Schema, \
Dataset, ExperimentParameterSet, DatasetParameterSet, \
License, UserProfile, UserAuthentication, Token
from tardis.tardis_portal.api import MyTardisAuthentication
import tasks
logger = logging.getLogger(__name__)
# we use the same custom tastypie Authentication class used by the core REST API
authentication = MyTardisAuthentication()
def require_authentication(f):
def wrap(*args, **kwargs):
request = args[0]
if not isinstance(request, HttpRequest):
request = args[1]
if not authentication.is_authenticated(request):
return jsend_fail_response('Unauthorized', 401, None)
return f(*args, **kwargs)
wrap.__doc__ = f.__doc__
wrap.__name__ = f.__name__
return wrap
def _jsend_response(status, message, status_code, data):
"""
Send a simple JSend-style JSON response with an HTTP status code.
https://labs.omniti.com/labs/jsend
"""
    return JsonResponse({'status': status,
                         'message': message,
                         'data': data},
                        status=status_code)
def jsend_success_response(message, status_code=200, data=None):
return _jsend_response('success', message, status_code, data)
def jsend_error_response(message, status_code, data=None):
return _jsend_response('error', message, status_code, data)
def jsend_fail_response(message, status_code, data=None):
return _jsend_response('fail', message, status_code, data)
def get_version_json(request):
from . import __version__
return JsonResponse({'version': __version__})
def stats_ingestion_timeline(include_titles=False, as_csv=False):
"""
Returns JSON or CSV summarizing title, number and size of files in all runs.
Could be used to render a Javascript timeline of ingestion.
(eg, like this: https://plot.ly/javascript/range-slider/ )
:param include_titles:
:type include_titles:
:param as_csv:
:type as_csv:
:return:
:rtype:
"""
import json
from datetime import datetime
import csv
from StringIO import StringIO
from .views import _get_paramset_by_subtype
# custom datetime formatter, vanilla json.dumps can't serialize datetimes
class DateTimeEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, datetime):
return o.isoformat()
return json.JSONEncoder.default(self, o)
trash_username = '__trashman__'
runs = []
projects = []
datafile_size_cum = 0
datafile_count_cum = 0
for e in Experiment.objects.all().order_by('end_time'):
trashed = False
for user in e.get_owners():
if user.username == trash_username:
trashed = True
break
if not trashed:
datafiles_size = e.get_size()
datafiles_count = e.get_datafiles().count()
datafile_size_cum += datafiles_size
datafile_count_cum += datafiles_count
title = ''
if include_titles:
title = e.title
row = (e.end_time, title,
datafiles_count, datafiles_size,
datafile_count_cum, datafile_size_cum)
if _get_paramset_by_subtype(e, 'illumina-sequencing-run'):
runs.append(row)
if _get_paramset_by_subtype(e, 'demultiplexed-samples'):
projects.append(row)
if as_csv:
header = 'Date,Title,Files,Size,Files(Cumulative),Size(Cumulative)'
run_csv = StringIO()
run_csvwriter = csv.writer(run_csv, delimiter=',', quotechar='"',
quoting=csv.QUOTE_NONNUMERIC)
run_csvwriter.writerow(header.split(','))
for r in runs:
run_csvwriter.writerow(r)
project_csv = StringIO()
        project_csvwriter = csv.writer(project_csv, delimiter=',', quotechar='"',
quoting=csv.QUOTE_NONNUMERIC)
project_csvwriter.writerow(header.split(','))
for p in projects:
project_csvwriter.writerow(p)
return run_csv.getvalue(), project_csv.getvalue()
else:
jdict = {'runs': runs, 'projects': projects}
return json.dumps(jdict, cls=DateTimeEncoder)
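# Note on stats_ingestion_timeline: each row in 'runs' / 'projects' is a tuple of
# (end_time, title, files, size, files_cumulative, size_cumulative), matching the
# CSV header used in the as_csv branch.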
# @authz.experiment_access_required # won't do tastypie API key auth ?
@csrf_exempt # so we can use the PUT method without a csrf_token
@require_authentication
def trash_experiment(request, experiment_id=None):
if request.method != 'PUT':
        return HttpResponseNotAllowed(['PUT'])
try:
expt = Experiment.safe.get(request.user, experiment_id)
except PermissionDenied as ex:
return jsend_fail_response('Permission denied', 401,
{'id': experiment_id})
if expt:
ct = expt.get_ct()
user_acls = ObjectACL.objects.filter(content_type=ct,
object_id=expt.id,
pluginId='django_user')
group_acls = ObjectACL.objects.filter(content_type=ct,
object_id=expt.id,
pluginId='django_group')
else:
return jsend_fail_response('Experiment %s not found' % experiment_id,
404, {'id': experiment_id})
trash_username = getattr(settings, 'TRASH_USERNAME', '__trashman__')
trash_group_name = getattr(settings, 'TRASH_GROUP_NAME', '__trashcan__')
try:
trashman = User.objects.filter(username=trash_username)[0]
except IndexError as ex:
logger.error('Cannot find ID for trash user: %s (Does it exist ? Are '
'ingestor user permissions correct ?)' % trash_username)
raise ex
try:
trashcan = Group.objects.filter(name=trash_group_name)[0]
except IndexError as ex:
logger.error('Cannot find ID for trash group: %s (Does it exist ? Are '
'ingestor user permissions correct ?)' % trash_group_name)
raise ex
acls_to_remove = []
has_trashman = False
for acl in user_acls:
if acl.entityId == trashman.id:
has_trashman = True
continue
acls_to_remove.append(acl)
has_trashcan = False
for acl in group_acls:
if acl.entityId == trashcan.id:
has_trashcan = True
continue
acls_to_remove.append(acl)
# Add ObjectACLs to experiment for trashman/trashcan
if not has_trashman:
acl = ObjectACL(content_type=ct,
object_id=expt.id,
pluginId='django_user',
entityId=trashman.id,
aclOwnershipType=ObjectACL.OWNER_OWNED,
isOwner=True,
canRead=True,
canWrite=True,
canDelete=False)
acl.save()
if not has_trashcan:
acl = ObjectACL(content_type=ct,
object_id=expt.id,
pluginId='django_group',
entityId=trashcan.id,
aclOwnershipType=ObjectACL.OWNER_OWNED,
isOwner=True,
canRead=True,
canWrite=True,
canDelete=False)
acl.save()
# remove all the non-trashman/trashcan ACLs
[acl.delete() for acl in acls_to_remove]
# ensure experiment is not publicly accessible
expt.public_access = Experiment.PUBLIC_ACCESS_NONE
expt.save()
return jsend_success_response(
        'Experiment %s moved to trash' % experiment_id, data={'id': experiment_id})
@require_authentication
@user_passes_test(lambda u: u.is_superuser)
def _delete_all_trashed(request):
try:
# tasks.delete_all_trashed_task.delay()
tasks.delete_all_trashed_task()
except Exception as e:
return jsend_fail_response('Delete operation failed. '
'Some trashed experiments were not deleted.',
500, {})
return jsend_success_response('Queued trashed Experiments for deletion',
200, {})
|
#!/usr/bin/env python3
import argparse
import os.path
import sys
import pandas as pd
import pyshark
from scapy.utils import RawPcapReader
from scapy.layers.l2 import Ether
from scapy.layers.inet import IP, UDP, TCP
from tqdm import tqdm
#--------------------------------------------------
def render_csv_row(pkt_sh, pkt_sc):
"""Write one packet entry into the CSV file.
pkt_sh is the PyShark representation of the packet
pkt_sc is a 'bytes' representation of the packet as returned from
scapy's RawPcapReader
fh_csv is the csv file handle
"""
ether_pkt_sc = Ether(pkt_sc)
try:
if ether_pkt_sc.type != 0x800:
# print('Ignoring non-IP packet')
return [pkt_sh.no,pkt_sh.time,pkt_sh.protocol,None,None,None,None,None,pkt_sh.length]
except:
return False
ip_pkt_sc = ether_pkt_sc[IP] # <<<< Assuming Ethernet + IPv4 here
proto = ip_pkt_sc.fields['proto']
if proto == 17:
udp_pkt_sc = ip_pkt_sc[UDP]
l4_payload_bytes = bytes(udp_pkt_sc.payload)
l4_proto_name = 'UDP'
l4_sport = udp_pkt_sc.sport
l4_dport = udp_pkt_sc.dport
elif proto == 6:
tcp_pkt_sc = ip_pkt_sc[TCP]
l4_payload_bytes = bytes(tcp_pkt_sc.payload)
l4_proto_name = 'TCP'
l4_sport = tcp_pkt_sc.sport
l4_dport = tcp_pkt_sc.dport
else:
# Currently not handling packets that are not UDP or TCP
# print('Ignoring non-UDP/TCP packet')
return False
# Each line of the CSV has this format
fmt = '{0}|{1}|{2}({3})|{4}|{5}:{6}|{7}:{8}|{9}|{10}'
# | | | | | | | | | | |
# | | | | | | | | | | o-> {10} L4 payload hexdump
# | | | | | | | | | o-----> {9} total pkt length
# | | | | | | | | o---------> {8} dst port
# | | | | | | | o-------------> {7} dst ip address
# | | | | | | o-----------------> {6} src port
# | | | | | o---------------------> {5} src ip address
# | | | | o-------------------------> {4} text description
# | | | o------------------------------> {3} L4 protocol
# | | o----------------------------------> {2} highest protocol
# | o--------------------------------------> {1} time
# o------------------------------------------> {0} frame number
# Example:
# 1|0.0|DNS(UDP)|Standard query 0xf3de A www.cisco.com|192.168.1.116:57922|1.1.1.1:53|73|f3de010000010000000000000377777705636973636f03636f6d0000010001
return [pkt_sh.no, # {0}
pkt_sh.time, # {1}
pkt_sh.protocol, # {2}
l4_proto_name, # {3}
# {4}
pkt_sh.source, # {5}
int(l4_sport), # {6}
pkt_sh.destination, # {7}
int(l4_dport), # {8}
            int(pkt_sh.length)]     # {9}
# print(fmt.format(pkt_sh.no, # {0}
# pkt_sh.time, # {1}
# pkt_sh.protocol, # {2}
# l4_proto_name, # {3}
# pkt_sh.info, # {4}
# pkt_sh.source, # {5}
# l4_sport, # {6}
# pkt_sh.destination, # {7}
# l4_dport, # {8}
# pkt_sh.length, # {9}
# l4_payload_bytes.hex()), # {10}
# file=fh_csv)
# return True
#--------------------------------------------------
def pcap2csv(in_pcap):
"""Main entry function called from main to process the pcap and
generate the csv file.
in_pcap = name of the input pcap file (guaranteed to exist)
    The output csv is written to scapy_output/<pcap basename>.csv.
This function walks over each packet in the pcap file, and for
each packet invokes the render_csv_row() function to write one row
of the csv.
"""
# Open the pcap file with PyShark in "summary-only" mode, since this
# is the mode where the brief textual description of the packet (e.g.
# "Standard query 0xf3de A www.cisco.com", "Client Hello" etc.) are
# made available.
print('-----Start extract {}----'.format(in_pcap))
pcap_pyshark = pyshark.FileCapture(in_pcap, only_summaries=True)
pcap_pyshark.load_packets()
pcap_pyshark.reset()
    result_columns=['frame number','time','highest protocol','L4 protocol','src ip','src port','dst ip','dst port','pkt len']
results=[]
frame_num = 0
ignored_packets = 0
# Open the pcap file with scapy's RawPcapReader, and iterate over
# each packet. In each iteration get the PyShark packet as well,
# and then call render_csv_row() with both representations to generate
# the CSV row.
for (pkt_scapy, _) in RawPcapReader(in_pcap):
try:
pkt_pyshark = pcap_pyshark.next_packet()
frame_num += 1
result_row=render_csv_row(pkt_pyshark, pkt_scapy)
if not result_row:
ignored_packets += 1
else:
results.append(result_row)
except StopIteration:
# Shouldn't happen because the RawPcapReader iterator should also
# exit before this happens.
break
pd.DataFrame(results,columns=result_columns).to_csv('scapy_output/{}.csv'.format(in_pcap.split('.')[0].split('/')[-1]),index=False)
print('{} packets read, {} packets not written to CSV'.
format(frame_num, ignored_packets))
#--------------------------------------------------
def main():
# existing_files=[]
# for root, dirs, fnames in os.walk('output/'):
# for fname in fnames:
# if '.csv' in fname:
# existing_files.append(fname.split('.csv')[0])
for root, dirs, fnames in os.walk('data/'):
for fname in tqdm(fnames):
# if fname.split('.pcapng')[0] in existing_files:
# print('{}already exists'.format(fname))
# continue
if 'pcapng' in fname:
pcap2csv(os.path.join(root,fname))
#--------------------------------------------------
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging as loggers
import numpy as np
import theano
from deepy.utils import UniformInitializer
from deepy.core.env import env
from deepy.core.tensor_conversion import neural_computation_prefer_tensor, convert_to_theano_var
logging = loggers.getLogger("deepy")
class NeuralLayer(object):
def __init__(self, name=None):
"""
Create a neural layer.
"""
self.name = name if name else self.__class__.__name__
self.input_dim = 0
self.input_dims = [0]
self.output_dim = 0
self.output_dims= [0]
self._linked_block = None
self.initialized = False
self.updates = []
self.training_updates = []
self.free_parameters = []
self.parameters = []
self.training_monitors = []
self.testing_monitors = []
self._registered_monitors = set()
self._registered_updates = set()
self._registered_training_updates = set()
self.external_inputs = []
self.external_targets = []
self.parameter_count = 0
self.epoch_callbacks = []
self.training_callbacks = []
self.testing_callbacks = []
def init(self, input_dim=0, input_dims=None, no_prepare=False):
"""
Initialize the layer.
:param no_prepare: avoid calling preparation function
"""
if self.initialized:
return
# configure input dimensions
if input_dims:
self.input_dims = input_dims
self.input_dim = input_dims[0]
else:
self.input_dim = input_dim
            self.input_dims = [input_dim]
# set default output dimension
if self.output_dim == 0:
self.output_dim = self.input_dim
self.initialized = True
# call prepare
if not no_prepare:
self.prepare()
return self
def compute(self, *inputs, **kwargs):
"""
Compute based on NeuralVariable.
:type inputs: list of NeuralVariable
:return: NeuralVariable
"""
from deepy.core.neural_var import NeuralVariable
from deepy.core.graph import graph
if type(inputs[0]) != NeuralVariable:
raise SystemError("The input of `compute` must be NeuralVar")
dims = [t.dim() for t in inputs]
if len(inputs) == 1:
self.init(input_dim=dims[0])
else:
self.init(input_dims=dims)
# Check block
if self.parameters and not self._linked_block:
self.belongs_to(graph.default_block())
# convert kwargs
train_kwargs, _, _ = convert_to_theano_var(kwargs)
output = self.compute_tensor(*[t.tensor for t in inputs], **train_kwargs)
if type(output) != list and type(output) != tuple:
return NeuralVariable(output, dim=self.output_dim)
else:
return [NeuralVariable(*item) for item in zip(output, self.output_dims)]
def prepare(self):
"""
Prepare function will be called after connected.
"""
@neural_computation_prefer_tensor
def compute_tensor(self, *args, **kwargs):
"""
Compute with tensors in Theano.
"""
raise NotImplementedError("output function of '%s' is not implemented" % self.name)
def belongs_to(self, block):
"""
Let the given block or network manage the parameters of this layer.
:param block: Block or NeuralNetwork
:return: NeuralLayer
"""
if self._linked_block:
raise SystemError("The layer {} has already blonged to {}".format(self.name, self._linked_block.name))
self._linked_block = block
block.register_layer(self)
return self
def register(self, *layers):
"""
Register inner layers.
"""
self.register_inner_layers(*layers)
def register_inner_layers(self, *layers):
for layer in layers:
self.register_parameters(*layer.parameters)
self.register_updates(*layer.updates)
self.register_training_updates(*layer.training_updates)
self.training_monitors.extend(layer.training_monitors)
self.testing_monitors.extend(layer.testing_monitors)
def register_parameters(self, *parameters):
"""
Register parameters.
"""
for param in parameters:
self.parameter_count += np.prod(param.get_value().shape)
self.parameters.extend(parameters)
def register_free_parameters(self, *free_parameters):
"""
        Register free parameters, whose values will not be learned by the trainer.
"""
return self.free_parameters.extend(free_parameters)
def register_updates(self, *updates):
"""
Register updates that will be executed in each iteration.
"""
for key, node in updates:
if key not in self._registered_updates:
self.updates.append((key, node))
self._registered_updates.add(key)
def register_training_updates(self, *updates):
"""
Register updates that will only be executed in training phase.
"""
for key, node in updates:
if key not in self._registered_training_updates:
self.training_updates.append((key, node))
self._registered_training_updates.add(key)
def register_monitors(self, *monitors):
"""
        Register monitors. Each monitor should be a tuple of (name, Theano variable).
"""
for key, node in monitors:
if key not in self._registered_monitors:
node *= 1.0 # Avoid CudaNdarray
self.training_monitors.append((key, node))
self.testing_monitors.append((key, node))
self._registered_monitors.add(key)
def register_external_inputs(self, *variables):
"""
Register external input variables.
"""
self.external_inputs.extend(variables)
def register_external_targets(self, *variables):
"""
        Register external target variables.
"""
self.external_targets.extend(variables)
def register_training_callbacks(self, *callbacks):
"""
Register callback for each iteration in the training.
"""
self.training_callbacks.extend(callbacks)
def register_testing_callbacks(self, *callbacks):
"""
Register callback for each iteration in the testing.
"""
self.testing_callbacks.extend(callbacks)
def register_epoch_callbacks(self, *callbacks):
"""
Register callback which will be called after epoch finished.
"""
self.epoch_callbacks.extend(callbacks)
def create_weight(self, input_n=1, output_n=1, label="W", initializer=None, shape=None):
if not shape:
shape = (input_n, output_n)
if not initializer:
initializer = env.default_initializer
weight = theano.shared(initializer.sample(shape).astype(env.FLOATX), name='{}_{}'.format(self.name, label))
logging.info('create param %s %s for %s', label, str(shape), self.name)
return weight
def create_bias(self, output_n=1, label="B", value=0., shape=None):
if not shape:
shape = (output_n, )
bs = np.ones(shape)
bs *= value
bias = theano.shared(bs.astype(env.FLOATX), name='{}_{}'.format(self.name, label))
logging.info('create param %s %s for %s', label, str(shape), self.name)
return bias
def create_scalar(self, name="S", value=0, dtype=env.FLOATX):
bs = np.array(0, dtype=dtype)
bs += value
v = theano.shared(bs, name='{}_{}'.format(self.name, name))
logging.info('create scalar %s', name)
return v
def create_vector(self, n, name="V", dtype=env.FLOATX):
bs = np.zeros(n, dtype=dtype)
v = theano.shared(bs, name='{}_{}'.format(self.name, name))
logging.info('create vector %s: %d', name, n)
return v
def create_matrix(self, m, n, name="M"):
matrix = theano.shared(np.zeros((m, n)).astype(env.FLOATX), name="{}_{}".format(self.name, name))
logging.info('create matrix %s: %d x %d', name, m, n)
return matrix
def activation(self, name):
from deepy.tensor.activations import get_activation
return get_activation(name)
def callback_forward_propagation(self):
pass
def set_name(self, name):
"""
Set the name of this layer.
This will be the key of saved parameters.
"""
self.name = name |
"""VerbCL Elastic."""
import multiprocessing
from typing import Any
from typing import List
from elasticsearch_dsl import analyzer
from elasticsearch_dsl import Boolean
from elasticsearch_dsl import connections
from elasticsearch_dsl import Document
from elasticsearch_dsl import Float
from elasticsearch_dsl import Integer
from elasticsearch_dsl import Text
class OpinionDocument(Document): # type: ignore
"""Court Opinion."""
opinion_id = Integer()
raw_text = Text(
analyzer=analyzer(
"alpha_stop_stem",
type="custom",
tokenizer="classic",
filter=["lowercase", "asciifolding", "stop", "snowball"],
)
)
class Index:
"""Index Name for OpinionDocument."""
name = "verbcl_opinions"
@property
def key(self) -> int:
"""Unique Key to identify one document."""
# noinspection PyTypeChecker
return self.opinion_id # type: ignore
class OpinionSentence(Document): # type: ignore
"""Single Sentence within a Court Opinion."""
opinion_id = Integer()
sentence_id = Integer()
highlight = Boolean()
count_citations = Integer()
raw_text = Text(
analyzer=analyzer(
"alpha_stop_stem",
type="custom",
tokenizer="classic",
filter=["lowercase", "asciifolding", "stop", "snowball"],
)
)
class Index:
"""Index Name for OpinionSentence."""
name = "verbcl_highlights"
def save(self, **kwargs: Any) -> bool:
"""Overloads save to implement defaut values."""
if self.highlight is None:
self.highlight = False
if self.count_citations is None:
self.count_citations = 0
return super().save(**kwargs) # type: ignore
@property
def key(self) -> str:
"""Unique key to identify one sentence."""
# noinspection PyTypeChecker
return f"{self.opinion_id}-{self.sentence_id}"
class OpinionCitationGraph(Document): # type: ignore
"""Citation of a Cited Opinion in a Citing Opinion, using a specific sentence of the Cited Opinion."""
citing_opinion_id = Integer()
cited_opinion_id = Integer()
cited_sentence_id = Integer()
verbatim = Text(
analyzer=analyzer(
"alpha_stop_stem",
type="custom",
tokenizer="classic",
filter=["lowercase", "asciifolding", "stop", "snowball"],
)
)
snippet = Text(
analyzer=analyzer(
"alpha_stop_stem",
type="custom",
tokenizer="classic",
filter=["lowercase", "asciifolding", "stop", "snowball"],
)
)
score = Float()
class Index:
"""Index Name for OpinioNCitationGraph."""
name = "verbcl_citation_graph"
def _create_connection(alias_name: str, **kwargs: Any) -> None:
"""
Add an elasticsearch instance connection to the DSL classes, using an alias.
    :param alias_name: alias under which the connection is registered
"""
connections.create_connection(alias=alias_name, **kwargs)
# Global state variables for the connection management
_class_init: bool = False
_list_aliases: List[str] = []
_default_connection: str = "default"
def verbcl_es_init(**kwargs: Any) -> str:
"""
Manages connections to ElasticSearch instance. There is one connection per process.
:param kwargs: parameters for creating an elasticsearch connection
:return: alias name for the connection to use
"""
# Initialize the connection to Elasticsearch
# Making use of elasticsearch_dsl persistence features
proc = multiprocessing.current_process()
alias_name = proc.name
global _list_aliases
if alias_name in _list_aliases:
return alias_name
_create_connection(alias_name=alias_name, **kwargs)
_list_aliases.append(alias_name)
global _class_init
if not _class_init:
# Always have a connection named "default"
if _default_connection not in _list_aliases:
_create_connection(_default_connection, **kwargs)
OpinionDocument.init()
OpinionSentence.init()
OpinionCitationGraph.init()
_class_init = True
return alias_name
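# A minimal usage sketch (connection parameters are placeholders, assuming a reachable
# Elasticsearch instance):
#
#   alias = verbcl_es_init(hosts=["localhost:9200"])
#   hits = OpinionDocument.search(using=alias).query("match", raw_text="due process").execute()
#
# Each process gets its own connection alias, so the same call is safe to repeat from
# multiprocessing workers.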
|
import tensorflow as tf
from absl import app
from absl import flags
import urllib.request
from tqdm import tqdm
import json
import tarfile
import os
print("Path at terminal when executing this file")
print(os.getcwd() + "\n")
print("This file path, relative to os.getcwd()")
print(__file__ + "\n")
print("This file full path (following symlinks)")
full_path = os.path.realpath(__file__)
print(full_path + "\n")
print("This file directory and name")
path, filename = os.path.split(full_path)
print(path + ' --> ' + filename + "\n")
print("This file directory only")
print(os.path.dirname(full_path))
from urllib.parse import urlparse
import sys
import subprocess
def main(_):
# fine-tuning training
subprocess.call([sys.executable,
"main.py",
"--mode=train_and_eval",
"--train_file_pattern=../tmp/pascal/tfrecords/tfrecords*.tfrecord",
"--val_file_pattern=../tmp/pascal/tfrecords/tfrecords*.tfrecord",
"--model_name=efficientdet-d0",
"--model_dir=../tmp/efficientdet/pascal/train_00",
"--ckpt=../tmp/efficientdet/coco2/efficientdet-d0/",
"--train_batch_size=64",
"--eval_batch_size=64 ",
"--eval_samples=1024",
"--num_examples_per_epoch=5717",
"--num_epochs=300",
"--hparams=config/pascal/pascal.yaml",
"--strategy=gpus"])
if __name__ == '__main__':
app.run(main) |
# Copyright 2004-present Facebook. All rights reserved.
# pyre-unsafe
import functools
import glob
import json
import logging
import os
import subprocess
import sys
import tempfile
import threading
from collections import namedtuple
from json.decoder import JSONDecodeError
from logging import Logger
from typing import Dict, Iterable, List, Optional, Set, Tuple, Union, cast # noqa
from .filesystem import BuckBuilder, find_root
LOG: Logger = logging.getLogger(__name__)
BuckOut = namedtuple("BuckOut", "source_directories targets_not_found")
class BuckException(Exception):
pass
class FastBuckBuilder(BuckBuilder):
def __init__(
self,
buck_root: str,
output_directory: Optional[str] = None,
buck_builder_binary: Optional[str] = None,
buck_builder_target: Optional[str] = None,
debug_mode: bool = False,
) -> None:
self._buck_root = buck_root
self._output_directory = output_directory or tempfile.mkdtemp(
prefix="pyre_tmp_"
)
self._buck_builder_binary = buck_builder_binary
self._buck_builder_target = buck_builder_target
self._debug_mode = debug_mode
self.conflicting_files = []
self.unsupported_files = []
def _get_builder_executable(self) -> str:
builder_binary = self._buck_builder_binary
if not self._debug_mode:
if builder_binary is None:
raise BuckException(
"--buck-builder-binary must be provided "
"if --buck-builder-debug is not enabled."
)
return builder_binary
target = self._buck_builder_target
if target is None:
raise BuckException(
"--buck-builder-target must be provided "
"if --buck-builder-debug is enabled."
)
binary_relative_path = (
subprocess.check_output(
[
"buck",
"build",
"--show-output",
"//tools/pyre/facebook/fb_buck_project_builder",
],
stderr=subprocess.DEVNULL,
)
.decode()
.strip()
.split(" ")[1]
)
return os.path.join(self._buck_root, binary_relative_path)
def build(self, targets: Iterable[str]) -> List[str]:
command = [
self._get_builder_executable(),
"-J-Djava.net.preferIPv6Addresses=true",
"-J-Djava.net.preferIPv6Stack=true",
"--buck_root",
self._buck_root,
"--output_directory",
self._output_directory,
] + list(targets)
if self._debug_mode:
command.append("--debug")
LOG.info("Buck builder command: `{}`".format(" ".join(command)))
with subprocess.Popen(
command, stdout=subprocess.PIPE, stderr=subprocess.PIPE
) as buck_builder_process:
# Java's logging conflicts with Python's logging, we capture the
# logs and re-log them with python's logger.
log_processor = threading.Thread(
target=self._read_stderr, args=(buck_builder_process.stderr,)
)
log_processor.daemon = True
log_processor.start()
return_code = buck_builder_process.wait()
# Wait until all stderr have been printed.
log_processor.join()
if return_code == 0:
LOG.info("Finished building targets.")
if self._debug_mode:
debug_output = json.loads(
"".join([line.decode() for line in buck_builder_process.stdout])
)
self.conflicting_files += debug_output["conflictingFiles"]
self.unsupported_files += debug_output["unsupportedFiles"]
return [self._output_directory]
else:
raise BuckException(
"Could not build targets. Check the paths or run `buck clean`."
)
def _read_stderr(self, stream: Iterable[bytes]) -> None:
for line in stream:
line = line.decode().rstrip()
if line.startswith("INFO: "):
LOG.info(line[6:])
elif line.startswith("WARNING: "):
LOG.warning(line[9:])
elif line.startswith("ERROR: "):
LOG.error(line[7:])
elif line.startswith("[WARNING:"):
# Filter away thrift warnings.
pass
else:
LOG.error(line)
class SimpleBuckBuilder(BuckBuilder):
def __init__(self, build: bool = True) -> None:
self._build = build
def build(self, targets: Iterable[str]) -> Iterable[str]:
"""
Shell out to buck to build the targets, then yield the paths to the
link trees.
"""
return generate_source_directories(targets, build=self._build)
def presumed_target_root(target):
root_index = target.find("//")
if root_index != -1:
target = target[root_index + 2 :]
target = target.replace("/...", "")
target = target.split(":")[0]
return target
# Expects the targets to be already normalized.
def _find_built_source_directories(
targets_to_destinations: Iterable[Tuple[str, str]]
) -> BuckOut:
targets_not_found = []
source_directories = []
buck_root = find_buck_root(os.getcwd())
if buck_root is None:
raise Exception("No .buckconfig found in ancestors of the current directory.")
directories = set()
for target, destination in targets_to_destinations:
directories.add((target, os.path.dirname(destination)))
for target, directory in directories:
target_name = target.split(":")[1]
discovered_source_directories = glob.glob(
os.path.join(buck_root, directory, "{}#*link-tree".format(target_name))
)
if len(discovered_source_directories) == 0:
targets_not_found.append(target)
source_directories.extend(
[
tree
for tree in discovered_source_directories
if not tree.endswith(
(
"-vs_debugger#link-tree",
"-interp#link-tree",
"-ipython#link-tree",
)
)
]
)
return BuckOut(set(source_directories), set(targets_not_found))
def _normalize(targets: List[str]) -> List[Tuple[str, str]]:
LOG.info(
"Normalizing target%s `%s`",
"s:" if len(targets) > 1 else "",
"`, `".join(targets),
)
try:
command = (
["buck", "targets", "--show-output"]
+ targets
+ ["--type", "python_binary", "python_test"]
)
targets_to_destinations = (
subprocess.check_output(command, stderr=subprocess.PIPE, timeout=600)
.decode()
.strip()
.split("\n")
) # type: List[str]
targets_to_destinations = list(filter(bool, targets_to_destinations))
# The output is of the form //target //corresponding.par
result = []
for target in targets_to_destinations:
pair = target.split(" ")
if len(pair) != 2:
pass
else:
result.append((pair[0], pair[1]))
if not result:
LOG.warning(
"Provided targets do not contain any binary or unittest targets."
)
return []
else:
LOG.info(
"Found %d buck target%s.", len(result), "s" if len(result) > 1 else ""
)
return result
except subprocess.TimeoutExpired as error:
LOG.error("Buck output so far: %s", error.stderr.decode().strip())
raise BuckException(
"Seems like `{}` is hanging.\n "
"Try running `buck clean` before trying again.".format(
# pyre-fixme: command not always defined
" ".join(command[:-1])
)
)
except subprocess.CalledProcessError as error:
LOG.error("Buck returned error: %s" % error.stderr.decode().strip())
raise BuckException(
"Could not normalize targets. Check the paths or run `buck clean`."
)
def _build_targets(targets: List[str], original_targets: List[str]) -> None:
LOG.info(
"Building target%s `%s`",
"s:" if len(original_targets) > 1 else "",
"`, `".join(original_targets),
)
command = ["buck", "build"] + targets
try:
subprocess.check_output(command, stderr=subprocess.PIPE)
LOG.warning("Finished building targets.")
except subprocess.CalledProcessError as error:
# The output can be overwhelming, hence print only the last 20 lines.
lines = error.stderr.decode().splitlines()
LOG.error("Buck returned error: %s" % "\n".join(lines[-20:]))
raise BuckException(
"Could not build targets. Check the paths or run `buck clean`."
)
def _map_normalized_targets_to_original(
unbuilt_targets: Iterable[str], original_targets: Iterable[str]
) -> List[str]:
mapped_targets = set()
for target in unbuilt_targets:
# Each original target is either a `/...` glob or a proper target.
# If it's a glob, we're looking for the glob to be a prefix of the unbuilt
# target. Otherwise, we care about exact matches.
name = None
for original in original_targets:
if original.endswith("/..."):
if target.startswith(original[:-4]):
name = original
else:
if target == original:
name = original
# No original target matched, fallback to normalized.
if name is None:
name = target
mapped_targets.add(name)
return list(mapped_targets)
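# Worked example (target names are illustrative): with unbuilt_targets = ["//tools/foo:bar"]
# and original_targets = ["//tools/..."], the glob branch matches because "//tools/foo:bar"
# starts with "//tools", so the function returns ["//tools/..."].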
@functools.lru_cache()
def find_buck_root(path: str) -> Optional[str]:
return find_root(path, ".buckconfig")
def query_buck_relative_paths(
project_paths: Iterable[str], targets: Iterable[str]
) -> Dict[str, str]:
"""Return a mapping from each absolute project path to its relative location
in the buck output directory.
This queries buck and only returns paths that are covered by `targets`."""
buck_root = find_buck_root(os.getcwd())
if buck_root is None:
LOG.error(
"Buck root couldn't be found. Returning empty analysis directory mapping."
)
return {}
target_string = " ".join(targets)
command = [
"buck",
"query",
"--json",
"--output-attribute",
".*",
# This will get only those owner targets that are beneath our targets or
# the dependencies of our targets.
f"owner(%s) ^ deps(set({target_string}))",
*project_paths,
]
LOG.info(f"Running command: {command}")
try:
owner_output = json.loads(
subprocess.check_output(command, timeout=30, stderr=subprocess.DEVNULL)
.decode()
.strip()
)
except (
subprocess.TimeoutExpired,
subprocess.CalledProcessError,
JSONDecodeError,
) as error:
raise BuckException("Querying buck for relative paths failed: {}".format(error))
results = {}
for project_path in project_paths:
for target_data in owner_output.values():
prefix = os.path.join(buck_root, target_data["buck.base_path"]) + os.sep
suffix = project_path[len(prefix) :]
if not project_path.startswith(prefix) or suffix not in target_data["srcs"]:
continue
if "buck.base_module" in target_data:
base_path = os.path.join(*target_data["buck.base_module"].split("."))
else:
base_path = target_data["buck.base_path"]
results[project_path] = os.path.join(base_path, target_data["srcs"][suffix])
# Break after the first one because there might be multiple matches.
break
return results
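# Illustrative shape of the mapping returned above (the path and target are made up,
# not real buck output):
#   query_buck_relative_paths(["/repo/project/foo/bar.py"], ["//project/foo:lib"])
#     -> {"/repo/project/foo/bar.py": "project/foo/bar.py"}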
def generate_source_directories(
original_targets: Iterable[str], build: bool
) -> Set[str]:
original_targets = list(original_targets)
targets_to_destinations = _normalize(original_targets)
targets = [pair[0] for pair in targets_to_destinations]
if build:
_build_targets(targets, original_targets)
buck_out = _find_built_source_directories(targets_to_destinations)
source_directories = buck_out.source_directories
if buck_out.targets_not_found:
if not build:
# Build all targets to ensure buck doesn't remove some link trees as we go.
_build_targets(targets, original_targets)
buck_out = _find_built_source_directories(targets_to_destinations)
source_directories = buck_out.source_directories
if buck_out.targets_not_found:
message_targets = _map_normalized_targets_to_original(
buck_out.targets_not_found, original_targets
)
raise BuckException(
"Could not find link trees for:\n `{}`.\n "
"See `{} --help` for more information.".format(
" \n".join(message_targets), sys.argv[0]
)
)
return source_directories
|
import logging
import random
import pickle
import bz2
import _pickle as cPickle
import praw
import sklearn
from fastapi import APIRouter
import pandas as pd
from pydantic import BaseModel, Field, validator
log = logging.getLogger(__name__)
router = APIRouter()
class Item(BaseModel):
"""Use this data model to parse the request body JSON."""
title: str = Field(..., example="Is Fusion nullified for the Extreme Z Awakening Event?")
body: str = Field(..., example="On JP I missed out on my chance to do SSJ3 Goku the first time so I'm doing it now. Been lucked out of rotations for most of these stages and I've noticed that for my Fusions team, LR Gogeta would NEVER fuse. I'm genuinely curious if the mechanic is nullified for the event or i'm just getting AWFUL RNG.")
# Load your pickled model here:
def decompress_pickle(file):
data = bz2.BZ2File(file, 'rb')
data = cPickle.load(data)
return data
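# Minimal sketch of how such a .pbz2 file could be produced in the first place
# (`obj` stands for any picklable object; the file name is only illustrative):
#   with bz2.BZ2File('Baseline_SGD_Model.pbz2', 'w') as f:
#       cPickle.dump(obj, f)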
model = decompress_pickle('Baseline_SGD_Model.pbz2')
with open("subreddit_list.pkl", 'rb') as file:
subreddit_list = pickle.load(file)
@router.post('/predict')
async def predict(item: Item):
"""Make baseline predictions for classification problem."""
# You can access the attributes like this:
post = item.title + ' ' + item.body
log.info(post)
    # Rank all classes with the model's decision function and
    # return the top 10 subreddit predictions for this request
preds = pd.Series(model.decision_function([post])[0])
preds.index = model.classes_
preds = preds.sort_values(ascending=False)
preds = sorted(dict(preds).items(), key=lambda x: x[1], reverse=True)
preds = [subreddit_list[x] for x,_ in preds[:10]]
return {
'title': item.title,
'body': item.body,
'prediction': preds
} |
#!/usr/bin/env python
#coding=utf-8
###############################################
# File Name: p_feature.py
# Author: Junliang Guo@USTC
# Email: [email protected]
###############################################
import sys
from scipy import sparse as sp
from scipy import io as sio
import numpy as np
from io import open
#python p_feature.py in_file out_file dict_file
feature_in = sys.argv[1]
out_p = sys.argv[2]
dic_p = sys.argv[3]
dic = {}
k = 0
with open(dic_p, 'r') as f:
for line in f:
#print line
if k == 0:
k += 1
else:
node = line.strip().split()[0]
dic[k] = node
k += 1
#print len(dic)
features = sio.loadmat(feature_in)['features']
#print features[int(dic[11])]
features = features.todense()
#print features.shape
temp_m = np.zeros([len(dic), features.shape[1]])
#print temp_m.shape
for i in xrange(temp_m.shape[0]):
temp_m[i] = features[int(dic[i + 1])]
temp_m = sp.csr_matrix(temp_m, dtype = 'double')
#print temp_m[10]
sio.savemat(out_p, {'features': temp_m})
|
"""
- (main_strat_random.py)
"""
from time import sleep
from datetime import date
from random import randrange
from tda.client import synchronous
from decimal import Decimal, setcontext, BasicContext
# slowly converting everything to the new API
from z_art.progress_report.api_progress_report import report_config
from z_art.progress_report.api_progress_report import Progress as progress
from z_art.strategy_select.api_strat_select import OrderType
from z_art.strategy_select.api_strat_select import data_syndicate
from z_art.strategy_select.api_strat_select import order_get_details
from z_art.strategy_select.api_strat_select import order_place_equity_market
# set decimal context, precision = 9, rounding = round half even
setcontext(BasicContext)
class RandomStratTypeException(TypeError):
'''Raised when there is a type error in :meth:`run_random_strat`.'''
class RandomStratValueException(ValueError):
'''Raised when there is a value error in :meth:`run_random_strat`.'''
def run_strat_random(tda_client, stocks_to_trade):
    '''
    Randomly trade stocks using the constants defined below.
    - The RANDOM strat takes a list, randomly selects one stock,
      buys, holds, sells, and repeats. The hold time is random,
      as is the wait time before buying again.
    CAUTION: this strat never wins.
    :param tda_client: The client object created by tda-api.
    :param stocks_to_trade: List of stock symbols to pick from at random.
    '''
if not isinstance(tda_client, synchronous.Client):
raise RandomStratTypeException('tda client object is required')
if not isinstance(stocks_to_trade, list):
raise RandomStratTypeException('stocks_to_trade must be a list')
TRADE_CYCLE_DEFAULT_R = 300
TRADE_CYCLES_R = 300
SHARE_QUANTITY = 1
HOLD_POSITION = 50
HOLD_TOLERANCE = 10
WAIT_TO_BUY = 10
WAIT_TOLERANCE = 5
STOP_TRYING_TO_BUY = 10
accumulated_profit = 0
config_variables = [TRADE_CYCLE_DEFAULT_R, TRADE_CYCLES_R, SHARE_QUANTITY, HOLD_POSITION, WAIT_TO_BUY, STOP_TRYING_TO_BUY]
report_config('strat_random', config_variables, report_config=True)
generated_date = date.today().strftime('%d-%b-%Y')
file_name = 'z_art/data_visual/data_dump/PROFIT_' + str(generated_date) + '.log'
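    # e.g. 'z_art/data_visual/data_dump/PROFIT_07-Mar-2021.log' (example date; the
    # format comes from the strftime('%d-%b-%Y') call above)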
f = open(file_name, 'w+')
f.write(str(0.00))
f.close()
# begin trading
while TRADE_CYCLES_R > 0:
# generate a random number, select a stock to trade, and report
random_number = randrange(len(stocks_to_trade))
symbol_to_trade = [random_symbol[1] for random_symbol in enumerate(stocks_to_trade) if random_number == random_symbol[0]]
progress.w('WE_WILL_TRADE_(' + str(symbol_to_trade[0]) + ')')
# place buy order, get details, and report
order_response = order_place_equity_market(tda_client, OrderType.BUY, symbol_to_trade[0], SHARE_QUANTITY)
order_json = order_get_details(tda_client, order_response, wait_for_fill=STOP_TRYING_TO_BUY, order_report=True)
data_syndicate(order_json, report_data=True)
# rand tolerance +/-10 seconds from hold
hold_max = HOLD_POSITION + HOLD_TOLERANCE
hold_min = HOLD_POSITION - HOLD_TOLERANCE
new_hold = randrange(hold_min, hold_max)
# sleep between buy and sell
progress.w('SLEEPING_(hold_position_(' + str(new_hold) + '_seconds))')
sleep(new_hold)
# place sell order, get details, and report
order_response = order_place_equity_market(tda_client, OrderType.SELL, symbol_to_trade[0], SHARE_QUANTITY)
order_json = order_get_details(tda_client, order_response, wait_for_fill=True, order_report=True)
profit_data = data_syndicate(order_json, report_data=True, report_lists=True, report_profit=True, return_profit=True)
# write accumulation to 'z_art/data_visual/data_dump/PROFIT_dd-mmm-yyyy.log'
for profit_tuple in profit_data:
accumulated_profit = Decimal(accumulated_profit) + Decimal(profit_tuple[1])
f = open(file_name, 'w')
f.write(str(accumulated_profit))
f.close()
        # rand tolerance +/-5 seconds from the buy-again wait
buy_again_max = WAIT_TO_BUY + WAIT_TOLERANCE
buy_again_min = WAIT_TO_BUY - WAIT_TOLERANCE
new_buy_again = randrange(buy_again_min, buy_again_max)
# sleep before we buy another stock
progress.w('SLEEPING_(' + str(new_buy_again) + 'seconds)_(cycle(' + str(TRADE_CYCLE_DEFAULT_R-TRADE_CYCLES_R+1) + ' of ' + str(TRADE_CYCLE_DEFAULT_R) + '))')
sleep(new_buy_again)
# decrease TRADE_CYCLES
TRADE_CYCLES_R = TRADE_CYCLES_R - 1
# clear profit data just in case
profit_data.clear() |
import path_magic
import unittest
import os
from function_space import FunctionSpace
from mesh import CrazyMesh
import numpy as np
import numpy.testing as npt
from inner_product import inner
from forms import Form, ExtGaussForm_0
import matplotlib.pyplot as plt
from basis_forms import BasisForm
def func(x, y):
return x + y
class TestForm0(unittest.TestCase):
"""Test case for the class of 0-forms."""
def pfun(self, x, y):
return np.sin(np.pi * x) * np.sin(np.pi * y)
# @unittest.skip
def test_discretize_simple(self):
"""Simple discretization of 0 forms."""
p_s = [(2, 2)]
n = (2, 2)
for p in p_s:
mesh = CrazyMesh(2, n, ((-1, 1), (-1, 1)), 0.0)
func_space = FunctionSpace(mesh, '0-lobatto', p)
form_0 = Form(func_space)
form_0.discretize(self.pfun)
# values at x = +- 0.5 and y = +- 0.5
ref_value = np.array((1, -1, -1, 1))
npt.assert_array_almost_equal(ref_value, form_0.cochain_local[4])
def test_gauss_projection(self):
p = (10, 10)
n = (5, 6)
mesh = CrazyMesh(2, n, ((-1, 1), (-1, 1)), 0.3)
func_space = FunctionSpace(mesh, '0-gauss', p)
form_0_gauss = Form(func_space)
form_0_gauss.discretize(self.pfun)
xi = eta = np.linspace(-1, 1, 50)
form_0_gauss.reconstruct(xi, eta)
(x, y), data = form_0_gauss.export_to_plot()
npt.assert_array_almost_equal(self.pfun(x, y), data)
def test_lobatto_projection(self):
p = (10, 10)
n = (5, 6)
mesh = CrazyMesh(2, n, ((-1, 1), (-1, 1)), 0.3)
func_space = FunctionSpace(mesh, '0-lobatto', p)
form_0 = Form(func_space)
form_0.discretize(self.pfun)
xi = eta = np.linspace(-1, 1, 20)
form_0.reconstruct(xi, eta)
(x, y), data = form_0.export_to_plot()
npt.assert_array_almost_equal(self.pfun(x, y), data)
def test_ext_gauss_projection(self):
p = (10, 10)
n = (5, 6)
mesh = CrazyMesh(2, n, ((-1, 1), (-1, 1)), 0.3)
func_space_extGau = FunctionSpace(mesh, '0-ext_gauss', p)
form_0_extG = ExtGaussForm_0(func_space_extGau)
form_0_extG.discretize(self.pfun)
xi = eta = np.linspace(-1, 1, 20)
form_0_extG.reconstruct(xi, eta)
(x, y), data = form_0_extG.export_to_plot()
npt.assert_array_almost_equal(self.pfun(x, y), data)
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/python3
# Example line: Step A must be finished before step L can begin.
edges = [(ord(x[1]) - ord('A'), ord(x[7]) - ord('A')) for x in map(lambda x: x.split(), open('input.in').readlines())]
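# For the example line above, split() yields words whose indices 1 and 7 are the step
# letters: 'A' (the prerequisite) and 'L' (the dependent step); ord(...) - ord('A')
# turns each letter into a 0-based node id.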
workers = 5
for e in edges:
print('{} → {}'.format(chr(ord('A') + e[0]),chr(ord('A') + e[1])))
class Node(object):
def __init__(self, no):
self.id = no
self.inputs = {}
self.outputs = {}
def insert_source(self, source_id, source):
self.inputs[source_id] = source
def insert_target(self, target_id, target):
self.outputs[target_id] = target
def __repr__(self):
return str({ 'in': self.inputs.keys(), 'out': self.outputs.keys(), 'id': [self.id]})
graph = {}
for l in range(ord('Z') - ord('A') + 1):
graph[l] = Node(l)
for source, target in edges:
graph[source].insert_target(target, graph[target])
graph[target].insert_source(source, graph[source])
output = []
nodes_to_insert = []
graph_len = len(graph)
while(len(output) < graph_len):
# print(len(output))
# print(len(graph))
nodes_to_insert = []
for node in graph:
# print('{} : {} → {}'.format(node, len(graph[node].inputs), len(graph[node].outputs)))
# print('{}: {}'.format(node, graph[node]))
if len(graph[node].inputs) == 0:
nodes_to_insert.append(node)
print(nodes_to_insert)
nodes_to_insert.sort()
n = nodes_to_insert[0]
if n in graph:
output.append(n)
for k in graph[n].outputs:
out = graph[n].outputs[k]
del out.inputs[n]
print("Removing {}.".format(n))
del graph[n]
result = [chr(ord('A') + x) for x in output]
print(result)
print(''.join(result))
|
### Place holder ### |
'''
Queries current running WAS stats and saves them to a file
(C) 2015 Alex Ivkin
Run using the following command:
wsadmin.[bat|sh] -lang jython -user wsadmin -password wsadmin -f collectWASPerformanceStats.py <stats_file> [-l]
You can omit -user and -password. wsadmin is in \Program Files\IBM\WebSphere\AppServer\bin\ on a default windows installation
NOTE: On Windows, due to WebSphere weirdness you have to use FORWARD slashes (/) in the output file name. Otherwise the backward slashes (\) need to be doubled or they will turn into escape sequences
Use -l after the output file name to export the list of all available performance MBeans to the output file and quit
Style note: Jython allows shortened get/set accessors for Java objects, e.g. using .list instead of getList(), or .attribute=1 instead of .setAttribute(1).
However, I try to stay with the Java-style get/set here to signify a Python object vs. a Java object.
'''
import re,sys,time,os,java
import com.ibm.websphere.pmi.stat.WSStatsHelper as WSStatsHelper
#import java.util.Locale as Locale
if len(sys.argv)==0:
print __doc__
sys.exit(1)
print "Initializing..."
# script config
scriptconfig={}
perfHash={}
perfObjects=AdminControl.queryNames('type=Perf,*').split('\n') # could be one perfmbean per WAS instance
if perfObjects is None or not perfObjects:
print "Can not retreive the performance MBean. Make sure PMI (performance metrics) is enabled on the server"
# convert to object names and hash under individual process names
for p in perfObjects:
    perfObjectName=AdminControl.makeObjectName(p) # javax.management.ObjectName. To get the full string: p.getKeyPropertyListString()
process=perfObjectName.getKeyProperty("process")
if process is None:
print "Performance object %s is not associated with a process." % p
else:
perfHash[process]=perfObjectName
#init the configs
#WSStatsHelper.initTextInfo(AdminControl.invoke_jmx(perfObjectName, 'getConfigs', [None], ['java.util.Locale']),None) # not all stats config files are in the classpath. WSStatsHelper helps init descriptions for PMIModuleConfigs that are not bundled with WAS but are added later. Second argument is None for the default locale (Locale.getDefault())
print "Performance beans found: %s" % ",".join(perfHash.keys())
# Enable PMI data using the pre-defined statistic sets.
#AdminControl.invoke_jmx (perfObjName, 'setStatisticSet', ['all'], ['java.lang.String']) #@UnusedVariable @UndefinedVariable
# Return a string with all the sub-statistics and their metrics listed recursively in a multi-line string
def getSubStatsDesc(stats,prefix,level):
ret = ""
if level:
if prefix:
prefix="%s>%s" % (prefix,stats.getName()) # keep recording sublevels
else: # dont start the first level prefix with a separator
prefix="%s" % stats.getName()
else: # dont record the root level (0) prefix
prefix=""
substats=stats.getSubStats()
if substats is not None:
for sub in substats:
subDesc=getSubStatsDesc(stats.getStats(sub.getName()),prefix,level+1)
if prefix:
name="%s>%s" % (prefix,sub.getName())
else: # dont start the first level prefix with a separator
name=sub.getName()
ret="%s\n%s>>>%s%s" % (ret," "*25,name,subDesc) #"/"*level for a simple prefix, ",".join([str(s) for s in sub.getStatisticNames()]) for the actual statistics
return ret
def clean(name): # remove substrings per the config file from the given string
if "clean" in scriptconfig.keys():
if "," in scriptconfig["clean"]['value']:
for s in scriptconfig["clean"]['value'].split(","):
name=name.replace(s,"")
else:
name=name.replace(scriptconfig["clean"]['value'],"")
return name
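# Example (assuming performance.prop contained e.g. clean=Impl,_jsp):
#   clean("SessionImpl_jspStore") -> "SessionStore"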
# Return a hash with all the sub-statistics and their values retrieved recursively
def getSubStatsHash(stats,ret,prefix):
#print prefix
if prefix:
prefix="%s>%s" % (prefix,clean(stats.getName())) # keep recording sublevels
else: # dont start the first level prefix with a separator
prefix="%s" % clean(stats.getName())
# collect all the same level statistics
for s in stats.getStatistics():
        ret[prefix+" "+clean(s.getName())]=s # hashing the stat object as is, it will have to be processed via get_value later
substats=stats.getSubStats()
if substats is not None:
for sub in substats:
allsubstats=getSubStatsHash(stats.getStats(sub.getName()),ret,prefix)
#print allsubstats
ret.update(allsubstats)
return ret
# init the configs
# querying the first available perfmbean; they all return the same set of performance metrics
configs=AdminControl.invoke_jmx(perfHash[perfHash.keys()[0]], 'getConfigs', [None], ['java.util.Locale']) # returns an array of all PmiModuleConfig objects, None for the server default locale means 'use default'
WSStatsHelper.initTextInfo(configs,None) # not all stats config files are in the classpath. WSStatsHelper helps init descriptions for PMIModuleConfigs that are not bundled with WAS but are added later. Second argument is None for the default locale (Locale.getDefault())
if len(sys.argv) > 1 and sys.argv[1]=='-l':
# list objects with stats
f_out=open(sys.argv[0],"w")
statName={}
subStats={}
statDescription={}
print "Listing modules ..."
for config in configs:
if config is not None: # record the config specs, if we have not already seen it
statDescription[config.getShortName()]=[config.getDescription(), ",".join([d.name for d in config.listAllData()])]
print "Listing objects ..."
for a in AdminControl.queryNames('*').split('\n'): #type=ConnectionPool, @UndefinedVariable
obj=AdminControl.makeObjectName(a)
process=obj.getKeyProperty("process")
subs=""
if process is None:
desc="*No process"
else:
if process not in perfHash.keys(): # desc="*No perfmbean %s" % process
process=perfHash.keys()[0] # ask the first perfmbean
config=AdminControl.invoke_jmx(perfHash[process], 'getConfig', [obj], ['javax.management.ObjectName']) # returns PmiModuleConfig object
if config is not None: # record the config specs, if we have not already seen it
desc=config.getShortName()
else:
#desc="*Blank"
continue # no use in recording non-stat objects
# now get and record all sub-statistics available under that object
stats=AdminControl.invoke_jmx(perfHash[process], 'getStatsObject', [obj,java.lang.Boolean('true')], ['javax.management.ObjectName','java.lang.Boolean']) # returns WSStats object
if stats is not None and stats.getSubStats() is not None:
subs=getSubStatsDesc(stats,"",0)
#printStats(stats,1)
statName[a]=desc
subStats[a]=subs
print "Writing performance beans descriptions..."
print >> f_out, "Available performance sources aka perfmbeans (process:name [enabled statistic set]):"
skeys=perfHash.keys()
skeys.sort()
for s in skeys:
print >> f_out, "%-25s:%s [%s]" % (s,perfHash[s],AdminControl.invoke_jmx(perfHash[s], 'getStatisticSet', [],[]))
f_out.flush()
print "Writing stat descriptions..."
print >> f_out, "\nAvailable performance modules (module-description [provided statistics]):"
skeys=statDescription.keys()
skeys.sort()
for s in skeys:
print >> f_out, "%-25s-%s [%s]" % (s,statDescription[s][0],statDescription[s][1])
f_out.flush()
print "Writing stat names..."
print >> f_out, "\nPMI Managed objects (module=full managed object name):"
skeys=statName.keys()
skeys.sort() # sort() does not return a list but does sorting in place, hence the ugly multi-liner
for s in skeys:
print >> f_out, "%-25s=%s%s" % (statName[s], s,subStats[s])
print "done."
f_out.close()
sys.exit(0)
# load config
try:
for line in open("performance.prop","r").readlines():
line=line.strip()
if not line.startswith("#") and "=" in line:
configkey=line.split("=",1)[0].strip()
mbean=line.split("=",1)[1].strip()
if mbean.find(">>>") != -1: # split in two, the latter half is a substatistics
(mbean,substat)=mbean.split(">>>")
else:
substat=""
scriptconfig[configkey]={'value':mbean,'substat':substat}
print "Loaded %s settings." % len(scriptconfig.keys())
except:
print "performance.prop can't be loaded: %s" % sys.exc_info()[0]
sys.exit(1)
# convert conf into wasobjects and create a header
WASobjects={}
#statsize={} # track the number of individual statistics in each perf object. Useful for maintaining proper CSV line in case some stats need to be skipped
namelist=[] # list is used for ordering, hashtables are unordered
configkeys=scriptconfig.keys()
configkeys.sort()
for c in configkeys:
if c == "wait" or c == "clean": # skip the control config lines, since it's not a watched stat
continue
try:
WASobjects[c]=AdminControl.makeObjectName(AdminControl.completeObjectName(scriptconfig[c]['value']))
# we can get the pmiconfig from any object (e.g perfHash[perfHash.keys()[0]]), but will try to be precise here
process=WASobjects[c].getKeyProperty("process")
if process is None:
print "No process definition for %s. Skipping..." % WASobjects[c]
continue
elif process not in perfHash.keys():
print "Metrics process %s for %s has no matching perfomance bean to pull from. Skipping..." % (process,WASobjects[c])
continue
statconfig=AdminControl.invoke_jmx(perfHash[process], 'getConfig', [WASobjects[c]], ['javax.management.ObjectName'])
#WSStatsHelper.initTextInfo([statconfig],None) # not all stats config files are in the classpath. WSStatsHelper helps init descriptions for PMIModuleConfigs that are not bundled with WAS but are added later. Second argument is None for the default locale (Locale.getDefault())
if statconfig is None:
print "Empty stat config for %s. Skipping..." % c
continue
#statsize[c]=0
stats=AdminControl.invoke_jmx(perfHash[process], 'getStatsObject', [WASobjects[c],java.lang.Boolean('true')], ['javax.management.ObjectName','java.lang.Boolean'])
if stats is None:
print "No stats found for %s" % c
continue
if scriptconfig[c]['substat']: # get the substats
if stats.getStats(scriptconfig[c]['substat']) is None:
print "No substat %s in %s" % (scriptconfig[c]['substat'], c)
else:
allsubstats=getSubStatsHash(stats.getStats(scriptconfig[c]['substat']),{},None) # recursive search
print "Found %s substats for %s in %s" % (len(allsubstats.keys()),scriptconfig[c]['substat'],c)
for s in allsubstats.keys():
namelist.append(c+" "+s)
else:
# by pooling the actual stats object, and not the conf object we are ensuring that we get only actually provided statistics, not advertised statistics
#for d in statconfig.listAllData():
for s in stats.getStatistics():
#statsize[c]+=1
namelist.append(c+" "+s.getName())
except:
print "Problem looking up %s: %s, %s. Skipping..." % (c,sys.exc_info()[0],sys.exc_info()[1])
# Simulate CSV. csv library may not be available in WAS's outdated jython
# namesort is ordered and determines the order of fields in the csv
namelist.sort()
namelist.insert(0,'Time')
namelist.insert(0,'Date')
header=",".join(namelist)
# open the output file
try:
if os.path.isfile(sys.argv[0]) and open(sys.argv[0],"r").readline().strip() == header: # check if the existing header matches the new one
print "Appending to the existing file..."
f_out=open(sys.argv[0],"a")
else:
print "Starting a new stats collection file..."
f_out=open(sys.argv[0],"w")
print>>f_out,header
except:
print "Error opening file %s\n%s" % (sys.argv[0],sys.exc_info()[1])
sys.exit(2)
# decode value based on the stat type
def get_value(s):
value="."
if str(s.getClass())=='com.ibm.ws.pmi.stat.BoundedRangeStatisticImpl': # bounded range
value=s.getCurrent()
elif str(s.getClass())=='com.ibm.ws.pmi.stat.CountStatisticImpl': # count statistics
value=s.getCount()
elif str(s.getClass())=='com.ibm.ws.pmi.stat.DoubleStatisticImpl':
value=s.getDouble()
elif str(s.getClass())=='com.ibm.ws.pmi.stat.TimeStatisticImpl': # max, min, minTime,maxTime, totalTime, sumOfSquares, delta, mean
value=s.getCount()
elif str(s.getClass())=='com.ibm.ws.pmi.stat.RangeStatisticImpl': # lowWaterMark, highWaterMark, integral, delta, mean, current
value=s.getCurrent()
elif str(s.getClass())=='com.ibm.ws.pmi.stat.AverageStatisticImpl': # max, mean,min,sumOfSquares,total
value=s.getCount()
else:
value=s.getClass() # .class works too
return value
print "Pulling Websphere statistics...Press Ctrl-C to interrupt"
while 1:
try:
statshash={}
for t in namelist: # pre-init stats
if t=="Date":
statshash[t]=time.strftime("%m/%d/%Y", time.localtime())
elif t=="Time":
statshash[t]=time.strftime("%H:%M:%S", time.localtime())
else:
statshash[t]=""
print "%s %s Collecting statistics ..." % (statshash["Date"],statshash["Time"])
for obj in WASobjects.keys(): # the sorting is not really required because the ordering in CSV is controlled by the ordering in namelist
process=WASobjects[obj].getKeyProperty("process")
if process is None:
print "No process definition for %s. Skipping..." % obj
continue
elif process not in perfHash.keys():
print "Metrics process %s for %s has no matching perfomance bean to pull from. Skipping..." % (process,obj)
continue
# pull actual statistics from the associated performance bean
stats=AdminControl.invoke_jmx(perfHash[process], 'getStatsObject', [WASobjects[obj], java.lang.Boolean ('true')], ['javax.management.ObjectName', 'java.lang.Boolean']) # second argument pulls recursive substats. returns com.ibm.websphere.pmi.stat.StatsImpl or WSStats wor stat.Stats
#statshash=dict(zip(typehash.keys()),[0]*len(typehash.keys()))
if stats is None:
print "No statistics received for %s. Skipping..." % obj
continue
if scriptconfig[obj]['substat']: # get the substats
if stats.getStats(scriptconfig[obj]['substat']) is not None:
allsubstats=getSubStatsHash(stats.getStats(scriptconfig[obj]['substat']),{},None) # recursive search
for s in allsubstats.keys():
statshash[obj+" "+s]=get_value(allsubstats[s])
else: # no substats
# print "Got %s ..." % obj # = %s..." % (obj,stats.statistics)
for s in stats.getStatistics():
statshash[obj+" "+s.name]=get_value(s)
#print statshash
print>>f_out,",".join([str(statshash[v]) for v in namelist])
except:
        # Note: printing the traceback may cause memory leaks in Python 2.1 due to circular references. See http://docs.python.org/library/sys.html
print "%s. Serious glitch working on %s: %s, %s, line %s" % (time.strftime("%m/%d/%Y %H:%M:%S", time.localtime()),obj,sys.exc_info()[0],sys.exc_info()[1],sys.exc_info()[2].tb_lineno)
time.sleep(float(scriptconfig['wait']['value'])) # collection delay, convert string to double
# take stats collection down if the websphere process is down by checking the process state. OS specific
# check if the parent process is up
def run(cmd):
''' Use Java exec command to run a script due to Jython 2.1 limitations'''
process = java.lang.Runtime.getRuntime().exec(cmd)
stdoutstream = ''
errorstream = ''
running = 1
while running:
while process.getInputStream().available(): # > 0:
stdoutstream += chr(process.getInputStream().read())
while process.getErrorStream().available(): # > 0:
errorstream += chr(process.getErrorStream().read())
try:
process.exitValue()
# OK, we're done simulating:
running = 0
#print "done..."
return (stdoutstream,errorstream)
except java.lang.IllegalThreadStateException, e:
# In case of this exception the process is still running.
#print "running..." # pass
time.sleep(0.1)
# take stats collection down if the websphere process is down by checking the process state. OS specific
if os.name=='nt': # do the following on windows
# check if the parent process is up
# there is a small bug - sys.argv[0] is the FIRST argument (i.e the real arg, not the script name) under wsadmin, but this is ok here.
# another small bug - due to the slow cycle time (time.sleep(60)) it may take up to a minute for the stats collection process to go down
ret1=run("wmic process where (commandline like '%"+sys.argv[0]+"%' and name like 'java.exe') get parentprocessid")
parentprocessid=re.search(r'\d{2,}',ret1[0]).group(0)
ret2=run("wmic process where (processid='"+parentprocessid+"') get name")
if re.match('No Instance.*',ret2[1]):
print "The parent process is dead. Going down."
sys.exit(10)
elif os.name=='posix': # to check for Mac add platform.system()=='Darwin'
pass # do nothing
|
# Copyright (c) 2016, #
# Author(s): Henko Aantjes, #
# Date: 28/07/2016 #
# All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions are met: #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# * Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer in the #
# documentation and/or other materials provided with the distribution. #
# * Neither the name of the <organization> nor the #
# names of its contributors may be used to endorse or promote products #
# derived from this software without specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND #
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED #
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE #
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY #
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; #
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND #
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT #
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS #
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #
import time
import logging
import random
import numpy as np
import matplotlib.pyplot as plt
from Tkinter import *
from WModules import h2i, i2h
import tkFont
logger = logging.getLogger()
# this file contains all tkinter widgets
###### ######## ####### ######## ## ##
## ## ## ## ## ## ## ## ##
## ## ## ## ## ## ## ##
###### ## ## ## ######## #####
## ## ## ## ## ## ## ##
## ## ## ## ## ## ## ## ##
###### ## ####### ## ## ## ##
class StorkWidget(object):
"""docstring for StorkWidget"""
def __init__(self, stork, title = "",destroy_callback = None):
super(StorkWidget, self).__init__()
self.setStorkTarget([(len(x)-12)/4 for x in stork.lines])
self.destroy_callback = destroy_callback
self.root = Tk()
self.root.protocol("WM_DELETE_WINDOW", self.destroyedByUser) # this is instead of destroying the window!
# self.root.focus()
self.root.title(stork.ID+ ": " + title)
text_w = min(max(self.plot_y_target), 220)
text_h = sum(len([y for y in self.plot_y_target if y>i*150]) for i in range(10))+4
self.txt = Text(self.root,width= text_w,height= text_h)
self.txt.pack()
for x in range(len(self.plot_y_target)):
self.txt.insert(INSERT,"0"*self.plot_y_target[x]+"\n")
def setStorkTarget(self, line_heigths):
self.plot_y_target = line_heigths
self.plot_y_out = np.zeros(len(line_heigths))
self.plot_y_out_acked = np.zeros(len(line_heigths))
def ack(self,linenumber,msg_size):
begin = "%i.%i"%(linenumber+1,self.plot_y_out_acked[linenumber])
eind = "%i.%i"%(linenumber+1,self.plot_y_out[linenumber])
if(self.txt):
self.txt.delete(begin,eind)
self.txt.insert(begin,"1"*msg_size)
self.txt.tag_add("ack", begin, eind)
self.txt.tag_config("ack", background="black", foreground="white")
self.plot_y_out_acked[linenumber] +=msg_size
def setTry(self,linenumber,msg_size):
begin = "%i.%i"%(linenumber+1,self.plot_y_out_acked[linenumber])
eind = "%i.%i"%(linenumber+1,self.plot_y_out_acked[linenumber]+msg_size)
end_of_line = "%i.%i"%(linenumber+1,self.plot_y_target[linenumber])
if(self.txt):
self.txt.delete(begin,end_of_line)
self.txt.insert(begin,"0"*(self.plot_y_target[linenumber]-
self.plot_y_out_acked[linenumber]-msg_size))
self.txt.insert(begin,"X"*msg_size)
self.txt.tag_add("try", begin, eind)
self.txt.tag_config("try", background="yellow", foreground="grey")
self.plot_y_out[linenumber] = self.plot_y_out_acked[linenumber]
self.plot_y_out[linenumber] +=msg_size
def destroy(self):
try:
self.txt = None
self.root.destroy()
except Exception as e:
pass
def destroyedByUser(self):
self.txt = None
self.destroy()
if(self.destroy_callback):
self.destroy_callback()
## ## ######## ## ## ###### ## ## ######## ###### ## ##
### ### ## ### ### ## ## ## ## ## ## ## ## ##
#### #### ## #### #### ## ## ## ## ## ## ##
## ### ## ###### ## ### ## ## ######### ###### ## #####
## ## ## ## ## ## ## ## ## ## ## ##
## ## ## ## ## ## ## ## ## ## ## ## ## ##
## ## ######## ## ## ###### ## ## ######## ###### ## ##
class MemCheckWidget(object):
"""docstring for MemCheckWidget"""
def __init__(self, wispRam, title = "", destroy_callback = None):
super(MemCheckWidget, self).__init__()
memchecks = wispRam.memChecks
self.setMemCheckTarget([memchecks[x]["length"] for x in sorted(memchecks)])
self.addresses = [h2i(x) for x in sorted(memchecks)]
self.root = Tk()
self.root.protocol("WM_DELETE_WINDOW", self.destroyedByUser)
self.destroy_callback = destroy_callback
# self.root.focus()
self.root.title(wispRam.ID+ ": " + title)
self.root.geometry('+%d-%d' % ( 20, 20))
text_w = min(max(self.plot_y_target), 250)+5
text_h = sum(len([y for y in self.plot_y_target if y>i*220]) for i in range(10))+2
self.txt = Text(self.root,width= text_w,height= text_h)
self.txt.pack(fill= BOTH, expand = True)
for x in range(len(self.plot_y_target)):
self.txt.insert(INSERT,i2h(self.addresses[x])+" " +"?"*self.plot_y_target[x]+"\n")
def setMemCheckTarget(self, line_widths):
self.plot_y_target = line_widths
def getLineNumber(self, address):
for a in range(len(self.addresses))[::-1]:
if(address >= self.addresses[a]):
return a
else:
            raise NameError('Address ' + i2h(address) + ' not found in {}'.format(' '.join([i2h(x) for x in self.addresses])))
def getOffsetInWords(self, address):
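        # Map a byte address to a column in the Text widget: subtract the line's base
        # address, divide by 2 to go from bytes to 16-bit words, then add 5 to skip the
        # address label written at the start of each line (this assumes i2h() renders
        # 4 hex characters, followed by the separating space inserted above).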
return (address - self.addresses[self.getLineNumber(address)])/2 + 5
def ack(self,address,size_in_words):
linenumber = self.getLineNumber(address)
begin = "%i.%i"%(linenumber+1,self.getOffsetInWords(address))
eind = "%i.%i"%(linenumber+1,self.getOffsetInWords(address) + size_in_words)
if(self.txt):
self.txt.delete(begin,eind)
self.txt.insert(begin,"$"*size_in_words)
tag = "ack{}".format(random.choice("abcdefghijklmnopqrstuvwxyz"))
self.txt.tag_add(tag, begin, eind)
self.txt.tag_config(tag, background="green", foreground="black")
def nack(self,address,size_in_words):
linenumber = self.getLineNumber(address)
begin = "%i.%i"%(linenumber+1,self.getOffsetInWords(address))
eind = "%i.%i"%(linenumber+1,self.getOffsetInWords(address) + size_in_words)
if(self.txt):
self.txt.delete(begin,eind)
self.txt.insert(begin,"0"*size_in_words)
tag = "nack{}".format(random.choice("abcdefghijklmnopqrstuvwxyz"))
self.txt.tag_add(tag, begin, eind)
self.txt.tag_config(tag, background="red", foreground="white")
def chop(self,address,size_in_words):
linenumber = self.getLineNumber(address)
begin = "%i.%i"%(linenumber+1,self.getOffsetInWords(address))
eind = "%i.%i"%(linenumber+1,self.getOffsetInWords(address) + size_in_words)
if(self.txt):
self.txt.delete(begin,eind)
self.txt.insert(begin,"-"*size_in_words)
tag = "chop{}".format(random.choice("abcdefghijklmnopqrstuvwxyz"))
self.txt.tag_add(tag, begin, eind)
self.txt.tag_config(tag, background="brown", foreground="white")
def dontcare(self,address,size_in_words):
linenumber = self.getLineNumber(address)
begin = "%i.%i"%(linenumber+1,self.getOffsetInWords(address))
eind = "%i.%i"%(linenumber+1,self.getOffsetInWords(address) + size_in_words)
if(self.txt):
self.txt.delete(begin,eind)
self.txt.insert(begin,"F"*size_in_words)
tag = "dontcare{}".format(random.choice("abcdefghijklmnopqrstuvwxyz"))
self.txt.tag_add(tag, begin, eind)
self.txt.tag_config(tag, background="black", foreground="white")
def send(self,address,size_in_words):
linenumber = self.getLineNumber(address)
begin = "%i.%i"%(linenumber+1,self.getOffsetInWords(address))
eind = "%i.%i"%(linenumber+1,self.getOffsetInWords(address) + size_in_words)
if(self.txt):
self.txt.delete(begin,eind)
self.txt.insert(begin,"?"*size_in_words)
tag = "sended{}".format(random.choice("abcdefghijklmnopqrstuvwxyz"))
self.txt.tag_add(tag, begin, eind)
self.txt.tag_config(tag, background="yellow", foreground="black")
def destroy(self):
try:
self.root.destroy()
except Exception as e:
pass
def destroyedByUser(self):
self.txt = None
if(self.destroy_callback):
self.destroy_callback()
self.destroy()
#### ## ## ## ## ######## ## ## ######## ####### ######## ## ##
## ### ## ## ## ## ### ## ## ## ## ## ## ## ##
## #### ## ## ## ## #### ## ## ## ## ## ## ####
## ## ## ## ## ## ###### ## ## ## ## ## ## ######## ##
## ## #### ## ## ## ## #### ## ## ## ## ## ##
## ## ### ## ## ## ## ### ## ## ## ## ## ##
#### ## ## ### ######## ## ## ## ####### ## ## ##
class InventoryWidget(object):
"""docstring for InventoryWidget"""
def __init__(self, root, text_w = 250, text_h = 20):
super(InventoryWidget, self).__init__()
self.taglist = dict()
self.txt = Text(root,width= text_w,height= text_h)
self.txt.pack(side=LEFT, fill=BOTH, expand=True)
self.S = Scrollbar(root)
self.S.pack(side=RIGHT, fill=Y)
self.S.config(command=self.txt.yview)
self.txt.config(yscrollcommand=self.S.set)
self.updatetxt("no tags seen yet")
def updatetxt(self, text):
if(self.txt):
self.txt.delete("1.0",END)
self.txt.insert(INSERT,text)
self.txt.update_idletasks()
def showTagsInTextWidget(self, tags, EPCLength = 8):
updatespeed = 0.2
epcs_this_round = set()
if len(tags):
for tag in tags:
epc = tag['EPC-96'][0:min(8,EPCLength)]
epc += ('----' + tag['EPC-96'][5*4:6*4]) if EPCLength ==6*4 else ''
if epc not in self.taglist:
self.taglist[epc] = tag['TagSeenCount'][0]
self.taglist[epc] += tag['TagSeenCount'][0]*updatespeed - self.taglist[epc]*updatespeed
epcs_this_round |= set({epc})
for tagepc in epcs_this_round ^ set( self.taglist.keys() ):
self.taglist[tagepc] *=1-updatespeed
text = " Tag epc "+ ' '*max(EPCLength-8,0)+"| visibility \n"
for tag in self.taglist:
text += tag+ (" | %5.2f "%self.taglist[tag]) + 'x'*int(2*self.taglist[tag])+"\n"
self.updatetxt(text)
def getBestTag(self):
for tag in self.taglist:
if (self.taglist[tag] == max(self.taglist.values())):
return tag
else:
return None
def getGoodTags(self, threshold = 1.0):
return set([tag[:4] for tag in self.taglist if self.taglist[tag]>threshold])
def destroy(self):
if(self.txt):
self.txt.destroy()
self.txt= None
## ## ######## ######## ########
## ## ## ## ## ## ## ##
## ## ## ## ## ## ## ##
## ## ## ######## ###### ########
## ## ## ## ## ## ##
## ## ## ## ## ## ##
### ### ## ## ######## ##
class WrepWidget(object):
"""docstring for WrepWidget"""
PAST = -1 # to say this state (button) has been active before
FUTURE = 0 # to say this state (button) has not been active before
CURRENT = 1 # to say this state (button) is active now
def __init__(self,wispstates_LOT, destroy_callback = None):
super(WrepWidget, self).__init__()
self.wispstates_LOT = wispstates_LOT
self.destroy_callback = destroy_callback
self.wwindow = Tk()
self.wisprows = dict()
self.wwindow.protocol("WM_DELETE_WINDOW", self.destroyedByUser) # do something if user closes the window
self.wwindow.geometry('-%d+%d' % ( 20, 50))
def addWisp(self,wisp):
wisprow = Frame(self.wwindow)
self.labelt = Label(wisprow, text=' WISP: ' + wisp.ID+ " ")
self.labelt.pack(side=LEFT)
wisprow.wispstatebuttons = []
for state in wisp.getStates():
txt = self.wispstates_LOT[state]
wisprow.wispstatebuttons.append({'button' : Button(wisprow, text=txt), 'state': state, 'active': self.FUTURE,})
wisprow.wispstatebuttons[-1]['button'].pack(side=LEFT)
wisprow.pack(fill=X)
self.wisprows[wisp.ID] = wisprow
def setState(self, wispID, newState, time):
if(self.wwindow):
# reset old coloring
for WSB in self.wisprows[wispID].wispstatebuttons:
if WSB['active'] is self.CURRENT :
WSB['button'].configure(bg = 'forest green')
WSB['active'] = self.PAST
# catch the current state
# difficulty: states are not unique, so find out which of them is the next.
# solution: pick the first 'clean' one (button without past activity) or the last one
for WSB in self.wisprows[wispID].wispstatebuttons:
if WSB['state'] is newState:
if WSB['active'] is self.PAST:
selected_button = WSB
else:
WSB['button'].configure(bg = 'red2')
b_text = WSB['button'].config('text')[-1]
WSB['button'].config(text=b_text+ "\n%2.3f"%(time))
WSB['active'] = self.CURRENT
break # don't check any next buttons anymore
else: # if there was no break in the for loop, execute this
selected_button['button'].configure(bg = 'red2')
b_text = selected_button['button'].config('text')[-1]
selected_button['button'].config(text=b_text+ "\n%2.3f"%(time))
selected_button['active'] = self.CURRENT
self.wwindow.update_idletasks()
# self.wwindow.focus()
    # fetch the widget text for a specific wispID
def toString(self, wispID):
# get the buttons, join with '\n', but first flatten out the button txt ( = replace '\n' with ' ')
# last step: wrap into brackets and attach 'WrepWidget '-txt
return "WrepWidget [" +', '.join( [(' '.join(WSB['button'].config('text')[-1].split('\n'))) for WSB in self.wisprows[wispID].wispstatebuttons]) + "]"
def destroy(self):
try:
self.wwindow.destroy()
except Exception as e:
pass
def destroyedByUser(self):
if(self.destroy_callback):
self.destroy_callback()
self.destroy()
self.wwindow = None
###### ####### ## ## ######## ######## ####### ##
## ## ## ## ### ## ## ## ## ## ## ##
## ## ## #### ## ## ## ## ## ## ##
## ## ## ## ## ## ## ######## ## ## ##
## ## ## ## #### ## ## ## ## ## ##
## ## ## ## ## ### ## ## ## ## ## ##
###### ####### ## ## ## ## ## ####### ########
class IOControlWidget(object):
"""docstring for IOControlWidget"""
def __init__(self, buttonlist, optionlist, destroy_callback, terminal_callback, pause_callback, buttoncolors = {}):
super(IOControlWidget, self).__init__()
self.destroy_callback = destroy_callback
self.terminal_callback = terminal_callback
self.pause_callback = pause_callback
self.optionlist = optionlist
self.io = Tk()
self.io.protocol("WM_DELETE_WINDOW", self.destroy) # do something if user closes the window
# self.io.focus()
self.io.title("hahahaha :D")
self.IOF = tkFont.nametofont("TkFixedFont")
# setup a list of buttons
self.buttoncollumn = Frame(self.io)
self.buttons = {key:Button(self.buttoncollumn, text=key[2:], command=buttonlist[key],font=self.IOF) for key in sorted(buttonlist)}
[self.buttons[b].pack(fill = X) for b in sorted(self.buttons.keys())]
[self.buttons[key].config(bg = buttoncolors[key][0],fg = buttoncolors[key][1]) for key in sorted(buttoncolors)]
self.buttoncollumn.pack(side = LEFT,fill = X,padx = 10,pady = 10)
# setup a list of options
self.optioncollumn = Frame(self.io)
        self.selected_option = {key[2:]: StringVar() for key in sorted(optionlist)} # not using the first 2 characters of the key, for referring to the selected option
        [self.selected_option[key[2:]].set(optionlist[key][0]) for key in sorted(optionlist)] # not using the first 2 characters of the key, for referring to the selected option
self.optionrows = {key:Frame(self.optioncollumn) for key in sorted(optionlist)}
self.optionlabels = {key:Label(self.optionrows[key], text= key[2:],font=self.IOF) for key in sorted(optionlist)}
        self.options = {key:OptionMenu(self.optionrows[key],self.selected_option[key[2:]], *optionlist[key]) for key in sorted(optionlist)} # not using the first 2 characters of the key, for referring to the selected option
[self.options[o].pack(side = RIGHT,fill = BOTH,padx = 20) for o in sorted(self.options)]
[self.options[o].config( bg = "grey55") for o in sorted(self.options)]
[o.pack(side = LEFT, fill = BOTH) for o in self.optionlabels.values()]
[self.optionrows[key].pack(fill = X, padx = 5) for key in sorted(self.optionrows)]
self.optioncollumn.pack(side = LEFT)
self.terminalrow = Frame(self.io)
self.terminal_label = Label(self.terminalrow, text= "WISPTERM",font=self.IOF, bg = "dim gray")
self.terminal_label.pack(side = LEFT,padx = 5, pady = 5)
self.terminal = Entry(self.terminalrow,width= 50)
self.terminal.pack(fill = X,expand = True, padx = 5, pady = 5)
self.terminal.bind('<Return>', self.terminalInput)
self.terminal.bind('<KP_Enter>', self.terminalInput)
self.terminalrow.pack(fill= BOTH)
self.terminalrow.config(bg = "dim gray", padx = 5)
self.txt = Text(self.io,width= 100, height = 10)
self.S = Scrollbar(self.io)
self.S.pack(side=RIGHT, fill=Y)
self.S.config(command=self.txt.yview)
self.txt.config(yscrollcommand=self.S.set)
self.txt.pack(fill = BOTH,expand = True)
self.updatetxt("Initializing")
self.wispsrow = Frame(self.io)
self.wispsselection = Frame(self.wispsrow)
self.wisp_selection_label = Label(self.wispsselection, text= "Select Targets",bg = "dark green", fg = "white")
self.wisp_selection_label.pack(fill = BOTH, expand = True)
self.wisplist = Listbox(self.wispsselection,selectmode=EXTENDED,height= 5)
self.wisplist.config(width = 16)
self.wisplist.pack(fill = Y, expand = True)
self.setWispSelection(["0302"])
self.pause_button = Button(self.wispsselection, text="PAUSE", command=self.pauseButtonPressed,font=self.IOF, fg = 'white',bg = "dark green")
self.pause_button.pack(fill = X, padx = 3, pady = 3)
self.wispsselection.pack(side = LEFT, fill = Y,pady = 5, padx = 5)
self.tagWidget = InventoryWidget(self.wispsrow, 100, 12)
self.wispsrow.pack(fill = BOTH)
self.wispsrow.config(bg = "dark green")
def pauseButtonPressed(self):
if(self.pause_button.config('text')[-1] == "PAUSE"):
self.pause_callback(pause = True)
self.pause_button.config(text = "RESUME")
else:
self.pause_callback(resume = True)
self.pause_button.config(text = "PAUSE")
# add a new option to the option list
def addAndSetOption(self, om_key, new_option):
if om_key in self.options:
if(new_option not in self.optionlist[om_key]):
self.options[om_key]["menu"].add_command(label = new_option, command = lambda value=new_option:self.selected_option[om_key[2:]].set(value))
self.optionlist[om_key].append(new_option)
self.selected_option[om_key[2:]].set(new_option)
else:
self.showWarning("Unknown list to add the option to!")
def updatetxt(self, text, mode = 'replace'):
if(mode == 'replace'):
self.txt.delete("1.0",END)
self.txt.insert(INSERT,text)
logger.info('IOTXT'+ mode+': ' + text)
def showWarning(self, text, mode = 'replace'):
if(mode == 'replace'):
self.txt.delete("1.0",END)
self.txt.insert(INSERT,text)
self.txt.tag_add('warning', "1.0",INSERT)
self.txt.tag_config('warning', background="red", foreground="white")
logger.info('\033[1;31mIOTXT-WARNING: ' + text + '\033[1;0m')
def deletetxt(self, nr_of_chars):
for x in range(nr_of_chars):
self.txt.delete(INSERT)
def setWispSelection(self, selection):
for item in selection:
if(item not in self.wisplist.get(0,END)):
self.wisplist.insert(0, item)
for index in range(len(self.wisplist.get(0,END))):
if(self.wisplist.get(index) in selection):
self.wisplist.selection_set(index)
else:
self.wisplist.selection_clear(index)
def getSelectedWispIDs(self):
return [self.wisplist.get(int(x)) for x in self.wisplist.curselection()]
def getSelected(self, key): # give the key without the [number/hex/char] and space (= without the first 2 characters)
return self.selected_option[key].get()
def terminalInput(self, key):
if self.terminal_callback:
userinput = self.terminal.get()
if(userinput):
if(userinput[-1:].lower() in {'x','q','c'}):
# clear terminal if user types something rubbish
self.terminal.delete(0, END)
elif(userinput[-1:].lower() in {'\\',';'}):
# clear the terminal if user ends a command properly
self.terminal.delete(0, END)
# try to execute the command
self.terminal_callback(command = userinput[:-1])
else:
# try to execute the command, don't clear the terminal
self.terminal_callback(command = userinput)
def update(self):
if(self.io):
self.io.update()
return True
def destroy(self):
self.tagWidget.destroy()
self.io.destroy()
self.io = None
self.destroy_callback()
|
# Data export; this can also be run directly from the terminal
from scrapy import cmdline
# Export the data to JSON
# cmdline.execute('scrapy crawl douban_spider -o output.json'.split())
# Export the data to CSV format
cmdline.execute('scrapy crawl douban_spider -o output.csv'.split()) |
# -*- coding: utf-8 -*-
#
# test_environments.py
#
# purpose: Create a venv/conda env based on the given requirement file.
# author: Filipe P. A. Fernandes
# e-mail: ocefpaf@gmail
# web: http://ocefpaf.github.io/
# created: 14-Aug-2014
# modified: Wed 20 Aug 2014 09:56:36 PM BRT
#
# obs:
#
import os
import unittest
class RunNotebooks(unittest.TestCase):
def setUp(self):
files = []
path = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
for root, dirs, fnames in os.walk(path):
for fname in fnames:
if fname.endswith(".ipynb"):
files.append(os.path.join(root, fname))
self.files = files
def tearDown(self):
unittest.TestCase.tearDown(self)
def test_envs(self):
"""A test that would create the venv/condaenv based on a requirement
file."""
pass
def main():
unittest.main()
if __name__ == '__main__':
main()
|
# The following comments couldn't be translated into the new config version:
#tauTo3MuOutputModuleAODSIM &
import FWCore.ParameterSet.Config as cms
#
# HeavyFlavorAnalysis output modules
#
from HeavyFlavorAnalysis.Skimming.onia_OutputModules_cff import *
|
from typing import Any, List, Optional
from fastapi.exceptions import RequestValidationError
from fastapi.responses import UJSONResponse
from pydantic import EnumError, EnumMemberError, StrRegexError
from starlette.exceptions import HTTPException as StarletteHTTPException
def parse_error(err: Any, field_names: List, raw: bool = True) -> Optional[dict]:
"""
Parse single error object (such as pydantic-based or fastapi-based) to dict
:param err: Error object
    :param field_names: List of names of fields that have already been processed
:param raw: Whether this is a raw error or wrapped pydantic error
:return: dict with name of the field (or "__all__") and actual message
"""
if isinstance(err, list):
permitted_values = ""
for e in err:
if isinstance(e.exc, EnumMemberError):
permitted_values_temp = ", ".join(
[f"'{val}'" for val in e.exc.enum_values]
)
permitted_values += permitted_values_temp + " "
message = (
f"Value is not a valid enumeration member; "
f"permitted: {permitted_values}."
)
elif isinstance(err.exc, EnumError):
permitted_values = ", ".join([f"'{val}'" for val in err.exc.enum_values])
message = (
f"Value is not a valid enumeration member; "
f"permitted: {permitted_values}."
)
elif isinstance(err.exc, StrRegexError):
message = "Provided value doesn't match valid format"
else:
message = str(err.exc) or ""
error_code = 400
if isinstance(err, list):
if hasattr(err[0].exc, "code") and err[0].exc.code.startswith("error_code"):
error_code = int(err[0].exc.code.split(".")[-1])
elif hasattr(err.exc, "code") and err.exc.code.startswith("error_code"):
error_code = int(err.exc.code.split(".")[-1])
if not raw:
if len(err.loc_tuple()) == 2:
if str(err.loc_tuple()[0]) in ["body", "query"]:
name = err.loc_tuple()[1]
else:
name = err.loc_tuple()[0]
elif len(err.loc_tuple()) == 1:
if str(err.loc_tuple()[0]) == "body":
name = "__all__"
else:
name = str(err.loc_tuple()[0])
else:
name = "__all__"
else:
if isinstance(err, list):
if len(err[0].loc_tuple()) == 2:
name = str(err[0].loc_tuple()[0])
elif len(err[0].loc_tuple()) == 1:
name = str(err[0].loc_tuple()[0])
else:
name = "__all__"
else:
if len(err.loc_tuple()) == 2:
name = str(err.loc_tuple()[0])
elif len(err.loc_tuple()) == 1:
name = str(err.loc_tuple()[0])
else:
name = "__all__"
if name in field_names:
return None
if message and not any(
[message.endswith("."), message.endswith("?"), message.endswith("!")]
):
message = message + "."
return {"name": name, "message": message, "error_code": error_code}
def raw_errors_to_fields(raw_errors: List) -> List[dict]:
"""
Translates list of raw errors (instances) into list of dicts with name/msg
:param raw_errors: List with instances of raw error
:return: List of dicts (1 dict for every raw error)
"""
fields = []
for top_err in raw_errors:
if hasattr(top_err.exc, "raw_errors"):
for err in top_err.exc.raw_errors:
# This is a special case when errors happen both in request
# handling & internal validation
if isinstance(err, list):
err = err[0]
field_err = parse_error(
err,
field_names=list(map(lambda x: x["name"], fields)),
raw=True,
)
if field_err is not None:
fields.append(field_err)
else:
field_err = parse_error(
top_err,
field_names=list(map(lambda x: x["name"], fields)),
raw=False,
)
if field_err is not None:
fields.append(field_err)
return fields
async def http_exception_handler(_, exc: StarletteHTTPException) -> UJSONResponse:
"""
Handles StarletteHTTPException, translating it into flat dict error data:
* code - unique code of the error in the system
* detail - general description of the error
* fields - list of dicts with description of the error in each field
:param _:
:param exc: StarletteHTTPException instance
:return: UJSONResponse with newly formatted error data
"""
fields = getattr(exc, "fields", [])
message = getattr(exc, "detail", "Validation error")
headers = getattr(exc, "headers", None)
if message and not any(
[message.endswith("."), message.endswith("?"), message.endswith("!")]
):
message = message + "."
data = {
"error_codes": [getattr(exc, "error_code", exc.status_code)],
"message": message,
"fields": fields,
}
return UJSONResponse(data, status_code=exc.status_code, headers=headers)
async def validation_exception_handler(_, exc: RequestValidationError) -> UJSONResponse:
"""
Handles ValidationError, translating it into flat dict error data:
* code - unique code of the error in the system
* detail - general description of the error
* fields - list of dicts with description of the error in each field
:param _:
    :param exc: RequestValidationError instance
:return: UJSONResponse with newly formatted error data
"""
status_code = getattr(exc, "status_code", 400)
headers = getattr(exc, "headers", None)
fields = raw_errors_to_fields(exc.raw_errors)
if fields:
error_codes = list(set(list(map(lambda x: x["error_code"], fields))))
else:
error_codes = [getattr(exc, "error_code", status_code)]
message = getattr(exc, "message", "Validation error")
if message and not any(
[message.endswith("."), message.endswith("?"), message.endswith("!")]
):
message = message + "." # pragma: no cover
data = {"error_codes": error_codes, "message": message, "fields": fields}
return UJSONResponse(data, status_code=status_code, headers=headers)
async def not_found_error_handler(_, exc: RequestValidationError) -> UJSONResponse:
code = getattr(exc, "error_code", 404)
detail = getattr(exc, "detail", "Not found")
fields = getattr(exc, "fields", [])
headers = getattr(exc, "headers", None)
status_code = getattr(exc, "status_code", 404)
data = {"error_codes": [code], "message": detail, "fields": fields}
return UJSONResponse(data, status_code=status_code, headers=headers)
async def internal_server_error_handler(
_, exc: RequestValidationError
) -> UJSONResponse:
code = getattr(exc, "error_code", 500)
detail = getattr(exc, "detail", "Internal Server Error")
fields = getattr(exc, "fields", [])
headers = getattr(exc, "headers", None)
status_code = getattr(exc, "status_code", 500)
data = {"error_codes": [code], "message": detail, "fields": fields}
return UJSONResponse(data, status_code=status_code, headers=headers)
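def setup_exception_handlers(app):
    """Usage sketch (assumption: not part of the original module; `app` is any
    FastAPI/Starlette application created elsewhere). Registers the handlers above
    so framework exceptions are rendered with the flat
    {error_codes, message, fields} payload they produce."""
    app.add_exception_handler(StarletteHTTPException, http_exception_handler)
    app.add_exception_handler(RequestValidationError, validation_exception_handler)
    app.add_exception_handler(404, not_found_error_handler)
    app.add_exception_handler(500, internal_server_error_handler)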
|
# -*- coding: utf-8 -*-
from typing import Any, Dict, List, Type
from pyshacl.constraints.constraint_component import ConstraintComponent
from pyshacl.constraints.core.cardinality_constraints import MaxCountConstraintComponent, MinCountConstraintComponent
from pyshacl.constraints.core.logical_constraints import (
AndConstraintComponent,
NotConstraintComponent,
OrConstraintComponent,
XoneConstraintComponent,
)
from pyshacl.constraints.core.other_constraints import (
ClosedConstraintComponent,
HasValueConstraintComponent,
InConstraintComponent,
)
from pyshacl.constraints.core.property_pair_constraints import (
DisjointConstraintComponent,
EqualsConstraintComponent,
LessThanConstraintComponent,
LessThanOrEqualsConstraintComponent,
)
from pyshacl.constraints.core.shape_based_constraints import (
NodeConstraintComponent,
PropertyConstraintComponent,
QualifiedValueShapeConstraintComponent,
)
from pyshacl.constraints.core.string_based_constraints import (
LanguageInConstraintComponent,
MaxLengthConstraintComponent,
MinLengthConstraintComponent,
PatternConstraintComponent,
UniqueLangConstraintComponent,
)
from pyshacl.constraints.core.value_constraints import (
ClassConstraintComponent,
DatatypeConstraintComponent,
NodeKindConstraintComponent,
)
from pyshacl.constraints.core.value_range_constraints import (
MaxExclusiveConstraintComponent,
MaxInclusiveConstraintComponent,
MinExclusiveConstraintComponent,
MinInclusiveConstraintComponent,
)
from pyshacl.constraints.sparql.sparql_based_constraint_components import SPARQLConstraintComponent # noqa: F401
from pyshacl.constraints.sparql.sparql_based_constraints import SPARQLBasedConstraint
ALL_CONSTRAINT_COMPONENTS: List[Type[ConstraintComponent]] = [
ClassConstraintComponent,
DatatypeConstraintComponent,
NodeKindConstraintComponent,
MinCountConstraintComponent,
MaxCountConstraintComponent,
MinExclusiveConstraintComponent,
MinInclusiveConstraintComponent,
MaxExclusiveConstraintComponent,
MaxInclusiveConstraintComponent,
NotConstraintComponent,
AndConstraintComponent,
OrConstraintComponent,
XoneConstraintComponent,
MinLengthConstraintComponent,
MaxLengthConstraintComponent,
PatternConstraintComponent,
LanguageInConstraintComponent,
UniqueLangConstraintComponent,
EqualsConstraintComponent,
DisjointConstraintComponent,
LessThanConstraintComponent,
LessThanOrEqualsConstraintComponent,
NodeConstraintComponent,
PropertyConstraintComponent,
QualifiedValueShapeConstraintComponent,
ClosedConstraintComponent,
HasValueConstraintComponent,
InConstraintComponent,
SPARQLBasedConstraint,
# SPARQLConstraintComponent
# ^ ^ This one is deliberately not included in this
# list because it gets matched to shapes manually later
]
CONSTRAINT_PARAMETERS_MAP: Dict[Any, Type[ConstraintComponent]] = {
p: c for c in ALL_CONSTRAINT_COMPONENTS for p in c.constraint_parameters()
}
ALL_CONSTRAINT_PARAMETERS: List[Any] = list(CONSTRAINT_PARAMETERS_MAP.keys())
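def constraint_component_for_parameter(parameter: Any):
    """Illustrative helper (assumption: not part of pyshacl's public API): return
    the ConstraintComponent subclass whose constraint_parameters() include the
    given SHACL parameter predicate, or None if no registered component handles
    it."""
    return CONSTRAINT_PARAMETERS_MAP.get(parameter)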
|
import cv2
import Model
import torch
import torch.nn as nn
import numpy as np
from torchvision import datasets, transforms
import matplotlib.pyplot as plt
import glob
import random
import math
dir_list = glob.glob('dataset/train/*.png')
tran = transforms.ToTensor()
net = Model.DNCNN(1, 64, 3)
net = net.float()
criterion = nn.MSELoss(reduction='sum')
optimizer = torch.optim.Adam(net.parameters(), lr=0.001)
epochs = len(dir_list)
loss_arr = np.zeros((epochs,1))
psnr_arr = np.zeros((epochs,1))
cnt = 0
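# Training loop (one pass, one image per step): each grayscale training image is
# corrupted with zero-mean Gaussian noise of a randomly chosen variance in [0, 55],
# the network is trained to reconstruct the clean image, and the per-image MSE loss
# and PSNR are recorded for plotting below.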
for i in range(epochs):
optimizer.zero_grad()
im = cv2.imread(dir_list[i], 0)
row, col = im.shape
mean = 0
var = random.uniform(0,55)
sigma = var ** 0.5
gauss = np.random.normal(mean, sigma, (row, col))
gauss = gauss.reshape(row, col)
    # add the noise in float space and clip, so negative noise values do not wrap
    # around when cast back to uint8
    noisy = np.clip(im.astype(np.float64) + gauss, 0, 255).astype(np.uint8)
    # NCHW layout: the image has `row` rows (height) and `col` columns (width)
    noisy = torch.reshape(tran(noisy), (1, 1, row, col))
    gauss = torch.reshape(tran(gauss), (1, 1, row, col))
    im2 = torch.reshape(tran(im), (1, 1, row, col))
out = net(noisy.float())
loss = torch.mean(torch.pow(out - im2.float(),2)/2)
loss.backward()
loss_arr[cnt] = loss.item()
psnr_arr[cnt] = 20 * math.log10(255/math.sqrt(loss_arr[cnt]) )
optimizer.step()
print(cnt)
print(loss_arr[cnt])
print(psnr_arr[cnt])
cnt += 1
PATH = 'net2.pth'
torch.save(net.state_dict(), PATH)
plt.figure(2)
plt.plot(loss_arr, 'r')
plt.show()
plt.figure(3)
plt.plot(psnr_arr, 'b')
plt.show()
|
import logging
import os
import subprocess
# configure the root logger so the logging.debug()/logging.info() calls below are
# actually emitted (the default WARNING level would hide them)
logging.basicConfig(level=logging.DEBUG)
repo_url = os.environ.get("TIMESERIES_SERVER_REPO")
# sqlite_path = os.environ.get("TIMESERIES_SERVER_DATA_DIR") # NOT FULL FILENAME
# os.makedirs(sqlite_path, exist_ok=True)
local_repo_path = os.environ.get("TIMESERIES_SERVER_REPO_PATH")
os.makedirs(os.path.dirname(local_repo_path), exist_ok=True)
local_repo_python_entrypoint_long_fn = local_repo_path + "/timeseries_server/main.py"
SERVER_NAMES = ["run_collection_server", "run_ui_server", "run_detectors"]
# UNIT_FILE_PAYLOADS = []
# for server in SERVER_NAMES:
# UNIT_FILE_PAYLOADS.append(
# """\
# [Unit]
# Description=TimeseriesServer API Server %s
# StartLimitInterval=400
# StartLimitBurst=5
# [Service]
# Type=simple
# EnvironmentFile=/etc/environment
# WorkingDirectory=%s
# ExecStart=/usr/local/bin/poetry run python '%s' '%s'
# Restart=always
# RestartSec=30
# # WatchdogSec=60
# KillMode=process
# User=pi
# Group=pi
# [Install]
# WantedBy=multi-user.target
# """
# % (server, local_repo_path, local_repo_python_entrypoint_long_fn, server)
# )
FILENAMES_FOR_UNITFILES = [f"{server}.service" for server in SERVER_NAMES]
# PATH_FOR_UNITFILE = "/etc/systemd/system"
# unitfile_fullpaths = []
# for fn in FILENAMES_FOR_UNITFILES:
# unitfile_fullpaths.append("%s/%s" % (PATH_FOR_UNITFILE, fn))
if not os.path.exists(local_repo_path):
git_output = subprocess.check_output(["git", "clone", repo_url, local_repo_path])
logging.debug("git clone output: %s", git_output.decode())
else:
git_output = subprocess.check_output(
["git", "-C", local_repo_path, "reset", "--hard"]
)
logging.debug("git reset output: %s", git_output.decode())
git_output = subprocess.check_output(["git", "-C", local_repo_path, "clean", "-fd"])
logging.debug("git clean output: %s", git_output.decode())
git_output = subprocess.check_output(["git", "-C", local_repo_path, "pull"])
logging.debug("git pull output: %s", git_output.decode())
# update local if repo changed
repo_changed = "Already up to date." not in git_output.decode()
if repo_changed:
os.chdir(local_repo_path)
# install poetry
COMMANDS_TO_RUN = [
["apt", "install", "-y", "python3-pip"],
["pip3", "install", "poetry"],
]
for command in COMMANDS_TO_RUN:
logging.info("running command to install poetry %s", repr(command))
subprocess.run(command)
# refresh poetry requirements
COMMANDS_TO_RUN = [
["poetry", "install"],
["chown", "-R", "pi:pi", local_repo_path],
# ["chown", "-R", "pi:pi", sqlite_path],
]
for command in COMMANDS_TO_RUN:
logging.info("running command to refresh poetry %s", repr(command))
subprocess.check_output(command)
# # reapply unitfile
# for unitfile_fullpath, payload in zip(unitfile_fullpaths, UNIT_FILE_PAYLOADS):
# try:
# logging.info("creating unitfile at path %s", unitfile_fullpath)
# with open(unitfile_fullpath, "w") as f:
# f.write(payload)
# except Exception as e:
# logging.exception(
# "error creating unitfile at path %s with error %s",
# unitfile_fullpath,
# repr(e),
# )
# # refresh systemd daemon
# COMMANDS_TO_RUN = [
# ["systemctl", "daemon-reload"],
# ]
    # restart the services; start from a fresh command list so the poetry commands
    # above are not run a second time
    COMMANDS_TO_RUN = []
    for fn in FILENAMES_FOR_UNITFILES:
        COMMANDS_TO_RUN.append(["systemctl", "restart", fn])
    for command in COMMANDS_TO_RUN:
        logging.info("running command to restart service %s", repr(command))
        subprocess.run(command)
# # restart service
# COMMANDS_TO_RUN = [["systemctl", "restart", fn] for fn in FILENAMES_FOR_UNITFILES]
# for command in COMMANDS_TO_RUN:
# logging.info("running command to restart services %s", repr(command))
# subprocess.check_output(command)
|
import cudamat_ext as cm
import GPULock
GPULock.GetGPULock()
cm.cublas_init()
cm.CUDAMatrix.init_random()
import numpy as np
import datetime, time
def test_softmax():
m = 2000
n = 128
data = np.random.randn(m, n)
prob = data - data.max(axis=0).reshape(1,-1)
prob = np.exp(prob) / np.exp(prob).sum(axis=0).reshape(1,-1)
cm_data = cm.CUDAMatrix(cm.reformat(data))
cm_prob = cm.CUDAMatrix(cm.reformat(np.zeros(data.shape)))
cm_data.compute_softmax(cm_prob)
error = np.sum((cm_prob.asarray() - prob)**2)
print "Error = ", error
assert error < 10**-2, "Error in CUDAMatrix.compute_softmax exceeded threshold"
def test_softmax_sample():
dim, num_pts = 160, 128
num_draws = 10000
    probs = np.random.rand(dim, num_pts)
for i in range(min(dim, num_pts)):
probs[i,i] = 2.0
probs = probs / probs.sum(axis=0).reshape(1,-1)
    cm_prob = cm.CUDAMatrix(np.log(probs))
cm_data = cm.empty(probs.shape)
cm_rands = cm.empty(probs.shape)
cm_counts = cm.empty(probs.shape).assign(0)
s = datetime.datetime.now()
for draw in range(num_draws):
cm_rands.fill_with_rand()
cm_prob.SampleSoftMax(cm_rands, cm_data)
cm_counts.add(cm_data)
cm_data.assign(0)
e = datetime.datetime.now()
diff= e-s
cm_counts.divide(num_draws)
est_probs = cm_counts.asarray().copy()
print "Total time for %d draws = %d microseconds\n"%(num_draws, diff.microseconds)
print "Average case error = %.5f \n"%(np.mean(abs(est_probs-probs)))
    from matplotlib.pyplot import subplot, imshow, draw, plot
subplot(311), imshow(probs, aspect='auto', interpolation='nearest')
subplot(312), imshow(est_probs, aspect='auto', interpolation='nearest')
subplot(313), plot(est_probs[:,0])
subplot(313), plot(probs[:,0])
draw(), time.sleep(0.2)
raw_input('enter to finish')
return est_probs, probs
if __name__ == "__main__":
test_softmax()
est_probs, probs = test_softmax_sample()
|
import logging
import copy
import torch
from torch import nn
class MyEmbeddings(nn.Embedding):
def __init__(self, word_to_idx, embedding_dim):
super(MyEmbeddings, self).__init__(len(word_to_idx), embedding_dim)
self.embedding_dim = embedding_dim
self.vocab_size = len(word_to_idx)
self.word_to_idx = word_to_idx
self.idx_to_word = {i: w for w, i in self.word_to_idx.items()}
def set_item_embedding(self, idx, embedding):
if len(embedding) == self.embedding_dim:
self.weight.data[idx] = torch.FloatTensor(embedding)
def load_words_embeddings(self, vec_model):
logging.info("Loading word vectors in model")
for word in vec_model:
if word in self.word_to_idx:
idx = self.word_to_idx[word]
embedding = vec_model[word]
self.set_item_embedding(idx, embedding)
class AnalogyModel(nn.Module):
def __init__(self, train_embeddings, test_embeddings, other_embeddings, reg_term_lambda=0.001, delta=0.1):
super(AnalogyModel, self).__init__()
self.train_embeddings = train_embeddings
self.original_embeddings = copy.deepcopy(train_embeddings)
self.test_embeddings = test_embeddings
self.other_embeddings = other_embeddings
self.loss = nn.CosineEmbeddingLoss()
self.regularization = nn.MSELoss(reduction='sum')
self.reg_term_lambda = reg_term_lambda
self.delta = delta
def set_mapper(self, mapper):
self.mapper = mapper
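    # Training relies on the word-analogy "offset trick": for a quadruple
    # (e1, e2, e3, e4), the vector e1 - e2 + e4 should lie close to the embedding
    # of e3. loss_function() scores that offset vector against e3, compares it with
    # the hardest in-batch negatives (t_l for the offset vector, t_r for e3) under
    # a hinge of margin `delta`, and adds an MSE term (weighted by reg_term_lambda)
    # that keeps the trainable embeddings close to their original values.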
def loss_function(self, x, y):
if self.training:
e1 = x['e1']
e2 = x['e2']
e3 = x['e3']
e4 = x['e4']
offset_trick = x['offset_trick']
scores = x['scores']
distances = x['distances']
t_l = x['t_l']
t_r = x['t_r']
batch_size = e1.shape[0]
e3_embeddings = self.train_embeddings(e3)
entities = torch.cat([e1, e2, e3, e4]).unique()
reg_term = self.regularization(self.original_embeddings(entities), self.train_embeddings(entities))
score = torch.bmm(offset_trick.view(batch_size, 1, -1), e3_embeddings.view(batch_size, -1, 1)).squeeze()
neg_left_score = torch.bmm(offset_trick.view(batch_size, 1, -1), t_l.view(batch_size, -1, 1)).squeeze()
neg_right_score = torch.bmm(e3_embeddings.view(batch_size, 1, -1), t_r.view(batch_size, -1, 1)).squeeze()
left_loss = nn.functional.relu(self.delta + neg_left_score - score).sum()
right_loss = nn.functional.relu(self.delta + neg_right_score - score).sum()
loss = left_loss + right_loss + self.reg_term_lambda * reg_term
return loss
# return self.loss(offset_trick, self.trainable_embeddings(e3), y) + self.reg_term_lambda * reg_term
else:
e3 = x['e3']
offset_trick = x['offset_trick']
return self.loss(offset_trick, self.mapper.apply(self.test_embeddings(e3)), y)
def is_success(self, e3, e1_e2_e4, top4):
if e3 not in top4:
return False
else:
for elem in top4:
if elem != e3 and elem not in e1_e2_e4:
return False
if elem == e3:
return True
def accuracy(self, x, y):
e1s = x['e1']
e2s = x['e2']
e3s = x['e3']
e4s = x['e4']
scores = x['scores']
sorted_indexes_by_scores = scores.argsort(descending=True)[:, :4]
accuracies = list()
for e1, e2, e3, e4, top4_indexes in zip(e1s, e2s, e3s, e4s, sorted_indexes_by_scores):
success = self.is_success(e3, {e1, e2, e4}, top4_indexes)
if success:
accuracies.append(1)
else:
accuracies.append(0)
return sum(accuracies) / len(accuracies)
def forward(self, input_ids, distances):
e1 = input_ids[:, 0]
e2 = input_ids[:, 1]
e3 = input_ids[:, 2]
e4 = input_ids[:, 3]
if self.training:
e1_embeddings = self.train_embeddings(e1)
e2_embeddings = self.train_embeddings(e2)
e3_embeddings = self.train_embeddings(e3)
e4_embeddings = self.train_embeddings(e4)
offset_trick = e1_embeddings - e2_embeddings + e4_embeddings
a_norm = offset_trick / offset_trick.norm(dim=1)[:, None]
t_l = a_norm[a_norm.matmul(a_norm.transpose(0, 1)).argsort()[:, -2]]
e3_norm = e3_embeddings / e3_embeddings.norm(dim=1)[:, None]
t_r = e3_norm[e3_norm.matmul(e3_norm.transpose(0, 1)).argsort()[:, -2]]
b_norm = self.train_embeddings.weight / self.train_embeddings.weight.norm(dim=1)[:, None]
cosine_sims = torch.mm(a_norm, b_norm.transpose(0,1))
return {
"e1": e1,
"e2": e2,
"e3": e3,
"e4": e4,
"offset_trick": offset_trick,
"scores": cosine_sims,
"distances": distances,
"t_l": t_l,
"t_r": t_r,
}
else:
e1_embeddings = self.test_embeddings(e1)
e2_embeddings = self.test_embeddings(e2)
e3_embeddings = self.test_embeddings(e3)
e4_embeddings = self.test_embeddings(e4)
mapped_e1 = self.mapper.apply(e1_embeddings)
mapped_e2 = self.mapper.apply(e2_embeddings)
mapped_e3 = self.mapper.apply(e3_embeddings)
mapped_e4 = self.mapper.apply(e4_embeddings)
offset_trick = mapped_e1 - mapped_e2 + mapped_e4
a_norm = offset_trick / offset_trick.norm(dim=1)[:, None]
mapped_embedding_table = self.mapper.apply(self.test_embeddings.weight)
b_norm = mapped_embedding_table / mapped_embedding_table.norm(dim=1)[:, None]
cosine_sims = torch.mm(a_norm, b_norm.transpose(0,1))
return {
"e1": e1,
"e2": e2,
"e3": e3,
"e4": e4,
"offset_trick": offset_trick,
"scores": cosine_sims,
"distances": distances,
}
class IdentityMapper:
def apply(self, elems):
return elems
class NeuralMapper:
def __init__(self, mapping_model, device):
self.model = mapping_model
self.device = device
def apply(self, elems):
return self.model(elems)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
/***************************************************************************
OSGeo4Mac Python startup script for setting env vars in dev builds and
installs of QGIS when built off dependencies from homebrew-osgeo4mac tap
-------------------
begin : January 2014
copyright: (C) 2014 Larry Shaffer
email : larrys at dakotacarto dot com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
import os
import stat
import sys
import argparse
import subprocess
from collections import OrderedDict
HOME = os.path.expanduser('~')
OSG_VERSION = '3.4.0'
HOMEBREW_PREFIX = '/usr/local'
QGIS_LOG_DIR = HOME + '/Library/Logs/QGIS'
QGIS_LOG_FILE = QGIS_LOG_DIR + '/qgis-dev.log'
def env_vars(ap, hb, qb='', ql=''):
options = OrderedDict()
options['PATH'] = '{hb}/opt/gdal2/bin:{hb}/opt/gdal2-python/bin:{hb}/bin:{hb}/sbin:' + os.environ['PATH']
dyld_path = '{hb}/opt/gdal2/lib:{hb}/opt/sqlite/lib:{hb}/opt/libxml2/lib:{hb}/lib'
run_from_build = False
if qb:
qbr = os.path.realpath(qb)
if qbr in ap:
run_from_build = True
dyld_path = '{0}/output/lib:{0}/PlugIns/qgis:'.format(qbr) + \
dyld_path
# if QGIS_MACAPP_BUNDLE == 0, or running from build directory, find the
# right frameworks and libs first
if (run_from_build or
not os.path.exists(ap + '/Contents/Frameworks/QtCore.framework')):
options['DYLD_FRAMEWORK_PATH'] = \
'{hb}/Frameworks:/System/Library/Frameworks'
options['DYLD_VERSIONED_LIBRARY_PATH'] = dyld_path
# isolate Python setup if Kyngchaos frameworks exist (they bogart sys.path)
# this keeps /Library/Python/2.7/site-packages from being searched
if (os.path.exists(hb + '/Frameworks/Python.framework') and
os.path.exists('/Library/Frameworks/GDAL.framework')):
options['PYQGIS_STARTUP'] = '{hb}/Homebrew/Library/Taps/osgeo/homebrew-osgeo4mac' \
'/enviro/python_startup.py'
options['PYTHONHOME'] = '{hb}/Frameworks/Python.framework/Versions/2.7'
options['PYTHONPATH'] = '{hb}/lib/qt-4/python2.7/site-packages:' \
'{hb}/opt/gdal2-python/lib/python2.7/site-packages:' \
'{hb}/lib/python2.7/site-packages'
options['GDAL_DRIVER_PATH'] = '{hb}/lib/gdalplugins'
options['GDAL_DATA'] = '{hb}/opt/gdal2/share/gdal'
options['GRASS_PREFIX'] = '{hb}/opt/grass7/grass-base'
options['OSG_LIBRARY_PATH'] = '{hb}/lib/osgPlugins-' + OSG_VERSION
options['QGIS_LOG_FILE'] = ql
if run_from_build:
options['QT_PLUGIN_PATH'] = '{hb}/opt/qt-4/plugins:{hb}/lib/qt-4/plugins'
for k, v in options.iteritems():
options[k] = v.format(hb=hb)
return options
def plist_bud(cmd, plist, quiet=False):
out = open(os.devnull, 'w') if quiet else None
subprocess.call(['/usr/libexec/PlistBuddy', '-c', cmd, plist],
stdout=out, stderr=out)
def arg_parser():
parser = argparse.ArgumentParser(
description="""\
Script embeds Homebrew-prefix-relative environment variables in a
development QGIS.app bundle in the LSEnvironment entity of the app's
    Info.plist. Results differ depending on whether the app bundle is run from
    the build or install directory and whether the Kyngchaos.com or Qt
    development package installers have been used. QGIS_LOG_FILE is at (unless
defined in env var or command option): {0}
""".format(QGIS_LOG_FILE)
)
parser.add_argument('qgis_app_path',
help='path to app bundle (relative or absolute)')
parser.add_argument(
'-p', '--homebrew-prefix', dest='hb',
metavar='homebrew_prefix',
help='homebrew prefix path, or set HOMEBREW_PREFIX (/usr/local default)'
)
parser.add_argument(
'-b', '--build-dir', dest='qb',
metavar='qgis_build_dir',
help='QGIS build directory'
)
parser.add_argument(
'-l', '--qgis-log', dest='ql',
metavar='qgis_log_file',
help='QGIS debug output log file'
)
return parser
def main():
# get defined args
args = arg_parser().parse_args()
ap = os.path.realpath(args.qgis_app_path)
if not os.path.isabs(ap) or not os.path.exists(ap):
print 'Application can not be resolved to an existing absolute path.'
sys.exit(1)
# QGIS Browser.app?
browser = 'Browser.' in ap
plist = ap + '/Contents/Info.plist'
if not os.path.exists(plist):
print 'Application Info.plist not found.'
sys.exit(1)
# generate list of environment variables
hb_prefix = HOMEBREW_PREFIX
if 'HOMEBREW_PREFIX' in os.environ:
hb_prefix = os.environ['HOMEBREW_PREFIX']
hb = os.path.realpath(args.hb) if args.hb else hb_prefix
if not os.path.isabs(hb) or not os.path.exists(hb):
print 'HOMEBREW_PREFIX not resolved to existing absolute path.'
sys.exit(1)
q_log = QGIS_LOG_FILE
if 'QGIS_LOG_FILE' in os.environ:
q_log = os.environ['QGIS_LOG_FILE']
ql = os.path.realpath(args.ql) if args.ql else q_log
try:
if not os.path.exists(ql):
if ql == os.path.realpath(QGIS_LOG_FILE):
# ok to auto-create log's parent directories
p_dir = os.path.dirname(ql)
if not os.path.exists(p_dir):
os.makedirs(p_dir)
subprocess.call(['/usr/bin/touch', ql])
except OSError, e:
print 'Could not create QGIS log file at: {0}'.format(ql)
print 'Create an empty file at the indicated path for logging to work.'
print >>sys.stderr, "Warning:", e
qb = os.path.realpath(args.qb) if args.qb else ''
if qb and (not os.path.isabs(qb) or not os.path.exists(qb)):
print 'QGIS build directory not resolved to existing absolute path.'
sys.exit(1)
# write variables to Info.plist
evars = env_vars(ap, hb, qb, ql)
# opts_s = ''
# for k, v in evars.iteritems():
# opts_s += '{0}={1}\n'.format(k, v)
# print opts_s + '\n'
# first delete any LSEnvironment setting, ignoring errors
# CAUTION!: this may not be what you want, if the .app already has
# LSEnvironment settings
plist_bud('Delete :LSEnvironment', plist, quiet=True)
# re-add the LSEnvironment entry
plist_bud('Add :LSEnvironment dict', plist)
# add the variables
for k, v in evars.iteritems():
plist_bud("Add :LSEnvironment:{0} string '{1}'".format(k, v), plist)
# set bundle identifier, so package installers don't accidentally install
# updates into dev bundles
app_id = 'qgis'
app_name = 'QGIS'
if browser:
app_id += '-browser'
app_name += ' Browser'
plist_bud('Set :CFBundleIdentifier org.qgis.{0}-dev'.format(app_id), plist)
# update modification date on app bundle, or changes won't take effect
subprocess.call(['/usr/bin/touch', ap])
# add environment-wrapped launcher shell script
wrp_scr = ap + '/Contents/MacOS/{0}.sh'.format(app_id)
# override vars that need to prepend existing vars
evars['PATH'] = '{hb}/opt/gdal2/bin:{hb}/opt/gdal2-python/bin:{hb}/bin:{hb}/sbin:$PATH'.format(hb=hb)
evars['PYTHONPATH'] = \
'{hb}/lib/qt-4/python2.7/site-packages:' \
'{hb}/opt/gdal2-python/lib/python2.7/site-packages:' \
'{hb}/lib/python2.7/site-packages:$PYTHONPATH'.format(hb=hb)
if os.path.exists(wrp_scr):
os.remove(wrp_scr)
with open(wrp_scr, 'a') as f:
f.write('#!/bin/bash\n\n')
# get runtime parent directory
f.write('DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")"; pwd -P)\n\n')
# add the variables
for k, v in evars.iteritems():
f.write('export {0}={1}\n'.format(k, v))
f.write('\n"$DIR/{0}" "$@"\n'.format(app_name))
os.chmod(wrp_scr, stat.S_IRUSR | stat.S_IWUSR | stat.S_IEXEC)
print 'Done setting variables'
if __name__ == '__main__':
main()
sys.exit(0)
|
"""
Utility routines for the maximum entropy module.
Most of them are either Python replacements for the corresponding Fortran
routines or wrappers that give the maxent module a common interface for
manipulating ndarrays, scipy sparse matrices, and PySparse matrices.
Perhaps the logsumexp() function belongs under the utils/ branch where other
modules can access it more easily.
Copyright: Ed Schofield, 2003-2006
License: BSD-style (see LICENSE.txt in main source directory)
"""
# Future imports must come before any code in 2.5
from __future__ import division
from __future__ import print_function
from builtins import range
__author__ = "Ed Schofield"
__version__ = '2.0'
import random
import math
import cmath
import numpy as np
#from numpy import log, exp, asarray, ndarray, empty
import scipy.sparse
from scipy.misc import logsumexp
def feature_sampler(vec_f, auxiliary_sampler):
"""
A generator function for tuples (F, log_q_xs, xs)
Parameters
----------
vec_f : function
Pass `vec_f` as a (vectorized) function that operates on a vector of
samples xs = {x1,...,xn} and returns a feature matrix (m x n), where m
is some number of feature components.
auxiliary_sampler : function
Pass `auxiliary_sampler` as a function that returns a tuple
(xs, log_q_xs) representing a sample to use for sampling (e.g.
importance sampling) on the sample space of the model.
xs : list, 1d ndarray, or 2d matrix (n x d)
We require len(xs) == n.
Yields
------
tuples (F, log_q_xs, xs)
F : matrix (m x n)
log_q_xs : as returned by auxiliary_sampler
xs : as returned by auxiliary_sampler
"""
while True:
xs, log_q_xs = auxiliary_sampler()
F = vec_f(xs) # compute feature matrix from points
yield F, log_q_xs, xs
def dictsample(freq, size=None, return_probs=None):
"""
Create a sample of the given size from the specified discrete distribution.
Parameters
----------
freq : a dictionary
A mapping from values x_j in the sample space to probabilities (or
unnormalized frequencies).
size : a NumPy size parameter (like a shape tuple)
Something passable to NumPy as a size argument to np.random.choice(...)
    return_probs : str or None, optional (default None)
None: don't return pmf values at each sample point
'prob': return pmf values at each sample point
'logprob': return log pmf values at each sample point
Returns
-------
Returns a sample of the given size from the keys of the given
dictionary `freq` with probabilities given according to the
values (normalized to 1). Optionally returns the probabilities
under the distribution of each observation.
Example
-------
>>> freq = {'a': 10, 'b': 15, 'c': 20}
    >>> dictsample(freq, size=10)
array([c, b, b, b, b, b, c, b, b, b], dtype=object)
"""
n = len(freq)
probs = np.fromiter(freq.values(), float)
probs /= probs.sum()
indices = np.random.choice(np.arange(n), size=size, p=probs)
labels = np.empty(n, dtype=object)
for i, label in enumerate(freq.keys()):
labels[i] = label
sample = labels[indices]
if return_probs is None:
return sample
sampleprobs = probs[indices]
if return_probs == 'prob':
return sample, sampleprobs
elif return_probs == 'logprob':
return sample, np.log(sampleprobs)
else:
raise ValueError('return_probs must be "prob", "logprob", or None')
def dictsampler(freq, size=None, return_probs=None):
"""
A generator of samples of the given size from the specified discrete
distribution.
Parameters
----------
freq : a dictionary
A mapping from values x_j in the sample space to probabilities (or
unnormalized frequencies).
size : a NumPy size parameter (like a shape tuple)
Something passable to NumPy as a size argument to np.random.choice(...)
    return_probs : str or None, optional (default None)
None: don't return pmf values at each sample point
'prob': return pmf values at each sample point
'logprob': return log pmf values at each sample point
Returns
-------
Returns a sample of the given size from the keys of the given
dictionary `freq` with probabilities given according to the
values (normalized to 1). Optionally returns the probabilities
under the distribution of each observation.
Example
-------
>>> freq = {'a': 10, 'b': 15, 'c': 20}
    >>> g = dictsampler(freq, size=10)
>>> next(g)
array([c, b, b, b, b, b, c, b, b, b], dtype=object)
"""
while True:
yield dictsample(freq, size=size, return_probs=return_probs)
def auxiliary_sampler_scipy(auxiliary, dimensions=1, n=10**5):
"""
Sample (once) from the given scipy.stats distribution
Parameters
----------
auxiliary : a scipy.stats distribution object (rv_frozen)
Returns
-------
sampler : function
sampler(), when called with no parameters, returns a tuple
(xs, log_q_xs), where:
xs : matrix (n x d): [x_1, ..., x_n]: a sample
log_q_xs: log pdf values under the auxiliary sampler for each x_j
"""
def sampler():
xs = auxiliary.rvs(size=(n, dimensions))
log_q_xs = np.log(auxiliary.pdf(xs.T)).sum(axis=0)
return (xs, log_q_xs)
return sampler
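def _example_feature_sampler():
    """Illustrative only (assumption: scipy.stats is importable; this helper is not
    part of the original module). Wires auxiliary_sampler_scipy() into
    feature_sampler() with a single vectorized feature f(x) = x and draws one
    (F, log_q_xs, xs) tuple; F has shape (1, 100)."""
    import scipy.stats
    sampler = auxiliary_sampler_scipy(scipy.stats.norm(0, 1), dimensions=1, n=100)
    sample_gen = feature_sampler(lambda xs: xs.T, sampler)
    return next(sample_gen)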
def _logsumexpcomplex(values):
"""A version of logsumexp that should work if the values passed are
complex-numbered, such as the output of robustarraylog(). So we
expect:
cmath.exp(logsumexpcomplex(robustarraylog(values))) ~= sum(values,axis=0)
except for a small rounding error in both real and imag components.
The output is complex. (To recover just the real component, use
A.real, where A is the complex return value.)
"""
if len(values) == 0:
return 0.0
iterator = iter(values)
# Get the first element
while True:
# Loop until we have a value greater than -inf
try:
b_i = next(iterator) + 0j
except StopIteration:
# empty
return float('-inf')
if b_i.real != float('-inf'):
break
# Now the rest
for a_i in iterator:
a_i += 0j
if b_i.real > a_i.real:
increment = robustlog(1.+cmath.exp(a_i - b_i))
# print "Increment is " + str(increment)
b_i = b_i + increment
else:
increment = robustlog(1.+cmath.exp(b_i - a_i))
# print "Increment is " + str(increment)
b_i = a_i + increment
return b_i
def logsumexp_naive(values):
"""For testing logsumexp(). Subject to numerical overflow for large
values (e.g. 720).
"""
s = 0.0
for x in values:
s += math.exp(x)
return math.log(s)
def robustlog(x):
"""Returns log(x) if x > 0, the complex log cmath.log(x) if x < 0,
or float('-inf') if x == 0.
"""
if x == 0.:
return float('-inf')
elif type(x) is complex or (type(x) is float and x < 0):
return cmath.log(x)
else:
return math.log(x)
def _robustarraylog(x):
""" An array version of robustlog. Operates on a real array x.
"""
    arraylog = np.empty(len(x), np.complex64)
for i in range(len(x)):
xi = x[i]
if xi > 0:
arraylog[i] = math.log(xi)
elif xi == 0.:
arraylog[i] = float('-inf')
else:
arraylog[i] = cmath.log(xi)
return arraylog
# def arrayexp(x):
# """
# OBSOLETE?
#
# Returns the elementwise antilog of the real array x.
#
# We try to exponentiate with np.exp() and, if that fails, with
# python's math.exp(). np.exp() is about 10 times faster but throws
# an OverflowError exception for numerical underflow (e.g. exp(-800),
# whereas python's math.exp() just returns zero, which is much more
# helpful.
# """
# try:
# ex = np.exp(x)
# except OverflowError:
# print("Warning: OverflowError using np.exp(). Using slower Python"\
# " routines instead!")
# ex = np.empty(len(x), float)
# for j in range(len(x)):
# ex[j] = math.exp(x[j])
# return ex
#
# def arrayexpcomplex(x):
# """
# OBSOLETE?
#
# Returns the elementwise antilog of the vector x.
#
# We try to exponentiate with np.exp() and, if that fails, with python's
# math.exp(). np.exp() is about 10 times faster but throws an
# OverflowError exception for numerical underflow (e.g. exp(-800),
# whereas python's math.exp() just returns zero, which is much more
# helpful.
#
# """
# try:
# ex = np.exp(x).real
# except OverflowError:
# ex = np.empty(len(x), float)
# try:
# for j in range(len(x)):
# ex[j] = math.exp(x[j])
# except TypeError:
# # Perhaps x[j] is complex. If so, try using the complex
# # exponential and returning the real part.
# for j in range(len(x)):
# ex[j] = cmath.exp(x[j]).real
# return ex
def sample_wr(population, k):
"""Chooses k random elements (with replacement) from a population.
(From the Python Cookbook).
"""
n = len(population)
_random, _int = random.random, int # speed hack
return [population[_int(_random() * n)] for i in range(k)]
def evaluate_feature_matrix(feature_functions,
xs,
vectorized=True,
format='csc_matrix',
dtype=float,
verbose=False):
"""Evaluate a (m x n) matrix of features `F` of the sample `xs` as:
F[i, :] = f_i(xs[:])
if xs is 1D, or as:
F[i, j] = f_i(xs[:, j])
if xs is 2D, for each feature function `f_i` in `feature_functions`.
Parameters
----------
feature_functions : a list of m feature functions f_i.
xs : either:
1. a (n x d) matrix representing n d-dimensional
observations xs[j, :] for j=1,...,n.
2. a 1d array or sequence (e.g list) of observations xs[j]
for j=1,...,n.
vectorized : bool (default True)
If True, the feature functions f_i are assumed to be vectorized;
then these will be passed all observations xs at once, in turn.
If False, the feature functions f_i will be evaluated one at a time.
format : str (default 'csc_matrix')
Options: 'ndarray', 'csc_matrix', 'csr_matrix', 'dok_matrix'.
If you have enough memory, it may be faster to create a dense
ndarray and then construct a e.g. CSC matrix from this.
Returns
-------
F : (m x n) matrix (in the given format: ndarray / csc_matrix / etc.)
Matrix of evaluated features.
"""
m = len(feature_functions)
if isinstance(xs, np.ndarray) and xs.ndim == 2:
n, d = xs.shape
if d == 1 and vectorized:
# xs may be a column vector, i.e. (n x 1) array.
# In this case, reshape it to a 1d array. This
# makes it easier to define functions that
# operate on only one variable (the usual case)
# given that sklearn's interface now forces 2D
# arrays X when calling .transform(X) and .fit(X).
xs = np.reshape(xs, n)
else:
n, d = len(xs), 1
if format in ('dok_matrix', 'csc_matrix', 'csr_matrix'):
F = scipy.sparse.dok_matrix((m, n), dtype=dtype)
elif format == 'ndarray':
F = np.empty((m, n), dtype=dtype)
else:
raise ValueError('matrix format not recognized')
for i, f_i in enumerate(feature_functions):
if verbose:
print('Computing feature {i} of {m} ...'.format(i=i, m=m))
if vectorized:
F[i::m, :] = f_i(xs)
else:
for j in range(n):
f_i_x = f_i(xs[j])
if f_i_x != 0:
F[i,j] = f_i_x
if format == 'csc_matrix':
return F.tocsc()
elif format == 'csr_matrix':
return F.tocsr()
else:
return F
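def _example_evaluate_feature_matrix():
    """Illustrative only (not part of the original module): evaluate two vectorized
    features f0(x) = x and f1(x) = x**2 on a 1d sample of three points, returning
    the dense 2 x 3 matrix [[1, 2, 3], [1, 4, 9]]."""
    xs = np.array([1.0, 2.0, 3.0])
    return evaluate_feature_matrix([lambda x: x, lambda x: x ** 2], xs,
                                   format='ndarray')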
# def densefeatures(f, x):
# """Returns a dense array of non-zero evaluations of the vector
# functions fi in the list f at the point x.
# """
#
# return np.array([fi(x) for fi in f])
# def densefeaturematrix(f, sample, verbose=False):
# """Compute an (m x n) dense array of non-zero evaluations of the
# scalar functions fi in the list f at the points x_1,...,x_n in the
# list sample.
# """
#
# # Was: return np.array([[fi(x) for fi in f] for x in sample])
#
# m = len(f)
# n = len(sample)
#
# F = np.empty((m, n), float)
# for i in range(m):
# f_i = f[i]
# for j in range(n):
# x = sample[j]
# F[i,j] = f_i(x)
# return F
# def sparsefeatures(f, x, format='csc_matrix'):
# """Compute an mx1 sparse matrix of non-zero evaluations of the
# scalar functions f_1,...,f_m in the list f at the point x.
#
# """
# m = len(f)
# if format in ('dok_matrix', 'csc_matrix', 'csr_matrix'):
# sparsef = scipy.sparse.dok_matrix((m, 1))
# else:
# raise ValueError("sparse matrix format not recognized")
#
# for i in range(m):
# f_i_x = f[i](x)
# if f_i_x != 0:
# sparsef[i, 0] = f_i_x
#
# if format == 'csc_matrix':
# print("Converting to CSC matrix ...")
# return sparsef.tocsc()
# elif format == 'csr_matrix':
# print("Converting to CSR matrix ...")
# return sparsef.tocsr()
# else:
# return sparsef
# def sparsefeaturematrix(f, sample, format='csc_matrix', verbose=False):
# """Compute an (m x n) sparse matrix of non-zero evaluations of the
# scalar functions f_1,...,f_m in the list f at the points x_1,...,x_n
# in the sequence 'sample'.
#
# """
# m = len(f)
# n = len(sample)
# if format in ('dok_matrix', 'csc_matrix', 'csr_matrix'):
# sparseF = scipy.sparse.dok_matrix((m, n))
# else:
# raise ValueError("sparse matrix format not recognized")
#
# for i in range(m):
# if verbose:
# print('Computing feature {i} of {m}'.format(i=i, m=m))
# f_i = f[i]
# for j in range(n):
# x = sample[j]
# f_i_x = f_i(x)
# if f_i_x != 0:
# sparseF[i,j] = f_i_x
#
# if format == 'csc_matrix':
# return sparseF.tocsc()
# elif format == 'csr_matrix':
# return sparseF.tocsr()
# else:
# return sparseF
# def sparsefeaturematrix_vectorized(feature_functions, xs, format='csc_matrix'):
# """
# Evaluate a (m x n) matrix of features `F` of the sample `xs` as:
#
# F[i, j] = f_i(xs[:, j])
#
# Parameters
# ----------
# feature_functions : a list of feature functions f_i.
#
# xs : either:
# 1. a (d x n) matrix representing n d-dimensional
# observations xs[: ,j] for j=1,...,n.
# 2. a 1d array or sequence (e.g list) of observations xs[j]
# for j=1,...,n.
#
# The feature functions f_i are assumed to be vectorized. These will be
# passed all observations xs at once, in turn.
#
# Note: some samples may be more efficient / practical to compute
# features one sample observation at a time (e.g. generated). For these
# cases, use sparsefeaturematrix().
#
# Only pass sparse=True if you need the memory savings. If you want a
# sparse matrix but have enough memory, it may be faster to
# pass dense=True and then construct a CSC matrix from the dense NumPy
# array.
#
# """
# m = len(feature_functions)
#
# if isinstance(xs, np.ndarray) and xs.ndim == 2:
# d, n = xs.shape
# else:
# n = len(xs)
# if not sparse:
# F = np.empty((m, n), float)
# else:
# import scipy.sparse
# F = scipy.sparse.lil_matrix((m, n), dtype=float)
#
# for i, f_i in enumerate(feature_functions):
# F[i::m, :] = f_i(xs)
#
# if format == 'csc_matrix':
# return F.tocsc()
# elif format == 'csr_matrix':
# return F.tocsr()
# else:
# return F
def old_vec_feature_function(feature_functions, sparse=False):
"""
Create and return a vectorized function `features(xs)` that
evaluates an (n x m) matrix of features `F` of the sample `xs` as:
F[j, i] = f_i(xs[:, j])
Parameters
----------
feature_functions : a list of feature functions f_i.
`xs` will be passed to these functions as either:
1. an (n x d) matrix representing n d-dimensional
observations xs[j, :] for j=1,...,n.
2. a 1d array or sequence (e.g list) of observations xs[j]
for j=1,...,n.
The feature functions f_i are assumed to be vectorized. These will be
passed all observations xs at once, in turn.
Note: some samples may be more efficient / practical to compute
features of one sample observation at a time (e.g. generated).
Only pass sparse=True if you need the memory savings. If you want a
sparse matrix but have enough memory, it may be faster to
pass sparse=False and then construct a CSC matrix from the dense NumPy
array.
"""
if sparse:
import scipy.sparse
m = len(feature_functions)
def vectorized_features(xs):
if isinstance(xs, np.ndarray) and xs.ndim == 2:
n, d = xs.shape
else:
n = len(xs)
if not sparse:
F = np.empty((n, m), float)
else:
F = scipy.sparse.lil_matrix((n, m), dtype=float)
# Equivalent:
# for i, f_i in enumerate(feature_functions):
# for k in range(len(xs)):
# F[len(feature_functions)*k+i, :] = f_i(xs[k])
for i, f_i in enumerate(feature_functions):
F[:, i::m] = f_i(xs)
if not sparse:
return F
else:
return scipy.sparse.csc_matrix(F)
return vectorized_features
def dotprod(u,v):
"""
This is a wrapper around general dense or sparse dot products.
It is not necessary except as a common interface for supporting
ndarray, scipy spmatrix, and PySparse arrays.
Returns the dot product of the (1 x m) sparse array u with the
(m x 1) (dense) numpy array v.
"""
#print "Taking the dot product u.v, where"
#print "u has shape " + str(u.shape)
#print "v = " + str(v)
try:
dotprod = np.array([0.0]) # a 1x1 array. Required by spmatrix.
u.matvec(v, dotprod)
return dotprod[0] # extract the scalar
except AttributeError:
# Assume u is a dense array.
return np.dot(u,v)
def innerprod(A,v):
"""
This is a wrapper around general dense or sparse dot products.
It is not necessary except as a common interface for supporting
ndarray, scipy spmatrix, and PySparse arrays.
Returns the inner product of the (m x n) dense or sparse matrix A
with the n-element dense array v. This is a wrapper for A.dot(v) for
dense arrays and spmatrix objects, and for A.matvec(v, result) for
PySparse matrices.
"""
# We assume A is sparse.
(m, n) = A.shape
vshape = v.shape
try:
(p,) = vshape
except ValueError:
(p, q) = vshape
if n != p:
raise TypeError("matrix dimensions are incompatible")
if isinstance(v, np.ndarray):
try:
# See if A is sparse
A.matvec
except AttributeError:
# It looks like A is dense
return np.dot(A, v)
else:
# Assume A is sparse
if scipy.sparse.isspmatrix(A):
innerprod = A.matvec(v) # This returns a float32 type. Why???
return innerprod
else:
# Assume PySparse format
innerprod = np.empty(m, float)
A.matvec(v, innerprod)
return innerprod
elif scipy.sparse.isspmatrix(v):
return A * v
else:
raise TypeError("unsupported types for inner product")
def innerprodtranspose(A,v):
"""
This is a wrapper around general dense or sparse dot products.
It is not necessary except as a common interface for supporting
ndarray, scipy spmatrix, and PySparse arrays.
Computes A^T V, where A is a dense or sparse matrix and V is a numpy
array. If A is sparse, V must be a rank-1 array, not a matrix. This
function is efficient for large matrices A. This is a wrapper for
A.T.dot(v) for dense arrays and spmatrix objects, and for
A.matvec_transp(v, result) for pysparse matrices.
"""
(m, n) = A.shape
#pdb.set_trace()
if hasattr(A, 'matvec_transp'):
# A looks like a PySparse matrix
if len(v.shape) == 1:
innerprod = np.empty(n, float)
A.matvec_transp(v, innerprod)
else:
raise TypeError("innerprodtranspose(A,v) requires that v be "
"a vector (rank-1 dense array) if A is sparse.")
return innerprod
elif scipy.sparse.isspmatrix(A):
return (A.conj().transpose() * v).transpose()
else:
# Assume A is dense
if isinstance(v, np.ndarray):
# v is also dense
if len(v.shape) == 1:
# We can't transpose a rank-1 matrix into a row vector, so
# we reshape it.
vm = v.shape[0]
vcolumn = np.reshape(v, (1, vm))
x = np.dot(vcolumn, A)
return np.reshape(x, (n,))
else:
#(vm, vn) = v.shape
# Assume vm == m
x = np.dot(np.transpose(v), A)
return np.transpose(x)
else:
raise TypeError("unsupported types for inner product")
def rowmeans(A):
"""
This is a wrapper for general dense or sparse dot products.
It is only necessary as a common interface for supporting ndarray,
scipy spmatrix, and PySparse arrays.
Returns a dense (m x 1) vector representing the mean of the rows of A,
    which can be an (m x n) sparse or dense matrix.
>>> a = np.array([[1,2],[3,4]], float)
>>> rowmeans(a)
array([ 1.5, 3.5])
"""
if type(A) is np.ndarray:
return A.mean(1)
else:
# Assume it's sparse
try:
n = A.shape[1]
except AttributeError:
raise TypeError("rowmeans() only works with sparse and dense "
"arrays")
rowsum = innerprod(A, np.ones(n, float))
return rowsum / float(n)
def columnmeans(A):
"""
This is a wrapper for general dense or sparse dot products.
It is only necessary as a common interface for supporting ndarray,
scipy spmatrix, and PySparse arrays.
Returns a dense (1 x n) vector with the column averages of A, which can
be an (m x n) sparse or dense matrix.
>>> a = np.array([[1,2],[3,4]],'d')
>>> columnmeans(a)
array([ 2., 3.])
"""
if type(A) is np.ndarray:
return A.mean(0)
else:
# Assume it's sparse
try:
m = A.shape[0]
except AttributeError:
raise TypeError("columnmeans() only works with sparse and dense "
"arrays")
columnsum = innerprodtranspose(A, np.ones(m, float))
return columnsum / float(m)
def columnvariances(A):
"""
This is a wrapper for general dense or sparse dot products.
It is not necessary except as a common interface for supporting ndarray,
scipy spmatrix, and PySparse arrays.
Returns a dense (1 x n) vector with unbiased estimators for the column
variances for each column of the (m x n) sparse or dense matrix A. (The
normalization is by (m - 1).)
>>> a = np.array([[1,2], [3,4]], 'd')
>>> columnvariances(a)
array([ 2., 2.])
"""
if type(A) is np.ndarray:
return np.std(A,0)**2
else:
try:
m = A.shape[0]
except AttributeError:
raise TypeError("columnvariances() only works with sparse "
"and dense arrays")
means = columnmeans(A)
return columnmeans((A-means)**2) * (m/(m-1.0))
def flatten(a):
"""Flattens the sparse matrix or dense array/matrix 'a' into a
1-dimensional array
"""
if scipy.sparse.isspmatrix(a):
return a.A.flatten()
else:
return np.asarray(a).flatten()
class DivergenceError(Exception):
"""Exception raised if the entropy dual has no finite minimum.
"""
def __init__(self, message):
self.message = message
Exception.__init__(self)
def __str__(self):
return repr(self.message)
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
|
from datetime import date
class TableItem(object):
    def __init__(self):
        # per-instance attributes (a shared class-level list would leak amounts
        # between instances)
        self.tableItemNr = 0
        self.tableItemDate = date.today()
        self.tableItemMaxNumCategories = 1
        self.tableItemCategoriesAmount = []
        self.tableItemAmount = 0.0
def setNr(self, nr):
self.tableItemNr = int(nr)
def getNr(self):
return self.tableItemNr
def setDate(self, date):
self.tableItemDate = date
def getDate(self):
return self.tableItemDate
def setNumCategories(self, num):
self.tableItemMaxNumCategories = num
    def getNumCategories(self):
        return self.tableItemMaxNumCategories
def addCategoryAmount(self, amount):
self.tableItemCategoriesAmount.append(round(amount, 2))
self.calculateAmount()
def getCategoryAmount(self):
return self.tableItemCategoriesAmount
def removeCategoryAmountAll(self):
self.tableItemCategoriesAmount.clear()
def getCategoryAmountAsString(self):
self.amountsAsString = []
for amount in self.tableItemCategoriesAmount:
amount = round(amount, 2)
if amount.is_integer() == True:
self.amountsAsString.append(str(amount) + "0 €")
elif (amount * 10.0).is_integer() == True:
self.amountsAsString.append(str(amount) + "0 €")
else:
self.amountsAsString.append(str(amount) + " €")
return self.amountsAsString
def calculateAmount(self):
self.tableItemAmount = 0.0
for x in self.tableItemCategoriesAmount:
self.tableItemAmount = self.tableItemAmount + x
def getAmountAsString(self):
self.calculateAmount()
tableItemAmount = round(self.tableItemAmount, 2)
if tableItemAmount.is_integer() == True:
tableItemAmount = str(tableItemAmount)
tableItemAmount = tableItemAmount + "0 €"
elif (tableItemAmount * 10.0).is_integer() == True:
tableItemAmount = str(tableItemAmount)
tableItemAmount = tableItemAmount + "0 €"
else:
tableItemAmount = str(tableItemAmount)
tableItemAmount = tableItemAmount + " €"
return tableItemAmount
def getAmount(self):
return self.tableItemAmount |
"""
r6satus-python
Extract player data from Rainbow Six Siege.
"""
import json
import sys
import asyncio
import r6sapi
from pymongo import MongoClient
import datetime
OperatorTypes = {
"DOC": "Defense",
"TWITCH": "Attack",
"ASH": "Attack",
"THERMITE": "Attack",
"BLITZ": "Attack",
"BUCK": "Attack",
"HIBANA": "Attack",
"KAPKAN": "Defense",
"PULSE": "Defense",
"CASTLE": "Defense",
"ROOK": "Defense",
"BANDIT": "Defense",
"SMOKE": "Defense",
"FROST": "Defense",
"VALKYRIE": "Defense",
"TACHANKA": "Defense",
"GLAZ": "Attack",
"FUZE": "Attack",
"SLEDGE": "Attack",
"MONTAGNE": "Attack",
"MUTE": "Defense",
"ECHO": "Defense",
"THATCHER": "Attack",
"CAPITAO": "Attack",
"IQ": "Attack",
"BLACKBEARD": "Attack",
"JAGER": "Defense",
"CAVEIRA": "Defense",
"JACKAL": "Attack",
"MIRA": "Defense",
"LESION": "Defense",
"YING": "Attack",
"ELA": "Defense",
"DOKKAEBI": "Attack",
"VIGIL": "Defense",
"ZOFIA": "Attack",
"LION": "Attack",
"FINKA": "Attack",
"MAESTRO": "Defense",
"ALIBI": "Defense",
"MAVERICK": "Attack",
"CLASH": "Defense",
"NOMAD": "Attack",
"KAID": "Defense",
"GRIDLOCK": "Attack",
"MOZZIE": "Defense",
"WARDEN": "Defense",
"NAKK": "Attack",
"AMARU": "Attack",
"GOYO": "Defense"
}
def zchk(target):
"""Check if the input is zero"""
if target == 0:
return target + 1
return target
async def get_data(auth, id=None, uid=None):
player = await auth.get_player(id, r6sapi.Platforms.UPLAY, uid)
await player.check_general()
await player.check_level()
await player.load_queues()
rank_data = await player.get_rank(r6sapi.RankedRegions.ASIA)
operators_data = await player.get_all_operators()
return player, rank_data, operators_data
def pack_data(player, rank_data, operators_data, date):
player_data = {
"id": player.name,
"date": date,
"level": player.level,
"icon": player.icon_url,
"rank": rank_data.rank,
"uid": player.userid,
"operator": [],
"general": {
"kills": player.kills,
"deaths": player.deaths,
"kdr": player.kills / zchk(player.deaths),
"wons": player.matches_won,
"loses": player.matches_lost,
"played": player.matches_played,
"playtimes": player.time_played,
"wlr": player.matches_won / zchk(player.matches_lost)
}
}
for gamemode in [player.casual, player.ranked]:
player_data[gamemode.name] = {
"kills": gamemode.kills,
"deaths": gamemode.deaths,
"kdr": gamemode.kills / zchk(gamemode.deaths),
"wons": gamemode.won,
"loses": gamemode.lost,
"played": gamemode.played,
"playtimes": gamemode.time_played,
"wlr": gamemode.won / zchk(gamemode.lost)
}
for _, operator in operators_data.items():
player_data["operator"].append({
"name":
operator.name,
"type":
OperatorTypes[operator.name.upper()],
"kills":
operator.kills,
"deaths":
operator.deaths,
"kdr":
operator.kills / zchk(operator.deaths),
"wons":
operator.wins,
"loses":
operator.losses,
"pick":
operator.wins + operator.losses
})
return player_data
async def dead_method(dead_id, auth):
players = dead_id.find({}, {'_id': 0, 'id': 1})
lives = []
for player_id in players:
date = datetime.datetime.utcnow()
try:
player, rank_data, operators_data = await get_data(
auth, player_id['id'], None)
except r6sapi.exceptions.InvalidRequest:
print(date, file=sys.stderr)
print(player_id['id'] + " is not found", file=sys.stderr)
dead_id.update_one({"id": player_id['id']}, {
'$set': {
"date": date
},
'$inc': {
"deathcount": 1
}
},
upsert=True)
if 5 < dead_id.find_one({"id": player_id['id']})['deathcount']:
# userdb.delete_one({"id": player_id['id']})
dead_id.delete_one({"id": player_id['id']})
print(date, file=sys.stderr)
print(player_id['id'] + " was deleted in database",
file=sys.stderr)
continue
# print(player.name + " :" + player.userid)
lives.append({'uid': player.userid, 'id': player.name})
return lives
async def live_method(live_id, dead_id, auth, lives, userdb, id2uid, recentdb):
players_raw = live_id.find({}, {'_id': 0, 'uid': 1, 'id': 1})
players = []
for item in players_raw:
players.append({'uid': item['uid'], 'id': item['id']})
players.extend(lives)
players_data = []
for player_sss in players:
date = datetime.datetime.utcnow()
try:
player, rank_data, operators_data = await get_data(
auth, None, player_sss['uid'])
except r6sapi.exceptions.InvalidRequest:
print(date, file=sys.stderr)
print(player_sss['id'] + " is not found", file=sys.stderr)
            userdb.update_one({"id": player_sss['id']}, {
'$set': {
"date": date
},
'$inc': {
"deathcount": 1
}
},
upsert=True)
            dead_id.update_one({"id": player_sss['id']},
{'$set': {
"date": date,
"deathcount": 0
}},
upsert=True)
live_id.delete_one({"id": player_sss['id']})
continue
# print(player.userid)
player_data = pack_data(player, rank_data, operators_data, date)
userdb.update_one(
{"id": player.name},
{'$set': {
"date": date,
"deathcount": 0,
"uid": player.userid
}},
upsert=True)
dead_id.delete_one({"id": player.name})
id2uid.update_one({"id": player.name},
{'$set': {
"date": date,
"uid": player.userid
}},
upsert=True)
live_id.update_one({"uid": player.userid},
{'$set': {
"date": date,
"id": player.name
}},
upsert=True)
recentdb.delete_one({"id": player.name})
recentdb.insert_one(player_data)
players_data.append(player_data)
return players_data
async def run():
""" main function """
config_path = open("./config.json", 'r')
config = json.load(config_path)
client = MongoClient(config["mongodb addres"], config["mongodb port"])
recentdb = client['r6status']['recent']
olddb = client['r6status']['old']
userdb = client['r6status']['user']
id2uid = client['r6status']['id2uid']
live_id = client['r6status']['live_id']
dead_id = client['r6status']['dead_id']
mail = config["e-mail address"]
pswd = config["password"]
auth = r6sapi.Auth(mail, pswd)
try:
await auth.connect()
except r6sapi.exceptions.FailedToConnect as e:
print("type:{0}".format(type(e)))
print("args:{0}".format(e.args))
print("message:{0}".format(e.message))
print("{0}".format(e))
sys.exit(1)
lives = await dead_method(dead_id, auth)
players_data = await live_method(live_id, dead_id, auth, lives, userdb,
id2uid, recentdb)
olddb.insert_many(players_data)
await auth.close()
print(datetime.datetime.utcnow())
print("finised")
asyncio.get_event_loop().run_until_complete(run())
|
n = int(input())
k = int(input())
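# Finds the k-th smallest value in the n x n multiplication table by binary-searching
# on the answer: for a candidate x, sum(min(x // i, n) for rows i) counts the table
# entries <= x, and the answer is the smallest x whose count reaches k.
# Worked example (illustrative): n = 3, k = 5 -> the sorted table is
# 1, 2, 2, 3, 3, 4, 6, 6, 9, so the program prints 3.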
minX = 1
maxX = n*n
ans = 0
while minX <= maxX:
midX = (minX + maxX) // 2
cnt = 0
for i in range(1, n+1):
cnt += min([midX//i, n])
if cnt >= k:
ans = midX
maxX = midX - 1
else:
minX = midX + 1
print(ans)
|
import os
import __main__
_MAIN_FILE =__main__.__file__
_MAIN_DIR = os.path.dirname(_MAIN_FILE)
_MAIN_FILE_NO_EXT = os.path.splitext(os.path.basename(_MAIN_FILE))[0]
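# The puzzle input is expected at ../input/<main script name>.txt relative to the
# directory containing the running script; e.g. (illustrative) running
# solutions/day01.py reads solutions/../input/day01.txt.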
INPUT_DATA = []
with open(f"{_MAIN_DIR}/../input/{_MAIN_FILE_NO_EXT}.txt", 'r') as inputFile:
INPUT_DATA = [x.rstrip() for x in inputFile.readlines()] |
# global
import jax
import jax.numpy as jnp
from typing import Optional
# local
import ivy
from ivy.functional.backends.jax import JaxArray
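# Every op below that accepts an `out` argument follows the same pattern: compute
# the result with jax.numpy, then write it into `out` via ivy.inplace_update when
# one is supplied, otherwise return the freshly computed array.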
def bitwise_left_shift(x1: JaxArray,
                       x2: JaxArray,
                       out: Optional[JaxArray] = None)\
        -> JaxArray:
    if isinstance(x2, int):
        x2 = jnp.asarray(x2, dtype=x1.dtype)
    ret = jnp.left_shift(x1, x2)
    if ivy.exists(out):
        return ivy.inplace_update(out, ret)
    return ret
def add(x1: JaxArray,
x2: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.add(x1, x2)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def bitwise_xor(x1: JaxArray,
x2: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
if isinstance(x2, int):
x2 = jnp.asarray(x2, dtype=x1.dtype)
ret = jnp.bitwise_xor(x1, x2)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def exp(x: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.exp(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def expm1(x: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.expm1(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def bitwise_invert(x: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.bitwise_not(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def bitwise_and(x1: JaxArray,
x2: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
if isinstance(x2, int):
x2 = jnp.asarray(x2, dtype=x1.dtype)
ret = jnp.bitwise_and(x1, x2)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def ceil(x: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
if 'int' in str(x.dtype):
ret = x
else:
ret = jnp.ceil(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def floor(x: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
if 'int' in str(x.dtype):
ret = x
else:
ret = jnp.floor(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def isfinite(x: JaxArray)\
-> JaxArray:
return jnp.isfinite(x)
def asin(x: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.arcsin(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def isinf(x: JaxArray)\
-> JaxArray:
return jnp.isinf(x)
def equal(x1: JaxArray,
x2: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
if isinstance(x2, int):
x2 = jnp.asarray(x2, dtype=x1.dtype)
ret = jnp.equal(x1, x2)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def greater(x1: JaxArray,
x2: JaxArray,
out: Optional[JaxArray] = None) \
-> JaxArray:
ret = jnp.greater(x1, x2)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def greater_equal(x1: JaxArray,
x2: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.greater_equal(x1, x2)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def less_equal(x1: JaxArray,
x2: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.less_equal(x1, x2)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def asinh(x: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.arcsinh(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def sign(x: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.sign(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def sqrt(x: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.sqrt(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def cosh(x: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.cosh(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def log10(x: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.log10(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def log(x: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.log(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def log2(x: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.log2(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def log1p(x: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.log1p(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def multiply(x1: JaxArray,
x2: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
if isinstance(x2, int):
x2 = jnp.asarray(x2, dtype=x1.dtype)
ret = jnp.multiply(x1, x2)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def isnan(x: JaxArray)\
-> JaxArray:
return jnp.isnan(x)
def less(x1: JaxArray,
x2: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.less(x1, x2)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def cos(x: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.cos(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def logical_xor(x1: JaxArray,
x2: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.logical_xor(x1, x2)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def logical_or(x1: JaxArray,
x2: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.logical_or(x1, x2)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def logical_and(x1: JaxArray,
x2: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.logical_and(x1, x2)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def logical_not(x: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.logical_not(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def divide(x1: JaxArray,
x2: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.divide(x1, x2)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def acos(x: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.arccos(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def acosh(x: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.arccosh(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def sin(x: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.sin(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def negative(x: JaxArray,
out: Optional[JaxArray] = None) -> JaxArray:
ret = jnp.negative(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def not_equal(x1: JaxArray,
x2: JaxArray,
out: Optional[JaxArray] = None) \
-> JaxArray:
if isinstance(x2, int):
x2 = jnp.asarray(x2, dtype=x1.dtype)
ret = jnp.not_equal(x1, x2)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def tanh(x: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.tanh(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def floor_divide(x1: JaxArray,
x2: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
if isinstance(x2, int):
x2 = jnp.asarray(x2, dtype=x1.dtype)
ret = jnp.floor_divide(x1, x2)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def bitwise_or(x1: JaxArray,
x2: JaxArray,
out: Optional[JaxArray] = None) -> JaxArray:
if isinstance(x2, int):
x2 = jnp.asarray(x2, dtype=x1.dtype)
ret = jnp.bitwise_or(x1, x2)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def sinh(x: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.sinh(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def positive(x: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.positive(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def square(x: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.square(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def pow(x1: jnp.ndarray,
x2: jnp.ndarray,
out: Optional[JaxArray] = None)\
-> jnp.ndarray:
if hasattr(x1, 'dtype') and hasattr(x2, 'dtype'):
promoted_type = jnp.promote_types(x1.dtype, x2.dtype)
x1 = x1.astype(promoted_type)
x2 = x2.astype(promoted_type)
ret = jnp.power(x1, x2)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def remainder(x1: JaxArray,
x2: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
if isinstance(x2, int):
x2 = jnp.asarray(x2, dtype=x1.dtype)
ret = jnp.remainder(x1, x2)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def round(x: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
if 'int' in str(x.dtype):
ret = x
else:
ret = jnp.round(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def trunc(x: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
if 'int' in str(x.dtype):
ret = x
else:
ret = jnp.trunc(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def abs(x: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.absolute(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def subtract(x1: JaxArray,
x2: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
if hasattr(x1, 'dtype') and hasattr(x2, 'dtype'):
promoted_type = jnp.promote_types(x1.dtype, x2.dtype)
x1 = x1.astype(promoted_type)
x2 = x2.astype(promoted_type)
ret = jnp.subtract(x1, x2)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def logaddexp(x1: JaxArray,
x2: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.logaddexp(x1, x2)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def bitwise_right_shift(x1: JaxArray,
x2: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
if isinstance(x2, int):
x2 = jnp.asarray(x2, dtype=x1.dtype)
ret = jnp.right_shift(x1, x2)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def tan(x: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.tan(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def atan(x: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.arctan(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def atanh(x: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.arctanh(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def atan2(x1: JaxArray,
x2: JaxArray,
out: Optional[JaxArray] = None) -> JaxArray:
if hasattr(x1, 'dtype') and hasattr(x2, 'dtype'):
promoted_type = jnp.promote_types(x1.dtype, x2.dtype)
x1 = x1.astype(promoted_type)
x2 = x2.astype(promoted_type)
ret = jnp.arctan2(x1, x2)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
# Extra #
# ------#
def minimum(x1: JaxArray,
x2: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.minimum(x1, x2)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def maximum(x1: JaxArray,
x2: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jnp.maximum(x1, x2)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def erf(x: JaxArray,
out: Optional[JaxArray] = None)\
-> JaxArray:
ret = jax.scipy.special.erf(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
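# Illustrative note (not part of the backend): every function above follows the same
# `out` convention -- compute `ret`, then route the result through
# ivy.inplace_update(out, ret) whenever an `out` buffer is supplied. A hypothetical call:
#   x = jnp.array([1.2, -3.7])
#   floor(x)             # returns a new array: [ 1., -4.]
#   buf = jnp.zeros_like(x)
#   floor(x, out=buf)    # same values, handed back via ivy.inplace_update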
|
#
# @lc app=leetcode id=82 lang=python3
#
# [82] Remove Duplicates from Sorted List II
#
# https://leetcode.com/problems/remove-duplicates-from-sorted-list-ii/description/
#
# algorithms
# Medium (39.04%)
# Likes: 2704
# Dislikes: 122
# Total Accepted: 317.3K
# Total Submissions: 807.9K
# Testcase Example: '[1,2,3,3,4,4,5]'
#
# Given the head of a sorted linked list, delete all nodes that have duplicate
# numbers, leaving only distinct numbers from the original list. Return the
# linked list sorted as well.
#
#
# Example 1:
#
#
# Input: head = [1,2,3,3,4,4,5]
# Output: [1,2,5]
#
#
# Example 2:
#
#
# Input: head = [1,1,1,2,3]
# Output: [2,3]
#
#
#
# Constraints:
#
#
# The number of nodes in the list is in the range [0, 300].
# -100 <= Node.val <= 100
# The list is guaranteed to be sorted in ascending order.
#
#
#
# @lc code=start
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
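# Local definition so this file can also run outside the LeetCode harness
# (the judge normally supplies ListNode; this simply mirrors the commented stub above).
class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next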
# Solution 1
# In-place with set
# Time Complexity: O(n)
# Space Complexity: O(n)
class Solution1:
def deleteDuplicates(self, head: ListNode) -> ListNode:
if not head or head.next is None:
return head
tmp = head
counter = {}
while tmp is not None:
counter[tmp.val] = counter.get(tmp.val, 0) + 1
tmp = tmp.next
dummy = ListNode(-1)
dummy.next = head
tmp = head
head = dummy
while tmp is not None:
if counter.get(tmp.val) > 1:
tmp = tmp.next
else:
head.next = tmp
tmp = tmp.next
head = head.next
head.next = None
return dummy.next
# Solution 2
# In-place with several pointers
# Time Complexity: O(n)
# Space Complexity: O(1)
class Solution2:
def deleteDuplicates(self, head: ListNode) -> ListNode:
        if not head or head.next is None:
            return head
# list with at least two nodes
dummy = ListNode(-1)
prev = ListNode(-1)
dummy.next = head
curr = dummy
while head:
if prev.val != head.val and ( (head.next and head.val != head.next.val) or head.next is None ):
# ListNode `head` has different value from prev node and next node
# value is unique
curr.next = head
curr = curr.next
# update two pointers
prev = head
head = head.next
        # cut the link after the last kept node so trailing duplicates are dropped
        curr.next = None
return dummy.next
# @lc code=end
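# Small local test sketch (illustrative; not part of the submission block above,
# and the helper functions below are ad-hoc, not provided by LeetCode).
if __name__ == '__main__':
    def build(values):
        dummy = ListNode(-1)
        tail = dummy
        for v in values:
            tail.next = ListNode(v)
            tail = tail.next
        return dummy.next
    def to_list(node):
        out = []
        while node:
            out.append(node.val)
            node = node.next
        return out
    print(to_list(Solution1().deleteDuplicates(build([1, 2, 3, 3, 4, 4, 5]))))  # expected [1, 2, 5]
    print(to_list(Solution2().deleteDuplicates(build([1, 1, 1, 2, 3]))))        # expected [2, 3]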
|
import factory
from brouwers.kits.tests.factories import ModelKitFactory
from brouwers.users.tests.factories import UserFactory
from ..models import (
KitReview, KitReviewProperty, KitReviewPropertyRating, KitReviewVote
)
class KitReviewFactory(factory.django.DjangoModelFactory):
model_kit = factory.SubFactory(ModelKitFactory)
raw_text = factory.Faker('text')
submitted_on = factory.Faker('date')
reviewer = factory.SubFactory(UserFactory)
class Meta:
model = KitReview
class KitReviewVoteFactory(factory.django.DjangoModelFactory):
review = factory.SubFactory(KitReviewFactory)
voter = factory.SubFactory(UserFactory)
class Meta:
model = KitReviewVote
class KitReviewPropertyFactory(factory.django.DjangoModelFactory):
name = factory.Faker('text')
class Meta:
model = KitReviewProperty
class KitReviewPropertyRatingFactory(factory.django.DjangoModelFactory):
kit_review = factory.SubFactory(KitReviewFactory)
prop = factory.SubFactory(KitReviewPropertyFactory)
class Meta:
model = KitReviewPropertyRating
|
import unittest
from python.src import utils
class test_utils(unittest.TestCase):
def test_add(self):
self.assertEqual(5, utils.add(2,3))
def test_divide(self):
self.assertEqual(2, utils.divide(4,2))
class Test_Calculator(unittest.TestCase):
def test_add(self):
self.assertEqual(5, utils.Calculator().add(2, 3)) |
#!./bin/python
# ----------------------------------------------------------------------
# Compile handlebars templates
# ----------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Python modules
from __future__ import print_function
import os
import glob
import stat
import subprocess
def compile_template(src):
dst = src[:-4] + ".js"
if os.path.exists(dst) and os.stat(src)[stat.ST_MTIME] < os.stat(dst)[stat.ST_MTIME]:
return # Up-to-date
parts = src.split(os.sep)
module = parts[2]
app = parts[3]
name = parts[-1][:-4]
print("%s -> %s" % (src, dst))
tmp = dst + ".tmp"
subprocess.check_call(
[
"handlebars",
"-m", # Minify
"-n",
"NOC.templates.%s_%s" % (module, app),
"-e",
"hbs",
"-f",
tmp,
src,
]
)
with open(tmp) as f:
data = f.read()
os.unlink(tmp)
data += 'Ext.define("NOC.%s.%s.templates.%s", {});' % (module, app, name)
with open(dst, "w") as f:
f.write(data)
def main():
for f in glob.glob("ui/web/*/*/templates/*.hbs"):
compile_template(f)
if __name__ == "__main__":
main()
|
from flask import current_app as app, render_template, render_template_string, request, redirect, abort, jsonify, json as json_mod, url_for, session, Blueprint, Response
from CTFd.utils import authed, ip2long, long2ip, is_setup, validate_url, get_config, set_config, sha512, get_ip, cache, is_on_team
from CTFd.models import db, Users, Solves, Awards, Challenges, WrongKeys, Keys, Tags, Files, Tracking, Pages, Config, Teams
from jinja2.exceptions import TemplateNotFound
from passlib.hash import bcrypt_sha256
from collections import OrderedDict
from itsdangerous import TimedSerializer, BadTimeSignature, Signer, BadSignature
import logging
import os
import re
import sys
import json
import os
import datetime
import urllib
views = Blueprint('views', __name__)
@views.before_request
def redirect_setup():
if request.path.startswith("/static"):
return
if not is_setup() and request.path != "/setup":
return redirect(url_for('views.setup'))
@views.route('/setup', methods=['GET', 'POST'])
def setup():
# with app.app_context():
# admin = Teams.query.filter_by(admin=True).first()
if not is_setup():
if not session.get('nonce'):
session['nonce'] = sha512(os.urandom(10))
if request.method == 'POST':
ctf_name = request.form['ctf_name']
ctf_name = set_config('ctf_name', ctf_name)
## CSS
            css = set_config('css', '')
## Admin user
name = request.form['name']
email = request.form['email']
password = request.form['password']
admin = Users(name, email, password)
admin.admin = True
admin.banned = True
## Index page
page = Pages('index', """<div class="container main-container">
<img class="logo" src="{0}/static/original/img/logo.png" />
<h3 class="text-center">
Welcome to a cool CTF framework written by <a href="https://github.com/ColdHeat">Kevin Chung</a> of <a href="https://github.com/isislab">@isislab</a>
</h3>
<h4 class="text-center">
<a href="{0}/admin">Click here</a> to login and setup your CTF
</h4>
</div>""".format(request.script_root))
#max attempts per challenge
max_tries = set_config("max_tries", 0)
## Team size limit
team_limit = set_config('team_limit', 5)
## Start time
start = set_config('start', None)
end = set_config('end', None)
## Challenges cannot be viewed by unregistered users
view_challenges_unregistered = set_config('view_challenges_unregistered', None)
## Allow/Disallow registration
prevent_registration = set_config('prevent_registration', None)
## Verify emails
verify_emails = set_config('verify_emails', None)
mail_server = set_config('mail_server', None)
mail_port = set_config('mail_port', None)
mail_tls = set_config('mail_tls', None)
mail_ssl = set_config('mail_ssl', None)
mail_username = set_config('mail_username', None)
mail_password = set_config('mail_password', None)
setup = set_config('setup', True)
db.session.add(page)
db.session.add(admin)
db.session.commit()
db.session.close()
app.setup = False
with app.app_context():
cache.clear()
return redirect(url_for('views.static_html'))
return render_template('setup.html', nonce=session.get('nonce'))
return redirect(url_for('views.static_html'))
# Custom CSS handler
@views.route('/static/user.css')
def custom_css():
return Response(get_config("css"), mimetype='text/css')
# Static HTML files
@views.route("/", defaults={'template': 'index'})
@views.route("/<template>")
def static_html(template):
try:
return render_template('%s.html' % template)
except TemplateNotFound:
page = Pages.query.filter_by(route=template).first()
if page:
return render_template('page.html', content=page.html)
else:
abort(404)
@views.route('/teams', defaults={'page':'1'})
@views.route('/teams/<page>')
def teams(page):
page = abs(int(page))
results_per_page = 50
page_start = results_per_page * ( page - 1 )
page_end = results_per_page * ( page - 1 ) + results_per_page
    count = Teams.query.count()
    teams = Teams.query.slice(page_start, page_end).all()
pages = int(count / results_per_page) + (count % results_per_page > 0)
return render_template('teams.html', teams=teams, team_pages=pages, curr_page=page)
@views.route('/user/<userid>', methods=['GET', 'POST'])
def user_view(userid):
if get_config('view_scoreboard_if_authed') and not authed():
return redirect(url_for('auth.login', next=request.path))
user = Users.query.filter_by(id=userid).first_or_404()
solves = Solves.query.filter_by(userid=userid)
awards = Awards.query.filter_by(userid=userid).all()
score = user.score()
place = user.place()
db.session.close()
if request.method == 'GET':
return render_template('user.html', solves=solves, awards=awards, team=user, score=score, place=place)
elif request.method == 'POST':
json = {'solves':[]}
for x in solves:
json['solves'].append({'id':x.id, 'chal':x.chalid, 'team':x.userid})
return jsonify(json)
@views.route('/user/<userid>/remove', methods=['POST'])
def user_remove(userid):
if authed() and is_on_team():
current_user = Users.query.filter_by(id=session.get('id')).first()
target_user = Users.query.filter_by(id=userid).first()
team = Teams.query.filter_by(id=current_user.teamid).first()
if team.captain == target_user.id:
users = Users.query.filter_by(teamid=team.id)
for user in users:
user.teamid = None
db.session.delete(team)
db.session.commit()
db.session.close()
return "-1"
if team.captain == current_user.id:
target_user.teamid = None
db.session.commit()
db.session.close()
return "1"
return "0"
else:
return redirect(url_for('auth.login'))
@views.route('/team/<teamid>', methods=['GET', 'POST'])
def team_view(teamid):
if get_config('view_scoreboard_if_authed') and not authed():
return redirect(url_for('auth.login', next=request.path))
team = Teams.query.filter_by(id=teamid).first_or_404()
users = Users.query.filter_by(teamid=teamid).all()
user_ids = [u.id for u in users]
solves = Solves.query.filter(Solves.userid.in_(user_ids))
db.session.close()
if request.method == 'GET':
return render_template('team.html', team=team, users=users, solves=solves)
# elif request.method == 'POST':
# json = {'solves': []}
# for x in solves:
# json['solves'].append({'id': x.id, 'chal': x.chalid, 'team': x.userid})
# return jsonify(json)
@views.route('/team', methods=['POST', 'GET'])
def team_management():
if authed():
user = Users.query.filter_by(id=session.get('id')).first_or_404()
if user.teamid: ## Already has team
s = Signer(app.config['SECRET_KEY'])
team = Teams.query.filter_by(id=user.teamid).first_or_404()
users = Users.query.filter_by(teamid=user.teamid)
secret = urllib.quote_plus(s.sign(str(team.id)).encode('base64'))
if request.method == "POST":
errors = []
if team.captain == user.id:
website = request.form.get('website')
affiliation = request.form.get('affiliation')
country = request.form.get('country')
if website.strip() and not validate_url(website):
errors.append("That doesn't look like a valid URL")
team.website = website
team.affiliation = affiliation
team.country = country
else:
errors.append('Only team captains can change this information.')
if errors:
return render_template('view_team.html', team=team, users=users, secret=secret, errors=errors)
db.session.commit()
db.session.close()
return redirect(url_for('views.team_management'))
else:
captain = False
if team.captain == user.id:
captain = True
return render_template('view_team.html', team=team, users=users, secret=secret, captain=captain)
else: ## Needs a team
if request.method == "POST":
name = request.form.get('name')
captain = session.get('id')
team = Teams.query.filter_by(name=name).first()
errors = []
if team:
errors.append('That team name is already taken')
t = Teams(name, captain)
if errors:
return render_template('create_team.html', errors=errors, team=t)
db.session.add(t)
db.session.flush()
user.teamid = t.id
db.session.commit()
db.session.close()
return redirect(url_for('views.team_management'))
else:
return render_template('create_team.html')
else:
return redirect(url_for('auth.login'))
@views.route('/join/<team_link>', methods=['POST', 'GET'])
def join_team(team_link):
errors = []
if authed():
user = Users.query.filter_by(id=session.get('id')).first_or_404()
s = Signer(app.config['SECRET_KEY'])
team_id = s.unsign(urllib.unquote_plus(team_link.decode('base64')))
team = Teams.query.filter_by(id=team_id).first_or_404()
user_ids = [u.id for u in Users.query.with_entities(Users.id).filter_by(teamid=team.id)]
team_captain = Teams.query.filter_by(captain=user.id).first()
print team_captain
if request.method == "POST":
if len(user_ids) >= get_config('team_limit'):
errors.append('This team is full')
if team_captain:
errors.append("You are captain of another team, you can't join another team")
if errors:
return render_template('join_team.html', team=team, errors=errors)
user.teamid = int(team.id)
db.session.commit()
db.session.close()
return redirect(url_for('views.team_management'))
else:
if len(user_ids) >= get_config('team_limit'):
errors.append('This team is full')
if user.teamid:
errors.append('You are already on a team. <br>Joining a new team will take all your solves with you.')
if team_captain:
errors.append("You are captain of another team, you can't join another team")
return render_template('join_team.html', team=team, errors=errors)
else:
return redirect(url_for('auth.login', next=request.path))
@views.route('/profile', methods=['POST', 'GET'])
def profile():
if authed():
if request.method == "POST":
errors = []
name = request.form.get('name')
email = request.form.get('email')
try:
share = bool(request.form.get('share', None))
except (ValueError, TypeError):
share = None
user = Users.query.filter_by(id=session['id']).first()
if not get_config('prevent_name_change'):
names = Users.query.filter_by(name=name).first()
name_len = len(request.form['name']) == 0
emails = Users.query.filter_by(email=email).first()
valid_email = re.match(r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)", email)
if ('password' in request.form.keys() and not len(request.form['password']) == 0) and \
(not bcrypt_sha256.verify(request.form.get('confirm').strip(), user.password)):
errors.append("Your old password doesn't match what we have.")
if not valid_email:
errors.append("That email doesn't look right")
if not get_config('prevent_name_change') and names and name!=session['username']:
errors.append('That team name is already taken')
if emails and emails.id != session['id']:
errors.append('That email has already been used')
if not get_config('prevent_name_change') and name_len:
errors.append('Pick a longer team name')
# if website.strip() and not validate_url(website):
# errors.append("That doesn't look like a valid URL")
if len(errors) > 0:
return render_template('profile.html', name=name, email=email, share=share, errors=errors)
else:
user = Users.query.filter_by(id=session['id']).first()
if not get_config('prevent_name_change'):
user.name = name
if user.email != email.lower():
user.email = email.lower()
if get_config('verify_emails'):
user.verified = False
session['username'] = user.name
if 'password' in request.form.keys() and not len(request.form['password']) == 0:
user.password = bcrypt_sha256.encrypt(request.form.get('password'))
user.share = share
db.session.commit()
db.session.close()
return redirect(url_for('views.profile'))
else:
user = Users.query.filter_by(id=session['id']).first()
name = user.name
email = user.email
share = user.share
# website = user.website
# affiliation = user.affiliation
# country = user.country
prevent_name_change = get_config('prevent_name_change')
confirm_email = get_config('verify_emails') and not user.verified
return render_template('profile.html', name=name, email=email, share=share, prevent_name_change=prevent_name_change, confirm_email=confirm_email)
else:
return redirect(url_for('auth.login'))
|
import tensorflow as tf
import numpy as np
import fileinput
import gzip
vocab = {"": 0}
def read_data(file_name, n):
data = []
finput = fileinput.input(file_name, openhook=gzip.open)
i = 0
for line in finput:
line = line.decode()
elems = line.strip().split("\t")
if len(elems) == 2:
data.append(elems)
for word in elems[0].split(" "):
if word not in vocab:
vocab[word] = len(vocab)
i += 1
if i > n:
break
finput.close()
return data
training = read_data("train-shuffled.txt.gz", 10000)
dev = read_data("dev-shuffled.txt.gz", 1000)
print(len(vocab))
max_len = 20
n_inputs = 50
n_neurons = 60
n_steps = 3
n_outputs = 2
n_vocab = len(vocab)
n_embedding = 150
learning_rate = 0.001
init_embeds = tf.random_uniform([n_vocab, n_embedding], -1.0, 1.0)
embedding = tf.Variable(init_embeds)
train_inputs = tf.placeholder(tf.int32, shape=[None, n_steps], name="train_inputs")
inputs = tf.nn.embedding_lookup(embedding, train_inputs)
inputs = tf.unstack(inputs, num=n_steps, axis=1)
y = tf.placeholder(tf.int32, [None], name="y")
#basic_cell = tf.contrib.rnn.BasicRNNCell(num_units=n_neurons)
basic_cell = tf.keras.layers.SimpleRNNCell(units=n_neurons)
outputs, states = tf.nn.static_rnn(basic_cell, inputs, dtype=tf.float32)
outputs = tf.concat(outputs, axis=1)  # -> [batch, n_neurons * n_steps]; reshaping the stacked step list would interleave examples
logits = tf.layers.dense(outputs, n_outputs, activation=tf.math.sigmoid)
xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
loss = tf.reduce_mean(xentropy)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(loss)
correct = tf.nn.in_top_k(logits, y, 1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
init = tf.global_variables_initializer()
n_epochs = 1000
batch_size = 200
def make_vec(sent):
vec = np.zeros(n_steps)
i = 0
for w in sent.split(" "):
if i >= n_steps:
break
vec[i] = vocab[w]
i += 1
return vec
x_dev = [make_vec(t[0]) for t in dev]
y_dev = [int(t[1]) for t in dev]
x_train = [make_vec(t[0]) for t in training]
y_train = [int(t[1]) for t in training]
print(sum(y_train))
print(len(y_train))
# Add ops to save and restore all the variables.
saver = tf.train.Saver()
with tf.Session() as sess:
init.run()
for epoch in range(n_epochs):
for iteration in range(len(training) // batch_size):
            start = iteration * batch_size  # non-overlapping mini-batches
            x_batch = [make_vec(t[0]) for t in training[start:start + batch_size]]
            y_batch = [int(t[1]) for t in training[start:start + batch_size]]
sess.run(training_op, feed_dict={train_inputs: x_batch, y: y_batch})
acc_train = sess.run(accuracy, feed_dict={train_inputs: x_train, y: y_train})
acc_test = sess.run(accuracy, feed_dict={train_inputs: x_dev, y: y_dev})
print("%d: %0.3f // %0.3f" % (epoch, acc_train, acc_test))
save_path = saver.save(sess, "./model.ckpt")
print("Model saved in path: %s" % save_path)
|
#!/usr/bin/env python
# coding: utf-8
# # Welcome to M-LSD demo
#
# In this demo, you can simply run line segment detection and box detection with our M-LSD models.
#
# Thanks to [gradio](https://gradio.app/), this demo is interactive!
#
# - For the line segment detection demo: Launch line segment detection with M-LSD
#
# - For the box detection demo: Launch box detection with M-LSD
# ## Preliminaries
# This section contains the initial setup.
# Please, run it first.
# ### Clone M-LSD repository
# ### Install gradio
# ### Initialize tflite model and functions
# In[1]:
import os
import subprocess
from PIL import Image
import cv2
import numpy as np
import tensorflow as tf
from utils import pred_lines, pred_squares
#import gradio as gr
import matplotlib.pyplot as plt
import time
import math
from common import draw_str
from urllib.request import urlretrieve
# Load MLSD 512 Large FP32 tflite
model_name = '/home/cheng/mlsd/tflite_models/M-LSD_512_tiny_fp32.tflite'
interpreter = tf.lite.Interpreter(model_path=model_name)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
# ## Launch line segment detection with M-LSD
# In[2]:
def wrapper_for_LSD(img_input, score_thr, dist_thr):
lines = pred_lines(img_input, interpreter, input_details, output_details, input_shape=[512, 512], score_thr=score_thr, dist_thr=dist_thr)
img_output = img_input.copy()
# draw lines
for line in lines:
x_start, y_start, x_end, y_end = [int(val) for val in line]
cv2.line(img_output, (x_start, y_start), (x_end, y_end), [0,255,255], 2)
return img_output,lines
class BestLineDetection(object):
def __init__(self, angle=90,tolerance=5,scale=100,score_thr=0.2,dist_thr=10):
## A flag that indicates if the output image shall be drawn
self.draw_img = True
self.scale_percent = scale # rescale the resolution if needed
self.angle = angle
self.range = tolerance # angle error tolerance
self.angle_weight=0.5
self.width_weight=3
self.length_weight=0.1
# postion weight
self.score_thr=score_thr
self.dist_thr=dist_thr
self.minWidth= 1
self.minLength = 50
def setAngle(self,new_angle):
self.angle=new_angle
def setTolerance (self, new_tolerance):
self.range=new_tolerance
    # pre-process the input image by resizing, grayscaling, running LSD and calculating the line segments' angles, lengths and credits
def processImage(self, input_image):
# Resized Image
#resized_img = self.resize_img(input_image, self.scale_percent)
# Convert to grayscale
#img_gray = cv2.cvtColor(resized_img, cv2.COLOR_BGR2GRAY)
# LSD
segments = self.LSD(input_image)
if segments.shape[0]==0:
lines_info=segments.reshape(0,8)
else:
angles = line_angle(segments)
lengths = line_length(segments)
lines_info = np.append(segments, angles, axis=1)
lines_info = np.append(lines_info, lengths, axis=1)
credits = line_credit(lines_info,self.angle_weight,self.width_weight,self.length_weight)
lines_info = np.append(lines_info, credits, axis=1)
return lines_info
def resize_img(self, img, scale_percent):
width = int(img.shape[1] * scale_percent / 100)
height = int(img.shape[0] * scale_percent / 100)
dim = (width, height)
resized_img = cv2.resize(img, dim, interpolation=cv2.INTER_AREA)
return resized_img
def LSD(self, input_image):
# [point1.x, point1.y, point2.x, point2.y, width]
output_img,segments=wrapper_for_LSD(input_image, self.score_thr, self.dist_thr)
widths=np.ones((segments.shape[0],1))
segments=np.append(segments,widths,axis=1)
return segments
def bestline_picker(self, input_image):
lines_info=self.processImage(input_image)
# [point1.x, point1.y, point2.x, point2.y, width, angle, length, credit]
copy=np.copy(input_image)
bestline = pick_bestline(self.angle, lines_info,self.range, self.minWidth, self.minLength)
if bestline[6]!=0:
output_image = draw_bestline(copy, bestline)
#draw_str(output_image, (20, 20), 'Angle_drift: %d' % (bestline[5] - self.angle) + ' Length: %d' % bestline[6])
#draw_str(output_image, (20, 40), 'delta_x: %d' % (bestline[0] - bestline[2]))
#print("Deata x =", bestline[0] - bestline[2])
else:
output_image = input_image
return bestline, output_image, lines_info
# Caculation methods of line lengths, angles and credits
def line_angle(segments):
rows = segments.shape[0]
x1, y1, x2, y2 = segments[:,0],segments[:,1],segments[:,2],segments[:,3]
angles = np.degrees(np.arctan2(y1 - y2, x2 - x1)) # y axis reverses in pixel coordinate, so y1-y2
    angles = np.where(angles >= 0, angles, angles + 180)  # fold negative angles into [0, 180)
return angles.reshape(rows, -1)
def line_length(segments):
rows = segments.shape[0]
point1 = segments[:,0:2]
point2 = segments[:,2:4]
lengths=np.linalg.norm(point2-point1,axis=-1) # apply L2 norm to each row
return lengths.reshape(rows, -1)
def line_credit(lines_info,angle_weight, width_weight, length_weight):
rows = lines_info.shape[0]
credits = angle_weight*lines_info[:,5]+width_weight*lines_info[:,4]+length_weight*lines_info[:,6]
return credits.reshape(rows,-1)
def draw_bestline(img, lines_info):
coordinates = lines_info[0:4].reshape([2, 2]).astype(int)
best_img = cv2.line(img, (coordinates[0][0], coordinates[0][1]), (coordinates[1][0], coordinates[1][1]),
(255, 0, 0), 3)
return best_img
def pick_bestline(theta, lines_info, tolerance, minWidth, minLength): # pick the line with the most credits within valid range
# valid range: theta-range<=angle<= theta+range, width>=minWidth, length>=minLength
closelines=lines_info[(lines_info[:, 5] >= (theta - tolerance)) & (lines_info[:, 5] <= (theta + tolerance))
& (lines_info[:,4]>=minWidth)
& (lines_info[:, 6]>=minLength)]
if closelines.size == 0: # if there is no line within the range
#flag = False
return np.zeros(8)
else:
#flag = True
index_close=np.argmax(closelines[:, 7])
index=np.where(np.all(lines_info==closelines[index_close],axis=1))
return lines_info[index[0][0]]
def pick_sameline(prev_bestline, curr_lines):
dists=[]
rows=curr_lines.shape[0]
#print(prev_bestline[:2].astype(int))
for i in curr_lines:
distance= min((np.linalg.norm(i[:2] - prev_bestline[:2])),(np.linalg.norm(i[2:4] - prev_bestline[:2])))
#print(distance)
dists.append(distance)
dists=np.array(dists).reshape(rows, -1)
curr_lines=np.append(curr_lines,dists,axis=1)
closelines=curr_lines[curr_lines[:,8]<20]
#print(closelines.astype(int))
if closelines.size==0:
print("No same line!")
return np.zeros(8),closelines, True
else:
index_nearest=np.abs(closelines[:,5]-prev_bestline[5]).argmin()
index=np.where(np.all(curr_lines==closelines[index_nearest],axis=1))
return curr_lines[index[0][0]],closelines, False
def drift_detection (bestline_1, bestline_2, image): #less priority
ratio = 0.01
x_total = image.shape[1]
if (bestline_1[1] > bestline_1[3]):
x_1 = bestline_1[0]
else:
x_1 = bestline_1[2]
if (bestline_2[1] > bestline_2[3]):
x_2 = bestline_2[0]
else:
x_2 = bestline_2[2]
lateral_drift = x_1 - x_2
# if (math.dist(bestline_1[:2],bestline_2[:2])<math.dist(bestline_1[:2],bestline_2[2:4])):
# lateral_drift= bestline_1[0]-bestline_2[0]
# else:
# lateral_drift= bestline_1[0]-bestline_2[2]
    # ignore the drift when it is too large (likely a different line was picked)
if abs(lateral_drift) <= x_total * ratio:
return lateral_drift
else:
return 0
# In[8]:
line_detection_object = BestLineDetection(90,5,80,0.25,20)
image = cv2.imread("/home/cheng/images/frame1000.jpg")
print(image.shape)
start = time.time()
lines_info=line_detection_object.processImage(image)
#print(lines_info.shape)
prev_bestline,output_img,curr_lines =line_detection_object.bestline_picker(image)
end = time.time()
print("time elapsed = ", end - start)
print(output_img.shape)
canvas=np.zeros_like(output_img)
for i in lines_info:
pt1 = (int(i[0]),int(i[1]))
pt2 = (int(i[2]),int(i[3]))
width = int(i[4])
cv2.line(canvas, pt1, pt2, (255, 255, 255), 2)
plt.figure(figsize=(30,15))
plt.imshow(canvas)
plt.show()
plt.figure(figsize=(30,15))
plt.imshow(output_img)
plt.show()
# In[ ]:
|
# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 et:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Tests for the universal datalad's annex customremote"""
import glob
import os.path as op
from datalad.distribution.dataset import Dataset
from datalad.downloaders.tests.utils import get_test_providers
from datalad.support.exceptions import CommandError
from datalad.support.external_versions import external_versions
from datalad.tests.utils import (
assert_in,
assert_raises,
eq_,
serve_path_via_http,
skip_if_no_network,
with_tempfile,
with_tree,
)
@with_tempfile()
@skip_if_no_network
def check_basic_scenario(url, d):
ds = Dataset(d).create()
annex = ds.repo
# TODO skip if no boto or no credentials
get_test_providers(url) # so to skip if unknown creds
# Let's try to add some file which we should have access to
ds.download_url(url)
ds.save()
    # git-annex got a fix where it stopped replacing '-' in the middle of the filename.
    # Let's cater to developers who might have some intermediate version that is not
    # easy to compare -- we will just check that there is only one file and that it
    # matches what we expect when outside of the development versions range:
filenames = glob.glob(op.join(d, '3versions[-_]allversioned.txt'))
eq_(len(filenames), 1)
filename = op.basename(filenames[0])
if external_versions['cmd:annex'] < '8.20200501':
assert_in('_', filename)
# Date after the fix in 8.20200501-53-gcabbc91b1
elif external_versions['cmd:annex'] >= '8.20200512':
assert_in('-', filename)
else:
pass # either of those is ok
whereis1 = annex.whereis(filename, output='full')
eq_(len(whereis1), 2) # here and datalad
annex.drop(filename)
whereis2 = annex.whereis(filename, output='full')
eq_(len(whereis2), 1) # datalad
# if we provide some bogus address which we can't access, we shouldn't pollute output
with assert_raises(CommandError) as cme:
annex.add_urls([url + '_bogus'])
assert_in('addurl: 1 failed', cme.exception.stderr)
# unfortunately with_tree etc decorators aren't generators friendly thus
# this little adapters to test both on local and s3 urls
@with_tree(tree={'3versions-allversioned.txt': "somefile"})
@serve_path_via_http
def test_basic_scenario_local_url(p, local_url):
check_basic_scenario("%s3versions-allversioned.txt" % local_url)
def test_basic_scenario_s3():
check_basic_scenario('s3://datalad-test0-versioned/3versions-allversioned.txt')
|
n1 = float(input('Enter the first grade: '))
while n1 < 0 or n1 > 10:
    n1 = float(input('Invalid value, please enter another: '))
n2 = float(input('Enter the second grade: '))
while n2 < 0 or n2 > 10:
    n2 = float(input('Invalid value, please enter another: '))
med = (n1+n2)/2
if med < 5.0:
    print('Your average was \033[32m{:.2f}\033[m and you \033[1;31mFAILED\033[m'.format(med))
elif 5 <= med <= 6.9:
    print('Your average was \033[32m{:.2f}\033[m and you are in \033[1;35mRECOVERY\033[m'.format(med))
else:
    print('Your average was \033[32m{:.2f}\033[m and you \033[1;34mPASSED\033[m'.format(med))
|
import argparse
import sys
from os.path import dirname, join, realpath
import tables
sys.path.insert(0, realpath(join(dirname(__file__), '../..')))
from base.config_loader import ConfigLoader
from base.data.data_table import COLUMN_MOUSE_ID, COLUMN_LABEL
def parse():
parser = argparse.ArgumentParser(description='script to count samples per dataset')
parser.add_argument('--experiment', '-e', required=True,
help='name of experiment to load config from')
return parser.parse_args()
def get_mouse_from_id(mouse_id: int):
"""because mice_id are stored as int in datafile, they must be transformed back to the original names"""
return 'M' + str(mouse_id)[:2]
def main():
# datasets in the transformed data file, dataset without a table in the file are ignored
datasets = ['train', 'valid', 'test']
# open datafile
with tables.open_file(config.DATA_FILE) as file:
for ds in datasets:
            # load table; skip this dataset if it does not exist in the file
            if ds not in file.root:
                continue
            table = file.root[ds]
n_total = len(table[:]) # total number of samples in dataset
print('dataset:', ds)
# convert mice_id back to mice names and log them
print('mice:', set(map(get_mouse_from_id, table[:][COLUMN_MOUSE_ID])))
print('{:12s}{}\t{}'.format('stage', 'relative', 'total'))
# count number of samples per stage
for s in config.STAGES:
n = len([row[COLUMN_MOUSE_ID] for row in table.where('({}=="{}")'.format(COLUMN_LABEL, s))])
print('{:12s}{:6.2%}\t{:6d}'.format(s, n / n_total, n))
print()
if __name__ == '__main__':
args = parse()
config = ConfigLoader(args.experiment, create_dirs=False) # load given config
main()
|
import runway
import keras_ocr
import numpy as np
@runway.setup
def setup():
return keras_ocr.pipeline.Pipeline()
@runway.command('recognize', inputs={'image': runway.image}, outputs={'bboxes': runway.array(runway.image_bounding_box), 'labels': runway.array(runway.text)})
def recognize(model, inputs):
width, height = inputs['image'].size
predictions = model.recognize(np.array(inputs['image']))
labels = []
bboxes = []
for label, bbox in predictions:
labels.append(label)
min_x, min_y = bbox.min(0)
max_x, max_y = bbox.max(0)
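        # normalise the corner coordinates to [0, 1] relative to the image size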
bboxes.append([
min_x / width,
min_y / height,
max_x / width,
max_y / height
])
return {'labels': labels, 'bboxes': bboxes}
if __name__ == "__main__":
runway.run() |
import functools
from gaphas.view import GtkView
from gi.repository import Gdk, GLib, Gtk
from gaphor.core import Transaction
from gaphor.core.modeling import Presentation
def shortcut_tool(view, modeling_language, event_manager):
ctrl = Gtk.EventControllerKey.new(view)
ctrl.connect("key-pressed", on_delete, event_manager)
ctrl.connect("key-pressed", on_shortcut, modeling_language)
return ctrl
def on_delete(ctrl, keyval, keycode, state, event_manager):
"""Handle the 'Delete' key.
    This cannot be handled through GTK's accelerators directly,
    because the key would otherwise interfere with text editing.
"""
view: GtkView = ctrl.get_widget()
if keyval in (Gdk.KEY_Delete, Gdk.KEY_BackSpace) and (
state == 0 or state & Gdk.ModifierType.MOD2_MASK
):
delete_selected_items(view, event_manager)
return True
return False
def delete_selected_items(view: GtkView, event_manager):
with Transaction(event_manager):
items = view.selection.selected_items
for i in list(items):
if isinstance(i, Presentation):
i.unlink()
else:
if i.diagram:
i.diagram.remove(i)
def on_shortcut(ctrl, keyval, keycode, state, modeling_language):
# accelerator keys are lower case. Since we handle them in a key-press event
# handler, we'll need the upper-case versions as well in case Shift is pressed.
view: GtkView = ctrl.get_widget()
for _title, items in modeling_language.toolbox_definition:
for action_name, _label, _icon_name, shortcut, *rest in items:
if not shortcut:
continue
keys, mod = parse_shortcut(shortcut)
if state == mod and keyval in keys:
view.get_toplevel().get_action_group("diagram").lookup_action(
"select-tool"
).change_state(GLib.Variant.new_string(action_name))
return True
return False
_upper_offset = ord("A") - ord("a")
@functools.lru_cache(maxsize=None)
def parse_shortcut(shortcut):
key, mod = Gtk.accelerator_parse(shortcut)
return (key, key + _upper_offset), mod
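# Illustrative example (a sketch only; for ASCII letters the Gdk keyval equals the
# character code, so adding ord('A') - ord('a') yields the shifted variant):
#   parse_shortcut("<Control>z") -> ((ord("z"), ord("Z")), Gdk.ModifierType.CONTROL_MASK)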
|
"""
Given a reference to the head node of a singly-linked list, write a function
that reverses the linked list in place. The function should return the new head
of the reversed list.
In order to do this in O(1) space (in-place), you cannot make a new list, you
need to use the existing nodes.
In order to do this in O(n) time, you should only have to traverse the list
once.
*Note: If you get stuck, try drawing a picture of a small linked list and
running your function by hand. Does it actually work? Also, don't forget to
consider edge cases (like a list with only 1 or 0 elements).*
"""
from guided import add_to_head, add_to_tail, add_to_next, print_list
class LinkedListNode:
def __init__(self, value):
self.value = value
self.next = None
def reverse(head_of_list):
    # Iteratively re-point each node at its predecessor; O(n) time, O(1) space.
    curr_node = head_of_list
    prev_node = None
    while curr_node is not None:
        # remember the rest of the list before we overwrite the link
        next_node = curr_node.next
        # point the current node backwards
        curr_node.next = prev_node
        # move both pointers forward
        prev_node = curr_node
        curr_node = next_node
    # prev_node ends up on the old tail, which is the new head (None for an empty list)
    return prev_node
linked_list = LinkedListNode(
3
) # or head also this is a new instance of the class LinkedListNode
tail = linked_list
linked_list = add_to_head(linked_list, 2)
linked_list = add_to_head(linked_list, 5)
middle = linked_list
linked_list = add_to_head(linked_list, 6)
linked_list = add_to_head(linked_list, 0)
linked_list = add_to_head(linked_list, 2)
reverse_list = reverse(linked_list)
print(reverse_list)
print("--------")
print(linked_list)
|
from django.conf import settings
from django.conf.urls.static import static
from django.http import HttpResponse
from django.urls import include, path
from django.utils.translation import ugettext
from django.views.decorators.csrf import csrf_exempt
from helusers.admin_site import admin
from common.utils import get_api_version
from palvelutarjotin.views import SentryGraphQLView
admin.site.index_title = " ".join([ugettext("Beta Kultus API"), get_api_version()])
urlpatterns = [
path("admin/", admin.site.urls),
path("reports/", include("reports.urls")),
path("pysocial/", include("social_django.urls", namespace="social")),
path("helauth/", include("helusers.urls")),
path(
"graphql",
csrf_exempt(
SentryGraphQLView.as_view(
graphiql=settings.ENABLE_GRAPHIQL or settings.DEBUG
)
),
),
]
#
# Kubernetes liveness & readiness probes
#
def healthz(*args, **kwargs):
return HttpResponse(status=200)
def readiness(*args, **kwargs):
return HttpResponse(status=200)
urlpatterns += [path("healthz", healthz), path("readiness", readiness)]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
from mock import patch
from pysparkling import Context
from jobs.wordcount import analyze
@patch('jobs.wordcount.WordCountJobContext.initalize_counter')
@patch('jobs.wordcount.WordCountJobContext.inc_counter')
def test_wordcount_analyze(_, __):
result = analyze(Context())
assert len(result) == 327
assert result[:6] == [('ut', 17), ('eu', 16), ('vel', 14), ('nec', 14), ('quis', 12), ('vitae', 12)]
|
from setuptools import setup, find_packages
VERSION='0.1'
long_description='A tool that stores AWS events from a resource to elastic search.'
packages=[
'elasticevents',
]
install_requires=[
'boto3',
'click',
]
def main():
setup_info = dict(
name='Elasticevents',
version=VERSION,
author='Victor Palade',
description='AWS Tools',
long_description=long_description,
license='Apache-2.0',
packages=packages,
install_requires=install_requires,
zip_safe=False,
)
setup(**setup_info)
if __name__ == '__main__':
main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020 by Murray Altheim. All rights reserved. This file is part of
# the Robot OS project and is released under the "Apache Licence, Version 2.0".
# Please see the LICENSE file included as part of this package.
#
# author: altheim
# created: 2020-01-18
# modified: 2020-10-28
#
# Wraps the functionality of a Pimoroni IO Expander Breakout board, providing
# access to the values of the board's pins, which outputs 0-255 values for
# analog pins, and a 0 or 1 for digital pins.
#
# source: /usr/local/lib/python3.7/dist-packages/ioexpander/__init__.py
#
import sys
import ioexpander as io
from colorama import init, Fore, Style
init()
from lib.logger import Logger, Level
from lib.event import Event
# ..............................................................................
class MockIoe(object):
def __init__(self, trigger_pin, level):
self._log = Logger('mock-ioe', level)
self._trigger_pin = trigger_pin
self._active_value = 0.60
self._value = 0.0
self._log.info('ready.')
def set_default_value(self, value):
self._value = value
def input(self, pin):
if pin == self._trigger_pin:
            self._log.info(Fore.YELLOW + 'returning triggered value {:5.2f} for pin {:d}.'.format(self._active_value, pin))
return self._active_value
else:
self._log.info(Style.DIM + 'returning mock value 0.0 for pin {:d}.'.format(pin))
return self._value
# ..............................................................................
class MockIoExpander():
'''
A mock of an IO Expander board.
'''
def __init__(self, config, level):
super().__init__()
if config is None:
raise ValueError('no configuration provided.')
_config = config['ros'].get('io_expander')
self._log = Logger('mock-io-expander', level)
# infrared
self._port_side_ir_pin = _config.get('port_side_ir_pin') # pin connected to port side infrared
self._port_ir_pin = _config.get('port_ir_pin') # pin connected to port infrared
self._center_ir_pin = _config.get('center_ir_pin') # pin connected to center infrared
self._stbd_ir_pin = _config.get('stbd_ir_pin') # pin connected to starboard infrared
self._stbd_side_ir_pin = _config.get('stbd_side_ir_pin') # pin connected to starboard side infrared
self._log.info('infrared pin assignments:\t' \
+ Fore.RED + ' port side={:d}; port={:d};'.format(self._port_side_ir_pin, self._port_ir_pin) \
+ Fore.BLUE + ' center={:d};'.format(self._center_ir_pin) \
+ Fore.GREEN + ' stbd={:d}; stbd side={:d}'.format(self._stbd_ir_pin, self._stbd_side_ir_pin))
# moth/anti-moth
self._port_moth_pin = _config.get('port_moth_pin') # pin connected to port moth sensor
self._stbd_moth_pin = _config.get('stbd_moth_pin') # pin connected to starboard moth sensor
self._log.info('moth pin assignments:\t' \
+ Fore.RED + ' moth port={:d};'.format(self._port_moth_pin) \
+ Fore.GREEN + ' moth stbd={:d};'.format(self._stbd_moth_pin))
# bumpers
self._port_bmp_pin = _config.get('port_bmp_pin') # pin connected to port bumper
self._cntr_bmp_pin = _config.get('center_bmp_pin') # pin connected to center bumper
self._stbd_bmp_pin = _config.get('stbd_bmp_pin') # pin connected to starboard bumper
self._log.info('bumper pin assignments:\t' \
+ Fore.RED + ' port={:d};'.format(self._port_bmp_pin) \
+ Fore.BLUE + ' center={:d};'.format(self._cntr_bmp_pin) \
+ Fore.GREEN + ' stbd={:d}'.format(self._stbd_bmp_pin))
# configure board
self._ioe = MockIoe(self._center_ir_pin, level)
self._port_bmp_pump = 0
self._cntr_bmp_pump = 0
self._stbd_bmp_pump = 0
self._pump_limit = 10
self._log.info('ready.')
# infrared sensors .........................................................
def get_port_side_ir_value(self):
return int(round(self._ioe.input(self._port_side_ir_pin) * 100.0))
def get_port_ir_value(self):
return int(round(self._ioe.input(self._port_ir_pin) * 100.0))
def get_center_ir_value(self):
return int(round(self._ioe.input(self._center_ir_pin) * 100.0))
def get_stbd_ir_value(self):
return int(round(self._ioe.input(self._stbd_ir_pin) * 100.0))
def get_stbd_side_ir_value(self):
return int(round(self._ioe.input(self._stbd_side_ir_pin) * 100.0))
# moth/anti-moth ...........................................................
def get_moth_values(self):
return [ int(round(self._ioe.input(self._port_moth_pin) * 100.0)), \
int(round(self._ioe.input(self._stbd_moth_pin) * 100.0)) ]
# bumpers ..................................................................
def get_port_bmp_value(self):
return ( self._ioe.input(self._port_bmp_pin) == 0 )
# return ( self._ioe.input(self._port_bmp_pin) == 0 )
def get_center_bmp_value(self):
_value = self._ioe.input(self._cntr_bmp_pin)
if _value == 0:
print(Fore.GREEN + 'get_center_bmp_value({}): {}'.format(type(_value), _value) + Style.RESET_ALL)
return True
else:
print(Fore.RED + 'get_center_bmp_value({}): {}'.format(type(_value), _value) + Style.RESET_ALL)
return False
# return ( _value == 0 )
# return ( self._ioe.input(self._cntr_bmp_pin) == 0 )
def get_stbd_bmp_value(self):
return ( self._ioe.input(self._stbd_bmp_pin) == 0 )
# return ( self._ioe.input(self._stbd_bmp_pin) == 0 )
# ..........................................................................
# raw values are unprocessed values from the IO Expander (used for testing)
# raw infrared sensors .....................................................
def get_raw_port_side_ir_value(self):
return self._ioe.input(self._port_side_ir_pin)
def get_raw_port_ir_value(self):
return self._ioe.input(self._port_ir_pin)
def get_raw_center_ir_value(self):
return self._ioe.input(self._center_ir_pin)
def get_raw_stbd_ir_value(self):
return self._ioe.input(self._stbd_ir_pin)
def get_raw_stbd_side_ir_value(self):
return self._ioe.input(self._stbd_side_ir_pin)
# raw moth sensors .........................................................
def get_raw_moth_values(self):
return [ self._ioe.input(self._port_moth_pin), self._ioe.input(self._stbd_moth_pin) ]
def get_raw_port_moth_value(self):
return self._ioe.input(self._port_moth_pin)
def get_raw_stbd_moth_value(self):
return self._ioe.input(self._stbd_moth_pin)
# raw bumpers ..............................................................
def get_raw_port_bmp_value(self):
return self._ioe.input(self._port_bmp_pin)
def get_raw_center_bmp_value(self):
return self._ioe.input(self._cntr_bmp_pin)
def get_raw_stbd_bmp_value(self):
return self._ioe.input(self._stbd_bmp_pin)
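    # Minimal usage sketch (illustrative only; the pin numbers and config layout are
    # assumptions mirroring the keys read in __init__, not values from a real config):
    #   _cfg = {'ros': {'io_expander': {
    #       'port_side_ir_pin': 1, 'port_ir_pin': 2, 'center_ir_pin': 3,
    #       'stbd_ir_pin': 4, 'stbd_side_ir_pin': 5,
    #       'port_moth_pin': 6, 'stbd_moth_pin': 7,
    #       'port_bmp_pin': 8, 'center_bmp_pin': 9, 'stbd_bmp_pin': 10}}}
    #   _expander = MockIoExpander(_cfg, Level.INFO)
    #   _expander.get_center_ir_value()   # mock reading, scaled to 0-100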
# EOF
|
import logging
from .File import File
from .Profile import Profile
from .Root import Root
from .session import *
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO)
|
"""
test_terminal
~~~~~~~~~~~~~~~~
"""
import unittest
from domonic.decorators import silence
from domonic.terminal import *
class TestCase(unittest.TestCase):
def test_bash_ls(self):
files = ls()
# print(files)
assert "domonic" in files
# return
self.assertIn("domonic", ls())
print(ls("-al"))
print(ls("../"))
for line in ls():
print("line:", line)
# for f in ls():
# try:
# print(f)
# print(cat(f))
# except Exception as e:
# pass
def test_bash_pwd(self):
thedir = pwd()
# print("OYI::", thedir)
self.assertIn("domonic", thedir)
def test_bash_cd(self):
pass # TODO - need to change github action
# print(cd('../')) # < CD does not run on terminal
# thedir_aftercd = pwd()
# print(thedir_aftercd)
# self.assertTrue('domonic' not in thedir_aftercd)
# print(cd('domonic'))
# thedir_aftercd = pwd()
# print(thedir_aftercd)
# self.assertTrue('domonic' in thedir_aftercd)
def test_bash_mkdir(self):
try:
mkdir("somedir")
self.assertIn("somedir", ls())
except Exception as e:
print(e)
finally:
# rm('-r somedir')
rmdir("somedir")
self.assertTrue("somedir" not in ls())
def test_bash_touch(self):
try:
touch("somefile")
self.assertTrue("somefile" in ls())
except Exception as e:
print(e)
finally:
rm("somefile")
self.assertTrue("somefile" not in ls())
def test_bash_mv(self):
try:
touch("somefile")
mv("somefile temp")
except Exception as e:
print(e)
finally:
self.assertTrue("somefile" not in ls())
self.assertTrue("temp" in ls())
rm("temp")
def test_bash_cp(self):
try:
touch("somefile")
cp("somefile temp")
except Exception as e:
print(e)
finally:
self.assertTrue("temp" in ls())
rm("somefile")
rm("temp")
@silence
def test_bash_git(self):
# print(git('status'))
self.assertIn("master", git("status"))
def test_bash_general(self):
print(man("ls"))
print(echo("test"))
print(df())
print(du())
print(ps())
# print(cowsay('moo'))
print(date())
print(cal())
# failing on github actions
# for i, l in enumerate(cat('LICENSE.txt')):
# print(i, l)
def test_bash_history(self):
pass # failing on github actions
# print(history())
# for i, thing in enumerate(history(), 1):
# print(i, thing)
@silence
def test_bash(self):
print("ran")
        print(ping("http://eventual.technology"))  # < TODO - need to stream output
# print(wget('eventual.technology'))
if __name__ == "__main__":
unittest.main()
|
# Generated by Django 3.2 on 2021-08-28 20:31
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('payment', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Order',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_time', models.DateTimeField(auto_now_add=True, verbose_name='created time')),
('modified_time', models.DateTimeField(auto_now=True, verbose_name='modified time')),
                ('status', models.PositiveSmallIntegerField(choices=[(0, 'preparing food'), (1, 'sending')], verbose_name='status')),
('is_delivered', models.BooleanField(default=False, verbose_name='is delivered')),
('invoice', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='orders', to='payment.invoice', verbose_name='invoice')),
],
options={
'abstract': False,
},
),
]
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import git_rebaser
_ARG_ALIASES = {
'xl': ['ll', 'l'],
'update': ['up'],
'commit': ['ci'],
'prune': ['d', 'delete']
}
def _get_arg_with_aliases(arg):
return {'name': arg, 'aliases': _ARG_ALIASES.get(arg, [])}
def main():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest='subparser_name')
sub_arg = subparsers.add_parser(
**_get_arg_with_aliases('amend'), help='Amend current branch')
sub_arg = subparsers.add_parser(
**_get_arg_with_aliases('xl'),
help='show revision history of entire repository')
sub_arg = subparsers.add_parser(
**_get_arg_with_aliases('change_branch_name'),
help='change current branch name')
sub_arg.add_argument(
'--branch_name',
default=None,
help='branch name. If not specified, it will use next available positive number'
)
sub_arg = subparsers.add_parser(
**_get_arg_with_aliases('set_as_master_branch'),
        help='Set current branch as the master branch; it will also be associated with'
        ' branch name 0 so it is easier to refer to')
sub_arg = subparsers.add_parser(
**_get_arg_with_aliases('diff'), help='diff file with parent branch')
sub_arg = subparsers.add_parser(
**_get_arg_with_aliases('difftool'),
help='diff file with parent branch using git difftool')
sub_arg = subparsers.add_parser(
**_get_arg_with_aliases('commit'),
help='commit the specified files or all outstanding changes')
sub_arg.add_argument('--branch_name', default=None)
sub_arg = subparsers.add_parser(
**_get_arg_with_aliases('init'),
help='Initialize git_rebaser on current directory')
sub_arg = subparsers.add_parser(
**_get_arg_with_aliases('prune'),
        help='delete a branch, but keep its sub-branches if there are any')
sub_arg.add_argument('branch_name')
sub_arg = subparsers.add_parser(
**_get_arg_with_aliases('rebase'),
        help='rebase a branch with its sub-branches (the whole chain) on top of a branch'
)
sub_arg.add_argument('-s', '--source', help='source branch name')
sub_arg.add_argument('-d', '--dest', help='destination branch name')
sub_arg = subparsers.add_parser(
**_get_arg_with_aliases('sync'), help='sync git from remote')
sub_arg = subparsers.add_parser(
**_get_arg_with_aliases('update'),
help='update working directory (or switch revisions)')
sub_arg.add_argument('branch_name')
args = parser.parse_args()
sub_arg_name = args.subparser_name
for key, value in _ARG_ALIASES.items():
if sub_arg_name in value:
sub_arg_name = key
if sub_arg_name is None:
parser.print_help()
exit(1)
# pass the sub argument to corresponding method call.
rebaser = git_rebaser.GitRebaser()
getattr(rebaser, sub_arg_name)(args)
if __name__ == '__main__':
main()
|
"""Functions for safely printing in parallel."""
try:
import horovod.torch as hvd
except ModuleNotFoundError:
pass
use_horovod = False
def set_horovod_status(new_value):
"""
Set the horovod status for printing.
By setting the horovod status via this function it can be ensured that
printing works in parallel. The Parameters class does that for the user.
Parameters
----------
new_value : bool
        Value to which the horovod status will be set.
"""
global use_horovod
use_horovod = new_value
def printout(*values, sep=' '):
"""
Interface to built-in "print" for parallel runs. Can be used like print.
Parameters
----------
values
Values to be printed.
sep : string
Separator between printed values.
"""
outstring = sep.join([str(v) for v in values])
if use_horovod is False:
print(outstring)
else:
if hvd.rank() == 0:
print(outstring)
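# Minimal usage sketch (module name and availability of horovod are assumptions,
# not part of the source):
#
#   from parallel_printer import set_horovod_status, printout
#   set_horovod_status(True)              # enable rank-aware printing
#   printout("epoch", 3, "loss", 0.12)    # only hvd.rank() == 0 prints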
|
# -*- coding: utf-8 -*-
u"""Jupyterhub login
:copyright: Copyright (c) 2020 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
from pykern import pkconfig
from pykern.pkcollections import PKDict
from pykern.pkdebug import pkdp
import contextlib
import jupyterhub.auth
import sirepo.auth
import sirepo.cookie
import sirepo.server
import sirepo.sim_api.jupyterhublogin
import sirepo.util
import tornado.web
import werkzeug.exceptions
_JUPYTERHUBLOGIN_ROUTE = '/jupyterhublogin'
class Authenticator(jupyterhub.auth.Authenticator):
# Do not prompt with jupyterhub login page. self.authenticate()
# will handle login using Sirepo functionality
# See the jupyterhub docs for more info:
# https://jupyterhub.readthedocs.io/en/stable/api/auth.html
auto_login = True
refresh_pre_spawn = True
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
sirepo.server.init()
async def authenticate(self, handler, data):
with _set_cookie(handler):
try:
self._check_permissions()
except werkzeug.exceptions.Forbidden:
# returning None means the user is forbidden (403)
# https://jupyterhub.readthedocs.io/en/stable/api/auth.html#jupyterhub.auth.Authenticator.authenticate
return None
except sirepo.util.SRException as e:
r = e.sr_args.get('routeName')
if r not in ('completeRegistration', 'login', 'loginFail'):
raise
handler.redirect(f'{_JUPYTERHUBLOGIN_ROUTE}#/{r}')
raise tornado.web.Finish()
u = sirepo.sim_api.jupyterhublogin.unchecked_jupyterhub_user_name(
have_simulation_db=False,
)
if not u:
handler.redirect(f'{_JUPYTERHUBLOGIN_ROUTE}')
raise tornado.web.Finish()
return u
async def refresh_user(self, user, handler=None):
with _set_cookie(handler):
try:
self._check_permissions()
except sirepo.util.SRException:
# Returning False is what the jupyterhub API expects and jupyterhub
# will handle re-authenticating the user.
# https://jupyterhub.readthedocs.io/en/stable/api/auth.html#jupyterhub.auth.Authenticator.refresh_user
return False
return True
def _check_permissions(self):
sirepo.auth.require_user()
sirepo.auth.require_sim_type('jupyterhublogin')
@contextlib.contextmanager
def _set_cookie(handler):
import sirepo.auth_db
with sirepo.auth_db.session(), \
sirepo.cookie.set_cookie_outside_of_flask_request(
handler.get_cookie(sirepo.cookie.cfg.http_name),
):
yield
|
#!/usr/bin/env python
import docker
import json
import os
import uuid
from esbulkstream import Documents
def main():
cwd = os.getcwd()
docker_client = docker.from_env()
es = Documents('sbom')
with open("top-containers.json") as fh:
container_names = json.load(fh)['containers']
for c in container_names:
print("Scanning %s" % c)
if c == "elasticsearch":
c = "elasticsearch:8.0.0"
elif c == "logstash":
c = "logstash:8.0.0"
elif c == "kibana":
c = "kibana:8.0.0"
elif c == "jenkins":
c = "jenkins:2.60.3"
elif c == "oraclelinux":
c = "oraclelinux:8"
elif c == "opensuse":
continue
elif c == "ubuntu-debootstrap":
continue
elif c == "notary":
c = "notary:signer"
elif c == "docker-dev":
continue
elif c == "ibm-semeru-runtimes":
c = "ibm-semeru-runtimes:open-8u322-b06-jre-centos7"
elif c == "scratch":
continue
elif c == "clefos":
# Syft doesn't like this image, just skip it
continue
else:
c = f"{c}:latest"
docker_client.images.pull(c)
output = docker_client.containers.run("anchore/syft", \
"-o json --file /SBOMs/%s.json packages docker:%s" % (c, c), \
auto_remove=True, \
environment=["SYFT_FILE_METADATA_CATALOGER_ENABLED=true"], \
volumes=[f"{cwd}/SBOMs:/SBOMs", "/var/run/docker.sock:/var/run/docker.sock"])
if __name__ == "__main__":
main()
|
'''
Code for JAX implementations presented in: Enabling Fast
Differentially Private SGD via Just-in-Time Compilation and Vectorization
'''
import itertools
import time
from functools import partial
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
from jax import grad, jit, random, vmap
from jax.experimental import optimizers, stax
from jax.lib import pytree
from jax.tree_util import tree_flatten, tree_multimap, tree_unflatten
from keras.utils.np_utils import to_categorical
from tensorflow_privacy.privacy.analysis.rdp_accountant import (compute_rdp, get_privacy_spent)
import data
import utils
def logistic_model(features, **_):
return hk.Sequential([hk.Linear(1), jax.nn.sigmoid])(features)
def ffnn_model(features, **_):
return hk.Sequential([hk.Linear(50), jax.nn.relu, hk.Linear(2)])(features)
def mnist_model(features, **_):
return hk.Sequential([
hk.Conv2D(16, (8, 8), padding='SAME', stride=(2, 2)),
jax.nn.relu,
hk.MaxPool(2, 1, padding='VALID'), # matches stax
hk.Conv2D(32, (4, 4), padding='VALID', stride=(2, 2)),
jax.nn.relu,
hk.MaxPool(2, 1, padding='VALID'), # matches stax
hk.Flatten(),
hk.Linear(32),
jax.nn.relu,
hk.Linear(10),
])(features)
def lstm_model(x, vocab_size=10_000, seq_len=256, args=None, **_):
embed_init = hk.initializers.TruncatedNormal(stddev=0.02)
token_embedding_map = hk.Embed(vocab_size + 4, 100, w_init=embed_init)
o2 = token_embedding_map(x)
o2 = jnp.reshape(o2, (o2.shape[1], o2.shape[0], o2.shape[2]))
# LSTM Part of Network
core = hk.LSTM(100)
if args and args.dynamic_unroll:
outs, state = hk.dynamic_unroll(core, o2, core.initial_state(x.shape[0]))
else:
outs, state = hk.static_unroll(core, o2, core.initial_state(x.shape[0]))
outs = outs.reshape(outs.shape[1], outs.shape[0], outs.shape[2])
# Avg Pool -> Linear
red_dim_outs = hk.avg_pool(outs, seq_len, seq_len, "SAME").squeeze()
final_layer = hk.Linear(2)
ret = final_layer(red_dim_outs)
return ret
def embedding_model(arr, vocab_size=10_000, seq_len=256, **_):
# embedding part of network
x = arr
embed_init = hk.initializers.TruncatedNormal(stddev=0.02)
token_embedding_map = hk.Embed(vocab_size + 4, 16, w_init=embed_init)
o2 = token_embedding_map(x)
# avg pool -> linear
o3 = hk.avg_pool(o2, seq_len, seq_len, "SAME").squeeze()
fcnn = hk.Sequential([hk.Linear(16), jax.nn.relu, hk.Linear(2)])
return fcnn(o3)
def cifar_model(features, **_):
out = hk.Conv2D(32, (3, 3), padding='SAME', stride=(1, 1))(features)
out = jax.nn.relu(out)
out = hk.Conv2D(32, (3, 3), padding='SAME', stride=(1, 1))(out)
out = jax.nn.relu(out)
out = hk.AvgPool(2, strides=2, padding='VALID')(out)
out = hk.Conv2D(64, (3, 3), padding='SAME', stride=(1, 1))(out)
out = jax.nn.relu(out)
out = hk.Conv2D(64, (3, 3), padding='SAME', stride=(1, 1))(out)
out = jax.nn.relu(out)
out = hk.AvgPool(2, strides=2, padding='VALID')(out)
out = hk.Conv2D(128, (3, 3), padding='SAME', stride=(1, 1))(out)
out = jax.nn.relu(out)
out = hk.Conv2D(128, (3, 3), padding='SAME', stride=(1, 1))(out)
out = jax.nn.relu(out)
out = hk.AvgPool(2, strides=2, padding='VALID')(out)
out = hk.Conv2D(256, (3, 3), padding='SAME', stride=(1, 1))(out)
out = jax.nn.relu(out)
out = hk.Conv2D(10, (3, 3), padding='SAME', stride=(1, 1))(out)
return out.mean((1, 2))
def multiclass_loss(model, params, batch):
inputs, targets = batch
logits = model.apply(params, None, inputs)
# convert the outputs to one hot shape according to the same shape as
# logits for vectorized dot product
one_hot = jax.nn.one_hot(targets, logits.shape[-1])
logits = stax.logsoftmax(logits) # log normalize
return -jnp.mean(jnp.sum(logits * one_hot, axis=-1)) # cross entropy loss
def logistic_loss(model, params, batch):
inputs, targets = batch[0], batch[1]
# have to always supply the RNG field
logits = model.apply(params, None, inputs)
logits = jnp.reshape(logits, -1) # needs to be only scalar per index
# max_val is required for numerical stability
max_val = jnp.clip(logits, 0, None)
loss = jnp.mean(logits - logits * targets + max_val +
jnp.log(jnp.exp(-max_val) + jnp.exp((-logits - max_val))))
return loss
def accuracy(model, params, batch):
inputs, targets = batch
target_class = jnp.argmax(targets, axis=1)
predicted_class = jnp.argmax(model.apply(params, None, inputs), axis=1)
return jnp.mean(predicted_class == target_class)
def clipped_grad(model, loss, params, l2_norm_clip, single_example_batch):
"""Evaluate gradient for a single-example batch and clip its grad norm."""
grads = grad(partial(loss, model))(params, single_example_batch)
nonempty_grads, tree_def = tree_flatten(grads)
total_grad_norm = jnp.linalg.norm([jnp.linalg.norm(neg.ravel()) for neg in nonempty_grads])
divisor = jnp.maximum(total_grad_norm / l2_norm_clip, 1.)
normalized_nonempty_grads = [g / divisor for g in nonempty_grads]
return tree_unflatten(tree_def, normalized_nonempty_grads)
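# Note on the clipping above: each per-example gradient g is rescaled to
# g / max(1, ||g||_2 / l2_norm_clip), i.e. gradients whose global l2 norm is
# already <= l2_norm_clip are left untouched and larger ones are projected onto
# the l2 ball of radius l2_norm_clip.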
def private_grad(model, loss, params, batch, rng, l2_norm_clip, noise_multiplier, batch_size):
"""Return differentially private gradients for params, evaluated on batch."""
clipped_grads = vmap(partial(clipped_grad, model, loss), (None, None, 0))(params, l2_norm_clip,
batch)
clipped_grads_flat, grads_treedef = tree_flatten(clipped_grads)
aggregated_clipped_grads = [g.sum(0) for g in clipped_grads_flat]
rngs = random.split(rng, len(aggregated_clipped_grads))
noised_aggregated_clipped_grads = [
g + l2_norm_clip * noise_multiplier * random.normal(r, g.shape)
for r, g in zip(rngs, aggregated_clipped_grads)
]
normalized_noised_aggregated_clipped_grads = [
g / batch_size for g in noised_aggregated_clipped_grads
]
return tree_unflatten(grads_treedef, normalized_noised_aggregated_clipped_grads)
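# Note on the noise above: Gaussian noise with standard deviation
# l2_norm_clip * noise_multiplier is added to the *sum* of clipped per-example
# gradients, and the result is divided by batch_size -- the usual DP-SGD recipe
# (clip, sum, add noise, average).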
def private_grad_no_vmap(model, loss, params, batch, rng, l2_norm_clip, noise_multiplier,
batch_size):
"""Return differentially private gradients for params, evaluated on batch."""
clipped_grads = tree_multimap(
lambda *xs: jnp.stack(xs),
*(clipped_grad(model, loss, params, l2_norm_clip, eg) for eg in zip(*batch)))
clipped_grads_flat, grads_treedef = tree_flatten(clipped_grads)
aggregated_clipped_grads = [g.sum(0) for g in clipped_grads_flat]
rngs = random.split(rng, len(aggregated_clipped_grads))
noised_aggregated_clipped_grads = [
g + l2_norm_clip * noise_multiplier * random.normal(r, g.shape)
for r, g in zip(rngs, aggregated_clipped_grads)
]
normalized_noised_aggregated_clipped_grads = [
g / batch_size for g in noised_aggregated_clipped_grads
]
return tree_unflatten(grads_treedef, normalized_noised_aggregated_clipped_grads)
model_dict = {
'mnist': mnist_model,
'lstm': lstm_model,
'embed': embedding_model,
'ffnn': ffnn_model,
'logreg': logistic_model,
'cifar10': cifar_model,
}
def main(args):
print(args)
if args.microbatches:
raise NotImplementedError('Microbatches < batch size not currently supported')
if args.experiment == 'lstm' and args.no_jit:
raise ValueError('LSTM with no JIT will fail.')
data_fn = data.data_fn_dict[args.experiment][int(args.dummy_data)]
kwargs = {
'max_features': args.max_features,
'max_len': args.max_len,
'format': 'NHWC',
}
if args.dummy_data:
kwargs['num_examples'] = args.batch_size * 2
(train_data, train_labels), _ = data_fn(**kwargs)
# train_labels, test_labels = to_categorical(train_labels), to_categorical(
# test_labels)
num_train = train_data.shape[0]
num_complete_batches, leftover = divmod(num_train, args.batch_size)
num_batches = num_complete_batches + bool(leftover)
key = random.PRNGKey(args.seed)
model = hk.transform(
partial(model_dict[args.experiment],
args=args,
vocab_size=args.max_features,
seq_len=args.max_len))
rng = jax.random.PRNGKey(42)
init_params = model.init(key, train_data[:args.batch_size])
opt_init, opt_update, get_params = optimizers.sgd(args.learning_rate)
loss = logistic_loss if args.experiment == 'logreg' else multiclass_loss
if args.dpsgd:
train_data, train_labels = train_data[:, None], train_labels[:, None]
# regular update -- non-private
def update(_, i, opt_state, batch):
params = get_params(opt_state)
return opt_update(i, grad(partial(loss, model))(params, batch), opt_state)
grad_fn = private_grad_no_vmap if args.no_vmap else private_grad
# differentially private update
def private_update(rng, i, opt_state, batch):
params = get_params(opt_state)
rng = random.fold_in(rng, i) # get new key for new random numbers
return opt_update(
i,
grad_fn(model, loss, params, batch, rng, args.l2_norm_clip, args.noise_multiplier,
args.batch_size), opt_state)
opt_state = opt_init(init_params)
itercount = itertools.count()
train_fn = private_update if args.dpsgd else update
if args.no_vmap:
print('No vmap for dpsgd!')
if not args.no_jit:
train_fn = jit(train_fn)
else:
print('No jit!')
dummy = jnp.array(1.)
timings = []
for epoch in range(1, args.epochs + 1):
start = time.perf_counter()
for i, batch in enumerate(data.dataloader(train_data, train_labels, args.batch_size)):
opt_state = train_fn(
key,
next(itercount),
opt_state,
batch,
)
(dummy * dummy).block_until_ready() # synchronize CUDA.
duration = time.perf_counter() - start
print("Time Taken: ", duration)
timings.append(duration)
if args.dpsgd:
print('Trained with DP SGD optimizer')
else:
print('Trained with vanilla non-private SGD optimizer')
if not args.no_save:
append_to_name = ''
if args.no_jit: append_to_name += '_nojit'
if args.no_vmap: append_to_name += '_novmap'
utils.save_runtimes(__file__.split('.')[0], args, timings, append_to_name)
else:
print('Not saving!')
print('Done!')
if __name__ == '__main__':
parser = utils.get_parser(model_dict.keys())
parser.add_argument('--no_vmap', dest='no_vmap', action='store_true')
parser.add_argument('--no_jit', dest='no_jit', action='store_true')
parser.add_argument('--dynamic_unroll', dest='dynamic_unroll', action='store_true')
args = parser.parse_args()
main(args)
|
"""
Preprocessing of the Fichier Canadien des Éléments Nutritifs (FCEN) data before using it to characterize OFF ingredients
"""
import os
import pandas as pd
import json
from ingredients_characterization.vars import FCEN_DATA_DIR, FCEN_NUTRIMENTS_TO_OFF, FCEN_DATA_FILEPATH
# Reading data from FCEN
food_names = pd.read_csv(os.path.join(FCEN_DATA_DIR, 'FOOD NAME.csv'), encoding='ISO-8859-1')
nutrient_amounts = pd.read_csv(os.path.join(FCEN_DATA_DIR, 'NUTRIENT AMOUNT.csv'), encoding='ISO-8859-1')
top_level_nutriments = ['proteins', 'carbohydrates', 'fat', 'fiber', 'water']
# Looping on FCEN ingredients
result = dict()
for i, food in food_names.iterrows():
# Looping on nutriments
nutriments = dict()
foo_nutrient_amounts = nutrient_amounts[(nutrient_amounts.FoodID == food.FoodID) &
(nutrient_amounts.NutrientID.isin(FCEN_NUTRIMENTS_TO_OFF.keys()))]
for j, nutri_amount in foo_nutrient_amounts.iterrows():
nutriments[FCEN_NUTRIMENTS_TO_OFF[nutri_amount.NutrientID]] = {"value": nutri_amount.NutrientValue,
"stdev": nutri_amount.StandardError}
nutriments_sum = sum([v['value'] for k, v in nutriments.items() if k in top_level_nutriments])
nutriments['other'] = {'value': 0, 'stdev': 0}
# If the total sum is superior to 100, rectify the values
if nutriments_sum > 100:
for nutri in nutriments.values():
if nutri['value']:
nutri['value'] = nutri['value'] * 100 / nutriments_sum
# If the total sum is inferior to 100, add a "other" nutriment category that will ensure mass balance
elif (nutriments_sum < 100) and (all([x in nutriments for x in top_level_nutriments])):
nutriments['other']['value'] = 100 - nutriments_sum
# Adding the data to the result
food['nutriments'] = nutriments
result[food.FoodID] = food.to_dict()
# Saving the result
with open(FCEN_DATA_FILEPATH, 'w', encoding='utf8') as file:
json.dump(result, file, indent=2, ensure_ascii=False)
|
import sys
from landscapemodel import *
from plots import *
def ABtest(comparison,path,species=1,tmax=100,tshow=(0,-1),**kwargs):
"""A/B testing: plot results side-by-side for sets of options in comparison"""
path=Path(path).mkdir()
prm=deepcopy(LandscapeModel.dft_prm)
prm['species']=species
prm['landx']=prm['landy']=64
prm['dispersal']['mean']=1
model=LandscapeModel(parameters=prm)
results=[]
for comp in comparison:
dic={}
dic.update(kwargs)
if results:
dic['reseed']=0
dic['init']='restart'
dic.update(comp)
model.evol(dt=0.01,tmax=tmax,**dic)
results.append(deepcopy(model.results))
for s in range(species):
plt.figure()
plt.suptitle('Species {}'.format(s) )
panel=0
for i in tshow:
span=min([np.min(r['n'][i][s]) for r in results ]) , max([np.max(r['n'][i][s]) for r in results ])
for idx,comp, res in zip(range(len(results)),comparison,results):
panel, ax = auto_subplot(panel, len(tshow) * len(results))
plt.colorbar(plt.imshow(res['n'][i][s],vmin=span[0],vmax=span[1]),ax=ax)
t=res['t'][i]
plt.title('{} t={}'.format(comp.get('title',idx), t) )
if kwargs.get('debug',0):
code_debugger()
else:
plt.show()
def FTtest(path='TEST/FTtest',**kwargs):
"""Test Fourier Transform optimization"""
comparison=[{'method':'Euler','use_Fourier':0,'title':'Direct'},{'method':'Euler','use_Fourier':1,'title':'Fourier'} ]
ABtest(comparison,path,**kwargs)
def algotest(path='TEST/algotest',**kwargs):
"""Test dependence on integration algorithm"""
comparison=[{'method':'Euler','title':'Euler'},{'method':'scipy','title':'scipy+Fourier','use_Fourier':1} ]
ABtest(comparison,path,**kwargs)
def environment(path):
"""Compare color and cutoff"""
from landscapesimu import loop, summary_plots
axes=[
('environment_cutoff',[.01,.2] ), #Environment heterogeneity
('environment_color',[1.,3.] ), #Environment heterogeneity
('dispersal_mean', np.logspace(-1,.5,2) ), # Mean dispersal strength
# ('competition_mean',[0.1,0.3][:1] ), #Interspecific competition strength
# ('competition_scale', [0.1, 5][:] ), # Competition spatial scale
('sys', range(1)) # Dummy variable (change number in parentheses to make multiple runs with same parameters)
]
loop(axes=axes,path=path,tmax=250,nsample=20,rerun='rerun' in sys.argv,species=30,
reseed=0,use_Fourier=1,method='scipy')
summary_plots(path,save=1,detailed=1,movie='movie' in sys.argv,rerun='rerun' in sys.argv or 'replot' in sys.argv,
values=[ ('checker_r2','Checkerboard pattern')])
if __name__=='__main__':
if 'FT' in sys.argv:
FTtest(debug='debug' in sys.argv,species=16,tmax=10)
elif 'algo' in sys.argv:
algotest(debug='debug' in sys.argv,species=8,tmax=100)
else:
environment(Path('TEST/environment')) |
from requests import Session
from requests.adapters import HTTPAdapter
from urllib3 import Retry
class RetrySession(Session):
def __init__(self, retries=3, backoff_factor=0.5, status_forcelist=None):
super().__init__()
retry = Retry(total=retries, read=retries, backoff_factor=backoff_factor, status_forcelist=status_forcelist)
        adapter = HTTPAdapter(max_retries=retry)
self.mount('http://', adapter)
self.mount('https://', adapter)
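# Usage sketch (status codes are illustrative, not mandated by this class):
#   session = RetrySession(retries=3, status_forcelist=[429, 500, 502, 503, 504])
#   resp = session.get("https://example.com/api")  # transient failures are retried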
|
#-*- coding: utf-8 -*-
from django_town.utils import CaseLessDict
from .errors import AccessDeniedError
from django_town.utils import class_from_path
from django_town.core.settings import OAUTH2_SETTINGS
from django_town.oauth2.user import OAuth2User
def authorization_from_django_request(request):
return request.META.get('HTTP_AUTHORIZATION')
def uri_from_django_request(request):
return request.build_absolute_uri()
def http_method_from_django_request(request):
return request.method
def get_dict_from_django_request(request):
return request.GET
def post_dict_from_django_request(request):
return request.POST
class OAuth2Request(object):
def __init__(self, request):
if request:
self._uri = uri_from_django_request(request)
self._method = http_method_from_django_request(request).upper()
self._body = CaseLessDict((get_dict_from_django_request(request)
if self._method == "GET" else post_dict_from_django_request(request)))
self._request = request
self._client = None
def save_session(self, key, value):
pass
@property
def method(self):
return self._method
@property
def client_id(self):
return self._body.get('client_id')
@property
def client_secret(self):
return self._body.get('client_secret')
@property
def username(self):
return self._body.get('username')
@property
def password(self):
return self._body.get('password')
@property
def grant_type(self):
return self._body.get('grant_type')
@property
def code(self):
return self._body.get('code')
@property
def redirect_uri(self):
return self._body.get('redirect_uri')
@property
def response_type(self):
return self._body.get('response_type')
@property
def refresh_token(self):
return self._body.get('refresh_token')
@property
def access_token(self):
return self._body.get('access_token')
@property
def state(self):
return self._body.get('state')
@property
def scope(self):
scope = self._body.get('scope')
if scope:
return scope.split(' ')
return []
#
#@property
#def scope(self):
# return self._body.get('scope')
@property
def user(self):
if self._request.user.is_authenticated():
return OAuth2User(django_user=self._request.user)
else:
raise AccessDeniedError()
def oauth2_request_class():
try:
return class_from_path(OAUTH2_SETTINGS.REQUEST)
except KeyError:
return OAuth2Request
class FakeRequest(OAuth2Request):
def __init__(self, body):
self._body = body
self._method = "POST"
super(FakeRequest, self).__init__(None)
@property
def method(self):
return self._method
@property
def client_id(self):
return self._body.get('client_id')
@property
def client_secret(self):
return self._body.get('client_secret')
@property
def username(self):
return self._body.get('username')
@property
def password(self):
return self._body.get('password')
@property
def grant_type(self):
return self._body.get('grant_type')
@property
def code(self):
return self._body.get('code')
@property
def redirect_uri(self):
return self._body.get('redirect_uri')
@property
def response_type(self):
return self._body.get('response_type')
@property
def state(self):
return self._body.get('state')
@property
def user(self):
return self._body.get('user')
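# FakeRequest lets server-side code drive the OAuth2 machinery without a real
# HTTP request; a rough illustration (values are made up):
#   req = FakeRequest({'client_id': 'abc', 'client_secret': 'xyz',
#                      'grant_type': 'password', 'username': 'alice',
#                      'password': 'secret'})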
|
from django.forms import ModelForm
from .models import Student, Teacher, SchoolClass, Parent, Subject, Lesson
from django import forms
YEARS = (2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016)
class AddStudentForm(ModelForm):
second_name = forms.CharField(required=False)
address = forms.CharField(required=False)
city = forms.CharField(required=False)
zip_code = forms.CharField(required=False)
class Meta:
model = Student
exclude = ['create_date', 'school_class', 'user', 'parents']
widgets = {
'birth_date': forms.SelectDateWidget(years=YEARS, attrs=({'style': 'width: 33%; display: inline-block;'})),
'pesel': forms.TextInput(attrs={'size': 11, 'title': 'numer PESEL', 'style': 'max-width: 9em'})
}
class CreateStudentAccountForm(ModelForm):
student = forms.ModelChoiceField(queryset=Student.objects.filter(user=None), initial=0)
class Meta:
model = Student
fields = {'student'}
class AddTeacherForm(ModelForm):
class Meta:
model = Teacher
exclude = ['user']
class CreateTeacherAccountForm(ModelForm):
teacher = forms.ModelChoiceField(queryset=Teacher.objects.filter(user=None), initial=0)
class Meta:
model = Teacher
fields = {'teacher'}
class CreateSchoolClassForm(ModelForm):
name = forms.CharField(required=True, label='Symbol klasy')
tutor = forms.ModelChoiceField(queryset=Teacher.objects.all(), initial=0, label="Wychowawca")
class Meta:
model = Teacher
fields = {}
widgets = {
'name': forms.TextInput(attrs={'style': 'max-width: 3em'})
}
class CreateSubjectForm(ModelForm):
class Meta:
model = Subject
fields = {'type', 'teacher', 'school_class'}
class CreateLessonForm(ModelForm):
school_class = forms.ModelChoiceField(queryset=SchoolClass.objects.all(), initial=0, label="Klasa")
subject = forms.ModelChoiceField(queryset=Subject.objects.all(), initial=0, label="Przedmiot")
class Meta:
model = Lesson
fields = {'beginning_hour', 'ending_hour', 'topic'}
field_classes = {
'beginning_hour': forms.DateTimeField,
'ending_hour': forms.DateTimeField,
}
widgets = {
'topic': forms.Textarea,
}
class AssignStudentToClassForm(forms.Form):
student = forms.ModelChoiceField(queryset=Student.objects.all(), initial=0, label="Uczeń")
school_class = forms.ModelChoiceField(queryset=SchoolClass.objects.all(), initial=0, label="Klasa")
|
#coding = utf-8
import unittest
import requests
from selenium import webdriver
import json
import re
import time
class modifiedPhoneNum(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Chrome()
self.driver.implicitly_wait(10.0)
self.driver.maximize_window()
self.loginNum = "13000000000"
self.loginPwd = "666666"
self.headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1 ) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36'}
        '''Call the login() helper'''
        self.login()
        # Test data used by the cases below
        self.phoneNumList = [1234564567890, 12345645669, 10000000000, 11012345678, 16212346789, 19256456489, 69894548755, 53245678956, ' ']
        self.verifyCodeList = [3232, 55444, 668, 985999, 'fdfd', 'yuh7', ' ']
        self.pwd = [454578, 12345, 123456789012345678901, 'yuhi', '']
        global modifXpath  # "modify phone number" button
modifXpath = "me__number___gR8Vt"
global phoneXpath
phoneXpath= "/html/body/div[5]/div/div/div[1]/div/div[1]/input"
global sendXpath
sendXpath = "/html/body/div[5]/div/div/div[1]/div/div[2]/button"
global verifyCodeXpath
verifyCodeXpath = "/html/body/div[5]/div/div/div[1]/div/div[2]/input"
global passWdXpath
passWdXpath = "/html/body/div[5]/div/div/div[1]/div/div[3]/input"
global cancelXpath
cancelXpath ="dialog__dialog-button-cancel___tJdyZ"
global confirmXpath
confirmXpath = "/html/body/div[5]/div/div/div[2]/button[1]"
global tipXpath
tipXpath = "/html/body/div[5]/div/div/div[1]/div/span"
self.tipText = ""
self.verifyCode = ""
self.inputVerifyCode = "5625"
    # Log in
def login(self):
self.driver.get("http://192.168.4.15:8001/1.8.0/cn/")
loginPhoneNumBtn = self.driver.find_element_by_xpath("//*[@id='app']/div/div[1]/section/div/section/div/p[1]/input")
loginPwdBtn = self.driver.find_element_by_xpath("//*[@id='app']/div/div[1]/section/div/section/div/p[2]/input")
loginBtn =self.driver.find_element_by_xpath("//*[@id='app']/div/div[1]/section/div/section/p[1]")
loginPhoneNumBtn.send_keys(self.loginNum)
loginPwdBtn.send_keys(self.loginPwd)
loginBtn.click()
        # Locate the dropdown menu under the user avatar
profileBtn =self.driver.find_element_by_xpath("//*[@id='app']/div/div[1]/div[2]/div/div/div[1]/div[2]/div[1]/div/span")
profileBtn.click()
meInfoBtn = self.driver.find_element_by_xpath("//*[@id='app']/div/div[1]/div[2]/div/div/div[1]/div[2]/div[1]/div[2]/ul/li[2]/a")
meInfoBtn.click()
def reqVerifyCode(self):
        # Request the verification code via the HTTP API
verifyUrl = 'http://192.168.4.15:8001/1.8.0/cn/api/0.2/aux/sendverifycode'
verifyParams = {'mobile': '18258183861', 'type': '3'}
verify = requests.post(verifyUrl, data=verifyParams, headers=self.headers)
print(verify.status_code)
dic_json = json.dumps(verify.json(), sort_keys=True, indent=10, ensure_ascii=False)
dicVerify = json.loads(dic_json)
print(dicVerify)
self.verifyCode = int(dicVerify["r"]["code"])
    # Assertions and verification
    def confirmTest(self):
        try:
            tip = self.driver.find_element_by_xpath(tipXpath).text
            if tip == self.tipText:
                self.assertTrue(True, ['OK'])
            else:
                # report the unexpected tip text as a test failure
                self.fail(tip)
        except:
            print("error")
    # Modify phone number -- validate that the phone number format is legal
def test_PhoneNum_1(self):
self.driver.find_element_by_class_name(modifXpath).click()
        # Cancel the modification
        # self.driver.find_element_by_class_name(cancelXpath)
        # Enter each phone number; a regex decides which ones match the valid mobile format
for phoneNum in self.phoneNumList:
phoneNumInput = self.driver.find_element_by_xpath(phoneXpath)
phoneNumInput.clear()
phoneNumInput.send_keys(phoneNum)
regex = "^1(3|4|5|7|8)\d{9}$"
match = re.search(regex, "%s" %phoneNum)
time.sleep(0.5)
vertifyBtn = self.driver.find_element_by_xpath(sendXpath)
vertifyBtn.click()
            if match:
                print("Valid phone number: %s" % phoneNum)
            else:
                self.tipText = "手机号码异常!"  # expected UI tip text (zh: "abnormal phone number"), kept verbatim for the page comparison
                print("Invalid phone number: %s" % phoneNum)
        print("test_PhoneNum_1---end")
    # Modify phone number -- verification code
    def test_verifyCode(self):
        # Open the "modify phone number" dialog and send the verification code
self.driver.find_element_by_class_name(modifXpath).click()
self.driver.find_element_by_xpath(phoneXpath).send_keys(int(self.loginNum))
        # Get the remaining validity time of the verification code
time.sleep(3)
vertifyBtn = self.driver.find_element_by_xpath(sendXpath)
if vertifyBtn.is_enabled():
vertifyBtn.click()
            # Enter the verification code and password, then confirm to check the code
self.driver.find_element_by_xpath(verifyCodeXpath).send_keys(self.inputVerifyCode)
self.driver.find_element_by_xpath(passWdXpath).send_keys(self.loginPwd)
            # Extract the remaining seconds from the countdown text
restSeconds = self.driver.find_element_by_class_name("me__send-code___3uCe0").text
restSeconds = int(re.sub("\D", "", restSeconds))
print(restSeconds)
time.sleep(2)
if restSeconds>0:
self.driver.find_element_by_xpath(confirmXpath).click()
                if self.inputVerifyCode == self.verifyCode:
                    print("Verification code is correct")
                else:
                    print("Verification code is wrong")
            else:
                self.driver.find_element_by_xpath(confirmXpath).click()
                print("Verification code rejected -- timed out")
print("test_verifyCode---end")
else:
print(2222)
def test_password(self):
testPwd = "123456"
self.driver.find_element_by_xpath(passWdXpath).send_keys(testPwd)
print("密码")
def test_modifiedPhoneNum_2(self):
print("testcase_2")
#
# def test_modifiedPhoneNum_3(self):
# print("testcase_3")
    def tearDown(self):
        self.driver.quit()
        print("test end")
if __name__ == '__main__':
#
suite = unittest.TestSuite()
suite.addTest(modifiedPhoneNum("test_PhoneNum_1"))
# suite.addTest(modifiedPhoneNum("test_verifyCode"))
# # suite.addTest(modifiedPhoneNum("test_modifiedPhoneNum_3"))
#
runner = unittest.TextTestRunner()
runner.run(suite)
|
from django.urls import path
from . import views
urlpatterns = [
path('<int:root>', views.index, name='index'),
path('freshmen-groups', views.index, {"root": 5}, name='index'),
path('subjects', views.index, {"root": 15}, name='index'),
path('websites', views.website, {"root": 18}, name='website')
]
|
import os, glob
papka_korpus = os.path.dirname(os.path.abspath(__file__))
papka_apertium = os.path.join(papka_korpus, "testApertium/")
from tqdm import tqdm
from time import monotonic, sleep
from datetime import timedelta
global_katolog = os.path.join(papka_korpus, "testbasictexts/")
files = glob.glob(global_katolog+"*.txt")
f = open(os.path.join(papka_korpus,"errors/error.log"), 'a+', encoding="utf-8")
length = len(files)
pbar = tqdm(files)
start_time = monotonic()
for fail in pbar:
filename = fail[fail.rfind("/")+1:]
pbar.set_description(f"Жасалуда {str(filename)}")
try:
os.system('''cd $HOME/sources/apertium-kaz-rus\ncat "{0}" | apertium -n -d. kaz-rus-tagger > "{1}"'''.format(fail, os.path.join(papka_apertium, filename)))
except:
f.write(fail+"\n")
end_time = monotonic()
timedel = end_time - start_time
print("Аяқталды! Барлығы {0} құжат. Жұмсалған уақыт: {1}".format(length, timedelta(seconds=timedel)))
f.close()
|
#!/usr/bin/env python
#from .core import *
import numpy as np
import pandas as pd
import shutil
import urllib
import urlparse
from os.path import splitext, basename
import os
import util
from pprint import pprint
import StringIO
import db
from core import *
from IPython.core.debugger import Tracer
class Term(UploadCsvConvert):
def __init__(self, xe):
#add default col instance to children
xe.attrib['newCols'] = 'term_id,term_source_id,term_name,term_category_id,description,term_field1,term_field2'
defined_cols = {x.attrib['colName'] for x in xe}
UploadCsvConvert.__init__(self,xe=xe,dest='term')
for c in self.children:
if c.col_name == 'term_source_id' and ( 'term_source_id' not in defined_cols):
c.source_col = 'term_id'
self.type_col = 'term_category_id'
if 'termPrefix' in xe.attrib:
self.term_prefix = xe.attrib['termPrefix']
if 'destFile' in xe.attrib:
            self.use_dest = xe.attrib['destFile']
def get_type_col_value_sql(self):
return 'SELECT t.term_category_id FROM %s.term_category t WHERE t.category_name = ?' % SyncDB.DATABASE
def get_term_prefix(self):
if hasattr(self,'term_prefix'):
return self.term_prefix
return self.get_type_col_value() + '_'
def generate_new_row(self,row):
r = super(Term,self).generate_new_row(row)
#if the Species is supported
if r is not None:
r['term_id'] = self.get_term_prefix() + r['term_id']
return r
|
import click
import gsextract.gse_parser as gse_parser
@click.command()
@click.argument('input_file', type=click.Path(exists=True))
@click.argument('output_file', type=click.Path())
@click.option('--stream/--no-stream', default=False, help='Stream continuously from the file. Use the --stream flag to dump to a pcap from a real time GSE recording.')
@click.option('--reliable/--no-reliable', default=True, help='Add the --no-reliable flag to attempt to brute force IP headers in certain situations. Increases recovery but also can result in fake packets.')
def gsextract(input_file, output_file, stream, reliable):
gse_parser.gse_parse(file=input_file, outfile=output_file, stream=stream, reliable=reliable)
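# Example invocation once installed as a console script (file names are illustrative):
#   gsextract capture.gse recovered.pcap --stream --no-reliable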
def cli_runner():
gsextract() |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# author:pyy
# datetime:2018/12/29 10:33
from peewee import ForeignKeyField, CharField, TextField, IntegerField, JOIN
from Myforum.Forum.models import BaseModel
from Myforum.apps.users.models import User
class Question(BaseModel):
user = ForeignKeyField(User, verbose_name="用户")
category = CharField(max_length=200, verbose_name="分类", null=True)
title = CharField(max_length=200, verbose_name="标题", null=True)
content = TextField(verbose_name="内容")
    image = CharField(max_length=200, verbose_name="图片")
answer_nums = IntegerField(default=0, verbose_name="回答数")
@classmethod
def extend(cls):
return cls.select(cls, User.id, User.nick_name).join(User)
class Answer(BaseModel):
    # Answers and replies
user = ForeignKeyField(User, verbose_name="用户", related_name="answer_author")
question = ForeignKeyField(Question, verbose_name="问题")
parent_answer = ForeignKeyField('self', null=True, verbose_name="回答", related_name="answer_parent")
reply_user = ForeignKeyField(User, verbose_name="用户", related_name="question_user", null=True)
content = CharField(max_length=1000, verbose_name="内容")
reply_nums = IntegerField(default=0, verbose_name="回复数")
@classmethod
def extend(cls):
        # 1. join across multiple tables
        # 2. map several fields onto the same User model via aliases
answer_author = User.alias()
question_user = User.alias()
return cls.select(cls, Question, answer_author.id, answer_author.nick_name, answer_author.head_url,
question_user.id, question_user.nick_name, question_user.head_url).join(
Question, join_type=JOIN.LEFT_OUTER, on=cls.question).switch(cls).join(question_user,
join_type=JOIN.LEFT_OUTER,
on=cls.user).switch(cls).join(
answer_author, join_type=JOIN.LEFT_OUTER, on=cls.reply_user
)
|
import io
import os
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers
from PIL import Image
import flask
from flask import send_file, abort
from flask import jsonify, make_response
from flask import request
from flask_cors import CORS
from pymemcache.client import base
cache = base.Client(('localhost', 11211))
# initialise Flask application and Keras model
app = flask.Flask(__name__)
CORS(app)
predicted_image_name = 'predicted.png'
IMG_SIZE = (224, 224)
#trained_model = tf.keras.models.load_model(os.path.join('trained','Densenet_categorical_10_11_20'))
@app.route('/backend/test/ping')
def ping():
return 'pong'
@app.route('/backend/test/cache', methods=['POST', 'GET'])
def hello_world():
if flask.request.method == 'POST':
sessionId = request.args.get('id')
data = request.json
cache.set(sessionId, data)
return make_response('pong ' + sessionId, 200)
else:
sessionId = request.args.get('id')
data = cache.get(sessionId)
return make_response(data,200)
@app.route('/backend/image1', methods=['POST'])
def img1():
if not flask.request.files['photos']: return abort(401)
sessionId = request.args.get('id')
imageFormRequest = flask.request.files['photos'].read()
img = Image.open(io.BytesIO(imageFormRequest))
cache.set(sessionId + "img1", img)
return make_response("", 200)
@app.route('/backend/image2', methods=['POST'])
def img2():
if not flask.request.files['photos']: return abort(401)
sessionId = request.args.get('id')
imageFormRequest = flask.request.files['photos'].read()
img = Image.open(io.BytesIO(imageFormRequest))
cache.set(sessionId + "img2", img)
return make_response("", 200)
@app.route('/backend/result', methods=['POST'])
def getResult():
return send_file(
io.BytesIO(image_binary),
mimetype='image/jpeg',
as_attachment=True,
attachment_filename='result.jpg')
@app.route('/predict', methods=['POST'])
def predict():
print('New request')
print(flask.request)
if flask.request.files['photos']:
imageFormRequest = flask.request.files['photos'].read()
row_img = Image.open(io.BytesIO(imageFormRequest))
resized_img = row_img.resize(IMG_SIZE, Image.ANTIALIAS)
batch = []
batch.append(np.array(resized_img)/255.)
prediction = trained_model.predict(np.array(batch))
return make_response(jsonify(({'probability': str(prediction[(0,0)])})), 200)
return abort(401)
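# Example request against /predict (host, port and file name are illustrative):
#   curl -F "photos=@sample.jpg" http://localhost:5000/predict
# The response is JSON of the form {"probability": "<score>"}.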
if __name__=='__main__':
print(('* loading Keras model and Flask starting server'))
#global model
#model = load_model()
#global graph
#graph = tf.get_default_graph()
app.run(host='0.0.0.0',port=5000, debug = True) |
#!/usr/bin/env python3
"""Module containing the ParmedHMassRepartition class and the command line interface."""
import argparse
import shutil, re
from pathlib import Path, PurePath
from biobb_common.generic.biobb_object import BiobbObject
from biobb_common.configuration import settings
from biobb_common.tools import file_utils as fu
from biobb_common.tools.file_utils import launchlogger
from biobb_amber.parmed.common import *
class ParmedHMassRepartition(BiobbObject):
"""
| biobb_amber ParmedHMassRepartition
| Wrapper of the `AmberTools (AMBER MD Package) parmed tool <https://ambermd.org/AmberTools.php>`_ module.
| Performs a Hydrogen Mass Repartition from an AMBER topology file using parmed tool from the AmberTools MD package.
Args:
input_top_path (str): Input AMBER topology file. File type: input. `Sample file <https://github.com/bioexcel/biobb_amber/raw/master/biobb_amber/test/data/parmed/input.hmass.prmtop>`_. Accepted formats: top (edam:format_3881), parmtop (edam:format_3881), prmtop (edam:format_3881).
output_top_path (str): Output topology file (AMBER ParmTop). File type: output. `Sample file <https://github.com/bioexcel/biobb_amber/raw/master/biobb_amber/test/reference/parmed/output.hmass.prmtop>`_. Accepted formats: top (edam:format_3881), parmtop (edam:format_3881), prmtop (edam:format_3881).
properties (dic - Python dictionary object containing the tool parameters, not input/output files):
* **remove_tmp** (*bool*) - (True) [WF property] Remove temporal files.
* **restart** (*bool*) - (False) [WF property] Do not execute if output files exist.
Examples:
This is a use example of how to use the building block from Python::
from biobb_amber.parmed.parmed_hmassrepartition import parmed_hmassrepartition
parmed_hmassrepartition(input_top_path='/path/to/topology.top',
output_top_path='/path/to/newTopology.top')
Info:
* wrapped_software:
* name: AmberTools parmed
* version: >20.9
* license: LGPL 2.1
* ontology:
* name: EDAM
* schema: http://edamontology.org/EDAM.owl
"""
def __init__(self, input_top_path, output_top_path, properties=None, **kwargs) -> None:
properties = properties or {}
# Call parent class constructor
super().__init__(properties)
# Input/Output files
self.io_dict = {
'in': { 'input_top_path': input_top_path },
'out': { 'output_top_path': output_top_path }
}
# Properties specific for BB
self.properties = properties
# Check the properties
self.check_properties(properties)
def check_data_params(self, out_log, err_log):
""" Checks input/output paths correctness """
# Check input(s)
self.io_dict["in"]["input_top_path"] = check_input_path(self.io_dict["in"]["input_top_path"], "input_top_path", False, out_log, self.__class__.__name__)
# Check output(s)
self.io_dict["out"]["output_top_path"] = check_output_path(self.io_dict["out"]["output_top_path"],"output_top_path", False, out_log, self.__class__.__name__)
@launchlogger
def launch(self):
"""Launches the execution of the ParmedHMassRepartition module."""
# check input/output paths and parameters
self.check_data_params(self.out_log, self.err_log)
# Setup Biobb
if self.check_restart(): return 0
self.stage_files()
# Creating temporary folder
self.tmp_folder = fu.create_unique_dir()
fu.log('Creating %s temporary folder' % self.tmp_folder, self.out_log)
# Parmed configuration (instructions) file
instructions_file = str(PurePath(self.tmp_folder).joinpath("parmed.in"))
with open(instructions_file, 'w') as parmedin:
parmedin.write("hmassrepartition\n")
parmedin.write("outparm " + self.io_dict['out']['output_top_path'] + "\n")
self.cmd = ['parmed',
'-p', self.io_dict['in']['input_top_path'],
'-i', instructions_file,
'-O' # Overwrite output files
]
# Run Biobb block
self.run_biobb()
# Copy files to host
self.copy_to_host()
# remove temporary folder(s)
if self.remove_tmp:
self.tmp_files.append(self.tmp_folder)
self.remove_tmp_files()
return self.return_code
def parmed_hmassrepartition(input_top_path: str,
output_top_path: str = None,
properties: dict = None, **kwargs) -> int:
"""Create :class:`ParmedHMassRepartition <parmed.parmed_hmassrepartition.ParmedHMassRepartition>`parmed.parmed_hmassrepartition.ParmedHMassRepartition class and
execute :meth:`launch() <parmed.parmed_hmassrepartition.ParmedHMassRepartition.launch>` method"""
return ParmedHMassRepartition( input_top_path=input_top_path,
output_top_path=output_top_path,
properties=properties).launch()
def main():
parser = argparse.ArgumentParser(description='Performs a Hydrogen Mass Repartition from an AMBER topology file using parmed tool from the AmberTools MD package.', formatter_class=lambda prog: argparse.RawTextHelpFormatter(prog, width=99999))
parser.add_argument('--config', required=False, help='Configuration file')
# Specific args
required_args = parser.add_argument_group('required arguments')
required_args.add_argument('--input_top_path', required=True, help='Input AMBER topology file. Accepted formats: top, parmtop, prmtop.')
required_args.add_argument('--output_top_path', required=False, help='Output topology file (AMBER ParmTop). Accepted formats: top, parmtop, prmtop.')
args = parser.parse_args()
config = args.config if args.config else None
properties = settings.ConfReader(config=config).get_prop_dic()
# Specific call
parmed_hmassrepartition( input_top_path=args.input_top_path,
output_top_path=args.output_top_path,
properties=properties)
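# Example command line (file names are illustrative):
#   python parmed_hmassrepartition.py --input_top_path structure.prmtop --output_top_path structure.hmass.prmtop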
if __name__ == '__main__':
main()
|
# Generated by Django 3.0.11 on 2020-12-28 08:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='user',
name='fav_color',
field=models.CharField(blank=True, max_length=255, verbose_name='Favorite Color'),
),
]
|
"""
"""
import datetime
from fastapi import params
from fastapi import security
import fastapi
import sqlalchemy as sa
import sqlalchemy.orm
from rss_reader import models
from rss_reader import security as rss_security
from rss_reader.api import crud
from rss_reader.api import deps
from rss_reader.api import schemas
from rss_reader.config import settings
router = fastapi.APIRouter(
tags=["auth"],
)
@router.post("/login/access-token", response_model=schemas.Token)
def login_access_token(
db: sa.orm.Session = params.Depends(deps.get_db),
form_data: security.OAuth2PasswordRequestForm = params.Depends(),
) -> schemas.Token:
"""
OAuth2-compatible token login, get an access token for future requests.
"""
user = crud.user.authenticate(
db, email=form_data.username, password=form_data.password,
)
if not user:
raise fastapi.HTTPException(
status_code=400,
detail="Incorrect email or password",
)
if not user.is_active:
raise fastapi.HTTPException(
status_code=400,
detail="Inactive user",
)
expires_in = datetime.timedelta(seconds=settings.ACCESS_TOKEN_EXP_SECONDS)
access_token = rss_security.create_access_token(
user.id, expiration_delta=expires_in
)
return schemas.Token(
access_token=access_token,
)
@router.post("/login/test-access-token", response_model=schemas.User)
def test_access_token(
current_user: models.User = params.Depends(deps.get_current_user),
):
"""
Test access token.
"""
return current_user
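# Example token request against this router (host and credentials are illustrative):
#   curl -X POST http://localhost:8000/login/access-token \
#        -d "username=user@example.com" -d "password=secret"
# The response is a schemas.Token payload containing the access_token.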
|
from __future__ import annotations
from typing import Any, Dict, Optional
import requests
from ice3x.clients.abc import IceCubedClientBase
from ice3x.decorators import add_nonce, requires_authentication
class IceCubedSyncClient(IceCubedClientBase):
def _fetch_resource(
self, method: str, suffix: str, params: Optional[Dict[str, Any]] = None
) -> Dict[str, Any]:
"""Fetch the specified resource
Args:
method: The request method
suffix: The resource suffix
params: A Python dict of request params
Returns:
A Python dict containing the response data
"""
if params is None:
params = {}
kwargs: Any = {"params": params}
if method == "post":
kwargs["headers"] = {"Key": self.api_key, "Sign": self.sign(params)}
url = f"{self.BASE_URI}{suffix}"
resp = self.session.request(method, url, **kwargs)
resp.raise_for_status()
return resp.json()
def __init__(self, api_key: str = None, secret: str = None) -> None:
"""Instantiate the client
Args:
api_key: An ICE3X public API key
secret: An ICE3X private API key
"""
super().__init__(api_key=api_key, secret=secret)
self.session = requests.Session()
# Set the default session request headers
self.session.headers[
"user-agent"
] = "Mozilla/4.0 (compatible; Ice3x Sync Python client)"
def get_public_trade_info(self, trade_id: int, **params: Any) -> Dict[str, Any]:
"""Fetch public info relating to a specified trade
Args:
trade_id: A valid trade id
Returns:
Data relating to the specified trade id
"""
params.update({"trade_id": trade_id})
return self._fetch_resource("get", "trade/info", params)
def get_public_trade_list(self, **params: Any) -> Dict[str, Any]:
"""Fetch a public facing list of trades
Returns:
A list of public trade data
"""
return self._fetch_resource("get", "trade/list", params)
def get_market_depth(self, **params: Any) -> Dict[str, Any]:
"""Fetch the public market depth
Returns:
A market depth data
"""
return self._fetch_resource("get", "stats/marketdepth", params)
def get_pair_info(self, pair_id: int, **params: Any) -> Dict[str, Any]:
""""""
params.update({"pair_id": pair_id})
return self._fetch_resource("get", "pair/info", params)
def get_pair_list(self, **params: Any) -> Dict[str, Any]:
""""""
return self._fetch_resource("get", "pair/list", params)
def get_currency_info(self, currency_id: int, **params: Any) -> Dict[str, Any]:
""""""
params.update({"currency_id": currency_id})
return self._fetch_resource("get", "currency/info", params)
def get_currency_list(self, **params: Any) -> Dict[str, Any]:
""""""
return self._fetch_resource("get", "currency/list", params)
def get_orderbook_info(self, pair_id: int, **params: Any) -> Dict[str, Any]:
""""""
params.update({"pair_id": pair_id})
return self._fetch_resource("get", "orderbook/info", params)
def get_market_depth_full(self, **params: Any) -> Dict[str, Any]:
""""""
return self._fetch_resource("get", "stats/marketdepthfull", params)
def get_market_depth_bt_cav(self, **params: Any) -> Dict[str, Any]:
""""""
return self._fetch_resource("get", "stats/marketdepthbtcav", params)
@add_nonce
@requires_authentication
def get_invoice_list(self, **params: Any) -> Dict[str, Any]:
""""""
return self._fetch_resource("post", "invoice/list", params)
@add_nonce
@requires_authentication
def get_invoice_info(self, invoice_id: int, **params: Any) -> Dict[str, Any]:
""""""
params.update({"invoice_id": invoice_id})
return self._fetch_resource("post", "invoice/info", params)
@add_nonce
@requires_authentication
def get_invoice_pdf(self, invoice_id: int, **params: Any) -> Dict[str, Any]:
""""""
params.update({"invoice_id": invoice_id})
return self._fetch_resource("post", "invoice/pdf", params)
@add_nonce
@requires_authentication
def cancel_order(self, order_id: int, **params: Any) -> Dict[str, Any]:
""""""
params.update({"order_id": order_id})
return self._fetch_resource("post", "order/cancel", params)
@add_nonce
@requires_authentication
def create_order(
self, pair_id: int, kind: str, price: float, amount: float, **params: Any
) -> Dict[str, Any]:
"""Creates a new order given the provided inputs
Args:
            pair_id: Currency pair id
            kind: Transaction type i.e. 'buy' or 'sell'
            price: The price to be transacted at
            amount: The amount (volume) to be transacted
"""
params.update(
{"pair_id": pair_id, "amount": amount, "price": price, "type": kind}
)
return self._fetch_resource("post", "order/new", params)
@add_nonce
@requires_authentication
def get_order_info(self, order_id: int, **params: Any) -> Dict[str, Any]:
""""""
params.update({"order_id": order_id})
return self._fetch_resource("post", "order/info", params)
@add_nonce
@requires_authentication
def get_order_list(self, **params: Any) -> Dict[str, Any]:
""""""
return self._fetch_resource("post", "order/list", params)
@add_nonce
@requires_authentication
def get_transaction_info(
self, transaction_id: int, **params: Any
) -> Dict[str, Any]:
""""""
params.update({"transaction_id": transaction_id})
return self._fetch_resource("post", "transaction/info", params)
@add_nonce
@requires_authentication
def get_transaction_list(self, **params: Any) -> Dict[str, Any]:
""""""
return self._fetch_resource("post", "transaction/list", params)
@add_nonce
@requires_authentication
def get_trade_info(self, trade_id: int, **params: Any) -> Dict[str, Any]:
""""""
params.update({"trade_id": trade_id})
return self._fetch_resource("post", "trade/info", params)
@add_nonce
@requires_authentication
def get_trade_list(self, **params: Any) -> Dict[str, Any]:
""""""
return self._fetch_resource("post", "trade/list", params)
@add_nonce
@requires_authentication
def get_balance_list(self, **params: Any) -> Dict[str, Any]:
""""""
return self._fetch_resource("post", "balance/list", params)
@add_nonce
@requires_authentication
def get_balance_info(self, currency_id: int, **params: Any) -> Dict[str, Any]:
""""""
params.update({"currency_id": currency_id})
return self._fetch_resource("post", "balance/info", params)
|
# Copyright (c) 2019, MD2K Center of Excellence
# - Nasir Ali <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
import os
import traceback
from typing import List
class MinioHandler:
"""
Todo:
        For now, Minio is disabled as the CC config doesn't provide an option to use multiple object-storage backends
"""
###################################################################
################## GET DATA METHODS ###############################
###################################################################
def get_buckets(self) -> List:
"""
returns all available buckets in an object storage
Returns:
            dict: {"buckets-list": [{"bucket-name": str, "last_modified": str}, ...]}; in case of an error, [{"error": str}]
"""
bucket_list = []
try:
temp = []
bucket_list = {}
buckets = self.minioClient.list_buckets()
for bucket in buckets:
temp.append({"bucket-name":bucket.name, "last_modified": str(bucket.creation_date)})
bucket_list["buckets-list"] = temp
return bucket_list
except Exception as e:
return [{"error": str(e)}]
def get_bucket_objects(self, bucket_name: str) -> dict:
"""
returns a list of all objects stored in the specified Minio bucket
Args:
bucket_name (str): name of the bucket aka folder
Returns:
dict: {bucket-objects: [{"object_name":"", "metadata": {}}...], in case of an error {"error": str}
"""
objects_in_bucket = {}
try:
objects = self.minioClient.list_objects(bucket_name, recursive=True)
temp = []
bucket_objects = {}
for obj in objects:
object_stat = self.minioClient.stat_object(obj.bucket_name, obj.object_name)
object_stat = json.dumps(object_stat, default=lambda o: o.__dict__)
object_stat = json.loads(object_stat)
temp.append(object_stat)
objects_in_bucket[obj.object_name] = object_stat
object_stat.pop('metadata', None)
bucket_objects["bucket-objects"] = temp
return bucket_objects
except Exception as e:
objects_in_bucket["error"] = str(e)+" \n - Trace: "+str(traceback.format_exc())
return objects_in_bucket
def get_object_stats(self, bucket_name: str, object_name: str) -> dict:
"""
Returns properties (e.g., object type, last modified etc.) of an object stored in a specified bucket
Args:
bucket_name (str): name of a bucket aka folder
object_name (str): name of an object
Returns:
dict: information of an object (e.g., creation_date, object_size etc.). In case of an error {"error": str}
Raises:
ValueError: Missing bucket_name and object_name params.
Exception: {"error": "error-message"}
"""
try:
if self.is_bucket(bucket_name):
object_stat = self.minioClient.stat_object(bucket_name, object_name)
object_stat = json.dumps(object_stat, default=lambda o: o.__dict__)
object_stat = json.loads(object_stat)
return object_stat
else:
return [{"error": "Bucket does not exist"}]
except Exception as e:
return {"error": str(e)}
def get_object(self, bucket_name: str, object_name: str) -> dict:
"""
Returns stored object (HttpResponse)
Args:
bucket_name (str): name of a bucket aka folder
object_name (str): name of an object that needs to be downloaded
Returns:
file-object: object that needs to be downloaded. If file does not exists then it returns an error {"error": "File does not exist."}
Raises:
ValueError: Missing bucket_name and object_name params.
Exception: {"error": "error-message"}
"""
try:
if self.is_bucket(bucket_name):
return self.minioClient.get_object(bucket_name, object_name)
else:
return {"error": "Bucket does not exist"}
except Exception as e:
return {"error": str(e)}
def is_bucket(self, bucket_name: str) -> bool:
"""
checks whether a bucket exist
Args:
bucket_name (str): name of the bucket aka folder
Returns:
bool: True if bucket exist or False otherwise. In case an error {"error": str}
Raises:
ValueError: bucket_name cannot be None or empty.
"""
try:
return self.minioClient.bucket_exists(bucket_name)
except Exception as e:
raise e
def is_object(self, bucket_name: str, object_name: str) -> dict:
"""
checks whether an object exist in a bucket
Args:
bucket_name (str): name of the bucket aka folder
object_name (str): name of the object
Returns:
bool: True if object exist or False otherwise. In case an error {"error": str}
Raises:
            Exception: if bucket_name or object_name is empty or None
"""
try:
if self.is_bucket(bucket_name):
self.minioClient.stat_object(bucket_name, object_name)
return True
else:
return False
except Exception as e:
raise e
###################################################################
################## STORE DATA METHODS #############################
###################################################################
def create_bucket(self, bucket_name: str) -> bool:
"""
creates a bucket aka folder in object storage system.
Args:
bucket_name (str): name of the bucket
Returns:
bool: True if bucket was successfully created. On failure, returns an error with dict {"error":"error-message"}
Raises:
ValueError: Bucket name cannot be empty/None.
Examples:
>>> CC = CerebralCortex("/directory/path/of/configs/")
>>> CC.create_bucket("live_data_folder")
>>> True
"""
if not bucket_name:
raise ValueError("Bucket name cannot be empty")
try:
self.minioClient.make_bucket(bucket_name, location=self.CC.timezone)
return True
except Exception as e:
raise e
def upload_object(self, bucket_name: str, object_name: str, object_filepath: object) -> bool:
"""
Upload an object in a bucket aka folder of object storage system.
Args:
bucket_name (str): name of the bucket
object_name (str): name of the object to be uploaded
object_filepath (str): full path to the file, including the file name (e.g., /home/nasir/obj.zip)
Returns:
bool: True if the object was successfully uploaded
Raises:
ValueError: Bucket name cannot be empty/None.
Exception: if upload fails
"""
if not object_filepath:
raise ValueError("File name cannot be empty")
try:
file_stat = os.stat(object_filepath)
# use a context manager so the file handle is closed after the upload
with open(object_filepath, 'rb') as file_data:
self.minioClient.put_object(bucket_name, object_name, file_data,
file_stat.st_size, content_type='application/zip')
return True
except Exception as e:
raise e
def upload_object_to_s3(self, bucket_name: str, object_name: str, file_data: object, obj_length:int) -> bool:
"""
Upload an object in a bucket aka folder of object storage system.
Args:
bucket_name (str): name of the bucket
object_name (str): name of the object to be uploaded
file_data (object): object of a file
obj_length (int): size of an object
Returns:
bool: True if object successfully uploaded. On failure, throws an exception
Raises:
Exception: if upload fails
"""
try:
self.minioClient.put_object(bucket_name, object_name, file_data,
obj_length, content_type='application/zip')
return True
except Exception as e:
raise e
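# --- Hedged usage sketch (not part of the original module) ---
# These calls mirror the docstring example in create_bucket above; the config
# path, bucket name, and object names are placeholders, and the surrounding
# CerebralCortex setup is assumed:
# CC = CerebralCortex("/directory/path/of/configs/")
# CC.create_bucket("live_data_folder")
# CC.upload_object("live_data_folder", "obj.zip", "/home/nasir/obj.zip")
# if CC.is_object("live_data_folder", "obj.zip"):
#     stats = CC.get_object_stats("live_data_folder", "obj.zip")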
|
"""
Unit tests for kindle_to_md.py | parse_json_file().
"""
import pytest
from kindle_to_md import parse_json_file
from kindle_to_md import MisformattedKindleData
@pytest.mark.usefixtures( 'file_paths' )
@pytest.mark.usefixtures( 'test_data' )
class TestParseJSONFile :
def test_parse_good_book( self ) :
parsed_book = parse_json_file( self.file_paths['good'] )
assert( isinstance( parsed_book, dict ) )
assert( parsed_book['asin'] == 'A0A0A0A0A0' )
assert( parsed_book['title'] == 'A Book (With Parentheses)' )
assert( parsed_book['authors'] == 'An Author, Another Author, and Third Author' )
assert( isinstance( parsed_book['highlights'], list ) )
assert( parsed_book['highlights'][0] == self.test_data['individual_highlights']['just-highlight'] )
assert( parsed_book['highlights'][1] == self.test_data['individual_highlights']['highlight-and-note'] )
assert( parsed_book['highlights'][2] == self.test_data['individual_highlights']['just-note'] )
assert( parsed_book['highlights'][3] == self.test_data['individual_highlights']['cyrillic' ] )
def test_parse_book_not_kindle( self ) :
parsed_book = parse_json_file( self.file_paths['not-kindle'] )
assert( parsed_book is not None )
def test_parse_book_bad_json( self ) :
with pytest.raises( MisformattedKindleData ) :
parsed_book = parse_json_file( self.file_paths['bad-json'] )
def test_parse_book_not_json( self ) :
with pytest.raises( MisformattedKindleData ) :
parsed_book = parse_json_file( self.file_paths['not-json'] )
def test_parse_non_existent_book( self ) :
with pytest.raises( FileNotFoundError ) :
parsed_book = parse_json_file( 'path/no-book-here.json' )
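# Note (not part of the original file): the 'file_paths' and 'test_data'
# fixtures are expected to come from the suite's conftest.py and to attach
# dicts onto the test class. A hypothetical sketch of the assumed pattern:
# @pytest.fixture(scope='class')
# def file_paths(request):
#     request.cls.file_paths = {'good': '...', 'not-kindle': '...',
#                               'bad-json': '...', 'not-json': '...'}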
|
import pytest
from DevOps import Sum
from DevOps import Sub
from DevOps import Mul
from DevOps import Div
def test_somar():
assert Sum(2,4)==6
def test_sub():
assert Sub(2,4)==-2
def test_mul():
assert Mul(2,4)==8
def test_div():
assert Div(2,4)==0.5
|
import argparse
import sys # We need sys so that we can pass argv to QApplication
import os
import warnings
import pandas as pd
import petab.C as ptc
import pyqtgraph as pg
from PySide6 import QtWidgets, QtCore, QtGui
from PySide6.QtWidgets import (
QVBoxLayout, QComboBox, QWidget, QLabel, QTreeView
)
from petab import core
import petab
from petab.visualize.helper_functions import check_ex_exp_columns
from . import (utils, vis_spec_plot, window_functionality)
from .bar_plot import BarPlot
from .options_window import (OptionMenu, CorrelationOptionMenu,
OverviewPlotWindow)
class MainWindow(QtWidgets.QMainWindow):
"""
The main window
Attributes:
exp_data: PEtab measurement table
visualization_df: PEtab visualization table
yaml_dict: Dictionary of the files in the yaml file
condition_df: PEtab condition table
observable_df: PEtab observable table
plot1_widget: pg.GraphicsLayoutWidget containing the main plot
plot2_widget: pg.GraphicsLayoutWidget containing the correlation plot
warn_msg: QLabel displaying current warning messages
popup_tables: List of Popup TableWidget displaying the clicked table
tree_view: QTreeView of the yaml file
visu_spec_plots: A list of VisuSpecPlots
cbox: A dropdown menu for the plots
current_list_index: List index of the currently displayed plot
wid: QSplitter between main plot and correlation plot
"""
def __init__(self, yaml_filename: str = None,
simulation_file: pd.DataFrame = None, *args, **kwargs):
super(MainWindow, self).__init__(*args, **kwargs)
# set the background color to white
pg.setConfigOption('background', 'w')
pg.setConfigOption('foreground', 'k')
pg.setConfigOption("antialias", True)
self.resize(1000, 600)
self.setWindowTitle("petabvis")
self.visualization_df = None
self.simulation_df = None
self.condition_df = None
self.observable_df = None
self.exp_data = None
self.yaml_filename = yaml_filename
self.yaml_dict = None
self.color_map = utils.generate_color_map("viridis")
self.vis_spec_plots = []
self.wid = QtWidgets.QSplitter()
self.plot1_widget = pg.GraphicsLayoutWidget(show=True)
self.plot2_widget = pg.GraphicsLayoutWidget(show=False)
self.overview_plot_window = None
self.wid.addWidget(self.plot1_widget)
# plot2_widget will be added to the QSplitter when
# a simulation file is opened
self.cbox = QComboBox() # dropdown menu to select plots
self.cbox.currentIndexChanged.connect(lambda x: self.index_changed(x))
self.warn_msg = QLabel("")
self.warnings = []
self.warning_counter = {}
# The new window that pops up to display a table
self.popup_tables = []
self.options_window = OptionMenu(window=self,
vis_spec_plots=self.vis_spec_plots)
self.correlation_options_window = \
CorrelationOptionMenu(vis_spec_plots=self.vis_spec_plots)
self.correlation_option_button = None
self.overview_plot_button = None
self.tree_view = QTreeView(self)
self.tree_view.setHeaderHidden(True)
self.tree_root_node = None
self.simulation_tree_branch = None
self.wid.addWidget(self.tree_view)
self.current_list_index = 0
warnings.showwarning = self.redirect_warning
window_functionality.add_file_selector(self)
window_functionality.add_option_menu(self)
# the layout of the plot-list and message textbox
lower_layout = QVBoxLayout()
lower_layout.addWidget(self.cbox)
lower_layout.addWidget(self.warn_msg)
lower_widget = QWidget()
lower_widget.setLayout(lower_layout)
split_plots_and_warnings = QtWidgets.QSplitter()
split_plots_and_warnings.setOrientation(QtCore.Qt.Vertical)
split_plots_and_warnings.addWidget(self.wid)
split_plots_and_warnings.addWidget(lower_widget)
layout = QVBoxLayout()
layout.addWidget(split_plots_and_warnings)
widget = QWidget()
widget.setLayout(layout)
self.setCentralWidget(widget)
if self.yaml_filename:
self.read_data_from_yaml_file()
if simulation_file:
self.add_and_plot_simulation_file(simulation_file)
else:
self.add_plots()
def read_data_from_yaml_file(self):
self.yaml_dict = petab.load_yaml(self.yaml_filename)["problems"][0]
folder_path = os.path.dirname(self.yaml_filename) + "/"
if ptc.VISUALIZATION_FILES not in self.yaml_dict:
self.visualization_df = None
self.add_warning(
"The YAML file contains no "
"visualization file (default plotted)")
# table_tree_view sets the df attributes of the window
# equal to the first file of each branch
# (measurement, visualization, ...)
window_functionality.table_tree_view(self, folder_path)
def add_and_plot_simulation_file(self, filename):
"""
Add the simulation file and plot it.
Also, add the correlation plot to the window
and enable correlation plot and overview plot options.
Arguments:
filename: Path of the simulation file.
"""
sim_data = core.get_simulation_df(filename)
# check columns, and add non-mandatory default columns
sim_data, _, _ = check_ex_exp_columns(
sim_data, None, None, None, None, None,
self.condition_df, sim=True)
# delete the replicateId column if it gets added to the simulation
# table but is not in exp_data because it causes problems when
# splitting the replicates
if ptc.REPLICATE_ID not in self.exp_data.columns \
and ptc.REPLICATE_ID in sim_data.columns:
sim_data.drop(ptc.REPLICATE_ID, axis=1, inplace=True)
if len(self.yaml_dict[ptc.MEASUREMENT_FILES]) > 1:
self.add_warning(
"Not Implemented Error: Loading a simulation file with "
"multiple measurement files is currently not supported.")
else:
self.simulation_df = sim_data
self.add_plots()
# insert correlation plot at position 1
self.wid.insertWidget(1, self.plot2_widget)
filename = os.path.basename(filename)
window_functionality.add_simulation_df_to_tree_view(self, filename)
# add correlation options and overview plot to option menu
self.correlation_option_button.setVisible(True)
self.overview_plot_button.setVisible(True)
self.add_overview_plot_window()
def add_plots(self):
"""
Adds the current visuSpecPlots to the main window,
removes the old ones and updates the
cbox (dropdown list)
Returns:
List of PlotItem
"""
self.clear_qsplitter()
self.vis_spec_plots.clear()
self.options_window.reset_states()
if self.visualization_df is not None:
# to keep the order of plots consistent
# with names from the plot selection
plot_ids = list(self.visualization_df[ptc.PLOT_ID].unique())
for plot_id in plot_ids:
self.create_and_add_vis_plot(plot_id)
else: # default plot when no visu_df is provided
self.create_and_add_vis_plot()
plots = [vis_spec_plot.get_plot() for vis_spec_plot in
self.vis_spec_plots]
# update the cbox
self.cbox.clear()
# calling this method sets the index of the cbox to 0
# and thus displays the first plot
utils.add_plotnames_to_cbox(self.exp_data, self.visualization_df,
self.cbox)
return plots
def index_changed(self, i: int):
"""
Changes the displayed plot to the one selected in the dropdown list
Arguments:
i: index of the selected plot
"""
if 0 <= i < len(
self.vis_spec_plots): # i is -1 when the cbox is cleared
self.clear_qsplitter()
self.plot1_widget.addItem(self.vis_spec_plots[i].get_plot())
self.plot2_widget.hide()
if self.simulation_df is not None:
self.plot2_widget.show()
self.plot2_widget.addItem(
self.vis_spec_plots[i].correlation_plot)
self.current_list_index = i
def keyPressEvent(self, ev):
"""
Changes the displayed plot by pressing arrow keys
Arguments:
ev: key event
"""
# Exit when pressing ctrl + Q
ctrl = False
if ev.modifiers() & QtCore.Qt.ControlModifier:
ctrl = True
if ctrl and ev.key() == QtCore.Qt.Key_Q:
sys.exit()
if ev.key() == QtCore.Qt.Key_Up:
self.index_changed(self.current_list_index - 1)
if ev.key() == QtCore.Qt.Key_Down:
self.index_changed(self.current_list_index + 1)
if ev.key() == QtCore.Qt.Key_Left:
self.index_changed(self.current_list_index - 1)
if ev.key() == QtCore.Qt.Key_Right:
self.index_changed(self.current_list_index + 1)
def closeEvent(self, event):
sys.exit()
def add_warning(self, message: str):
"""
Adds the message to the warnings box
Arguments:
message: The message to display
"""
if message not in self.warnings:
self.warnings.append(message)
self.warning_counter[message] = 1
else:
self.warning_counter[message] += 1
self.warn_msg.setText(self.warnings_to_string())
def warnings_to_string(self):
"""
Convert the list of warnings to a string and
indicate the number of occurrences
Returns:
self.warnings as a string
"""
return "\n".join([warning if self.warning_counter[warning] <= 1
else warning + " (occured {} times)".format(
str(self.warning_counter[warning]))
for warning in self.warnings])
def redirect_warning(self, message, category, filename=None, lineno=None,
file=None, line=None):
"""
Redirect all warning messages and display them in the window.
Arguments:
message: The message of the warning
"""
print("Warning redirected: " + str(message))
self.add_warning(str(message))
def create_and_add_vis_plot(self, plot_id=""):
"""
Create a vis_spec_plot object based on the given plot_id.
If no plot_id is provided, the default plot will be created.
Add all the warnings of the vis_plot object to the warning text box.
The actual plotting happens in the index_changed method
Arguments:
plot_id: The plotId of the plot
"""
# split the measurement df by observable when using default plots
if self.visualization_df is None:
observable_ids = list(self.exp_data[ptc.OBSERVABLE_ID].unique())
for observable_id in observable_ids:
rows = self.exp_data[ptc.OBSERVABLE_ID] == observable_id
data = self.exp_data[rows]
simulation_df = self.simulation_df
if simulation_df is not None:
rows = self.simulation_df[ptc.OBSERVABLE_ID]\
== observable_id
simulation_df = self.simulation_df[rows]
vis_plot = vis_spec_plot.VisSpecPlot(
measurement_df=data, visualization_df=None,
condition_df=self.condition_df,
simulation_df=simulation_df, plot_id=observable_id,
color_map=self.color_map)
self.vis_spec_plots.append(vis_plot)
if vis_plot.warnings:
self.add_warning(vis_plot.warnings)
else:
# reduce the visualization df to the relevant rows (by plotId)
rows = self.visualization_df[ptc.PLOT_ID] == plot_id
vis_df = self.visualization_df[rows]
if ptc.PLOT_TYPE_SIMULATION in vis_df.columns and \
vis_df.iloc[0][ptc.PLOT_TYPE_SIMULATION] == ptc.BAR_PLOT:
bar_plot = BarPlot(measurement_df=self.exp_data,
visualization_df=vis_df,
condition_df=self.condition_df,
simulation_df=self.simulation_df,
plot_id=plot_id)
# might want to change the name of
# visu_spec_plots to clarify that
# it can also include bar plots (maybe to plots?)
self.vis_spec_plots.append(bar_plot)
else:
vis_plot = vis_spec_plot.VisSpecPlot(
measurement_df=self.exp_data,
visualization_df=vis_df,
condition_df=self.condition_df,
simulation_df=self.simulation_df, plot_id=plot_id,
color_map=self.color_map)
self.vis_spec_plots.append(vis_plot)
if vis_plot.warnings:
self.add_warning(vis_plot.warnings)
def clear_qsplitter(self):
"""
Clear the GraphicsLayoutWidgets for the
measurement and correlation plot
"""
self.plot1_widget.clear()
self.plot2_widget.clear()
def add_overview_plot_window(self):
self.overview_plot_window = OverviewPlotWindow(self.exp_data,
self.simulation_df)
def main():
options = argparse.ArgumentParser()
options.add_argument("-y", "--YAML", type=str, required=False,
help="PEtab YAML file", default=None)
options.add_argument("-s", "--simulation", type=str, required=False,
help="PEtab simulation file", default=None)
args = options.parse_args()
simulation_file = None
if args.simulation is not None:
simulation_file = args.simulation
app = QtWidgets.QApplication(sys.argv)
main_window = MainWindow(args.YAML, simulation_file)
main_window.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
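# Example invocation (the YAML and simulation file names are placeholders;
# assumes this module is run as the petabvis entry point):
#   python -m petabvis -y problem.yaml -s simulation.tsv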
|
# -*- coding: utf-8 -*-
"""
spcconvert - batch conversion and webpage building for SPC images
"""
import cvtools
import cv2
import os
import sys
import glob
import time
import datetime
import json
import shutil
import math
import tarfile
from multiprocessing import Pool, Process, Queue
from threading import Thread
import multiprocessing
from itertools import repeat
from operator import itemgetter
from pytz import timezone
import pystache
import numpy as np
import xmlsettings
from scipy import stats
import pandas
from collections import OrderedDict
def lmap(f, l):
return list(map(f,l))
def lzip(a,b):
return list(zip(a,b))
flow_frames = True
class Counter(object):
def __init__(self):
self.val = multiprocessing.Value('i', 0)
def increment(self, n=1):
with self.val.get_lock():
self.val.value += n
@property
def value(self):
return self.val.value
def process_image(bundle):
image_path = bundle['image_path']
image = bundle['image']
data_path = bundle['data_path']
image_dir = bundle['image_dir']
cfg = bundle['cfg']
total_images = bundle['total_images']
filename = os.path.basename(image_path)
# Patch bug in PCAM where the timestamp string is sometimes incorrectly set to
# 0 or a small value. Use the file creation time instead.
#
# This is okay so long as the queue in PCAM stays mostly empty. We can adapt
# the frame counter to fix this in the future.
timestamp = 0
for substr in filename.split('-'):
try:
timestamp = int(substr)
break
except ValueError:
pass
# Range check the timestamp
if timestamp < 100000:
print ("" + filename + " strange timestamp.")
#timestamp = os.path.getctime(image_path)
output = {}
return output
prefix = filename.split('.')[0]
# image is preloaded so no need to load here
#image = cvtools.import_image(data_path,filename,bayer_pattern=cv2.COLOR_BAYER_BG2RGB)
img_c_8bit = cvtools.convert_to_8bit(image)
# images will be saved out later so set save_to_disk to False
features = cvtools.quick_features(
img_c_8bit,
save_to_disk=False,
abs_path=image_dir,
file_prefix=prefix,
cfg=cfg
)
use_jpeg = cfg.get("UseJpeg").lower() == 'true'
if use_jpeg:
filename = os.path.basename(image_path).split('.')[0] + '.jpeg'
else:
filename = os.path.basename(image_path).split('.')[0] + '.png'
# handle new file format with unixtime in microseconds
if timestamp > 1498093400000000:
timestamp = timestamp/1000000
# Range check the timestamp
if timestamp < 100000 or timestamp > time.time():
print ("" + filename + " strange timestamp.")
#timestamp = os.path.getctime(image_path)
output = {}
return output
# print "Timestamp: " + str(timestamp)
timestring = datetime.datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d %H:%M:%S')
entry = {}
entry['maj_axis_len'] = features['major_axis_length']
entry['min_axis_len'] = features['minor_axis_length']
entry['aspect_ratio'] = features['aspect_ratio']
entry['area'] = features['area']
entry['clipped_fraction'] = features['clipped_fraction']
entry['orientation'] = features['orientation']*180/math.pi
entry['eccentricity'] = features['eccentricity']
entry['solidity'] = features['solidity']
entry['estimated_volume'] = features['estimated_volume']
entry['intensity_gray'] = features['intensity_gray']
entry['intensity_red'] = features['intensity_red']
entry['intensity_green'] = features['intensity_green']
entry['intensity_blue'] = features['intensity_blue']
entry['timestring'] = timestring
entry['timestamp'] = timestamp
entry['width'] = img_c_8bit.shape[1]
entry['height'] = img_c_8bit.shape[0]
entry['url'] = bundle['reldir'] + '/' + filename
entry['file_size'] = os.path.getsize(image_path)
output = {}
output['entry'] = entry
output['image_path'] = image_dir
output['prefix'] = prefix
output['features'] = features
return output
# threaded function for each process to call
# queues are used to sync processes
def process_bundle_list(bundle_queue,output_queue):
while True:
try:
output_queue.put(process_image(bundle_queue.get()))
except:
time.sleep(0.02*np.random.rand())
# Split a list into sublists
def chunks(l, n):
n = max(1, n)
return [l[i:i+n] for i in range(0, len(l), n)]
# Process a directory of images
def run(data_path,cfg):
print ("Running SPC image conversion...")
# get the base name of the directory
base_dir_name = os.path.basename(os.path.abspath(data_path))
# list the directory for tif images
print ("Listing directory " + base_dir_name + "...")
image_list = []
if cfg.get('MergeSubDirs',"false").lower() == "true":
sub_directory_list = sorted(glob.glob(os.path.join(data_path,"[0-9]"*10)))
for sub_directory in sub_directory_list:
print ("Listing sub directory " + sub_directory + "...")
image_list += glob.glob(os.path.join(sub_directory,"*.tif"))
else:
image_list += glob.glob(os.path.join(data_path,"*.tif"))
image_list = sorted(image_list)
# skip if no images were found
if len(image_list) == 0:
print ("No images were found. skipping this directory.")
return
# Get the total number of images in the directory
total_images = len(image_list)
# Create the output directories for the images and web app files
subdir = os.path.join(data_path,'..',base_dir_name + '_static_html')
if not os.path.exists(subdir):
os.makedirs(subdir)
image_dir = os.path.join(subdir,'static/images')
if not os.path.exists(image_dir):
os.makedirs(image_dir)
print ("Starting image conversion and page generation...")
# loop over the images and do the processing
images_per_dir = cfg.get('ImagesPerDir',2000)
if cfg.get("BayerPattern").lower() == "rg":
bayer_conv = cv2.COLOR_BAYER_RG2RGB
if cfg.get("BayerPattern").lower() == "bg":
bayer_conv = cv2.COLOR_BAYER_BG2RGB
print ("Loading images...\n",)
bundle_queue = Queue()
for index, image in enumerate(image_list):
reldir = 'images/' + str(images_per_dir*int(index/images_per_dir)).zfill(5)
absdir = os.path.join(image_dir,str(images_per_dir*int(index/images_per_dir)).zfill(5))
filename = os.path.basename(image)
if not os.path.exists(absdir):
os.makedirs(absdir)
bundle = {}
bundle['image_path'] = image
bundle['image'] = cvtools.import_image(os.path.dirname(image),filename,bayer_pattern=bayer_conv)
bundle['data_path'] = data_path
bundle['image_dir'] = absdir
bundle['reldir'] = reldir
bundle['cfg'] = cfg
bundle['total_images'] = total_images
bundle_queue.put(bundle)
print ("Loading images... (" + str(index) + " of " + str(total_images) + ")\n"),
#if index > 2000:
# total_images = index
# break
# Get the number of processes to use based on the CPU count
n_threads = multiprocessing.cpu_count() - 1
if n_threads < 1:
n_threads = 1
# Create the set of processes and start them
start_time = time.time()
output_queue = Queue()
processes = []
for i in range(0,n_threads):
p = Process(target=process_bundle_list, args=(bundle_queue,output_queue))
p.start()
processes.append(p)
# Monitor processing of the images and save processed images to disk as they become available
print ("\nProcessing Images...\r"),
counter = 0
entry_list = []
use_jpeg = cfg.get("UseJpeg").lower() == 'true'
raw_color = cfg.get("SaveRawColor").lower() == 'true'
while True:
print ("Processing and saving images... (" + str(counter).zfill(5) + " of " + str(total_images).zfill(5) + ")\r",)
if counter >= total_images:
break
#if output_queue.qsize() == 0:
try:
output = output_queue.get()
if output:
entry_list.append(output['entry'])
output_path = os.path.join(output['image_path'],output['prefix'])
if use_jpeg:
if raw_color:
cv2.imwrite(os.path.join(output_path+"_rawcolor.jpeg"),output['features']['rawcolor'])
cv2.imwrite(os.path.join(output_path+".jpeg"),output['features']['image'])
else:
if raw_color:
cv2.imwrite(os.path.join(output_path+"_rawcolor.png"),output['features']['rawcolor'])
cv2.imwrite(os.path.join(output_path+".png"),output['features']['image'])
cv2.imwrite(os.path.join(output_path+"_binary.png"),output['features']['binary'])
counter = counter + 1
except:
time.sleep(0.05)
# Record the total time for processing
proc_time = int(math.floor(time.time()-start_time))
# Terminate the processes in case they are stuck
for p in processes:
p.terminate()
print ("\nPostprocessing...")
# sort the entries by height and build the output
entry_list.sort(key=itemgetter('maj_axis_len'),reverse=True)
# Create histograms of several key features
# image resolution in mm/pixel
image_res = cfg.get('PixelSize',22.1)/1000
#print "Image resolution is set to: " + str(image_res) + " mm/pixel."
# Get arrays from the dict of features
total_images = len(entry_list)
nbins = int(np.ceil(np.sqrt(total_images)))
maj_len = np.array(lmap(itemgetter('maj_axis_len'),entry_list))*image_res
min_len = np.array(lmap(itemgetter('min_axis_len'),entry_list))*image_res
volume = np.array(lmap(itemgetter('estimated_volume'),entry_list))*image_res*image_res*image_res
aspect_ratio = np.array(lmap(itemgetter('aspect_ratio'),entry_list))
orientation = np.array(lmap(itemgetter('orientation'),entry_list))
area = np.array(lmap(itemgetter('area'),entry_list))*image_res*image_res
unixtime = np.array(lmap(itemgetter('timestamp'),entry_list))
elapsed_seconds = unixtime - np.min(unixtime)
file_size = np.array(lmap(itemgetter('file_size'),entry_list))/1000.0
# Gather features scaled by the pixel size
entry_list_scaled = []
for i, e in enumerate(entry_list):
data_list = [
('url', e['url']),
('timestamp', e['timestring']),
('file_size', file_size[i]),
('aspect_ratio', aspect_ratio[i]),
('maj_axis_len' , maj_len[i]),
('min_axis_len', min_len[i]),
('orientation', orientation[i]),
('eccentricity', e['eccentricity']),
('solidity', e['solidity']),
('estimated_volume', volume[i]),
('area', area[i]),
]
for intensity_group in ['intensity_gray','intensity_red', 'intensity_green', 'intensity_blue']:
if intensity_group in e:
for k, v in e[intensity_group].items():
data_list.append((intensity_group + "_" + k, v))
data = OrderedDict(data_list)
entry_list_scaled.append(data)
total_seconds = max(elapsed_seconds)
print ("Total seconds recorded: " + str(total_seconds))
if total_seconds < 1:
total_seconds = 1
print ("\nComputing histograms...")
# Compute histograms
all_hists = {}
hist = np.histogram(area,nbins)
all_hists['area'] = json.dumps(lzip(hist[1].tolist(),hist[0].tolist()))
hist = np.histogram(maj_len,nbins)
all_hists['major_axis_length'] = json.dumps(lzip(hist[1].tolist(),hist[0].tolist()))
hist = np.histogram(min_len,nbins)
all_hists['minor_axis_length'] = json.dumps(lzip(hist[1].tolist(),hist[0].tolist()))
hist = np.histogram(aspect_ratio,nbins)
all_hists['aspect_ratio'] = json.dumps(lzip(hist[1].tolist(),hist[0].tolist()))
hist = np.histogram(elapsed_seconds,np.uint32(total_seconds))
all_hists['elapsed_seconds'] = json.dumps(lzip(hist[1].tolist(),hist[0].tolist()))
hist = np.histogram(orientation,nbins)
all_hists['orientation'] = json.dumps(lzip(hist[1].tolist(),hist[0].tolist()))
hist = np.histogram(file_size,nbins)
print ("\nComputing stats...")
all_hists['file_size'] = json.dumps(lzip(hist[1].tolist(),hist[0].tolist()))
# Compute general stats from features
all_stats = {}
all_stats['area'] = stats.describe(area)
all_stats['major_axis_length'] = stats.describe(maj_len)
all_stats['minor_axis_length'] = stats.describe(min_len)
all_stats['aspect_ratio'] = stats.describe(aspect_ratio)
all_stats['elapsed_seconds'] = stats.describe(elapsed_seconds)
all_stats['orientation'] = stats.describe(orientation)
all_stats['file_size'] = stats.describe(file_size)
print ("Exporting spreadsheet results...")
df = pandas.DataFrame(entry_list_scaled)
df.to_csv(os.path.join(subdir,'features.tsv'), index=False, sep='\t')
print ("Building web app...")
try:
os.mkdir(os.path.join(subdir, "templates"))
os.mkdir(os.path.join(subdir, "static"))
except OSError:
print("directory already existed")
# Load html template for rendering
template = ""
with open(os.path.join('app','templates/index.html'),"r") as fconv:
template = fconv.read()
server = ""
with open(os.path.join('app','server.py'),"r") as fconv:
server = fconv.read()
# Define the render context from the processed histograms, images, and stats
context = {}
context['version'] = '1.0.1.05'
context['total_images'] = total_images
context['proc_time'] = proc_time
context['duration'] = total_seconds
context['compression_ratio'] = int((1000.0*24*total_images)/np.sum(file_size))
context['rois_per_second'] = total_images/context['duration']
context['kb_per_second'] = int(np.sum(file_size)/context['duration'])
context['recording_started'] = datetime.datetime.fromtimestamp(np.min(unixtime)).strftime('%Y-%m-%d %H:%M:%S')
context['app_title'] = "SPC Convert: " + base_dir_name
context['dir_name'] = base_dir_name
context['raw_color'] = raw_color
context['image_res'] = image_res
if use_jpeg:
context['image_ext'] = '.jpeg'
else:
context['image_ext'] = '.png'
context['stats_names'] = [{"name":"Min"},{"name":"Max"},{"name":"Mean"},{"name":"Standard Deviation"},{"name":"Skewness"},{"name":"Kurtosis"}]
# define the charts to display from the histogram data
charts = []
for chart_name, data_values in all_hists.items():
chart = {}
chart['source'] = 'js/' + chart_name + '.js'
chart['name'] = chart_name
units = ""
if chart_name == 'area':
units = " (mm*mm)"
if chart_name == 'major_axis_length' or chart_name == 'minor_axis_length':
units = " (mm)"
if chart_name == 'file_size':
units = " (kB)"
if chart_name == 'elapsed_seconds':
units = " (s)"
if chart_name == 'orientation':
units = " (deg)"
chart['title'] = 'Histogram of ' + chart_name + units
chart['x_title'] = chart_name + units
chart['y_title'] = 'counts'
chart['stats_title'] = chart_name
chart['data'] = data_values
chart['stats'] = []
chart['stats'].append({"name":"Min","value":"{:10.3f}".format(all_stats[chart_name][1][0])})
chart['stats'].append({"name":"Max","value":"{:10.3f}".format(all_stats[chart_name][1][1])})
chart['stats'].append({"name":"Mean","value":"{:10.3f}".format(all_stats[chart_name][2])})
chart['stats'].append({"name":"Standard Deviation","value":"{:10.3f}".format(math.sqrt(all_stats[chart_name][3]))})
chart['stats'].append({"name":"Skewness","value":"{:10.3f}".format(all_stats[chart_name][4])})
chart['stats'].append({"name":"Kurtosis","value":"{:10.3f}".format(all_stats[chart_name][5])})
charts.append(chart)
context['charts'] = charts
context['num_pred_0'], context['num_pred_1'] = ("{{num_pred_0}}","{{num_pred_1}}")
# render the html page and save to disk
page = pystache.render(template,context)
with open(os.path.join(subdir,'templates/spcdata.html'),"w") as fconv:
fconv.write(page)
with open(os.path.join(subdir,'server.py'),"w") as fconv:
fconv.write(server)
# remove any old app files and try to copy over new ones
try:
shutil.rmtree(os.path.join(subdir,"static/css"),ignore_errors=True)
shutil.copytree("app/static/css",os.path.join(subdir,"static/css"))
shutil.rmtree(os.path.join(subdir,"static/js"),ignore_errors=True)
shutil.copytree("app/static/js",os.path.join(subdir,"static/js"))
except:
print ("Error copying supporting files for html.")
# Load roistore.js database for rendering
template = ""
with open(os.path.join('app','static/js','database-template.js'),"r") as fconv:
template = fconv.read()
context = {}
context['image_items'] = entry_list
context['table'] = base_dir_name
# render the javascript page and save to disk
page = pystache.render(template,context)
with open(os.path.join(subdir,'static/js','database.js'),"w") as fconv:
fconv.write(page)
print ("Done.")
def valid_image_dir(test_path):
tif_list = glob.glob(os.path.join(test_path,"*.tif"))
return len(tif_list) > 0
# Module multiprocessing is organized differently in Python 3.4+
try:
# Python 3.4+
if sys.platform.startswith('win'):
import multiprocessing.popen_spawn_win32 as forking
else:
import multiprocessing.popen_fork as forking
except ImportError:
import multiprocessing.forking as forking
if sys.platform.startswith('win'):
# First define a modified version of Popen.
class _Popen(forking.Popen):
def __init__(self, *args, **kw):
if hasattr(sys, 'frozen'):
# We have to set original _MEIPASS2 value from sys._MEIPASS
# to get --onefile mode working.
os.putenv('_MEIPASS2', sys._MEIPASS)
try:
super(_Popen, self).__init__(*args, **kw)
finally:
if hasattr(sys, 'frozen'):
# On some platforms (e.g. AIX) 'os.unsetenv()' is not
# available. In those cases we cannot delete the variable
# but only set it to the empty string. The bootloader
# can handle this case.
if hasattr(os, 'unsetenv'):
os.unsetenv('_MEIPASS2')
else:
os.putenv('_MEIPASS2', '')
# Second override 'Popen' class with our modified version.
forking.Popen = _Popen
if __name__ == '__main__':
multiprocessing.freeze_support()
if len(sys.argv) <= 1:
print ("Please input a dirtectory of data directories, aborting.")
else:
if len(sys.argv) <= 2:
data_path = sys.argv[1]
# load the config file
cfg = xmlsettings.XMLSettings(os.path.join(sys.path[0],'settings.xml'))
combine_subdirs = cfg.get('MergeSubDirs',"False").lower() == "true"
print ("Settings file: " + os.path.join(sys.path[0],'settings.xml'))
# If file given try to unpack
if os.path.isfile(data_path):
extracted_path = data_path + "_unpacked"
with tarfile.open(data_path) as archive:
archive.extractall(path=extracted_path)
data_path = extracted_path
to_clean = extracted_path
# If given directory is a single data directory, just process it
if valid_image_dir(data_path):
run(data_path,cfg)
sys.exit(0)
# Otherwise look for data directories in the given directory
# List data directories and process each one
# expect the directories to be in the unixtime format
directory_list = sorted(glob.glob(os.path.join(data_path,"[0-9]"*10)))
if len(directory_list) == 0:
print ("No data directories found.")
sys.exit(0)
# Process the data directories in order
print ('Processing each data directory...')
for directory in directory_list:
if os.path.isdir(directory):
if not combine_subdirs:
if valid_image_dir(directory):
run(directory,cfg)
else:
run(directory,cfg)
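# Example invocation (reads settings.xml from the script directory, as loaded
# above; the data path is a placeholder and may be a directory of data
# directories, a single image directory, or a tar archive):
#   python spcconvert.py /path/to/spc_data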
|
import discord
class administrator:
def __init__(self, rankie, logging):
self.__rankie = rankie
self.__logging = logging
# Attaches and sends managed_channels to a discord message
async def get_log_file(self, ctx):
try:
await ctx.message.reply(file=discord.File('logs/rankie.log'))
except Exception as e:
self.__logging.error(f'Failed to deliver logs: {e}')
await ctx.message.reply(f'Error: {e}')
async def get_guilds(self, ctx):
msg = f'Connected to {len(self.__rankie.guilds)} server(s):\n'
# Iterate over guilds
for guild in self.__rankie.guilds:
msg += f'``{guild.name:<20}\t{guild.id:<15}``\n'
# Send the msg
await ctx.message.reply(msg)
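# Hedged wiring sketch (not part of the original file): the command names and
# the bot instance `rankie` (assumed to be a discord.ext.commands.Bot) are
# assumptions about the surrounding application.
# admin = administrator(rankie, logging)
#
# @rankie.command(name='logs')
# async def logs(ctx):
#     await admin.get_log_file(ctx)
#
# @rankie.command(name='guilds')
# async def guilds(ctx):
#     await admin.get_guilds(ctx)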
|
import os
from tqdm import trange
import torch
from torch.nn import functional as F
from torch import distributions as dist
from im2mesh.common import (
compute_iou, make_3d_grid, fix_K_camera, get_camera_args
)
from im2mesh.utils import visualize as vis
from im2mesh.training import BaseTrainer
from im2mesh.onet.loss_functions import get_occ_loss, occ_loss_postprocess
class Trainer(BaseTrainer):
''' Trainer object for the Occupancy Network.
Args:
model (nn.Module): Occupancy Network model
optimizer (optimizer): pytorch optimizer object
device (device): pytorch device
input_type (str): input type
vis_dir (str): visualization directory
threshold (float): threshold value
eval_sample (bool): whether to evaluate samples
loss_type (str): occupancy loss type (e.g., 'cross_entropy')
use_local_feature (bool): whether to condition on local image features (requires camera Rt/K from the data)
surface_loss_weight (float): weighting factor passed to the loss postprocessing
binary_occ (bool): binarize occupancy targets at 0.5
loss_tolerance_episolon (float): tolerance value passed to the loss postprocessing
sign_lambda (float): sign regularization factor passed to the loss postprocessing
'''
def __init__(self, model, optimizer, device=None, input_type='img',
vis_dir=None, threshold=0.5, eval_sample=False, loss_type='cross_entropy',
use_local_feature=False, surface_loss_weight=1.,
binary_occ=False,
loss_tolerance_episolon=0.,
sign_lambda=0.
):
self.model = model
self.optimizer = optimizer
self.device = device
self.input_type = input_type
self.vis_dir = vis_dir
self.threshold = threshold
self.eval_sample = eval_sample
self.loss_type = loss_type
self.use_local_feature = use_local_feature
self.surface_loss_weight = surface_loss_weight
self.binary_occ = binary_occ
self.loss_tolerance_episolon = loss_tolerance_episolon
self.sign_lambda = sign_lambda
if self.surface_loss_weight != 1.:
print('Surface loss weight:', self.surface_loss_weight)
if vis_dir is not None and not os.path.exists(vis_dir):
os.makedirs(vis_dir)
def train_step(self, data):
''' Performs a training step.
Args:
data (dict): data dictionary
'''
self.model.train()
self.optimizer.zero_grad()
loss = self.compute_loss(data)
loss.backward()
self.optimizer.step()
return loss.item()
def eval_step(self, data):
''' Performs an evaluation step.
Args:
data (dict): data dictionary
'''
self.model.eval()
device = self.device
threshold = self.threshold
eval_dict = {}
# Compute elbo
points = data.get('points').to(device)
if self.binary_occ:
occ = (data.get('points.occ') >= 0.5).float().to(device)
else:
occ = data.get('points.occ').to(device)
inputs = data.get('inputs', torch.empty(points.size(0), 0)).to(device)
voxels_occ = data.get('voxels')
points_iou = data.get('points_iou').to(device)
if self.binary_occ:
occ_iou = (data.get('points_iou.occ') >= 0.5).float().to(device)
else:
occ_iou = data.get('points_iou.occ').to(device)
kwargs = {}
if self.use_local_feature:
camera_args = get_camera_args(data, 'points.loc', 'points.scale', device=device)
Rt = camera_args['Rt']
K = camera_args['K']
with torch.no_grad():
if self.use_local_feature:
elbo, rec_error, kl = self.model.compute_elbo(
points, occ, inputs, Rt, K, **kwargs)
else:
elbo, rec_error, kl = self.model.compute_elbo(
points, occ, inputs, **kwargs)
eval_dict['loss'] = -elbo.mean().item()
eval_dict['rec_error'] = rec_error.mean().item()
eval_dict['kl'] = kl.mean().item()
# Compute iou
batch_size = points.size(0)
with torch.no_grad():
if self.use_local_feature:
p_out = self.model(points_iou, inputs, Rt, K,
sample=self.eval_sample, **kwargs)
else:
p_out = self.model(points_iou, inputs,
sample=self.eval_sample, **kwargs)
occ_iou_np = (occ_iou >= 0.5).cpu().numpy()
occ_iou_hat_np = (p_out.probs >= threshold).cpu().numpy()
iou = compute_iou(occ_iou_np, occ_iou_hat_np).mean()
eval_dict['iou'] = iou
# Estimate voxel iou
if voxels_occ is not None:
voxels_occ = voxels_occ.to(device)
points_voxels = make_3d_grid(
(-0.5 + 1/64,) * 3, (0.5 - 1/64,) * 3, (32,) * 3)
points_voxels = points_voxels.expand(
batch_size, *points_voxels.size())
points_voxels = points_voxels.to(device)
with torch.no_grad():
if self.use_local_feature:
p_out = self.model(points_voxels, inputs, Rt, K,
sample=self.eval_sample, **kwargs)
else:
p_out = self.model(points_voxels, inputs,
sample=self.eval_sample, **kwargs)
voxels_occ_np = (voxels_occ >= 0.5).cpu().numpy()
occ_hat_np = (p_out.probs >= threshold).cpu().numpy()
iou_voxels = compute_iou(voxels_occ_np, occ_hat_np).mean()
eval_dict['iou_voxels'] = iou_voxels
return eval_dict
def visualize(self, data):
''' Performs a visualization step for the data.
Args:
data (dict): data dictionary
'''
self.model.eval()
device = self.device
batch_size = data['points'].size(0)
inputs = data.get('inputs', torch.empty(batch_size, 0)).to(device)
if self.use_local_feature:
camera_args = get_camera_args(data, 'points.loc', 'points.scale', device=device)
Rt = camera_args['Rt']
K = camera_args['K']
shape = (32, 32, 32)
p = make_3d_grid([-0.5] * 3, [0.5] * 3, shape).to(device)
p = p.expand(batch_size, *p.size())
kwargs = {}
with torch.no_grad():
if self.use_local_feature:
p_r = self.model(p, inputs, Rt, K, sample=self.eval_sample, **kwargs)
else:
p_r = self.model(p, inputs, sample=self.eval_sample, **kwargs)
occ_hat = p_r.probs.view(batch_size, *shape)
voxels_out = (occ_hat >= self.threshold).cpu().numpy()
for i in trange(batch_size):
input_img_path = os.path.join(self.vis_dir, '%03d_in.png' % i)
vis.visualize_data(
inputs[i].cpu(), self.input_type, input_img_path)
vis.visualize_voxels(
voxels_out[i], os.path.join(self.vis_dir, '%03d.png' % i))
def compute_loss(self, data):
''' Computes the loss.
Args:
data (dict): data dictionary
'''
device = self.device
p = data.get('points').to(device)
if self.binary_occ:
occ = (data.get('points.occ') >= 0.5).float().to(device)
else:
occ = data.get('points.occ').to(device)
inputs = data.get('inputs', torch.empty(p.size(0), 0)).to(device)
kwargs = {}
if self.use_local_feature:
camera_args = get_camera_args(data, 'points.loc', 'points.scale', device=device)
Rt = camera_args['Rt']
K = camera_args['K']
f3,f2,f1 = self.model.encode_inputs(inputs,p,Rt,K)
else:
f3,f2,f1 = self.model.encode_inputs(inputs)
q_z = self.model.infer_z(p, occ, f3, **kwargs)
z = q_z.rsample()
# KL-divergence
kl = dist.kl_divergence(q_z, self.model.p0_z).sum(dim=-1)
loss = kl.mean()
# General points
p_r = self.model.decode(p, z, f3, f2, f1, **kwargs)
logits = p_r.logits
probs = p_r.probs
# loss
loss_i = get_occ_loss(logits, occ, self.loss_type)
# loss strategies
loss_i = occ_loss_postprocess(loss_i, occ, probs, self.loss_tolerance_episolon, self.sign_lambda, self.threshold, self.surface_loss_weight)
loss = loss + loss_i.sum(-1).mean()
return loss
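# Hedged usage sketch (hypothetical names; `model`, `optimizer`, and the data
# loader are assumed to be constructed elsewhere, e.g. from the im2mesh config):
# trainer = Trainer(model, optimizer, device=torch.device('cuda'),
#                   loss_type='cross_entropy')
# for batch in train_loader:
#     loss = trainer.train_step(batch)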
|
import scipy.io as sio
cats = ["Peace","Affection","Esteem","Anticipation","Engagement","Confidence","Happiness","Pleasure","Excitement","Surprise","Sympathy","Doubt/Confusion","Disconnection","Fatigue","Embarrassment","Yearning","Disapproval","Aversion","Annoyance","Anger","Sensitivity","Sadness","Disquietment","Fear","Pain","Suffering"]
mapp = {}
for i in range(26):
mapp[cats[i]] = i
a = sio.loadmat("Annotations.mat")
train = a['train']
val = a['val']
test = a['test']
def process(d, name):
f = open(name, 'w')
d = d[0]
tot = d.shape[0]
print(name, tot)
for i in range(tot):
di = d[i]
img_name = di[0].item()  # avoid shadowing the output file name parameter
folder = di[1].item()
person = di[4][0]
totp = person.shape[0]
for j in range(totp):
p = person[j].item()
bbox = p[0][0]
pc = p[1][0][0][0][0]
label = [0] * 26
totpc = len(pc)
for k in range(totpc):
label[mapp[pc[k].item()]] = 1
label = [str(item) for item in label]
label = ' '.join(label)
f.write('%s/%s %f %f %f %f %s\n'%(folder, img_name, bbox[0], bbox[1], bbox[2], bbox[3], label))
process(train, 'train.txt')
#process(val, 'val.txt')
#process(test, 'test.txt')
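# Each output line written above has the form:
#   <folder>/<image_name> <four bounding-box values> <26 space-separated 0/1 labels>
# where the 26 labels follow the order of the `cats` list defined at the top.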
|
from rest_framework import viewsets, permissions
from api.permissions import IsOwner
from updates.models import UpdateSubscription
from updates.serializers import UpdateSubscriptionSerializer
class UpdateSubscriptionViewSet(viewsets.ModelViewSet):
"""
API endpoints for subscription management
"""
queryset = UpdateSubscription.objects.all()
serializer_class = UpdateSubscriptionSerializer
permission_classes = [permissions.IsAuthenticated, IsOwner]
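# Hedged routing sketch (would normally live in a urls.py; the route prefix is
# an assumption, not taken from the original project):
# from rest_framework.routers import DefaultRouter
# router = DefaultRouter()
# router.register(r'update-subscriptions', UpdateSubscriptionViewSet)
# urlpatterns = router.urls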
|