code stringlengths 3–1.05M | repo_name stringlengths 5–104 | path stringlengths 4–251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3–1.05M
---|---|---|---|---|---|
from flask import Blueprint, request
bg9_40323218 = Blueprint('bg9_40323218', __name__, url_prefix='/bg9_40323218', template_folder='templates')
head_str = '''
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>Web-based 2D chain drawing</title>
<!-- IE 9: display inline SVG -->
<meta http-equiv="X-UA-Compatible" content="IE=9">
<script type="text/javascript" src="http://brython.info/src/brython_dist.js"></script>
<script type="text/javascript" src="http://cptocadp-2015fallhw.rhcloud.com/static/Cango-8v03.js"></script>
<script type="text/javascript" src="http://cptocadp-2015fallhw.rhcloud.com/static/Cango2D-6v13.js"></script>
<script type="text/javascript" src="http://cptocadp-2015fallhw.rhcloud.com/static/CangoAxes-1v33.js"></script>
</head>
<body>
<script>
window.onload=function(){
brython(1);
}
</script>
<canvas id="plotarea" width="800" height="800"></canvas>
'''
tail_str = '''
</script>
</body>
</html>
'''
chain_str = '''
<script type="text/python">
from javascript import JSConstructor
from browser import alert
from browser import window
import math
cango = JSConstructor(window.Cango)
cobj = JSConstructor(window.Cobj)
shapedefs = window.shapeDefs
obj2d = JSConstructor(window.Obj2D)
cgo = cango("plotarea")
cgo.setWorldCoords(-250, -250, 500, 500)
# Draw the coordinate axes
cgo.drawAxes(0, 240, 0, 240, {
"strokeColor":"#aaaaaa",
"fillColor": "#aaaaaa",
"xTickInterval": 20,
"xLabelInterval": 20,
"yTickInterval": 20,
"yLabelInterval": 20})
deg = math.pi/180
# Wrap the chain-link outline drawing in a class
class chain():
# The outline shape is stored as a class variable
chamber = "M -6.8397, -1.4894 \
A 7, 7, 0, 1, 0, 6.8397, -1.4894 \
A 40, 40, 0, 0, 1, 6.8397, -18.511 \
A 7, 7, 0, 1, 0, -6.8397, -18.511 \
A 40, 40, 0, 0, 1, -6.8397, -1.4894 z"
cgoChamber = window.svgToCgoSVG(chamber)
def __init__(self, fillcolor="green", border=True, strokecolor= "tan", linewidth=2, scale=1):
self.fillcolor = fillcolor
self.border = border
self.strokecolor = strokecolor
self.linewidth = linewidth
self.scale = scale
# Draw one link defined by its start and end points
def basic(self, x1, y1, x2, y2):
self.x1 = x1
self.y1 = y1
self.x2 = x2
self.y2 = y2
# Note: cgoChamber is a class attribute
cmbr = cobj(self.cgoChamber, "SHAPE", {
"fillColor": self.fillcolor,
"border": self.border,
"strokeColor": self.strokecolor,
"lineWidth": self.linewidth })
# hole marks the origin of the link
hole = cobj(shapedefs.circle(4*self.scale), "PATH")
cmbr.appendPath(hole)
# Duplicate cmbr and call the copy basic1
basic1 = cmbr.dup()
# The link hangs straight down from its origin, so rotate by 90 degrees plus the atan2 angle
basic1.rotate(math.atan2(y2-y1, x2-x1)/deg+90)
# Render at the requested scale
cgo.render(basic1, x1, y1, self.scale, 0)
# Draw one link from its start point and rotation angle, using the default color, border and linewidth
def basic_rot(self, x1, y1, rot, v=False):
# If v is True this is a virtual link and is not rendered
self.x1 = x1
self.y1 = y1
self.rot = rot
self.v = v
# Note: cgoChamber is a class attribute
cmbr = cobj(self.cgoChamber, "SHAPE", {
"fillColor": self.fillcolor,
"border": self.border,
"strokeColor": self.strokecolor,
"lineWidth": self.linewidth })
# hole marks the origin of the link
hole = cobj(shapedefs.circle(4*self.scale), "PATH")
cmbr.appendPath(hole)
# Compute x2 and y2 from the rotation angle
x2 = x1 + 20*math.cos(rot*deg)*self.scale
y2 = y1 + 20*math.sin(rot*deg)*self.scale
# Duplicate cmbr and call the copy basic1
basic1 = cmbr.dup()
# The link hangs straight down from its origin, so rotate by 90 degrees plus the atan2 angle
basic1.rotate(rot+90)
# Render at the requested scale unless this is a virtual link
if v == False:
cgo.render(basic1, x1, y1, self.scale, 0)
return x2, y2
'''
def circle36(x, y, degree=10):
# 20 is the hole-to-hole pitch of the chain-link outline
# the links lie on a circle of radius 20/2/math.sin(degree*math.pi/180/2)
# equivalently degree = 2*math.asin(20/2/radius)*180/math.pi
#degree = 10
first_degree = 90 - degree
repeat = 360 / degree
outstring = '''
mychain = chain()
x1, y1 = mychain.basic_rot('''+str(x)+","+str(y)+", "+str(first_degree)+''')
'''
for i in range(2, int(repeat)+1):
outstring += "x"+str(i)+", y"+str(i)+"=mychain.basic_rot(x"+str(i-1)+", y"+str(i-1)+", 90-"+str(i*degree)+") \n"
return outstring
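# Worked example of the relation documented in circle36 (illustrative numbers only, not used by any route):
# with degree = 10 the links sit on a circle of radius 20/2/math.sin(10*math.pi/180/2) ≈ 114.7 world units,
# and the inverse relation gives 2*math.asin(10/114.7)*180/math.pi ≈ 10 degrees back again.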
@bg9_40323218.route('/circle36/<degree>', defaults={'x': 0, 'y': 0})
@bg9_40323218.route('/circle36/<x>/<degree>', defaults={'y': 0})
@bg9_40323218.route('/circle36/<x>/<y>/<degree>')
#@bg9_40323218.route('/circle36/<int:x>/<int:y>/<int:degree>')
def drawcircle36(x,y,degree):
return head_str + chain_str + circle36(int(x), int(y), int(degree)) + tail_str
@bg9_40323218.route('/bike')
def bike():
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>Web-based 2D drawing</title>
<!-- IE 9: display inline SVG -->
<meta http-equiv="X-UA-Compatible" content="IE=9">
<script type="text/javascript" src="http://brython.info/src/brython_dist.js"></script>
<script type="text/javascript" src="http://cptocadp-2015fallhw.rhcloud.com/static/Cango-8v03.js"></script>
<script type="text/javascript" src="http://cptocadp-2015fallhw.rhcloud.com/static/Cango2D-6v13.js"></script>
<script type="text/javascript" src="http://cptocadp-2015fallhw.rhcloud.com/static/CangoAxes-1v33.js"></script>
<script>
window.onload=function(){
brython(1);
}
</script>
<canvas id="plotarea" width="800" height="800"></canvas>
<script type="text/python">
from javascript import JSConstructor
from browser import alert
from browser import window
import math
cango = JSConstructor(window.Cango)
cobj = JSConstructor(window.Cobj)
shapedefs = window.shapeDefs
obj2d = JSConstructor(window.Obj2D)
cgo = cango("plotarea")
cgo.setWorldCoords(-250, -250, 500, 500)
# Draw the coordinate axes
cgo.drawAxes(0, 240, 0, 240, {
"strokeColor":"#aaaaaa",
"fillColor": "#aaaaaa",
"xTickInterval": 20,
"xLabelInterval": 20,
"yTickInterval": 20,
"yLabelInterval": 20})
deg = math.pi/180
# Wrap the chain-link outline drawing in a class
class chain():
# The outline shape is stored as a class variable
chamber = "M -6.8397, -1.4894 A 7, 7, 0, 1, 0, 6.8397, -1.4894 A 40, 40, 0, 0, 1, 6.8397, -18.511 A 7, 7, 0, 1, 0, -6.8397, -18.511 A 40, 40, 0, 0, 1, -6.8397, -1.4894 z"
#chamber = "M 0, 0 L 0, -20 z"
cgoChamber = window.svgToCgoSVG(chamber)
def __init__(self, fillcolor="green", border=True, strokecolor= "tan", linewidth=2, scale=1):
self.fillcolor = fillcolor
self.border = border
self.strokecolor = strokecolor
self.linewidth = linewidth
self.scale = scale
# Draw one link defined by its start and end points
def basic(self, x1, y1, x2, y2):
self.x1 = x1
self.y1 = y1
self.x2 = x2
self.y2 = y2
# Note: cgoChamber is a class attribute
cmbr = cobj(self.cgoChamber, "SHAPE", {
"fillColor": self.fillcolor,
"border": self.border,
"strokeColor": self.strokecolor,
"lineWidth": self.linewidth })
# hole marks the origin of the link
hole = cobj(shapedefs.circle(4*self.scale), "PATH")
cmbr.appendPath(hole)
# Duplicate cmbr and call the copy basic1
basic1 = cmbr.dup()
# The link hangs straight down from its origin, so rotate by 90 degrees plus the atan2 angle
basic1.rotate(math.atan2(y2-y1, x2-x1)/deg+90)
# Render at the requested scale
cgo.render(basic1, x1, y1, self.scale, 0)
# Draw one link from its start point and rotation angle, using the default color, border and linewidth
def basic_rot(self, x1, y1, rot, v=False):
# If v is True this is a virtual link and is not rendered
self.x1 = x1
self.y1 = y1
self.rot = rot
self.v = v
# Note: cgoChamber is a class attribute
cmbr = cobj(self.cgoChamber, "SHAPE", {
"fillColor": self.fillcolor,
"border": self.border,
"strokeColor": self.strokecolor,
"lineWidth": self.linewidth })
# hole marks the origin of the link
hole = cobj(shapedefs.circle(4*self.scale), "PATH")
cmbr.appendPath(hole)
# Compute x2 and y2 from the rotation angle
x2 = x1 + 20*math.cos(rot*deg)*self.scale
y2 = y1 + 20*math.sin(rot*deg)*self.scale
# Duplicate cmbr and call the copy basic1
basic1 = cmbr.dup()
# The link hangs straight down from its origin, so rotate by 90 degrees plus the atan2 angle
basic1.rotate(rot+90)
# Render at the requested scale unless this is a virtual link
if v == False:
cgo.render(basic1, x1, y1, self.scale, 0)
return x2, y2
mychain = chain()
x1, y1 = mychain.basic_rot(-133.06,49.48, 20.78)
x2, y2=mychain.basic_rot(x1, y1,0.7800000000000011, True)
x3, y3=mychain.basic_rot(x2, y2,-19.22, True)
x4, y4=mychain.basic_rot(x3, y3,-39.22, True)
x5, y5=mychain.basic_rot(x4, y4,-59.22, True)
x6, y6=mychain.basic_rot(x5, y5,-79.22, True)
x7, y7=mychain.basic_rot(x6, y6,-99.22, True)
x8, y8=mychain.basic_rot(x7, y7,-119.22, True)
x9, y9=mychain.basic_rot(x8, y8,-139.22, True)
x10, y10=mychain.basic_rot(x9, y9,-159.22, True)
x11, y11=mychain.basic_rot(x10, y10,-179.22, True)
x12, y12=mychain.basic_rot(x11, y11,-199.22)
x13, y13=mychain.basic_rot(x12, y12,-219.22)
x14, y14=mychain.basic_rot(x13, y13,-239.22)
x15, y15=mychain.basic_rot(x14, y14,-259.22)
x16, y16=mychain.basic_rot(x15, y15,-279.22)
x17, y17=mychain.basic_rot(x16, y16,-299.22)
x18, y18=mychain.basic_rot(x17, y17,-319.22)
#mychain = chain()
p1, k1 = mychain.basic_rot(82.11,93.98, 4.78)
p2, k2=mychain.basic_rot(p1, k1,-7.219999999999999)
p3, k3=mychain.basic_rot(p2, k2,-19.22)
p4, k4=mychain.basic_rot(p3, k3,-31.22)
p5, k5=mychain.basic_rot(p4, k4,-43.22)
p6, k6=mychain.basic_rot(p5, k5,-55.22)
p7, k7=mychain.basic_rot(p6, k6,-67.22)
p8, k8=mychain.basic_rot(p7, k7,-79.22)
p9, k9=mychain.basic_rot(p8, k8,-91.22)
p10, k10=mychain.basic_rot(p9, k9,-103.22)
p11, k11=mychain.basic_rot(p10, k10,-115.22)
p12, k12=mychain.basic_rot(p11, k11,-127.22)
p13, k13=mychain.basic_rot(p12, k12,-139.22)
p14, k14=mychain.basic_rot(p13, k13,-151.22)
p15, k15=mychain.basic_rot(p14, k14,-163.22)
p16, k16=mychain.basic_rot(p15, k15,-175.22)
p17, k17=mychain.basic_rot(p16, k16,-187.22)
p18, k18=mychain.basic_rot(p17, k17,-199.22, True)
p19, k19=mychain.basic_rot(p18, k18,-211.22, True)
p20, k20=mychain.basic_rot(p19, k19,-223.22, True)
p21, k21=mychain.basic_rot(p20, k20,-235.22, True)
p22, k22=mychain.basic_rot(p21, k21,-247.22, True)
p23, k23=mychain.basic_rot(p22, k22,-259.22, True)
p24, k24=mychain.basic_rot(p23, k23,-271.22, True)
p25, k25=mychain.basic_rot(p24, k24,-283.22, True)
p26, k26=mychain.basic_rot(p25, k25,-295.22, True)
p27, k27=mychain.basic_rot(p26, k26,-307.22, True)
p28, k28=mychain.basic_rot(p27, k27,-319.22, True)
p29, k29=mychain.basic_rot(p28, k28,-331.22, True)
p30, k30=mychain.basic_rot(p29, k29,-343.22, True)
m1, n1 = mychain.basic_rot(x1, y1, 10.78)
m2, n2=mychain.basic_rot(m1, n1, 10.78)
m3, n3=mychain.basic_rot(m2, n2, 10.78)
m4, n4=mychain.basic_rot(m3, n3, 10.78)
m5, n5=mychain.basic_rot(m4, n4, 10.78)
m6, n6=mychain.basic_rot(m5, n5, 10.78)
m7, n7=mychain.basic_rot(m6, n6, 10.78)
m8, n8=mychain.basic_rot(m7, n7, 10.78)
m9, n9=mychain.basic_rot(m8, n8, 10.78)
m10, n10=mychain.basic_rot(m9, n9, 10.78)
r1, s1 = mychain.basic_rot(x11, y11, -10.78)
r2, s2=mychain.basic_rot(r1, s1, -10.78)
r3, s3=mychain.basic_rot(r2, s2, -10.78)
r4, s4=mychain.basic_rot(r3, s3, -10.78)
r5, s5=mychain.basic_rot(r4, s4, -10.78)
r6, s6=mychain.basic_rot(r5, s5, -10.78)
r7, s7=mychain.basic_rot(r6, s6, -10.78)
r8, s8=mychain.basic_rot(r7, s7, -10.78)
r9, s9=mychain.basic_rot(r8, s8, -10.78)
r10, s10=mychain.basic_rot(r9, s9, -10.78)
</script>
'''
return outstring
@bg9_40323218.route('/bike2')
def bike2():
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>Web-based 2D drawing</title>
<!-- IE 9: display inline SVG -->
<meta http-equiv="X-UA-Compatible" content="IE=9">
<script type="text/javascript" src="http://brython.info/src/brython_dist.js"></script>
<script type="text/javascript" src="http://cptocadp-2015fallhw.rhcloud.com/static/Cango-8v03.js"></script>
<script type="text/javascript" src="http://cptocadp-2015fallhw.rhcloud.com/static/Cango2D-6v13.js"></script>
<script type="text/javascript" src="http://cptocadp-2015fallhw.rhcloud.com/static/CangoAxes-1v33.js"></script>
<script>
window.onload=function(){
brython(1);
}
</script>
<canvas id="plotarea" width="800" height="800"></canvas>
<script type="text/python">
from javascript import JSConstructor
from browser import alert
from browser import window
import math
cango = JSConstructor(window.Cango)
cobj = JSConstructor(window.Cobj)
shapedefs = window.shapeDefs
obj2d = JSConstructor(window.Obj2D)
cgo = cango("plotarea")
cgo.setWorldCoords(-250, -250, 500, 500)
# Draw the coordinate axes
cgo.drawAxes(0, 240, 0, 240, {
"strokeColor":"#aaaaaa",
"fillColor": "#aaaaaa",
"xTickInterval": 20,
"xLabelInterval": 20,
"yTickInterval": 20,
"yLabelInterval": 20})
deg = math.pi/180
# Wrap the chain-link outline drawing in a class
class chain():
# The outline shape is stored as a class variable
chamber = "M -6.8397, -1.4894 A 7, 7, 0, 1, 0, 6.8397, -1.4894 A 40, 40, 0, 0, 1, 6.8397, -18.511 A 7, 7, 0, 1, 0, -6.8397, -18.511 A 40, 40, 0, 0, 1, -6.8397, -1.4894 z"
#chamber = "M 0, 0 L 0, -20 z"
cgoChamber = window.svgToCgoSVG(chamber)
def __init__(self, fillcolor="green", border=True, strokecolor= "tan", linewidth=2, scale=1):
self.fillcolor = fillcolor
self.border = border
self.strokecolor = strokecolor
self.linewidth = linewidth
self.scale = scale
# Draw one link defined by its start and end points
def basic(self, x1, y1, x2, y2):
self.x1 = x1
self.y1 = y1
self.x2 = x2
self.y2 = y2
# Note: cgoChamber is a class attribute
cmbr = cobj(self.cgoChamber, "SHAPE", {
"fillColor": self.fillcolor,
"border": self.border,
"strokeColor": self.strokecolor,
"lineWidth": self.linewidth })
# hole marks the origin of the link
hole = cobj(shapedefs.circle(4*self.scale), "PATH")
cmbr.appendPath(hole)
# Duplicate cmbr and call the copy basic1
basic1 = cmbr.dup()
# The link hangs straight down from its origin, so rotate by 90 degrees plus the atan2 angle
basic1.rotate(math.atan2(y2-y1, x2-x1)/deg+90)
# Render at the requested scale
cgo.render(basic1, x1, y1, self.scale, 0)
# Draw one link from its start point and rotation angle, using the default color, border and linewidth
def basic_rot(self, x1, y1, rot, v=False):
# If v is True this is a virtual link and is not rendered
self.x1 = x1
self.y1 = y1
self.rot = rot
self.v = v
# Note: cgoChamber is a class attribute
cmbr = cobj(self.cgoChamber, "SHAPE", {
"fillColor": self.fillcolor,
"border": self.border,
"strokeColor": self.strokecolor,
"lineWidth": self.linewidth })
# hole marks the origin of the link
hole = cobj(shapedefs.circle(4*self.scale), "PATH")
cmbr.appendPath(hole)
# Compute x2 and y2 from the rotation angle
x2 = x1 + 20*math.cos(rot*deg)*self.scale
y2 = y1 + 20*math.sin(rot*deg)*self.scale
# Duplicate cmbr and call the copy basic1
basic1 = cmbr.dup()
# The link hangs straight down from its origin, so rotate by 90 degrees plus the atan2 angle
basic1.rotate(rot+90)
# Render at the requested scale unless this is a virtual link
if v == False:
cgo.render(basic1, x1, y1, self.scale, 0)
return x2, y2
mychain = chain()
x1, y1 = mychain.basic_rot(-133.06,49.48, 20.78)
x2, y2=mychain.basic_rot(x1, y1,0.7800000000000011, True)
x3, y3=mychain.basic_rot(x2, y2,-19.22, True)
x4, y4=mychain.basic_rot(x3, y3,-39.22, True)
x5, y5=mychain.basic_rot(x4, y4,-59.22, True)
x6, y6=mychain.basic_rot(x5, y5,-79.22, True)
x7, y7=mychain.basic_rot(x6, y6,-99.22, True)
x8, y8=mychain.basic_rot(x7, y7,-119.22, True)
x9, y9=mychain.basic_rot(x8, y8,-139.22, True)
x10, y10=mychain.basic_rot(x9, y9,-159.22, True)
x11, y11=mychain.basic_rot(x10, y10,-179.22, True)
x12, y12=mychain.basic_rot(x11, y11,-199.22)
x13, y13=mychain.basic_rot(x12, y12,-219.22)
x14, y14=mychain.basic_rot(x13, y13,-239.22)
x15, y15=mychain.basic_rot(x14, y14,-259.22)
x16, y16=mychain.basic_rot(x15, y15,-279.22)
x17, y17=mychain.basic_rot(x16, y16,-299.22)
x18, y18=mychain.basic_rot(x17, y17,-319.22)
#mychain = chain()
p1, k1 = mychain.basic_rot(82.11,93.98, 4.78)
p2, k2=mychain.basic_rot(p1, k1,-7.219999999999999)
p3, k3=mychain.basic_rot(p2, k2,-19.22)
p4, k4=mychain.basic_rot(p3, k3,-31.22)
p5, k5=mychain.basic_rot(p4, k4,-43.22)
p6, k6=mychain.basic_rot(p5, k5,-55.22)
p7, k7=mychain.basic_rot(p6, k6,-67.22)
p8, k8=mychain.basic_rot(p7, k7,-79.22)
p9, k9=mychain.basic_rot(p8, k8,-91.22)
p10, k10=mychain.basic_rot(p9, k9,-103.22)
p11, k11=mychain.basic_rot(p10, k10,-115.22)
p12, k12=mychain.basic_rot(p11, k11,-127.22)
p13, k13=mychain.basic_rot(p12, k12,-139.22)
p14, k14=mychain.basic_rot(p13, k13,-151.22)
p15, k15=mychain.basic_rot(p14, k14,-163.22)
p16, k16=mychain.basic_rot(p15, k15,-175.22)
p17, k17=mychain.basic_rot(p16, k16,-187.22)
p18, k18=mychain.basic_rot(p17, k17,-199.22, True)
p19, k19=mychain.basic_rot(p18, k18,-211.22, True)
p20, k20=mychain.basic_rot(p19, k19,-223.22, True)
p21, k21=mychain.basic_rot(p20, k20,-235.22, True)
p22, k22=mychain.basic_rot(p21, k21,-247.22, True)
p23, k23=mychain.basic_rot(p22, k22,-259.22, True)
p24, k24=mychain.basic_rot(p23, k23,-271.22, True)
p25, k25=mychain.basic_rot(p24, k24,-283.22, True)
p26, k26=mychain.basic_rot(p25, k25,-295.22, True)
p27, k27=mychain.basic_rot(p26, k26,-307.22, True)
p28, k28=mychain.basic_rot(p27, k27,-319.22, True)
p29, k29=mychain.basic_rot(p28, k28,-331.22, True)
p30, k30=mychain.basic_rot(p29, k29,-343.22, True)
m1, n1 = mychain.basic_rot(x1, y1, 10.78)
m2, n2=mychain.basic_rot(m1, n1, 10.78)
m3, n3=mychain.basic_rot(m2, n2, 10.78)
m4, n4=mychain.basic_rot(m3, n3, 10.78)
m5, n5=mychain.basic_rot(m4, n4, 10.78)
m6, n6=mychain.basic_rot(m5, n5, 10.78)
m7, n7=mychain.basic_rot(m6, n6, 10.78)
m8, n8=mychain.basic_rot(m7, n7, 10.78)
m9, n9=mychain.basic_rot(m8, n8, 10.78)
m10, n10=mychain.basic_rot(m9, n9, 10.78)
r1, s1 = mychain.basic_rot(x11, y11, -10.78)
r2, s2=mychain.basic_rot(r1, s1, -10.78)
r3, s3=mychain.basic_rot(r2, s2, -10.78)
r4, s4=mychain.basic_rot(r3, s3, -10.78)
r5, s5=mychain.basic_rot(r4, s4, -10.78)
r6, s6=mychain.basic_rot(r5, s5, -10.78)
r7, s7=mychain.basic_rot(r6, s6, -10.78)
r8, s8=mychain.basic_rot(r7, s7, -10.78)
r9, s9=mychain.basic_rot(r8, s8, -10.78)
r10, s10=mychain.basic_rot(r9, s9, -10.78)
</script>
'''
return outstring
def circle(x, y):
outstring = '''
mychain = chain()
x1, y1 = mychain.basic_rot('''+str(x)+","+str(y)+''', 50)
'''
for i in range(2, 10):
outstring += "x"+str(i)+", y"+str(i)+"=mychain.basic_rot(x"+str(i-1)+", y"+str(i-1)+", 90-"+str(i*40)+") \n"
return outstring
def circle1(x, y, degree=10):
# 20 is the hole-to-hole pitch of one chain link
# the links lie on a circle of radius 20/2/math.sin(degree*math.pi/180/2)
# equivalently degree = 2*math.asin(20/2/radius)*180/math.pi
#degree = 10
first_degree = 90 - degree
repeat = 360 / degree
outstring = '''
mychain = chain()
x1, y1 = mychain.basic_rot('''+str(x)+","+str(y)+", "+str(first_degree)+''')
'''
for i in range(2, int(repeat)+1):
outstring += "x"+str(i)+", y"+str(i)+"=mychain.basic_rot(x"+str(i-1)+", y"+str(i-1)+", 90-"+str(i*degree)+") \n"
return outstring
def circle2(x, y, degree=10):
# 20 is the hole-to-hole pitch of one chain link
# the links lie on a circle of radius 20/2/math.sin(degree*math.pi/180/2)
# equivalently degree = 2*math.asin(20/2/radius)*180/math.pi
#degree = 10
first_degree = 90 - degree
repeat = 360 / degree
outstring = '''
mychain = chain()
x1, y1 = mychain.basic_rot('''+str(x)+","+str(y)+", "+str(first_degree)+''')
'''
for i in range(2, int(repeat)+1):
outstring += "x"+str(i)+", y"+str(i)+"=mychain.basic_rot(x"+str(i-1)+", y"+str(i-1)+", 90-"+str(i*degree)+") \n"
return outstring
def twocircle(x, y):
# 20 is the hole-to-hole pitch of one chain link
# the links lie on a circle of radius 20/2/math.sin(degree*math.pi/180/2)
# equivalently degree = 2*math.asin(20/2/radius)*180/math.pi
x = 50
y = 0
degree = 12
# 78, 66, 54, 42, 30, 18, 6 degrees
# some links must have their coordinates computed without being rendered
first_degree = 90 - degree
repeat = 360 / degree
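# Illustrative numbers for the values chosen above (not used directly by the code): degree = 12 gives
# repeat = 30 links, which sit on a circle of radius 20/2/math.sin(6*math.pi/180) ≈ 95.7 world units.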
# the first link is also a virtual link
outstring = '''
mychain = chain()
x1, y1 = mychain.basic_rot('''+str(x)+","+str(y)+", "+str(first_degree)+''', True)
#x1, y1 = mychain.basic_rot('''+str(x)+","+str(y)+", "+str(first_degree)+''')
'''
# leave one extra virtual link at both the top and the bottom here so the ends can later be joined at (x7, y7) and (x22, y22)
for i in range(2, int(repeat)+1):
#if i < 7 or i > 23:
if i <= 7 or i >= 23:
# virtual chain
outstring += "x"+str(i)+", y"+str(i)+"=mychain.basic_rot(x"+str(i-1)+", y"+str(i-1)+", 90-"+str(i*degree)+", True) \n"
#outstring += "x"+str(i)+", y"+str(i)+"=mychain.basic_rot(x"+str(i-1)+", y"+str(i-1)+", 90-"+str(i*degree)+") \n"
else:
outstring += "x"+str(i)+", y"+str(i)+"=mychain.basic_rot(x"+str(i-1)+", y"+str(i-1)+", 90-"+str(i*degree)+") \n"
p = -150
k = 0
degree = 20
# 70, 50, 30, 10
# from i = 5 onwards the links are virtual
first_degree = 90 - degree
repeat = 360 / degree
# the first link is not a virtual link
outstring += '''
#mychain = chain()
p1, k1 = mychain.basic_rot('''+str(p)+","+str(k)+", "+str(first_degree)+''')
'''
for i in range(2, int(repeat)+1):
if i >= 5 and i <= 13:
# virtual chain
outstring += "p"+str(i)+", k"+str(i)+"=mychain.basic_rot(p"+str(i-1)+", k"+str(i-1)+", 90-"+str(i*degree)+", True) \n"
#outstring += "p"+str(i)+", k"+str(i)+"=mychain.basic_rot(p"+str(i-1)+", k"+str(i-1)+", 90-"+str(i*degree)+") \n"
else:
outstring += "p"+str(i)+", k"+str(i)+"=mychain.basic_rot(p"+str(i-1)+", k"+str(i-1)+", 90-"+str(i*degree)+") \n"
# upper connecting straight run
# starting from p4, k4
first_degree = 10
repeat = 11
outstring += '''
m1, n1 = mychain.basic_rot(p4, k4, '''+str(first_degree)+''')
'''
for i in range(2, int(repeat)+1):
outstring += "m"+str(i)+", n"+str(i)+"=mychain.basic_rot(m"+str(i-1)+", n"+str(i-1)+", "+str(first_degree)+")\n"
# lower connecting straight run
# starting from p13, k13
first_degree = -10
repeat = 11
outstring += '''
r1, s1 = mychain.basic_rot(p13, k13, '''+str(first_degree)+''')
'''
for i in range(2, int(repeat)+1):
outstring += "r"+str(i)+", s"+str(i)+"=mychain.basic_rot(r"+str(i-1)+", s"+str(i-1)+", "+str(first_degree)+")\n"
# the upper run connects to x7, y7 on the right and to m11, n11 on the left
outstring += "mychain.basic(x7, y7, m11, n11)\n"
# the lower run connects to x22, y22 on the right and to r11, s11 on the left
outstring += "mychain.basic(x22, y22, r11, s11)\n"
return outstring
def eighteenthirty(x, y):
'''
Coordinates of the two external tangent lines, obtained both graphically and symbolically:
(-203.592946177111, 0.0), (0.0, 0.0), (-214.364148466539, 56.5714145924675), (-17.8936874260919, 93.9794075692901)
(-203.592946177111, 0.0), (0.0, 0.0), (-214.364148466539, -56.5714145924675), (-17.8936874260919, -93.9794075692901)
Left key link starts at (-233.06, 49.48), angle 20.78, circle centre (-203.593, 0.0)
Right key link starts at (-17.89, 93.9), angle 4.78, circle centre (0, 0)
'''
# 20 is the hole-to-hole pitch of one chain link
# the links lie on a circle of radius 20/2/math.sin(degree*math.pi/180/2)
# equivalently degree = 2*math.asin(20/2/radius)*180/math.pi
x = 50
y = 0
degree = 20
first_degree = 20.78
startx = -233.06+100
starty = 49.48
repeat = 360 / degree
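# Illustrative numbers (not used directly by the code): degree = 20 gives repeat = 18 links on a circle
# of radius 20/2/math.sin(10*math.pi/180) ≈ 57.6, and the second circle below uses degree = 12, i.e. 30
# links; this presumably explains the route name eighteenthirty.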
# first draw the left circle's first key link
outstring = '''
mychain = chain()
x1, y1 = mychain.basic_rot('''+str(startx)+","+str(starty)+", "+str(first_degree)+''')
'''
# next, draw the left circle's non-virtual links
for i in range(2, int(repeat)+1):
if i >=2 and i <=11:
# virtual chain
#outstring += "x"+str(i)+", y"+str(i)+"=mychain.basic_rot(x"+str(i-1)+", y"+str(i-1)+","+str(first_degree+degree-i*degree)+") \n"
outstring += "x"+str(i)+", y"+str(i)+"=mychain.basic_rot(x"+str(i-1)+", y"+str(i-1)+","+str(first_degree+degree-i*degree)+", True) \n"
else:
outstring += "x"+str(i)+", y"+str(i)+"=mychain.basic_rot(x"+str(i-1)+", y"+str(i-1)+","+str(first_degree+degree-i*degree)+") \n"
# then handle the right circle's non-virtual links
# first draw the right circle's first key link
p = -17.89+100
k = 93.98
degree = 12
first_degree = 4.78
repeat = 360 / degree
# the first link is not a virtual link
outstring += '''
#mychain = chain()
p1, k1 = mychain.basic_rot('''+str(p)+","+str(k)+", "+str(first_degree)+''')
'''
for i in range(2, int(repeat)+1):
if i >=18:
# virtual chain
outstring += "p"+str(i)+", k"+str(i)+"=mychain.basic_rot(p"+str(i-1)+", k"+str(i-1)+","+str(first_degree+degree-i*degree)+", True) \n"
#outstring += "p"+str(i)+", k"+str(i)+"=mychain.basic_rot(p"+str(i-1)+", k"+str(i-1)+","+str(first_degree+degree-i*degree)+") \n"
else:
outstring += "p"+str(i)+", k"+str(i)+"=mychain.basic_rot(p"+str(i-1)+", k"+str(i-1)+","+str(first_degree+degree-i*degree)+") \n"
# upper connecting straight run
# starting from x1, y1
first_degree = 10.78
repeat = 10
outstring += '''
m1, n1 = mychain.basic_rot(x1, y1, '''+str(first_degree)+''')
'''
for i in range(2, int(repeat)+1):
outstring += "m"+str(i)+", n"+str(i)+"=mychain.basic_rot(m"+str(i-1)+", n"+str(i-1)+", "+str(first_degree)+")\n"
# lower connecting straight run
# starting from x11, y11
first_degree = -10.78
repeat = 10
outstring += '''
r1, s1 = mychain.basic_rot(x11, y11, '''+str(first_degree)+''')
'''
for i in range(2, int(repeat)+1):
outstring += "r"+str(i)+", s"+str(i)+"=mychain.basic_rot(r"+str(i-1)+", s"+str(i-1)+", "+str(first_degree)+")\n"
return outstring
@bg9_40323218.route('/circle')
def drawcircle():
return head_str + chain_str + circle(0, 0) + tail_str
@bg9_40323218.route('/circle1/<degree>', defaults={'x': 0, 'y': 0})
@bg9_40323218.route('/circle1/<x>/<degree>', defaults={'y': 0})
@bg9_40323218.route('/circle1/<x>/<y>/<degree>')
#@bg9_40323218.route('/circle1/<int:x>/<int:y>/<int:degree>')
def drawcircle1(x,y,degree):
return head_str + chain_str + circle1(int(x), int(y), int(degree)) + tail_str
@bg9_40323218.route('/circle2/<degree>', defaults={'x': 0, 'y': 0})
@bg9_40323218.route('/circle2/<x>/<degree>', defaults={'y': 0})
@bg9_40323218.route('/circle2/<x>/<y>/<degree>')
#@bg9_40323218.route('/circle2/<int:x>/<int:y>/<int:degree>')
def drawcircle2(x,y,degree):
return head_str + chain_str + circle2(int(x), int(y), int(degree)) + tail_str
@bg9_40323218.route('/twocircle/<x>/<y>')
@bg9_40323218.route('/twocircle', defaults={'x':0, 'y':0})
def drawtwocircle(x,y):
return head_str + chain_str + twocircle(int(x), int(y)) + tail_str
@bg9_40323218.route('/eighteenthirty/<x>/<y>')
@bg9_40323218.route('/eighteenthirty', defaults={'x':0, 'y':0})
def draweighteenthirty(x,y):
return head_str + chain_str + eighteenthirty(int(x), int(y)) + tail_str
@bg9_40323218.route('/snap')
# http://svg.dabbles.info/snaptut-base
def snap():
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>Web-based Snap drawing</title>
<!-- IE 9: display inline SVG -->
<meta http-equiv="X-UA-Compatible" content="IE=9">
<script type="text/javascript" src="http://brython.info/src/brython_dist.js"></script>
<script type="text/javascript" src="/static/snap.svg-min.js"></script>
<script>
window.onload=function(){
brython(1);
}
</script>
</head>
<body>
<svg width="800" height="800" viewBox="0 0 800 800" id="svgout"></svg>
<script type="text/python">
from javascript import JSConstructor
from browser import alert
from browser import window, document
# obtain the Snap constructor through window and JSConstructor so Brython can use it
snap = JSConstructor(window.Snap)
s = snap("#svgout")
# set the id attribute while creating each object
r = s.rect(10,10,100,100).attr({'id': 'rect'})
c = s.circle(100,100,50).attr({'id': 'circle'})
r.attr('fill', 'red')
c.attr({ 'fill': 'blue', 'stroke': 'black', 'strokeWidth': 10 })
r.attr({ 'stroke': '#123456', 'strokeWidth': 20 })
s.text(180,100, 'Click a shape').attr({'fill' : 'blue', 'stroke': 'blue', 'stroke-width': 0.2 })
g = s.group().attr({'id': 'tux'})
def hoverover(ev):
g.animate({'transform': 's1.5r45,t180,20'}, 1000, window.mina.bounce)
def hoverout(ev):
g.animate({'transform': 's1r0,t180,20'}, 1000, window.mina.bounce)
# callback function
def onSVGLoaded(data):
#s.append(data)
g.append(data)
#g.hover(hoverover, hoverout )
g.text(300,100, 'Hover over me')
# load the svg file with window.Snap.load
tux = window.Snap.load("/static/Dreaming_tux.svg", onSVGLoaded)
g.transform('t180,20')
# handlers for the browser events
def rtoyellow(ev):
r.attr('fill', 'yellow')
def ctogreen(ev):
c.attr('fill', 'green')
# bind mouse events to the handlers by object id
document['rect'].bind('click', rtoyellow)
document['circle'].bind('click', ctogreen)
document['tux'].bind('mouseover', hoverover)
document['tux'].bind('mouseleave', hoverout)
</script>
</body>
</html>
'''
return outstring
@bg9_40323218.route('/snap_link')
# http://svg.dabbles.info/
def snap_link():
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>Web-based Snap drawing</title>
<!-- IE 9: display inline SVG -->
<meta http-equiv="X-UA-Compatible" content="IE=9">
<script type="text/javascript" src="http://brython.info/src/brython_dist.js"></script>
<script type="text/javascript" src="/static/snap.svg-min.js"></script>
<script>
window.onload=function(){
brython(1);
}
</script>
</head>
<body>
<svg width="800" height="800" viewBox="0 0 800 800" id="svgout"></svg>
<script type="text/python">
from javascript import JSConstructor
from browser import alert
from browser import window, document
# obtain the Snap constructor through window and JSConstructor so Brython can use it
snap = JSConstructor(window.Snap)
# draw on the svg element whose id is "svgout"
s = snap("#svgout")
offsetY = 50
# optionally outline the drawing area
#borderRect = s.rect(0,0,800,640,10,10).attr({ 'stroke': "silver", 'fill': "silver", 'strokeWidth': "3" })
g = s.group().transform('t250,120')
r0 = s.rect(150,150,100,100,20,20).attr({ 'fill': "orange", 'opacity': "0.8", 'stroke': "black", 'strokeWidth': "2" })
c0 = s.circle(225,225,10).attr({ 'fill': "silver", 'stroke': "black", 'strokeWidth': "4" }).attr({ 'id': 'c0' })
g0 = s.group( r0,c0 ).attr({ 'id': 'g0' })
#g0.animate({ 'transform' : 't250,120r360,225,225' },4000)
g0.appendTo( g )
g0.animate({ 'transform' : 'r360,225,225' },4000)
# make g0 draggable
g0.drag()
r1 = s.rect(100,100,100,100,20,20).attr({ 'fill': "red", 'opacity': "0.8", 'stroke': "black", 'strokeWidth': "2" })
c1 = s.circle(175,175,10).attr({ 'fill': "silver", 'stroke': "black" , 'strokeWidth': "4"}).attr({ 'id': 'c1' })
g1 = s.group( r1,c1 ).attr({ 'id': 'g1' })
g1.appendTo( g0 ).attr({ 'id': 'g1' })
g1.animate({ 'transform' : 'r360,175,175' },4000)
r2 = s.rect(50,50,100,100,20,20).attr({ 'fill': "blue", 'opacity': "0.8", 'stroke': "black", 'strokeWidth': "2" })
c2 = s.circle(125,125,10).attr({ 'fill': "silver", 'stroke': "black", 'strokeWidth': "4" }).attr({ 'id': 'c2' })
g2 = s.group(r2,c2).attr({ 'id': 'g2' })
g2.appendTo( g1 );
g2.animate( { 'transform' : 'r360,125,125' },4000);
r3 = s.rect(0,0,100,100,20,20).attr({ 'fill': "yellow", 'opacity': "0.8", 'stroke': "black", 'strokeWidth': "2" })
c3 = s.circle(75,75,10).attr({ 'fill': "silver", 'stroke': "black", 'strokeWidth': "4" }).attr({ 'id': 'c3' })
g3 = s.group(r3,c3).attr({ 'id': 'g3' })
g3.appendTo( g2 )
g3.animate( { 'transform' : 'r360,75,75' },4000)
r4 = s.rect(-50,-50,100,100,20,20).attr({ 'fill': "green", 'opacity': "0.8", 'stroke': "black", 'strokeWidth': "2" })
c4 = s.circle(25,25,10).attr({ 'fill': "silver", 'stroke': "black", 'strokeWidth': "4" }).attr({ 'id': 'c4' })
g4 = s.group(r4,c4).attr({ 'id': 'g4' });
g4.appendTo( g3 )
g4.animate( { 'transform' : 'r360,25,25' },4000)
</script>
</body>
</html>
'''
return outstring
@bg9_40323218.route('/snap_gear')
def snap_gear():
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>Web-based Snap drawing</title>
<!-- IE 9: display inline SVG -->
<meta http-equiv="X-UA-Compatible" content="IE=9">
<script type="text/javascript" src="http://brython.info/src/brython_dist.js"></script>
<script type="text/javascript" src="/static/snap.svg-min.js"></script>
<script>
window.onload=function(){
brython(1);
}
</script>
</head>
<body>
<svg width="800" height="800" viewBox="0 0 800 800" id="svgout"></svg>
<script type="text/python">
from javascript import JSConstructor
from browser import alert
from browser import window, document
# obtain the Snap constructor through window and JSConstructor so Brython can use it
snap = JSConstructor(window.Snap)
s = snap("#svgout")
# draw a straight line
s.line(0, 0, 100, 100).attr({ 'fill': "silver", 'stroke': "black", 'strokeWidth': "1" }).attr({ 'id': 'line1' })
</script>
</body>
</html>
'''
return outstring
| 40323250/bg9_cdw11 | users/b/g9/bg9_40323218.py | Python | agpl-3.0 | 34,761 |
from django.test import TestCase
from django.contrib.auth.models import Group, User
from django.db.models.signals import post_save, post_delete
from community.constants import COMMUNITY_ADMIN
from community.models import Community
from community.signals import manage_community_groups, remove_community_groups
from users.models import SystersUser
class SignalsTestCase(TestCase):
def setUp(self):
post_save.connect(manage_community_groups, sender=Community,
dispatch_uid="manage_groups")
post_delete.connect(remove_community_groups, sender=Community,
dispatch_uid="remove_groups")
def test_manage_community_groups(self):
"""Test handling of operations required when saving a Community
object"""
user1 = User.objects.create(username='foo', password='foobar')
systers_user = SystersUser.objects.get()
community = Community.objects.create(name="Foo", slug="foo", order=1,
admin=systers_user)
groups_count = Group.objects.count()
self.assertEqual(groups_count, 4)
community_admin_group = Group.objects.get(
name=COMMUNITY_ADMIN.format("Foo"))
self.assertEqual(user1.groups.get(), community_admin_group)
self.assertSequenceEqual(community.members.all(), [systers_user])
user2 = User.objects.create(username='bar', password='foobar')
systers_user2 = SystersUser.objects.get(user=user2)
community.name = "Bar"
community.admin = systers_user2
community.save()
removed_groups_count = Group.objects.filter(
name__startswith="Foo").count()
self.assertEqual(removed_groups_count, 0)
new_groups_count = Group.objects.filter(name__startswith="Bar").count()
self.assertEqual(new_groups_count, 4)
community_admin_group = Group.objects.get(
name=COMMUNITY_ADMIN.format("Bar"))
self.assertEqual(user2.groups.get(), community_admin_group)
self.assertNotEqual(list(user1.groups.all()), [community_admin_group])
self.assertCountEqual(Community.objects.get().members.all(),
[systers_user, systers_user2])
def test_remove_community_groups(self):
"""Test the removal of groups when a community is deleted"""
User.objects.create(username='foo', password='foobar')
systers_user = SystersUser.objects.get()
community = Community.objects.create(name="Foo", slug="foo", order=1,
admin=systers_user)
groups_count = Group.objects.count()
self.assertEqual(groups_count, 4)
community.delete()
groups_count = Group.objects.count()
self.assertEqual(groups_count, 0)
| exploreshaifali/portal | systers_portal/community/tests/test_signals.py | Python | gpl-2.0 | 2,832 |
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utilities for test classes."""
from __future__ import annotations
import io
import signal
import psutil
class PopenStub:
"""Stubs the API of psutil.Popen() to make unit tests less expensive.
Starting a new process for every unit test is intrinsically more expensive
than checking an object's attributes, and for some developers it isn't even
possible for them to kill a spawned process due to a lack of permission on
their operating system.
We used to spawn real processes for tests, and observed the following:
With actual processes: Runs 78 tests in 50.7 seconds
With PopenStub: Runs 97 tests in 32.3 seconds
Thus, using this stub gives us a ~4.62x speed boost per-test.
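A minimal usage sketch (illustrative, assuming the default constructor arguments):
popen = PopenStub(stdout=b'done')
stdout, stderr = popen.communicate()
assert stdout == b'done' and popen.returncode == 0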
Attributes:
pid: int. The ID of the process.
stdout: bytes. The text written to standard output by the process.
stderr: bytes. The text written to error output by the process.
poll_count: int. The number of times poll() has been called.
signals_received: list(int). List of received signals (as ints) in order
of receipt.
terminate_count: int. Number of times terminate() has been called.
kill_count: int. Number of times kill() has been called.
alive: bool. Whether the process should be considered to be alive.
reject_signal: bool. Whether to raise OSError in send_signal().
reject_terminate: bool. Whether to raise OSError in terminate().
reject_kill: bool. Whether to raise OSError in kill().
unresponsive: bool. Whether the process refuses to end normally.
returncode: int. The return code of the process.
"""
def __init__(
self, pid=1, name='process', stdout=b'', stderr=b'',
reject_signal=False, reject_terminate=False, reject_kill=False,
alive=True, unresponsive=False, return_code=0, child_procs=None):
"""Initializes a new PopenStub instance.
Args:
pid: int. The ID of the process.
name: str. The name of the process.
stdout: bytes. The text written to standard output by the process.
stderr: bytes. The text written to error output by the process.
return_code: int. The return code of the process.
reject_signal: bool. Whether to raise OSError in send_signal().
reject_terminate: bool. Whether to raise OSError in terminate().
reject_kill: bool. Whether to raise OSError in kill().
alive: bool. Whether the process should be considered to be alive.
unresponsive: bool. Whether the process refuses to end normally.
child_procs: list(PopenStub)|None. Processes "owned" by the stub, or
None if there aren't any.
"""
self.pid = pid
self.stdin = io.BytesIO()
self.stdout = io.BytesIO(stdout)
self.stderr = io.BytesIO(stderr)
self.poll_count = 0
self.signals_received = []
self.terminate_count = 0
self.kill_count = 0
self.alive = alive
self.reject_signal = reject_signal
self.reject_terminate = reject_terminate
self.reject_kill = reject_kill
self.unresponsive = unresponsive
self._name = name
self._child_procs = tuple(child_procs) if child_procs else ()
self._return_code = return_code
@property
def returncode(self):
"""Returns the return code of the process.
Returns:
int. The return code of the process.
"""
return self._return_code
@returncode.setter
def returncode(self, return_code):
"""Assigns a return code to the process.
Args:
return_code: int. The return code to assign to the process.
"""
self._return_code = return_code
def is_running(self):
"""Returns whether the process is running.
Returns:
bool. The value of self.alive, which mocks whether the process is
still alive.
"""
return self.alive
def name(self):
"""Returns the name of the process.
Returns:
str. The name of the process.
"""
return self._name
def children(self, recursive=False):
"""Returns the children spawned by this process.
Args:
recursive: bool. Whether to also return non-direct descendants from
self (i.e. children of children).
Returns:
list(PopenStub). A list of the child processes.
"""
children = []
for child in self._child_procs:
children.append(child)
if recursive:
children.extend(child.children(recursive=True))
return children
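# For illustration (hypothetical stubs): a parent built as
# PopenStub(child_procs=[PopenStub(pid=2, child_procs=[PopenStub(pid=3)])])
# returns only the pid-2 stub from children(), and both the pid-2 and pid-3 stubs
# from children(recursive=True).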
def terminate(self):
"""Increment terminate_count.
Mocks the process being terminated.
"""
self.terminate_count += 1
if self.reject_terminate:
raise OSError('rejected')
if self.unresponsive:
return
self._exit(return_code=1)
def kill(self):
"""Increment kill_count.
NOTE: kill() does not respect self.unresponsive.
Mocks the process being killed.
"""
self.kill_count += 1
if self.reject_kill:
raise OSError('rejected')
self._exit(return_code=1)
def send_signal(self, signal_number):
"""Append signal to self.signals_received.
Mocks receiving a process signal. If a SIGINT signal is received (e.g.
from ctrl-C) and self.unresponsive is True, then we call self._exit().
Args:
signal_number: int. The number of the received signal.
"""
self.signals_received.append(signal_number)
if self.reject_signal:
raise OSError('rejected')
if signal_number == signal.SIGINT and not self.unresponsive:
self._exit(return_code=1)
def poll(self):
"""Increment poll_count.
Mocks checking whether the process is still alive.
Returns:
int|None. The return code of the process if it has ended, otherwise
None.
"""
self.poll_count += 1
return None if self.alive else self._return_code
def wait(self, timeout=None): # pylint: disable=unused-argument
"""Wait for the process completion.
Mocks the process waiting for completion before it continues execution.
No time is actually spent waiting, however, since the lifetime of the
program is completely defined by the initialization params.
Args:
timeout: int|None. Time to wait before raising an exception, or None
to wait indefinitely.
"""
if not self.alive:
return
if not self.unresponsive:
self._exit()
elif timeout is not None:
raise psutil.TimeoutExpired(timeout)
else:
raise RuntimeError('PopenStub has entered an infinite loop')
def communicate(self, input=b''): # pylint: disable=unused-argument, redefined-builtin
"""Mocks an interaction with the process.
Args:
input: bytes. Input string to write to the process's stdin.
Returns:
tuple(bytes, bytes). The stdout and stderr of the process,
respectively.
"""
if not self.alive:
return self.stdout.getvalue(), self.stderr.getvalue()
if not self.unresponsive:
self.stdin.write(input)
self._exit()
return self.stdout.getvalue(), self.stderr.getvalue()
else:
raise RuntimeError('PopenStub has entered an infinite loop')
def _exit(self, return_code=None):
"""Simulates the end of the process.
Args:
return_code: int|None. The return code of the program. If None, the
return code assigned at initialization is used instead.
"""
self.alive = False
if return_code is not None:
self._return_code = return_code
| brianrodri/oppia | scripts/scripts_test_utils.py | Python | apache-2.0 | 8,753 |
import re
import os
import bleach
def get_env(name):
val = os.getenv(name)
if val is None:
raise Exception("Can't find environment variable {0}.".format(name))
return val
def remove_html(text):
"""
Using bleach remove all HTML markup from text.
http://bleach.readthedocs.org/en/latest/clean.html#stripping-markup
"""
return bleach.clean(text, strip=True, tags=[])
def scrub_doi(val):
"""
Get only the DOI. Not other stuff.
"""
#Remove html
v = remove_html(val)
#lower case
v = v.lower()
v = v.replace('http://dx.doi.org/', '')
v = v.replace('dx.doi.org/', '')
#leading DOI prefix
v = v.replace('doi:', '')
v = v.replace(' ', '')
return v.strip()
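# Illustrative call (hypothetical input, not part of the original module):
# scrub_doi('doi:10.1000/XYZ ') returns '10.1000/xyz'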
def pull(meta, k):
f = lambda x: None if str(x) == '' else x
return f(meta.get(k))
def get_user_agent():
"""
Utility to get user agent for requests library.
"""
try:
agent = get_env('VDM_USER_AGENT')
return {'User-Agent': agent}
except Exception:
#No agent set
return {}
def scrub_pmid(value):
"""
Minimal cleanup on incoming PMIDs for validation.
http://www.nlm.nih.gov/bsd/mms/medlineelements.html#pmid
"""
if value.startswith("PMC"):
return None
match = re.findall(r'([1-9]{1}\d{2,7})', value)
try:
v = match[0]
except IndexError:
return None
#Don't allow 0 to be returned.
if v == '0':
return None
return v
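# Illustrative calls (hypothetical inputs): scrub_pmid('PMID: 23193287') returns '23193287',
# while scrub_pmid('PMC3531190') returns None because PubMed Central ids are rejected.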
| Brown-University-Library/vivo-data-management | vdm/utils.py | Python | mit | 1,494 |
import unittest
from katas.kyu_8.kata_example_twist import websites
class WebsitesTestCase(unittest.TestCase):
def test_equals(self):
self.assertEqual(len(websites), 1000)
def test_equals_2(self):
self.assertEqual(websites.count('codewars'), 1000)
| the-zebulan/CodeWars | tests/kyu_8_tests/test_kata_example_twist.py | Python | mit | 276 |
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import re
from devtools_testutils import AzureTestCase
from azure_devtools.scenario_tests import RecordingProcessor, ReplayableTest
from azure_devtools.scenario_tests.utilities import is_text_payload
class ResponseReplacerProcessor(RecordingProcessor):
def __init__(self, keys=None, replacement="sanitized"):
self._keys = keys if keys else []
self._replacement = replacement
def process_response(self, response):
def sanitize_dict(dictionary):
for key in dictionary:
value = dictionary[key]
if isinstance(value, str):
dictionary[key] = re.sub(
r"("+'|'.join(self._keys)+r")",
self._replacement,
dictionary[key])
elif isinstance(value, dict):
sanitize_dict(value)
sanitize_dict(response)
return response
class BodyReplacerProcessor(RecordingProcessor):
"""Sanitize the sensitive info inside request or response bodies"""
def __init__(self, keys=None, replacement="sanitized"):
self._replacement = replacement
self._keys = keys if keys else []
def process_request(self, request):
if is_text_payload(request) and request.body:
request.body = self._replace_keys(request.body.decode()).encode()
return request
def process_response(self, response):
if is_text_payload(response) and response['body']['string']:
response['body']['string'] = self._replace_keys(response['body']['string'])
return response
def _replace_keys(self, body):
def _replace_recursively(dictionary):
for key in dictionary:
value = dictionary[key]
if key in self._keys:
dictionary[key] = self._replacement
elif isinstance(value, dict):
_replace_recursively(value)
elif key == 'iceServers':
_replace_recursively(value[0])
elif key == 'urls':
dictionary[key][0] = "turn.skype.com"
import json
try:
body = json.loads(body)
_replace_recursively(body)
except (KeyError, ValueError):
return body
return json.dumps(body)
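# Illustrative behaviour (hypothetical payload): with keys=['token'], _replace_keys applied to
# '{"token": "secret", "id": 1}' yields '{"token": "sanitized", "id": 1}' while leaving other keys intact.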
class CommunicationTestCase(AzureTestCase):
FILTER_HEADERS = ReplayableTest.FILTER_HEADERS + ['x-azure-ref', 'x-ms-content-sha256', 'location']
def __init__(self, method_name, *args, **kwargs):
super(CommunicationTestCase, self).__init__(method_name, *args, **kwargs)
| Azure/azure-sdk-for-python | sdk/communication/azure-communication-networktraversal/tests/_shared/testcase.py | Python | mit | 2,951 |
from .numbers import PalindromeNumber
class DoubleBasePalindrome(PalindromeNumber):
"""This class provides sum of numbers tha are palindrome
in both decimal and binary base
Examples:
>>> palindromes = DoubleBasePalindrome()
>>> palindromes.sum_palindrome_numbers()
872187"""
@staticmethod
def _binary(n):
"""Convert integer to binary in string format"""
return '{0:b}'.format(n)
@classmethod
def is_double_base_palindrome(cls, n):
"""Check whether a number is palindrome in both
decimal and binary base"""
if n % 2 == 0: # Only odd numbers can be valid
return False
if cls.is_palindrome(n) and cls.is_palindrome(cls._binary(n)):
return True
return False
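# Worked example (illustrative): 585 is odd, reads the same backwards in base 10, and its binary
# form '1001001001' is also a palindrome, so is_double_base_palindrome(585) returns True.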
def double_base_palindrome_generator(self):
"""yield double base palindrome numbers
not bigger than max_number"""
for x in self.palindrome_numbers_generator():
if self.is_double_base_palindrome(x):
yield x
def sum_palindrome_numbers(self):
"""Sum all palindrome numbers not bigger than max_number"""
s = 0
for x in self.double_base_palindrome_generator():
s += x
return s
| jam182/palindrome | palindrome/doublebase.py | Python | bsd-2-clause | 1,270 |
'''
Module of Windows API for plyer.email.
'''
import os
try:
from urllib.parse import quote
except ImportError:
from urllib import quote
from plyer.facades import Email
class WindowsEmail(Email):
'''
Implementation of Windows email API.
'''
def _send(self, **kwargs):
recipient = kwargs.get('recipient')
subject = kwargs.get('subject')
text = kwargs.get('text')
uri = "mailto:"
if recipient:
uri += str(recipient)
if subject:
uri += "?" if "?" not in uri else "&"
uri += "subject="
uri += quote(str(subject))
if text:
uri += "?" if "?" not in uri else "&"
uri += "body="
uri += quote(str(text))
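# Illustrative result (hypothetical values): recipient='a@b.c', subject='Hi there' and no text
# produce the URI "mailto:a@b.c?subject=Hi%20there" before it is handed to os.startfile() below.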
# WE + startfile are available only on Windows
try:
os.startfile(uri)
except WindowsError:
print("Warning: unable to find a program able to send emails.")
def instance():
'''
Instance for facade proxy.
'''
return WindowsEmail()
| kivy/plyer | plyer/platforms/win/email.py | Python | mit | 1,060 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IoT Docker RPC handler."""
from docker import errors
from oslo.config import cfg
from iot.common import docker_utils
from iot.openstack.common import log as logging
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class Handler(object):
def __init__(self):
super(Handler, self).__init__()
self._docker = None
def _encode_utf8(self, value):
return unicode(value).encode('utf-8')
# Device operations
def device_create(self, ctxt, name, device_uuid, device):
LOG.debug('Creating device name %s'
% (name))
def device_list(self, ctxt):
LOG.debug("device_list")
def device_delete(self, ctxt, device_uuid):
LOG.debug("device_delete %s" % device_uuid)
def device_show(self, ctxt, device_uuid):
LOG.debug("device_show %s" % device_uuid)
| digambar15/openstack-iot | iot/conductor/handlers/driver.py | Python | apache-2.0 | 1,411 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import argparse
import logging
import os
import pexpect
import shutil
import signal
import subprocess
import sys
import tempfile
import time
import traceback
from xmlrpc import client as xmlrpclib
from glob import glob
#----------------------------------------------------------
# Utils
#----------------------------------------------------------
ROOTDIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
TSTAMP = time.strftime("%Y%m%d", time.gmtime())
exec(open(os.path.join(ROOTDIR, 'odoo', 'release.py'), 'rb').read())
VERSION = version.split('-')[0].replace('saas~', '')
GPGPASSPHRASE = os.getenv('GPGPASSPHRASE')
GPGID = os.getenv('GPGID')
DOCKERVERSION = VERSION.replace('+', '')
INSTALL_TIMEOUT = 600
DOCKERUSER = """
RUN mkdir /var/lib/odoo && \
groupadd -g %(group_id)s odoo && \
useradd -u %(user_id)s -g odoo odoo -d /var/lib/odoo && \
mkdir /data && \
chown odoo:odoo /var/lib/odoo /data
USER odoo
""" % {'group_id': os.getgid(), 'user_id': os.getuid()}
def run_cmd(cmd, chdir=None, timeout=None):
logging.info("Running command %s", cmd)
return subprocess.run(cmd, cwd=chdir, timeout=timeout)
def _rpc_count_modules(addr='http://127.0.0.1', port=8069, dbname='mycompany'):
time.sleep(5)
uid = xmlrpclib.ServerProxy('%s:%s/xmlrpc/2/common' % (addr, port)).authenticate(
dbname, 'admin', 'admin', {}
)
modules = xmlrpclib.ServerProxy('%s:%s/xmlrpc/2/object' % (addr, port)).execute(
dbname, uid, 'admin', 'ir.module.module', 'search', [('state', '=', 'installed')]
)
if len(modules) > 1:
time.sleep(1)
toinstallmodules = xmlrpclib.ServerProxy('%s:%s/xmlrpc/2/object' % (addr, port)).execute(
dbname, uid, 'admin', 'ir.module.module', 'search', [('state', '=', 'to install')]
)
if toinstallmodules:
logging.error("Package test: FAILED. Not able to install dependencies of base.")
raise Exception("Installation of package failed")
else:
logging.info("Package test: successfuly installed %s modules" % len(modules))
else:
logging.error("Package test: FAILED. Not able to install base.")
raise Exception("Package test: FAILED. Not able to install base.")
def publish(args, pub_type, extensions):
"""Publish builded package (move builded files and generate a symlink to the latests)
:args: parsed program args
:pub_type: one of [deb, rpm, src, exe]
:extensions: list of extensions to publish
:returns: published files
"""
def _publish(release):
build_path = os.path.join(args.build_dir, release)
filename = release.split(os.path.sep)[-1]
release_dir = os.path.join(args.pub, pub_type)
release_path = os.path.join(release_dir, filename)
os.renames(build_path, release_path)
# Latest/symlink handler
release_abspath = os.path.abspath(release_path)
latest_abspath = release_abspath.replace(TSTAMP, 'latest')
if os.path.islink(latest_abspath):
os.unlink(latest_abspath)
os.symlink(release_abspath, latest_abspath)
return release_path
published = []
for extension in extensions:
release = glob("%s/odoo_*.%s" % (args.build_dir, extension))
if release:
published.append(_publish(release[0]))
return published
# ---------------------------------------------------------
# Generates Packages, Sources and Release files of debian package
# ---------------------------------------------------------
def gen_deb_package(args, published_files):
# Executes command to produce file_name in path, and moves it to args.pub/deb
def _gen_file(args, command, file_name, path):
cur_tmp_file_path = os.path.join(path, file_name)
with open(cur_tmp_file_path, 'w') as out:
subprocess.call(command, stdout=out, cwd=path)
shutil.copy(cur_tmp_file_path, os.path.join(args.pub, 'deb', file_name))
# Copy files to a temp directory (required because the working directory must contain only the
# files of the last release)
temp_path = tempfile.mkdtemp(suffix='debPackages')
for pub_file_path in published_files:
shutil.copy(pub_file_path, temp_path)
commands = [
(['dpkg-scanpackages', '.'], "Packages"), # Generate Packages file
(['dpkg-scansources', '.'], "Sources"), # Generate Sources file
(['apt-ftparchive', 'release', '.'], "Release") # Generate Release file
]
# Generate files
for command in commands:
_gen_file(args, command[0], command[-1], temp_path)
# Remove temp directory
shutil.rmtree(temp_path)
if args.sign:
# Generate Release.gpg (= signed Release)
# Options -abs: -a (Create ASCII armored output), -b (Make a detach signature), -s (Make a signature)
subprocess.call(['gpg', '--default-key', GPGID, '--passphrase', GPGPASSPHRASE, '--yes', '-abs', '--no-tty', '-o', 'Release.gpg', 'Release'], cwd=os.path.join(args.pub, 'deb'))
# ---------------------------------------------------------
# Generates an RPM repo
# ---------------------------------------------------------
def gen_rpm_repo(args, file_name):
"""Genereate a rpm repo in publish directory"""
# Sign the RPM
rpmsign = pexpect.spawn('/bin/bash', ['-c', 'rpm --resign %s' % file_name], cwd=os.path.join(args.pub, 'rpm'))
rpmsign.expect_exact('Enter pass phrase: ')
rpmsign.send(GPGPASSPHRASE + '\r\n')
rpmsign.expect(pexpect.EOF)
# Removes the old repodata
shutil.rmtree(os.path.join(args.pub, 'rpm', 'repodata'))
# Copy files to a temp directory (required because the working directory must contain only the
# files of the last release)
temp_path = tempfile.mkdtemp(suffix='rpmPackages')
shutil.copy(file_name, temp_path)
run_cmd(['createrepo', temp_path]).check_returncode() # creates a repodata folder in temp_path
shutil.copytree(os.path.join(temp_path, "repodata"), os.path.join(args.pub, 'rpm', 'repodata'))
# Remove temp directory
shutil.rmtree(temp_path)
def _prepare_build_dir(args, win32=False):
"""Copy files to the build directory"""
logging.info('Preparing build dir "%s"', args.build_dir)
cmd = ['rsync', '-a', '--delete', '--exclude', '.git', '--exclude', '*.pyc', '--exclude', '*.pyo']
if win32 is False:
cmd += ['--exclude', 'setup/win32']
run_cmd(cmd + ['%s/' % args.odoo_dir, args.build_dir])
for addon_path in glob(os.path.join(args.build_dir, 'addons/*')):
if args.blacklist is None or os.path.basename(addon_path) not in args.blacklist:
try:
shutil.move(addon_path, os.path.join(args.build_dir, 'odoo/addons'))
except shutil.Error as e:
logging.warning("Warning '%s' while moving addon '%s", e, addon_path)
if addon_path.startswith(args.build_dir) and os.path.isdir(addon_path):
logging.info("Removing ''".format(addon_path))
try:
shutil.rmtree(addon_path)
except shutil.Error as rm_error:
logging.warning("Cannot remove '{}': {}".format(addon_path, rm_error))
# Docker stuffs
class OdooTestTimeoutError(Exception):
pass
class OdooTestError(Exception):
pass
class Docker():
"""Base Docker class. Must be inherited by specific Docker builder class"""
arch = None
def __init__(self, args):
"""
:param args: argparse parsed arguments
"""
self.args = args
self.tag = 'odoo-%s-%s-nightly-tests' % (DOCKERVERSION, self.arch)
self.container_name = None
self.exposed_port = None
dockerfiles = {
'tgz': os.path.join(args.build_dir, 'setup/package.dfsrc'),
'deb': os.path.join(args.build_dir, 'setup/package.dfdebian'),
'rpm': os.path.join(args.build_dir, 'setup/package.dffedora'),
}
self.dockerfile = dockerfiles[self.arch]
self.test_log_file = '/data/src/test-%s.log' % self.arch
self.build_image()
def build_image(self):
"""Build the dockerimage by copying Dockerfile into build_dir/docker"""
docker_dir = os.path.join(self.args.build_dir, 'docker')
docker_file_path = os.path.join(docker_dir, 'Dockerfile')
os.mkdir(docker_dir)
shutil.copy(self.dockerfile, docker_file_path)
with open(docker_file_path, 'a') as dockerfile:
dockerfile.write(DOCKERUSER)
shutil.copy(os.path.join(self.args.build_dir, 'requirements.txt'), docker_dir)
run_cmd(["docker", "build", "--rm=True", "-t", self.tag, "."], chdir=docker_dir, timeout=1200).check_returncode()
shutil.rmtree(docker_dir)
def run(self, cmd, build_dir, container_name, user='odoo', exposed_port=None, detach=False, timeout=None):
self.container_name = container_name
docker_cmd = [
"docker",
"run",
"--user=%s" % user,
"--name=%s" % container_name,
"--rm",
"--volume=%s:/data/src" % build_dir
]
if exposed_port:
docker_cmd.extend(['-p', '127.0.0.1:%s:%s' % (exposed_port, exposed_port)])
self.exposed_port = exposed_port
if detach:
docker_cmd.append('-d')
# preserve logs in case of detached docker container
cmd = '(%s) > %s 2>&1' % (cmd, self.test_log_file)
docker_cmd.extend([
self.tag,
"/bin/bash",
"-c",
"cd /data/src && %s" % cmd
])
run_cmd(docker_cmd, timeout=timeout).check_returncode()
def is_running(self):
dinspect = subprocess.run(['docker', 'container', 'inspect', self.container_name], stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL)
return dinspect.returncode == 0
def stop(self):
run_cmd(["docker", "stop", self.container_name]).check_returncode()
def test_odoo(self):
logging.info('Starting to test Odoo install test')
start_time = time.time()
while self.is_running() and (time.time() - start_time) < INSTALL_TIMEOUT:
time.sleep(5)
if os.path.exists(os.path.join(args.build_dir, 'odoo.pid')):
_rpc_count_modules(port=self.exposed_port)
self.stop()
return
if self.is_running():
self.stop()
raise OdooTestTimeoutError('Odoo pid file never appeared after %s sec' % INSTALL_TIMEOUT)
raise OdooTestError('Error while installing/starting Odoo after %s sec.\nSee testlogs.txt in build dir' % int(time.time() - start_time))
def build(self):
"""To be overriden by specific builder"""
pass
def start_test(self):
"""To be overriden by specific builder"""
pass
class DockerTgz(Docker):
"""Docker class to build python src package"""
arch = 'tgz'
def build(self):
logging.info('Start building python tgz package')
self.run('python3 setup.py sdist --quiet --formats=gztar,zip', self.args.build_dir, 'odoo-src-build-%s' % TSTAMP)
os.rename(glob('%s/dist/odoo-*.tar.gz' % self.args.build_dir)[0], '%s/odoo_%s.%s.tar.gz' % (self.args.build_dir, VERSION, TSTAMP))
os.rename(glob('%s/dist/odoo-*.zip' % self.args.build_dir)[0], '%s/odoo_%s.%s.zip' % (self.args.build_dir, VERSION, TSTAMP))
logging.info('Finished building python tgz package')
def start_test(self):
if not self.args.test:
return
logging.info('Start testing python tgz package')
cmds = [
'service postgresql start',
'pip3 install /data/src/odoo_%s.%s.tar.gz' % (VERSION, TSTAMP),
'su postgres -s /bin/bash -c "createuser -s odoo"',
'su postgres -s /bin/bash -c "createdb mycompany"',
'su odoo -s /bin/bash -c "odoo -d mycompany -i base --stop-after-init"',
'su odoo -s /bin/bash -c "odoo -d mycompany --pidfile=/data/src/odoo.pid"',
]
self.run(' && '.join(cmds), self.args.build_dir, 'odoo-src-test-%s' % TSTAMP, user='root', detach=True, exposed_port=8069, timeout=300)
self.test_odoo()
logging.info('Finished testing tgz package')
class DockerDeb(Docker):
"""Docker class to build debian package"""
arch = 'deb'
def build(self):
logging.info('Start building debian package')
        # Append timestamp to version for the .dsc to refer to the right .tar.gz
cmds = ["sed -i '1s/^.*$/odoo (%s.%s) stable; urgency=low/' debian/changelog" % (VERSION, TSTAMP)]
cmds.append('dpkg-buildpackage -rfakeroot -uc -us -tc')
        # As the packages are built in the parent of the build dir, we move them back to build_dir
cmds.append('mv ../odoo_* ./')
self.run(' && '.join(cmds), self.args.build_dir, 'odoo-deb-build-%s' % TSTAMP)
logging.info('Finished building debian package')
def start_test(self):
if not self.args.test:
return
logging.info('Start testing debian package')
cmds = [
'service postgresql start',
'su postgres -s /bin/bash -c "createdb mycompany"',
'/usr/bin/apt-get update -y',
'/usr/bin/dpkg -i /data/src/odoo_%s.%s_all.deb ; /usr/bin/apt-get install -f -y' % (VERSION, TSTAMP),
'su odoo -s /bin/bash -c "odoo -d mycompany -i base --stop-after-init"',
'su odoo -s /bin/bash -c "odoo -d mycompany --pidfile=/data/src/odoo.pid"',
]
self.run(' && '.join(cmds), self.args.build_dir, 'odoo-deb-test-%s' % TSTAMP, user='root', detach=True, exposed_port=8069, timeout=300)
self.test_odoo()
logging.info('Finished testing debian package')
class DockerRpm(Docker):
"""Docker class to build rpm package"""
arch = 'rpm'
def build(self):
logging.info('Start building fedora rpm package')
self.run('python3 setup.py --quiet bdist_rpm', self.args.build_dir, 'odoo-rpm-build-%s' % TSTAMP)
os.rename(glob('%s/dist/odoo-*.noarch.rpm' % self.args.build_dir)[0], '%s/odoo_%s.%s.rpm' % (self.args.build_dir, VERSION, TSTAMP))
logging.info('Finished building fedora rpm package')
def start_test(self):
if not self.args.test:
return
logging.info('Start testing rpm package')
cmds = [
'su postgres -c "/usr/bin/pg_ctl -D /var/lib/postgres/data start"',
'sleep 5',
'su postgres -c "createdb mycompany"',
'dnf install -d 0 -e 0 /data/src/odoo_%s.%s.rpm -y' % (VERSION, TSTAMP),
'su odoo -s /bin/bash -c "odoo -c /etc/odoo/odoo.conf -d mycompany -i base --stop-after-init"',
'su odoo -s /bin/bash -c "odoo -c /etc/odoo/odoo.conf -d mycompany --pidfile=/data/src/odoo.pid"',
]
self.run(' && '.join(cmds), args.build_dir, 'odoo-rpm-test-%s' % TSTAMP, user='root', detach=True, exposed_port=8069, timeout=300)
self.test_odoo()
logging.info('Finished testing rpm package')
# KVM stuffs
class KVM(object):
def __init__(self, args):
self.args = args
self.image = args.vm_winxp_image
self.ssh_key = args.vm_winxp_ssh_key
self.login = args.vm_winxp_login
def timeout(self, signum, frame):
logging.warning("vm timeout kill (pid: {})".format(self.kvm_proc.pid))
self.kvm_proc.terminate()
def start(self):
kvm_cmd = [
"kvm",
"-cpu", "core2duo",
"-smp", "2,sockets=2,cores=1,threads=1",
"-net", "nic,model=rtl8139",
"-net", "user,hostfwd=tcp:127.0.0.1:10022-:22,hostfwd=tcp:127.0.0.1:18069-:8069,hostfwd=tcp:127.0.0.1:15432-:5432",
"-m", "1024",
"-drive", "file=%s,snapshot=on" % self.image,
"-nographic"
]
logging.info("Starting kvm: {}".format(" ".join(kvm_cmd)))
self.kvm_proc = subprocess.Popen(kvm_cmd)
time.sleep(50)
signal.alarm(2400)
signal.signal(signal.SIGALRM, self.timeout)
try:
self.run()
finally:
signal.signal(signal.SIGALRM, signal.SIG_DFL)
self.kvm_proc.terminate()
time.sleep(10)
def ssh(self, cmd):
run_cmd([
'ssh',
'-o', 'UserKnownHostsFile=/dev/null',
'-o', 'StrictHostKeyChecking=no',
'-p', '10022',
'-i', self.ssh_key,
'%[email protected]' % self.login,
cmd
]).check_returncode()
def rsync(self, rsync_args, options=['--delete', '--exclude', '.git', '--exclude', '.tx', '--exclude', '__pycache__']):
cmd = [
'rsync',
'-rt',
'-e', 'ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 10022 -i %s' % self.ssh_key
]
cmd.extend(options)
cmd.extend(rsync_args)
run_cmd(cmd).check_returncode()
def run(self):
pass
class KVMWinBuildExe(KVM):
def run(self):
logging.info('Start building Windows package')
with open(os.path.join(self.args.build_dir, 'setup/win32/Makefile.version'), 'w') as f:
f.write("VERSION=%s\n" % VERSION.replace('~', '_').replace('+', ''))
with open(os.path.join(self.args.build_dir, 'setup/win32/Makefile.python'), 'w') as f:
f.write("PYTHON_VERSION=%s\n" % self.args.vm_winxp_python_version)
with open(os.path.join(self.args.build_dir, 'setup/win32/Makefile.servicename'), 'w') as f:
f.write("SERVICENAME=%s\n" % nt_service_name)
remote_build_dir = '/cygdrive/c/odoobuild/server/'
self.ssh("mkdir -p build")
logging.info("Syncing Odoo files to virtual machine...")
self.rsync(['%s/' % self.args.build_dir, '%[email protected]:%s' % (self.login, remote_build_dir)])
self.ssh("cd {}setup/win32;time make allinone;".format(remote_build_dir))
self.rsync(['%[email protected]:%ssetup/win32/release/' % (self.login, remote_build_dir), '%s/' % self.args.build_dir])
logging.info('Finished building Windows package')
class KVMWinTestExe(KVM):
def run(self):
logging.info('Start testing Windows package')
setup_path = glob("%s/openerp-server-setup-*.exe" % self.args.build_dir)[0]
setupfile = setup_path.split('/')[-1]
setupversion = setupfile.split('openerp-server-setup-')[1].split('.exe')[0]
self.rsync(['"%s"' % setup_path, '%[email protected]:' % self.login])
self.ssh("TEMP=/tmp ./%s /S" % setupfile)
self.ssh('PGPASSWORD=openpgpwd /cygdrive/c/"Program Files"/"Odoo %s"/PostgreSQL/bin/createdb.exe -e -U openpg mycompany' % setupversion)
self.ssh('netsh advfirewall set publicprofile state off')
self.ssh('/cygdrive/c/"Program Files"/"Odoo {sv}"/python/python.exe \'c:\\Program Files\\Odoo {sv}\\server\\odoo-bin\' -d mycompany -i base --stop-after-init'.format(sv=setupversion))
_rpc_count_modules(port=18069)
logging.info('Finished testing Windows package')
def build_exe(args):
KVMWinBuildExe(args).start()
shutil.copy(glob('%s/openerp*.exe' % args.build_dir)[0], '%s/odoo_%s.%s.exe' % (args.build_dir, VERSION, TSTAMP))
def test_exe(args):
if args.test:
KVMWinTestExe(args).start()
def parse_args():
ap = argparse.ArgumentParser()
build_dir = "%s-%s" % (ROOTDIR, TSTAMP)
log_levels = {"debug": logging.DEBUG, "info": logging.INFO, "warning": logging.WARN, "error": logging.ERROR, "critical": logging.CRITICAL}
ap.add_argument("-b", "--build-dir", default=build_dir, help="build directory (%(default)s)", metavar="DIR")
ap.add_argument("-p", "--pub", default=None, help="pub directory %(default)s", metavar="DIR")
ap.add_argument("--logging", action="store", choices=list(log_levels.keys()), default="info", help="Logging level")
ap.add_argument("--build-deb", action="store_true")
ap.add_argument("--build-rpm", action="store_true")
ap.add_argument("--build-tgz", action="store_true")
ap.add_argument("--build-win", action="store_true")
# Windows VM
ap.add_argument("--vm-winxp-image", default='/home/odoo/vm/win1036/win10_winpy36.qcow2', help="%(default)s")
ap.add_argument("--vm-winxp-ssh-key", default='/home/odoo/vm/win1036/id_rsa', help="%(default)s")
ap.add_argument("--vm-winxp-login", default='Naresh', help="Windows login %(default)s")
ap.add_argument("--vm-winxp-python-version", default='3.7.4', help="Windows Python version installed in the VM (default: %(default)s)")
ap.add_argument("-t", "--test", action="store_true", default=False, help="Test built packages")
ap.add_argument("-s", "--sign", action="store_true", default=False, help="Sign Debian package / generate Rpm repo")
ap.add_argument("--no-remove", action="store_true", help="don't remove build dir")
ap.add_argument("--blacklist", nargs="*", help="Modules to blacklist in package")
parsed_args = ap.parse_args()
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s', datefmt='%Y-%m-%d %I:%M:%S', level=log_levels[parsed_args.logging])
parsed_args.odoo_dir = ROOTDIR
return parsed_args
def main(args):
try:
if args.build_tgz:
_prepare_build_dir(args)
docker_tgz = DockerTgz(args)
docker_tgz.build()
try:
docker_tgz.start_test()
published_files = publish(args, 'tgz', ['tar.gz', 'zip'])
except Exception as e:
logging.error("Won't publish the tgz release.\n Exception: %s" % str(e))
if args.build_rpm:
_prepare_build_dir(args)
docker_rpm = DockerRpm(args)
docker_rpm.build()
try:
docker_rpm.start_test()
published_files = publish(args, 'rpm', ['rpm'])
if args.sign:
gen_rpm_repo(args, published_files[0])
except Exception as e:
logging.error("Won't publish the rpm release.\n Exception: %s" % str(e))
if args.build_deb:
_prepare_build_dir(args)
docker_deb = DockerDeb(args)
docker_deb.build()
try:
docker_deb.start_test()
published_files = publish(args, 'deb', ['deb', 'dsc', 'changes', 'tar.xz'])
gen_deb_package(args, published_files)
except Exception as e:
logging.error("Won't publish the deb release.\n Exception: %s" % str(e))
if args.build_win:
_prepare_build_dir(args, win32=True)
build_exe(args)
try:
test_exe(args)
published_files = publish(args, 'windows', ['exe'])
except Exception as e:
logging.error("Won't publish the exe release.\n Exception: %s" % str(e))
except Exception as e:
logging.error('Something bad happened ! : {}'.format(e))
traceback.print_exc()
finally:
if args.no_remove:
logging.info('Build dir "{}" not removed'.format(args.build_dir))
else:
if os.path.exists(args.build_dir):
shutil.rmtree(args.build_dir)
logging.info('Build dir %s removed' % args.build_dir)
if __name__ == '__main__':
args = parse_args()
if os.path.exists(args.build_dir):
logging.error('Build dir "%s" already exists.', args.build_dir)
sys.exit(1)
main(args)
| ddico/odoo | setup/package.py | Python | agpl-3.0 | 23,784 |
# Author: echel0n <[email protected]>
# URL: https://sickrage.ca
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import io
import os
import platform
import re
import shutil
import stat
import subprocess
import sys
import tarfile
import threading
import traceback
import sickrage
from sickrage.core.helpers import backupSR
from sickrage.notifiers import Notifiers
class VersionUpdater(object):
"""
Version check class meant to run as a thread object with the sr scheduler.
"""
def __init__(self):
self.name = "VERSIONUPDATER"
self.amActive = False
@property
def updater(self):
return self.find_install_type()
def run(self, force=False):
if self.amActive:
return
self.amActive = True
# set thread name
threading.currentThread().setName(self.name)
try:
if self.check_for_new_version(force) and sickrage.app.config.auto_update:
if sickrage.app.show_updater.amActive:
sickrage.app.log.debug("We can't proceed with auto-updating. Shows are being updated")
return
sickrage.app.log.info("New update found for SiCKRAGE, starting auto-updater ...")
sickrage.app.alerts.message(_('New update found for SiCKRAGE, starting auto-updater'))
if self.update():
sickrage.app.log.info("Update was successful!")
sickrage.app.alerts.message(_('Update was successful'))
sickrage.app.shutdown(restart=True)
else:
sickrage.app.log.info("Update failed!")
sickrage.app.alerts.message(_('Update failed!'))
finally:
self.amActive = False
def backup(self):
if self.safe_to_update():
# Do a system backup before update
sickrage.app.log.info("Config backup in progress...")
sickrage.app.alerts.message(_('Backup'), _('Config backup in progress...'))
try:
backupDir = os.path.join(sickrage.app.data_dir, 'backup')
if not os.path.isdir(backupDir):
os.mkdir(backupDir)
if backupSR(backupDir, keep_latest=True):
sickrage.app.log.info("Config backup successful, updating...")
sickrage.app.alerts.message(_('Backup'), _('Config backup successful, updating...'))
return True
else:
sickrage.app.log.error("Config backup failed, aborting update")
sickrage.app.alerts.message(_('Backup'), _('Config backup failed, aborting update'))
return False
except Exception as e:
sickrage.app.log.error('Update: Config backup failed. Error: %s' % e)
sickrage.app.alerts.message(_('Backup'), _('Config backup failed, aborting update'))
return False
@staticmethod
def safe_to_update():
if not sickrage.app.started:
return True
if not sickrage.app.auto_postprocessor.amActive:
return True
sickrage.app.log.debug("We can't proceed with the update. Post-Processor is running")
@staticmethod
def find_install_type():
"""
Determines how this copy of sr was installed.
returns: type of installation. Possible values are:
'git': running from source using git
'pip': running from source using pip
'source': running from source without git
"""
# default to source install type
install_type = SourceUpdateManager()
if os.path.isdir(os.path.join(os.path.dirname(sickrage.PROG_DIR), '.git')):
# GIT install type
install_type = GitUpdateManager()
elif PipUpdateManager().version:
# PIP install type
install_type = PipUpdateManager()
return install_type
def check_for_new_version(self, force=False):
"""
Checks the internet for a newer version.
returns: bool, True for new version or False for no new version.
:param force: if true the VERSION_NOTIFY setting will be ignored and a check will be forced
"""
if sickrage.app.developer:
return False
if self.updater and self.updater.need_update():
if force: self.updater.set_newest_text()
return True
def update(self):
if self.updater and self.backup():
# check for updates
if self.updater.need_update():
if self.updater.update():
# Clean up after update
to_clean = os.path.join(sickrage.app.cache_dir, 'mako')
for root, dirs, files in os.walk(to_clean, topdown=False):
[os.remove(os.path.join(root, name)) for name in files]
[shutil.rmtree(os.path.join(root, name)) for name in dirs]
return True
@property
def version(self):
if self.updater:
return self.updater.version
@property
def branch(self):
if self.updater:
return self.updater.current_branch
return "master"
class UpdateManager(object):
@property
def _git_path(self):
test_cmd = '--version'
main_git = sickrage.app.config.git_path or 'git'
sickrage.app.log.debug("Checking if we can use git commands: " + main_git + ' ' + test_cmd)
__, __, exit_status = self._git_cmd(main_git, test_cmd)
if exit_status == 0:
sickrage.app.log.debug("Using: " + main_git)
return main_git
else:
sickrage.app.log.debug("Not using: " + main_git)
# trying alternatives
alternative_git = []
# osx people who start sr from launchd have a broken path, so try a hail-mary attempt for them
if platform.system().lower() == 'darwin':
alternative_git.append('/usr/local/git/bin/git')
if platform.system().lower() == 'windows':
if main_git != main_git.lower():
alternative_git.append(main_git.lower())
if alternative_git:
sickrage.app.log.debug("Trying known alternative git locations")
for cur_git in alternative_git:
sickrage.app.log.debug("Checking if we can use git commands: " + cur_git + ' ' + test_cmd)
__, __, exit_status = self._git_cmd(cur_git, test_cmd)
if exit_status == 0:
sickrage.app.log.debug("Using: " + cur_git)
return cur_git
else:
sickrage.app.log.debug("Not using: " + cur_git)
# Still haven't found a working git
error_message = _('Unable to find your git executable - Set your git path from Settings->General->Advanced OR '
'delete your .git folder and run from source to enable updates.')
sickrage.app.newest_version_string = error_message
return None
@property
def _pip_path(self):
test_cmd = '-V'
main_pip = sickrage.app.config.pip_path or 'pip'
sickrage.app.log.debug("Checking if we can use pip commands: " + main_pip + ' ' + test_cmd)
__, __, exit_status = self._pip_cmd(main_pip, test_cmd)
if exit_status == 0:
sickrage.app.log.debug("Using: " + main_pip)
return main_pip
else:
sickrage.app.log.debug("Not using: " + main_pip)
# trying alternatives
alternative_pip = []
# osx people who start sr from launchd have a broken path, so try a hail-mary attempt for them
if platform.system().lower() == 'darwin':
alternative_pip.append('/usr/local/python2.7/bin/pip')
if platform.system().lower() == 'windows':
if main_pip != main_pip.lower():
alternative_pip.append(main_pip.lower())
if alternative_pip:
sickrage.app.log.debug("Trying known alternative pip locations")
for cur_pip in alternative_pip:
sickrage.app.log.debug("Checking if we can use pip commands: " + cur_pip + ' ' + test_cmd)
__, __, exit_status = self._pip_cmd(cur_pip, test_cmd)
if exit_status == 0:
sickrage.app.log.debug("Using: " + cur_pip)
return cur_pip
else:
sickrage.app.log.debug("Not using: " + cur_pip)
        # Still haven't found a working pip
error_message = _('Unable to find your pip executable - Set your pip path from Settings->General->Advanced')
sickrage.app.newest_version_string = error_message
return None
@staticmethod
def _git_cmd(git_path, args):
output = err = None
if not git_path:
sickrage.app.log.warning("No path to git specified, can't use git commands")
exit_status = 1
return output, err, exit_status
cmd = [git_path] + args.split()
try:
sickrage.app.log.debug("Executing " + ' '.join(cmd) + " with your shell in " + sickrage.PROG_DIR)
p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
shell=(sys.platform == 'win32'), cwd=sickrage.PROG_DIR)
output, err = p.communicate()
exit_status = p.returncode
if output:
output = output.strip()
except OSError:
sickrage.app.log.info("Command " + ' '.join(cmd) + " didn't work")
exit_status = 1
if exit_status == 0:
sickrage.app.log.debug(' '.join(cmd) + " : returned successful")
exit_status = 0
elif exit_status == 1:
if 'stash' in output:
sickrage.app.log.warning(
"Please enable 'git reset' in settings or stash your changes in local files")
else:
sickrage.app.log.debug(' '.join(cmd) + " returned : " + str(output))
exit_status = 1
elif exit_status == 128 or 'fatal:' in output or err:
sickrage.app.log.debug(' '.join(cmd) + " returned : " + str(output))
exit_status = 128
else:
sickrage.app.log.debug(' '.join(cmd) + " returned : " + str(output) + ", treat as error for now")
exit_status = 1
return output, err, exit_status
@staticmethod
def _pip_cmd(pip_path, args):
output = err = None
if not pip_path:
sickrage.app.log.warning("No path to pip specified, can't use pip commands")
exit_status = 1
return output, err, exit_status
cmd = [pip_path] + args.split()
try:
sickrage.app.log.debug("Executing " + ' '.join(cmd) + " with your shell in " + sickrage.PROG_DIR)
p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
shell=(sys.platform == 'win32'), cwd=sickrage.PROG_DIR)
output, err = p.communicate()
exit_status = p.returncode
if output:
output = output.strip()
except OSError:
sickrage.app.log.info("Command " + ' '.join(cmd) + " didn't work")
exit_status = 1
if exit_status == 0:
sickrage.app.log.debug(' '.join(cmd) + " : returned successful")
exit_status = 0
else:
sickrage.app.log.debug(' '.join(cmd) + " returned : " + str(output) + ", treat as error for now")
exit_status = 1
return output, err, exit_status
@staticmethod
def get_update_url():
return "{}/home/update/?pid={}".format(sickrage.app.config.web_root, sickrage.app.pid)
def install_requirements(self):
__, __, exit_status = self._pip_cmd(self._pip_path,
'install --no-cache-dir --user -r {}'.format(sickrage.REQS_FILE))
return (False, True)[exit_status == 0]
class GitUpdateManager(UpdateManager):
def __init__(self):
self.type = "git"
@property
def version(self):
return self._find_installed_version()
@property
def get_newest_version(self):
return self._check_for_new_version() or self.version
@property
def current_branch(self):
branch, __, exit_status = self._git_cmd(self._git_path, 'rev-parse --abbrev-ref HEAD')
return ("", branch)[exit_status == 0 and branch is not None]
@property
def remote_branches(self):
branches, __, exit_status = self._git_cmd(self._git_path,
'ls-remote --heads {}'.format(sickrage.app.config.git_remote))
if exit_status == 0 and branches:
return re.findall(r'refs/heads/(.*)', branches)
return []
def _find_installed_version(self):
"""
Attempts to find the currently installed version of SiCKRAGE.
        Uses git rev-parse to get the current commit hash.
        Returns: the commit hash on success or False on failure
"""
output, __, exit_status = self._git_cmd(self._git_path, 'rev-parse HEAD')
if exit_status == 0 and output:
cur_commit_hash = output.strip()
if not re.match('^[a-z0-9]+$', cur_commit_hash):
sickrage.app.log.error("Output doesn't look like a hash, not using it")
return False
return cur_commit_hash
def _check_for_new_version(self):
"""
        Uses git commands to check if there is a newer version than the
        installed commit hash. Returns the latest remote commit hash if found.
"""
# get all new info from server
output, __, exit_status = self._git_cmd(self._git_path, 'remote update')
if not exit_status == 0:
sickrage.app.log.warning("Unable to contact server, can't check for update")
return
# get latest commit_hash from remote
output, __, exit_status = self._git_cmd(self._git_path,
'rev-parse --verify --quiet origin/{}'.format(self.current_branch))
if exit_status == 0 and output:
return output.strip()
def set_newest_text(self):
# if we're up to date then don't set this
sickrage.app.newest_version_string = None
if self.version != self.get_newest_version:
newest_text = _(
'There is a newer version available, version {} — <a href=\"{}\">Update Now</a>').format(
self.get_newest_version, self.get_update_url())
sickrage.app.newest_version_string = newest_text
def need_update(self):
try:
return (False, True)[self.version != self.get_newest_version]
except Exception as e:
sickrage.app.log.warning("Unable to contact server, can't check for update: " + repr(e))
return False
def update(self):
"""
Calls git pull origin <branch> in order to update SiCKRAGE. Returns a bool depending
on the call's success.
"""
# remove untracked files and performs a hard reset on git branch to avoid update issues
if sickrage.app.config.git_reset:
# self.clean() # This is removing user data and backups
self.reset()
__, __, exit_status = self._git_cmd(self._git_path, 'pull -f {} {}'.format(sickrage.app.config.git_remote,
self.current_branch))
if exit_status == 0:
sickrage.app.log.info("Updating SiCKRAGE from GIT servers")
Notifiers.notify_version_update(self.get_newest_version)
self.install_requirements()
return True
return False
def clean(self):
"""
Calls git clean to remove all untracked files. Returns a bool depending
on the call's success.
"""
__, __, exit_status = self._git_cmd(self._git_path, 'clean -df ""')
return (False, True)[exit_status == 0]
def reset(self):
"""
Calls git reset --hard to perform a hard reset. Returns a bool depending
on the call's success.
"""
__, __, exit_status = self._git_cmd(self._git_path, 'reset --hard')
return (False, True)[exit_status == 0]
def fetch(self):
"""
        Calls git fetch to fetch all remote branches. Returns a bool depending
        on the call's success.
"""
__, __, exit_status = self._git_cmd(self._git_path,
'config remote.origin.fetch %s' % '+refs/heads/*:refs/remotes/origin/*')
if exit_status == 0:
__, __, exit_status = self._git_cmd(self._git_path, 'fetch --all')
return (False, True)[exit_status == 0]
def checkout_branch(self, branch):
if branch in self.remote_branches:
sickrage.app.log.debug("Branch checkout: " + self._find_installed_version() + "->" + branch)
# remove untracked files and performs a hard reset on git branch to avoid update issues
if sickrage.app.config.git_reset:
self.reset()
# fetch all branches
self.fetch()
__, __, exit_status = self._git_cmd(self._git_path, 'checkout -f ' + branch)
if exit_status == 0:
self.install_requirements()
return True
return False
def get_remote_url(self):
url, __, exit_status = self._git_cmd(self._git_path,
'remote get-url {}'.format(sickrage.app.config.git_remote))
return ("", url)[exit_status == 0 and url is not None]
def set_remote_url(self):
if not sickrage.app.developer:
self._git_cmd(self._git_path, 'remote set-url {} {}'.format(sickrage.app.config.git_remote,
sickrage.app.config.git_remote_url))
class SourceUpdateManager(UpdateManager):
def __init__(self):
self.type = "source"
@property
def version(self):
return self._find_installed_version()
@property
def get_newest_version(self):
return self._check_for_new_version() or self.version
@property
def current_branch(self):
return 'master'
@staticmethod
def _find_installed_version():
with io.open(os.path.join(sickrage.PROG_DIR, 'version.txt')) as f:
return f.read().strip() or ""
def need_update(self):
try:
return (False, True)[self.version != self.get_newest_version]
except Exception as e:
sickrage.app.log.warning("Unable to contact server, can't check for update: " + repr(e))
return False
def _check_for_new_version(self):
git_version_url = "https://git.sickrage.ca/SiCKRAGE/sickrage/raw/master/sickrage/version.txt"
try:
return sickrage.app.wsession.get(git_version_url).text
except Exception:
return self._find_installed_version()
def set_newest_text(self):
# if we're up to date then don't set this
sickrage.app.newest_version_string = None
if not self.version:
sickrage.app.log.debug("Unknown current version number, don't know if we should update or not")
newest_text = _("Unknown current version number: If yo've never used the SiCKRAGE upgrade system before "
"then current version is not set. — "
"<a href=\"{}\">Update Now</a>").format(self.get_update_url())
else:
newest_text = _("There is a newer version available, version {} — "
"<a href=\"{}\">Update Now</a>").format(self.get_newest_version, self.get_update_url())
sickrage.app.newest_version_string = newest_text
def update(self):
"""
Downloads the latest source tarball from server and installs it over the existing version.
"""
tar_download_url = 'https://git.sickrage.ca/SiCKRAGE/sickrage/repository/archive.tar?ref=master'
try:
# prepare the update dir
sr_update_dir = os.path.join(sickrage.PROG_DIR, 'sr-update')
if os.path.isdir(sr_update_dir):
sickrage.app.log.info("Clearing out update folder " + sr_update_dir + " before extracting")
shutil.rmtree(sr_update_dir)
sickrage.app.log.info("Creating update folder " + sr_update_dir + " before extracting")
os.makedirs(sr_update_dir)
# retrieve file
sickrage.app.log.info("Downloading update from " + repr(tar_download_url))
tar_download_path = os.path.join(sr_update_dir, 'sr-update.tar')
sickrage.app.wsession.download(tar_download_url, tar_download_path)
if not os.path.isfile(tar_download_path):
sickrage.app.log.warning(
"Unable to retrieve new version from " + tar_download_url + ", can't update")
return False
if not tarfile.is_tarfile(tar_download_path):
sickrage.app.log.error(
"Retrieved version from " + tar_download_url + " is corrupt, can't update")
return False
# extract to sr-update dir
sickrage.app.log.info("Extracting file " + tar_download_path)
tar = tarfile.open(tar_download_path)
tar.extractall(sr_update_dir)
tar.close()
# delete .tar.gz
sickrage.app.log.info("Deleting file " + tar_download_path)
os.remove(tar_download_path)
# find update dir name
update_dir_contents = [x for x in os.listdir(sr_update_dir) if
os.path.isdir(os.path.join(sr_update_dir, x))]
if len(update_dir_contents) != 1:
sickrage.app.log.error("Invalid update data, update failed: " + str(update_dir_contents))
return False
content_dir = os.path.join(sr_update_dir, update_dir_contents[0])
# walk temp folder and move files to main folder
sickrage.app.log.info("Moving files from " + content_dir + " to " + sickrage.PROG_DIR)
for dirname, __, filenames in os.walk(content_dir):
dirname = dirname[len(content_dir) + 1:]
for curfile in filenames:
old_path = os.path.join(content_dir, dirname, curfile)
new_path = os.path.join(sickrage.PROG_DIR, dirname, curfile)
# Avoid DLL access problem on WIN32/64
# These files needing to be updated manually
# or find a way to kill the access from memory
if curfile in ('unrar.dll', 'unrar64.dll'):
try:
os.chmod(new_path, stat.S_IWRITE)
os.remove(new_path)
os.renames(old_path, new_path)
except Exception as e:
sickrage.app.log.debug("Unable to update " + new_path + ': ' + e.message)
os.remove(old_path) # Trash the updated file without moving in new path
continue
if os.path.isfile(new_path):
os.remove(new_path)
os.renames(old_path, new_path)
except Exception as e:
sickrage.app.log.error("Error while trying to update: {}".format(e.message))
sickrage.app.log.debug("Traceback: " + traceback.format_exc())
return False
# Notify update successful
Notifiers.notify_version_update(self.get_newest_version)
# install requirements
self.install_requirements()
return True
class PipUpdateManager(UpdateManager):
def __init__(self):
self.type = "pip"
@property
def version(self):
return self._find_installed_version()
@property
def get_newest_version(self):
return self._check_for_new_version() or self.version
@property
def current_branch(self):
return 'master'
def _find_installed_version(self):
out, __, exit_status = self._pip_cmd(self._pip_path, 'show sickrage')
if exit_status == 0:
return out.split('\n')[1].split()[1]
return ""
def need_update(self):
        # query PyPi first for the newest available version
try:
pypi_version = self.get_newest_version
if self.version != pypi_version:
sickrage.app.log.debug(
"Version upgrade: " + self._find_installed_version() + " -> " + pypi_version)
return True
except Exception as e:
sickrage.app.log.warning("Unable to contact PyPi, can't check for update: " + repr(e))
return False
def _check_for_new_version(self):
from distutils.version import StrictVersion
url = "https://pypi.python.org/pypi/{}/json".format('sickrage')
resp = sickrage.app.wsession.get(url)
versions = resp.json()["releases"].keys()
versions.sort(key=StrictVersion, reverse=True)
try:
return versions[0]
except Exception:
return self._find_installed_version()
def set_newest_text(self):
# if we're up to date then don't set this
sickrage.app.newest_version_string = None
if not self.version:
sickrage.app.log.debug("Unknown current version number, don't know if we should update or not")
return
else:
newest_text = _("New SiCKRAGE update found on PyPy servers, version {} — "
"<a href=\"{}\">Update Now</a>").format(self.get_newest_version, self.get_update_url())
sickrage.app.newest_version_string = newest_text
def update(self):
"""
Performs pip upgrade
"""
__, __, exit_status = self._pip_cmd(self._pip_path, 'install -U --no-cache-dir sickrage')
if exit_status == 0:
sickrage.app.log.info("Updating SiCKRAGE from PyPi servers")
Notifiers.notify_version_update(self.get_newest_version)
return True
return False
| gborri/SickRage | sickrage/core/version_updater.py | Python | gpl-3.0 | 27,394 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras initializers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.keras.python import keras
from tensorflow.python.ops import init_ops
from tensorflow.python.platform import test
class KerasInitializersTest(test.TestCase):
def _runner(self, init, shape, target_mean=None, target_std=None,
target_max=None, target_min=None):
variable = keras.backend.variable(init(shape))
output = keras.backend.get_value(variable)
lim = 3e-2
if target_std is not None:
self.assertGreater(lim, abs(output.std() - target_std))
if target_mean is not None:
self.assertGreater(lim, abs(output.mean() - target_mean))
if target_max is not None:
self.assertGreater(lim, abs(output.max() - target_max))
if target_min is not None:
self.assertGreater(lim, abs(output.min() - target_min))
# Test serialization (assumes deterministic behavior).
config = init.get_config()
reconstructed_init = init.__class__.from_config(config)
variable = keras.backend.variable(reconstructed_init(shape))
output_2 = keras.backend.get_value(variable)
self.assertAllClose(output, output_2, atol=1e-4)
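    # Standalone sketch of the serialization round-trip exercised above
    # (illustrative only; it mirrors the get_config/from_config calls used in
    # this helper):
    #
    #     init = keras.initializers.RandomUniform(minval=-1, maxval=1, seed=124)
    #     config = init.get_config()
    #     same_init = keras.initializers.RandomUniform.from_config(config)
    #     # same_init produces the same values as init for a given shape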
def test_uniform(self):
tensor_shape = (9, 6, 7)
with self.test_session():
self._runner(keras.initializers.RandomUniform(minval=-1,
maxval=1,
seed=124),
tensor_shape,
target_mean=0., target_max=1, target_min=-1)
def test_normal(self):
tensor_shape = (8, 12, 99)
with self.test_session():
self._runner(keras.initializers.RandomNormal(mean=0, stddev=1, seed=153),
tensor_shape,
target_mean=0., target_std=1)
def test_truncated_normal(self):
tensor_shape = (12, 99, 7)
with self.test_session():
self._runner(keras.initializers.TruncatedNormal(mean=0,
stddev=1,
seed=126),
tensor_shape,
target_mean=0., target_std=None, target_max=2)
def test_constant(self):
tensor_shape = (5, 6, 4)
with self.test_session():
self._runner(keras.initializers.Constant(2), tensor_shape,
target_mean=2, target_max=2, target_min=2)
def test_lecun_uniform(self):
tensor_shape = (5, 6, 4, 2)
with self.test_session():
fan_in, _ = init_ops._compute_fans(tensor_shape)
scale = np.sqrt(3. / fan_in)
self._runner(keras.initializers.lecun_uniform(seed=123), tensor_shape,
target_mean=0., target_max=scale, target_min=-scale)
def test_glorot_uniform(self):
tensor_shape = (5, 6, 4, 2)
with self.test_session():
fan_in, fan_out = init_ops._compute_fans(tensor_shape)
scale = np.sqrt(6. / (fan_in + fan_out))
self._runner(keras.initializers.glorot_uniform(seed=123), tensor_shape,
target_mean=0., target_max=scale, target_min=-scale)
def test_he_uniform(self):
tensor_shape = (5, 6, 4, 2)
with self.test_session():
fan_in, _ = init_ops._compute_fans(tensor_shape)
scale = np.sqrt(6. / fan_in)
self._runner(keras.initializers.he_uniform(seed=123), tensor_shape,
target_mean=0., target_max=scale, target_min=-scale)
def test_glorot_normal(self):
tensor_shape = (5, 6, 4, 2)
with self.test_session():
fan_in, fan_out = init_ops._compute_fans(tensor_shape)
scale = np.sqrt(2. / (fan_in + fan_out))
self._runner(keras.initializers.glorot_normal(seed=123), tensor_shape,
target_mean=0., target_std=None, target_max=2 * scale)
def test_he_normal(self):
tensor_shape = (5, 6, 4, 2)
with self.test_session():
fan_in, _ = init_ops._compute_fans(tensor_shape)
scale = np.sqrt(2. / fan_in)
self._runner(keras.initializers.he_normal(seed=123), tensor_shape,
target_mean=0., target_std=None, target_max=2 * scale)
def test_orthogonal(self):
tensor_shape = (10, 10)
with self.test_session():
self._runner(keras.initializers.orthogonal(seed=123), tensor_shape,
target_mean=0.)
def test_identity(self):
with self.test_session():
tensor_shape = (3, 4, 5)
with self.assertRaises(ValueError):
self._runner(keras.initializers.identity(), tensor_shape,
target_mean=1. / tensor_shape[0], target_max=1.)
tensor_shape = (3, 3)
self._runner(keras.initializers.identity(), tensor_shape,
target_mean=1. / tensor_shape[0], target_max=1.)
def test_zero(self):
tensor_shape = (4, 5)
with self.test_session():
self._runner(keras.initializers.zeros(), tensor_shape,
target_mean=0., target_max=0.)
def test_one(self):
tensor_shape = (4, 5)
with self.test_session():
self._runner(keras.initializers.ones(), tensor_shape,
target_mean=1., target_max=1.)
if __name__ == '__main__':
test.main()
| thesuperzapper/tensorflow | tensorflow/contrib/keras/python/keras/initializers_test.py | Python | apache-2.0 | 5,930 |
from __future__ import unicode_literals
from django.test import TestCase
from wtl.wtparser.parsers import PodfileParser
PODFILE = """
# Example Podfile
platform :ios, '7.0'
inhibit_all_warnings!
xcodeproj `MyProject`
pod 'SSToolkit'
pod 'AFNetworking', '>= 0.5.1'
pod 'Objection', :head # 'bleeding edge'
pod 'Rejection', '0.0.0'
target :test do
pod 'OCMock', '~> 2.0.1'
end
generate_bridge_support!
post_install do |installer|
installer.project.targets.each do |target|
puts "#target.name"
end
end
"""
class BaseTestCase(TestCase):
def setUp(self):
super(BaseTestCase, self).setUp()
self.parser = PodfileParser()
class PodfileDetectTestCase(BaseTestCase):
def test_podfile(self):
self.assertTrue(self.parser.detect(PODFILE))
def test_unknown(self):
content = "#include <iostream>"
self.assertFalse(self.parser.detect(content))
class PodfileParseTestCase(BaseTestCase):
maxDiff = None
def test_podfile(self):
expect = {
'filename': self.parser.filename,
'language': self.parser.language,
'platform': 'ios',
'version': '7.0',
'packages': [
{'name': 'SSToolkit',
'version': None,
'version_special': 'stable'},
{'name': 'AFNetworking',
'version': '0.5.1',
'version_special': '>='},
{'name': 'Objection',
'version': None,
'version_special': 'latest'},
{'name': 'Rejection',
'version': '0.0.0',
'version_special': ''},
{'name': 'OCMock',
'version': '2.0.1',
'version_special': '~>'},
],
}
self.assertEqual(self.parser.parse(PODFILE), expect)
| elegion/djangodash2013 | wtl/wtparser/tests/parsers/podfile.py | Python | mit | 1,871 |
# OpenShot Video Editor is a program that creates, modifies, and edits video files.
# Copyright (C) 2009 Jonathan Thomas
#
# This file is part of OpenShot Video Editor (http://launchpad.net/openshot/).
#
# OpenShot Video Editor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenShot Video Editor is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OpenShot Video Editor. If not, see <http://www.gnu.org/licenses/>.
import gtk, gobject, pango
from classes.project import project
def treeviewAddGeneralTextColumn(treeview, name, pos = 0, resizable=True, reorderable=False, editable=False, visible=True, elipses=False, autosize=False, project=None):
'''Add a new text column to the model'''
cell = gtk.CellRendererText()
cell.set_property('editable', editable)
if (elipses):
cell.set_property("ellipsize", pango.ELLIPSIZE_END)
col = gtk.TreeViewColumn(name, cell, markup = pos)
if (autosize):
col.set_sizing(gtk.TREE_VIEW_COLUMN_AUTOSIZE)
col.set_expand(False)
col.set_resizable(resizable)
col.set_reorderable(reorderable)
col.set_property("visible", visible)
treeview.append_column(col)
treeview.set_headers_clickable(True)
if (editable):
model = treeview.get_model()
cell.connect('edited',cell_edited,model, project)
if (reorderable):
col.set_sort_column_id(pos)
return cell, col
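# A hypothetical call (illustrative only; the treeview and model layout are
# assumptions). pos=3 matches the model column that cell_edited below writes
# the new label text into:
#
#   cell, col = treeviewAddGeneralTextColumn(treeview, "Label", 3,
#                                            editable=True, project=project)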
def treeviewAddGeneralPixbufColumn(treeview, name, pos = 0, resizable=True, reorderable=False, project=None):
'''Add a new gtk.gdk.Pixbuf column to the model'''
cell = gtk.CellRendererPixbuf()
col = gtk.TreeViewColumn(name, cell, pixbuf = pos)
col.set_resizable(resizable)
col.set_reorderable(reorderable)
col.set_alignment(0.0)
treeview.append_column(col)
treeview.set_headers_clickable(True)
if (reorderable):
col.set_sort_column_id(pos)
return cell, col
def cell_edited(cell, row, new_text, model, project=None):
##Fired when the editable label cell is edited
#get the row that was edited
iter = model.get_iter_from_string(row)
column = cell.get_data("Label")
#set the edit in the model
model.set(iter,3,new_text)
#update the file object with the label edit
#filename = model.get_value(iter, 1)
#project.project_folder.UpdateFileLabel(filename, new_text, 0)
unique_id = model.get_value(iter, 4)
project.project_folder.UpdateFileLabel(unique_id, new_text, 0)
| i5o/openshot-sugar | openshot/openshot/classes/tree.py | Python | gpl-3.0 | 2,760 |
"""A parser for HTML and XHTML."""
# This file is based on sgmllib.py, but the API is slightly different.
# XXX There should be a way to distinguish between PCDATA (parsed
# character data -- the normal case), RCDATA (replaceable character
# data -- only char and entity references and end tags are special)
# and CDATA (character data -- only end tags are special).
import markupbase
import re
# Regular expressions used for parsing
interesting_normal = re.compile('[&<]')
interesting_cdata = re.compile(r'<(/|\Z)')
incomplete = re.compile('&[a-zA-Z#]')
entityref = re.compile('&([a-zA-Z][-.a-zA-Z0-9]*)[^a-zA-Z0-9]')
charref = re.compile('&#(?:[0-9]+|[xX][0-9a-fA-F]+)[^0-9a-fA-F]')
starttagopen = re.compile('<[a-zA-Z]')
piclose = re.compile('>')
commentclose = re.compile(r'--\s*>')
tagfind = re.compile('[a-zA-Z][-.a-zA-Z0-9:_]*')
attrfind = re.compile(
r'\s*([a-zA-Z_][-.:a-zA-Z_0-9]*)(\s*=\s*'
r'(\'[^\']*\'|"[^"]*"|[-a-zA-Z0-9./,:;+*%?!&$\(\)_#=~@]*))?')
locatestarttagend = re.compile(r"""
<[a-zA-Z][-.a-zA-Z0-9:_]* # tag name
(?:\s+ # whitespace before attribute name
(?:[a-zA-Z_][-.:a-zA-Z0-9_]* # attribute name
(?:\s*=\s* # value indicator
(?:'[^']*' # LITA-enclosed value
|\"[^\"]*\" # LIT-enclosed value
|[^'\">\s]+ # bare value
)
)?
)
)*
\s* # trailing whitespace
""", re.VERBOSE)
endendtag = re.compile('>')
endtagfind = re.compile('</\s*([a-zA-Z][-.a-zA-Z0-9:_]*)\s*>')
class HTMLParseError(Exception):
"""Exception raised for all parse errors."""
def __init__(self, msg, position=(None, None)):
assert msg
self.msg = msg
self.lineno = position[0]
self.offset = position[1]
def __str__(self):
result = self.msg
if self.lineno is not None:
result = result + ", at line %d" % self.lineno
if self.offset is not None:
result = result + ", column %d" % (self.offset + 1)
return result
class HTMLParser(markupbase.ParserBase):
"""Find tags and other markup and call handler functions.
Usage:
p = HTMLParser()
p.feed(data)
...
p.close()
Start tags are handled by calling self.handle_starttag() or
self.handle_startendtag(); end tags by self.handle_endtag(). The
data between tags is passed from the parser to the derived class
by calling self.handle_data() with the data as argument (the data
may be split up in arbitrary chunks). Entity references are
passed by calling self.handle_entityref() with the entity
reference as the argument. Numeric character references are
passed to self.handle_charref() with the string containing the
reference as the argument.
"""
CDATA_CONTENT_ELEMENTS = ("script", "style")
def __init__(self):
"""Initialize and reset this instance."""
self.reset()
def reset(self):
"""Reset this instance. Loses all unprocessed data."""
self.rawdata = ''
self.lasttag = '???'
self.interesting = interesting_normal
markupbase.ParserBase.reset(self)
def feed(self, data):
"""Feed data to the parser.
Call this as often as you want, with as little or as much text
as you want (may include '\n').
"""
self.rawdata = self.rawdata + data
self.goahead(0)
def close(self):
"""Handle any buffered data."""
self.goahead(1)
def error(self, message):
raise HTMLParseError(message, self.getpos())
__starttag_text = None
def get_starttag_text(self):
"""Return full source of start tag: '<...>'."""
return self.__starttag_text
def set_cdata_mode(self):
self.interesting = interesting_cdata
def clear_cdata_mode(self):
self.interesting = interesting_normal
# Internal -- handle data as far as reasonable. May leave state
# and data to be processed by a subsequent call. If 'end' is
# true, force handling all data as if followed by EOF marker.
def goahead(self, end):
rawdata = self.rawdata
i = 0
n = len(rawdata)
while i < n:
match = self.interesting.search(rawdata, i) # < or &
if match:
j = match.start()
else:
j = n
if i < j: self.handle_data(rawdata[i:j])
i = self.updatepos(i, j)
if i == n: break
startswith = rawdata.startswith
if startswith('<', i):
if starttagopen.match(rawdata, i): # < + letter
k = self.parse_starttag(i)
elif startswith("</", i):
k = self.parse_endtag(i)
elif startswith("<!--", i):
k = self.parse_comment(i)
elif startswith("<?", i):
k = self.parse_pi(i)
elif startswith("<!", i):
k = self.parse_declaration(i)
elif (i + 1) < n:
self.handle_data("<")
k = i + 1
else:
break
if k < 0:
if end:
self.error("EOF in middle of construct")
break
i = self.updatepos(i, k)
elif startswith("&#", i):
match = charref.match(rawdata, i)
if match:
name = match.group()[2:-1]
self.handle_charref(name)
k = match.end()
if not startswith(';', k-1):
k = k - 1
i = self.updatepos(i, k)
continue
else:
break
elif startswith('&', i):
match = entityref.match(rawdata, i)
if match:
name = match.group(1)
self.handle_entityref(name)
k = match.end()
if not startswith(';', k-1):
k = k - 1
i = self.updatepos(i, k)
continue
match = incomplete.match(rawdata, i)
if match:
# match.group() will contain at least 2 chars
if end and match.group() == rawdata[i:]:
self.error("EOF in middle of entity or char ref")
# incomplete
break
elif (i + 1) < n:
# not the end of the buffer, and can't be confused
# with some other construct
self.handle_data("&")
i = self.updatepos(i, i + 1)
else:
break
else:
assert 0, "interesting.search() lied"
# end while
if end and i < n:
self.handle_data(rawdata[i:n])
i = self.updatepos(i, n)
self.rawdata = rawdata[i:]
# Internal -- parse processing instr, return end or -1 if not terminated
def parse_pi(self, i):
rawdata = self.rawdata
assert rawdata[i:i+2] == '<?', 'unexpected call to parse_pi()'
match = piclose.search(rawdata, i+2) # >
if not match:
return -1
j = match.start()
self.handle_pi(rawdata[i+2: j])
j = match.end()
return j
# Internal -- handle starttag, return end or -1 if not terminated
def parse_starttag(self, i):
self.__starttag_text = None
endpos = self.check_for_whole_start_tag(i)
if endpos < 0:
return endpos
rawdata = self.rawdata
self.__starttag_text = rawdata[i:endpos]
# Now parse the data between i+1 and j into a tag and attrs
attrs = []
match = tagfind.match(rawdata, i+1)
assert match, 'unexpected call to parse_starttag()'
k = match.end()
self.lasttag = tag = rawdata[i+1:k].lower()
while k < endpos:
m = attrfind.match(rawdata, k)
if not m:
break
attrname, rest, attrvalue = m.group(1, 2, 3)
if not rest:
attrvalue = None
elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
attrvalue[:1] == '"' == attrvalue[-1:]:
attrvalue = attrvalue[1:-1]
attrvalue = self.unescape(attrvalue)
attrs.append((attrname.lower(), attrvalue))
k = m.end()
end = rawdata[k:endpos].strip()
if end not in (">", "/>"):
lineno, offset = self.getpos()
if "\n" in self.__starttag_text:
lineno = lineno + self.__starttag_text.count("\n")
offset = len(self.__starttag_text) \
- self.__starttag_text.rfind("\n")
else:
offset = offset + len(self.__starttag_text)
self.error("junk characters in start tag: %r"
% (rawdata[k:endpos][:20],))
if end.endswith('/>'):
# XHTML-style empty tag: <span attr="value" />
self.handle_startendtag(tag, attrs)
else:
self.handle_starttag(tag, attrs)
if tag in self.CDATA_CONTENT_ELEMENTS:
self.set_cdata_mode()
return endpos
# Internal -- check to see if we have a complete starttag; return end
# or -1 if incomplete.
def check_for_whole_start_tag(self, i):
rawdata = self.rawdata
m = locatestarttagend.match(rawdata, i)
if m:
j = m.end()
next = rawdata[j:j+1]
if next == ">":
return j + 1
if next == "/":
if rawdata.startswith("/>", j):
return j + 2
if rawdata.startswith("/", j):
# buffer boundary
return -1
# else bogus input
self.updatepos(i, j + 1)
self.error("malformed empty start tag")
if next == "":
# end of input
return -1
if next in ("abcdefghijklmnopqrstuvwxyz=/"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
# end of input in or before attribute value, or we have the
# '/' from a '/>' ending
return -1
self.updatepos(i, j)
self.error("malformed start tag")
raise AssertionError("we should not get here!")
# Internal -- parse endtag, return end or -1 if incomplete
def parse_endtag(self, i):
rawdata = self.rawdata
assert rawdata[i:i+2] == "</", "unexpected call to parse_endtag"
match = endendtag.search(rawdata, i+1) # >
if not match:
return -1
j = match.end()
match = endtagfind.match(rawdata, i) # </ + tag + >
if not match:
self.error("bad end tag: %r" % (rawdata[i:j],))
tag = match.group(1)
self.handle_endtag(tag.lower())
self.clear_cdata_mode()
return j
# Overridable -- finish processing of start+end tag: <tag.../>
def handle_startendtag(self, tag, attrs):
self.handle_starttag(tag, attrs)
self.handle_endtag(tag)
# Overridable -- handle start tag
def handle_starttag(self, tag, attrs):
pass
# Overridable -- handle end tag
def handle_endtag(self, tag):
pass
# Overridable -- handle character reference
def handle_charref(self, name):
pass
# Overridable -- handle entity reference
def handle_entityref(self, name):
pass
# Overridable -- handle data
def handle_data(self, data):
pass
# Overridable -- handle comment
def handle_comment(self, data):
pass
# Overridable -- handle declaration
def handle_decl(self, decl):
pass
# Overridable -- handle processing instruction
def handle_pi(self, data):
pass
def unknown_decl(self, data):
self.error("unknown declaration: %r" % (data,))
# Internal -- helper to remove special character quoting
entitydefs = None
def unescape(self, s):
if '&' not in s:
return s
def replaceEntities(s):
s = s.groups()[0]
if s[0] == "#":
s = s[1:]
if s[0] in ['x','X']:
c = int(s[1:], 16)
else:
c = int(s)
return unichr(c)
else:
# Cannot use name2codepoint directly, because HTMLParser supports apos,
# which is not part of HTML 4
import htmlentitydefs
if HTMLParser.entitydefs is None:
entitydefs = HTMLParser.entitydefs = {'apos':u"'"}
for k, v in htmlentitydefs.name2codepoint.iteritems():
entitydefs[k] = unichr(v)
try:
return self.entitydefs[s]
except KeyError:
return '&'+s+';'
return re.sub(r"&(#?[xX]?(?:[0-9a-fA-F]+|\w{1,8}));", replaceEntities, s)
| deanhiller/databus | webapp/play1.3.x/python/Lib/HTMLParser.py | Python | mpl-2.0 | 13,794 |
from __future__ import division, print_function, absolute_import
from scipy.lib.six import xrange
import scipy.special
from numpy import (logical_and, asarray, pi, zeros_like,
piecewise, array, arctan2, tan, zeros, arange, floor)
from numpy.core.umath import (sqrt, exp, greater, less, cos, add, sin,
less_equal, greater_equal)
# From splinemodule.c
from .spline import cspline2d, sepfir2d
from scipy.misc import comb
__all__ = ['spline_filter', 'bspline', 'gauss_spline', 'cubic', 'quadratic',
'cspline1d', 'qspline1d', 'cspline1d_eval', 'qspline1d_eval']
gamma = scipy.special.gamma
def factorial(n):
return gamma(n + 1)
def spline_filter(Iin, lmbda=5.0):
"""Smoothing spline (cubic) filtering of a rank-2 array.
Filter an input data set, `Iin`, using a (cubic) smoothing spline of
fall-off `lmbda`.
"""
intype = Iin.dtype.char
hcol = array([1.0, 4.0, 1.0], 'f') / 6.0
if intype in ['F', 'D']:
Iin = Iin.astype('F')
ckr = cspline2d(Iin.real, lmbda)
cki = cspline2d(Iin.imag, lmbda)
outr = sepfir2d(ckr, hcol, hcol)
outi = sepfir2d(cki, hcol, hcol)
out = (outr + 1j * outi).astype(intype)
elif intype in ['f', 'd']:
ckr = cspline2d(Iin, lmbda)
out = sepfir2d(ckr, hcol, hcol)
out = out.astype(intype)
else:
raise TypeError("Invalid data type for Iin")
return out
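# A minimal usage sketch (illustrative only; the input array is made up):
#
#     >>> import numpy as np
#     >>> img = np.random.rand(16, 16).astype(np.float32)
#     >>> out = spline_filter(img, lmbda=5.0)
#     >>> out.shape
#     (16, 16)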
_splinefunc_cache = {}
def _bspline_piecefunctions(order):
"""Returns the function defined over the left-side pieces for a bspline of
a given order.
The 0th piece is the first one less than 0. The last piece is a function
identical to 0 (returned as the constant 0). (There are order//2 + 2 total
pieces).
Also returns the condition functions that when evaluated return boolean
arrays for use with `numpy.piecewise`.
"""
try:
return _splinefunc_cache[order]
except KeyError:
pass
def condfuncgen(num, val1, val2):
if num == 0:
return lambda x: logical_and(less_equal(x, val1),
greater_equal(x, val2))
elif num == 2:
return lambda x: less_equal(x, val2)
else:
return lambda x: logical_and(less(x, val1),
greater_equal(x, val2))
last = order // 2 + 2
if order % 2:
startbound = -1.0
else:
startbound = -0.5
condfuncs = [condfuncgen(0, 0, startbound)]
bound = startbound
for num in xrange(1, last - 1):
condfuncs.append(condfuncgen(1, bound, bound - 1))
bound = bound - 1
condfuncs.append(condfuncgen(2, 0, -(order + 1) / 2.0))
# final value of bound is used in piecefuncgen below
# the functions to evaluate are taken from the left-hand-side
# in the general expression derived from the central difference
# operator (because they involve fewer terms).
fval = factorial(order)
def piecefuncgen(num):
Mk = order // 2 - num
if (Mk < 0):
return 0 # final function is 0
coeffs = [(1 - 2 * (k % 2)) * float(comb(order + 1, k, exact=1)) / fval
for k in xrange(Mk + 1)]
shifts = [-bound - k for k in xrange(Mk + 1)]
def thefunc(x):
res = 0.0
for k in range(Mk + 1):
res += coeffs[k] * (x + shifts[k]) ** order
return res
return thefunc
funclist = [piecefuncgen(k) for k in xrange(last)]
_splinefunc_cache[order] = (funclist, condfuncs)
return funclist, condfuncs
def bspline(x, n):
"""B-spline basis function of order n.
Notes
-----
Uses numpy.piecewise and automatic function-generator.
"""
ax = -abs(asarray(x))
# number of pieces on the left-side is (n+1)/2
funclist, condfuncs = _bspline_piecefunctions(n)
condlist = [func(ax) for func in condfuncs]
return piecewise(ax, condlist, funclist)
def gauss_spline(x, n):
"""Gaussian approximation to B-spline basis function of order n.
"""
signsq = (n + 1) / 12.0
return 1 / sqrt(2 * pi * signsq) * exp(-x ** 2 / 2 / signsq)
def cubic(x):
"""A cubic B-spline.
This is a special case of `bspline`, and equivalent to ``bspline(x, 3)``.
"""
ax = abs(asarray(x))
res = zeros_like(ax)
cond1 = less(ax, 1)
if cond1.any():
ax1 = ax[cond1]
res[cond1] = 2.0 / 3 - 1.0 / 2 * ax1 ** 2 * (2 - ax1)
cond2 = ~cond1 & less(ax, 2)
if cond2.any():
ax2 = ax[cond2]
res[cond2] = 1.0 / 6 * (2 - ax2) ** 3
return res
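# A short consistency sketch (illustrative only), relying on the equivalence
# with ``bspline(x, 3)`` noted in the docstring above:
#
#     >>> import numpy as np
#     >>> x = np.linspace(-2, 2, 9)
#     >>> np.allclose(cubic(x), bspline(x, 3))
#     True
#     >>> cubic(np.array([0.0, 1.0]))   # peak 2/3 at x = 0, 1/6 at |x| = 1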
def quadratic(x):
"""A quadratic B-spline.
This is a special case of `bspline`, and equivalent to ``bspline(x, 2)``.
"""
ax = abs(asarray(x))
res = zeros_like(ax)
cond1 = less(ax, 0.5)
if cond1.any():
ax1 = ax[cond1]
res[cond1] = 0.75 - ax1 ** 2
cond2 = ~cond1 & less(ax, 1.5)
if cond2.any():
ax2 = ax[cond2]
res[cond2] = (ax2 - 1.5) ** 2 / 2.0
return res
def _coeff_smooth(lam):
xi = 1 - 96 * lam + 24 * lam * sqrt(3 + 144 * lam)
omeg = arctan2(sqrt(144 * lam - 1), sqrt(xi))
rho = (24 * lam - 1 - sqrt(xi)) / (24 * lam)
rho = rho * sqrt((48 * lam + 24 * lam * sqrt(3 + 144 * lam)) / xi)
return rho, omeg
def _hc(k, cs, rho, omega):
return (cs / sin(omega) * (rho ** k) * sin(omega * (k + 1)) *
greater(k, -1))
def _hs(k, cs, rho, omega):
c0 = (cs * cs * (1 + rho * rho) / (1 - rho * rho) /
(1 - 2 * rho * rho * cos(2 * omega) + rho ** 4))
gamma = (1 - rho * rho) / (1 + rho * rho) / tan(omega)
ak = abs(k)
return c0 * rho ** ak * (cos(omega * ak) + gamma * sin(omega * ak))
def _cubic_smooth_coeff(signal, lamb):
rho, omega = _coeff_smooth(lamb)
cs = 1 - 2 * rho * cos(omega) + rho * rho
K = len(signal)
yp = zeros((K,), signal.dtype.char)
k = arange(K)
yp[0] = (_hc(0, cs, rho, omega) * signal[0] +
add.reduce(_hc(k + 1, cs, rho, omega) * signal))
yp[1] = (_hc(0, cs, rho, omega) * signal[0] +
_hc(1, cs, rho, omega) * signal[1] +
add.reduce(_hc(k + 2, cs, rho, omega) * signal))
for n in range(2, K):
yp[n] = (cs * signal[n] + 2 * rho * cos(omega) * yp[n - 1] -
rho * rho * yp[n - 2])
y = zeros((K,), signal.dtype.char)
y[K - 1] = add.reduce((_hs(k, cs, rho, omega) +
_hs(k + 1, cs, rho, omega)) * signal[::-1])
y[K - 2] = add.reduce((_hs(k - 1, cs, rho, omega) +
_hs(k + 2, cs, rho, omega)) * signal[::-1])
for n in range(K - 3, -1, -1):
y[n] = (cs * yp[n] + 2 * rho * cos(omega) * y[n + 1] -
rho * rho * y[n + 2])
return y
def _cubic_coeff(signal):
zi = -2 + sqrt(3)
K = len(signal)
yplus = zeros((K,), signal.dtype.char)
powers = zi ** arange(K)
yplus[0] = signal[0] + zi * add.reduce(powers * signal)
for k in range(1, K):
yplus[k] = signal[k] + zi * yplus[k - 1]
output = zeros((K,), signal.dtype)
output[K - 1] = zi / (zi - 1) * yplus[K - 1]
for k in range(K - 2, -1, -1):
output[k] = zi * (output[k + 1] - yplus[k])
return output * 6.0
def _quadratic_coeff(signal):
zi = -3 + 2 * sqrt(2.0)
K = len(signal)
yplus = zeros((K,), signal.dtype.char)
powers = zi ** arange(K)
yplus[0] = signal[0] + zi * add.reduce(powers * signal)
for k in range(1, K):
yplus[k] = signal[k] + zi * yplus[k - 1]
output = zeros((K,), signal.dtype.char)
output[K - 1] = zi / (zi - 1) * yplus[K - 1]
for k in range(K - 2, -1, -1):
output[k] = zi * (output[k + 1] - yplus[k])
return output * 8.0
def cspline1d(signal, lamb=0.0):
"""
Compute cubic spline coefficients for rank-1 array.
Find the cubic spline coefficients for a 1-D signal assuming
mirror-symmetric boundary conditions. To obtain the signal back from the
spline representation mirror-symmetric-convolve these coefficients with a
length 3 FIR window [1.0, 4.0, 1.0]/ 6.0 .
Parameters
----------
signal : ndarray
A rank-1 array representing samples of a signal.
lamb : float, optional
Smoothing coefficient, default is 0.0.
Returns
-------
c : ndarray
Cubic spline coefficients.
"""
if lamb != 0.0:
return _cubic_smooth_coeff(signal, lamb)
else:
return _cubic_coeff(signal)
def qspline1d(signal, lamb=0.0):
"""Compute quadratic spline coefficients for rank-1 array.
Find the quadratic spline coefficients for a 1-D signal assuming
mirror-symmetric boundary conditions. To obtain the signal back from the
spline representation mirror-symmetric-convolve these coefficients with a
length 3 FIR window [1.0, 6.0, 1.0]/ 8.0 .
Parameters
----------
signal : ndarray
A rank-1 array representing samples of a signal.
lamb : float, optional
Smoothing coefficient (must be zero for now).
Returns
-------
c : ndarray
        Quadratic spline coefficients.
"""
if lamb != 0.0:
raise ValueError("Smoothing quadratic splines not supported yet.")
else:
return _quadratic_coeff(signal)
def cspline1d_eval(cj, newx, dx=1.0, x0=0):
"""Evaluate a spline at the new set of points.
    `dx` is the old sample spacing while `x0` was the old origin. In
    other words, the old sample points (knot points) for which the `cj`
    represent spline coefficients were at equally-spaced points of:
oldx = x0 + j*dx j=0...N-1, with N=len(cj)
Edges are handled using mirror-symmetric boundary conditions.
"""
newx = (asarray(newx) - x0) / float(dx)
res = zeros_like(newx)
if res.size == 0:
return res
N = len(cj)
cond1 = newx < 0
cond2 = newx > (N - 1)
cond3 = ~(cond1 | cond2)
# handle general mirror-symmetry
res[cond1] = cspline1d_eval(cj, -newx[cond1])
res[cond2] = cspline1d_eval(cj, 2 * (N - 1) - newx[cond2])
newx = newx[cond3]
if newx.size == 0:
return res
result = zeros_like(newx)
jlower = floor(newx - 2).astype(int) + 1
for i in range(4):
thisj = jlower + i
indj = thisj.clip(0, N - 1) # handle edge cases
result += cj[indj] * cubic(newx - thisj)
res[cond3] = result
return res
def qspline1d_eval(cj, newx, dx=1.0, x0=0):
"""Evaluate a quadratic spline at the new set of points.
    `dx` is the old sample spacing while `x0` was the old origin. In
    other words, the old sample points (knot points) for which the `cj`
    represent spline coefficients were at equally-spaced points of::
oldx = x0 + j*dx j=0...N-1, with N=len(cj)
Edges are handled using mirror-symmetric boundary conditions.
"""
newx = (asarray(newx) - x0) / dx
res = zeros_like(newx)
if res.size == 0:
return res
N = len(cj)
cond1 = newx < 0
cond2 = newx > (N - 1)
cond3 = ~(cond1 | cond2)
# handle general mirror-symmetry
res[cond1] = qspline1d_eval(cj, -newx[cond1])
res[cond2] = qspline1d_eval(cj, 2 * (N - 1) - newx[cond2])
newx = newx[cond3]
if newx.size == 0:
return res
result = zeros_like(newx)
jlower = floor(newx - 1.5).astype(int) + 1
for i in range(3):
thisj = jlower + i
indj = thisj.clip(0, N - 1) # handle edge cases
result += cj[indj] * quadratic(newx - thisj)
res[cond3] = result
return res
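# --- usage sketch (illustrative addition, not part of the original module) ---
# Interpolating a sampled signal with a cubic spline: compute the coefficients
# once, then evaluate them at arbitrary points.
if __name__ == '__main__':
    import numpy as np
    y = np.sin(np.arange(10, dtype=float))       # samples at x = 0..9, dx = 1
    c = cspline1d(y)                              # cubic spline coefficients
    xi = np.linspace(0.0, 9.0, 5)
    print(cspline1d_eval(c, xi, dx=1.0, x0=0))    # interpolated values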
| jsilter/scipy | scipy/signal/bsplines.py | Python | bsd-3-clause | 11,622 |
# Copyright 2016 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
import json
import logging
import queue
import threading
from utils import file_path
class FileRefresherThread(object):
"""Represents a thread that periodically dumps result of a callback to a file.
Used by bot_main to send authentication headers to task_runner. task_runner
reads them from the file when making HTTP calls.
Uses JSON for serialization. Doesn't delete the file when stopped.
The instance is not reusable (i.e. once stopped, cannot be started again).
"""
def __init__(self, path, producer_callback, interval_sec=15):
self._path = path
self._producer_callback = producer_callback
self._interval_sec = interval_sec
self._thread = None
self._signal = queue.Queue()
self._last_dumped_blob = None
def start(self):
"""Starts a thread that dumps value to the file."""
assert self._thread is None
self._dump() # initial dump
self._thread = threading.Thread(
target=self._run, name='FileRefresherThread %s' % self._path)
self._thread.daemon = True
self._thread.start()
def stop(self):
"""Stops the dumping thread (if it is running)."""
if not self._thread:
return
self._signal.put(None)
self._thread.join(60) # don't wait forever
if self._thread.is_alive():
logging.error('FileRefresherThread failed to terminate in time')
def _dump(self):
"""Attempts to rewrite the file, retrying a bunch of times.
Returns:
True to carry on, False to exit the thread.
"""
try:
content = None
content = self._producer_callback()
blob = json.dumps(
content, sort_keys=True, indent=2,
separators=(',', ': ')).encode('utf-8')
except Exception:
logging.exception('Unexpected exception in the callback, content=%s',
content)
return True
if blob == self._last_dumped_blob:
return True # already have it on disk
    # On Windows the file may be locked by a reading process. Don't freak out,
# just retry a bit later.
attempts = 100
while True:
try:
file_path.atomic_replace(self._path, blob)
self._last_dumped_blob = blob
logging.info('Updated %s', self._path)
return True # success!
except (IOError, OSError) as e:
logging.error('Failed to update the file: %s', e)
if not attempts:
logging.error(
'Failed to update the file %s after many attempts, giving up',
self._path)
return True
attempts -= 1
if not self._wait(0.05):
return False
def _wait(self, timeout):
"""Waits for the given duration or until the stop signal.
Returns:
True if waited, False if received the stop signal.
"""
try:
self._signal.get(timeout=timeout)
return False
except queue.Empty:
return True
def _run(self):
while self._wait(self._interval_sec) and self._dump():
pass
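# --- usage sketch (illustrative addition, not part of the original module) ---
# Periodically dumps a set of auth headers to a JSON file for another process
# to read; the path and the callback below are placeholders.
if __name__ == '__main__':
  import time
  refresher = FileRefresherThread(
      '/tmp/auth_headers.json',
      lambda: {'Authorization': 'Bearer example-token'},
      interval_sec=1)
  refresher.start()
  time.sleep(3)
  refresher.stop()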
| luci/luci-py | appengine/swarming/swarming_bot/bot_code/file_refresher.py | Python | apache-2.0 | 3,125 |
#===========================================================================================================================
# aims : define both the 3-hidden-layer and the 4-hidden-layer models
#
# input : x : placeholder variable whose number of columns equals the number of features of the training matrix
#
# ninputdata_len: number of features of the training matrix
# nclasses: number of nodes in the output layer
# train_vars_name: set as initnet_vars_name in main_train_net.py or as ftnet_vars_name in 3_main_fine_tuning.py
#
# return : x : first layer
# out_layer : output layer
#===========================================================================================================================
import tensorflow as tf
from . import freeze
from . import utilities
n_nodes_hl1=500
n_nodes_hl2=500
n_nodes_hl3=500
n_nodes_hl4=500
#======================================================================================================
# I CREATE A 3HL FF MODEL
#======================================================================================================
# NETWORK: 3 hidden layers and 500 neurons per layer
def new_ff_model3(x, ninputdata_len, nclasses, train_vars_name):
# with tf.name_scope("inputs"):
# x = tf.placeholder(tf.float32, [None, ninputdata_len], name='I')
with tf.name_scope(train_vars_name):
W1 = weights([ninputdata_len, n_nodes_hl1], 'W1')
b1 = biases([n_nodes_hl1], 'b1')
W2 = weights([n_nodes_hl1, n_nodes_hl2], 'W2')
b2 = biases([n_nodes_hl2], 'b2')
W3 = weights([n_nodes_hl2, n_nodes_hl3], 'W3')
b3 = biases([n_nodes_hl3], 'b3')
WOUT = weights_out([n_nodes_hl3, nclasses], 'WOUT')
bOUT = biases_out([nclasses], 'bOUT')
# Hidden layer with RELU activation
layer_1 = tf.add(tf.matmul(x, W1), b1)
layer_1 = tf.nn.relu(layer_1)
# Hidden layer with RELU activation
layer_2 = tf.add(tf.matmul(layer_1, W2), b2)
layer_2 = tf.nn.relu(layer_2)
# Hidden layer with RELU activation
layer_3 = tf.add(tf.matmul(layer_2, W3), b3)
layer_3 = tf.nn.relu(layer_3)
# Output layer with linear activation
out_layer = tf.add(tf.matmul(layer_3, WOUT), bOUT, name='O')
soft_layer = tf.nn.softmax(out_layer, name='SMO')
return {'input': x, 'output': out_layer}
#======================================================================================================
# I CREATE A 4HL FF MODEL
#======================================================================================================
# NETWORK: 4 hidden layers and 500 neurons per layer
def new_ff_model4(x, ninputdata_len, nclasses, train_vars_name):
# with tf.name_scope("inputs"):
# x = tf.placeholder(tf.float32, [None, ninputdata_len], name='I')
with tf.name_scope(train_vars_name):
W1 = weightsUniform([ninputdata_len, n_nodes_hl1], 'W1')
b1 = biases([n_nodes_hl1], 'b1')
W2 = weightsUniform([n_nodes_hl1, n_nodes_hl2], 'W2')
b2 = biases([n_nodes_hl2], 'b2')
W3 = weightsUniform([n_nodes_hl2, n_nodes_hl3], 'W3')
b3 = biases([n_nodes_hl3], 'b3')
W4 = weightsUniform([n_nodes_hl3, n_nodes_hl4], 'W4')
b4 = biases([n_nodes_hl4], 'b4')
WOUT = weightsUniform_out([n_nodes_hl4, nclasses], 'WOUT')
bOUT = biases_out([nclasses], 'bOUT')
# Hidden layer with RELU activation
layer_1 = tf.add(tf.matmul(x, W1), b1)
layer_1 = tf.nn.relu(layer_1)
# Hidden layer with RELU activation
layer_2 = tf.add(tf.matmul(layer_1, W2), b2)
layer_2 = tf.nn.relu(layer_2)
# Hidden layer with RELU activation
layer_3 = tf.add(tf.matmul(layer_2, W3), b3)
layer_3 = tf.nn.relu(layer_3)
# Hidden layer with RELU activation
layer_4 = tf.add(tf.matmul(layer_3, W4), b4)
layer_4 = tf.nn.relu(layer_4)
# Output layer with linear activation
out_layer = tf.add(tf.matmul(layer_4, WOUT), bOUT, name='O')
soft_layer = tf.nn.softmax(out_layer, name='SMO')
return {'input': x, 'output': out_layer}
# ======================================================================================================
# I ADAPT AN EXISTING 3HL FF NET => 4HL NET
# ======================================================================================================
# I inherit from a graph the 2nd & 3rd hidden layers' weights
# I create a 4th hidden layer
# I will train the latter + the 1st and the OutLayer
def adapt_ff_model3(x, ninputdata_len, noutputclasses, train_vars_name, graph, prefix=None):
if prefix is not None:
prefix = prefix + "/"
else:
prefix = ""
nodes = [n.name for n in graph.as_graph_def().node]
W2 = utilities.getNodeBySubstring(graph, prefix + 'model/W2', nodes)
W3 = utilities.getNodeBySubstring(graph, prefix + 'model/W3', nodes)
b2 = utilities.getNodeBySubstring(graph, prefix + 'model/b2', nodes)
b3 = utilities.getNodeBySubstring(graph, prefix + 'model/b3', nodes)
with tf.variable_scope(train_vars_name):
W11p = weights([ninputdata_len, n_nodes_hl1], 'W11p')
b11p = biases([n_nodes_hl1], 'b11p')
W4 = weights([n_nodes_hl3, n_nodes_hl4], 'W4')
b4 = biases([n_nodes_hl4], 'b4')
WOUT1p = weights_out([n_nodes_hl3, noutputclasses], 'WOUT1p')
bOUT1p = biases_out([noutputclasses], 'bOUT1p')
# Hidden layer with RELU activation
layer_1 = tf.add(tf.matmul(x, W11p), b11p)
layer_1 = tf.nn.relu(layer_1)
# Hidden layer with RELU activation
layer_2 = tf.add(tf.matmul(layer_1, W2), b2)
layer_2 = tf.nn.relu(layer_2)
# Hidden layer with RELU activation
layer_3 = tf.add(tf.matmul(layer_2, W3), b3)
layer_3 = tf.nn.relu(layer_3)
# Hidden layer with RELU activation
layer_4 = tf.add(tf.matmul(layer_3, W4), b4)
layer_4 = tf.nn.relu(layer_4)
# Output layer with linear activation
out_layer = tf.add(tf.matmul(layer_4, WOUT1p), bOUT1p, name='O')
soft_layer = tf.nn.softmax(out_layer, name='SMO')
return {'input': x, 'output': out_layer}
# ======================================================================================================
# I RE-ADAPT AN EXISTING 4HL FF NET => 4HL NET
# ======================================================================================================
# A)
# I inherit from a graph the 2nd & 3rd & 4th hidden layers' weights
# I will train first and last layer
def readapt_ff_adaptedmodel(x, ninputdata_len, noutputclasses, train_vars_name, graph, prefix=None):
if prefix is not None:
prefix = prefix + "/"
else:
prefix = ""
nodes = [n.name for n in graph.as_graph_def().node]
W2 = utilities.getNodeBySubstring(graph, prefix + 'model/W2', nodes)
W3 = utilities.getNodeBySubstring(graph, prefix + 'model/W3', nodes)
W4 = utilities.getNodeBySubstring(graph, prefix + train_vars_name + '/W4', nodes)
b2 = utilities.getNodeBySubstring(graph, prefix + 'model/b2', nodes)
b3 = utilities.getNodeBySubstring(graph, prefix + 'model/b3', nodes)
b4 = utilities.getNodeBySubstring(graph, prefix + train_vars_name + '/b4', nodes)
with tf.variable_scope(train_vars_name):
W11p = weights([ninputdata_len, n_nodes_hl1], 'W11p')
b11p = biases([n_nodes_hl1], 'b11p')
WOUT1p = weights_out([n_nodes_hl3, noutputclasses], 'WOUT1p')
bOUT1p = biases_out([noutputclasses], 'bOUT1p')
# Hidden layer with RELU activation
layer_1 = tf.add(tf.matmul(x, W11p), b11p)
layer_1 = tf.nn.relu(layer_1)
# Hidden layer with RELU activation
layer_2 = tf.add(tf.matmul(layer_1, W2), b2)
layer_2 = tf.nn.relu(layer_2)
# Hidden layer with RELU activation
layer_3 = tf.add(tf.matmul(layer_2, W3), b3)
layer_3 = tf.nn.relu(layer_3)
# Hidden layer with RELU activation
layer_4 = tf.add(tf.matmul(layer_3, W4), b4)
layer_4 = tf.nn.relu(layer_4)
# Output layer with linear activation
out_layer = tf.add(tf.matmul(layer_4, WOUT1p), bOUT1p, name='O')
soft_layer = tf.nn.softmax(out_layer, name='SMO')
return {'input': x, 'output': out_layer}
# B)
# I inherit all the existing weights (node names as created by adapt_ff_model3)
# I train everything
def readapt_ff_adaptedmodel_2(x, ninputdata_len, noutputclasses, train_vars_name, graph, prefix=None):
if prefix is not None: prefix = prefix + "/"
    # fetch weights as well as biases (tensor names follow adapt_ff_model3's 'model' scope)
    W11p = graph.get_tensor_by_name('model/W11p:0')
    W2 = graph.get_tensor_by_name('model/W2:0')
    W3 = graph.get_tensor_by_name('model/W3:0')
    W4 = graph.get_tensor_by_name('model/W4:0')
    WOUT1p = graph.get_tensor_by_name('model/WOUT1p:0')
    b11p = graph.get_tensor_by_name('model/b11p:0')
    b2 = graph.get_tensor_by_name('model/b2:0')
    b3 = graph.get_tensor_by_name('model/b3:0')
    b4 = graph.get_tensor_by_name('model/b4:0')
    bOUT1p = graph.get_tensor_by_name('model/bOUT1p:0')
# with tf.variable_scope(train_vars_name):
# W11p = weights([ninputdata_len, n_nodes_hl1], 'W11p')
# b11p = biases([n_nodes_hl1], 'b11p')
# WOUT1p = weights_out([n_nodes_hl3, noutputclasses], 'WOUT1p')
# bOUT1p = biases_out([noutputclasses], 'bOUT1p')
# Hidden layer with RELU activation
layer_1 = tf.add(tf.matmul(x, W11p), b11p)
layer_1 = tf.nn.relu(layer_1)
# Hidden layer with RELU activation
layer_2 = tf.add(tf.matmul(layer_1, W2), b2)
layer_2 = tf.nn.relu(layer_2)
# Hidden layer with RELU activation
layer_3 = tf.add(tf.matmul(layer_2, W3), b3)
layer_3 = tf.nn.relu(layer_3)
# Hidden layer with RELU activation
layer_4 = tf.add(tf.matmul(layer_3, W4), b4)
layer_4 = tf.nn.relu(layer_4)
# Output layer with linear activation
out_layer = tf.add(tf.matmul(layer_4, WOUT1p), bOUT1p, name='O')
soft_layer = tf.nn.softmax(out_layer, name='SMO')
return {'input': x, 'output': out_layer}
# ======================================================================================================
# ======================================================================================================
def weights(shape, name):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial, name=name)
def weights_out(shape, name):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial, name=name)
def weightsUniform(shape, name):
initial = tf.random_uniform(shape, minval=-0.1, maxval=0.1)
return tf.Variable(initial, name=name)
def weightsUniform_out(shape, name):
initial = tf.random_uniform(shape, minval=-0.1, maxval=0.1)
return tf.Variable(initial, name=name)
def biases(shape,name):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial, name=name)
def biases_out(shape,name):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial, name=name)
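# --- usage sketch (illustrative addition, not part of the original module) ---
# Builds the 3-hidden-layer net for a hypothetical feature/class count and
# prints the output tensor; assumes a TensorFlow 1.x runtime, like the rest of
# this module.
if __name__ == '__main__':
    n_features, n_classes = 792, 10   # placeholder sizes
    x_in = tf.placeholder(tf.float32, [None, n_features], name='I')
    net = new_ff_model3(x_in, n_features, n_classes, 'model')
    print(net['output'])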
| allspeak/api.allspeak.eu | web/project/training_api/libs/models.py | Python | mit | 10,685 |
# built-ins
import os
import argparse
from math import floor, ceil
from random import shuffle
import json
import cPickle as pck
import itertools as it
from operator import __or__, __not__
from functools import partial
import fnmatch
# external libraries
import numpy as np
from gala import imio
from scipy import spatial
# local modules
from syngeo import io
# "constants"
pat_session_data = '/groups/flyem/data/temp/temp-session/noise/rivlinp.20120508.noise1_0_ground_truth/sessiondata'
proofreaders = ['roxanne', 'leiann', 'shirley', 'ashley', 'omotara', 'mat', 'chris', 'satoko']
ress = ['5nm', '7.5nm', '10nm', '15nm']
noise_levels = [0, 1, 2, 4, 8, 16]
d = '/groups/flyem/data/image_analysis_experiment/assignments'
def compose(f1, f2):
def f(*args, **kwargs):
return f1(f2(*args, **kwargs))
return f
def on_boundary(coords, volshape, margin=20):
on = [(coords[i] < margin or coords[i] > volshape[i] - margin)
for i in range(len(coords))]
return reduce(__or__, on)
def stratified_slices(total, nslices):
if total % nslices == 0:
size = total/nslices
starts, ends = range(0, total, size), range(size, total+size, size)
else:
size_l = int(ceil(float(total)/nslices))
size_s = int(floor(total/nslices))
num_l = total % nslices
num_s = nslices - num_l
switch = num_l * size_l
starts = range(0, switch, size_l) + range(switch, total, size_s)
ends = range(size_l, switch, size_l) + range(switch, total+1, size_s)
return [slice(s, e) for s, e in zip(starts, ends)]
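# Example (illustrative addition, not part of the original module):
# stratified_slices(10, 3) -> [slice(0, 4), slice(4, 7), slice(7, 10)],
# i.e. the 10 items are split into near-equal contiguous chunks.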
def tracing_accuracy_matrix(base_dir, gt_vol, proofreaders=proofreaders,
nposts=161, assign_dir='assignments', exports_dir='exports'):
"""Return synapse-tracing accuracy statistics.
Note: assume all proofreaders have same set of assignments.
"""
assign_dir = os.path.join(base_dir, assign_dir)
num_conds = len(os.listdir(os.path.join(assign_dir, proofreaders[0])))
conds = [
proofreader_stack_to_resolution(assign_dir, proofreaders[0], '%02i'%c)
for c in range(num_conds)]
ress = np.unique(zip(*conds)[0])
noises = np.unique(zip(*conds)[1])
acc = np.zeros((nposts, len(proofreaders), len(ress), len(noises), 2))
all_coords = []
unc = np.zeros((nposts, len(proofreaders), len(ress), len(noises)))
for c in range(num_conds):
for p, prf in enumerate(proofreaders):
r, n = proofreader_stack_to_resolution(assign_dir, prf, '%02i'%c)
ri, ni = np.flatnonzero(r==ress), np.flatnonzero(n==noises)
fn = os.path.join(base_dir, exports_dir, prf, '%02i'%c,
'annotations-bookmarks.json')
coords, uncertain = bookmark_coords(fn, r/10.0)
all_coords.extend(coords)
ids, starts, ends = map(compose(np.floor, np.array), zip(*coords))
ids = ids.astype(int)
for i in range(starts.ndim):
if starts[0, i] < 0:
starts[:, i] += gt_vol.shape[i]
ends[:, i] += gt_vol.shape[i]
startsr = np.ravel_multi_index(starts.T.astype(int), gt_vol.shape)
endsr = np.ravel_multi_index(ends.T.astype(int), gt_vol.shape)
acc[ids, p, ri, ni, 1] += 1
start_ids = gt_vol.ravel()[startsr]
end_ids = gt_vol.ravel()[endsr]
if (ids == 157).sum() > 0:
start_ids[np.flatnonzero(ids==157)] = 277078
correct = np.flatnonzero(start_ids == end_ids)
acc[ids[correct], p, ri, ni, 0] += 1
unc[uncertain, p, ri, ni] += 1
return acc, all_coords, unc
def proofreader_stack_to_resolution(assign_dir, proofreader, stack_num_str,
prefix='postsyn'):
d = os.path.join(assign_dir, proofreader)
fn = fnmatch.filter(os.listdir(d), prefix+'-'+stack_num_str+'-*.json')[0]
fnsplit = fn.rsplit('.', 1)[0].split('-')
return float(fnsplit[3].rstrip('unm')), int(fnsplit[5])
def get_bookmark_distances(bookmarks):
return [(nid, spatial.distance.euclidean(loc1, loc2))
for nid, loc1, loc2 in bookmarks]
def bookmark_coords(fn, scale=1.0, transform=True):
"""Return triples of (bookmark-id, start-coord, end-coord)."""
bookmarks = {}
uncertain = []
with open(fn, 'r') as f: data = json.load(f)['data']
for textid, location in sorted([(d['text'],
scale * io.raveler_to_numpy_coord_transform(d['location']))
for d in data]):
if textid.endswith('-start'):
nid = int(textid.split('-')[0])
if bookmarks.has_key(nid):
bookmarks[nid].insert(0, location)
else:
bookmarks[nid] = [location]
else:
nid = int(textid.rstrip('?'))
if bookmarks.has_key(nid):
bookmarks[nid].append(location)
else:
bookmarks[nid] = [location]
if textid.endswith('?'):
uncertain.append(nid)
return [(nid, loc1, loc2) for nid, (loc1, loc2) in bookmarks.items()], \
uncertain
parser = argparse.ArgumentParser(
description='Create synapse tracing assignments for proofreaders.')
parser.add_argument('session/json', help='The session or synapse json '+
'containing the synapses to be traced.')
parser.add_argument('output-dir', help='Where to write the data.')
parser.add_argument('-P', '--proofreaders', nargs='+',
help='The names of all the proofreaders.')
parser.add_argument('-r', '--resolutions', type=str, nargs='+', default=ress,
help='The resolutions of the base stacks.')
parser.add_argument('-R', '--annotation-resolution', type=float,
help='The resolution at which the annotations were produced.')
parser.add_argument('-N', '--noise-levels', type=int, nargs='+',
default=noise_levels, help='The noise levels of the base stacks.')
parser.add_argument('-s', '--shape', type=partial(eval, globals={}),
help='The shape of the volume.', default=(1500, 1100, 280))
parser.add_argument('-m', '--margin', type=int, default=20,
help='The margin in which to remove postsynaptics.')
if __name__ == '__main__':
args = parser.parse_args()
posts = io.all_postsynaptic_sites(io.synapses_from_raveler_session_data(
getattr(args, 'session/json')))
posts2 = filter(compose(__not__,
partial(on_boundary, volshape=args.shape, margin=args.margin)), posts)
posts2 = [p for p in posts2 if not on_boundary(p, args.shape, 20)]
float_ress = np.array([float(r.rstrip('unm')) for r in args.resolutions])
relative_float_ress = float_ress / args.annotation_resolution
npo = len(posts2)
ids = range(npo)
aps = np.array(posts2)
apss = [(aps/r).round() for r in relative_float_ress]
apss = [np.concatenate((np.array(ids)[:, np.newaxis], ap), axis=1)
for ap in apss]
apsd = {n: ap for n, ap in zip(args.resolutions, apss)}
conds = list(it.product(args.resolutions, args.noise_levels))
which = stratified_slices(npo, len(conds))
for pr in args.proofreaders:
shuffle(conds)
shuffle(ids)
odir = os.path.join(getattr(args, 'output-dir'), pr)
os.makedirs(odir)
for i, (r, n) in enumerate(conds):
locations = map(list, list(apsd[r]))
locations = [locations[j] for j in ids[which[i]]]
bkmarks = [{'location': loc[1:], 'body ID': -1,
'text': str(loc[0])+'-start'} for loc in locations]
bmdict = {'metadata':
{'description': 'bookmarks', 'file version': 1},
'data': bkmarks}
fn = os.path.join(odir,
'postsyn-%02i-res-%s-noise-%02i.json' % (i, r, n))
with open(fn, 'w') as f:
json.dump(bmdict, f, indent=4)
| janelia-flyem/synapse-geometry | syngeo/assignments.py | Python | bsd-3-clause | 7,886 |
from setuptools import setup, find_packages
setup(
name="ssid-of-st-john",
version="0.1",
packages=find_packages(),
) | lengau/ssid-of-st-john | setup.py | Python | apache-2.0 | 130 |
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import logging
import threading
import traceback
import gear
from zuul.merger import merger
class MergeServer(object):
log = logging.getLogger("zuul.MergeServer")
def __init__(self, config, connections={}):
self.config = config
self.zuul_url = config.get('merger', 'zuul_url')
if self.config.has_option('merger', 'git_dir'):
merge_root = self.config.get('merger', 'git_dir')
else:
merge_root = '/var/lib/zuul/git'
if self.config.has_option('merger', 'git_user_email'):
merge_email = self.config.get('merger', 'git_user_email')
else:
merge_email = None
if self.config.has_option('merger', 'git_user_name'):
merge_name = self.config.get('merger', 'git_user_name')
else:
merge_name = None
self.merger = merger.Merger(merge_root, connections, merge_email,
merge_name)
def start(self):
self._running = True
server = self.config.get('gearman', 'server')
if self.config.has_option('gearman', 'port'):
port = self.config.get('gearman', 'port')
else:
port = 4730
self.worker = gear.Worker('Zuul Merger')
self.worker.addServer(server, port)
self.log.debug("Waiting for server")
self.worker.waitForServer()
self.log.debug("Registering")
self.register()
self.log.debug("Starting worker")
self.thread = threading.Thread(target=self.run)
self.thread.daemon = True
self.thread.start()
def register(self):
self.worker.registerFunction("merger:merge")
self.worker.registerFunction("merger:update")
def stop(self):
self.log.debug("Stopping")
self._running = False
self.worker.shutdown()
self.log.debug("Stopped")
def join(self):
self.thread.join()
def run(self):
self.log.debug("Starting merge listener")
while self._running:
try:
job = self.worker.getJob()
try:
if job.name == 'merger:merge':
self.log.debug("Got merge job: %s" % job.unique)
self.merge(job)
elif job.name == 'merger:update':
self.log.debug("Got update job: %s" % job.unique)
self.update(job)
else:
self.log.error("Unable to handle job %s" % job.name)
job.sendWorkFail()
except Exception:
self.log.exception("Exception while running job")
job.sendWorkException(traceback.format_exc())
except Exception:
self.log.exception("Exception while getting job")
def merge(self, job):
args = json.loads(job.arguments)
commit = self.merger.mergeChanges(args['items'])
result = dict(merged=(commit is not None),
commit=commit,
zuul_url=self.zuul_url)
job.sendWorkComplete(json.dumps(result))
def update(self, job):
args = json.loads(job.arguments)
self.merger.updateRepo(args['project'], args['url'])
result = dict(updated=True,
zuul_url=self.zuul_url)
job.sendWorkComplete(json.dumps(result))
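# --- usage sketch (illustrative addition, not part of the original module) ---
# Minimal standalone run; the gearman server address and zuul_url below are
# placeholders, and the Python 2 ConfigParser module matches the era of this
# code.
if __name__ == '__main__':
    import ConfigParser
    config = ConfigParser.ConfigParser()
    config.add_section('merger')
    config.set('merger', 'zuul_url', 'http://zuul.example.org/p')
    config.add_section('gearman')
    config.set('gearman', 'server', '127.0.0.1')
    server = MergeServer(config)
    server.start()
    server.join()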
| wikimedia/integration-zuul | zuul/merger/server.py | Python | apache-2.0 | 4,031 |
"""
awslimitchecker/services/s3.py
The latest version of this package is available at:
<https://github.com/jantman/awslimitchecker>
################################################################################
Copyright 2015-2018 Jason Antman <[email protected]>
This file is part of awslimitchecker, also known as awslimitchecker.
awslimitchecker is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
awslimitchecker is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with awslimitchecker. If not, see <http://www.gnu.org/licenses/>.
The Copyright and Authors attributions contained herein may not be removed or
otherwise altered, except to add the Author attribution of a contributor to
this work. (Additional Terms pursuant to Section 7b of the AGPL v3)
################################################################################
While not legally required, I sincerely request that anyone who finds
bugs please submit them at <https://github.com/jantman/awslimitchecker> or
to me via email, and that you send any contributions or improvements
either as a pull request on GitHub, or to me via email.
################################################################################
AUTHORS:
Jason Antman <[email protected]> <http://www.jasonantman.com>
################################################################################
"""
import abc # noqa
import logging
from .base import _AwsService
from ..limit import AwsLimit
logger = logging.getLogger(__name__)
class _S3Service(_AwsService):
service_name = 'S3'
api_name = 's3' # AWS API name to connect to (boto3.client)
def find_usage(self):
"""
Determine the current usage for each limit of this service,
and update corresponding Limit via
:py:meth:`~.AwsLimit._add_current_usage`.
"""
logger.debug("Checking usage for service %s", self.service_name)
self.connect_resource()
for lim in self.limits.values():
lim._reset_usage()
count = 0
for bkt in self.resource_conn.buckets.all():
count += 1
self.limits['Buckets']._add_current_usage(
count, aws_type='AWS::S3::Bucket'
)
self._have_usage = True
logger.debug("Done checking usage.")
def get_limits(self):
"""
Return all known limits for this service, as a dict of their names
to :py:class:`~.AwsLimit` objects.
:returns: dict of limit names to :py:class:`~.AwsLimit` objects
:rtype: dict
"""
if self.limits != {}:
return self.limits
limits = {}
limits['Buckets'] = AwsLimit(
'Buckets',
self,
100,
self.warning_threshold,
self.critical_threshold,
limit_type='AWS::S3::Bucket',
)
self.limits = limits
return limits
def required_iam_permissions(self):
"""
Return a list of IAM Actions required for this Service to function
properly. All Actions will be shown with an Effect of "Allow"
and a Resource of "*".
:returns: list of IAM Action strings
:rtype: list
"""
return [
"s3:ListAllMyBuckets",
]
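# --- usage sketch (illustrative addition, not part of the original module) ---
# Listing the limits and IAM actions this service knows about; passing the
# warning/critical thresholds positionally is an assumption about the
# _AwsService base class constructor.
if __name__ == '__main__':
    svc = _S3Service(80, 99)
    print(sorted(svc.get_limits().keys()))     # ['Buckets']
    print(svc.required_iam_permissions())      # ['s3:ListAllMyBuckets']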
| jantman/awslimitchecker | awslimitchecker/services/s3.py | Python | agpl-3.0 | 3,754 |
# -*- coding: utf-8 -*-
import json
from functools import reduce
import pycurl
from pyload.core.network.http.exceptions import BadHeader
from ..base.multi_account import MultiAccount
def args(**kwargs):
return kwargs
class MegaDebridEu(MultiAccount):
__name__ = "MegaDebridEu"
__type__ = "account"
__version__ = "0.37"
__status__ = "testing"
__config__ = [
("mh_mode", "all;listed;unlisted", "Filter hosters to use", "all"),
("mh_list", "str", "Hoster list (comma separated)", ""),
("mh_interval", "int", "Reload interval in hours", 12),
]
__description__ = """Mega-debrid.eu account plugin"""
__license__ = "GPLv3"
__authors__ = [
("Devirex Hazzard", "[email protected]"),
("GammaC0de", "nitzo2001[AT]yahoo[DOT]com"),
("FoxyDarnec", "goupildavid[AT]gmail[DOT]com"),
]
LOGIN_TIMEOUT = -1
API_URL = "https://www.mega-debrid.eu/api.php"
def api_response(self, action, get={}, post={}):
get["action"] = action
# Better use pyLoad User-Agent so we don't get blocked
self.req.http.c.setopt(
pycurl.USERAGENT, "pyLoad/{}".format(self.pyload.version).encode()
)
json_data = self.load(self.API_URL, get=get, post=post)
return json.loads(json_data)
def grab_hosters(self, user, password, data):
hosters = []
try:
res = self.api_response("getHostersList")
except BadHeader as exc:
if exc.code == 405:
self.log_error(self._("Unable to retrieve hosters list: Banned IP"))
else:
self.log_error(
self._("Unable to retrieve hosters list: error {}"), exc.code
)
else:
if res["response_code"] == "ok":
hosters = reduce(
(lambda x, y: x + y),
[
h["domains"]
for h in res["hosters"]
if "domains" in h and isinstance(h["domains"], list)
],
)
else:
self.log_error(
self._("Unable to retrieve hoster list: {}").format(
res["response_text"]
)
)
return hosters
def grab_info(self, user, password, data):
validuntil = None
trafficleft = None
premium = False
cache_info = data.get("cache_info", {})
if user in cache_info:
validuntil = float(cache_info[user]["vip_end"])
premium = validuntil > 0
trafficleft = -1
return {
"validuntil": validuntil,
"trafficleft": trafficleft,
"premium": premium,
}
def signin(self, user, password, data):
cache_info = self.db.retrieve("cache_info", {})
if user in cache_info:
data["cache_info"] = cache_info
self.skip_login()
try:
res = self.api_response("connectUser", args(login=user, password=password))
except BadHeader as exc:
if exc.code == 401:
self.fail_login()
elif exc.code == 405:
self.fail(self._("Banned IP"))
else:
raise
if res["response_code"] != "ok":
cache_info.pop(user, None)
data["cache_info"] = cache_info
self.db.store("cache_info", cache_info)
if res["response_code"] == "UNKNOWN_USER":
self.fail_login()
elif res["response_code"] == "UNALLOWED_IP":
self.fail_login(self._("Banned IP"))
else:
self.log_error(res["response_text"])
self.fail_login(res["response_text"])
else:
cache_info[user] = {"vip_end": res["vip_end"], "token": res["token"]}
data["cache_info"] = cache_info
self.db.store("cache_info", cache_info)
def relogin(self):
if self.req:
cache_info = self.info["data"].get("cache_info", {})
cache_info.pop(self.user, None)
self.info["data"]["cache_info"] = cache_info
self.db.store("cache_info", cache_info)
return MultiAccount.relogin(self)
| vuolter/pyload | src/pyload/plugins/accounts/MegaDebridEu.py | Python | agpl-3.0 | 4,359 |
class PhaseContent():
features = {}
@property
def identifier(self):
return '{s.app}:{s.phase}'.format(s=self)
def __str__(self):
return '{s.__class__.__name__} ({s.app}:{s.phase})'.format(s=self)
def has_feature(self, feature, model):
return model in self.features.get(feature, [])
class PhaseContents():
_registry = {}
def __getitem__(self, identifier):
if type(identifier) != str:
raise TypeError('Phase identifier must be str')
return self._registry[identifier]
def __contains__(self, identifier):
return identifier in self._registry
def register(self, phase):
self._registry[phase.identifier] = phase
def as_choices(self):
return [(identifier, str(phase))
for identifier, phase in self._registry.items()]
content = PhaseContents()
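# --- usage sketch (illustrative addition, not part of the original module) ---
# Registering a hypothetical phase and looking it up by identifier; the app,
# phase and feature names below are placeholders.
if __name__ == '__main__':
    class CommentPhase(PhaseContent):
        app = 'comments'
        phase = 'comment'
        features = {'comment': ['comments.Comment']}
    content.register(CommentPhase())
    phase = content['comments:comment']
    print(phase.has_feature('comment', 'comments.Comment'))   # True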
| liqd/adhocracy4 | adhocracy4/phases/contents.py | Python | agpl-3.0 | 879 |
import os
from kennyg.element import ValueCollector, Value, KeyValueCollector, KeyValue
from kennyg.sax_handler import KennyGSAXHandler
from kennyg.parser import parse, parseString
__author__ = 'brent'
def get_data_filepath(name):
return os.path.join(os.path.dirname(os.path.abspath(__file__)),"data",name)
def test_naive_sax_handler():
d = []
kg = KennyGSAXHandler(action_tree={'a':{'b':{'c':{'value':lambda x, *args: d.append(x)}}}})
parse(get_data_filepath('simple.xml'), kg)
print d
assert ''.join(d) == '123456'
def test_key_value_collector():
d = []
vc = KeyValueCollector(e=KeyValue('egg'), g=KeyValue('G'), h=KeyValue('H'))
kg = KennyGSAXHandler(action_tree={'a':vc})
xml = "<a><b><c>1</c><c/></b><e>e_value</e><b>bbbb</b><g/></a>"
parseString(xml, kg)
assert vc.collection['egg'].strip() == 'e_value'
assert vc.collection['G'].strip() == ''
exception = False
try:
v = vc.collection['H']
except KeyError as _: # noqa
exception = True
assert exception
def test_value_collector():
d = []
vc = ValueCollector(b=ValueCollector(c=Value()))
kg = KennyGSAXHandler(action_tree={'a':vc})
parse(get_data_filepath('simple.xml'), kg)
print vc.collection
assert ''.join(vc.collection[0]) == '123'
assert ''.join(vc.collection[1]) == '456'
def test_keyvalue_list():
xml = "<a><b><c>agent</c></b></a>"
vc = ValueCollector({u'b':KeyValueCollector({u'c': KeyValue(key='key'),})})
kv = KeyValueCollector({u'a': KeyValue(key=u'wrapped', obj=vc)})
kg = KennyGSAXHandler(kv)
parseString(xml, kg)
print kv.collection
| brentpayne/kennyg | tests/test_parser.py | Python | lgpl-3.0 | 1,651 |
# Copyright 2012 Managed I.T.
#
# Author: Kiall Mac Innes <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from designate import exceptions
from designate.schema import format
from designate.schema import resolvers
from designate.schema import validators
from designate import utils
LOG = logging.getLogger(__name__)
class Schema(object):
def __init__(self, version, name):
self.raw_schema = utils.load_schema(version, name)
self.resolver = resolvers.LocalResolver.from_schema(
version, self.raw_schema)
if version in ['v2', 'admin']:
self.validator = validators.Draft4Validator(
self.raw_schema, resolver=self.resolver,
format_checker=format.draft4_format_checker)
else:
raise Exception('Unknown API version: %s' % version)
@property
def schema(self):
return self.validator.schema
@property
def properties(self):
return self.schema['properties']
@property
def links(self):
return self.schema['links']
@property
def raw(self):
return self.raw_schema
def validate(self, obj):
LOG.debug('Validating values: %r' % obj)
errors = []
for error in self.validator.iter_errors(obj):
errors.append({
'path': ".".join([str(x) for x in error.path]),
'message': error.message,
'validator': error.validator
})
if len(errors) > 0:
LOG.debug('Errors in validation: %r' % errors)
raise exceptions.InvalidObject("Provided object does not match "
"schema", errors=errors)
def filter(self, instance, properties=None):
if not properties:
properties = self.properties
filtered = {}
for name, subschema in list(properties.items()):
if 'type' in subschema and subschema['type'] == 'array':
subinstance = instance.get(name, None)
filtered[name] = self._filter_array(subinstance, subschema)
elif 'type' in subschema and subschema['type'] == 'object':
subinstance = instance.get(name, None)
properties = subschema['properties']
filtered[name] = self.filter(subinstance, properties)
else:
filtered[name] = instance.get(name, None)
return filtered
def _filter_array(self, instance, schema):
if 'items' in schema and isinstance(schema['items'], list):
# NOTE(kiall): We currently don't make use of this..
raise NotImplementedError()
elif 'items' in schema:
schema = schema['items']
if '$ref' in schema:
with self.resolver.resolving(schema['$ref']) as ischema:
schema = ischema
properties = schema['properties']
return [self.filter(i, properties) for i in instance]
elif 'properties' in schema:
schema = schema['properties']
with self.resolver.resolving(schema['$ref']) as ischema:
schema = ischema
return [self.filter(i, schema) for i in instance]
else:
raise NotImplementedError('Can\'t filter unknown array type')
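# --- usage sketch (illustrative addition, not part of the original module) ---
# Validating a hypothetical object; the schema name and the object fields
# below are placeholders and may not match the schemas bundled with a given
# release.
if __name__ == '__main__':
    schema = Schema('admin', 'quota')
    schema.validate({'quota': {'zones': 10}})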
| openstack/designate | designate/schema/__init__.py | Python | apache-2.0 | 3,897 |
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import re
import subprocess
from luigi import six
import luigi.configuration
import luigi.contrib.hadoop
import luigi.contrib.hadoop_jar
import luigi.contrib.hdfs
from luigi import LocalTarget
from luigi.task import flatten
logger = logging.getLogger('luigi-interface')
"""
Scalding support for Luigi.
Example configuration section in client.cfg::
[scalding]
# scala home directory, which should include a lib subdir with scala jars.
scala-home: /usr/share/scala
# scalding home directory, which should include a lib subdir with
# scalding-*-assembly-* jars as built from the official Twitter build script.
scalding-home: /usr/share/scalding
# provided dependencies, e.g. jars required for compiling but not executing
# scalding jobs. Currently requred jars:
# org.apache.hadoop/hadoop-core/0.20.2
# org.slf4j/slf4j-log4j12/1.6.6
# log4j/log4j/1.2.15
# commons-httpclient/commons-httpclient/3.1
# commons-cli/commons-cli/1.2
# org.apache.zookeeper/zookeeper/3.3.4
scalding-provided: /usr/share/scalding/provided
# additional jars required.
scalding-libjars: /usr/share/scalding/libjars
"""
class ScaldingJobRunner(luigi.contrib.hadoop.JobRunner):
"""
JobRunner for `pyscald` commands. Used to run a ScaldingJobTask.
"""
def __init__(self):
conf = luigi.configuration.get_config()
default = os.environ.get('SCALA_HOME', '/usr/share/scala')
self.scala_home = conf.get('scalding', 'scala-home', default)
default = os.environ.get('SCALDING_HOME', '/usr/share/scalding')
self.scalding_home = conf.get('scalding', 'scalding-home', default)
self.provided_dir = conf.get(
'scalding', 'scalding-provided', os.path.join(default, 'provided'))
self.libjars_dir = conf.get(
'scalding', 'scalding-libjars', os.path.join(default, 'libjars'))
self.tmp_dir = LocalTarget(is_tmp=True)
def _get_jars(self, path):
return [os.path.join(path, j) for j in os.listdir(path)
if j.endswith('.jar')]
def get_scala_jars(self, include_compiler=False):
lib_dir = os.path.join(self.scala_home, 'lib')
jars = [os.path.join(lib_dir, 'scala-library.jar')]
# additional jar for scala 2.10 only
reflect = os.path.join(lib_dir, 'scala-reflect.jar')
if os.path.exists(reflect):
jars.append(reflect)
if include_compiler:
jars.append(os.path.join(lib_dir, 'scala-compiler.jar'))
return jars
def get_scalding_jars(self):
lib_dir = os.path.join(self.scalding_home, 'lib')
return self._get_jars(lib_dir)
def get_scalding_core(self):
lib_dir = os.path.join(self.scalding_home, 'lib')
for j in os.listdir(lib_dir):
if j.startswith('scalding-core-'):
p = os.path.join(lib_dir, j)
logger.debug('Found scalding-core: %s', p)
return p
raise luigi.contrib.hadoop.HadoopJobError('Could not find scalding-core.')
def get_provided_jars(self):
return self._get_jars(self.provided_dir)
def get_libjars(self):
return self._get_jars(self.libjars_dir)
def get_tmp_job_jar(self, source):
job_name = os.path.basename(os.path.splitext(source)[0])
return os.path.join(self.tmp_dir.path, job_name + '.jar')
def get_build_dir(self, source):
build_dir = os.path.join(self.tmp_dir.path, 'build')
return build_dir
def get_job_class(self, source):
# find name of the job class
# usually the one that matches file name or last class that extends Job
job_name = os.path.splitext(os.path.basename(source))[0]
package = None
job_class = None
for l in open(source).readlines():
p = re.search(r'package\s+([^\s\(]+)', l)
if p:
package = p.groups()[0]
p = re.search(r'class\s+([^\s\(]+).*extends\s+.*Job', l)
if p:
job_class = p.groups()[0]
if job_class == job_name:
break
if job_class:
if package:
job_class = package + '.' + job_class
logger.debug('Found scalding job class: %s', job_class)
return job_class
else:
            raise luigi.contrib.hadoop.HadoopJobError('Could not find scalding job class.')
def build_job_jar(self, job):
job_jar = job.jar()
if job_jar:
if not os.path.exists(job_jar):
logger.error("Can't find jar: %s, full path %s", job_jar, os.path.abspath(job_jar))
raise Exception("job jar does not exist")
if not job.job_class():
logger.error("Undefined job_class()")
raise Exception("Undefined job_class()")
return job_jar
job_src = job.source()
if not job_src:
logger.error("Both source() and jar() undefined")
raise Exception("Both source() and jar() undefined")
if not os.path.exists(job_src):
logger.error("Can't find source: %s, full path %s", job_src, os.path.abspath(job_src))
raise Exception("job source does not exist")
job_src = job.source()
job_jar = self.get_tmp_job_jar(job_src)
build_dir = self.get_build_dir(job_src)
if not os.path.exists(build_dir):
os.makedirs(build_dir)
classpath = ':'.join(filter(None,
self.get_scalding_jars() +
self.get_provided_jars() +
self.get_libjars() +
job.extra_jars()))
scala_cp = ':'.join(self.get_scala_jars(include_compiler=True))
# compile scala source
arglist = ['java', '-cp', scala_cp, 'scala.tools.nsc.Main',
'-classpath', classpath,
'-d', build_dir, job_src]
logger.info('Compiling scala source: %s', ' '.join(arglist))
subprocess.check_call(arglist)
# build job jar file
arglist = ['jar', 'cf', job_jar, '-C', build_dir, '.']
logger.info('Building job jar: %s', ' '.join(arglist))
subprocess.check_call(arglist)
return job_jar
def run_job(self, job):
job_jar = self.build_job_jar(job)
jars = [job_jar] + self.get_libjars() + job.extra_jars()
scalding_core = self.get_scalding_core()
libjars = ','.join(filter(None, jars))
arglist = luigi.contrib.hdfs.load_hadoop_cmd() + ['jar', scalding_core, '-libjars', libjars]
arglist += ['-D%s' % c for c in job.jobconfs()]
job_class = job.job_class() or self.get_job_class(job.source())
arglist += [job_class, '--hdfs']
# scalding does not parse argument with '=' properly
arglist += ['--name', job.task_id.replace('=', ':')]
(tmp_files, job_args) = luigi.contrib.hadoop_jar.fix_paths(job)
arglist += job_args
env = os.environ.copy()
jars.append(scalding_core)
hadoop_cp = ':'.join(filter(None, jars))
env['HADOOP_CLASSPATH'] = hadoop_cp
logger.info("Submitting Hadoop job: HADOOP_CLASSPATH=%s %s",
hadoop_cp, ' '.join(arglist))
luigi.contrib.hadoop.run_and_track_hadoop_job(arglist, env=env)
for a, b in tmp_files:
a.move(b)
class ScaldingJobTask(luigi.contrib.hadoop.BaseHadoopJobTask):
"""
A job task for Scalding that define a scala source and (optional) main method.
requires() should return a dictionary where the keys are Scalding argument
names and values are sub tasks or lists of subtasks.
For example:
.. code-block:: python
{'input1': A, 'input2': C} => --input1 <Aoutput> --input2 <Coutput>
{'input1': [A, B], 'input2': [C]} => --input1 <Aoutput> <Boutput> --input2 <Coutput>
"""
def relpath(self, current_file, rel_path):
"""
Compute path given current file and relative path.
"""
script_dir = os.path.dirname(os.path.abspath(current_file))
rel_path = os.path.abspath(os.path.join(script_dir, rel_path))
return rel_path
def source(self):
"""
Path to the scala source for this Scalding Job
Either one of source() or jar() must be specified.
"""
return None
def jar(self):
"""
Path to the jar file for this Scalding Job
Either one of source() or jar() must be specified.
"""
return None
def extra_jars(self):
"""
Extra jars for building and running this Scalding Job.
"""
return []
def job_class(self):
"""
optional main job class for this Scalding Job.
"""
return None
def job_runner(self):
return ScaldingJobRunner()
def atomic_output(self):
"""
If True, then rewrite output arguments to be temp locations and
atomically move them into place after the job finishes.
"""
return True
def requires(self):
return {}
def job_args(self):
"""
Extra arguments to pass to the Scalding job.
"""
return []
def args(self):
"""
Returns an array of args to pass to the job.
"""
arglist = []
for k, v in six.iteritems(self.requires_hadoop()):
arglist.append('--' + k)
arglist.extend([t.output().path for t in flatten(v)])
arglist.extend(['--output', self.output()])
arglist.extend(self.job_args())
return arglist
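# --- usage sketch (illustrative addition, not part of the original module) ---
# A hypothetical job definition: compiles a WordCount.scala sitting next to
# this file and writes its output to a placeholder HDFS path.
class ExampleWordCount(ScaldingJobTask):
    def source(self):
        return self.relpath(__file__, 'WordCount.scala')
    def output(self):
        return luigi.contrib.hdfs.HdfsTarget('/tmp/scalding-wordcount')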
| 17zuoye/luigi | luigi/contrib/scalding.py | Python | apache-2.0 | 10,382 |
# -*- coding: utf-8 -*-
import os
import sys
import socket
import struct
from nixops import known_hosts
from nixops.util import wait_for_tcp_port, ping_tcp_port
from nixops.util import attr_property, create_key_pair, generate_random_string
from nixops.nix_expr import Function, RawValue
from nixops.backends import MachineDefinition, MachineState
from nixops.gce_common import ResourceDefinition, ResourceState
import nixops.resources.gce_static_ip
import nixops.resources.gce_disk
import nixops.resources.gce_image
import nixops.resources.gce_network
import libcloud.common.google
from libcloud.compute.types import Provider, NodeState
from libcloud.compute.providers import get_driver
class GCEDefinition(MachineDefinition, ResourceDefinition):
"""
Definition of a Google Compute Engine machine.
"""
@classmethod
def get_type(cls):
return "gce"
def __init__(self, xml):
MachineDefinition.__init__(self, xml)
x = xml.find("attrs/attr[@name='gce']/attrs")
assert x is not None
self.copy_option(x, 'machineName', str)
self.copy_option(x, 'region', str)
self.copy_option(x, 'instanceType', str, empty = False)
self.copy_option(x, 'project', str)
self.copy_option(x, 'serviceAccount', str)
self.access_key_path = self.get_option_value(x, 'accessKey', str)
self.copy_option(x, 'tags', 'strlist')
self.metadata = { k.get("name"): k.find("string").get("value")
for k in x.findall("attr[@name='metadata']/attrs/attr") }
scheduling = x.find("attr[@name='scheduling']")
self.copy_option(scheduling, 'automaticRestart', bool)
self.copy_option(scheduling, 'onHostMaintenance', str)
self.ipAddress = self.get_option_value(x, 'ipAddress', 'resource', optional = True)
self.copy_option(x, 'network', 'resource', optional = True)
def opt_disk_name(dname):
return ("{0}-{1}".format(self.machine_name, dname) if dname is not None else None)
def parse_block_device(xml):
result = {
'disk': self.get_option_value(xml, 'disk', 'resource', optional = True),
'disk_name': opt_disk_name(self.get_option_value(xml, 'disk_name', str, optional = True)),
'snapshot': self.get_option_value(xml, 'snapshot', str, optional = True),
'image': self.get_option_value(xml, 'image', 'resource', optional = True),
'size': self.get_option_value(xml, 'size', int, optional = True),
'type': self.get_option_value(xml, 'diskType', str),
'deleteOnTermination': self.get_option_value(xml, 'deleteOnTermination', bool),
'readOnly': self.get_option_value(xml, 'readOnly', bool),
'bootDisk': self.get_option_value(xml, 'bootDisk', bool),
'encrypt': self.get_option_value(xml, 'encrypt', bool),
'passphrase': self.get_option_value(xml, 'passphrase', str)
}
if not(result['disk'] or result['disk_name']):
raise Exception("{0}: blockDeviceMapping item must specify either an "
"external disk name to mount or a disk name to create"
.format(self.machine_name))
return result
self.block_device_mapping = { k.get("name"): parse_block_device(k)
for k in x.findall("attr[@name='blockDeviceMapping']/attrs/attr") }
boot_devices = [k for k,v in self.block_device_mapping.iteritems() if v['bootDisk']]
if len(boot_devices) == 0:
raise Exception("machine {0} must have a boot device.".format(self.name))
if len(boot_devices) > 1:
raise Exception("machine {0} must have exactly one boot device.".format(self.name))
def show_type(self):
return "{0} [{1}]".format(self.get_type(), self.region or "???")
class GCEState(MachineState, ResourceState):
"""
State of a Google Compute Engine machine.
"""
@classmethod
def get_type(cls):
return "gce"
machine_name = attr_property("gce.name", None)
public_ipv4 = attr_property("publicIpv4", None)
region = attr_property("gce.region", None)
instance_type = attr_property("gce.instanceType", None)
public_client_key = attr_property("gce.publicClientKey", None)
private_client_key = attr_property("gce.privateClientKey", None)
public_host_key = attr_property("gce.publicHostKey", None)
private_host_key = attr_property("gce.privateHostKey", None)
tags = attr_property("gce.tags", None, 'json')
metadata = attr_property("gce.metadata", {}, 'json')
automatic_restart = attr_property("gce.scheduling.automaticRestart", None, bool)
on_host_maintenance = attr_property("gce.scheduling.onHostMaintenance", None)
ipAddress = attr_property("gce.ipAddress", None)
network = attr_property("gce.network", None)
block_device_mapping = attr_property("gce.blockDeviceMapping", {}, 'json')
backups = nixops.util.attr_property("gce.backups", {}, 'json')
def __init__(self, depl, name, id):
MachineState.__init__(self, depl, name, id)
self._conn = None
@property
def resource_id(self):
return self.machine_name
def show_type(self):
s = super(GCEState, self).show_type()
if self.region: s = "{0} [{1}; {2}]".format(s, self.region, self.instance_type)
return s
credentials_prefix = "deployment.gce"
@property
def full_name(self):
return "GCE machine '{0}'".format(self.machine_name)
def node(self):
return self.connect().ex_get_node(self.machine_name, self.region)
def full_metadata(self, metadata):
result = metadata.copy()
result.update({
'sshKeys': "root:{0}".format(self.public_client_key),
'ssh_host_ecdsa_key': self.private_host_key,
'ssh_host_ecdsa_key_pub': self.public_host_key
})
return result
def gen_metadata(self, metadata):
return {
'kind': 'compute#metadata',
'items': [ {'key': k, 'value': v} for k,v in metadata.iteritems() ]
}
def update_block_device_mapping(self, k, v):
x = self.block_device_mapping
if v == None:
x.pop(k, None)
else:
x[k] = v
self.block_device_mapping = x
def _delete_volume(self, volume_id, region, allow_keep=False):
if not self.depl.logger.confirm("are you sure you want to destroy GCE disk '{0}'?".format(volume_id)):
if allow_keep:
return
else:
raise Exception("not destroying GCE disk '{0}'".format(volume_id))
self.log("destroying GCE disk '{0}'...".format(volume_id))
try:
disk = self.connect().ex_get_volume(volume_id, region)
disk.destroy()
except libcloud.common.google.ResourceNotFoundError:
self.warn("seems to have been destroyed already")
def _node_deleted(self):
self.vm_id = None
self.state = self.STOPPED
for k,v in self.block_device_mapping.iteritems():
v['needsAttach'] = True
self.update_block_device_mapping(k, v)
defn_properties = ['tags', 'region', 'instance_type',
'metadata', 'ipAddress', 'network']
def is_deployed(self):
return (self.vm_id or self.block_device_mapping)
def create(self, defn, check, allow_reboot, allow_recreate):
assert isinstance(defn, GCEDefinition)
self.no_project_change(defn)
self.no_region_change(defn)
self.no_change(self.machine_name != defn.machine_name, "instance name")
self.set_common_state(defn)
self.copy_credentials(defn)
self.machine_name = defn.machine_name
self.region = defn.region
if not self.public_client_key:
(private, public) = create_key_pair()
self.public_client_key = public
self.private_client_key = private
if not self.public_host_key:
(private, public) = create_key_pair(type="ecdsa")
self.public_host_key = public
self.private_host_key = private
recreate = False
if check:
try:
node = self.node()
if self.vm_id:
if node.state == NodeState.TERMINATED:
recreate = True
self.warn("the instance is terminated and needs a reboot")
self.state = self.STOPPED
self.handle_changed_property('region', node.extra['zone'].name, can_fix = False)
# a bit hacky but should work
network_name = node.extra['networkInterfaces'][0]['network'].split('/')[-1]
if network_name == 'default': network_name = None
self.handle_changed_property('network', network_name)
self.handle_changed_property('instance_type', node.size)
self.handle_changed_property('public_ipv4',
node.public_ips[0] if node.public_ips else None,
property_name = 'IP address')
if self.public_ipv4:
known_hosts.add(self.public_ipv4, self.public_host_key)
if self.ipAddress:
try:
address = self.connect().ex_get_address(self.ipAddress)
if self.public_ipv4 and self.public_ipv4 != address.address:
                                self.warn("static IP Address {0} assigned to this machine has unexpectedly "
"changed from {1} to {2} most likely due to being redeployed"
.format(self.ipAddress, self.public_ipv4, address.address) )
self.ipAddress = None
except libcloud.common.google.ResourceNotFoundError:
self.warn("static IP Address resource {0} used by this machine has been destroyed; "
"it is likely that the machine is still holding the address itself ({1}) "
"and this is your last chance to reclaim it before it gets "
"lost in a reboot".format(self.ipAddress, self.public_ipv4) )
self.handle_changed_property('tags', sorted(node.extra['tags']))
actual_metadata = { i['key']: i['value']
for i in node.extra['metadata'].get('items', [])
if i['key'] not in [ 'ssh_host_ecdsa_key', 'sshKeys',
'ssh_host_ecdsa_key_pub'] }
self.handle_changed_property('metadata', actual_metadata)
self.handle_changed_property('automatic_restart',
node.extra['scheduling']["automaticRestart"])
self.handle_changed_property('on_host_maintenance',
node.extra['scheduling']["onHostMaintenance"])
attached_disk_names = [d.get("deviceName", None) for d in node.extra['disks'] ]
# check that all disks are attached
for k, v in self.block_device_mapping.iteritems():
disk_name = v['disk_name'] or v['disk']
is_attached = disk_name in attached_disk_names
if not is_attached and not v.get('needsAttach', False):
self.warn("disk {0} seems to have been detached behind our back; will reattach...".format(disk_name))
v['needsAttach'] = True
self.update_block_device_mapping(k, v)
if is_attached and v.get('needsAttach', False):
self.warn("disk {0} seems to have been attached for us; thank you, mr. Elusive Bug!".format(disk_name))
del v['needsAttach']
self.update_block_device_mapping(k, v)
# check that no extra disks are attached
defn_disk_names = [v['disk_name'] or v['disk'] for k,v in defn.block_device_mapping.iteritems()]
state_disk_names = [v['disk_name'] or v['disk'] for k,v in self.block_device_mapping.iteritems()]
unexpected_disks = list( set(attached_disk_names) - set(defn_disk_names) - set(state_disk_names) )
if unexpected_disks:
self.warn("unexpected disk(s) {0} are attached to this instance; "
"not fixing this just in case".format(unexpected_disks))
else:
self.warn_not_supposed_to_exist(valuable_data = True)
self.confirm_destroy(node, self.full_name)
except libcloud.common.google.ResourceNotFoundError:
if self.vm_id:
self.warn("the instance seems to have been destroyed behind our back")
if not allow_recreate: raise Exception("use --allow-recreate to fix")
self._node_deleted()
# check that the disks that should exist do exist
# and that the disks we expected to create don't exist yet
for k,v in defn.block_device_mapping.iteritems():
disk_name = v['disk_name'] or v['disk']
try:
disk = self.connect().ex_get_volume(disk_name, v.get('region', None) )
if k not in self.block_device_mapping and v['disk_name']:
self.warn_not_supposed_to_exist(resource_name = disk_name, valuable_data = True)
self.confirm_destroy(disk, disk_name)
except libcloud.common.google.ResourceNotFoundError:
if v['disk']:
raise Exception("external disk '{0}' is required but doesn't exist".format(disk_name))
if k in self.block_device_mapping and v['disk_name']:
self.warn("disk '{0}' is supposed to exist, but is missing; will recreate...".format(disk_name))
self.update_block_device_mapping(k, None)
# create missing disks
for k, v in defn.block_device_mapping.iteritems():
if k in self.block_device_mapping: continue
if v['disk'] is None:
extra_msg = ( " from snapshot '{0}'".format(v['snapshot']) if v['snapshot']
else " from image '{0}'".format(v['image']) if v['image']
else "" )
self.log("creating GCE disk of {0} GiB{1}..."
.format(v['size'] if v['size'] else "auto", extra_msg))
v['region'] = defn.region
try:
self.connect().create_volume(v['size'], v['disk_name'], v['region'],
snapshot = v['snapshot'], image = v['image'],
ex_disk_type = "pd-" + v.get('type', 'standard'),
use_existing= False)
except libcloud.common.google.ResourceExistsError:
raise Exception("tried creating a disk that already exists; "
"please run 'deploy --check' to fix this")
v['needsAttach'] = True
self.update_block_device_mapping(k, v)
if self.vm_id:
if self.instance_type != defn.instance_type:
recreate = True
self.warn("change of the instance type requires a reboot")
if self.network != defn.network:
recreate = True
self.warn("change of the network requires a reboot")
for k, v in self.block_device_mapping.iteritems():
defn_v = defn.block_device_mapping.get(k, None)
if defn_v and not v.get('needsAttach', False):
if v['bootDisk'] != defn_v['bootDisk']:
recreate = True
self.warn("change of the boot disk requires a reboot")
if v['readOnly'] != defn_v['readOnly']:
recreate = True
self.warn("remounting disk as ro/rw requires a reboot")
if recreate:
if not allow_reboot:
raise Exception("reboot is required for the requested changes; please run with --allow-reboot")
self.stop()
self.create_node(defn)
def create_node(self, defn):
if not self.vm_id:
self.log("creating {0}...".format(self.full_name))
boot_disk = next((v for k,v in defn.block_device_mapping.iteritems() if v.get('bootDisk', False)), None)
if not boot_disk:
raise Exception("no boot disk found for {0}".format(self.full_name))
try:
node = self.connect().create_node(self.machine_name, defn.instance_type, 'none',
location = self.connect().ex_get_zone(defn.region),
ex_boot_disk = self.connect().ex_get_volume(boot_disk['disk_name'] or boot_disk['disk'], boot_disk.get('region', None)),
ex_metadata = self.full_metadata(defn.metadata), ex_tags = defn.tags,
external_ip = (self.connect().ex_get_address(defn.ipAddress) if defn.ipAddress else 'ephemeral'),
ex_network = (defn.network if defn.network else 'default') )
except libcloud.common.google.ResourceExistsError:
raise Exception("tried creating an instance that already exists; "
"please run 'deploy --check' to fix this")
self.vm_id = self.machine_name
self.state = self.STARTING
self.ssh_pinged = False
self.copy_properties(defn)
self.public_ipv4 = node.public_ips[0]
self.log("got IP: {0}".format(self.public_ipv4))
known_hosts.add(self.public_ipv4, self.public_host_key)
for k,v in self.block_device_mapping.iteritems():
v['needsAttach'] = True
self.update_block_device_mapping(k, v)
# set scheduling config here instead of triggering an update using None values
# because we might be called with defn = self, thus modifying self would ruin defn
self.connect().ex_set_node_scheduling(node,
automatic_restart = defn.automatic_restart,
on_host_maintenance = defn.on_host_maintenance)
self.automatic_restart = defn.automatic_restart
self.on_host_maintenance = defn.on_host_maintenance
# Attach missing volumes
for k, v in self.block_device_mapping.items():
defn_v = defn.block_device_mapping.get(k, None)
if v.get('needsAttach', False) and defn_v:
disk_name = v['disk_name'] or v['disk']
disk_region = v.get('region', None)
v['readOnly'] = defn_v['readOnly']
v['bootDisk'] = defn_v['bootDisk']
v['deleteOnTermination'] = defn_v['deleteOnTermination']
v['passphrase'] = defn_v['passphrase']
self.log("attaching GCE disk '{0}'...".format(disk_name))
if not v.get('bootDisk', False):
self.connect().attach_volume(self.node(), self.connect().ex_get_volume(disk_name, disk_region),
device = disk_name,
ex_mode = ('READ_ONLY' if v['readOnly'] else 'READ_WRITE'))
del v['needsAttach']
self.update_block_device_mapping(k, v)
# generate LUKS key if the model didn't specify one
if v.get('encrypt', False) and v.get('passphrase', "") == "" and v.get('generatedKey', "") == "":
v['generatedKey'] = generate_random_string(length=256)
self.update_block_device_mapping(k, v)
if self.metadata != defn.metadata:
self.log('setting new metadata values')
node = self.node()
meta = self.gen_metadata(self.full_metadata(defn.metadata))
request = '/zones/%s/instances/%s/setMetadata' % (node.extra['zone'].name,
node.name)
metadata_data = {}
metadata_data['items'] = meta['items']
metadata_data['kind'] = meta['kind']
metadata_data['fingerprint'] = node.extra['metadata']['fingerprint']
self.connect().connection.async_request(request, method='POST',
data=metadata_data)
self.metadata = defn.metadata
if self.tags != defn.tags:
self.log('updating tags')
self.connect().ex_set_node_tags(self.node(), defn.tags)
self.tags = defn.tags
if self.public_ipv4 and self.ipAddress != defn.ipAddress:
self.log("detaching old IP address {0}".format(self.public_ipv4))
self.connect().connection.async_request(
"/zones/{0}/instances/{1}/deleteAccessConfig?accessConfig=External+NAT&networkInterface=nic0"
.format(self.region, self.machine_name), method = 'POST')
self.public_ipv4 = None
self.ipAddress = None
if self.public_ipv4 is None:
self.log("attaching IP address {0}".format(defn.ipAddress or "[Ephemeral]"))
self.connect().connection.async_request(
"/zones/{0}/instances/{1}/addAccessConfig?networkInterface=nic0"
.format(self.region, self.machine_name), method = 'POST', data = {
'kind': 'compute#accessConfig',
'type': 'ONE_TO_ONE_NAT',
'name': 'External NAT',
'natIP': self.connect().ex_get_address(defn.ipAddress).address if defn.ipAddress else None
})
self.ipAddress = defn.ipAddress
self.public_ipv4 = self.node().public_ips[0]
self.log("got IP: {0}".format(self.public_ipv4))
known_hosts.add(self.public_ipv4, self.public_host_key)
self.ssh.reset()
self.ssh_pinged = False
if self.automatic_restart != defn.automatic_restart or self.on_host_maintenance != defn.on_host_maintenance:
self.log("setting scheduling configuration")
self.connect().ex_set_node_scheduling(self.node(),
automatic_restart = defn.automatic_restart,
on_host_maintenance = defn.on_host_maintenance)
self.automatic_restart = defn.automatic_restart
self.on_host_maintenance = defn.on_host_maintenance
def reboot(self, hard=False):
if hard:
self.log("sending hard reset to GCE machine...")
self.node().reboot()
self.state = self.STARTING
else:
MachineState.reboot(self, hard=hard)
def start(self):
if self.vm_id:
try:
node = self.node()
except libcloud.common.google.ResourceNotFoundError:
self.warn("seems to have been destroyed already")
self._node_deleted()
node = None
if node and (node.state == NodeState.TERMINATED):
self.stop()
if node and (node.state == NodeState.STOPPED):
self.warn("kicking the machine with a hard reboot to start it")
self.reboot_sync(hard=True)
if not self.vm_id and self.block_device_mapping:
prev_public_ipv4 = self.public_ipv4
self.create_node(self)
if prev_public_ipv4 != self.public_ipv4:
self.warn("IP address has changed from {0} to {1}, "
"you may need to run 'nixops deploy'"
.format(prev_public_ipv4, self.public_ipv4) )
self.wait_for_ssh(check=True)
self.send_keys()
def stop(self):
if not self.vm_id: return
try:
node = self.node()
except libcloud.common.google.ResourceNotFoundError:
self.warn("seems to have been destroyed already")
self._node_deleted()
return
if node.state != NodeState.TERMINATED:
self.log_start("stopping GCE machine... ")
self.run_command("poweroff", check=False)
self.state = self.STOPPING
def check_stopped():
self.log_continue(".")
return self.node().state == NodeState.TERMINATED
if nixops.util.check_wait(check_stopped, initial=3, max_tries=100, exception=False): # = 5 min
self.log_end("stopped")
else:
self.log_end("(timed out)")
self.state = self.STOPPED
self.log("tearing down the instance; disk contents are preserved");
node.destroy()
self._node_deleted()
self.ssh.reset()
def destroy(self, wipe=False):
if wipe:
log.warn("wipe is not supported")
try:
node = self.node()
question = "are you sure you want to destroy {0}?"
if not self.depl.logger.confirm(question.format(self.full_name)):
return False
known_hosts.remove(self.public_ipv4, self.public_host_key)
self.log("destroying the GCE machine...")
node.destroy()
except libcloud.common.google.ResourceNotFoundError:
self.warn("seems to have been destroyed already")
self._node_deleted()
# Destroy volumes created for this instance.
for k, v in self.block_device_mapping.items():
if v.get('deleteOnTermination', False):
self._delete_volume(v['disk_name'], v['region'])
self.update_block_device_mapping(k, None)
return True
def after_activation(self, defn):
# Detach volumes that are no longer in the deployment spec.
for k, v in self.block_device_mapping.items():
if k not in defn.block_device_mapping:
disk_name = v['disk'] or v['disk_name']
self.log("unmounting device '{0}'...".format(disk_name))
if v.get('encrypt', False):
dm = "/dev/mapper/{0}".format(disk_name)
self.run_command("umount -l {0}".format(dm), check=False)
self.run_command("cryptsetup luksClose {0}".format(dm), check=False)
else:
self.run_command("umount -l {0}".format(k), check=False)
node = self.node()
try:
if not v.get('needsAttach', False):
self.log("detaching GCE disk '{0}'...".format(disk_name))
volume = self.connect().ex_get_volume(disk_name, v.get('region', None) )
self.connect().detach_volume(volume, node)
v['needsAttach'] = True
self.update_block_device_mapping(k, v)
if v.get('deleteOnTermination', False):
self._delete_volume(disk_name, v['region'])
except libcloud.common.google.ResourceNotFoundError:
self.warn("GCE disk '{0}' seems to have been already destroyed".format(disk_name))
self.update_block_device_mapping(k, None)
def get_console_output(self):
node = self.node()
if node.state == NodeState.TERMINATED:
raise Exception("cannot get console output of a state=TERMINATED machine '{0}'".format(self.name))
request = '/zones/%s/instances/%s/serialPort' % (node.extra['zone'].name, node.name)
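        # Note: the serialPort resource responds with a JSON object whose 'contents'
        # field holds the raw text of the instance's serial console log.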
return self.connect().connection.request(request, method='GET').object['contents']
def _check(self, res):
try:
node = self.node()
res.exists = True
res.is_up = node.state == NodeState.RUNNING or node.state == NodeState.REBOOTING
if node.state == NodeState.REBOOTING or node.state == NodeState.PENDING: self.state = self.STARTING
if node.state == NodeState.STOPPED or node.state == NodeState.TERMINATED: self.state = self.STOPPED
if node.state == NodeState.UNKNOWN: self.state = self.UNKNOWN
if node.state == NodeState.RUNNING:
# check that all disks are attached
res.disks_ok = True
for k, v in self.block_device_mapping.iteritems():
disk_name = v['disk_name'] or v['disk']
if all(d.get("deviceName", None) != disk_name for d in node.extra['disks']):
res.disks_ok = False
res.messages.append("disk {0} is detached".format(disk_name))
try:
disk = self.connect().ex_get_volume(disk_name, v.get('region', None))
except libcloud.common.google.ResourceNotFoundError:
res.messages.append("disk {0} is destroyed".format(disk_name))
self.handle_changed_property('public_ipv4',
node.public_ips[0] if node.public_ips else None,
property_name = 'IP address')
if self.public_ipv4:
known_hosts.add(self.public_ipv4, self.public_host_key)
MachineState._check(self, res)
except libcloud.common.google.ResourceNotFoundError:
res.exists = False
res.is_up = False
            self.state = self.MISSING
def create_after(self, resources, defn):
# Just a check for all GCE resource classes
return {r for r in resources if
isinstance(r, nixops.resources.gce_static_ip.GCEStaticIPState) or
isinstance(r, nixops.resources.gce_disk.GCEDiskState) or
isinstance(r, nixops.resources.gce_image.GCEImageState) or
isinstance(r, nixops.resources.gce_network.GCENetworkState)}
def backup(self, defn, backup_id):
self.log("backing up {0} using ID '{1}'".format(self.full_name, backup_id))
if sorted(defn.block_device_mapping.keys()) != sorted(self.block_device_mapping.keys()):
self.warn("the list of disks currently deployed doesn't match the current deployment"
" specification; consider running 'deploy' first; the backup may be incomplete")
backup = {}
_backups = self.backups
for k, v in self.block_device_mapping.iteritems():
disk_name = v['disk_name'] or v['disk']
volume = self.connect().ex_get_volume(disk_name, v.get('region', None))
snapshot_name = "backup-{0}-{1}".format(backup_id, disk_name[-32:])
self.log("initiating snapshotting of disk '{0}': '{1}'".format(disk_name, snapshot_name))
self.connect().connection.request(
'/zones/%s/disks/%s/createSnapshot'
%(volume.extra['zone'].name, volume.name),
method = 'POST', data = {
'name': snapshot_name,
'description': "backup of disk {0} attached to {1}"
.format(volume.name, self.machine_name)
})
backup[disk_name] = snapshot_name
_backups[backup_id] = backup
self.backups = _backups
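    # Illustrative shape of the state saved above (hypothetical IDs):
    #   self.backups == {'20160531-1718': {'my-disk': 'backup-20160531-1718-my-disk'}}
    # i.e. backup_id -> {disk_name: snapshot_name}, which restore() and remove_backup() consume.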
def restore(self, defn, backup_id, devices=[]):
self.log("restoring {0} to backup '{1}'".format(self.full_name, backup_id))
self.stop()
for k, v in self.block_device_mapping.items():
disk_name = v['disk_name'] or v['disk']
s_id = self.backups[backup_id].get(disk_name, None)
if s_id and (devices == [] or k in devices or disk_name in devices):
try:
snapshot = self.connect().ex_get_snapshot(s_id)
except libcloud.common.google.ResourceNotFoundError:
self.warn("snapsnot {0} for disk {1} is missing; skipping".format(s_id, disk_name))
continue
try:
self.log("destroying disk {0}".format(disk_name))
self.connect().ex_get_volume(disk_name, v.get('region', None)).destroy()
except libcloud.common.google.ResourceNotFoundError:
self.warn("disk {0} seems to have been destroyed already".format(disk_name))
self.log("creating disk {0} from snapshot '{1}'".format(disk_name, s_id))
self.connect().create_volume(None, disk_name, v.get('region', None),
ex_disk_type = "pd-" + v.get('type', 'standard'),
snapshot = snapshot, use_existing= False)
def remove_backup(self, backup_id, keep_physical=False):
self.log('removing backup {0}'.format(backup_id))
_backups = self.backups
if not backup_id in _backups.keys():
self.warn('backup {0} not found; skipping'.format(backup_id))
else:
for d_name, snapshot_id in _backups[backup_id].iteritems():
try:
self.log('removing snapshot {0}'.format(snapshot_id))
self.connect().ex_get_snapshot(snapshot_id).destroy()
except libcloud.common.google.ResourceNotFoundError:
self.warn('snapshot {0} not found; skipping'.format(snapshot_id))
_backups.pop(backup_id)
self.backups = _backups
def get_backups(self):
self.connect()
backups = {}
for b_id, snapshots in self.backups.iteritems():
backups[b_id] = {}
backup_status = "complete"
info = []
for k, v in self.block_device_mapping.items():
disk_name = v['disk_name'] or v['disk']
if not disk_name in snapshots.keys():
backup_status = "incomplete"
info.append("{0} - {1} - not available in backup".format(self.name, disk_name))
else:
snapshot_id = snapshots[disk_name]
try:
snapshot = self.connect().ex_get_snapshot(snapshot_id)
if snapshot.status != 'READY':
backup_status = "running"
except libcloud.common.google.ResourceNotFoundError:
info.append("{0} - {1} - {2} - snapshot has disappeared".format(self.name, disk_name, snapshot_id))
backup_status = "unavailable"
for d_name, s_id in snapshots.iteritems():
if not any(d_name == v['disk_name'] or d_name == v['disk'] for k,v in self.block_device_mapping.iteritems()):
info.append("{0} - {1} - {2} - a snapshot of a disk that is not or no longer deployed".format(self.name, d_name, s_id))
backups[b_id]['status'] = backup_status
backups[b_id]['info'] = info
return backups
def get_physical_spec(self):
block_device_mapping = {}
for k, v in self.block_device_mapping.items():
if (v.get('encrypt', False)
and v.get('passphrase', "") == ""
and v.get('generatedKey', "") != ""):
block_device_mapping[k] = {
'passphrase': Function("pkgs.lib.mkOverride 10",
v['generatedKey'], call=True),
}
return {
'require': [
RawValue("<nixpkgs/nixos/modules/virtualisation/google-compute-config.nix>")
],
('deployment', 'gce', 'blockDeviceMapping'): block_device_mapping,
}
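    # Illustrative result (hypothetical device key): an encrypted disk whose LUKS key was
    # generated by this backend contributes roughly
    #   ('deployment', 'gce', 'blockDeviceMapping'):
    #       {'<device>': {'passphrase': Function("pkgs.lib.mkOverride 10", '<generated key>', call=True)}}
    # so the generated passphrase overrides whatever the logical specification contains.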
def get_keys(self):
keys = MachineState.get_keys(self)
# Ugly: we have to add the generated keys because they're not
# there in the first evaluation (though they are present in
# the final nix-build).
for k, v in self.block_device_mapping.items():
if v.get('encrypt', False) and v.get('passphrase', "") == "" and v.get('generatedKey', "") != "":
keys["luks-" + (v['disk_name'] or v['disk'])] = { 'text': v['generatedKey'], 'group': 'root', 'permissions': '0600', 'user': 'root'}
return keys
def get_ssh_name(self):
if not self.public_ipv4:
raise Exception("{0} does not have a public IPv4 address (yet)".format(self.full_name))
return self.public_ipv4
def get_ssh_private_key_file(self):
return self._ssh_private_key_file or self.write_ssh_private_key(self.private_client_key)
def get_ssh_flags(self, scp=False):
return super(GCEState, self).get_ssh_flags(scp) + [ "-i", self.get_ssh_private_key_file() ]
| Bsami/nixops | nixops/backends/gce.py | Python | lgpl-3.0 | 37,799 |
import re
import fileinput
import requests
from pyes import *
# def call_es(error):
# error = error[:500]
# conn = ES('ec2-52-8-185-215.us-west-1.compute.amazonaws.com:9200')
# error = re.sub('[\W_]+', ' ', error)
# q = QueryStringQuery("ques.snippets:{}".format(error))
# results = conn.search(query=q)
# return results
def call_server(error):
server = 'http://ec2-52-8-219-37.us-west-1.compute.amazonaws.com/api/v1/error'
# server = 'http://localhost:5000/api/v1/error'
params = {
'error_log': error
}
result = {}
r = requests.post(server, params)
if r.status_code == 200:
result['answer_link'] = r.json()['answer_link']
result['num_results'] = r.json()['num_results']
return result
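# Illustrative return value of call_server (hypothetical URL): a successful request yields
#   {'answer_link': 'http://stackoverflow.com/a/12345', 'num_results': 3}
# while a non-200 response returns an empty dict.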
def store_link(result):
link = result['answer_link']
num_results = result['num_results']
line_to_write = link + '\t' + str(num_results) + '\n'
with open('links.txt', 'a') as f:
f.write(line_to_write)
def read_stdin():
for line in fileinput.input():
result = call_server(line)
store_link(result)
if __name__ == '__main__':
read_stdin()
# watch -n 0.1 tail -n 20 links.txt
| nave91/rebot | scripts/rebot.py | Python | gpl-2.0 | 1,193 |
#!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import os
from os import path
from osa_toolkit import filesystem as fs
import sys
import unittest
from unittest import mock
from test_inventory import cleanup
from test_inventory import get_inventory
from test_inventory import make_config
INV_DIR = 'inventory'
sys.path.append(path.join(os.getcwd(), INV_DIR))
TARGET_DIR = path.join(os.getcwd(), 'tests', 'inventory')
USER_CONFIG_FILE = path.join(TARGET_DIR, 'openstack_user_config.yml')
def setUpModule():
# The setUpModule function is used by the unittest framework.
make_config()
def tearDownModule():
# This file should only be removed after all tests are run,
# thus it is excluded from cleanup.
os.remove(USER_CONFIG_FILE)
class TestMultipleRuns(unittest.TestCase):
def test_creating_backup_file(self):
inventory_file_path = os.path.join(TARGET_DIR,
'openstack_inventory.json')
get_backup_name_path = 'osa_toolkit.filesystem._get_backup_name'
backup_name = 'openstack_inventory.json-20160531_171804.json'
tar_file = mock.MagicMock()
tar_file.__enter__.return_value = tar_file
# run make backup with faked tarfiles and date
with mock.patch('osa_toolkit.filesystem.tarfile.open') as tar_open:
tar_open.return_value = tar_file
with mock.patch(get_backup_name_path) as backup_mock:
backup_mock.return_value = backup_name
fs._make_backup(TARGET_DIR, inventory_file_path)
backup_path = path.join(TARGET_DIR, 'backup_openstack_inventory.tar')
tar_open.assert_called_with(backup_path, 'a')
# This chain is present because of how tarfile.open is called to
# make a context manager inside the make_backup function.
tar_file.add.assert_called_with(inventory_file_path,
arcname=backup_name)
def test_recreating_files(self):
# Deleting the files after the first run should cause the files to be
# completely remade
get_inventory()
get_inventory()
backup_path = path.join(TARGET_DIR, 'backup_openstack_inventory.tar')
self.assertFalse(os.path.exists(backup_path))
def test_rereading_files(self):
# Generate the initial inventory files
get_inventory(clean=False)
inv, path = fs.load_inventory(TARGET_DIR)
self.assertIsInstance(inv, dict)
self.assertIn('_meta', inv)
# This test is basically just making sure we get more than
# INVENTORY_SKEL populated, so we're not going to do deep testing
self.assertIn('log_hosts', inv)
def tearDown(self):
# Clean up here since get_inventory will not do it by design in
# this test.
cleanup()
if __name__ == '__main__':
unittest.main(catchbreak=True)
| openstack/openstack-ansible | tests/test_filesystem.py | Python | apache-2.0 | 3,436 |
import os
from os import path
from datetime import datetime
import getpass
import re
import time
from fabric.context_managers import cd, hide, settings
from fabric.operations import require, prompt, get, run, sudo, local
from fabric.state import env
from fabric.contrib import files
from fabric import utils
def _setup_paths(project_settings):
# first merge in variables from project_settings - but ignore __doc__ etc
user_settings = [x for x in vars(project_settings).keys() if not x.startswith('__')]
for setting in user_settings:
env[setting] = vars(project_settings)[setting]
# allow for project_settings having set up some of these differently
env.setdefault('verbose', False)
env.setdefault('use_sudo', True)
env.setdefault('cvs_rsh', 'CVS_RSH="ssh"')
env.setdefault('default_branch', {'production': 'master', 'staging': 'master'})
env.setdefault('server_project_home',
path.join(env.server_home, env.project_name))
# TODO: change dev -> current
env.setdefault('vcs_root_dir', path.join(env.server_project_home, 'dev'))
env.setdefault('prev_root', path.join(env.server_project_home, 'previous'))
env.setdefault('next_dir', path.join(env.server_project_home, 'next'))
env.setdefault('dump_dir', path.join(env.server_project_home, 'dbdumps'))
env.setdefault('deploy_dir', path.join(env.vcs_root_dir, 'deploy'))
env.setdefault('settings', '%(project_name)s.settings' % env)
if env.project_type == "django":
env.setdefault('relative_django_dir', env.project_name)
env.setdefault('relative_django_settings_dir', env['relative_django_dir'])
env.setdefault('relative_ve_dir', path.join(env['relative_django_dir'], '.ve'))
# now create the absolute paths of everything else
env.setdefault('django_dir',
path.join(env['vcs_root_dir'], env['relative_django_dir']))
env.setdefault('django_settings_dir',
path.join(env['vcs_root_dir'], env['relative_django_settings_dir']))
env.setdefault('ve_dir',
path.join(env['vcs_root_dir'], env['relative_ve_dir']))
env.setdefault('manage_py', path.join(env['django_dir'], 'manage.py'))
# local_tasks_bin is the local copy of tasks.py
# this should be the copy from where ever fab.py is being run from ...
if 'DEPLOYDIR' in os.environ:
env.setdefault('local_tasks_bin',
path.join(os.environ['DEPLOYDIR'], 'tasks.py'))
else:
env.setdefault('local_tasks_bin',
path.join(path.dirname(__file__), 'tasks.py'))
# valid environments - used for require statements in fablib
env.valid_envs = env.host_list.keys()
def _linux_type():
if 'linux_type' not in env:
# work out if we're based on redhat or centos
# TODO: look up stackoverflow question about this.
if files.exists('/etc/redhat-release'):
env.linux_type = 'redhat'
elif files.exists('/etc/debian_version'):
env.linux_type = 'debian'
else:
# TODO: should we print a warning here?
utils.abort("could not determine linux type of server we're deploying to")
return env.linux_type
def _get_python():
if 'python_bin' not in env:
python26 = path.join('/', 'usr', 'bin', 'python2.6')
if files.exists(python26):
env.python_bin = python26
else:
env.python_bin = path.join('/', 'usr', 'bin', 'python')
return env.python_bin
def _get_tasks_bin():
if 'tasks_bin' not in env:
env.tasks_bin = path.join(env.deploy_dir, 'tasks.py')
return env.tasks_bin
def _tasks(tasks_args, verbose=False):
tasks_cmd = _get_tasks_bin()
if env.verbose or verbose:
tasks_cmd += ' -v'
sudo_or_run(tasks_cmd + ' ' + tasks_args)
def _get_svn_user_and_pass():
if 'svnuser' not in env or len(env.svnuser) == 0:
# prompt user for username
prompt('Enter SVN username:', 'svnuser')
if 'svnpass' not in env or len(env.svnpass) == 0:
# prompt user for password
env.svnpass = getpass.getpass('Enter SVN password:')
def verbose(verbose=True):
"""Set verbose output"""
env.verbose = verbose
def deploy_clean(revision=None):
""" delete the entire install and do a clean install """
if env.environment == 'production':
utils.abort('do not delete the production environment!!!')
require('server_project_home', provided_by=env.valid_envs)
# TODO: dump before cleaning database?
with settings(warn_only=True):
webserver_cmd('stop')
clean_db()
clean_files()
deploy(revision)
def clean_files():
sudo_or_run('rm -rf %s' % env.server_project_home)
def _create_dir_if_not_exists(path):
if not files.exists(path):
sudo_or_run('mkdir -p %s' % path)
def deploy(revision=None, keep=None):
""" update remote host environment (virtualenv, deploy, update)
It takes two arguments:
* revision is the VCS revision ID to checkout (if not specified then
the latest will be checked out)
* keep is the number of old versions to keep around for rollback (default
5)"""
require('server_project_home', provided_by=env.valid_envs)
check_for_local_changes()
_create_dir_if_not_exists(env.server_project_home)
# TODO: check if our live site is in <sitename>/dev/ - if so
# move it to <sitename>/current/ and make a link called dev/ to
# the current/ directory
# TODO: if dev/ is found to be a link, ask the user if the apache config
# has been updated to point at current/ - and if so then delete dev/
# _migrate_from_dev_to_current()
create_copy_for_next()
checkout_or_update(in_next=True, revision=revision)
# remove any old pyc files - essential if the .py file has been removed
if env.project_type == "django":
rm_pyc_files(path.join(env.next_dir, env.relative_django_dir))
# create the deploy virtualenv if we use it
create_deploy_virtualenv(in_next=True)
# we only have to disable this site after creating the rollback copy
# (do this so that apache carries on serving other sites on this server
# and the maintenance page for this vhost)
downtime_start = datetime.now()
link_webserver_conf(maintenance=True)
with settings(warn_only=True):
webserver_cmd('reload')
next_to_current_to_rollback()
# Use tasks.py deploy:env to actually do the deployment, including
# creating the virtualenv if it thinks it necessary, ignoring
# env.use_virtualenv as tasks.py knows nothing about it.
_tasks('deploy:' + env.environment)
# bring this vhost back in, reload the webserver and touch the WSGI
# handler (which reloads the wsgi app)
link_webserver_conf()
webserver_cmd('reload')
downtime_end = datetime.now()
touch_wsgi()
delete_old_rollback_versions(keep)
if env.environment == 'production':
setup_db_dumps()
_report_downtime(downtime_start, downtime_end)
def _report_downtime(downtime_start, downtime_end):
downtime = downtime_end - downtime_start
utils.puts("Downtime lasted for %.1f seconds" % downtime.total_seconds())
utils.puts("(Downtime started at %s and finished at %s)" %
(downtime_start, downtime_end))
def set_up_celery_daemon():
require('vcs_root_dir', 'project_name', provided_by=env)
for command in ('celerybeat', 'celeryd'):
command_project = command + '_' + env.project_name
celery_run_script_location = path.join(env['vcs_root_dir'],
'celery', 'init', command)
celery_run_script = path.join('/etc', 'init.d', command_project)
celery_configuration_location = path.join(env['vcs_root_dir'],
'celery', 'config', command)
celery_configuration_destination = path.join('/etc', 'default',
command_project)
sudo_or_run(" ".join(['cp', celery_run_script_location,
celery_run_script]))
sudo_or_run(" ".join(['chmod', '+x', celery_run_script]))
sudo_or_run(" ".join(['cp', celery_configuration_location,
celery_configuration_destination]))
sudo_or_run('/etc/init.d/%s restart' % command_project)
def clean_old_celery():
"""As the scripts have moved location you might need to get rid of old
versions of celery."""
require('vcs_root_dir', provided_by=env)
for command in ('celerybeat', 'celeryd'):
celery_run_script = path.join('/etc', 'init.d', command)
if files.exists(celery_run_script):
sudo_or_run('/etc/init.d/%s stop' % command)
sudo_or_run('rm %s' % celery_run_script)
celery_configuration_destination = path.join('/etc', 'default', command)
if files.exists(celery_configuration_destination):
sudo_or_run('rm %s' % celery_configuration_destination)
def create_copy_for_next():
"""Copy the current version to "next" so that we can do stuff like
the VCS update and virtualenv update without taking the site offline"""
# TODO: check if next directory already exists
# if it does maybe there was an aborted deploy, or maybe someone else is
# deploying. Either way, stop and ask the user what to do.
if files.exists(env.next_dir):
utils.warn('The "next" directory already exists. Maybe a previous '
'deploy failed, or maybe another deploy is in progress.')
continue_anyway = prompt('Would you like to continue anyway '
'(and delete the current next dir)? [no/yes]',
default='no', validate='^no|yes$')
if continue_anyway.lower() != 'yes':
utils.abort("Aborting deploy - try again when you're certain what to do.")
sudo_or_run('rm -rf %s' % env.next_dir)
# if this is the initial deploy, the vcs_root_dir won't exist yet. In that
# case, don't create it (otherwise the checkout code will get confused).
if files.exists(env.vcs_root_dir):
# cp -a - amongst other things this preserves links and timestamps
# so the compare that bootstrap.py does to see if the virtualenv
# needs an update should still work.
sudo_or_run('cp -a %s %s' % (env.vcs_root_dir, env.next_dir))
def next_to_current_to_rollback():
"""Move the current version to the previous directory (so we can roll back
    to it), move the next version to the current version (so it will be used) and
do a db dump in the rollback directory."""
# create directory for it
# if this is the initial deploy, the vcs_root_dir won't exist yet. In that
# case just skip the rollback version.
if files.exists(env.vcs_root_dir):
_create_dir_if_not_exists(env.prev_root)
prev_dir = path.join(env.prev_root, time.strftime("%Y-%m-%d_%H-%M-%S"))
sudo_or_run('mv %s %s' % (env.vcs_root_dir, prev_dir))
_dump_db_in_previous_directory(prev_dir)
sudo_or_run('mv %s %s' % (env.next_dir, env.vcs_root_dir))
def create_copy_for_rollback():
"""Move the current version to the previous directory (so we can roll back
to it, move the next version to the current version (so it will be used) and
do a db dump in the rollback directory."""
# create directory for it
prev_dir = path.join(env.prev_root, time.strftime("%Y-%m-%d_%H-%M-%S"))
_create_dir_if_not_exists(prev_dir)
# cp -a
    sudo_or_run('cp -a %s %s' % (env.vcs_root_dir, prev_dir))
_dump_db_in_previous_directory(prev_dir)
def _dump_db_in_previous_directory(prev_dir):
require('django_settings_dir', provided_by=env.valid_envs)
if (env.project_type == 'django' and
files.exists(path.join(env.django_settings_dir, 'local_settings.py'))):
# dump database (provided local_settings has been set up properly)
with cd(prev_dir):
# just in case there is some other reason why the dump fails
with settings(warn_only=True):
_tasks('dump_db')
def delete_old_rollback_versions(keep=None):
"""Delete old rollback directories, keeping the last "keep" (default 5)"."""
require('prev_root', provided_by=env.valid_envs)
# the -1 argument ensures one directory per line
prev_versions = run('ls -1 ' + env.prev_root).split('\n')
if keep is None:
if 'versions_to_keep' in env:
keep = env.versions_to_keep
else:
keep = 5
else:
keep = int(keep)
if keep == 0:
return
versions_to_keep = -1 * int(keep)
prev_versions_to_delete = prev_versions[:versions_to_keep]
for version_to_delete in prev_versions_to_delete:
sudo_or_run('rm -rf ' + path.join(
env.prev_root, version_to_delete.strip()))
def list_previous():
"""List the previous versions available to rollback to."""
# could also determine the VCS revision number
require('prev_root', provided_by=env.valid_envs)
run('ls ' + env.prev_root)
def rollback(version='last', migrate=False, restore_db=False):
"""Redeploy one of the old versions.
Arguments are 'version', 'migrate' and 'restore_db':
* if version is 'last' (the default) then the most recent version will be
restored. Otherwise specify by timestamp - use list_previous to get a list
of available versions.
* if restore_db is True, then the database will be restored as well as the
code. The default is False.
* if migrate is True, then fabric will attempt to work out the new and old
migration status and run the migrations to match the database versions.
The default is False
Note that migrate and restore_db cannot both be True."""
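    # Example invocation (hypothetical timestamp taken from list_previous):
    #   fab staging rollback:version=2016-05-31_17-18-04,restore_db=True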
require('prev_root', 'vcs_root_dir', provided_by=env.valid_envs)
if migrate and restore_db:
utils.abort('rollback cannot do both migrate and restore_db')
if migrate:
utils.abort("rollback: haven't worked out how to do migrate yet ...")
if version == 'last':
# get the latest directory from prev_dir
# list directories in env.prev_root, use last one
version = run('ls ' + env.prev_root).split('\n')[-1]
# check version specified exists
rollback_dir = path.join(env.prev_root, version)
if not files.exists(rollback_dir):
utils.abort("Cannot rollback to version %s, it does not exist, use list_previous to see versions available" % version)
webserver_cmd("stop")
# first copy this version out of the way
create_copy_for_rollback()
if migrate:
# run the south migrations back to the old version
# but how to work out what the old version is??
pass
if restore_db:
# feed the dump file into mysql command
with cd(rollback_dir):
_tasks('load_dbdump')
# delete everything - don't want stray files left over
sudo_or_run('rm -rf %s' % env.vcs_root_dir)
# cp -a from rollback_dir to vcs_root_dir
sudo_or_run('cp -a %s %s' % (rollback_dir, env.vcs_root_dir))
webserver_cmd("start")
def local_test():
""" run the django tests on the local machine """
require('project_name')
with cd(path.join("..", env.project_name)):
local("python " + env.test_cmd, capture=False)
def remote_test():
""" run the django tests remotely - staging only """
require('django_dir', provided_by=env.valid_envs)
if env.environment == 'production':
utils.abort('do not run tests on the production environment')
with cd(env.django_dir):
sudo_or_run(_get_python() + env.test_cmd)
def version():
""" return the deployed VCS revision and commit comments"""
require('server_project_home', 'repo_type', 'vcs_root_dir', 'repository',
provided_by=env.valid_envs)
if env.repo_type == "git":
with cd(env.vcs_root_dir):
sudo_or_run('git log | head -5')
elif env.repo_type == "svn":
_get_svn_user_and_pass()
with cd(env.vcs_root_dir):
with hide('running'):
cmd = 'svn log --non-interactive --username %s --password %s | head -4' % (env.svnuser, env.svnpass)
sudo_or_run(cmd)
else:
utils.abort('Unsupported repo type: %s' % (env.repo_type))
def _check_git_branch():
env.revision = None
with cd(env.vcs_root_dir):
with settings(warn_only=True):
# get branch information
server_branch = sudo_or_run('git rev-parse --abbrev-ref HEAD')
server_commit = sudo_or_run('git rev-parse HEAD')
local_branch = local('git rev-parse --abbrev-ref HEAD', capture=True)
default_branch = env.default_branch.get(env.environment, 'master')
git_branch_r = sudo_or_run('git branch --color=never -r')
git_branch_r = git_branch_r.split('\n')
branches = [b.split('/')[-1].strip() for b in git_branch_r if 'HEAD' not in b]
# if all branches are the same, just stick to this branch
if server_branch == local_branch == default_branch:
env.revision = server_branch
else:
if server_branch == 'HEAD':
# not on a branch - just print a warning
print 'The server git repository is not on a branch'
print 'Branch mismatch found:'
print '* %s is the default branch for this server' % default_branch
if server_branch == 'HEAD':
print '* %s is the commit checked out on the server.' % server_commit
else:
print '* %s is the branch currently checked out on the server' % server_branch
print '* %s is the current branch of your local git repo' % local_branch
print ''
print 'Available branches are:'
for branch in branches:
print '* %s' % branch
print ''
escaped_branches = [re.escape(b) for b in branches]
validate_branch = '^' + '|'.join(escaped_branches) + '$'
env.revision = prompt('Which branch would you like to use on the server? (or hit Ctrl-C to exit)',
default=default_branch, validate=validate_branch)
def check_for_local_changes():
""" check if there are local changes on the remote server """
require('repo_type', 'vcs_root_dir', provided_by=env.valid_envs)
status_cmd = {
'svn': 'svn status --quiet',
'git': 'git status --short',
'cvs': '#not worked out yet'
}
if env.repo_type == 'cvs':
print "TODO: write CVS status command"
return
if files.exists(path.join(env.vcs_root_dir, "." + env.repo_type)):
with cd(env.vcs_root_dir):
status = sudo_or_run(status_cmd[env.repo_type])
if status:
print 'Found local changes on %s server' % env.environment
print status
cont = prompt('Would you like to continue with deployment? (yes/no)',
default='no', validate=r'^yes|no$')
if cont == 'no':
utils.abort('Aborting deployment')
if env.repo_type == 'git':
_check_git_branch()
def checkout_or_update(in_next=False, revision=None):
""" checkout or update the project from version control.
This command works with svn, git and cvs repositories.
You can also specify a revision to checkout, as an argument."""
require('server_project_home', 'repo_type', 'vcs_root_dir', 'repository',
provided_by=env.valid_envs)
checkout_fn = {
'cvs': _checkout_or_update_cvs,
'svn': _checkout_or_update_svn,
'git': _checkout_or_update_git,
}
if in_next:
vcs_root_dir = env.next_dir
else:
vcs_root_dir = env.vcs_root_dir
if env.repo_type.lower() in checkout_fn:
checkout_fn[env.repo_type](vcs_root_dir, revision)
else:
utils.abort('Unsupported VCS: %s' % env.repo_type.lower())
def _checkout_or_update_svn(vcs_root_dir, revision=None):
# function to ask for svnuser and svnpass
_get_svn_user_and_pass()
# if the .svn directory exists, do an update, otherwise do
# a checkout
cmd = 'svn %s --non-interactive --no-auth-cache --username %s --password %s'
if files.exists(path.join(vcs_root_dir, ".svn")):
cmd = cmd % ('update', env.svnuser, env.svnpass)
if revision:
cmd += " --revision " + revision
with cd(vcs_root_dir):
with hide('running'):
sudo_or_run(cmd)
else:
cmd = cmd + " %s %s"
cmd = cmd % ('checkout', env.svnuser, env.svnpass, env.repository, vcs_root_dir)
if revision:
cmd += "@" + revision
with cd(env.server_project_home):
with hide('running'):
sudo_or_run(cmd)
def _checkout_or_update_git(vcs_root_dir, revision=None):
# if the .git directory exists, do an update, otherwise do
# a clone
if files.exists(path.join(vcs_root_dir, ".git")):
with cd(vcs_root_dir):
sudo_or_run('git remote rm origin')
sudo_or_run('git remote add origin %s' % env.repository)
# fetch now, merge later (if on branch)
sudo_or_run('git fetch origin')
if revision is None:
revision = env.revision
with cd(vcs_root_dir):
stash_result = sudo_or_run('git stash')
sudo_or_run('git checkout %s' % revision)
# check if revision is a branch, and do a merge if it is
with settings(warn_only=True):
rev_is_branch = sudo_or_run('git branch -r | grep %s' % revision)
# use old fabric style here to support Ubuntu 10.04
if not rev_is_branch.failed:
sudo_or_run('git merge origin/%s' % revision)
# if we did a stash, now undo it
if not stash_result.startswith("No local changes"):
sudo_or_run('git stash pop')
else:
with cd(env.server_project_home):
default_branch = env.default_branch.get(env.environment, 'master')
sudo_or_run('git clone -b %s %s %s' %
(default_branch, env.repository, vcs_root_dir))
if files.exists(path.join(vcs_root_dir, ".gitmodules")):
with cd(vcs_root_dir):
sudo_or_run('git submodule update --init')
def _checkout_or_update_cvs(vcs_root_dir, revision=None):
if files.exists(vcs_root_dir):
with cd(vcs_root_dir):
sudo_or_run('CVS_RSH="ssh" cvs update -d -P')
else:
if 'cvs_user' in env:
user_spec = env.cvs_user + "@"
else:
user_spec = ""
with cd(env.server_project_home):
cvs_options = '-d:%s:%s%s:%s' % (env.cvs_connection_type,
user_spec,
env.repository,
env.repo_path)
command_options = '-d %s' % vcs_root_dir
if revision is not None:
command_options += ' -r ' + revision
sudo_or_run('%s cvs %s checkout %s %s' % (env.cvs_rsh, cvs_options,
command_options,
env.cvs_project))
def sudo_or_run(command):
if env.use_sudo:
return sudo(command)
else:
return run(command)
def create_deploy_virtualenv(in_next=False):
""" if using new style dye stuff, create the virtualenv to hold dye """
require('deploy_dir', provided_by=env.valid_envs)
if in_next:
# TODO: use relative_deploy_dir
bootstrap_path = path.join(env.next_dir, 'deploy', 'bootstrap.py')
else:
bootstrap_path = path.join(env.deploy_dir, 'bootstrap.py')
sudo_or_run('%s %s --full-rebuild --quiet' %
(_get_python(), bootstrap_path))
def update_requirements():
""" update external dependencies on remote host """
_tasks('update_ve')
def collect_static_files():
""" coolect static files in the 'static' directory """
sudo(_get_tasks_bin() + ' collect_static')
def clean_db(revision=None):
""" delete the entire database """
if env.environment == 'production':
utils.abort('do not delete the production database!!!')
_tasks("clean_db")
def get_remote_dump(filename='/tmp/db_dump.sql', local_filename='./db_dump.sql',
rsync=True):
""" do a remote database dump and copy it to the local filesystem """
# future enhancement, do a mysqldump --skip-extended-insert (one insert
    # per line) and then do rsync rather than get() - less data transferred;
    # however rsync might need ssh keys etc
require('user', 'host', provided_by=env.valid_envs)
if rsync:
_tasks('dump_db:' + filename + ',for_rsync=true')
local("rsync -vz -e 'ssh -p %s' %s@%s:%s %s" % (env.port,
env.user, env.host, filename, local_filename))
else:
_tasks('dump_db:' + filename)
get(filename, local_path=local_filename)
sudo_or_run('rm ' + filename)
def get_remote_dump_and_load(filename='/tmp/db_dump.sql',
local_filename='./db_dump.sql', keep_dump=True, rsync=True):
""" do a remote database dump, copy it to the local filesystem and then
load it into the local database """
get_remote_dump(filename=filename, local_filename=local_filename, rsync=rsync)
local(env.local_tasks_bin + ' restore_db:' + local_filename)
if not keep_dump:
local('rm ' + local_filename)
def update_db(force_use_migrations=False):
""" create and/or update the database, do migrations etc """
_tasks('update_db:force_use_migrations=%s' % force_use_migrations)
def setup_db_dumps():
""" set up mysql database dumps """
require('dump_dir', provided_by=env.valid_envs)
_tasks('setup_db_dumps:' + env.dump_dir)
def touch_wsgi():
""" touch wsgi file to trigger reload """
require('vcs_root_dir', provided_by=env.valid_envs)
wsgi_dir = path.join(env.vcs_root_dir, 'wsgi')
sudo_or_run('touch ' + path.join(wsgi_dir, 'wsgi_handler.py'))
def rm_pyc_files(py_dir=None):
"""Remove all the old pyc files to prevent stale files being used"""
require('django_dir', provided_by=env.valid_envs)
if py_dir is None:
py_dir = env.django_dir
with settings(warn_only=True):
with cd(py_dir):
sudo_or_run('find . -name \*.pyc | xargs rm')
def _delete_file(path):
if files.exists(path):
sudo_or_run('rm %s' % path)
def _link_files(source_file, target_path):
if not files.exists(target_path):
sudo_or_run('ln -s %s %s' % (source_file, target_path))
def link_webserver_conf(maintenance=False):
"""link the webserver conf file"""
require('vcs_root_dir', provided_by=env.valid_envs)
if env.webserver is None:
return
vcs_config_stub = path.join(env.vcs_root_dir, env.webserver, env.environment)
vcs_config_live = vcs_config_stub + '.conf'
vcs_config_maintenance = vcs_config_stub + '-maintenance.conf'
webserver_conf = _webserver_conf_path()
if maintenance:
_delete_file(webserver_conf)
if not files.exists(vcs_config_maintenance):
return
_link_files(vcs_config_maintenance, webserver_conf)
else:
if not files.exists(vcs_config_live):
utils.abort('No %s conf file found - expected %s' %
(env.webserver, vcs_config_live))
_delete_file(webserver_conf)
_link_files(vcs_config_live, webserver_conf)
# debian has sites-available/sites-enabled split with links
if _linux_type() == 'debian':
webserver_conf_enabled = webserver_conf.replace('available', 'enabled')
sudo_or_run('ln -s %s %s' % (webserver_conf, webserver_conf_enabled))
webserver_configtest()
def _webserver_conf_path():
webserver_conf_dir = {
'apache_redhat': '/etc/httpd/conf.d',
'apache_debian': '/etc/apache2/sites-available',
}
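    # Illustrative result (hypothetical names): webserver 'apache' on a debian host with
    # project_name 'mysite' and environment 'staging' resolves to
    #   /etc/apache2/sites-available/mysite_staging.conf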
key = env.webserver + '_' + _linux_type()
if key in webserver_conf_dir:
return path.join(webserver_conf_dir[key],
'%s_%s.conf' % (env.project_name, env.environment))
else:
utils.abort('webserver %s is not supported (linux type %s)' %
(env.webserver, _linux_type()))
def webserver_configtest():
""" test webserver configuration """
tests = {
'apache_redhat': '/usr/sbin/httpd -S',
'apache_debian': '/usr/sbin/apache2ctl -S',
}
if env.webserver:
key = env.webserver + '_' + _linux_type()
if key in tests:
sudo(tests[key])
else:
utils.abort('webserver %s is not supported (linux type %s)' %
(env.webserver, _linux_type()))
def webserver_reload():
""" reload webserver on remote host """
webserver_cmd('reload')
def webserver_restart():
""" restart webserver on remote host """
webserver_cmd('restart')
def webserver_cmd(cmd):
""" run cmd against webserver init.d script """
cmd_strings = {
'apache_redhat': '/etc/init.d/httpd',
'apache_debian': '/etc/init.d/apache2',
}
if env.webserver:
key = env.webserver + '_' + _linux_type()
if key in cmd_strings:
sudo(cmd_strings[key] + ' ' + cmd)
else:
utils.abort('webserver %s is not supported' % env.webserver)
| qris/mailer-dye | dye/fablib.py | Python | gpl-3.0 | 29,664 |
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from kombu import Connection
from st2common import log as logging
from st2common.constants import action as action_constants
from st2common.exceptions.db import StackStormDBObjectNotFoundError
from st2common.models.db.liveaction import LiveActionDB
from st2common.services import action as action_service
from st2common.persistence.liveaction import LiveAction
from st2common.persistence.policy import Policy
from st2common import policies
from st2common.transport import consumers, liveaction
from st2common.transport import utils as transport_utils
from st2common.util import action_db as action_utils
__all__ = [
'get_scheduler',
]
LOG = logging.getLogger(__name__)
ACTIONRUNNER_REQUEST_Q = liveaction.get_status_management_queue(
'st2.actionrunner.req', routing_key=action_constants.LIVEACTION_STATUS_REQUESTED)
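# The queue above binds to the liveaction status-management exchange with the
# 'requested' routing key; process() below still re-checks the status so that
# requests that have already moved on are simply ignored.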
class ActionExecutionScheduler(consumers.MessageHandler):
message_type = LiveActionDB
def process(self, request):
"""Schedules the LiveAction and publishes the request
to the appropriate action runner(s).
        LiveActions in statuses other than "requested" are ignored.
:param request: Action execution request.
:type request: ``st2common.models.db.liveaction.LiveActionDB``
"""
if request.status != action_constants.LIVEACTION_STATUS_REQUESTED:
LOG.info('%s is ignoring %s (id=%s) with "%s" status.',
self.__class__.__name__, type(request), request.id, request.status)
return
try:
liveaction_db = action_utils.get_liveaction_by_id(request.id)
except StackStormDBObjectNotFoundError:
LOG.exception('Failed to find liveaction %s in the database.', request.id)
raise
# Apply policies defined for the action.
liveaction_db = self._apply_pre_run_policies(liveaction_db=liveaction_db)
# Exit if the status of the request is no longer runnable.
        # The status could have been changed by one of the policies.
if liveaction_db.status not in [action_constants.LIVEACTION_STATUS_REQUESTED,
action_constants.LIVEACTION_STATUS_SCHEDULED]:
LOG.info('%s is ignoring %s (id=%s) with "%s" status after policies are applied.',
self.__class__.__name__, type(request), request.id, liveaction_db.status)
return
# Update liveaction status to "scheduled".
if liveaction_db.status == action_constants.LIVEACTION_STATUS_REQUESTED:
liveaction_db = action_service.update_status(
liveaction_db, action_constants.LIVEACTION_STATUS_SCHEDULED, publish=False)
# Publish the "scheduled" status here manually. Otherwise, there could be a
# race condition with the update of the action_execution_db if the execution
# of the liveaction completes first.
LiveAction.publish_status(liveaction_db)
def _apply_pre_run_policies(self, liveaction_db):
# Apply policies defined for the action.
policy_dbs = Policy.query(resource_ref=liveaction_db.action, enabled=True)
LOG.debug('Applying %s pre_run policies' % (len(policy_dbs)))
for policy_db in policy_dbs:
driver = policies.get_driver(policy_db.ref,
policy_db.policy_type,
**policy_db.parameters)
try:
LOG.debug('Applying pre_run policy "%s" (%s) for liveaction %s' %
(policy_db.ref, policy_db.policy_type, str(liveaction_db.id)))
liveaction_db = driver.apply_before(liveaction_db)
except:
LOG.exception('An exception occurred while applying policy "%s".', policy_db.ref)
if liveaction_db.status == action_constants.LIVEACTION_STATUS_DELAYED:
break
return liveaction_db
def get_scheduler():
with Connection(transport_utils.get_messaging_urls()) as conn:
return ActionExecutionScheduler(conn, [ACTIONRUNNER_REQUEST_Q])
| lakshmi-kannan/st2 | st2actions/st2actions/scheduler.py | Python | apache-2.0 | 4,884 |
from bcpp_subject_form_validators import HeartAttackFormValidator
from ..models import HeartAttack
from .form_mixins import SubjectModelFormMixin
class HeartAttackForm(SubjectModelFormMixin):
form_validator_cls = HeartAttackFormValidator
class Meta:
model = HeartAttack
fields = '__all__'
| botswana-harvard/bcpp-subject | bcpp_subject/forms/heart_attack_form.py | Python | gpl-3.0 | 319 |
# -*- coding: utf-8 -*-
#
# Copyright 2019 Collabora Ltd
#
# This library is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
"""C language support for Hotdoc
This extension provides support for providing documentation in C
"""
from hotdoc.extensions.gi.language import *
from hotdoc.extensions.gi.utils import *
# FIXME: Avoid the use of a global dictionary
TRANSLATED = {}
class CLanguage(Language):
language_name = 'c'
def __init__(self):
Language.__init__(self)
def make_translations(self, unique_name, node):
if node.tag == core_ns('member'):
TRANSLATED[unique_name] = unique_name
elif c_ns('identifier') in node.attrib:
TRANSLATED[unique_name] = unique_name
elif c_ns('type') in node.attrib:
TRANSLATED[unique_name] = unique_name
elif node.tag == core_ns('field'):
components = []
get_field_c_name_components(node, components)
display_name = '.'.join(components[1:])
TRANSLATED[unique_name] = display_name
elif node.tag == core_ns('virtual-method'):
display_name = node.attrib['name']
TRANSLATED[unique_name] = display_name
elif node.tag == core_ns('property'):
display_name = node.attrib['name']
TRANSLATED[unique_name] = display_name
elif node.attrib.get(glib_ns('fundamental')) == '1':
TRANSLATED[unique_name] = node.attrib[glib_ns('type-name')]
else:
TRANSLATED[unique_name] = node.attrib.get('name')
def get_translation(self, unique_name):
return TRANSLATED.get (unique_name)
FUNDAMENTALS[CLanguage.language_name] = {
"GParam": Link("https://developer.gnome.org/gobject/stable/gobject-GParamSpec.html#GParamSpec",
'GParamSpec', 'GParamSpec'),
"GInterface": Link("https://developer.gnome.org/gobject/stable/gobject-Type-Information.html#GInterfaceInfo",
'GInterface', 'GInterface')
}
def get_language_classes():
"""Nothing important, really"""
return [CLanguage]
| hotdoc/hotdoc | hotdoc/extensions/gi/languages/c.py | Python | lgpl-2.1 | 2,676 |
import logging
logger = logging.getLogger(__name__)
import re
from subprocess import Popen, PIPE
version_declaration_pattern = r'\s*#\s*version\s+(\d+)\s+es\s*'
layour_qualifier_pattern = r'layout\s*\(\s*location\s*=\s*(\d+)\s*\)\s*'
sampler2DArray_pattern = r'\bsampler2DArray\b'
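# Illustrative matches (assumed inputs):
#   re.match(version_declaration_pattern, '#version 300 es').group(1)  -> '300'
#   re.search(layour_qualifier_pattern, 'layout(location = 2) in vec4 pos;')   # qualifier is stripped below
#   re.search(sampler2DArray_pattern, 'uniform sampler2DArray tex;')           # rewritten to sampler3D below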
def Preprocess(input_text):
version = 100
lines = input_text.splitlines()
# check empty input
if len(lines) == 0:
return '', version
# check the version
# The valid format for version declaration:
# whitespace_opt POUND whitespace_opt VERSION whitespace number whitespace ES whitespace_opt
match = re.match(version_declaration_pattern, lines[0])
if match:
version = int(match.group(1))
lines[0] = ''
input_text = '\n'.join(lines)
try:
# Note the use of universal_newlines to treat all newlines
# as \n for Python's purpose
#
command = ['cpp', '-DGL_ES']
logger.debug('Preprocess Command : %s' % ' '.join(command))
pipe = Popen(command, stdin=PIPE, stdout=PIPE, universal_newlines=True)
text, error = pipe.communicate(input=input_text)
if error:
logger.error('Preprocess Error : %s' % error)
except OSError as e:
raise RuntimeError("Unable to invoke 'cpp'. " +
'Make sure its path was passed correctly\n' +
('Original error: %s' % e))
# remove the leading comment lines
new_lines = []
line_number = 1
line_marker_pattern = r'# (\d+) ".*"'
for line in text.splitlines():
match = re.match(line_marker_pattern, line)
if match:
next_line_number = int(match.group(1))
new_lines += [''] * (next_line_number - line_number)
line_number = next_line_number
else:
new_lines.append(line)
line_number += 1
text = '\n'.join(new_lines)
return text, version
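# Added usage sketch (illustrative, not part of the original module): assuming
# the 'cpp' binary is on PATH, Preprocess() expands macros and reports the
# declared GLSL ES version, e.g.
#
#   text, version = Preprocess('#version 300 es\nvoid main() {}\n')
#   # version == 300; text is the preprocessed source with cpp line markers
#   # replaced by blank lines so line numbers still match the input.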
def ConvertESSLToCGCCompilable(source):
lines = source.splitlines()
# convert "#version 300 es" to "#version 300"
if re.match(version_declaration_pattern, lines[0]):
lines[0] = '#version 300\n#extension GL_NV_shadow : enable\n#extension GL_OES_texture_3D : enable'
for i in range(len(lines)):
# filter away layout qualifiers
        match = re.search(layout_qualifier_pattern, lines[i])
if match:
lines[i] = lines[i].replace(match.group(), '')
# replace sampler2DArray with sampler3D
match = re.search(sampler2DArray_pattern, lines[i])
if match:
lines[i] = lines[i].replace(match.group(), 'sampler3D')
return '\n'.join(lines)
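# Added usage sketch (illustrative, not part of the original module):
#
#   src = '#version 300 es\nlayout(location = 0) in vec4 pos;\nuniform sampler2DArray tex;\n'
#   out = ConvertESSLToCGCCompilable(src)
#   # out starts with "#version 300" plus the extension pragmas above; the
#   # layout(...) qualifier is stripped and sampler2DArray becomes sampler3D.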
| hantempo/GLESDep | ShaderUtility.py | Python | mit | 2,645 |
# (C) British Crown Copyright 2014, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Test function :func:`iris.fileformats._pyke_rules.compiled_krb.\
fc_rules_cf_fc.build_cube_metadata`.
"""
from __future__ import (absolute_import, division, print_function)
# import iris tests first so that some things can be initialised before
# importing anything else
import iris.tests as tests
import numpy as np
import mock
from iris.cube import Cube
from iris.fileformats._pyke_rules.compiled_krb.fc_rules_cf_fc import \
build_cube_metadata
class TestInvalidGlobalAttributes(tests.IrisTest):
@staticmethod
def _make_engine(global_attributes=None):
if global_attributes is None:
global_attributes = {}
cf_group = mock.Mock(global_attributes=global_attributes)
cf_var = mock.Mock(
cf_name='wibble',
standard_name=None,
long_name=None,
units='m',
dtype=np.float64,
cell_methods=None,
cf_group=cf_group)
engine = mock.Mock(
cube=Cube([23]),
cf_var=cf_var)
return engine
def test_valid(self):
global_attributes = {'Conventions': 'CF-1.5',
'comment': 'Mocked test object'}
engine = self._make_engine(global_attributes)
build_cube_metadata(engine)
expected = global_attributes
self.assertEqual(engine.cube.attributes, expected)
def test_invalid(self):
global_attributes = {'Conventions': 'CF-1.5',
'comment': 'Mocked test object',
'calendar': 'standard'}
engine = self._make_engine(global_attributes)
with mock.patch('warnings.warn') as warn:
build_cube_metadata(engine)
# Check for a warning.
self.assertEqual(warn.call_count, 1)
self.assertIn("Skipping global attribute 'calendar'",
warn.call_args[0][0])
# Check resulting attributes. The invalid entry 'calendar'
# should be filtered out.
global_attributes.pop('calendar')
expected = global_attributes
self.assertEqual(engine.cube.attributes, expected)
if __name__ == "__main__":
tests.main()
| Jozhogg/iris | lib/iris/tests/unit/fileformats/pyke_rules/compiled_krb/fc_rules_cf_fc/test_build_cube_metadata.py | Python | lgpl-3.0 | 2,908 |
import os, sys
def get_filepaths(directory):
file_paths = [] # List which will store all of the full filepaths.
for root, directories, files in os.walk(directory):
for filename in files:
# Join the two strings in order to form the full filepath.
filepath = os.path.join(root, filename)
file_paths.append(filepath) # Add it to the list.
return file_paths # Self-explanatory.
lists = get_filepaths(os.path.dirname(os.path.abspath(__file__)))
with open('result.txt', 'a') as f:
for url in lists:
        for digit in range(10):
            if os.path.basename(url).startswith('%d_' % digit):
                f.write(url)
                f.write('\n')
                f.write('%d\n' % digit) | wzyuliyang/hand-write-digit-recognition-with-opencv | tools/createpath.py | Python | mit | 1,927 |
"""passlib.handlers.digests - plain hash digests
"""
#=============================================================================
# imports
#=============================================================================
# core
import hashlib
import logging; log = logging.getLogger(__name__)
# site
# pkg
from passlib.utils import to_native_str, to_bytes, render_bytes, consteq
from passlib.utils.compat import unicode, str_to_uascii
import passlib.utils.handlers as uh
from passlib.crypto.digest import lookup_hash
# local
__all__ = [
"create_hex_hash",
"hex_md4",
"hex_md5",
"hex_sha1",
"hex_sha256",
"hex_sha512",
]
#=============================================================================
# helpers for hexadecimal hashes
#=============================================================================
class HexDigestHash(uh.StaticHandler):
"""this provides a template for supporting passwords stored as plain hexadecimal hashes"""
#===================================================================
# class attrs
#===================================================================
_hash_func = None # hash function to use - filled in by create_hex_hash()
checksum_size = None # filled in by create_hex_hash()
checksum_chars = uh.HEX_CHARS
#===================================================================
# methods
#===================================================================
@classmethod
def _norm_hash(cls, hash):
return hash.lower()
def _calc_checksum(self, secret):
if isinstance(secret, unicode):
secret = secret.encode("utf-8")
return str_to_uascii(self._hash_func(secret).hexdigest())
#===================================================================
# eoc
#===================================================================
def create_hex_hash(digest, module=__name__):
# NOTE: could set digest_name=hash.name for cpython, but not for some other platforms.
info = lookup_hash(digest)
name = "hex_" + info.name
return type(name, (HexDigestHash,), dict(
name=name,
__module__=module, # so ABCMeta won't clobber it
_hash_func=staticmethod(info.const), # sometimes it's a function, sometimes not. so wrap it.
checksum_size=info.digest_size*2,
__doc__="""This class implements a plain hexadecimal %s hash, and follows the :ref:`password-hash-api`.
It supports no optional or contextual keywords.
""" % (info.name,)
))
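# Added illustrative note (not from the original file): handlers produced by
# create_hex_hash() expose the usual passlib handler interface, so (assuming a
# recent passlib) something like
#   digest = hex_sha256.hash("secret")    # 64-character lowercase hex string
#   hex_sha256.verify("secret", digest)   # True
# is expected to work.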
#=============================================================================
# predefined handlers
#=============================================================================
hex_md4 = create_hex_hash("md4")
hex_md5 = create_hex_hash("md5")
hex_md5.django_name = "unsalted_md5"
hex_sha1 = create_hex_hash("sha1")
hex_sha256 = create_hex_hash("sha256")
hex_sha512 = create_hex_hash("sha512")
#=============================================================================
# htdigest
#=============================================================================
class htdigest(uh.MinimalHandler):
"""htdigest hash function.
.. todo::
document this hash
"""
name = "htdigest"
setting_kwds = ()
context_kwds = ("user", "realm", "encoding")
default_encoding = "utf-8"
@classmethod
def hash(cls, secret, user, realm, encoding=None):
# NOTE: this was deliberately written so that raw bytes are passed through
# unchanged, the encoding kwd is only used to handle unicode values.
if not encoding:
encoding = cls.default_encoding
uh.validate_secret(secret)
if isinstance(secret, unicode):
secret = secret.encode(encoding)
user = to_bytes(user, encoding, "user")
realm = to_bytes(realm, encoding, "realm")
data = render_bytes("%s:%s:%s", user, realm, secret)
return hashlib.md5(data).hexdigest()
@classmethod
def _norm_hash(cls, hash):
"""normalize hash to native string, and validate it"""
hash = to_native_str(hash, param="hash")
if len(hash) != 32:
raise uh.exc.MalformedHashError(cls, "wrong size")
for char in hash:
if char not in uh.LC_HEX_CHARS:
raise uh.exc.MalformedHashError(cls, "invalid chars in hash")
return hash
@classmethod
def verify(cls, secret, hash, user, realm, encoding="utf-8"):
hash = cls._norm_hash(hash)
other = cls.hash(secret, user, realm, encoding)
return consteq(hash, other)
@classmethod
def identify(cls, hash):
try:
cls._norm_hash(hash)
except ValueError:
return False
return True
@uh.deprecated_method(deprecated="1.7", removed="2.0")
@classmethod
def genconfig(cls):
return cls.hash("", "", "")
@uh.deprecated_method(deprecated="1.7", removed="2.0")
@classmethod
def genhash(cls, secret, config, user, realm, encoding=None):
# NOTE: 'config' is ignored, as this hash has no salting / other configuration.
# just have to make sure it's valid.
cls._norm_hash(config)
return cls.hash(secret, user, realm, encoding)
#=============================================================================
# eof
#=============================================================================
| morreene/tradenews | venv/Lib/site-packages/passlib/handlers/digests.py | Python | bsd-3-clause | 5,452 |
from conans import ConanFile, CMake
class AversivePlusPlusModuleConan(ConanFile):
name = "stm32cube-hal-stm32f4xx"
version = "0.1"
exports = "*"
settings = "os", "compiler", "build_type", "arch", "target"
requires = "cmsis-stm32f4xx/0.1@AversivePlusPlus/dev", "toolchain-switch/0.1@AversivePlusPlus/dev"
generators = "cmake"
def imports(self):
self.copy("*.cmake", dst="toolchain", src="toolchain")
self.copy("*.ld", dst="toolchain/linker", src="linker")
def build(self):
cmake = CMake(self.settings)
toolchain = '-DCMAKE_TOOLCHAIN_FILE=toolchain/%s.cmake' % self.settings.target
self.run('cmake "%s" %s %s' % (self.conanfile_directory, cmake.command_line, toolchain))
self.run('cmake --build . %s' % cmake.build_config)
def package(self):
self.copy("*.hpp", src="include", dst="include")
self.copy("*.h", src="include", dst="include")
self.copy("*.a", src="lib", dst="lib")
| AversivePlusPlus/AversivePlusPlus | modules/thirdparty/stm32cube-hal-stm32f4xx/conanfile.py | Python | bsd-3-clause | 970 |
"""
De-linearize skin weights.
.. figure:: https://github.com/robertjoosten/rjSkinningTools/raw/master/delinearWeights/README.gif
:align: center
Installation
============
Copy the **rjSkinningTools** folder to your Maya scripts directory
::
C:/Users/<USER>/Documents/maya/scripts
Usage
=====
Display UI
::
import rjSkinningTools.delinearWeights.ui
rjSkinningTools.delinearWeights.ui.show()
Note
====
Delinear weights applies an easing algorithm to the skin weights. This tool
is best used if skin weights have been copied from a low polygon source, when
this is done sometimes it is very obvious that the weights are linearly
divided between the vertices of the low polygon source. This tool will tween
those weights.
Code
====
"""
from maya import cmds, mel, OpenMayaAnim
from rjSkinningTools import utils
from . import tweening
__author__ = "Robert Joosten"
__version__ = "0.7.0"
__email__ = "[email protected]"
# ----------------------------------------------------------------------------
def getTweeningMethod(method):
"""
Get the tweening method from a string, if the function doesn't exists
None will be returned.
:return: Tweening function
:rtype: func/None
"""
if method in dir(tweening):
return getattr(tweening, method)
# ----------------------------------------------------------------------------
def getSelectedVertices():
"""
Get all of the selected vertices. If no component mode selection is made
all vertices of a selected mesh will be appended to the selection.
:return: List of selected vertices
:rtype: list of strings
"""
# get vertices
vertices = [
vtx
for vtx in cmds.ls(sl=True, fl=True, l=True)
if vtx.count(".vtx")
]
# append meshes
meshes = utils.getMeshesFromSelection()
for mesh in meshes:
vertices.extend(
cmds.ls(
"{0}.vtx[*]".format(mesh),
fl=True,
l=True
)
)
return vertices
# ----------------------------------------------------------------------------
def getIndexFromString(vtx):
"""
Get the index from a component string.
:param str vtx: Path to component
:return: Index of component string
:rtype: int
"""
return int(vtx.split("[")[-1][:-1])
def splitByInfluences(weights, num):
"""
Split a list of weights into the size of the number of influences.
:param list weights: List of weights
:param int num: Size of split
    :return: List of weight chunks, one list of num weights per vertex
    :rtype: list
"""
chunks = []
for i in xrange(0, len(weights), num):
chunks.append(weights[i:i + num])
return chunks
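# Added illustrative example (not in the original source): with a skin cluster
# holding 3 influences, the flat weight list of two vertices
#   splitByInfluences([1.0, 0.0, 0.0, 0.2, 0.8, 0.0], 3)
# yields [[1.0, 0.0, 0.0], [0.2, 0.8, 0.0]], i.e. one weight chunk per vertex.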
# ----------------------------------------------------------------------------
def deLinearSkinWeightsOnSelection(method):
"""
All of the selected vertices will be queried with the
:func:`getSelectedVertices` function, these vertices will then be parsed
to the :func:`deLinearSkinWeights` function that will process the weights.
:param str method: De-linearization method
"""
vertices = getSelectedVertices()
deLinearSkinWeights(vertices, method)
def deLinearSkinWeights(vertices, method):
"""
Loop over all of the provided vertices. Loop over all of the vertices and
see if these vertices are deformed by a skin cluster. If this is the case,
the weights will be de-linearized by the function provided. This function
is found in the tweening module using :func:`getTweeningMethod`.
:param list vertices: List of vertices
:param str method: De-linearization method
"""
func = getTweeningMethod(method)
if not func:
raise ValueError("Tweening method is not supported.")
data = {}
objects = list(set([vtx.split(".")[0] for vtx in vertices]))
with utils.UndoChunkContext():
for obj in objects:
# get skin cluster
sk = utils.getSkinCluster(obj)
if not sk:
continue
# get indices
indices = [
getIndexFromString(vtx)
for vtx in vertices
if vtx.startswith(obj)
]
# get api objects
meshObj = utils.asMObject(obj)
meshDag = utils.asMDagPath(meshObj)
meshDag.extendToShape()
skObj = utils.asMObject(sk)
skMfn = OpenMayaAnim.MFnSkinCluster(skObj)
# get weights
components = utils.asComponent(indices)
weightsAll, num = utils.getSkinWeights(
meshDag,
skMfn,
components
)
# split weights
weightChunks = splitByInfluences(weightsAll, num)
for i, weights in enumerate(weightChunks):
# calculate per vertex weights
weights = utils.normalizeWeights(weights)
weights = [func(w) for w in weights]
weights = utils.normalizeWeights(weights)
# set weights
for j, w in enumerate(weights):
cmds.setAttr(
"{0}.weightList[{1}].weights[{2}]".format(
sk,
indices[i],
j
),
w
)
| josephkirk/PipelineTools | packages/rjTools/delinearWeights/__init__.py | Python | bsd-2-clause | 4,744 |
# Under MIT license, see LICENSE.txt
""" Constantes concernant les tactiques. """
# Flags
INIT = 0
WIP = 1
FAILURE = 2
SUCCESS = 3
DEFAULT_TIME_TO_LIVE = 0.5
def is_complete(p_status_flag):
return p_status_flag == FAILURE or p_status_flag == SUCCESS
| agingrasc/StrategyIA | ai/STA/Tactic/tactic_constants.py | Python | mit | 256 |
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Configuration options for the reverse monitor application.
"""
from django.utils.translation import ugettext_lazy as _
from desktop.lib.conf import Config, ConfigSection
DATA_SERVICE = ConfigSection(
key='di-service',
  help=_('Configuration options for the DI data service'),
members=dict(
DI_DATA_SERVICE_URL = Config(
key="di_data_service_url",
help=_("The Consumer key of the application."),
type=str,
default="http://localhost:8080/di-data-service/"
)
)
)
| Ctrip-DI/Hue-Ctrip-DI | monitor/src/monitor/conf.py | Python | mit | 1,336 |
# coding=utf-8
from __future__ import unicode_literals
from frappe import _
def get_data():
return [
{
"module_name": "Item",
"_doctype": "Item",
"color": "#f39c12",
"icon": "octicon octicon-package",
"type": "link",
"link": "List/Item"
},
{
"module_name": "Customer",
"_doctype": "Customer",
"color": "#1abc9c",
"icon": "octicon octicon-tag",
"type": "link",
"link": "List/Customer"
},
{
"module_name": "Supplier",
"_doctype": "Supplier",
"color": "#c0392b",
"icon": "octicon octicon-briefcase",
"type": "link",
"link": "List/Supplier"
},
{
"_doctype": "Employee",
"module_name": "Employee",
"color": "#2ecc71",
"icon": "octicon octicon-organization",
"type": "link",
"link": "List/Employee"
},
{
"module_name": "Project",
"_doctype": "Project",
"color": "#8e44ad",
"icon": "octicon octicon-rocket",
"type": "link",
"link": "List/Project"
},
{
"module_name": "Issue",
"color": "#2c3e50",
"icon": "octicon octicon-issue-opened",
"_doctype": "Issue",
"type": "link",
"link": "List/Issue"
},
{
"module_name": "Lead",
"icon": "octicon octicon-broadcast",
"type": "module",
"_doctype": "Lead",
"type": "link",
"link": "List/Lead"
},
{
"module_name": "Profit and Loss Statement",
"_doctype": "Account",
"color": "#3498db",
"icon": "octicon octicon-repo",
"type": "link",
"link": "query-report/Profit and Loss Statement"
},
# old
{
"module_name": "Accounts",
"color": "#3498db",
"icon": "octicon octicon-repo",
"type": "module",
"hidden": 1
},
{
"module_name": "Stock",
"color": "#f39c12",
"icon": "fa fa-truck",
"icon": "octicon octicon-package",
"type": "module",
"hidden": 1
},
{
"module_name": "CRM",
"color": "#EF4DB6",
"icon": "octicon octicon-broadcast",
"type": "module",
"hidden": 1
},
{
"module_name": "Selling",
"color": "#1abc9c",
"icon": "fa fa-tag",
"icon": "octicon octicon-tag",
"type": "module",
"hidden": 1
},
{
"module_name": "Buying",
"color": "#c0392b",
"icon": "fa fa-shopping-cart",
"icon": "octicon octicon-briefcase",
"type": "module",
"hidden": 1
},
{
"module_name": "HR",
"color": "#2ecc71",
"icon": "fa fa-group",
"icon": "octicon octicon-organization",
"label": _("Human Resources"),
"type": "module",
"hidden": 1
},
{
"module_name": "Manufacturing",
"color": "#7f8c8d",
"icon": "fa fa-cogs",
"icon": "octicon octicon-tools",
"type": "module",
"hidden": 1
},
{
"module_name": "POS",
"color": "#589494",
"icon": "octicon octicon-credit-card",
"type": "page",
"link": "pos",
"label": _("POS")
},
{
"module_name": "Leaderboard",
"color": "#589494",
"icon": "octicon octicon-graph",
"type": "page",
"link": "leaderboard",
"label": _("Leaderboard")
},
{
"module_name": "Projects",
"color": "#8e44ad",
"icon": "fa fa-puzzle-piece",
"icon": "octicon octicon-rocket",
"type": "module",
"hidden": 1
},
{
"module_name": "Support",
"color": "#2c3e50",
"icon": "fa fa-phone",
"icon": "octicon octicon-issue-opened",
"type": "module",
"hidden": 1
},
{
"module_name": "Learn",
"color": "#FF888B",
"icon": "octicon octicon-device-camera-video",
"type": "module",
"is_help": True,
"label": _("Learn"),
"hidden": 1
},
{
"module_name": "Maintenance",
"color": "#FF888B",
"icon": "octicon octicon-tools",
"type": "module",
"label": _("Maintenance"),
"hidden": 1
},
{
"module_name": "Student",
"color": "#c0392b",
"icon": "octicon octicon-person",
"label": _("Student"),
"link": "List/Student",
"_doctype": "Student",
"type": "list",
"hidden": 1
},
{
"module_name": "Student Group",
"color": "#d59919",
"icon": "octicon octicon-organization",
"label": _("Student Group"),
"link": "List/Student Group",
"_doctype": "Student Group",
"type": "list",
"hidden": 1
},
{
"module_name": "Course Schedule",
"color": "#fd784f",
"icon": "octicon octicon-calendar",
"label": _("Course Schedule"),
"link": "Calendar/Course Schedule",
"_doctype": "Course Schedule",
"type": "list",
"hidden": 1
},
{
"module_name": "Student Attendance Tool",
"color": "#C0392B",
"icon": "octicon octicon-checklist",
"label": _("Student Attendance Tool"),
"link": "List/Student Attendance Tool",
"_doctype": "Student Attendance Tool",
"type": "list",
"hidden": 1
},
{
"module_name": "Course",
"color": "#8e44ad",
"icon": "octicon octicon-book",
"label": _("Course"),
"link": "List/Course",
"_doctype": "Course",
"type": "list",
"hidden": 1
},
{
"module_name": "Program",
"color": "#9b59b6",
"icon": "octicon octicon-repo",
"label": _("Program"),
"link": "List/Program",
"_doctype": "Program",
"type": "list",
"hidden": 1
},
{
"module_name": "Student Applicant",
"color": "#4d927f",
"icon": "octicon octicon-clippy",
"label": _("Student Applicant"),
"link": "List/Student Applicant",
"_doctype": "Student Applicant",
"type": "list",
"hidden": 1
},
{
"module_name": "Fees",
"color": "#83C21E",
"icon": "fa fa-money",
"label": _("Fees"),
"link": "List/Fees",
"_doctype": "Fees",
"type": "list",
"hidden": 1
},
{
"module_name": "Instructor",
"color": "#a99e4c",
"icon": "octicon octicon-broadcast",
"label": _("Instructor"),
"link": "List/Instructor",
"_doctype": "Instructor",
"type": "list",
"hidden": 1
},
{
"module_name": "Room",
"color": "#f22683",
"icon": "fa fa-map-marker",
"label": _("Room"),
"link": "List/Room",
"_doctype": "Room",
"type": "list",
"hidden": 1
},
{
"module_name": "Education",
"color": "#428B46",
"icon": "octicon octicon-mortar-board",
"type": "module",
"label": _("Education"),
"hidden": 1
},
{
"module_name": "Healthcare",
"color": "#FF888B",
"icon": "fa fa-heartbeat",
"type": "module",
"label": _("Healthcare"),
"hidden": 1
},
{
"module_name": "Patient",
"color": "#6BE273",
"icon": "fa fa-user",
"doctype": "Patient",
"type": "link",
"link": "List/Patient",
"label": _("Patient"),
"hidden": 1
},
{
"module_name": "Patient Appointment",
"color": "#934F92",
"icon": "fa fa-calendar-plus-o",
"doctype": "Patient Appointment",
"type": "link",
"link": "List/Patient Appointment",
"label": _("Patient Appointment"),
"hidden": 1
},
{
"module_name": "Consultation",
"color": "#2ecc71",
"icon": "fa fa-stethoscope",
"doctype": "Consultation",
"type": "link",
"link": "List/Consultation",
"label": _("Consultation"),
"hidden": 1
},
{
"module_name": "Lab Test",
"color": "#7578f6",
"icon": "octicon octicon-beaker",
"doctype": "Lab Test",
"type": "list",
"link": "List/Lab Test",
"label": _("Lab Test"),
"hidden": 1
},
{
"module_name": "Hub",
"color": "#009248",
"icon": "/assets/erpnext/images/hub_logo.svg",
"type": "page",
"link": "hub",
"label": _("Hub")
},
{
"module_name": "Data Import",
"color": "#FFF168",
"reverse": 1,
"doctype": "Data Import",
"icon": "octicon octicon-cloud-upload",
"label": _("Data Import"),
"link": "List/Data Import",
"type": "list"
},
{
"module_name": "Restaurant",
"color": "#EA81E8",
"icon": "🍔",
"_doctype": "Restaurant",
"type": "list",
"link": "List/Restaurant",
"label": _("Restaurant"),
"hidden": 1
},
{
"module_name": "Agriculture",
"color": "#8BC34A",
"icon": "octicon octicon-globe",
"type": "module",
"label": _("Agriculture"),
"hidden": 1
},
{
"module_name": "Crop",
"_doctype": "Crop",
"label": _("Crop"),
"color": "#8BC34A",
"icon": "fa fa-tree",
"type": "list",
"link": "List/Crop",
"hidden": 1
},
{
"module_name": "Crop Cycle",
"_doctype": "Crop Cycle",
"label": _("Crop Cycle"),
"color": "#8BC34A",
"icon": "fa fa-circle-o-notch",
"type": "list",
"link": "List/Crop Cycle",
"hidden": 1
},
{
"module_name": "Fertilizer",
"_doctype": "Fertilizer",
"label": _("Fertilizer"),
"color": "#8BC34A",
"icon": "fa fa-leaf",
"type": "list",
"link": "List/Fertilizer",
"hidden": 1
},
{
"module_name": "Land Unit",
"_doctype": "Land Unit",
"label": _("Land Unit"),
"color": "#8BC34A",
"icon": "fa fa-map",
"type": "list",
"link": "List/Land Unit",
"hidden": 1
},
{
"module_name": "Disease",
"_doctype": "Disease",
"label": _("Disease"),
"color": "#8BC34A",
"icon": "octicon octicon-bug",
"type": "list",
"link": "List/Disease",
"hidden": 1
},
{
"module_name": "Plant Analysis",
"_doctype": "Plant Analysis",
"label": _("Plant Analysis"),
"color": "#8BC34A",
"icon": "fa fa-pagelines",
"type": "list",
"link": "List/Plant Analysis",
"hidden": 1
},
{
"module_name": "Soil Analysis",
"_doctype": "Soil Analysis",
"label": _("Soil Analysis"),
"color": "#8BC34A",
"icon": "fa fa-flask",
"type": "list",
"link": "List/Soil Analysis",
"hidden": 1
},
{
"module_name": "Soil Texture",
"_doctype": "Soil Texture",
"label": _("Soil Texture"),
"color": "#8BC34A",
"icon": "octicon octicon-beaker",
"type": "list",
"link": "List/Soil Texture",
"hidden": 1
},
{
"module_name": "Water Analysis",
"_doctype": "Water Analysis",
"label": _("Water Analysis"),
"color": "#8BC34A",
"icon": "fa fa-tint",
"type": "list",
"link": "List/Water Analysis",
"hidden": 1
},
{
"module_name": "Weather",
"_doctype": "Weather",
"label": _("Weather"),
"color": "#8BC34A",
"icon": "fa fa-sun-o",
"type": "list",
"link": "List/Weather",
"hidden": 1
},
{
"module_name": "Assets",
"color": "#4286f4",
"icon": "octicon octicon-database",
"hidden": 1,
"label": _("Assets"),
"type": "module"
},
{
"module_name": "Grant Application",
"color": "#E9AB17",
"icon": "fa fa-gift",
"_doctype": "Grant Application",
"type": "list",
"link": "List/Grant Application",
"label": _("Grant Application"),
"hidden": 1
},
{
"module_name": "Donor",
"color": "#7F5A58",
"icon": "fa fa-tint",
"_doctype": "Donor",
"type": "list",
"link": "List/Donor",
"label": _("Donor"),
"hidden": 1
},
{
"module_name": "Volunteer",
"color": "#7E587E",
"icon": "fa fa-angellist",
"_doctype": "Volunteer",
"type": "list",
"link": "List/Volunteer",
"label": _("Volunteer"),
"hidden": 1
},
{
"module_name": "Member",
"color": "#79BAEC",
"icon": "fa fa-users",
"_doctype": "Member",
"type": "list",
"link": "List/Member",
"label": _("Member"),
"hidden": 1
},
{
"module_name": "Chapter",
"color": "#3B9C9C",
"icon": "fa fa-handshake-o",
"_doctype": "Chapter",
"type": "list",
"link": "List/Chapter",
"label": _("Chapter"),
"hidden": 1
},
{
"module_name": "Non Profit",
"color": "#DE2B37",
"icon": "octicon octicon-heart",
"type": "module",
"label": _("Non Profit"),
"hidden": 1
}
]
| adityaduggal/erpnext | erpnext/config/desktop.py | Python | gpl-3.0 | 11,448 |
"""engine.SCons.Tool.sunar
Tool-specific initialization for Solaris (Forte) ar (library archive). If CC
exists, static libraries should be built with it, so that template
instantiations can be resolved.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/sunar.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
import SCons.Defaults
import SCons.Tool
import SCons.Util
def generate(env):
"""Add Builders and construction variables for ar to an Environment."""
SCons.Tool.createStaticLibBuilder(env)
if env.Detect('CC'):
env['AR'] = 'CC'
env['ARFLAGS'] = SCons.Util.CLVar('-xar')
env['ARCOM'] = '$AR $ARFLAGS -o $TARGET $SOURCES'
else:
env['AR'] = 'ar'
env['ARFLAGS'] = SCons.Util.CLVar('r')
env['ARCOM'] = '$AR $ARFLAGS $TARGET $SOURCES'
env['SHLINK'] = '$LINK'
env['SHLINKFLAGS'] = SCons.Util.CLVar('$LINKFLAGS -G')
env['SHLINKCOM'] = '$SHLINK $SHLINKFLAGS -o $TARGET $SOURCES $_LIBDIRFLAGS $_LIBFLAGS'
env['LIBPREFIX'] = 'lib'
env['LIBSUFFIX'] = '.a'
def exists(env):
return env.Detect('CC') or env.Detect('ar')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| ake-koomsin/mapnik_nvpr | scons/scons-local-2.2.0/SCons/Tool/sunar.py | Python | lgpl-2.1 | 2,593 |
import BaseHTTPServer
import SocketServer
import random
import time
from django.core.servers.basehttp import WSGIServer, WSGIRequestHandler
from django.conf import settings
class RandomWaitMixin(object):
def process_request(self, *args, **kwargs):
if getattr(settings, 'CONCURRENT_RANDOM_DELAY', None):
time.sleep(random.random()/3)
return super(RandomWaitMixin, self).process_request(*args, **kwargs)
class ThreadedServer(RandomWaitMixin, SocketServer.ThreadingMixIn, WSGIServer):
def __init__(self, server_address, RequestHandlerClass=None):
BaseHTTPServer.HTTPServer.__init__(self, server_address, RequestHandlerClass)
class ForkedServer(RandomWaitMixin, SocketServer.ForkingMixIn, WSGIServer):
def __init__(self, server_address, RequestHandlerClass=None):
BaseHTTPServer.HTTPServer.__init__(self, server_address, RequestHandlerClass)
def run(addr, port, wsgi_handler):
server_address = (addr, port)
threaded = True # else forked
if hasattr(settings, 'CONCURRENT_THREADING'):
threaded = settings.CONCURRENT_THREADING
if threaded:
httpd = ThreadedServer(server_address, WSGIRequestHandler)
else:
httpd = ForkedServer(server_address, WSGIRequestHandler)
httpd.set_app(wsgi_handler)
httpd.serve_forever()
| bancek/egradebook | src/lib/concurrent_server/servers.py | Python | gpl-3.0 | 1,319 |
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 15 15:12:48 2016
@author: acbart
"""
import matplotlib
import matplotlib.pyplot as plt
import real_estate
list_of_building = real_estate.get_buildings()
owned_VA = 0
leased_VA = 0
owned_PA = 0
leased_PA = 0
owned_MD = 0
leased_MD = 0
owned_buildings = []
leased_buildings = []
for report in list_of_building:
    if report["location"]["address"]["state"] == "VA":
        if report["data"]["owned or leased"] == "OWNED":
            owned_VA = owned_VA + 1
        else:
            leased_VA = leased_VA + 1
    elif report["location"]["address"]["state"] == "PA":
        if report["data"]["owned or leased"] == "OWNED":
            owned_PA = owned_PA + 1
        else:
            leased_PA = leased_PA + 1
    elif report["location"]["address"]["state"] == "MD":
        if report["data"]["owned or leased"] == "OWNED":
            owned_MD = owned_MD + 1
        else:
            leased_MD = leased_MD + 1
owned_buildings = [owned_VA, owned_PA, owned_MD]
leased_buildings = [leased_VA, leased_PA, leased_MD]
labels = ["Virginia", "Pennsylvania", "Maryland"]
xcoords = range(len(labels))
plt.bar(xcoords, owned_buildings, tick_label= labels, align='center')
plt.title("Buildings Owned or Leased in Mid-Atlantic States")
plt.xlabel("States")
plt.ylabel("Number owned or leased")
plt.legend()
plt.show() | RealTimeWeb/datasets | datasets/python/real_estate/test.py | Python | gpl-2.0 | 1,333 |
import copy
import flask
import pickle
import logging
from pprint import pprint
import os
import hashlib
from flask import Flask, request
from .pyfeed import startpoint
log = logging.getLogger()
log.level = logging.DEBUG
#fh = logging.FileHandler('/tmp/web.log')
formatter = logging.Formatter('%(asctime)s - %(name)s - %(message)s')
#fh.setFormatter(formatter)
#log.addHandler(fh)
RDB = {'sites':{}, 'groups':{}}
DBNAME = '/tmp/plfeed.db'
class SiteDetails(object):
def __init__(self, feed, hashvalue, unread=0, current=False):
self.url = ""
self.feed = feed
self.hashvalue = hashvalue
self.unread = unread
self.current = current
self.items = []
self.item_urls = {}
class GroupDetails(object):
def __init__(self, name):
self.name = name
self.items = []
def save_db():
"Saves all the information into a pickle file"
global RDB
with open(DBNAME, 'wb') as fobj:
pickle.dump(RDB, fobj)
def get_all_site_details(name):
"""
Creates the all sites details for the navbar
:return: A list of sites.
"""
global RDB
result = []
groups = RDB['groups']
for g, gsites in groups.items():
ir = GroupDetails(g)
for k in gsites:
data = copy.deepcopy(RDB['sites'].get(k))
if data.hashvalue == name.strip():
data.current = True
ir.items.append(data)
result.append(ir)
return result
app = Flask(__name__)
@app.route('/')
def hello_world():
global RDB
sites = RDB['sites']
sites = {v.feed:k for k,v in sites.items()}
return flask.render_template('home.html', links=sites)
@app.route('/read/<name>/')
def read_a_site(name):
global RDB
if name not in RDB['sites']:
return flask.redirect(flask.url_for('hello_world')), 404
allsites = get_all_site_details(name)
site = RDB['sites'][name]
site.unread = 0
RDB['sites'][name] = site
posts = site.items
return flask.render_template('posts.html', posts=posts, allsites=allsites)
@app.route('/addsites/<group>/',methods=['GET', 'POST'])
def addsite(group):
global RDB
if request.method == 'POST':
url = request.form['url']
url = url.strip()
grp = request.form['group']
grp = grp.strip()
hash = hashlib.sha1(url.encode()).hexdigest()
site = SiteDetails(url, hash)
RDB['sites'][hash] = site
RDB['groups'].setdefault(grp, []).append(hash)
return flask.render_template('addsites.html', group=group)
else:
return flask.render_template('addsites.html', group=group)
@app.route('/update/')
def update_sites():
global RDB
RDB = startpoint(RDB)
save_db()
return flask.redirect(flask.url_for('hello_world'))
if os.path.exists(DBNAME):
with open(DBNAME, "rb") as fobj:
RDB = pickle.load(fobj)
if __name__ == '__main__':
if os.path.exists('/output/'):
DBNAME = '/output/site.db'
else:
DBNAME = '/tmp/site.db'
app.run(host='0.0.0.0', debug=True)
| kushaldas/personalfeed | webfeed/__init__.py | Python | gpl-3.0 | 3,085 |
import json
data = json.load(open('input/knowit22'))
from sys import stdout
def valid(t):
ma = None
mi = None
seen_any_rect = False
for row in t:
ma_l = None
mi_l = None
seen_rect = False
in_rect = False
x = 0
for c in row:
if c:
if seen_rect and not in_rect:
return False
seen_rect = True
seen_any_rect = True
in_rect = True
ma_l = x
if mi_l is None:
mi_l = x
else:
in_rect = False
x += 1
if ma_l is not None:
if ma is None:
ma = ma_l
elif ma_l != ma:
return False
if mi_l is not None:
if mi is None:
mi = mi_l
elif mi_l != mi:
return False
if ma is None or mi is None:
return False
if not seen_any_rect:
return False
return True
def print_table(t):
for row in t:
for c in row:
stdout.write(str(c) if c else '.')
stdout.write("\n")
stdout.write("\n")
results = []
for task in data:
table = []
for i in range(0, 7):
row = []
for j in range(0, 10):
row.append(False)
table.append(row)
overlap = False
iidx = 1
for instr in task:
x1, y1, x2, y2 = instr
for y in range(y1, y2):
for x in range(x1, x2):
if table[7-y-1][x+1]:
overlap = True
table[7-y-1][x+1] = hex(iidx)[2:]
iidx += 1
print_table(table)
_valid = valid(table)
print(_valid)
results.append(_valid)
print(', '.join(['true' if r else 'false' for r in results]))
| matslindh/codingchallenges | knowit2016/knowit22.py | Python | mit | 1,854 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Imports
from PyQt4.QtGui import QDialog, QIcon
import ui_qtsixa_aboutw
class AboutW(QDialog, ui_qtsixa_aboutw.Ui_AboutW):
def __init__(self, *args):
QDialog.__init__(self, *args)
self.setupUi(self)
self.setWindowIcon(QIcon(":/icons/qtsixa.png"))
| Hannimal/raspicar | wiiController/QtSixA-1.5.1/qtsixa/gui/qtsixa_about.py | Python | unlicense | 328 |
# coding: utf-8
"""
jinja2schema.visitors.util
~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
import jinja2.nodes
from ..mergers import merge
from ..model import Dictionary, Scalar, Unknown
def visit(node, macroses, config, predicted_struct_cls=Scalar, return_struct_cls=Unknown):
if isinstance(node, jinja2.nodes.Stmt):
structure = visit_stmt(node, macroses, config)
elif isinstance(node, jinja2.nodes.Expr):
ctx = Context(predicted_struct=predicted_struct_cls.from_ast(node, order_nr=config.ORDER_OBJECT.get_next()),
return_struct_cls=return_struct_cls)
_, structure = visit_expr(node, ctx, macroses, config)
elif isinstance(node, jinja2.nodes.Template):
structure = visit_many(node.body, macroses, config)
return structure
def visit_many(nodes, macroses, config, predicted_struct_cls=Scalar, return_struct_cls=Unknown):
"""Visits ``nodes`` and merges results.
:param nodes: list of :class:`jinja2.nodes.Node`
:param predicted_struct_cls: ``predicted_struct`` for expression visitors will be constructed
using this class by calling :meth:`from_ast` method
:return: :class:`Dictionary`
"""
rv = Dictionary()
for node in nodes:
if isinstance(node, jinja2.nodes.Extends):
structure = visit_extends(node, macroses, config, [x for x in nodes if isinstance(x, jinja2.nodes.Block)])
else:
structure = visit(node, macroses, config, predicted_struct_cls, return_struct_cls)
rv = merge(rv, structure)
return rv
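# Added illustrative note (not part of the original module): visit_many() merges
# the structures inferred for each node, so for a template body equivalent to
# "{{ x }} {% for item in items %}{{ item.name }}{% endfor %}" the result is
# roughly a Dictionary expecting a scalar "x" and a list "items" whose elements
# have a "name" attribute.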
# keep these at the end of file to avoid circular imports
from .expr import Context, visit_expr
from .stmt import visit_stmt, visit_extends
| aromanovich/jinja2schema | jinja2schema/visitors/util.py | Python | bsd-3-clause | 1,720 |
#! /usr/bin/env python
from __future__ import division, print_function
import pandas as pd
import sys
import os
import dill
import numpy as np
file_path = os.path.dirname(os.path.realpath(__file__))
lib_path = os.path.abspath(os.path.join(file_path, './'))
sys.path.append(lib_path)
import candle
additional_definitions = [
{'name': 'uqmode',
'type': str,
'default': None,
'choices': ['hom', 'het', 'qtl', 'contam'],
'help': 'mode of UQ regression used: homoscedastic (hom), heteroscedastic (het) or quantile (qtl)'},
{'name': 'plot_steps',
'type': candle.str2bool,
'default': False,
'help': 'plot step-by-step the computation of the empirical calibration by binning'},
{'name': 'results_filename',
'type': str,
'default': None,
'help': 'file with uq inference results'},
{'name': 'cv',
'type': int,
'default': 10,
'help': 'number of cross validations for calibration by interpolation'},
{'name': 'sigma',
'type': float,
'default': None,
'help': 'Standard deviation of normal distribution in contamination model'},
]
required = [
'uqmode',
'results_filename'
]
def coverage_80p(y_test, y_pred, std_pred, y_pred_1d=None, y_pred_9d=None):
""" Determine the fraction of the true data that falls
into the 80p coverage of the model.
For homoscedastic and heteroscedastic models the
standard deviation prediction is used.
For quantile model, if first and nineth deciles
are available, these are used instead of computing
a standard deviation based on Gaussian assumptions.
Parameters
----------
y_test : numpy array
True (observed) values array.
y_pred : numpy array
Mean predictions made by the model.
std_pred : numpy array
Standard deviation predictions made by the model.
y_pred_1d : numpy array
First decile predictions made by qtl model.
y_pred_9d : numpy array
Nineth decile predictions made by qtl model.
"""
if std_pred is None: # for qtl
topLim = y_pred_9d
botLim = y_pred_1d
else: # for hom and het
topLim = y_pred + 1.28 * std_pred
botLim = y_pred - 1.28 * std_pred
# greater than top
count_gr = np.count_nonzero(np.clip(y_test - topLim, 0., None))
# less than bottom
count_ls = np.count_nonzero(np.clip(botLim - y_test, 0., None))
count_out = count_gr + count_ls
N_test = y_test.shape[0]
frac_out = (float(count_out) / float(N_test))
return (1. - frac_out)
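# Added note (not in the original source): 1.28 is approximately the 90th
# percentile of the standard normal distribution, so y_pred +/- 1.28 * std_pred
# brackets about 80% of the probability mass under a Gaussian error model; a
# well-calibrated model should therefore give coverage_80p(...) close to 0.8.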
class CalibrationApp(candle.Benchmark):
def set_locals(self):
"""Functionality to set variables specific for the benchmark
- required: set of required parameters for the benchmark.
- additional_definitions: list of dictionaries describing the additional parameters for the
benchmark.
"""
if required is not None:
self.required = set(required)
if additional_definitions is not None:
self.additional_definitions = additional_definitions
def initialize_parameters(default_model='calibration_default.txt'):
# Build benchmark object
calBmk = CalibrationApp(file_path, default_model, 'python',
prog='calibration_main', desc='script to compute empirical calibration for UQ regression')
# config_file, rng_seed and save_path from standard
# Finalize parameters
gParameters = candle.finalize_parameters(calBmk)
return gParameters
def run(params):
candle.set_seed(params['rng_seed'])
uqmode = params['uqmode'] # hom, het, qtl
filename = params['results_filename']
cv = params['cv']
index_dp = filename.find('DR=')
if index_dp == -1: # DR is not in filename
print('No dropout rate found in filename')
print('Using -1 to denote NA')
dp_perc = -1
else:
if filename[index_dp + 6] == '.':
dp = float(filename[index_dp + 3:index_dp + 3 + 3])
else:
dp = float(filename[index_dp + 3:index_dp + 3 + 4])
        print('Dropout rate: ', dp)
dp_perc = dp * 100.
method = uqmode + ' - dropout ' + str(dp_perc) + '%'
prefix = params['output_dir'] + '/' + uqmode + '_DR=' + str(dp_perc)
df_data = pd.read_csv(filename, sep='\t')
print('data read shape: ', df_data.shape)
# compute statistics according to uqmode
if uqmode == 'hom':
if df_data.shape[1] < 9:
            print('Too few columns... Assuming that a summary (and not individual realizations) has been given as input')
Ytest, Ypred_mean, yerror, sigma, Ypred_std, pred_name = candle.compute_statistics_homoscedastic_summary(df_data)
else: # all individual realizations
Ytest, Ypred_mean, yerror, sigma, Ypred_std, pred_name = candle.compute_statistics_homoscedastic(df_data)
cov80p = coverage_80p(Ytest, Ypred_mean, sigma)
elif uqmode == 'het': # for heteroscedastic UQ
Ytest, Ypred_mean, yerror, sigma, Ypred_std, pred_name = candle.compute_statistics_heteroscedastic(df_data)
cov80p = coverage_80p(Ytest, Ypred_mean, sigma)
elif uqmode == 'qtl': # for quantile UQ
Ytest, Ypred_mean, yerror, sigma, Ypred_std, pred_name, Ypred_1d_mean, Ypred_9d_mean = candle.compute_statistics_quantile(df_data)
cov80p = coverage_80p(Ytest, Ypred_mean, None, Ypred_1d_mean, Ypred_9d_mean)
decile_list = ['5th', '1st', '9th']
candle.plot_decile_predictions(Ypred_mean, Ypred_1d_mean, Ypred_9d_mean, decile_list, pred_name, prefix)
elif uqmode == 'contam':
Ytest, Ypred_mean, yerror, sigma_, Ypred_std, pred_name = candle.compute_statistics_homoscedastic(df_data)
sigma_scalar = params['sigma']
if sigma_scalar is None:
raise Exception('ERROR ! No sigma specified for contamination model... Exiting')
sigma = sigma_scalar * np.ones(Ytest.shape[0])
cov80p = coverage_80p(Ytest, Ypred_mean, sigma)
print('Coverage (80%): ', cov80p)
candle.plot_density_observed_vs_predicted(Ytest, Ypred_mean, pred_name, prefix)
candle.plot_2d_density_sigma_vs_error(sigma, yerror, method, prefix)
mse = np.mean((Ytest - Ypred_mean)**2)
mae = np.mean(np.abs(Ytest - Ypred_mean))
print('Prediction error in testing')
print('MSE: ', mse)
print('MAE: ', mae)
candle.plot_contamination(Ytest, Ypred_mean, sigma, pred_name=pred_name, figprefix=prefix)
print('Since in contamination model std prediction is uniform for all samples, no point in calibrating... Finishing')
return
else:
raise Exception('ERROR ! UQ mode specified for calibration: ' + uqmode + ' not implemented... Exiting')
print('Coverage (80%) before calibration: ', cov80p)
# Density / Histogram plots
candle.plot_density_observed_vs_predicted(Ytest, Ypred_mean, pred_name, prefix)
candle.plot_2d_density_sigma_vs_error(sigma, yerror, method, prefix)
candle.plot_histogram_error_per_sigma(sigma, yerror, method, prefix)
# shuffle data for calibration
index_perm_total, pSigma_cal, pSigma_test, pMean_cal, pMean_test, true_cal, true_test = candle.split_data_for_empirical_calibration(Ytest, Ypred_mean, sigma)
# Compute empirical calibration by smooth interpolation
splineobj1, splineobj2 = candle.compute_empirical_calibration_interpolation(pSigma_cal, pMean_cal, true_cal, cv)
error = np.abs(true_cal - pMean_cal)
candle.plot_calibration_interpolation(pSigma_cal, error, splineobj1, splineobj2, method, prefix, params['plot_steps'])
# Check prediction error
eabs_pred = splineobj2(pSigma_test)
cov80p = coverage_80p(true_test, pMean_test, eabs_pred)
print('Coverage (80%) after calibration: ', cov80p)
eabs_true = np.abs(true_test - pMean_test)
mse = np.mean((eabs_true - eabs_pred)**2)
mae = np.mean(np.abs(eabs_true - eabs_pred))
print('Prediction error in testing calibration')
print('MSE: ', mse)
print('MAE: ', mae)
# Use MAE as threshold of accuracy
# Mark samples with predicted std > mae
candle.plot_calibrated_std(true_test, pMean_test, eabs_pred, 2. * mae, pred_name, prefix)
# store calibration
fname = prefix + '_calibration_interpolation_spline.dkl'
with open(fname, 'wb') as f:
dill.dump(splineobj2, f)
print('Calibration spline (interpolation) stored in file: ', fname)
def main():
params = initialize_parameters()
run(params)
if __name__ == '__main__':
main()
| ECP-CANDLE/Benchmarks | common/calibration_main.py | Python | mit | 8,705 |
#!/usr/bin/env ipy
import clr
clr.AddReference("MyMediaLite.dll")
from MyMediaLite import *
# load the data
train_data = IO.ItemData.Read("u1.base")
test_data = IO.ItemData.Read("u1.test")
# set up the recommender
recommender = ItemRecommendation.UserKNN() # don't forget ()
recommender.K = 20
recommender.Feedback = train_data
recommender.Train()
# measure the accuracy on the test data set
print Eval.Items.Evaluate(recommender, test_data, train_data)
# make a prediction for a certain user and item
print recommender.Predict(1, 1)
| jashwanth9/Expert-recommendation-system | MyMediaLite-3.10/examples/python/item_recommendation.py | Python | apache-2.0 | 539 |
def quadratic_fitness(context, node):
return context['quadratic'][node].score(node.data.x, node.data.y)
| yarden-livnat/regulus | regulus/measures/quadratic.py | Python | bsd-3-clause | 108 |
#!/usr/bin/env python3
# TODO:
# FILES table: OBSOLETE column
# OBSOLETE says that this file was replaced by a newer version, for example checkrels may want to create a new file with only part of the output.
# Should this be a pointer to the file that replaced the obsolete one? How to signal a file that is obsolete, but not replaced by anything?
# If one file is replaced by several (say, due to a data corruption in the middle), we need a 1:n relationship. If several files are replaced by one (merge),
# we need n:1. What do? Do we really want an n:n relationship here? Disallow fragmenting files, or maybe simply not track it in the DB if we do?
# FILES table: CHECKSUM column
# We need a fast check that the information stored in the DB still accurately reflects the file system contents. The test should also warn about files in upload/ which are not listed in DB
# Make Python 2.7 use the print() syntax from Python 3
from __future__ import print_function
import sys
import sqlite3
try:
import mysql
import mysql.connector
HAVE_MYSQL=True
except ImportError:
HAVE_MYSQL=False
import threading
import traceback
import collections
import abc
from datetime import datetime
import re
import time
if re.search("^/", "@CMAKE_INSTALL_PREFIX@/@LIBSUFFIX@"):
sys.path.append("@CMAKE_INSTALL_PREFIX@/@LIBSUFFIX@")
from workunit import Workunit
if sys.version_info.major == 3:
from queue import Queue
else:
from Queue import Queue
import patterns
import cadologger
import logging
DEBUG = 1
exclusive_transaction = [None, None]
DEFERRED = object()
IMMEDIATE = object()
EXCLUSIVE = object()
logger = logging.getLogger("Database")
logger.setLevel(logging.NOTSET)
PRINTED_CANCELLED_WARNING = False
def join3(l, pre=None, post=None, sep=", "):
"""
If any parameter is None, it is interpreted as the empty string
>>> join3 ( ('a'), pre="+", post="-", sep=", ")
'+a-'
>>> join3 ( ('a', 'b'), pre="+", post="-", sep=", ")
'+a-, +b-'
>>> join3 ( ('a', 'b'))
'a, b'
>>> join3 ( ('a', 'b', 'c'), pre="+", post="-", sep=", ")
'+a-, +b-, +c-'
"""
if pre is None:
pre = ""
if post is None:
post = ""
if sep is None:
sep = "";
return sep.join([pre + k + post for k in l])
def dict_join3(d, sep=None, op=None, pre=None, post=None):
"""
If any parameter is None, it is interpreted as the empty string
>>> t = dict_join3 ( {"a": "1", "b": "2"}, sep=",", op="=", pre="-", post="+")
>>> t == '-a=1+,-b=2+' or t == '-b=2+,-a=1+'
True
"""
if pre is None:
pre = ""
if post is None:
post = ""
if sep is None:
sep = "";
if op is None:
op = ""
return sep.join([pre + op.join(k) + post for k in d.items()])
def conn_commit(conn):
logger.transaction("Commit on connection %d", id(conn))
if DEBUG > 1:
if not exclusive_transaction[0] is None and not conn is exclusive_transaction[0]:
logger.warning("Commit on connection %d, but exclusive lock was on %d", id(conn), id(exclusive_transaction[0]))
exclusive_transaction[0] = None
exclusive_transaction[1] = None
conn.commit()
def conn_close(conn):
# I'm really having difficulties here. I can't see what's going on.
# Sometimes I have an uncaught exception popping up.
#target = 92800609832959449330691138186
#log(target) = 32359472153599817010011705
#Warning:Database: Connection 140584385754280 being closed while in transaction
#Exception ignored in: <bound method WuAccess.__del__ of <wudb.WuAccess object at 0x7fdc5a5fb470>>
#Traceback (most recent call last):
# File "/home/thome/NFS/cado/scripts/cadofactor/wudb.py", line 1128, in __del__
# File "/home/thome/NFS/cado/scripts/cadofactor/wudb.py", line 107, in conn_close
# File "/usr/lib/python3.5/logging/__init__.py", line 1292, in warning
# File "/usr/lib/python3.5/logging/__init__.py", line 1416, in _log
# File "/usr/lib/python3.5/logging/__init__.py", line 1426, in handle
# File "/usr/lib/python3.5/logging/__init__.py", line 1488, in callHandlers
# File "/usr/lib/python3.5/logging/__init__.py", line 856, in handle
# File "/usr/lib/python3.5/logging/__init__.py", line 1048, in emit
# File "/usr/lib/python3.5/logging/__init__.py", line 1038, in _open
#NameError: name 'open' is not defined
#
try:
logger.transaction("Closing connection %d", id(conn))
if conn.in_transaction:
logger.warning("Connection %d being closed while in transaction", id(conn))
conn.close()
except:
pass
# Dummy class for defining "constants" with reverse lookup
STATUS_NAMES = ["AVAILABLE", "ASSIGNED", "NEED_RESUBMIT", "RECEIVED_OK",
"RECEIVED_ERROR", "VERIFIED_OK", "VERIFIED_ERROR", "CANCELLED"]
STATUS_VALUES = range(len(STATUS_NAMES))
WuStatusBase = collections.namedtuple("WuStatusBase", STATUS_NAMES)
class WuStatusClass(WuStatusBase):
def check(self, status):
assert status in self
def get_name(self, status):
self.check(status)
return STATUS_NAMES[status]
WuStatus = WuStatusClass(*STATUS_VALUES)
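# Added illustrative note (not in the original source): with the definitions
# above, WuStatus.ASSIGNED == 1 and WuStatus.get_name(1) == "ASSIGNED"; the
# namedtuple simply maps each status name to its position in STATUS_NAMES.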
def check_tablename(name):
""" Test whether name is a valid SQL table name.
Raise an exception if it isn't.
"""
no_ = name.replace("_", "")
if not no_[0].isalpha() or not no_[1:].isalnum():
raise Exception("%s is not valid for an SQL table name" % name)
# If we try to update the status in any way other than progressive
# (AVAILABLE -> ASSIGNED -> ...), we raise this exception
class StatusUpdateError(Exception):
pass
# I wish I knew how to make that inherit from a template argument (which
# would be sqlite3.Cursor or mysql.Cursor). I'm having difficulties to
# grok that syntax though, so let's stay simple and stupid. We'll have a
# *member* which is the cursor object, and so be it.
class CursorWrapperBase(object,metaclass=abc.ABCMeta):
""" This class represents a DB cursor and provides convenience functions
around SQL queries. In particular it is meant to provide an
(1) an interface to SQL functionality via method calls with parameters,
and
(2) hiding some particularities of the SQL variant of the underlying
DBMS as far as possible """
# This is used in where queries; it converts from named arguments such as
# "eq" to a binary operator such as "="
name_to_operator = {"lt": "<", "le": "<=", "eq": "=", "ge": ">=", "gt" : ">", "ne": "!=", "like": "like"}
@abc.abstractproperty
def cursor(self):
pass
@abc.abstractproperty
def connection(self):
pass
# override in the derived cursor class if needed
@property
def _string_translations(self):
return []
# override in the derived cursor class if needed
def translations(self, x):
if type(x) == tuple:
return tuple([self.translations(u) for u in x])
elif type(x) == list:
return [self.translations(u) for u in x]
else:
v=x
for a,b in self._string_translations:
v,nrepl=re.subn(a, b, v)
return v
# override in the derived cursor class if needed
@property
def parameter_auto_increment(self):
return "?"
def __init__(self):
pass
def in_transaction(self):
return self.connection.in_transaction
@staticmethod
def _without_None(d):
""" Return a copy of the dictionary d, but without entries whose values
are None """
return {k[0]:k[1] for k in d.items() if k[1] is not None}
@staticmethod
def as_string(d):
if d is None:
return ""
else:
return ", " + dict_join3(d, sep=", ", op=" AS ")
def _where_str(self, name, **args):
where = ""
values = []
qm=self.parameter_auto_increment
for opname in args:
if args[opname] is None:
continue
if where == "":
where = " " + name + " "
else:
where = where + " AND "
where = where + join3(args[opname].keys(),
post=" " + self.name_to_operator[opname] + " " + qm,
sep=" AND ")
values = values + list(args[opname].values())
return (where, values)
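    # Added illustrative note (not in the original source): assuming the default
    # "?" placeholder, a call such as
    #   self._where_str("WHERE", eq={"status": 2}, lt={"timeout": 100})
    # returns roughly (" WHERE status = ? AND timeout < ?", [2, 100]).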
def _exec(self, command, values=None):
""" Wrapper around self.execute() that prints arguments
for debugging and retries in case of "database locked" exception """
# FIXME: should be the caller's class name, as _exec could be
# called from outside this class
classname = self.__class__.__name__
parent = sys._getframe(1).f_code.co_name
command = self.translations(command)
command_str = command.replace("?", "%r")
if not values is None:
command_str = command_str % tuple(values)
logger.transaction("%s.%s(): connection = %s, command = %s",
classname, parent, id(self.connection), command_str)
i = 0
while True:
try:
if values is None or len(values)==0:
self.cursor.execute(command)
else:
self.cursor.execute(command, values)
break
except (sqlite3.OperationalError, sqlite3.DatabaseError) as e:
if str(e) == "database disk image is malformed" or \
str(e) == "disk I/O error":
logger.critical("sqlite3 reports error accessing the database.")
logger.critical("Database file may have gotten corrupted, "
"or maybe filesystem does not properly support "
"file locking.")
raise
if str(e) != "database is locked":
raise
i += 1
time.sleep(1) # wait for 1 second if database is locked
if i == 10:
logger.critical("You might try 'fuser xxx.db' to see which process is locking the database")
raise
logger.transaction("%s.%s(): connection = %s, command finished",
classname, parent, id(self.connection))
def begin(self, mode=None):
if mode is None:
self._exec("BEGIN")
elif mode is DEFERRED:
self._exec("BEGIN DEFERRED")
elif mode is IMMEDIATE:
self._exec("BEGIN IMMEDIATE")
elif mode is EXCLUSIVE:
if DEBUG > 1:
tb = traceback.extract_stack()
if not exclusive_transaction == [None, None]:
old_tb_str = "".join(traceback.format_list(exclusive_transaction[1]))
new_tb_str = "".join(traceback.format_list(tb))
logger.warning("Called Cursor.begin(EXCLUSIVE) when there was aleady an exclusive transaction %d\n%s",
id(exclusive_transaction[0]), old_tb_str)
logger.warning("New transaction: %d\n%s", id(self.connection), new_tb_str)
self._exec("BEGIN EXCLUSIVE")
if DEBUG > 1:
assert exclusive_transaction == [None, None]
exclusive_transaction[0] = self.connection
exclusive_transaction[1] = tb
else:
raise TypeError("Invalid mode parameter: %r" % mode)
def pragma(self, prag):
self._exec("PRAGMA %s;" % prag)
def create_table(self, table, layout):
""" Creates a table with fields as described in the layout parameter """
command = "CREATE TABLE IF NOT EXISTS %s( %s );" % \
(table, ", ".join(" ".join(k) for k in layout))
self._exec (command)
def create_index(self, name, table, columns):
# we get so many of these...
try:
""" Creates an index with fields as described in the columns list """
command = self.translations("CREATE INDEX IF NOT EXISTS") + " %s ON %s( %s );" % (name, table, ", ".join(columns))
self._exec (command)
except Exception as e:
logger.warning(e)
pass
def insert(self, table, d):
""" Insert a new entry, where d is a dictionary containing the
field:value pairs. Returns the row id of the newly created entry """
# INSERT INTO table (field_1, field_2, ..., field_n)
# VALUES (value_1, value_2, ..., value_n)
# Fields is a copy of d but with entries removed that have value None.
# This is done primarily to avoid having "id" listed explicitly in the
# INSERT statement, because the DB fills in a new value automatically
# if "id" is the primary key. But I guess not listing field:NULL items
# explicitly in an INSERT is a good thing in general
fields = self._without_None(d)
fields_str = ", ".join(fields.keys())
qm=self.parameter_auto_increment
sqlformat = ", ".join((qm,) * len(fields)) # sqlformat = "?, ?, ?, " ... "?"
command = "INSERT INTO %s( %s ) VALUES ( %s );" \
% (table, fields_str, sqlformat)
values = list(fields.values())
self._exec(command, values)
rowid = self.lastrowid
return rowid
def update(self, table, d, **conditions):
""" Update fields of an existing entry. conditions specifies the where
clause to use for to update, entries in the dictionary d are the
fields and their values to update """
# UPDATE table SET column_1=value1, column2=value_2, ...,
# column_n=value_n WHERE column_n+1=value_n+1, ...,
qm=self.parameter_auto_increment
setstr = join3(d.keys(), post = " = " + qm, sep = ", ")
(wherestr, wherevalues) = self._where_str("WHERE", **conditions)
command = "UPDATE %s SET %s %s" % (table, setstr, wherestr)
values = list(d.values()) + wherevalues
self._exec(command, values)
def where_query(self, joinsource, col_alias=None, limit=None, order=None,
**conditions):
# Table/Column names cannot be substituted, so include in query directly.
(WHERE, values) = self._where_str("WHERE", **conditions)
if order is None:
ORDER = ""
else:
if not order[1] in ("ASC", "DESC"):
raise Exception
ORDER = " ORDER BY %s %s" % (order[0], order[1])
if limit is None:
LIMIT = ""
else:
LIMIT = " LIMIT %s" % int(limit)
        AS = self.as_string(col_alias)
command = "SELECT * %s FROM %s %s %s %s" \
% (AS, joinsource, WHERE, ORDER, LIMIT)
return (command, values)
    def where(self, joinsource, col_alias=None, limit=None, order=None,
              values=[], **conditions):
        """ Get up to "limit" table rows (limit == None: no limit) where
        the key:value pairs of the dictionary "conditions" are set to the
        same value in the database table """
(command, newvalues) = self.where_query(joinsource, col_alias, limit,
order, **conditions)
self._exec(command + ";", values + newvalues)
def count(self, joinsource, **conditions):
""" Count rows where the key:value pairs of the dictionary "conditions" are
set to the same value in the database table """
# Table/Column names cannot be substituted, so include in query directly.
(WHERE, values) = self._where_str("WHERE", **conditions)
command = "SELECT COUNT(*) FROM %s %s;" % (joinsource, WHERE)
self._exec(command, values)
r = self.cursor.fetchone()
return int(r[0])
def delete(self, table, **conditions):
""" Delete the rows specified by conditions """
(WHERE, values) = self._where_str("WHERE", **conditions)
command = "DELETE FROM %s %s;" % (table, WHERE)
self._exec(command, values)
def where_as_dict(self, joinsource, col_alias=None, limit=None,
order=None, values=[], **conditions):
self.where(joinsource, col_alias=col_alias, limit=limit,
order=order, values=values, **conditions)
# cursor.description is a list of lists, where the first element of
# each inner list is the column name
result = []
desc = [k[0] for k in self.cursor.description]
row = self.cursor.fetchone()
while row is not None:
# print("Cursor.where_as_dict(): row = %s" % row)
result.append(dict(zip(desc, row)))
row = self.cursor.fetchone()
return result
def execute(self, *args, **kwargs):
return self._exec(*args, **kwargs)
def fetchone(self, *args, **kwargs):
return self.cursor.fetchone(*args, **kwargs)
def close(self):
self.cursor.close()
@property
def lastrowid(self):
        return self.cursor.lastrowid
class DB_base(object):
@property
def general_pattern(self):
        return r"(?:db:)?(\w+)://(?:(?:(\w+)(?::(.*))?@)?(?:([\w\.]+)|\[([\d:]+)*\])(?::(\d+))?/)?(.*)$"
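    # For example (made-up credentials), a URI like
    #   db:mysql://joe:secret@dbhost:3306/cadodb
    # is matched by general_pattern with group(1)="mysql" (back-end),
    # group(2)="joe", group(3)="secret", group(4)="dbhost" (group(5) would
    # hold a bracketed IPv6 address instead), group(6)="3306" and
    # group(7)="cadodb"; __init__() below unpacks the match in this order.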
def __init__(self, uri, backend_pattern=None):
self.uri = uri
foo=re.match(self.general_pattern,uri)
if not foo:
raise ValueError("db URI %s does not match regexp %s" % (uri,self.general_pattern))
self.hostname=foo.group(4)
self.host_ipv6=False
if not self.hostname:
self.hostname=foo.group(5)
self.host_ipv6=True
self.backend=foo.group(1)
if backend_pattern is not None and not re.match(backend_pattern, self.backend):
raise ValueError("back-end type %s not supported, expected %s" % (self.backend, backend_pattern))
self.db_connect_args=dict(
user=foo.group(2),
password=foo.group(3),
host=self.hostname,
port=foo.group(6)
)
self.db_name=foo.group(7)
self.talked=False
# logger.info("Database URI is %s" % self.uri_without_credentials)
@property
def uri_without_credentials(self):
text="db:%s://" % self.backend
d=self.db_connect_args
if "host" in d:
if "user" in d:
text+="USERNAME"
if "password" in d:
text+=":PASSWORD"
text+="@"
if self.host_ipv6:
text+="[%s]" % d["host"]
else:
text+=d["host"]
if "port" in d:
text+=":%s" % d["port"]
text+="/"
text+=self.db_name
return text
def advertise_connection(self):
if not self.talked:
logger.info("Opened connection to database %s" % self.db_name)
self.talked=True
class DB_SQLite(DB_base):
class CursorWrapper(CursorWrapperBase):
@property
def cursor(self):
return self.__cursor
@property
def connection(self):
return self.cursor.connection
def __init__(self, cursor, *args, **kwargs):
self.__cursor=cursor
super().__init__(*args, **kwargs)
class ConnectionWrapper(sqlite3.Connection):
def cursor(self):
return DB_SQLite.CursorWrapper(super().cursor())
def __init__(self, *args, **kwargs):
super().__init__(isolation_level=None, *args, **kwargs)
def connect(self):
c=self.ConnectionWrapper(self.path)
self.advertise_connection()
return c
# FIXME I think that in any case the sqlite3 module ends up creating
# the db, no ?
def __init__(self, uri, create=False):
super().__init__(uri, backend_pattern="sqlite3?")
self.path = self.db_name
if HAVE_MYSQL:
class DB_MySQL(DB_base):
class CursorWrapper(CursorWrapperBase):
@property
def parameter_auto_increment(self):
return "%s"
@property
def _string_translations(self):
return [
('\\bASC\\b', "AUTO_INCREMENT"),
('\\bCREATE INDEX IF NOT EXISTS\\b', "CREATE INDEX"),
('\\bBEGIN EXCLUSIVE\\b', "START TRANSACTION"),
('\\bpurge\\b', "purgetable"),
]
@property
def cursor(self):
return self.__cursor
@property
def connection(self):
return self._connection
def __init__(self, cursor, connection=None, *args, **kwargs):
self._connection = connection
self.__cursor=cursor
super().__init__(*args, **kwargs)
class ConnectionWrapper(object):
def _reconnect_anonymous(self):
self._conn = mysql.connector.connect(**self._db_factory.db_connect_args)
def _reconnect(self):
self._conn = mysql.connector.connect(database=self._db_factory.db_name, **self._db_factory.db_connect_args)
def cursor(self):
# provide some retry capability. This must be done on the
# connection object, since reconnecting changes the
# connection member.
for i in range(10):
try:
c = self._conn.cursor()
break
except mysql.connector.errors.OperationalError as e:
logger.warning("Got exception connecting to the database, retrying (#%d)" % i)
                        if self._db_factory.db_name:
self._reconnect()
else:
raise
self._conn.commit()
return DB_MySQL.CursorWrapper(c, connection=self)
def __init__(self, db_factory, create=False):
self._db_factory = db_factory
db_name = self._db_factory.db_name
if create:
try:
self._reconnect()
except mysql.connector.errors.ProgrammingError:
# need to create the database first. Do it by
# hand, with a connection which starts without a
# database name.
logger.info("Creating database %s" % db_name)
self._reconnect_anonymous()
cursor = self._conn.cursor()
cursor.execute("CREATE DATABASE %s;" % db_name)
cursor.execute("USE %s;" % db_name)
cursor.execute("SET autocommit = 1")
self._conn.commit()
else:
self._reconnect()
def rollback(self):
self._conn.rollback()
def close(self):
self._conn.close()
def commit(self):
self._conn.commit()
@property
def in_transaction(self):
return self._conn.in_transaction
def connect(self, *args, **kwargs):
return self.ConnectionWrapper(self, *args, **kwargs)
def __init__(self, uri,create=False):
super().__init__(uri, backend_pattern="mysql")
self.path = None
if create:
conn=self.connect(create=True)
conn.close()
class DBFactory(object):
# This class initializes the database from the supplied db uri.
# db:engine:[//[user[:password]@][host][:port]/][dbname][?params][#fragment]
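    # For instance, the doctests in DictDbAccess use "db:sqlite3://:memory:";
    # a file-backed SQLite database can be addressed with something like
    # "db:sqlite3://wudb", and a MySQL server with
    # "db:mysql://user:secret@dbhost:3306/cadodb" (placeholder names).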
def __init__(self, uri, *args, **kwargs):
self.uri = uri
self.base = None
error={}
sc=DB_base.__subclasses__()
for c in sc:
# logger.info("Trying database back-end %s (among %d)" % (c, len(sc)))
try:
self.base = c(uri, *args, **kwargs)
break
except ValueError as err:
error[str(c)]=err
pass
if self.base is None:
msg = "Cannot use database URI %s" % uri
msg += "\n" + "Messages received from %d backends:" % len(sc)
for c in error.keys():
msg += "\n" + "Error from %s: %s" % (c, error[c])
raise ValueError(msg)
def connect(self):
return self.base.connect()
@property
def uri_without_credentials(self):
return self.base.uri_without_credentials
@property
def path(self):
# TODO: remove
return self.base.path
class DbTable(object):
""" A class template defining access methods to a database table """
@staticmethod
    def _subdict(d, l):
        """ Returns a dictionary of those key:value pairs of d for which key
        exists in l """
if d is None:
return None
return {k:d[k] for k in d.keys() if k in l}
def _get_colnames(self):
return [k[0] for k in self.fields]
def getname(self):
return self.tablename
def getpk(self):
return self.primarykey
def dictextract(self, d):
""" Return a dictionary with all those key:value pairs of d
for which key is in self._get_colnames() """
return self._subdict(d, self._get_colnames())
def create(self, cursor):
fields = list(self.fields)
if self.references:
# If this table references another table, we use the primary
# key of the referenced table as the foreign key name
r = self.references # referenced table
fk = (r.getpk(), "INTEGER", "REFERENCES %s ( %s ) " \
% (r.getname(), r.getpk()))
fields.append(fk)
cursor.create_table(self.tablename, fields)
if self.references:
# We always create an index on the foreign key
cursor.create_index(self.tablename + "_pkindex", self.tablename,
(fk[0], ))
for indexname in self.index:
# cursor.create_index(self.tablename + "_" + indexname, self.tablename, self.index[indexname])
try:
cursor.create_index(self.tablename + "_" + indexname + "_index",
self.tablename, self.index[indexname])
except Exception as e:
logger.warning(e)
pass
def insert(self, cursor, values, foreign=None):
""" Insert a new row into this table. The column:value pairs are
specified key:value pairs of the dictionary d.
The database's row id for the new entry is stored in
d[primarykey] """
d = self.dictextract(values)
assert self.primarykey not in d or d[self.primarykey] is None
# If a foreign key is specified in foreign, add it to the column
# that is marked as being a foreign key
if foreign:
r = self.references.primarykey
assert not r in d or d[r] is None
d[r] = foreign
values[self.primarykey] = cursor.insert(self.tablename, d)
def insert_list(self, cursor, values, foreign=None):
for v in values:
self.insert(cursor, v, foreign)
def update(self, cursor, d, **conditions):
""" Update an existing row in this table. The column:value pairs to
be written are specified key:value pairs of the dictionary d """
cursor.update(self.tablename, d, **conditions)
def delete(self, cursor, **conditions):
""" Delete an existing row in this table """
cursor.delete(self.tablename, **conditions)
def where(self, cursor, limit=None, order=None, **conditions):
assert order is None or order[0] in self._get_colnames()
return cursor.where_as_dict(self.tablename, limit=limit,
order=order, **conditions)
class WuTable(DbTable):
tablename = "workunits"
fields = (
("wurowid", "INTEGER PRIMARY KEY ASC", "UNIQUE NOT NULL"),
("wuid", "VARCHAR(512)", "UNIQUE NOT NULL"),
("submitter", "VARCHAR(512)", ""),
("status", "INTEGER", "NOT NULL"),
("wu", "TEXT", "NOT NULL"),
("timecreated", "TEXT", ""),
("timeassigned", "TEXT", ""),
("assignedclient", "TEXT", ""),
("timeresult", "TEXT", ""),
("resultclient", "TEXT", ""),
("errorcode", "INTEGER", ""),
("failedcommand", "INTEGER", ""),
("timeverified", "TEXT", ""),
("retryof", "INTEGER", "REFERENCES %s" % tablename),
("priority", "INTEGER", "")
)
primarykey = fields[0][0]
references = None
index = {"wuid": (fields[1][0],),
"submitter" : (fields[2][0],),
"priority" : (fields[14][0],),
"status" : (fields[3][0],)
}
class FilesTable(DbTable):
tablename = "files"
fields = (
("filesrowid", "INTEGER PRIMARY KEY ASC", "UNIQUE NOT NULL"),
("filename", "TEXT", ""),
("path", "VARCHAR(512)", "UNIQUE NOT NULL"),
("type", "TEXT", ""),
("command", "INTEGER", "")
)
primarykey = fields[0][0]
references = WuTable()
index = {}
# The sqrt_factors table contains the input number to be factored. As
# such, we must make sure that it's permitted to go at least as far as we
# intend to go. 200 digits is definitely too small.
class DictDbTable(DbTable):
fields = (
("rowid", "INTEGER PRIMARY KEY ASC", "UNIQUE NOT NULL"),
("kkey", "VARCHAR(300)", "UNIQUE NOT NULL"),
("type", "INTEGER", "NOT NULL"),
("value", "TEXT", "")
)
primarykey = fields[0][0]
references = None
def __init__(self, *args, name = None, **kwargs):
self.tablename = name
# index creation now always prepends the table name, and appends "index"
self.index = {"dictdb_kkey": ("kkey",)} # useful ?
super().__init__(*args, **kwargs)
class DictDbAccess(collections.MutableMapping):
""" A DB-backed flat dictionary.
Flat means that the value of each dictionary entry must be a type that
the underlying DB understands, like integers, strings, etc., but not
collections or other complex types.
A copy of all the data in the table is kept in memory; read accesses
are always served from the in-memory dict. Write accesses write through
to the DB.
>>> conn = DBFactory('db:sqlite3://:memory:').connect()
>>> d = DictDbAccess(conn, 'test')
>>> d == {}
True
>>> d['a'] = '1'
>>> d == {'a': '1'}
True
>>> d['a'] = 2
>>> d == {'a': 2}
True
>>> d['b'] = '3'
>>> d == {'a': 2, 'b': '3'}
True
>>> del(d)
>>> d = DictDbAccess(conn, 'test')
>>> d == {'a': 2, 'b': '3'}
True
>>> del(d['b'])
>>> d == {'a': 2}
True
>>> d.setdefault('a', '3')
2
>>> d == {'a': 2}
True
>>> d.setdefault('b', 3.0)
3.0
>>> d == {'a': 2, 'b': 3.0}
True
>>> d.setdefault(None, {'a': '3', 'c': '4'})
>>> d == {'a': 2, 'b': 3.0, 'c': '4'}
True
>>> d.update({'a': '3', 'd': True})
>>> d == {'a': '3', 'b': 3.0, 'c': '4', 'd': True}
True
>>> del(d)
>>> d = DictDbAccess(conn, 'test')
>>> d == {'a': '3', 'b': 3.0, 'c': '4', 'd': True}
True
>>> d.clear(['a', 'd'])
>>> d == {'b': 3.0, 'c': '4'}
True
>>> del(d)
>>> d = DictDbAccess(conn, 'test')
>>> d == {'b': 3.0, 'c': '4'}
True
>>> d.clear()
>>> d == {}
True
>>> del(d)
>>> d = DictDbAccess(conn, 'test')
>>> d == {}
True
"""
types = (str, int, float, bool)
def __init__(self, db, name):
''' Attaches to a DB table and reads values stored therein.
db can be a string giving the file name for the DB (same as for
sqlite3.connect()), or an open DB connection. The latter is allowed
primarily for making the doctest work, so we can reuse the same
memory-backed DB connection, but it may be useful in other contexts.
'''
if isinstance(db, DBFactory):
self._db = db
self._conn = db.connect()
self._ownconn = True
elif isinstance(db, str):
raise ValueError("unexpected: %s" % db)
else:
self._db = None
self._conn = db
self._ownconn = False
self._table = DictDbTable(name = name)
# Create an empty table if none exists
cursor = self.get_cursor()
        self._table.create(cursor)
# Get the entries currently stored in the DB
self._data = self._getall()
cursor.close()
def get_cursor(self):
return self._conn.cursor()
# Implement the abstract methods defined by collections.MutableMapping
# All but __del__ and __setitem__ are simply passed through to the self._data
# dictionary
def __getitem__(self, key):
return self._data.__getitem__(key)
def __iter__(self):
return self._data.__iter__()
def __len__(self):
return self._data.__len__()
def __str__(self):
return self._data.__str__()
def __del__(self):
""" Close the DB connection and delete the in-memory dictionary """
if self._ownconn:
# When we shut down Python hard, e.g., in an exception, the
# conn_close() function object may have been destroyed already
# and trying to call it would raise another exception.
if callable(conn_close):
conn_close(self._conn)
else:
self._conn.close()
def __convert_value(self, row):
valuestr = row["value"]
valuetype = row["type"]
# Look up constructor for this type
typecon = self.types[int(valuetype)]
# Bool is handled separately as bool("False") == True
if typecon == bool:
if valuestr == "True":
return True
elif valuestr == "False":
return False
else:
                raise ValueError("Value %s invalid for Bool type" % valuestr)
return typecon(valuestr)
def __get_type_idx(self, value):
valuetype = type(value)
for (idx, t) in enumerate(self.types):
if valuetype == t:
return idx
raise TypeError("Type %s not supported" % str(valuetype))
def _getall(self):
""" Reads the whole table and returns it as a dict """
cursor = self.get_cursor()
rows = self._table.where(cursor)
cursor.close()
return {r["kkey"]: self.__convert_value(r) for r in rows}
def __setitem_nocommit(self, cursor, key, value):
""" Set dictionary key to value and update/insert into table,
but don't commit. Cursor must be given
"""
update = {"value": str(value), "type": self.__get_type_idx(value)}
if key in self._data:
# Update the table row where column "key" equals key
self._table.update(cursor, update, eq={"kkey": key})
else:
# Insert a new row
update["kkey"] = key
self._table.insert(cursor, update)
# Update the in-memory dict
self._data[key] = value
def __setitem__(self, key, value):
""" Access by indexing, e.g., d["foo"]. Always commits """
cursor = self.get_cursor()
if not cursor.in_transaction:
cursor.begin(EXCLUSIVE)
self.__setitem_nocommit(cursor, key, value)
conn_commit(self._conn)
cursor.close()
def __delitem__(self, key, commit=True):
""" Delete a key from the dictionary """
cursor = self.get_cursor()
if not cursor.in_transaction:
cursor.begin(EXCLUSIVE)
self._table.delete(cursor, eq={"kkey": key})
if commit:
conn_commit(self._conn)
cursor.close()
del(self._data[key])
def setdefault(self, key, default = None, commit=True):
''' Setdefault function that allows a mapping as input
Values from default dict are merged into self, *not* overwriting
existing values in self '''
if key is None and isinstance(default, collections.Mapping):
update = {key:default[key] for key in default if not key in self}
if update:
self.update(update, commit=commit)
return None
elif not key in self:
self.update({key:default}, commit=commit)
return self._data[key]
def update(self, other, commit=True):
cursor = self.get_cursor()
if not self._conn.in_transaction:
cursor.begin(EXCLUSIVE)
for (key, value) in other.items():
self.__setitem_nocommit(cursor, key, value)
if commit:
conn_commit(self._conn)
cursor.close()
def clear(self, args = None, commit=True):
""" Overridden clear that allows removing several keys atomically """
cursor = self.get_cursor()
if not self._conn.in_transaction:
cursor.begin(EXCLUSIVE)
if args is None:
self._data.clear()
self._table.delete(cursor)
else:
for key in args:
del(self._data[key])
self._table.delete(cursor, eq={"kkey": key})
if commit:
conn_commit(self._conn)
cursor.close()
class Mapper(object):
    """ This class translates between application objects, i.e., Python
    dictionaries, and the relational data layout in an SQL DB, i.e.,
    one or more tables which possibly have foreign key relationships
    that map to hierarchical data structures. For now, only one
    level of foreign key / sub-table nesting is supported."""
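    # As an illustration (field values made up): a workunit dictionary such as
    #   {"wuid": "wu_1", "status": 0, "wu": "WORKUNIT ...",
    #    "files": [{"filename": "out", "path": "/tmp/out", "type": "RESULT"}]}
    # becomes one row in the "workunits" table plus one row per "files" entry
    # in the "files" table, the latter carrying the workunit's primary key as
    # a foreign key; where() below reassembles the nested structure.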
def __init__(self, table, subtables = None):
self.table = table
self.subtables = {}
if subtables:
for s in subtables.keys():
self.subtables[s] = Mapper(subtables[s])
    def __sub_dict(self, d):
        """ For each key "k" that has a subtable assigned in "self.subtables",
        pop the entry with key "k" from "d", and store it in a new dictionary
        which is returned. I.e., the dictionary d is separated into
        two parts: the part which corresponds to subtables and is the return
        value, and the rest which is left in the input dictionary. """
sub_dict = {}
for s in self.subtables.keys():
# Don't store s:None entries even if they exist in d
t = d.pop(s, None)
if not t is None:
sub_dict[s] = t
return sub_dict
def getname(self):
return self.table.getname()
def getpk(self):
return self.table.getpk()
def create(self, cursor):
self.table.create(cursor)
for t in self.subtables.values():
t.create(cursor)
def insert(self, cursor, wus, foreign=None):
pk = self.getpk()
for wu in wus:
# Make copy so sub_dict does not change caller's data
wuc = wu.copy()
# Split off entries that refer to subtables
sub_dict = self.__sub_dict(wuc)
# We add the entries in wuc only if it does not have a primary
# key yet. If it does have a primary key, we add only the data
# for the subtables
if not pk in wuc:
self.table.insert(cursor, wuc, foreign=foreign)
# Copy primary key into caller's data
wu[pk] = wuc[pk]
for subtable_name in sub_dict.keys():
self.subtables[subtable_name].insert(
cursor, sub_dict[subtable_name], foreign=wu[pk])
def update(self, cursor, wus):
pk = self.getpk()
for wu in wus:
assert not wu[pk] is None
wuc = wu.copy()
sub_dict = self.__sub_dict(wuc)
rowid = wuc.pop(pk, None)
if rowid:
                self.table.update(cursor, wuc, eq={pk: rowid})
            for s in sub_dict.keys():
self.subtables[s].update(cursor, sub_dict[s])
def count(self, cursor, **cond):
joinsource = self.table.tablename
return cursor.count(joinsource, **cond)
def where(self, cursor, limit = None, order = None, **cond):
# We want:
# SELECT * FROM (SELECT * from workunits WHERE status = 2 LIMIT 1) LEFT JOIN files USING ( wurowid );
pk = self.getpk()
(command, values) = cursor.where_query(self.table.tablename,
limit=limit, **cond)
joinsource = "( %s )" % command
for s in self.subtables.keys():
# FIXME: this probably breaks with more than 2 tables
joinsource = "%s tmp LEFT JOIN %s USING ( %s )" \
% (joinsource, self.subtables[s].getname(), pk)
# FIXME: don't get result rows as dict! Leave as tuple and
# take them apart positionally
rows = cursor.where_as_dict(joinsource, order=order, values=values)
wus = []
for r in rows:
# Collapse rows with identical primary key
if len(wus) == 0 or r[pk] != wus[-1][pk]:
wus.append(self.table.dictextract(r))
for s in self.subtables.keys():
wus[-1][s] = None
for (sn, sm) in self.subtables.items():
spk = sm.getpk()
# if there was a match on the subtable
if spk in r and not r[spk] is None:
                    if wus[-1][sn] is None:
# If this sub-array is empty, init it
wus[-1][sn] = [sm.table.dictextract(r)]
elif r[spk] != wus[-1][sn][-1][spk]:
# If not empty, and primary key of sub-table is not
# same as in previous entry, add it
wus[-1][sn].append(sm.table.dictextract(r))
return wus
class WuAccess(object): # {
""" This class maps between the WORKUNIT and FILES tables
and a dictionary
{"wuid": string, ..., "timeverified": string, "files": list}
    where list is None or a list of dictionaries of the form
    {"id": int, "type": int, "wuid": string, "filename": string,
    "path": string}
    Operations on instances of WuAccess are directly carried
out on the database persistent storage, i.e., they behave kind
of as if the WuAccess instance were itself a persistent
storage device """
def __init__(self, db):
if isinstance(db, DBFactory):
self.conn = db.connect()
self._ownconn = True
elif isinstance(db, str):
raise ValueError("unexpected")
else:
self.conn = db
self._ownconn = False
cursor = self.get_cursor()
if isinstance(cursor, DB_SQLite.CursorWrapper):
cursor.pragma("foreign_keys = ON")
# I'm not sure it's relevant to do commit() at this point.
# self.commit()
cursor.close()
self.mapper = Mapper(WuTable(), {"files": FilesTable()})
def get_cursor(self):
c = self.conn.cursor()
return c
def __del__(self):
if self._ownconn:
if callable(conn_close):
conn_close(self.conn)
else:
self.conn.close()
@staticmethod
def to_str(wus):
r = []
for wu in wus:
s = "Workunit %s:\n" % wu["wuid"]
for (k,v) in wu.items():
if k != "wuid" and k != "files":
s += " %s: %r\n" % (k, v)
if "files" in wu:
s += " Associated files:\n"
if wu["files"] is None:
s += " None\n"
else:
for f in wu["files"]:
s += " %s\n" % f
r.append(s)
return '\n'.join(r)
@staticmethod
def _checkstatus(wu, status):
#logger.debug("WuAccess._checkstatus(%s, %s)", wu, status)
wu_status = wu["status"]
if isinstance(status, collections.Container):
ok = wu_status in status
else:
ok = wu_status == status
if not ok:
msg = "Workunit %s has status %s (%s), expected %s (%s)" % \
(wu["wuid"], wu_status, WuStatus.get_name(wu_status),
status, WuStatus.get_name(status))
if status is WuStatus.ASSIGNED and wu_status is WuStatus.CANCELLED:
logger.warning ("WuAccess._checkstatus(): %s, presumably timed out", msg)
raise StatusUpdateError(msg)
elif status is WuStatus.ASSIGNED and wu_status is WuStatus.NEED_RESUBMIT:
logger.warning ("WuAccess._checkstatus(): %s, manually expired", msg)
raise StatusUpdateError(msg)
else:
logger.error ("WuAccess._checkstatus(): %s", msg)
raise StatusUpdateError(msg)
# Which fields should be None for which status
should_be_unset = {
"errorcode": (WuStatus.AVAILABLE, WuStatus.ASSIGNED),
"timeresult": (WuStatus.AVAILABLE, WuStatus.ASSIGNED),
"resultclient": (WuStatus.AVAILABLE, WuStatus.ASSIGNED),
"timeassigned": (WuStatus.AVAILABLE,),
"assignedclient": (WuStatus.AVAILABLE,),
}
def check(self, data):
status = data["status"]
WuStatus.check(status)
wu = Workunit(data["wu"])
assert wu.get_id() == data["wuid"]
if status == WuStatus.RECEIVED_ERROR:
assert data["errorcode"] != 0
if status == WuStatus.RECEIVED_OK:
assert data["errorcode"] is None or data["errorcode"] == 0
for field in self.should_be_unset:
if status in self.should_be_unset[field]:
assert data[field] is None
# Here come the application-visible functions that implement the
# "business logic": creating a new workunit from the text of a WU file,
# assigning it to a client, receiving a result for the WU, marking it as
# verified, or marking it as cancelled
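    # Roughly, a workunit's life cycle through these methods looks like this
    # (identifiers such as "db_factory", "client42" and "wuid" are placeholders):
    #   wuar = WuAccess(db_factory)
    #   wuar.create_tables()
    #   wuar.create(wu_text)                         # status AVAILABLE
    #   assigned_text = wuar.assign("client42")      # status ASSIGNED
    #   wuar.result(wuid, "client42", files=[])      # status RECEIVED_OK/_ERROR
    #   wuar.verification(wuid, ok=True)             # status VERIFIED_OK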
def _add_files(self, cursor, files, wuid=None, rowid=None):
# Exactly one must be given
assert not wuid is None or not rowid is None
assert wuid is None or rowid is None
# FIXME: allow selecting row to update directly via wuid, without
# doing query for rowid first
pk = self.mapper.getpk()
if rowid is None:
            wu = self.get_by_wuid(cursor, wuid)
if wu:
rowid = wu[pk]
else:
return False
colnames = ("filename", "path", "type", "command")
# zipped length is that of shortest list, so "command" is optional
d = (dict(zip(colnames, f)) for f in files)
# These two should behave identically
if True:
self.mapper.insert(cursor, [{pk:rowid, "files": d},])
else:
self.mapper.subtables["files"].insert(cursor, d, foreign=rowid)
def commit(self, do_commit=True):
if do_commit:
conn_commit(self.conn)
def create_tables(self):
cursor = self.get_cursor()
if isinstance(cursor, DB_SQLite.CursorWrapper):
cursor.pragma("journal_mode=WAL")
self.mapper.create(cursor)
self.commit()
cursor.close()
def _create1(self, cursor, wutext, priority=None):
d = {
"wuid": Workunit(wutext).get_id(),
"wu": wutext,
"status": WuStatus.AVAILABLE,
"timecreated": str(datetime.utcnow())
}
if not priority is None:
d["priority"] = priority
# Insert directly into wu table
self.mapper.table.insert(cursor, d)
def create(self, wus, priority=None, commit=True):
""" Create new workunits from wus which contains the texts of the
workunit files """
cursor = self.get_cursor()
# todo restore transactions
if not self.conn.in_transaction:
cursor.begin(EXCLUSIVE)
if isinstance(wus, str):
self._create1(cursor, wus, priority)
else:
for wu in wus:
self._create1(cursor, wu, priority)
self.commit(commit)
cursor.close()
def assign(self, clientid, commit=True, timeout_hint=None):
""" Finds an available workunit and assigns it to clientid.
Returns the text of the workunit, or None if no available
workunit exists """
cursor = self.get_cursor()
if not self.conn.in_transaction:
cursor.begin(EXCLUSIVE)
# This "priority" stuff is the root cause for the server taking time to
# hand out WUs when the count of available WUs drops to zero.
# (introduced in 90ae4beb7 -- it's an optional-and-never-used feature
# anyway)
# r = self.mapper.table.where(cursor, limit = 1,
# order=("priority", "DESC"),
# eq={"status": WuStatus.AVAILABLE})
r = self.mapper.table.where(cursor, limit = 1,
eq={"status": WuStatus.AVAILABLE})
assert len(r) <= 1
if len(r) == 1:
try:
self._checkstatus(r[0], WuStatus.AVAILABLE)
except StatusUpdateError:
self.commit(commit)
cursor.close()
raise
if DEBUG > 0:
self.check(r[0])
d = {"status": WuStatus.ASSIGNED,
"assignedclient": clientid,
"timeassigned": str(datetime.utcnow())
}
pk = self.mapper.getpk()
self.mapper.table.update(cursor, d, eq={pk:r[0][pk]})
result = r[0]["wu"]
if timeout_hint:
dltext = "%d\n" % int(time.time() + int(timeout_hint))
result = result + "DEADLINE " + dltext
else:
result = None
self.commit(commit)
cursor.close()
return result
def get_by_wuid(self, cursor, wuid):
r = self.mapper.where(cursor, eq={"wuid": wuid})
assert len(r) <= 1
if len(r) == 1:
return r[0]
else:
return None
def result(self, wuid, clientid, files, errorcode=None,
failedcommand=None, commit=True):
cursor = self.get_cursor()
if not self.conn.in_transaction:
cursor.begin(EXCLUSIVE)
data = self.get_by_wuid(cursor, wuid)
if data is None:
self.commit(commit)
cursor.close()
return False
try:
self._checkstatus(data, WuStatus.ASSIGNED)
except StatusUpdateError:
self.commit(commit)
cursor.close()
if data["status"] == WuStatus.CANCELLED:
global PRINTED_CANCELLED_WARNING
if not PRINTED_CANCELLED_WARNING:
logger.warning("If workunits get cancelled due to timeout "
"even though the clients are still processing them, "
"consider increasing the tasks.wutimeout parameter or "
"decreasing the range covered in each workunit, "
"i.e., the tasks.polyselect.adrange or "
"tasks.sieve.qrange parameters.")
PRINTED_CANCELLED_WARNING = True
raise
if DEBUG > 0:
self.check(data)
d = {"resultclient": clientid,
"errorcode": errorcode,
"failedcommand": failedcommand,
"timeresult": str(datetime.utcnow())}
if errorcode is None or errorcode == 0:
d["status"] = WuStatus.RECEIVED_OK
else:
d["status"] = WuStatus.RECEIVED_ERROR
pk = self.mapper.getpk()
self._add_files(cursor, files, rowid = data[pk])
self.mapper.table.update(cursor, d, eq={pk:data[pk]})
self.commit(commit)
cursor.close()
return True
def verification(self, wuid, ok, commit=True):
cursor = self.get_cursor()
if not self.conn.in_transaction:
cursor.begin(EXCLUSIVE)
data = self.get_by_wuid(cursor, wuid)
if data is None:
self.commit(commit)
cursor.close()
return False
# FIXME: should we do the update by wuid and skip these checks?
try:
self._checkstatus(data, [WuStatus.RECEIVED_OK, WuStatus.RECEIVED_ERROR])
except StatusUpdateError:
self.commit(commit)
cursor.close()
raise
if DEBUG > 0:
self.check(data)
d = {"timeverified": str(datetime.utcnow())}
d["status"] = WuStatus.VERIFIED_OK if ok else WuStatus.VERIFIED_ERROR
pk = self.mapper.getpk()
self.mapper.table.update(cursor, d, eq={pk:data[pk]})
self.commit(commit)
cursor.close()
return True
def cancel(self, wuid, commit=True):
self.cancel_by_condition(eq={"wuid": wuid}, commit=commit)
def cancel_all_available(self, commit=True):
self.cancel_by_condition(eq={"status": WuStatus.AVAILABLE}, commit=commit)
def cancel_all_assigned(self, commit=True):
self.cancel_by_condition(eq={"status": WuStatus.ASSIGNED}, commit=commit)
def cancel_by_condition(self, commit=True, **conditions):
self.set_status(WuStatus.CANCELLED, commit=commit, **conditions)
def set_status(self, status, commit=True, **conditions):
cursor = self.get_cursor()
if not self.conn.in_transaction:
cursor.begin(EXCLUSIVE)
self.mapper.table.update(cursor, {"status": status}, **conditions)
self.commit(commit)
cursor.close()
def query(self, limit=None, **conditions):
cursor = self.get_cursor()
r = self.mapper.where(cursor, limit=limit, **conditions)
cursor.close()
return r
def count(self, **cond):
cursor = self.get_cursor()
count = self.mapper.count(cursor, **cond)
cursor.close()
return count
def count_available(self):
return self.count(eq={"status": WuStatus.AVAILABLE})
def get_one_result(self):
r = self.query(limit = 1, eq={"status": WuStatus.RECEIVED_OK})
if not r:
r = self.query(limit = 1, eq={"status": WuStatus.RECEIVED_ERROR})
if not r:
return None
else:
return r[0]
#}
class WuResultMessage(metaclass=abc.ABCMeta):
@abc.abstractmethod
def get_wu_id(self):
pass
@abc.abstractmethod
def get_output_files(self):
pass
@abc.abstractmethod
def get_stdout(self, command_nr):
pass
@abc.abstractmethod
def get_stdoutfile(self, command_nr):
pass
@abc.abstractmethod
def get_stderr(self, command_nr):
pass
@abc.abstractmethod
def get_stderrfile(self, command_nr):
pass
@abc.abstractmethod
def get_exitcode(self, command_nr):
pass
@abc.abstractmethod
def get_command_line(self, command_nr):
pass
@abc.abstractmethod
def get_host(self):
pass
def _read(self, filename, data):
if not filename is None:
with open(filename, "rb") as inputfile:
data = inputfile.read()
return bytes() if data is None else data
def read_stdout(self, command_nr):
""" Returns the contents of stdout of command_nr as a byte string.
If no stdout was captured, returns the empty byte string.
"""
return self._read(self.get_stdoutfile(command_nr),
self.get_stdout(command_nr))
def read_stderr(self, command_nr):
""" Like read_stdout() but for stderr """
return self._read(self.get_stderrfile(command_nr),
self.get_stderr(command_nr))
class ResultInfo(WuResultMessage):
def __init__(self, record):
# record looks like this:
# {'status': 0, 'errorcode': None, 'timeresult': None, 'wuid': 'testrun_polyselect_0-5000',
# 'wurowid': 1, 'timecreated': '2013-05-23 22:28:08.333310', 'timeverified': None,
# 'failedcommand': None, 'priority': None, 'wu': "WORKUNIT [..rest of workunit text...] \n",
# 'assignedclient': None, 'retryof': None, 'timeassigned': None, 'resultclient': None,
# 'files': None}
self.record = record
def __str__(self):
return str(self.record)
def get_wu_id(self):
return self.record["wuid"]
def get_output_files(self):
""" Returns the list of output files of this workunit
Only files that were specified in RESULT lines appear here;
automatically captured stdout and stderr does not.
"""
if self.record["files"] is None:
return []
files = []
for f in self.record["files"]:
if f["type"] == "RESULT":
files.append(f["path"])
return files
def _get_stdio(self, filetype, command_nr):
""" Get the file location of the stdout or stderr file of the
command_nr-th command. Used internally.
"""
if self.record["files"] is None:
return None
for f in self.record["files"]:
if f["type"] == filetype and int(f["command"]) == command_nr:
return f["path"]
return None
def get_stdout(self, command_nr):
# stdout is always captured into a file, not made available directly
return None
def get_stdoutfile(self, command_nr):
""" Return the path to the file that captured stdout of the
command_nr-th COMMAND in the workunit, or None if there was no stdout
output. Note that explicitly redirected stdout that was uploaded via
        RESULT does not appear here, but in get_output_files()
"""
return self._get_stdio("stdout", command_nr)
def get_stderr(self, command_nr):
# stderr is always captured into a file, not made available directly
return None
def get_stderrfile(self, command_nr):
""" Like get_stdoutfile(), but for stderr """
return self._get_stdio("stderr", command_nr)
def get_exitcode(self, command_nr):
""" Return the exit code of the command_nr-th command """
if not self.record["failedcommand"] is None \
and command_nr == int(self.record["failedcommand"]):
return int(self.record["errorcode"])
else:
return 0
def get_command_line(self, command_nr):
return None
def get_host(self):
return self.record["resultclient"]
class DbListener(patterns.Observable):
""" Class that queries the Workunit database for available results
and sends them to its Observers.
The query is triggered by receiving a SIGUSR1 (the instance subscribes to
the signal handler relay), or by calling send_result().
"""
# FIXME: SIGUSR1 handler is not implemented
def __init__(self, *args, db, **kwargs):
super().__init__(*args, **kwargs)
self.wuar = WuAccess(db)
def send_result(self):
# Check for results
r = self.wuar.get_one_result()
if not r:
return False
message = ResultInfo(r)
was_received = self.notifyObservers(message)
if not was_received:
logger.error("Result for workunit %s was not processed by any task. "
"Setting it to status CANCELLED", message.get_wu_id())
self.wuar.cancel(message.get_wu_id())
return was_received
class IdMap(object):
""" Identity map. Ensures that DB-backed dictionaries of the same table
name are instantiated only once.
Problem: we should also require that the DB is identical, but file names
are not a unique specifier to a file, and we allow connection objects
instead of DB file name. Not clear how to test for identity, lacking
support for this from the sqlite3 module API.
"""
def __init__(self):
self.db_dicts = {}
def make_db_dict(self, db, name):
key = name
if not key in self.db_dicts:
self.db_dicts[key] = DictDbAccess(db, name)
return self.db_dicts[key]
# Singleton instance of IdMap
idmap = IdMap()
class DbAccess(object):
""" Base class that lets subclasses create DB-backed dictionaries or
WuAccess instances on a database whose file name is specified in the db
parameter to __init__.
Meant to be used as a cooperative class; it strips the db parameter from
the parameter list and remembers it in a private variable so that it can
later be used to open DB connections.
"""
def __init__(self, *args, db, **kwargs):
super().__init__(*args, **kwargs)
self.__db = db
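    # Sketch of the intended cooperative use (class and table names are
    # placeholders):
    #   class MyTask(DbAccess, SomeOtherBase):
    #       def __init__(self, *args, **kwargs):
    #           super().__init__(*args, **kwargs)   # consumes the db= kwarg
    #           self.params = self.make_db_dict("mytask_params")
    #   task = MyTask(db=DBFactory("db:sqlite3://:memory:"))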
def get_db_connection(self):
return self.__db.connect()
def get_db_filename(self):
return self.__db.path
def get_db_uri(self):
return self.__db.uri
def make_db_dict(self, name, connection=None):
if connection is None:
return idmap.make_db_dict(self.__db, name)
else:
return idmap.make_db_dict(connection, name)
def make_wu_access(self, connection=None):
if connection is None:
return WuAccess(self.__db)
else:
return WuAccess(connection)
def make_db_listener(self, connection=None):
if connection is None:
return DbListener(db=self.__db)
else:
return DbListener(db=connection)
class HasDbConnection(DbAccess):
""" Gives sub-classes a db_connection attribute which is a database
connection instance.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.db_connection = self.get_db_connection()
class UsesWorkunitDb(HasDbConnection):
    """ Gives sub-classes a wuar attribute which is a WuAccess instance, using
    the sub-classes' shared database connection.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.wuar = self.make_wu_access(self.db_connection)
class DbWorker(DbAccess, threading.Thread):
"""Thread executing WuAccess requests from a given tasks queue"""
def __init__(self, taskqueue, *args, daemon=None, **kwargs):
super().__init__(*args, **kwargs)
self.taskqueue = taskqueue
if not daemon is None:
self.daemon = daemon
self.start()
def run(self):
# One DB connection per thread. Created inside the new thread to make
# sqlite happy
wuar = self.make_wu_access()
while True:
# We expect a 4-tuple in the task queue. The elements of the tuple:
# a 2-array, where element [0] receives the result of the DB call,
# and [1] is an Event variable to notify the caller when the
# result is available
# fn_name, the name (as a string) of the WuAccess method to call
# args, a tuple of positional arguments
# kargs, a dictionary of keyword arguments
(result_tuple, fn_name, args, kargs) = self.taskqueue.get()
if fn_name == "terminate":
break
ev = result_tuple[1]
# Assign to tuple in-place, so result is visible to caller.
# No slice etc. here which would create a copy of the array
try: result_tuple[0] = getattr(wuar, fn_name)(*args, **kargs)
except Exception as e:
traceback.print_exc()
ev.set()
self.taskqueue.task_done()
class DbRequest(object):
""" Class that represents a request to a given WuAccess function.
Used mostly so that DbThreadPool's __getattr__ can return a callable
that knows which of WuAccess's methods should be called by the
worker thread """
def __init__(self, taskqueue, func):
self.taskqueue = taskqueue
self.func = func
def do_task(self, *args, **kargs):
"""Add a task to the queue, wait for its completion, and return the result"""
ev = threading.Event()
result = [None, ev]
self.taskqueue.put((result, self.func, args, kargs))
ev.wait()
return result[0]
class DbThreadPool(object):
"""Pool of threads consuming tasks from a queue"""
def __init__(self, dburi, num_threads=1):
self.taskqueue = Queue(num_threads)
self.pool = []
for _ in range(num_threads):
worker = DbWorker(self.taskqueue, daemon=True, db=dburi)
self.pool.append(worker)
def terminate(self):
for t in self.pool:
self.taskqueue.put((None, "terminate", None, None))
        self.wait_completion()
def wait_completion(self):
"""Wait for completion of all the tasks in the queue"""
self.taskqueue.join()
def __getattr__(self, name):
""" Delegate calls to methods of WuAccess to a worker thread.
If the called method exists in WuAccess, creates a new
DbRequest instance that remembers the name of the method that we
tried to call, and returns the DbRequest instance's do_task
method which will process the method call via the thread pool.
We need to go through a new object's method since we cannot make
the caller pass the name of the method to call to the thread pool
otherwise """
if hasattr(WuAccess, name):
task = DbRequest(self.taskqueue, name)
return task.do_task
else:
raise AttributeError(name)
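    # For example (hypothetical call): with "pool = DbThreadPool(db_factory)",
    # "pool.count_available()" resolves to WuAccess.count_available via
    # __getattr__, is queued as a DbRequest, executed by a worker thread, and
    # the call blocks until that worker has stored the result.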
# One entry in the WU DB, including the text with the WU contents
# (FILEs, COMMANDs, etc.) and info about the progress on this WU (when and
# to whom assigned, received, etc.)
# wuid is the unique wuid of the workunit
# status is a status code as defined in WuStatus
# data is the str containing the text of the workunit
# timecreated is the string containing the date and time of when the WU was added to the db
# timeassigned is the ... of when the WU was assigned to a client
# assignedclient is the clientid of the client to which the WU was assigned
# timeresult is the ... of when a result for this WU was received
# resultclient is the clientid of the client that uploaded a result for this WU
# errorcode is the exit status code of the first failed command, or 0 if none failed
# timeverified is the ... of when the result was marked as verified
if __name__ == '__main__': # {
import argparse
queries = {"avail" : ("Available workunits", {"eq": {"status": WuStatus.AVAILABLE}}),
"assigned": ("Assigned workunits", {"eq": {"status": WuStatus.ASSIGNED}}),
"receivedok": ("Received ok workunits", {"eq":{"status": WuStatus.RECEIVED_OK}}),
"receivederr": ("Received with error workunits", {"eq": {"status": WuStatus.RECEIVED_ERROR}}),
"verifiedok": ("Verified ok workunits", {"eq": {"status": WuStatus.VERIFIED_OK}}),
"verifiederr": ("Verified with error workunits", {"eq": {"status": WuStatus.VERIFIED_ERROR}}),
"cancelled": ("Cancelled workunits", {"eq": {"status": WuStatus.CANCELLED}}),
"all": ("All existing workunits", {})
}
use_pool = False
parser = argparse.ArgumentParser()
parser.add_argument('-dbfile', help='Name of the database file')
parser.add_argument('-create', action="store_true",
help='Create the database tables if they do not exist')
parser.add_argument('-add', action="store_true",
help='Add new workunits. Contents of WU(s) are '
'read from stdin, separated by blank line')
parser.add_argument('-assign', nargs = 1, metavar = 'clientid',
help = 'Assign an available WU to clientid')
parser.add_argument('-cancel', action="store_true",
help = 'Cancel selected WUs')
parser.add_argument('-expire', action="store_true",
help = 'Expire selected WUs')
# parser.add_argument('-setstatus', metavar = 'STATUS',
# help = 'Forcibly set selected workunits to status (integer)')
parser.add_argument('-prio', metavar = 'N',
help = 'If used with -add, newly added WUs '
'receive priority N')
parser.add_argument('-limit', metavar = 'N',
help = 'Limit number of records in queries',
default = None)
parser.add_argument('-result', nargs = 6,
metavar = ('wuid', 'clientid', 'filename', 'filepath',
'filetype', 'command'),
help = 'Return a result for wu from client')
parser.add_argument('-test', action="store_true",
help='Run some self tests')
parser.add_argument('-debug', help='Set debugging level')
parser.add_argument('-setdict', nargs = 4,
metavar = ("dictname", "keyname", "type", "keyvalue"),
help='Set an entry of a DB-backed dictionary')
parser.add_argument('-wuid', help="Select workunit with given id",
metavar="WUID")
for arg in queries:
parser.add_argument('-' + arg, action="store_true", required=False,
help="Select %s" % queries[arg][0].lower())
parser.add_argument('-dump', nargs='?', default = None, const = "all",
metavar = "FIELD",
help='Dump WU contents, optionally a single field')
parser.add_argument('-sort', metavar = "FIELD",
help='With -dump, sort output by FIELD')
# Parse command line, store as dictionary
args = vars(parser.parse_args())
# print(args)
dbname = "wudb"
if args["dbfile"]:
dbname = args["dbfile"]
if args["test"]:
import doctest
doctest.testmod()
if args["debug"]:
DEBUG = int(args["debug"])
prio = 0
if args["prio"]:
        prio = int(args["prio"])
limit = args["limit"]
if use_pool:
db_pool = DbThreadPool(dbname)
else:
db_pool = WuAccess(dbname)
if args["create"]:
db_pool.create_tables()
if args["add"]:
s = ""
wus = []
for line in sys.stdin:
if line == "\n":
wus.append(s)
s = ""
else:
s += line
if s != "":
wus.append(s)
db_pool.create(wus, priority=prio)
# Functions for queries
queries_list = []
for (arg, (msg, condition)) in queries.items():
if args[arg]:
queries_list.append([msg, condition])
if args["wuid"]:
for wuid in args["wuid"].split(","):
msg = "Workunit %s" % wuid
condition = {"eq": {"wuid": wuid}}
queries_list.append([msg, condition])
for (msg, condition) in queries_list:
print("%s: " % msg)
if not args["dump"]:
count = db_pool.count(limit=args["limit"], **condition)
print (count)
else:
wus = db_pool.query(limit=args["limit"], **condition)
if wus is None:
print("0")
else:
print (len(wus))
if args["sort"]:
wus.sort(key=lambda wu: str(wu[args["sort"]]))
if args["dump"] == "all":
print(WuAccess.to_str(wus))
else:
for wu in wus:
print(wu[args["dump"]])
if args["cancel"]:
print("Cancelling selected workunits")
db_pool.cancel_by_condition(**condition)
if args["expire"]:
print("Expiring selected workunits")
db_pool.set_status(WuStatus.NEED_RESUBMIT, commit=True, **condition)
# if args["setstatus"]:
# db_pool.set_status(int(args["setstatus"]), **condition)
# Dict manipulation
if args["setdict"]:
(name, keyname, itemtype, keyvalue) = args["setdict"]
# Type-cast value to the specified type
value = getattr(__builtins__, itemtype)(keyvalue)
dbdict = DictDbAccess(dbname, name)
dbdict[keyname] = value
del(dbdict)
# Functions for testing
if args["assign"]:
clientid = args["assign"][0]
wus = db_pool.assign(clientid)
if args["result"]:
result = args["result"]
        db_pool.result(result[0], result[1], [result[2:]])
if use_pool:
db_pool.terminate()
# }
# Local Variables:
# version-control: t
# End:
| mancoast/cado-nfs | scripts/cadofactor/wudb.py | Python | lgpl-2.1 | 73,432 |
# -*- coding: utf-8 -*-
# Copyright (C) 2005 Osmo Salomaa
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Help actions for :class:`gaupol.Application`."""
import gaupol
class BrowseDocumentationAction(gaupol.Action):
def __init__(self):
gaupol.Action.__init__(self, "browse-documentation")
self.action_group = "safe"
class ReportABugAction(gaupol.Action):
def __init__(self):
gaupol.Action.__init__(self, "report-a-bug")
self.action_group = "safe"
class ViewAboutDialogAction(gaupol.Action):
def __init__(self):
gaupol.Action.__init__(self, "view-about-dialog")
self.action_group = "safe"
__all__ = tuple(x for x in dir() if x.endswith("Action"))
| otsaloma/gaupol | gaupol/actions/help.py | Python | gpl-3.0 | 1,303 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import click
from PIL import Image
from utils.misc import get_file_list
import re
def validate_dim(ctx, param, value):
percents = False
if value.endswith('%'):
value = value[:-1]
percents = True
if not re.match(r'^\d{1,6}(\.\d{1,6})?$', value):
raise click.BadParameter('invalid value')
value = float(value)
if value == 0:
raise click.BadParameter('invalid value')
return value, percents
@click.command()
@click.argument('path', type=click.Path(exists=True))
@click.argument('w', callback=validate_dim)
@click.argument('h', callback=validate_dim)
def resize(path, w, h):
"""
Resize all images in the given folder and all sub folders.
Width and Height can be integer or float values in pixels or percents: 10, 12.5, 80%, 20.5%.
Result size will be rounded to integer.
"""
for f in get_file_list(path):
im = Image.open(f).convert('RGBA')
new_w = int(round(im.size[0] * w[0] / 100. if w[1] else w[0]))
new_h = int(round(im.size[1] * h[0] / 100. if h[1] else h[0]))
new_im = im.resize((new_w, new_h), Image.LANCZOS)
new_im.save(f)
if __name__ == '__main__':
resize()
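# Example invocations (paths are placeholders):
#   python resize.py ./images 800 600    -> resize every image to 800x600 pixels
#   python resize.py ./images 50% 50%    -> halve both dimensions of every image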
| vladimirgamalian/pictools | resize.py | Python | mit | 1,240 |
# -*- coding: utf-8 -*-
#
# PySPED - Python libraries to deal with Brazil's SPED Project
#
# Copyright (C) 2010-2012
# Copyright (C) Aristides Caldeira <aristides.caldeira at tauga.com.br>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Library General Public License as
# published by the Free Software Foundation, either version 2.1 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# PySPED - Bibliotecas Python para o
# SPED - Sistema Público de Escrituração Digital
#
# Copyright (C) 2010-2012
# Copyright (C) Aristides Caldeira <aristides.caldeira arroba tauga.com.br>
#
# Este programa é um software livre: você pode redistribuir e/ou modificar
# este programa sob os termos da licença GNU Library General Public License,
# publicada pela Free Software Foundation, em sua versão 2.1 ou, de acordo
# com sua opção, qualquer versão posterior.
#
# Este programa é distribuido na esperança de que venha a ser útil,
# porém SEM QUAISQUER GARANTIAS, nem mesmo a garantia implícita de
# COMERCIABILIDADE ou ADEQUAÇÃO A UMA FINALIDADE ESPECÍFICA. Veja a
# GNU Library General Public License para mais detalhes.
#
# Você deve ter recebido uma cópia da GNU Library General Public License
# juntamente com este programa. Caso esse não seja o caso, acesse:
# <http://www.gnu.org/licenses/>
#
from __future__ import division, print_function, unicode_literals
import os
from pysped.xml_sped import TagCaracter, XMLNFe
from pysped.nfe.leiaute import ESQUEMA_ATUAL_VERSAO_2 as ESQUEMA_ATUAL
from pysped.nfe.leiaute.eventonfe_100 import (DetEvento, EnvEvento,
Evento, InfEvento,
InfEventoRecebido,
ProcEvento,
RetEvento,
RetEnvEvento)
DIRNAME = os.path.dirname(__file__)
_TEXTO_FIXO = 'A Carta de Correção é disciplinada pelo § 1º-A do art. 7º do Convênio S/N, de 15 de dezembro de 1970 e pode ser utilizada para regularização de erro ocorrido na emissão de documento fiscal, desde que o erro não esteja relacionado com: I - as variáveis que determinam o valor do imposto tais como: base de cálculo, alíquota, diferença de preço, quantidade, valor da operação ou da prestação; II - a correção de dados cadastrais que implique mudança do remetente ou do destinatário; III - a data de emissão ou de saída.'
class DetEventoCCe(DetEvento):
def __init__(self):
super(DetEventoCCe, self).__init__()
self.xCorrecao = TagCaracter(nome='xCorrecao', codigo='', tamanho=[15, 1000, 15], raiz='//detEvento')
self.xCondUso = TagCaracter(nome='xCondUso', codigo='', raiz='//detEvento', valor=_TEXTO_FIXO)
self.descEvento.valor = 'Carta de Correção'
def get_xml(self):
xml = XMLNFe.get_xml(self)
xml += self.versao.xml
xml += self.descEvento.xml
xml += self.xCorrecao.xml
xml += self.xCondUso.xml
xml += '</detEvento>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.versao.xml = arquivo
self.descEvento.xml = arquivo
self.xCorrecao.xml = arquivo
self.xCondUso.xml = arquivo
xml = property(get_xml, set_xml)
class InfEventoCCe(InfEvento):
def __init__(self):
super(InfEventoCCe, self).__init__()
self.detEvento = DetEventoCCe()
self.tpEvento.valor = '110110'
class EventoCCe(Evento):
def __init__(self):
super(EventoCCe, self).__init__()
self.infEvento = InfEventoCCe()
self.caminho_esquema = os.path.join(DIRNAME, 'schema/', ESQUEMA_ATUAL + '/')
self.arquivo_esquema = 'CCe_v1.00.xsd'
class InfEventoRecebidoCCe(InfEventoRecebido):
def __init__(self):
super(InfEventoRecebidoCCe, self).__init__()
class RetEventoCCe(RetEvento):
def __init__(self):
super(RetEventoCCe, self).__init__()
class ProcEventoCCe(ProcEvento):
def __init__(self):
super(ProcEventoCCe, self).__init__()
self.evento = EventoCCe()
self.retEvento = RetEventoCCe()
self.caminho_esquema = os.path.join(DIRNAME, 'schema', ESQUEMA_ATUAL + '/')
self.arquivo_esquema = 'procCCeNFe_v1.00.xsd'
class EnvEventoCCe(EnvEvento):
def __init__(self):
super(EnvEventoCCe, self).__init__()
self.caminho_esquema = os.path.join(DIRNAME, 'schema/', ESQUEMA_ATUAL + '/')
self.arquivo_esquema = 'envCCe_v1.00.xsd'
def get_xml(self):
return super(EnvEventoCCe, self).get_xml()
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.versao.xml = arquivo
self.idLote.xml = arquivo
self.evento = self.le_grupo('//envEvento/evento', EventoCCe)
xml = property(get_xml, set_xml)
class RetEnvEventoCCe(RetEnvEvento):
def __init__(self):
super(RetEnvEventoCCe, self).__init__()
self.caminho_esquema = os.path.join(DIRNAME, 'schema/', ESQUEMA_ATUAL + '/')
self.arquivo_esquema = 'retEnvCCe_V1.00.xsd'
| henriquechehad/PySPED | pysped/nfe/leiaute/evtccenfe_100.py | Python | lgpl-2.1 | 5,642 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyRequestsMock(PythonPackage):
"""Mock out responses from the requests package."""
homepage = "https://requests-mock.readthedocs.io/"
pypi = "requests-mock/requests-mock-1.7.0.tar.gz"
version('1.7.0', sha256='88d3402dd8b3c69a9e4f9d3a73ad11b15920c6efd36bc27bf1f701cf4a8e4646')
depends_on('py-setuptools', type='build')
| LLNL/spack | var/spack/repos/builtin/packages/py-requests-mock/package.py | Python | lgpl-2.1 | 567 |
# -* encoding: utf-8 *-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
| gopythongo/gopythongo | src/py/gopythongo/shared/__init__.py | Python | mpl-2.0 | 225 |
import inspect
import asyncio
import typing as t
from .util import isasync
Feature = t.TypeVar("Feature")
SyncFeatureProvider = t.Callable[..., Feature]
AsyncFeatureProvider = t.Callable[..., t.Awaitable[Feature]]
FeatureProvider = t.Union[SyncFeatureProvider, AsyncFeatureProvider]
SyncProviderMap = t.Dict[Feature, SyncFeatureProvider]
AsyncProviderMap = t.Dict[Feature, AsyncFeatureProvider]
FuncType = t.Callable[..., t.Any]
F = t.TypeVar("F", bound=FuncType)
if t.TYPE_CHECKING:
from .injector import Injector # noqa
def mark_provides(
func: FeatureProvider, feature: Feature, context: bool = False
) -> None:
func.__provides__ = feature
func.__contextprovider__ = context
func.__asyncproider__ = isasync(func)
def provider(func: FeatureProvider, context: bool = False) -> FeatureProvider:
signature = inspect.signature(func)
mark_provides(func, signature.return_annotation, context)
return func
def contextprovider(func: FeatureProvider) -> FeatureProvider:
signature = inspect.signature(func)
mark_provides(func, signature.return_annotation, True)
return func
def provides(feature: Feature, context=False):
def _decorator(func: FeatureProvider) -> FeatureProvider:
mark_provides(func, feature, context)
return func
return _decorator
class ModuleMeta(type):
def __new__(mcls, name, bases, attrs):
providers = {}
async_providers = {}
for base in bases:
if isinstance(base, mcls):
providers.update(base.providers)
                async_providers.update(base.async_providers)
for attr in attrs.values():
if hasattr(attr, "__provides__"):
if attr.__asyncproider__:
async_providers[attr.__provides__] = attr
else:
providers[attr.__provides__] = attr
attrs["providers"] = providers
attrs["async_providers"] = async_providers
return super().__new__(mcls, name, bases, attrs)
class Module(metaclass=ModuleMeta):
# providers: SyncProviderMap
# async_providers: AsyncProviderMap
@classmethod
def provider(cls, func: FeatureProvider) -> FeatureProvider:
cls.register(func)
return func
@classmethod
def provides(cls, feature: Feature):
def _decorator(func: FeatureProvider) -> FeatureProvider:
cls.register(func, feature)
return func
return _decorator
@classmethod
def register(
cls,
func: FeatureProvider,
feature: t.Optional[Feature] = None,
context: bool = False,
) -> None:
"""Register `func` to be a provider for `feature`.
        If `feature` is `None`, the function's return annotation will be
        inspected."""
if feature:
mark_provides(func, feature, context)
else:
provider(func, context)
if isasync(func):
cls.async_providers[func.__provides__] = func
else:
cls.providers[func.__provides__] = func
def load(self, injector: "Injector"):
pass
def unload(self, injector: "Injector"):
pass
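# Minimal usage sketch (not part of the original module): a Module subclass
# whose provider is registered by ModuleMeta through its return annotation.
# Run it with ``python -m diana.module`` so the relative imports above resolve.
if __name__ == "__main__":
    class _DemoModule(Module):
        @provider
        def give_number(self) -> int:
            """Provide the ``int`` feature."""
            return 42
    # ModuleMeta collected the synchronous provider under the ``int`` feature.
    assert int in _DemoModule.providers
    print(_DemoModule.providers[int](_DemoModule()))  # -> 42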
| xlevus/python-diana | diana/module.py | Python | mit | 3,199 |
# -*- coding: iso-8859-1 -*-
#
# Copyright (C) 2009 Rene Liebscher
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along with
# this program; if not, see <http://www.gnu.org/licenses/>.
#
__revision__ = "$Id: GeometricMean.py,v 1.4 2009/08/07 07:19:19 rliebscher Exp $"
from fuzzy.norm.Norm import Norm,product
class GeometricMean(Norm):
def __init__(self):
Norm.__init__(self,0)
def __call__(self,*args):
return pow(product(*args),1.0/len(args))
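if __name__ == "__main__":
    # Small usage sketch (not part of the original file). The geometric mean
    # of n membership degrees is (x1 * ... * xn) ** (1.0 / n); ``product`` is
    # assumed to multiply its arguments, as its use above suggests.
    gm = GeometricMean()
    print(gm(0.25, 0.81))  # sqrt(0.25 * 0.81) = 0.45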
| arruda/pyfuzzy | fuzzy/norm/GeometricMean.py | Python | lgpl-3.0 | 1,017 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for generate_detection_data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import os
import tempfile
import unittest
import numpy as np
import six
import tensorflow.compat.v1 as tf
from object_detection import exporter
from object_detection.builders import model_builder
from object_detection.core import model
from object_detection.dataset_tools.context_rcnn import generate_detection_data
from object_detection.protos import pipeline_pb2
from object_detection.utils import tf_version
if six.PY2:
import mock # pylint: disable=g-import-not-at-top
else:
mock = unittest.mock
try:
import apache_beam as beam # pylint:disable=g-import-not-at-top
except ModuleNotFoundError:
pass
class FakeModel(model.DetectionModel):
"""A Fake Detection model with expected output nodes from post-processing."""
def preprocess(self, inputs):
true_image_shapes = [] # Doesn't matter for the fake model.
return tf.identity(inputs), true_image_shapes
def predict(self, preprocessed_inputs, true_image_shapes):
return {'image': tf.layers.conv2d(preprocessed_inputs, 3, 1)}
def postprocess(self, prediction_dict, true_image_shapes):
with tf.control_dependencies(prediction_dict.values()):
postprocessed_tensors = {
'detection_boxes': tf.constant([[[0.0, 0.1, 0.5, 0.6],
[0.5, 0.5, 0.8, 0.8]]], tf.float32),
'detection_scores': tf.constant([[0.95, 0.6]], tf.float32),
'detection_multiclass_scores': tf.constant([[[0.1, 0.7, 0.2],
[0.3, 0.1, 0.6]]],
tf.float32),
'detection_classes': tf.constant([[0, 1]], tf.float32),
'num_detections': tf.constant([2], tf.float32)
}
return postprocessed_tensors
def restore_map(self, checkpoint_path, fine_tune_checkpoint_type):
pass
def restore_from_objects(self, fine_tune_checkpoint_type):
pass
def loss(self, prediction_dict, true_image_shapes):
pass
def regularization_losses(self):
pass
def updates(self):
pass
@contextlib.contextmanager
def InMemoryTFRecord(entries):
temp = tempfile.NamedTemporaryFile(delete=False)
filename = temp.name
try:
with tf.python_io.TFRecordWriter(filename) as writer:
for value in entries:
writer.write(value)
yield filename
finally:
os.unlink(filename)
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class GenerateDetectionDataTest(tf.test.TestCase):
def _save_checkpoint_from_mock_model(self, checkpoint_path):
"""A function to save checkpoint from a fake Detection Model.
Args:
checkpoint_path: Path to save checkpoint from Fake model.
"""
g = tf.Graph()
with g.as_default():
mock_model = FakeModel(num_classes=5)
preprocessed_inputs, true_image_shapes = mock_model.preprocess(
tf.placeholder(tf.float32, shape=[None, None, None, 3]))
predictions = mock_model.predict(preprocessed_inputs, true_image_shapes)
mock_model.postprocess(predictions, true_image_shapes)
tf.train.get_or_create_global_step()
saver = tf.train.Saver()
init = tf.global_variables_initializer()
with self.test_session(graph=g) as sess:
sess.run(init)
saver.save(sess, checkpoint_path)
def _export_saved_model(self):
tmp_dir = self.get_temp_dir()
checkpoint_path = os.path.join(tmp_dir, 'model.ckpt')
self._save_checkpoint_from_mock_model(checkpoint_path)
output_directory = os.path.join(tmp_dir, 'output')
saved_model_path = os.path.join(output_directory, 'saved_model')
tf.io.gfile.makedirs(output_directory)
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel(num_classes=5)
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_config.use_moving_averages = False
detection_model = model_builder.build(pipeline_config.model,
is_training=False)
outputs, placeholder_tensor = exporter.build_detection_graph(
input_type='tf_example',
detection_model=detection_model,
input_shape=None,
output_collection_name='inference_op',
graph_hook_fn=None)
output_node_names = ','.join(outputs.keys())
saver = tf.train.Saver()
input_saver_def = saver.as_saver_def()
frozen_graph_def = exporter.freeze_graph_with_def_protos(
input_graph_def=tf.get_default_graph().as_graph_def(),
input_saver_def=input_saver_def,
input_checkpoint=checkpoint_path,
output_node_names=output_node_names,
restore_op_name='save/restore_all',
filename_tensor_name='save/Const:0',
output_graph='',
clear_devices=True,
initializer_nodes='')
exporter.write_saved_model(
saved_model_path=saved_model_path,
frozen_graph_def=frozen_graph_def,
inputs=placeholder_tensor,
outputs=outputs)
return saved_model_path
def _create_tf_example(self):
with self.test_session():
encoded_image = tf.image.encode_jpeg(
tf.constant(np.ones((4, 6, 3)).astype(np.uint8))).eval()
def BytesFeature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def Int64Feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
example = tf.train.Example(features=tf.train.Features(feature={
'image/encoded': BytesFeature(encoded_image),
'image/source_id': BytesFeature(b'image_id'),
'image/height': Int64Feature(4),
'image/width': Int64Feature(6),
'image/object/class/label': Int64Feature(5),
'image/object/class/text': BytesFeature(b'hyena'),
'image/class/label': Int64Feature(5),
'image/class/text': BytesFeature(b'hyena'),
}))
return example.SerializeToString()
def assert_expected_example(self, example):
self.assertAllClose(
example.features.feature['image/object/bbox/ymin'].float_list.value,
[0.0])
self.assertAllClose(
example.features.feature['image/object/bbox/xmin'].float_list.value,
[0.1])
self.assertAllClose(
example.features.feature['image/object/bbox/ymax'].float_list.value,
[0.5])
self.assertAllClose(
example.features.feature['image/object/bbox/xmax'].float_list.value,
[0.6])
self.assertAllClose(
example.features.feature['image/object/class/score']
.float_list.value, [0.95])
self.assertAllClose(
example.features.feature['image/object/class/label']
.int64_list.value, [5])
self.assertAllEqual(
example.features.feature['image/object/class/text']
.bytes_list.value, [b'hyena'])
self.assertAllClose(
example.features.feature['image/class/label']
.int64_list.value, [5])
self.assertAllEqual(
example.features.feature['image/class/text']
.bytes_list.value, [b'hyena'])
# Check other essential attributes.
self.assertAllEqual(
example.features.feature['image/height'].int64_list.value, [4])
self.assertAllEqual(
example.features.feature['image/width'].int64_list.value, [6])
self.assertAllEqual(
example.features.feature['image/source_id'].bytes_list.value,
[b'image_id'])
self.assertTrue(
example.features.feature['image/encoded'].bytes_list.value)
def test_generate_detection_data_fn(self):
saved_model_path = self._export_saved_model()
confidence_threshold = 0.8
inference_fn = generate_detection_data.GenerateDetectionDataFn(
saved_model_path, confidence_threshold)
inference_fn.start_bundle()
generated_example = self._create_tf_example()
self.assertAllEqual(tf.train.Example.FromString(
generated_example).features.feature['image/object/class/label']
.int64_list.value, [5])
self.assertAllEqual(tf.train.Example.FromString(
generated_example).features.feature['image/object/class/text']
.bytes_list.value, [b'hyena'])
output = inference_fn.process(generated_example)
output_example = output[0]
self.assertAllEqual(
output_example.features.feature['image/object/class/label']
.int64_list.value, [5])
self.assertAllEqual(output_example.features.feature['image/width']
.int64_list.value, [6])
self.assert_expected_example(output_example)
def test_beam_pipeline(self):
with InMemoryTFRecord([self._create_tf_example()]) as input_tfrecord:
temp_dir = tempfile.mkdtemp(dir=os.environ.get('TEST_TMPDIR'))
output_tfrecord = os.path.join(temp_dir, 'output_tfrecord')
saved_model_path = self._export_saved_model()
confidence_threshold = 0.8
num_shards = 1
pipeline_options = beam.options.pipeline_options.PipelineOptions(
runner='DirectRunner')
p = beam.Pipeline(options=pipeline_options)
generate_detection_data.construct_pipeline(
p, input_tfrecord, output_tfrecord, saved_model_path,
confidence_threshold, num_shards)
p.run()
filenames = tf.io.gfile.glob(output_tfrecord + '-?????-of-?????')
actual_output = []
record_iterator = tf.python_io.tf_record_iterator(path=filenames[0])
for record in record_iterator:
actual_output.append(record)
self.assertEqual(len(actual_output), 1)
self.assert_expected_example(tf.train.Example.FromString(
actual_output[0]))
if __name__ == '__main__':
tf.test.main()
| tombstone/models | research/object_detection/dataset_tools/context_rcnn/generate_detection_data_tf1_test.py | Python | apache-2.0 | 10,558 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from Tkinter import *
from ttk import Frame, Button, Style
class Example(Frame):
def __init__(self, parent):
Frame.__init__(self, parent)
self.parent = parent
self.initUI()
def initUI(self):
self.parent.title("Quit button")
self.style = Style()
self.style.theme_use("default")
self.pack(fill=BOTH, expand=1)
quitButton = Button(self, text="Quit",
command=self.quit)
quitButton.place(x=50, y=50)
    def display_bots(self):
        pass
def main():
root = Tk()
root.geometry("250x150+300+300")
app = Example(root)
root.mainloop()
if __name__ == '__main__':
main() | RachaelT/UTDchess-RospyXbee | src/chessbot/src/gui.py | Python | mit | 775 |
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
BASIC_TESTS = """
>>> from google.appengine.api import users
>>> from models import User, AnonymousUser
>>> appengine_user = users.User("[email protected]")
>>> django_user = User.get_djangouser_for_user(appengine_user)
>>> django_user.email == appengine_user.email()
True
>>> django_user.username == appengine_user.nickname()
True
>>> django_user.user == appengine_user
True
>>> django_user.username = 'test2'
>>> key = django_user.save()
>>> django_user.username == 'test2'
True
>>> django_user2 = User.get_djangouser_for_user(appengine_user)
>>> django_user2 == django_user
True
>>> django_user.is_authenticated()
True
>>> django_user.is_staff
False
>>> django_user.is_active
True
>>> a = AnonymousUser()
>>> a.is_authenticated()
False
>>> a.is_staff
False
>>> a.is_active
False
>>> a.groups.all()
[]
>>> a.user_permissions.all()
[]
"""
__test__ = {'BASIC_TESTS': BASIC_TESTS}
| CubicERP/geraldo | site/newsite/site-geraldo/appengine_django/auth/tests.py | Python | lgpl-3.0 | 1,462 |
#!/usr/bin/env python
import sys
import requests
# r = requests.get('https://health.data.ny.gov/resource/child-and-adult-care-food-program-participation-beginning-2007.json?recall_id=94', headers={'X-App-Token': sys.argv[1]})
r = requests.get('https://health.data.ny.gov/resource/child-and-adult-care-food-program-participation-beginning-2007.json?', headers={})
print r.status_code
print r.text
| SUNY-Albany-CCI/NY-State-Health-Data-Code-A-Thon | python/example.py | Python | apache-2.0 | 399 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# author Jonas Ohrstrom <[email protected]>
import sys
import time
import os
import socket
import string
import logging
from util import json
class DlsClient():
def __init__(self, dls_host, dls_port, dls_user, dls_pass):
self.dls_host = dls_host
self.dls_port = dls_port
self.dls_user = dls_user
self.dls_pass = dls_pass
def set_txt(self, txt):
logger = logging.getLogger("DlsClient.set_txt")
try:
print 'connecting to: %s:%s@%s:%s' % (self.dls_user, self.dls_pass, self.dls_host, self.dls_port)
print
logger.debug('connecting to: %s:%s@%s:%s', self.dls_user, self.dls_pass, self.dls_host, self.dls_port)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(1.0)
s.connect((self.dls_host, self.dls_port))
print 'Set STRING'
s.send('' + '' + str(txt) + "\r\n")
data = s.recv(1024)
print data
s.close()
print 'OK'
except Exception, e:
dls_status = False
logger.error("Unable to connect to the dls server - %s", e)
return
def set_txt_(self, txt):
logger = logging.getLogger("DlsClient.set_txt")
try:
print 'connecting to: %s:%s@%s:%s' % (self.dls_user, self.dls_pass, self.dls_host, self.dls_port)
print
logger.debug('connecting to: %s:%s@%s:%s', self.dls_user, self.dls_pass, self.dls_host, self.dls_port)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(5.0)
s.connect((self.dls_host, self.dls_port))
# Handshake
print 'Handshake',
s.send('RS_DLS_CLIENT' + '' + "\r\n")
data = s.recv(1024)
print data
# Version
print 'Version',
s.send('RS_DLS_VERSION' + ' 1' + "\r\n")
data = s.recv(1024)
print data
# Authentication
print 'Authentication',
s.send('SERVICE' + ' OPENBRO+' + "\r\n")
s.send('PASSWORD' + ' OPENBRO+' + "\r\n")
data = s.recv(1024)
print data
# Update text
print 'Clear DLS'
s.send('CLEAR_DLS' + '' + "\r\n")
data = s.recv(1024)
print data
print 'Set DLS'
s.send('SET_DLS' + ' ' + str(txt) + "\r\n")
data = s.recv(1024)
print data
s.close()
print 'OK'
except Exception, e:
dls_status = False
logger.error("Unable to connect to the dls server - %s", e)
return
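if __name__ == '__main__':
    # Minimal smoke test (not part of the original module). The host, port and
    # credentials are placeholders; point them at a real DLS text server. If
    # nothing is listening, set_txt() simply logs the connection error.
    logging.basicConfig(level=logging.DEBUG)
    client = DlsClient('127.0.0.1', 9300, 'user', 'secret')
    client.set_txt('Now playing: Example Artist - Example Track')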
| hzlf/openbroadcast | services/__orig_pypo/dls/dls_client.py | Python | gpl-3.0 | 3,067 |
# correlate.py Demo of retrieving signal buried in noise
# Released under the MIT licence.
# Copyright Peter Hinch 2018
# This simulates a Pyboard ADC acquiring a signal using read_timed. The signal is
# digital (switching between two fixed voltages) but is corrupted by a larger analog
# noise source.
# The output array contains the correlation of the corrupted input with the known
# expected signal.
# A successful detection comprises a uniquely large value at sample 930 where the
# end of the signal is located. The program also outputs the second largest
# correlation value and the ratio of the two as a measure of the certainty of
# detection.
# The chosen signal is a pseudo-random digital burst. This was chosen because the
# auto-correlation function of random noise is an impulse function.
# The module autocorrelate.py was used to generate this
# In this test it is added to a pseudo-random analog signal of much longer
# duration to test the reliability of discrimination.
import utime
import pyb
from array import array
from filt import dcf, WRAP, SCALE, REVERSE
# Read buffer length
RBUFLEN = 1000
# Digital amplitude. Compares with analog amplitude of 1000.
DIGITAL_AMPLITUDE = 600 # 400 is about the limit with occasional false positives
def rn_analog(): # Random number in range +- 1000
return int(pyb.rng() / 536870 - 1000)
# Max runlength 2, DR == 10 (5 bits/50)
# in 100 runs lowest DR seen was 1.6, with no false positives.
signal = bytearray([1,-1,1,-1,1,-1,1,1,-1,-1,1,1,-1,-1,1,-1,
1,-1,1,1,-1,1,-1,1,-1,1,-1,-1,1,1,-1,1,
-1,-1,1,-1,1,-1,1,1,-1,-1,1,-1,1,1,-1,1,
-1,-1,])
siglen = len(signal)
# Input buffer contains random noise. While this is a simulation, the bias of 2048
# emulates a Pyboard ADC biased at its mid-point (1.65V).
# Order (as from read_timed) is oldest first.
bufin = array('H', (2048 + rn_analog() for i in range(RBUFLEN))) # range 2048 +- 1000
# Add signal in. Burst ends 20 samples before the end.
x = RBUFLEN - siglen - 20
for s in signal:
s = 1 if s == 1 else -1
bufin[x] += s * DIGITAL_AMPLITUDE
x += 1
# Coeffs hold the expected signal in normal time order (oldest first).
coeffs = array('f', (1 if signal[i] == 1 else -1 for i in range(siglen))) # range +-1
op = array('f', (0 for _ in range(RBUFLEN)))
setup = array('i', [0]*5)
setup[0] = len(bufin)
setup[1] = len(coeffs)
setup[2] = SCALE # No wrap, normal time order. No copy back: I/P sample set unchanged
setup[3] = 1 # No decimation.
setup[4] = 2048 # Offset
op[0] = 0.001 # Scale
t = utime.ticks_us()
n_results = dcf(bufin, op, coeffs, setup)
t = utime.ticks_diff(utime.ticks_us(), t)
ns = 0
maxop = 0
for x in range(n_results):
res = op[x]
print('{:3d} {:8.1f}'.format(x, res))
if res > maxop:
maxop = res
ns = x # save sample no.
nextop = 0
for x in op:
if x < maxop and x > nextop:
nextop = x
s = 'Max correlation {:5.1f} at sample {:d} Next largest {:5.1f} Detection ratio {:5.1f}.'
print(s.format(maxop, ns, nextop, maxop/nextop))
if ns == 930:
print('Correct detection.')
else:
print('FALSE POSITIVE.')
print('Duration {:5d}μs'.format(t))
| peterhinch/micropython-filters | non_realtime/correlate.py | Python | mit | 3,143 |
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_serialization import jsonutils
from nova.scheduler import filters
from nova.scheduler.filters import extra_specs_ops
LOG = logging.getLogger(__name__)
class ComputeCapabilitiesFilter(filters.BaseHostFilter):
"""HostFilter hard-coded to work with InstanceType records."""
# Instance type and host capabilities do not change within a request
run_filter_once_per_request = True
RUN_ON_REBUILD = False
def _get_capabilities(self, host_state, scope):
cap = host_state
for index in range(0, len(scope)):
try:
if isinstance(cap, str):
try:
cap = jsonutils.loads(cap)
except ValueError as e:
LOG.debug("%(host_state)s fails. The capabilities "
"'%(cap)s' couldn't be loaded from JSON: "
"%(error)s",
{'host_state': host_state, 'cap': cap,
'error': e})
return None
if not isinstance(cap, dict):
if getattr(cap, scope[index], None) is None:
# If can't find, check stats dict
cap = cap.stats.get(scope[index], None)
else:
cap = getattr(cap, scope[index], None)
else:
cap = cap.get(scope[index], None)
except AttributeError as e:
LOG.debug("%(host_state)s fails. The capabilities couldn't "
"be retrieved: %(error)s.",
{'host_state': host_state, 'error': e})
return None
if cap is None:
LOG.debug("%(host_state)s fails. There are no capabilities "
"to retrieve.",
{'host_state': host_state})
return None
return cap
def _satisfies_extra_specs(self, host_state, flavor):
"""Check that the host_state provided by the compute service
satisfies the extra specs associated with the instance type.
"""
if 'extra_specs' not in flavor:
return True
for key, req in flavor.extra_specs.items():
# Either not scope format, or in capabilities scope
scope = key.split(':')
# If key does not have a namespace, the scope's size is 1, check
# whether host_state contains the key as an attribute. If not,
# ignore it. If it contains, deal with it in the same way as
# 'capabilities:key'. This is for backward-compatible.
# If the key has a namespace, the scope's size will be bigger than
# 1, check that whether the namespace is 'capabilities'. If not,
# ignore it.
if len(scope) == 1:
stats = getattr(host_state, 'stats', {})
has_attr = hasattr(host_state, key) or key in stats
if not has_attr:
continue
else:
if scope[0] != "capabilities":
continue
else:
del scope[0]
cap = self._get_capabilities(host_state, scope)
if cap is None:
return False
if not extra_specs_ops.match(str(cap), req):
LOG.debug("%(host_state)s fails extra_spec requirements. "
"'%(req)s' does not match '%(cap)s'",
{'host_state': host_state, 'req': req,
'cap': cap})
return False
return True
def host_passes(self, host_state, spec_obj):
"""Return a list of hosts that can create flavor."""
if not self._satisfies_extra_specs(host_state, spec_obj.flavor):
LOG.debug(
"%(host_state)s fails flavor extra_specs requirements",
{'host_state': host_state})
return False
return True
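if __name__ == '__main__':
    # Illustrative sketch only (not part of Nova): exercises the scope parsing
    # above with stand-in objects. Real filtering uses nova.objects.Flavor and
    # HostState; the attribute names below are assumptions for the demo.
    class _FakeFlavor(object):
        def __init__(self, extra_specs):
            self.extra_specs = extra_specs
        def __contains__(self, item):
            return item == 'extra_specs'
    class _FakeHostState(object):
        stats = {'hypervisor_type': 'QEMU'}
    flavor = _FakeFlavor({'capabilities:hypervisor_type': 'QEMU'})
    # 'capabilities:' is stripped, then 'hypervisor_type' is resolved via the
    # host's stats dict and matched with extra_specs_ops.match().
    print(ComputeCapabilitiesFilter()._satisfies_extra_specs(
        _FakeHostState(), flavor))  # -> True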
| openstack/nova | nova/scheduler/filters/compute_capabilities_filter.py | Python | apache-2.0 | 4,777 |
from django.db import models
class AbstractPersOAModel(models.Model):
"""
A model made for the PersOA app
"""
class Meta:
abstract = True
app_label = 'app'
| Saevon/PersOA | app/models/abstract.py | Python | mit | 190 |
from django.contrib import admin
from .models import AboutUs, Gallery
# Register your models here.
class AboutUsAdmin(admin.ModelAdmin):
fields = ['category', 'title', 'photo', 'text']
list_display = ('title','category',)
class GalleryAdmin(admin.ModelAdmin):
fields = ['category', 'title', 'photo', 'text']
list_display = ('title','category',)
admin.site.register(AboutUs, AboutUsAdmin)
admin.site.register(Gallery, GalleryAdmin)
| RachellCalhoun/cathotel | hotel/admin.py | Python | mit | 454 |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: proto/bb_backend/go.chromium.org/luci/common/proto/options.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import descriptor_pb2 as google_dot_protobuf_dot_descriptor__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='proto/bb_backend/go.chromium.org/luci/common/proto/options.proto',
package='luci',
syntax='proto2',
serialized_options=b'Z!go.chromium.org/luci/common/proto',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n@proto/bb_backend/go.chromium.org/luci/common/proto/options.proto\x12\x04luci\x1a google/protobuf/descriptor.proto\"\x1b\n\x08Metadata\x12\x0f\n\x07\x64oc_url\x18\x01 \x01(\t**\n\x11TextPBFieldFormat\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\x08\n\x04JSON\x10\x01:E\n\rfile_metadata\x12\x1c.google.protobuf.FileOptions\x18\xe4\xbf\x04 \x01(\x0b\x32\x0e.luci.Metadata:P\n\x0etext_pb_format\x12\x1d.google.protobuf.FieldOptions\x18\xe5\xbf\x04 \x01(\x0e\x32\x17.luci.TextPBFieldFormat:7\n\x0elucicfg_ignore\x12\x1d.google.protobuf.FieldOptions\x18\xe6\xbf\x04 \x01(\x08\x42#Z!go.chromium.org/luci/common/proto'
,
dependencies=[google_dot_protobuf_dot_descriptor__pb2.DESCRIPTOR,])
_TEXTPBFIELDFORMAT = _descriptor.EnumDescriptor(
name='TextPBFieldFormat',
full_name='luci.TextPBFieldFormat',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='DEFAULT', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='JSON', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=137,
serialized_end=179,
)
_sym_db.RegisterEnumDescriptor(_TEXTPBFIELDFORMAT)
TextPBFieldFormat = enum_type_wrapper.EnumTypeWrapper(_TEXTPBFIELDFORMAT)
DEFAULT = 0
JSON = 1
FILE_METADATA_FIELD_NUMBER = 73700
file_metadata = _descriptor.FieldDescriptor(
name='file_metadata', full_name='luci.file_metadata', index=0,
number=73700, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
TEXT_PB_FORMAT_FIELD_NUMBER = 73701
text_pb_format = _descriptor.FieldDescriptor(
name='text_pb_format', full_name='luci.text_pb_format', index=1,
number=73701, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
LUCICFG_IGNORE_FIELD_NUMBER = 73702
lucicfg_ignore = _descriptor.FieldDescriptor(
name='lucicfg_ignore', full_name='luci.lucicfg_ignore', index=2,
number=73702, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
_METADATA = _descriptor.Descriptor(
name='Metadata',
full_name='luci.Metadata',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='doc_url', full_name='luci.Metadata.doc_url', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=108,
serialized_end=135,
)
DESCRIPTOR.message_types_by_name['Metadata'] = _METADATA
DESCRIPTOR.enum_types_by_name['TextPBFieldFormat'] = _TEXTPBFIELDFORMAT
DESCRIPTOR.extensions_by_name['file_metadata'] = file_metadata
DESCRIPTOR.extensions_by_name['text_pb_format'] = text_pb_format
DESCRIPTOR.extensions_by_name['lucicfg_ignore'] = lucicfg_ignore
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Metadata = _reflection.GeneratedProtocolMessageType('Metadata', (_message.Message,), {
'DESCRIPTOR' : _METADATA,
'__module__' : 'proto.bb_backend.go.chromium.org.luci.common.proto.options_pb2'
# @@protoc_insertion_point(class_scope:luci.Metadata)
})
_sym_db.RegisterMessage(Metadata)
file_metadata.message_type = _METADATA
google_dot_protobuf_dot_descriptor__pb2.FileOptions.RegisterExtension(file_metadata)
text_pb_format.enum_type = _TEXTPBFIELDFORMAT
google_dot_protobuf_dot_descriptor__pb2.FieldOptions.RegisterExtension(text_pb_format)
google_dot_protobuf_dot_descriptor__pb2.FieldOptions.RegisterExtension(lucicfg_ignore)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| luci/luci-py | appengine/swarming/bb_backend_proto/go/chromium/org/luci/common/proto/options_pb2.py | Python | apache-2.0 | 5,626 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017-2022 Sébastien Helleu <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
"""
Unparse AST tree to generate scripts in all supported languages
(Python, Perl, Ruby, ...).
"""
# pylint: disable=too-many-lines,unnecessary-pass,useless-object-inheritance
from __future__ import print_function
import argparse
import ast
import inspect
import os
import select
try:
from StringIO import StringIO # python 2
except ImportError:
from io import StringIO # python 3
import sys
sys.dont_write_bytecode = True
class UnparsePython(object):
"""
Unparse AST to generate Python script code.
This class is inspired from unparse.py in cpython:
https://github.com/python/cpython/blob/master/Tools/parser/unparse.py
Note: only part of AST types are supported (just the types used by
the script to test WeeChat scripting API).
"""
__lineno__ = inspect.currentframe().f_lineno
def __init__(self, output=sys.stdout):
self.output = output
self.indent_string = ' ' * 4
self._indent_level = 0
self._prefix = [] # not used in Python, only in other languages
self.binop = {
'Add': '+',
'Sub': '-',
'Mult': '*',
'MatMult': '@',
'Div': '/',
'Mod': '%',
'LShift': '<<',
'RShift': '>>',
'BitOr': '|',
'BitXor': '^',
'BitAnd': '&',
'FloorDiv': '//',
'Pow': '**',
}
self.unaryop = {
'Invert': '~',
'Not': 'not',
'UAdd': '+',
'USub': '-',
}
self.cmpop = {
'Eq': '==',
'NotEq': '!=',
'Lt': '<',
'LtE': '<=',
'Gt': '>',
'GtE': '>=',
'Is': 'is',
'IsNot': 'is not',
'In': 'in',
'NotIn': 'not in',
}
def fill(self, string=''):
"""Add a new line and an indented string."""
self.add('\n%s%s' % (self.indent_string * self._indent_level, string))
def indent(self):
"""Indent code."""
self._indent_level += 1
def unindent(self):
"""Unindent code."""
self._indent_level -= 1
def prefix(self, prefix):
"""Add or remove a prefix from list."""
if prefix:
self._prefix.append(prefix)
else:
self._prefix.pop()
def add(self, *args):
"""Add string/node(s) to the output file."""
for arg in args:
if callable(arg):
arg()
elif isinstance(arg, tuple):
arg[0](*arg[1:])
elif isinstance(arg, list):
for item in arg:
self.add(item)
elif isinstance(arg, ast.AST):
method = getattr(
self, '_ast_%s' % arg.__class__.__name__.lower(),
None)
if method is None:
raise NotImplementedError(arg)
method(arg)
elif isinstance(arg, str):
self.output.write(arg)
@staticmethod
def make_list(values, sep=', '):
"""Add multiple values using a custom method and separator."""
result = []
for value in values:
if result:
result.append(sep)
result.append(value)
return result
def is_bool(self, node): # pylint: disable=no-self-use
"""Check if the node is a boolean."""
return isinstance(node, ast.Name) and node.id in ('False', 'True')
def is_number(self, node): # pylint: disable=no-self-use
"""Check if the node is a number."""
# in python 2, number -1 is Num(n=-1)
# in Python 3, number -1 is UnaryOp(op=USub(), operand=Num(n=1))
return (isinstance(node, ast.Num) or
(isinstance(node, ast.UnaryOp) and
isinstance(node.op, (ast.UAdd, ast.USub))))
def _ast_alias(self, node):
"""Add an AST alias in output."""
# ignore alias
pass
def _ast_arg(self, node):
"""Add an AST arg in output."""
self.add('%s%s' % (self._prefix[-1] if self._prefix else '',
node.arg))
def _ast_assign(self, node):
"""Add an AST Assign in output."""
self.add(
self.fill,
[[target, ' = '] for target in node.targets],
node.value,
)
def _ast_attribute(self, node):
"""Add an AST Attribute in output."""
self.add(node.value, '.', node.attr)
def _ast_binop(self, node):
"""Add an AST BinOp in output."""
self.add(
node.left,
' %s ' % self.binop[node.op.__class__.__name__],
node.right,
)
def _ast_call(self, node):
"""Add an AST Call in output."""
self.add(
node.func,
'(',
self.make_list(node.args),
')',
)
def _ast_compare(self, node):
"""Add an AST Compare in output."""
self.add(node.left)
for operator, comparator in zip(node.ops, node.comparators):
self.add(
' %s ' % self.cmpop[operator.__class__.__name__],
comparator,
)
def _ast_constant(self, node):
"""Add an AST Constant in output."""
self.add(repr(node.s))
def _ast_dict(self, node):
"""Add an AST Dict in output."""
self.add(
'{',
self.make_list([[key, ': ', value]
for key, value in zip(node.keys, node.values)]),
'}',
)
def _ast_expr(self, node):
"""Add an AST Expr in output."""
if not isinstance(node.value, ast.Str): # ignore docstrings
self.add(
self.fill,
node.value,
)
def _ast_functiondef(self, node):
"""Add an AST FunctionDef in output."""
self.add(
self.fill,
self.fill,
self.fill if self._indent_level == 0 else None,
'def %s(' % node.name,
self.make_list(node.args.args),
'):',
self.indent,
node.body,
self.unindent,
)
def _ast_if(self, node):
"""Add an AST If in output."""
self.add(
self.fill,
'if ',
node.test,
':',
self.indent,
node.body,
self.unindent,
)
if node.orelse:
self.add(
self.fill,
'else:',
self.indent,
node.orelse,
self.unindent,
)
def _ast_import(self, node):
"""Add an AST Import in output."""
# ignore import
pass
def _ast_module(self, node):
"""Add an AST Module in output."""
self.add(node.body)
def _ast_name(self, node):
"""Add an AST Name in output."""
self.add('%s%s' % (self._prefix[-1] if self._prefix else '',
node.id))
def _ast_num(self, node):
"""Add an AST Num in output."""
# note: deprecated since Python 3.8, replaced by ast.Constant
self.add(repr(node.n))
def _ast_pass(self, node): # pylint: disable=unused-argument
"""Add an AST Pass in output."""
self.fill('pass')
def _ast_return(self, node):
"""Add an AST Return in output."""
self.fill('return')
if node.value:
self.add(' ', node.value)
def _ast_str(self, node):
"""Add an AST Str in output."""
# note: deprecated since Python 3.8, replaced by ast.Constant
self.add(repr(node.s))
def _ast_tuple(self, node):
"""Add an AST Tuple in output."""
self.add(
'(',
self.make_list(node.elts),
',' if len(node.elts) == 1 else None,
')',
)
def _ast_unaryop(self, node):
"""Add an AST UnaryOp in output."""
self.add(
'(',
self.unaryop[node.op.__class__.__name__],
' ',
node.operand,
')',
)
class UnparsePerl(UnparsePython):
"""
Unparse AST to generate Perl script code.
Note: only part of AST types are supported (just the types used by
the script to test WeeChat scripting API).
"""
__lineno__ = inspect.currentframe().f_lineno
def _ast_assign(self, node):
"""Add an AST Assign in output."""
self.add(
self.fill,
(self.prefix, '%' if isinstance(node.value, ast.Dict) else '$'),
[[target, ' = '] for target in node.targets],
(self.prefix, None),
node.value,
';',
)
def _ast_attribute(self, node):
"""Add an AST Attribute in output."""
saved_prefix = self._prefix
self._prefix = []
self.add(node.value, '::', node.attr)
self._prefix = saved_prefix
def _ast_binop(self, node):
"""Add an AST BinOp in output."""
if isinstance(node.op, ast.Add) and \
(not self.is_number(node.left) or
not self.is_number(node.right)):
str_op = '.'
else:
str_op = self.binop[node.op.__class__.__name__]
self.add(
(self.prefix, '$'),
node.left,
' %s ' % str_op,
node.right,
(self.prefix, None),
)
def _ast_call(self, node):
"""Add an AST Call in output."""
self.add(
node.func,
'(',
(self.prefix, '$'),
self.make_list(node.args),
(self.prefix, None),
')',
)
def _ast_compare(self, node):
"""Add an AST Compare in output."""
self.add(node.left)
for operator, comparator in zip(node.ops, node.comparators):
if isinstance(operator, (ast.Eq, ast.NotEq)) and \
not self.is_number(node.left) and \
not self.is_bool(node.left) and \
not self.is_number(comparator) and \
not self.is_bool(comparator):
custom_cmpop = {
'Eq': 'eq',
'NotEq': 'ne',
}
else:
custom_cmpop = self.cmpop
self.add(
' %s ' % custom_cmpop[operator.__class__.__name__],
comparator,
)
def _ast_constant(self, node):
"""Add an AST Constant in output."""
if isinstance(node.value, str):
self.add('"%s"' % node.s.replace('$', '\\$').replace('@', '\\@'))
else:
self.add(repr(node.s))
def _ast_dict(self, node):
"""Add an AST Dict in output."""
self.add(
'{',
self.make_list([[key, ' => ', value]
for key, value in zip(node.keys, node.values)]),
'}',
)
def _ast_expr(self, node):
"""Add an AST Expr in output."""
if not isinstance(node.value, ast.Str): # ignore docstrings
self.add(
self.fill,
node.value,
';',
)
def _ast_functiondef(self, node):
"""Add an AST FunctionDef in output."""
self.add(
self.fill,
self.fill,
'sub %s' % node.name,
self.fill,
'{',
self.indent,
)
if node.args.args:
self.add(
self.fill,
'my (',
(self.prefix, '$'),
self.make_list(node.args.args),
(self.prefix, None),
') = @_;',
)
self.add(
node.body,
self.unindent,
self.fill,
'}',
)
def _ast_if(self, node):
"""Add an AST If in output."""
self.add(
self.fill,
'if (',
(self.prefix, '$'),
node.test,
(self.prefix, None),
')',
self.fill,
'{',
self.indent,
node.body,
self.unindent,
self.fill,
'}',
)
if node.orelse:
self.add(
self.fill,
'else',
self.fill,
'{',
self.indent,
node.orelse,
self.unindent,
self.fill,
'}',
)
def _ast_pass(self, node):
"""Add an AST Pass in output."""
pass
def _ast_return(self, node):
"""Add an AST Return in output."""
self.fill('return')
if node.value:
self.add(
' ',
(self.prefix,
'%' if isinstance(node.value, ast.Dict) else '$'),
node.value,
(self.prefix, None),
';',
)
def _ast_str(self, node):
"""Add an AST Str in output."""
# note: deprecated since Python 3.8, replaced by ast.Constant
self.add('"%s"' % node.s.replace('$', '\\$').replace('@', '\\@'))
class UnparseRuby(UnparsePython):
"""
Unparse AST to generate Ruby script code.
Note: only part of AST types are supported (just the types used by
the script to test WeeChat scripting API).
"""
__lineno__ = inspect.currentframe().f_lineno
def _ast_attribute(self, node):
"""Add an AST Attribute in output."""
self.add(
node.value,
'::' if node.attr.startswith('WEECHAT_') else '.',
node.attr,
)
def _ast_dict(self, node):
"""Add an AST Dict in output."""
self.add(
'Hash[',
self.make_list([[key, ' => ', value]
for key, value in zip(node.keys, node.values)]),
']',
)
def _ast_functiondef(self, node):
"""Add an AST FunctionDef in output."""
self.add(
self.fill,
self.fill,
'def %s' % node.name,
)
if node.args.args:
self.add(
'(',
self.make_list(node.args.args),
')',
)
self.add(
self.indent,
node.body,
self.unindent,
self.fill,
'end',
)
def _ast_if(self, node):
"""Add an AST If in output."""
self.add(
self.fill,
'if ',
node.test,
self.indent,
node.body,
self.unindent,
)
if node.orelse:
self.add(
self.fill,
'else',
self.indent,
node.orelse,
self.unindent,
self.fill,
'end',
)
def _ast_pass(self, node):
"""Add an AST Pass in output."""
pass
def _ast_str(self, node):
"""Add an AST Str in output."""
# note: deprecated since Python 3.8, replaced by ast.Constant
self.add('"%s"' % node.s)
class UnparseLua(UnparsePython):
"""
Unparse AST to generate Lua script code.
Note: only part of AST types are supported (just the types used by
the script to test WeeChat scripting API).
"""
__lineno__ = inspect.currentframe().f_lineno
def __init__(self, *args, **kwargs):
super(UnparseLua, self).__init__(*args, **kwargs)
self.cmpop = {
'Eq': '==',
'NotEq': '~=',
'Lt': '<',
'LtE': '<=',
'Gt': '>',
'GtE': '>=',
}
def _ast_binop(self, node):
"""Add an AST BinOp in output."""
if isinstance(node.op, ast.Add) and \
(not self.is_number(node.left) or
not self.is_number(node.right)):
str_op = '..'
else:
str_op = self.binop[node.op.__class__.__name__]
self.add(
node.left,
' %s ' % str_op,
node.right,
)
def _ast_dict(self, node):
"""Add an AST Dict in output."""
self.add(
'{',
self.make_list([
['[', key, ']', '=', value]
for key, value in zip(node.keys, node.values)]),
'}',
)
def _ast_functiondef(self, node):
"""Add an AST FunctionDef in output."""
self.add(
self.fill,
self.fill,
'function %s' % node.name,
)
self.add(
'(',
self.make_list(node.args.args),
')',
self.indent,
node.body,
self.unindent,
self.fill,
'end',
)
def _ast_if(self, node):
"""Add an AST If in output."""
self.add(
self.fill,
'if ',
node.test,
' then',
self.indent,
node.body,
self.unindent,
)
if node.orelse:
self.add(
self.fill,
'else',
self.indent,
node.orelse,
self.unindent,
self.fill,
'end',
)
def _ast_pass(self, node):
"""Add an AST Pass in output."""
pass
class UnparseTcl(UnparsePython):
"""
Unparse AST to generate Tcl script code.
Note: only part of AST types are supported (just the types used by
the script to test WeeChat scripting API).
"""
__lineno__ = inspect.currentframe().f_lineno
def __init__(self, *args, **kwargs):
super(UnparseTcl, self).__init__(*args, **kwargs)
self._call = 0
def _ast_assign(self, node):
"""Add an AST Assign in output."""
self.add(
self.fill,
'set ',
node.targets[0],
' ',
'[' if not isinstance(node.value, ast.Str) else '',
node.value,
']' if not isinstance(node.value, ast.Str) else '',
)
def _ast_attribute(self, node):
"""Add an AST Attribute in output."""
saved_prefix = self._prefix
self._prefix = []
if node.attr.startswith('WEECHAT_'):
self.add('$::')
self.add(node.value, '::', node.attr)
self._prefix = saved_prefix
def _ast_binop(self, node):
"""Add an AST BinOp in output."""
self.add(
'[join [list ',
(self.prefix, '$'),
node.left,
' ',
node.right,
(self.prefix, None),
'] ""]',
)
def _ast_call(self, node):
"""Add an AST Call in output."""
if self._call:
self.add('[')
self._call += 1
self.add(
node.func,
' ' if node.args else None,
(self.prefix, '$'),
self.make_list(node.args, sep=' '),
(self.prefix, None),
)
self._call -= 1
if self._call:
self.add(']')
def _ast_compare(self, node):
"""Add an AST Compare in output."""
self.prefix('$')
if self._call:
self.add('[expr {')
self.add(node.left)
for operator, comparator in zip(node.ops, node.comparators):
if isinstance(operator, (ast.Eq, ast.NotEq)) and \
not self.is_number(node.left) and \
not self.is_bool(node.left) and \
not self.is_number(comparator) and \
not self.is_bool(comparator):
custom_cmpop = {
'Eq': 'eq',
'NotEq': 'ne',
}
else:
custom_cmpop = self.cmpop
self.add(
' %s ' % custom_cmpop[operator.__class__.__name__],
comparator,
)
if self._call:
self.add('}]')
self.prefix(None)
def _ast_constant(self, node):
"""Add an AST Constant in output."""
if isinstance(node.value, str):
self.add('"%s"' % node.s.replace('$', '\\$'))
else:
self.add(repr(node.s))
def _ast_dict(self, node):
"""Add an AST Dict in output."""
self.add(
'[dict create ',
self.make_list([[key, ' ', value]
for key, value in zip(node.keys, node.values)],
sep=' '),
']',
)
def _ast_functiondef(self, node):
"""Add an AST FunctionDef in output."""
self.add(
self.fill,
self.fill,
'proc %s {' % node.name,
(self.make_list(node.args.args, sep=' ')
if node.args.args else None),
'} {',
self.indent,
node.body,
self.unindent,
self.fill,
'}',
)
def _ast_if(self, node):
"""Add an AST If in output."""
self.add(
self.fill,
'if {',
(self.prefix, '$'),
node.test,
(self.prefix, None),
'} {',
self.indent,
node.body,
self.unindent,
self.fill,
'}',
)
if node.orelse:
self.add(
' else {',
self.indent,
node.orelse,
self.unindent,
self.fill,
'}',
)
def _ast_pass(self, node):
"""Add an AST Pass in output."""
pass
def _ast_return(self, node):
"""Add an AST Return in output."""
self.fill('return')
if node.value:
self.add(
' ',
(self.prefix, '$'),
node.value,
(self.prefix, None),
)
def _ast_str(self, node):
"""Add an AST Str in output."""
# note: deprecated since Python 3.8, replaced by ast.Constant
self.add('"%s"' % node.s.replace('$', '\\$'))
class UnparseGuile(UnparsePython):
"""
Unparse AST to generate Guile script code.
Note: only part of AST types are supported (just the types used by
the script to test WeeChat scripting API).
"""
__lineno__ = inspect.currentframe().f_lineno
def __init__(self, *args, **kwargs):
super(UnparseGuile, self).__init__(*args, **kwargs)
self.cmpop = {
'Eq': '=',
'NotEq': '<>',
'Lt': '<',
'LtE': '<=',
'Gt': '>',
'GtE': '>=',
}
self._call = 0
self._let = 0
def _ast_assign(self, node):
"""Add an AST Assign in output."""
self.add(
self.fill,
'(let ((',
node.targets[0],
' ',
node.value,
'))',
self.indent,
self.fill,
'(begin',
self.indent,
)
self._let += 1
def _ast_attribute(self, node):
"""Add an AST Attribute in output."""
self.add(node.value, ':', node.attr)
def _ast_binop(self, node):
"""Add an AST BinOp in output."""
if isinstance(node.op, ast.Add) and \
(isinstance(node.left, (ast.Name, ast.Str)) or
isinstance(node.right, (ast.Name, ast.Str))):
self.add(
'(string-append ',
node.left,
' ',
node.right,
')',
)
else:
self.add(
node.left,
' %s ' % self.binop[node.op.__class__.__name__],
node.right,
)
def _ast_call(self, node):
"""Add an AST Call in output."""
self._call += 1
self.add(
'(',
node.func,
' ' if node.args else None,
self.make_list(node.args, sep=' '),
')',
)
self._call -= 1
def _ast_compare(self, node):
"""Add an AST Compare in output."""
for operator, comparator in zip(node.ops, node.comparators):
if isinstance(operator, (ast.Eq, ast.NotEq)) and \
not self.is_number(node.left) and \
not self.is_bool(node.left) and \
not self.is_number(comparator) and \
not self.is_bool(comparator):
prefix = 'string'
else:
prefix = ''
self.add(
'(%s%s ' % (prefix, self.cmpop[operator.__class__.__name__]),
node.left,
' ',
comparator,
')',
)
def _ast_constant(self, node):
"""Add an AST Constant in output."""
if isinstance(node.s, str):
self.add('"%s"' % node.s)
else:
self.add(repr(node.s))
def _ast_dict(self, node):
"""Add an AST Dict in output."""
self.add(
'\'(',
self.make_list([['(', key, ' ', value, ')']
for key, value in zip(node.keys, node.values)],
sep=' '),
')',
)
def _ast_functiondef(self, node):
"""Add an AST FunctionDef in output."""
self.add(
self.fill,
self.fill,
'(define (%s' % node.name,
' ' if node.args.args else None,
(self.make_list(node.args.args, sep=' ')
if node.args.args else None),
')',
self.indent,
node.body,
)
while self._let > 0:
self.add(
self.unindent,
self.fill,
')',
self.unindent,
self.fill,
')',
)
self._let -= 1
self.add(
self.unindent,
self.fill,
')',
)
def _ast_if(self, node):
"""Add an AST If in output."""
self.add(
self.fill,
            '(if ',
            '' if isinstance(node.test, ast.Name) else '(',
node.test,
'' if isinstance(node.test, ast.Name) else ')',
self.indent,
self.fill,
'(begin',
self.indent,
node.body,
self.unindent,
self.fill,
')',
self.unindent,
)
if node.orelse:
self.add(
self.indent,
self.fill,
'(begin',
self.indent,
node.orelse,
self.unindent,
self.fill,
')',
self.unindent,
)
self.add(self.fill, ')')
def _ast_pass(self, node):
"""Add an AST Pass in output."""
pass
def _ast_return(self, node):
"""Add an AST Return in output."""
if node.value:
self.add(self.fill, node.value)
def _ast_str(self, node):
"""Add an AST Str in output."""
# note: deprecated since Python 3.8, replaced by ast.Constant
self.add('"%s"' % node.s)
class UnparseJavascript(UnparsePython):
"""
Unparse AST to generate Javascript script code.
Note: only part of AST types are supported (just the types used by
the script to test WeeChat scripting API).
"""
__lineno__ = inspect.currentframe().f_lineno
def _ast_dict(self, node):
"""Add an AST Dict in output."""
self.add(
'{',
self.make_list([[key, ': ', value]
for key, value in zip(node.keys, node.values)]),
'}',
)
def _ast_functiondef(self, node):
"""Add an AST FunctionDef in output."""
self.add(
self.fill,
self.fill,
'function %s(' % node.name,
self.make_list(node.args.args),
') {',
self.indent,
node.body,
self.unindent,
self.fill,
'}',
)
def _ast_if(self, node):
"""Add an AST If in output."""
self.add(
self.fill,
'if (',
node.test,
') {',
self.indent,
node.body,
self.unindent,
self.fill,
'}',
)
if node.orelse:
self.add(
' else {',
self.indent,
node.orelse,
self.unindent,
self.fill,
'}',
)
def _ast_pass(self, node):
"""Add an AST Pass in output."""
pass
class UnparsePhp(UnparsePython):
"""
Unparse AST to generate PHP script code.
Note: only part of AST types are supported (just the types used by
the script to test WeeChat scripting API).
"""
__lineno__ = inspect.currentframe().f_lineno
def _ast_assign(self, node):
"""Add an AST Assign in output."""
self.add(
self.fill,
(self.prefix, '$'),
[[target, ' = '] for target in node.targets],
(self.prefix, None),
node.value,
';',
)
def _ast_attribute(self, node):
"""Add an AST Attribute in output."""
saved_prefix = self._prefix
self._prefix = []
if not node.attr.startswith('WEECHAT_'):
self.add(node.value, '_')
self.add(node.attr)
self._prefix = saved_prefix
def _ast_binop(self, node):
"""Add an AST BinOp in output."""
if isinstance(node.op, ast.Add) and \
(isinstance(node.left, (ast.Name, ast.Str)) or
isinstance(node.right, (ast.Name, ast.Str))):
str_op = '.'
else:
str_op = self.binop[node.op.__class__.__name__]
self.add(
(self.prefix, '$'),
node.left,
' %s ' % str_op,
node.right,
(self.prefix, None),
)
def _ast_call(self, node):
"""Add an AST Call in output."""
self.add(
node.func,
'(',
(self.prefix, '$'),
self.make_list(node.args),
(self.prefix, None),
')',
)
def _ast_constant(self, node):
"""Add an AST Constant in output."""
if isinstance(node.s, str):
self.add('"%s"' % node.s.replace('$', '\\$'))
else:
self.add(repr(node.s))
def _ast_dict(self, node):
"""Add an AST Dict in output."""
self.add(
'array(',
self.make_list([[key, ' => ', value]
for key, value in zip(node.keys, node.values)]),
')',
)
def _ast_expr(self, node):
"""Add an AST Expr in output."""
if not isinstance(node.value, ast.Str): # ignore docstrings
self.add(
self.fill,
node.value,
';',
)
def _ast_functiondef(self, node):
"""Add an AST FunctionDef in output."""
self.add(
self.fill,
self.fill,
'function %s(' % node.name,
(self.prefix, '$'),
self.make_list(node.args.args),
(self.prefix, None),
')',
self.fill,
'{',
self.indent,
node.body,
self.unindent,
self.fill,
'}',
)
def _ast_if(self, node):
"""Add an AST If in output."""
self.add(
self.fill,
'if (',
(self.prefix, '$'),
node.test,
(self.prefix, None),
')',
self.fill,
'{',
self.indent,
node.body,
self.unindent,
self.fill,
'}',
)
if node.orelse:
self.add(
self.fill,
'else',
self.fill,
'{',
self.indent,
node.orelse,
self.unindent,
self.fill,
'}',
)
def _ast_pass(self, node):
"""Add an AST Pass in output."""
pass
def _ast_return(self, node):
"""Add an AST Return in output."""
self.fill('return')
if node.value:
self.add(
' ',
(self.prefix, '$'),
node.value,
(self.prefix, None),
';',
)
def _ast_str(self, node):
"""Add an AST Str in output."""
# note: deprecated since Python 3.8, replaced by ast.Constant
self.add('"%s"' % node.s.replace('$', '\\$'))
def get_languages():
"""Return a list of supported languages: ['python', 'perl', ...]."""
members = [
member
for member in inspect.getmembers(sys.modules[__name__],
predicate=inspect.isclass)
if inspect.isclass(member[1]) and member[0].startswith('Unparse')
]
languages = [
name[7:].lower()
for name, _ in sorted(members,
key=lambda member: member[1].__lineno__)
]
return languages
LANGUAGES = get_languages()
def get_parser():
"""Get parser arguments."""
all_languages = LANGUAGES + ['all']
default_language = LANGUAGES[0]
parser = argparse.ArgumentParser(
description=('Unparse Python code from stdin and generate code in '
'another language (to stdout).\n\n'
'The code is read from stdin and generated code is '
'written on stdout.'))
parser.add_argument(
'-l', '--language',
default=default_language,
choices=all_languages,
help='output language (default: %s)' % default_language)
return parser
def get_stdin():
"""
Return data from standard input.
If there is nothing in stdin, wait for data until ctrl-D (EOF)
is received.
"""
data = ''
inr = select.select([sys.stdin], [], [], 0)[0]
if not inr:
print('Enter the code to convert (Enter + ctrl+D to end)')
while True:
inr = select.select([sys.stdin], [], [], 0.1)[0]
if not inr:
continue
new_data = os.read(sys.stdin.fileno(), 4096)
if not new_data: # EOF?
break
data += new_data.decode('utf-8')
return data
def convert_to_language(code, language, prefix=''):
"""Convert Python code to a language."""
class_name = 'Unparse%s' % language.capitalize()
unparse_class = getattr(sys.modules[__name__], class_name)
if prefix:
print(prefix)
output = StringIO()
unparse_class(output=output).add(ast.parse(code))
print(output.getvalue().lstrip())
def convert(code, language):
"""Convert Python code to one or all languages."""
if language == 'all':
for lang in LANGUAGES:
convert_to_language(code, lang, '\n%s:' % lang)
else:
convert_to_language(code, language)
def main():
"""Main function."""
parser = get_parser()
args = parser.parse_args()
code = get_stdin()
if not code:
print('ERROR: missing input')
print()
parser.print_help()
sys.exit(1)
convert(code, args.language)
sys.exit(0)
if __name__ == '__main__':
main()
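# Example invocation (illustrative; the exact output depends on the Unparse*
# classes above and may differ slightly):
#
#   $ echo 'weechat.prnt("", "hello")' | python unparse.py -l perl
#   weechat::prnt("", "hello");
#
#   $ echo 'weechat.prnt("", "hello")' | python unparse.py -l all
#   (prints one block per supported language: python, perl, ruby, ...)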
| weechat/weechat | tests/scripts/python/unparse.py | Python | gpl-3.0 | 36,671 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os.path
import sys
import tempfile
import types
import unittest
from contextlib import contextmanager
from django.template import Context, TemplateDoesNotExist
from django.template.engine import Engine
from django.test import SimpleTestCase, ignore_warnings, override_settings
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
from .utils import TEMPLATE_DIR
try:
import pkg_resources
except ImportError:
pkg_resources = None
class CachedLoaderTests(SimpleTestCase):
def setUp(self):
self.engine = Engine(
dirs=[TEMPLATE_DIR],
loaders=[
('django.template.loaders.cached.Loader', [
'django.template.loaders.filesystem.Loader',
]),
],
)
def test_get_template(self):
template = self.engine.get_template('index.html')
self.assertEqual(template.origin.name, os.path.join(TEMPLATE_DIR, 'index.html'))
self.assertEqual(template.origin.template_name, 'index.html')
self.assertEqual(template.origin.loader, self.engine.template_loaders[0].loaders[0])
cache = self.engine.template_loaders[0].get_template_cache
self.assertEqual(cache['index.html'], template)
# Run a second time from cache
template = self.engine.get_template('index.html')
self.assertEqual(template.origin.name, os.path.join(TEMPLATE_DIR, 'index.html'))
self.assertEqual(template.origin.template_name, 'index.html')
self.assertEqual(template.origin.loader, self.engine.template_loaders[0].loaders[0])
def test_get_template_missing_debug_off(self):
"""
With template debugging disabled, the raw TemplateDoesNotExist class
should be cached when a template is missing. See ticket #26306 and
docstrings in the cached loader for details.
"""
self.engine.debug = False
with self.assertRaises(TemplateDoesNotExist):
self.engine.get_template('prod-template-missing.html')
e = self.engine.template_loaders[0].get_template_cache['prod-template-missing.html']
self.assertEqual(e, TemplateDoesNotExist)
def test_get_template_missing_debug_on(self):
"""
With template debugging enabled, a TemplateDoesNotExist instance
should be cached when a template is missing.
"""
self.engine.debug = True
with self.assertRaises(TemplateDoesNotExist):
self.engine.get_template('debug-template-missing.html')
e = self.engine.template_loaders[0].get_template_cache['debug-template-missing.html']
self.assertIsInstance(e, TemplateDoesNotExist)
self.assertEqual(e.args[0], 'debug-template-missing.html')
@unittest.skipIf(six.PY2, "Python 2 doesn't set extra exception attributes")
def test_cached_exception_no_traceback(self):
"""
When a TemplateDoesNotExist instance is cached, the cached instance
should not contain the __traceback__, __context__, or __cause__
attributes that Python sets when raising exceptions.
"""
self.engine.debug = True
with self.assertRaises(TemplateDoesNotExist):
self.engine.get_template('no-traceback-in-cache.html')
e = self.engine.template_loaders[0].get_template_cache['no-traceback-in-cache.html']
error_msg = "Cached TemplateDoesNotExist must not have been thrown."
self.assertIsNone(e.__traceback__, error_msg)
self.assertIsNone(e.__context__, error_msg)
self.assertIsNone(e.__cause__, error_msg)
@ignore_warnings(category=RemovedInDjango20Warning)
def test_load_template(self):
loader = self.engine.template_loaders[0]
template, origin = loader.load_template('index.html')
self.assertEqual(template.origin.template_name, 'index.html')
cache = self.engine.template_loaders[0].template_cache
self.assertEqual(cache['index.html'][0], template)
# Run a second time from cache
loader = self.engine.template_loaders[0]
source, name = loader.load_template('index.html')
self.assertEqual(template.origin.template_name, 'index.html')
@ignore_warnings(category=RemovedInDjango20Warning)
def test_load_template_missing(self):
"""
#19949 -- TemplateDoesNotExist exceptions should be cached.
"""
loader = self.engine.template_loaders[0]
self.assertFalse('missing.html' in loader.template_cache)
with self.assertRaises(TemplateDoesNotExist):
loader.load_template("missing.html")
self.assertEqual(
loader.template_cache["missing.html"],
TemplateDoesNotExist,
"Cached loader failed to cache the TemplateDoesNotExist exception",
)
@ignore_warnings(category=RemovedInDjango20Warning)
def test_load_nonexistent_cached_template(self):
loader = self.engine.template_loaders[0]
template_name = 'nonexistent.html'
# fill the template cache
with self.assertRaises(TemplateDoesNotExist):
loader.find_template(template_name)
with self.assertRaisesMessage(TemplateDoesNotExist, template_name):
loader.get_template(template_name)
def test_templatedir_caching(self):
"""
#13573 -- Template directories should be part of the cache key.
"""
# Retrieve a template specifying a template directory to check
t1, name = self.engine.find_template('test.html', (os.path.join(TEMPLATE_DIR, 'first'),))
# Now retrieve the same template name, but from a different directory
t2, name = self.engine.find_template('test.html', (os.path.join(TEMPLATE_DIR, 'second'),))
# The two templates should not have the same content
self.assertNotEqual(t1.render(Context({})), t2.render(Context({})))
@unittest.skipUnless(pkg_resources, 'setuptools is not installed')
class EggLoaderTests(SimpleTestCase):
@contextmanager
def create_egg(self, name, resources):
"""
Creates a mock egg with a list of resources.
name: The name of the module.
resources: A dictionary of template names mapped to file-like objects.
"""
if six.PY2:
name = name.encode('utf-8')
class MockLoader(object):
pass
class MockProvider(pkg_resources.NullProvider):
def __init__(self, module):
pkg_resources.NullProvider.__init__(self, module)
self.module = module
def _has(self, path):
return path in self.module._resources
def _isdir(self, path):
return False
def get_resource_stream(self, manager, resource_name):
return self.module._resources[resource_name]
def _get(self, path):
return self.module._resources[path].read()
def _fn(self, base, resource_name):
return os.path.normcase(resource_name)
egg = types.ModuleType(name)
egg.__loader__ = MockLoader()
egg.__path__ = ['/some/bogus/path/']
egg.__file__ = '/some/bogus/path/__init__.pyc'
egg._resources = resources
sys.modules[name] = egg
pkg_resources._provider_factories[MockLoader] = MockProvider
try:
yield
finally:
del sys.modules[name]
del pkg_resources._provider_factories[MockLoader]
@classmethod
@ignore_warnings(category=RemovedInDjango20Warning)
def setUpClass(cls):
cls.engine = Engine(loaders=[
'django.template.loaders.eggs.Loader',
])
cls.loader = cls.engine.template_loaders[0]
super(EggLoaderTests, cls).setUpClass()
def test_get_template(self):
templates = {
os.path.normcase('templates/y.html'): six.StringIO("y"),
}
with self.create_egg('egg', templates):
with override_settings(INSTALLED_APPS=['egg']):
template = self.engine.get_template("y.html")
self.assertEqual(template.origin.name, 'egg:egg:templates/y.html')
self.assertEqual(template.origin.template_name, 'y.html')
self.assertEqual(template.origin.loader, self.engine.template_loaders[0])
output = template.render(Context({}))
self.assertEqual(output, "y")
@ignore_warnings(category=RemovedInDjango20Warning)
def test_load_template_source(self):
loader = self.engine.template_loaders[0]
templates = {
os.path.normcase('templates/y.html'): six.StringIO("y"),
}
with self.create_egg('egg', templates):
with override_settings(INSTALLED_APPS=['egg']):
source, name = loader.load_template_source('y.html')
self.assertEqual(source.strip(), 'y')
self.assertEqual(name, 'egg:egg:templates/y.html')
def test_non_existing(self):
"""
Template loading fails if the template is not in the egg.
"""
with self.create_egg('egg', {}):
with override_settings(INSTALLED_APPS=['egg']):
with self.assertRaises(TemplateDoesNotExist):
self.engine.get_template('not-existing.html')
def test_not_installed(self):
"""
Template loading fails if the egg is not in INSTALLED_APPS.
"""
templates = {
os.path.normcase('templates/y.html'): six.StringIO("y"),
}
with self.create_egg('egg', templates):
with self.assertRaises(TemplateDoesNotExist):
self.engine.get_template('y.html')
class FileSystemLoaderTests(SimpleTestCase):
@classmethod
def setUpClass(cls):
cls.engine = Engine(dirs=[TEMPLATE_DIR])
super(FileSystemLoaderTests, cls).setUpClass()
@contextmanager
def set_dirs(self, dirs):
original_dirs = self.engine.dirs
self.engine.dirs = dirs
try:
yield
finally:
self.engine.dirs = original_dirs
@contextmanager
def source_checker(self, dirs):
loader = self.engine.template_loaders[0]
def check_sources(path, expected_sources):
expected_sources = [os.path.abspath(s) for s in expected_sources]
self.assertEqual(
[origin.name for origin in loader.get_template_sources(path)],
expected_sources,
)
with self.set_dirs(dirs):
yield check_sources
def test_get_template(self):
template = self.engine.get_template('index.html')
self.assertEqual(template.origin.name, os.path.join(TEMPLATE_DIR, 'index.html'))
self.assertEqual(template.origin.template_name, 'index.html')
self.assertEqual(template.origin.loader, self.engine.template_loaders[0])
self.assertEqual(template.origin.loader_name, 'django.template.loaders.filesystem.Loader')
@ignore_warnings(category=RemovedInDjango20Warning)
def test_load_template_source(self):
loader = self.engine.template_loaders[0]
source, name = loader.load_template_source('index.html')
self.assertEqual(source.strip(), 'index')
self.assertEqual(name, os.path.join(TEMPLATE_DIR, 'index.html'))
def test_directory_security(self):
with self.source_checker(['/dir1', '/dir2']) as check_sources:
check_sources('index.html', ['/dir1/index.html', '/dir2/index.html'])
check_sources('/etc/passwd', [])
check_sources('etc/passwd', ['/dir1/etc/passwd', '/dir2/etc/passwd'])
check_sources('../etc/passwd', [])
check_sources('../../../etc/passwd', [])
check_sources('/dir1/index.html', ['/dir1/index.html'])
check_sources('../dir2/index.html', ['/dir2/index.html'])
check_sources('/dir1blah', [])
check_sources('../dir1blah', [])
def test_unicode_template_name(self):
with self.source_checker(['/dir1', '/dir2']) as check_sources:
# UTF-8 bytestrings are permitted.
check_sources(b'\xc3\x85ngstr\xc3\xb6m', ['/dir1/Ångström', '/dir2/Ångström'])
# Unicode strings are permitted.
check_sources('Ångström', ['/dir1/Ångström', '/dir2/Ångström'])
def test_utf8_bytestring(self):
"""
Invalid UTF-8 encoding in bytestrings should raise a useful error
"""
engine = Engine()
loader = engine.template_loaders[0]
with self.assertRaises(UnicodeDecodeError):
list(loader.get_template_sources(b'\xc3\xc3', ['/dir1']))
def test_unicode_dir_name(self):
with self.source_checker([b'/Stra\xc3\x9fe']) as check_sources:
check_sources('Ångström', ['/Straße/Ångström'])
check_sources(b'\xc3\x85ngstr\xc3\xb6m', ['/Straße/Ångström'])
@unittest.skipUnless(
os.path.normcase('/TEST') == os.path.normpath('/test'),
"This test only runs on case-sensitive file systems.",
)
def test_case_sensitivity(self):
with self.source_checker(['/dir1', '/DIR2']) as check_sources:
check_sources('index.html', ['/dir1/index.html', '/DIR2/index.html'])
check_sources('/DIR1/index.HTML', ['/DIR1/index.HTML'])
def test_file_does_not_exist(self):
with self.assertRaises(TemplateDoesNotExist):
self.engine.get_template('doesnotexist.html')
@unittest.skipIf(
sys.platform == 'win32',
"Python on Windows doesn't have working os.chmod().",
)
def test_permissions_error(self):
with tempfile.NamedTemporaryFile() as tmpfile:
tmpdir = os.path.dirname(tmpfile.name)
tmppath = os.path.join(tmpdir, tmpfile.name)
os.chmod(tmppath, 0o0222)
with self.set_dirs([tmpdir]):
with self.assertRaisesMessage(IOError, 'Permission denied'):
self.engine.get_template(tmpfile.name)
def test_notafile_error(self):
with self.assertRaises(IOError):
self.engine.get_template('first')
class AppDirectoriesLoaderTests(SimpleTestCase):
@classmethod
def setUpClass(cls):
cls.engine = Engine(
loaders=['django.template.loaders.app_directories.Loader'],
)
super(AppDirectoriesLoaderTests, cls).setUpClass()
@override_settings(INSTALLED_APPS=['template_tests'])
def test_get_template(self):
template = self.engine.get_template('index.html')
self.assertEqual(template.origin.name, os.path.join(TEMPLATE_DIR, 'index.html'))
self.assertEqual(template.origin.template_name, 'index.html')
self.assertEqual(template.origin.loader, self.engine.template_loaders[0])
@ignore_warnings(category=RemovedInDjango20Warning)
@override_settings(INSTALLED_APPS=['template_tests'])
def test_load_template_source(self):
loader = self.engine.template_loaders[0]
source, name = loader.load_template_source('index.html')
self.assertEqual(source.strip(), 'index')
self.assertEqual(name, os.path.join(TEMPLATE_DIR, 'index.html'))
@override_settings(INSTALLED_APPS=[])
def test_not_installed(self):
with self.assertRaises(TemplateDoesNotExist):
self.engine.get_template('index.html')
class LocmemLoaderTests(SimpleTestCase):
@classmethod
def setUpClass(cls):
cls.engine = Engine(
loaders=[('django.template.loaders.locmem.Loader', {
'index.html': 'index',
})],
)
super(LocmemLoaderTests, cls).setUpClass()
def test_get_template(self):
template = self.engine.get_template('index.html')
self.assertEqual(template.origin.name, 'index.html')
self.assertEqual(template.origin.template_name, 'index.html')
self.assertEqual(template.origin.loader, self.engine.template_loaders[0])
@ignore_warnings(category=RemovedInDjango20Warning)
def test_load_template_source(self):
loader = self.engine.template_loaders[0]
source, name = loader.load_template_source('index.html')
self.assertEqual(source.strip(), 'index')
self.assertEqual(name, 'index.html')
| vincepandolfo/django | tests/template_tests/test_loaders.py | Python | bsd-3-clause | 16,418 |
"""
<license>
CSPLN_MaryKeelerEdition; Manages images to which notes can be added.
Copyright (C) 2015, Thomas Kercheval
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
___________________________________________________________</license>
Description:
Deletes all generated files.
Resets the system for further testing.
Inputs:
Generated files.
Outputs:
Environment fresh for generation.
Currently:
To Do:
Done:
Resets the system so `./automate_everything.py` may be run
"""
from shutil import rmtree
from os.path import exists
from the_decider import resolve_relative_path as resolve_path
def delete_directories(directory_dictionary):
"""Resets system so further files may be deleted."""
print "\nResetting system...\n"
directory_keys = directory_dictionary.keys()
for directory in directory_keys:
path = resolve_path(__file__, directory_dictionary[directory])
print "Deleteing `{}`.".format(path)
if exists(path):
rmtree(path)
else:
print " But it doesn't exist..."
print "\nFinished resetting system...\n"
return None
def delete_dirs_no_print(directory_dictionary):
"""
    Delete generated directories, resetting the system for further testing.
Does not print status.
"""
directory_keys = directory_dictionary.keys()
for directory in directory_keys:
path = resolve_path(__file__, directory_dictionary[directory])
if exists(path):
print "Deleteing `{}`.".format(path)
rmtree(path)
else:
pass
return None
if __name__ == '__main__':
GENERATED_DIRS = {"web_apps":"../apps/web_apps",
"images_processed":"../images/processed_images",
"populators":"./populators"}
delete_directories(GENERATED_DIRS)
| SpaceKatt/CSPLN | scripts/reset_system.py | Python | gpl-3.0 | 2,393 |
"""Config flow to configure iss component."""
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_NAME, CONF_SHOW_ON_MAP
from homeassistant.core import callback
from homeassistant.data_entry_flow import FlowResult
from .binary_sensor import DEFAULT_NAME
from .const import DOMAIN
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Config flow for iss component."""
VERSION = 1
@staticmethod
@callback
def async_get_options_flow(
config_entry: config_entries.ConfigEntry,
) -> config_entries.OptionsFlow:
"""Get the options flow for this handler."""
return OptionsFlowHandler(config_entry)
async def async_step_user(self, user_input=None) -> FlowResult:
"""Handle a flow initialized by the user."""
# Check if already configured
if self._async_current_entries():
return self.async_abort(reason="single_instance_allowed")
# Check if location have been defined.
if not self.hass.config.latitude and not self.hass.config.longitude:
return self.async_abort(reason="latitude_longitude_not_defined")
if user_input is not None:
return self.async_create_entry(
title=user_input.get(CONF_NAME, DEFAULT_NAME),
data={},
options={CONF_SHOW_ON_MAP: user_input.get(CONF_SHOW_ON_MAP, False)},
)
return self.async_show_form(step_id="user")
async def async_step_import(self, conf: dict) -> FlowResult:
"""Import a configuration from configuration.yaml."""
return await self.async_step_user(
user_input={
CONF_NAME: conf[CONF_NAME],
CONF_SHOW_ON_MAP: conf[CONF_SHOW_ON_MAP],
}
)
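# Hypothetical YAML that would reach async_step_import via the legacy
# binary_sensor platform setup (key names taken from the CONF_* constants used
# above; the exact platform schema is assumed here, not verified):
#
#     binary_sensor:
#       - platform: iss
#         name: ISS
#         show_on_map: true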
class OptionsFlowHandler(config_entries.OptionsFlow):
"""Config flow options handler for iss."""
def __init__(self, config_entry: config_entries.ConfigEntry) -> None:
"""Initialize options flow."""
self.config_entry = config_entry
self.options = dict(config_entry.options)
async def async_step_init(self, user_input=None) -> FlowResult:
"""Manage the options."""
if user_input is not None:
self.options.update(user_input)
return self.async_create_entry(title="", data=self.options)
return self.async_show_form(
step_id="init",
data_schema=vol.Schema(
{
vol.Optional(
CONF_SHOW_ON_MAP,
default=self.config_entry.options.get(CONF_SHOW_ON_MAP, False),
): bool,
}
),
)
| rohitranjan1991/home-assistant | homeassistant/components/iss/config_flow.py | Python | mit | 2,741 |
from .submaker import Submaker
from inception.tools.signapk import SignApk
import shutil
import os
from inception.constants import InceptionConstants
class UpdatezipSubmaker(Submaker):
def make(self, updatePkgDir):
keys_name = self.getValue("keys")
signingKeys = self.getMaker().getConfig().getKeyConfig(keys_name) if keys_name else None
updateBinaryKey, updateBinary = self.getTargetBinary("update-binary")
assert updateBinary, "%s is not set" % updateBinaryKey
if keys_name:
assert signingKeys, "update.keys is '%s' but __config__.host.keys.%s is not set" % (keys_name, keys_name)
signingKeys = signingKeys["private"], signingKeys["public"]
shutil.copy(updateBinary, os.path.join(updatePkgDir, "META-INF/com/google/android/update-binary"))
updateZipPath = updatePkgDir + "/../"
updateZipPath += "update_unsigned" if signingKeys else "update"
shutil.make_archive(updateZipPath, "zip", updatePkgDir)
updateZipPath += ".zip"
if signingKeys:
javaKey, javaPath = self.getHostBinary("java")
signApkKey, signApkPath = self.getHostBinary("signapk")
assert signApkPath, "%s is not set" % signApkKey
assert os.path.exists(signApkPath), "'%s' from %s does not exist" % (signApkPath, signApkKey)
assert os.path.exists(javaPath), "'%s' from %s does not exist" % (javaPath, javaKey)
signApk = SignApk(javaPath, signApkPath)
targetPath = updatePkgDir + "/../" + InceptionConstants.OUT_NAME_UPDATE
signApk.sign(updateZipPath, targetPath, signingKeys[0], signingKeys[1])
updateZipPath = targetPath
return updateZipPath
| tgalal/inception | inception/argparsers/makers/submakers/submaker_updatezip.py | Python | gpl-3.0 | 1,747 |
import unittest
import os
from katello.tests.core.action_test_utils import CLIOptionTestCase, CLIActionTestCase
from katello.tests.core.organization import organization_data
from katello.tests.core.template import template_data
import katello.client.core.template
from katello.client.core.template import Delete
from katello.client.api.utils import ApiDataError
class RequiredCLIOptionsTests(CLIOptionTestCase):
#requires: organization, name
#optional: environment (defaults to Library)
action = Delete()
disallowed_options = [
('--environment=dev', '--name=template_1'),
('--environment=dev', '--org=ACME'),
]
allowed_options = [
('--org=ACME', '--name=template_1'),
('--org=ACME', '--environment=dev', '--name=template_1'),
]
class TemplateInfoTest(CLIActionTestCase):
ORG = organization_data.ORGS[0]
ENV = organization_data.ENVS[0]
TPL = template_data.TEMPLATES[0]
OPTIONS = {
'org': ORG['name'],
'environment': ENV['name'],
'name': TPL['name'],
}
def setUp(self):
self.set_action(Delete())
self.set_module(katello.client.core.template)
self.mock_printer()
self.mock_options(self.OPTIONS)
self.mock(self.module, 'get_template', self.TPL)
self.mock(self.action.api, 'delete')
def test_it_finds_the_template(self):
self.run_action()
self.module.get_template.assert_called_once_with(self.ORG['name'], self.ENV['name'], self.TPL['name'])
def test_it_returns_error_when_template_not_found(self):
self.mock(self.module, 'get_template').side_effect = ApiDataError
self.run_action(os.EX_DATAERR)
def test_it_returns_success_when_template_found(self):
self.run_action(os.EX_OK)
def test_it_calls_delete_api(self):
self.run_action()
self.action.api.delete.assert_called_once_with(self.TPL['id'])
| iNecas/katello | cli/test/katello/tests/core/template/template_delete_test.py | Python | gpl-2.0 | 1,936 |
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import functools
import os
import multiprocessing
import random
import shutil
import signal
import time
import tempfile
from helpers import unittest, with_config, skipOnTravis
import luigi.rpc
import luigi.server
from luigi.scheduler import CentralPlannerScheduler
from luigi.six.moves.urllib.parse import urlencode, ParseResult
from tornado.testing import AsyncHTTPTestCase
from nose.plugins.attrib import attr
try:
from unittest import mock
except ImportError:
import mock
class ServerTestBase(AsyncHTTPTestCase):
def get_app(self):
return luigi.server.app(CentralPlannerScheduler())
def setUp(self):
super(ServerTestBase, self).setUp()
self._old_fetch = luigi.rpc.RemoteScheduler._fetch
def _fetch(obj, url, body, *args, **kwargs):
body = urlencode(body).encode('utf-8')
response = self.fetch(url, body=body, method='POST')
if response.code >= 400:
raise luigi.rpc.RPCError(
                    'Error when connecting to remote scheduler'
)
return response.body.decode('utf-8')
luigi.rpc.RemoteScheduler._fetch = _fetch
def tearDown(self):
super(ServerTestBase, self).tearDown()
luigi.rpc.RemoteScheduler._fetch = self._old_fetch
class ServerTest(ServerTestBase):
def test_visualizer(self):
page = self.fetch('/').body
self.assertTrue(page.find(b'<title>') != -1)
def _test_404(self, path):
response = self.fetch(path)
self.assertEqual(response.code, 404)
def test_404(self):
self._test_404('/foo')
def test_api_404(self):
self._test_404('/api/foo')
class INETServerClient(object):
def __init__(self):
self.port = random.randint(1024, 9999)
def run_server(self):
luigi.server.run(api_port=self.port, address='127.0.0.1')
def scheduler(self):
return luigi.rpc.RemoteScheduler('http://localhost:' + str(self.port))
class UNIXServerClient(object):
def __init__(self):
self.tempdir = tempfile.mkdtemp()
        self.unix_socket = os.path.join(self.tempdir, 'luigid.sock')
    def run_server(self):
        luigi.server.run(unix_socket=self.unix_socket)
def scheduler(self):
url = ParseResult(
scheme='http+unix',
netloc=self.unix_socket,
path='',
params='',
query='',
fragment='',
).geturl()
return luigi.rpc.RemoteScheduler(url)
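# Sketch: with self.unix_socket == '/tmp/xyz/luigid.sock' the ParseResult above
# serializes to 'http+unix:///tmp/xyz/luigid.sock'; luigi's RPC layer is then
# expected (assumption) to route such URLs over the UNIX domain socket instead
# of TCP.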
class ServerTestRun(unittest.TestCase):
"""Test to start and stop the server in a more "standard" way
"""
server_client_class = INETServerClient
def start_server(self):
self._process = multiprocessing.Process(
target=self.server_client.run_server
)
self._process.start()
time.sleep(0.1) # wait for server to start
self.sch = self.server_client.scheduler()
self.sch._wait = lambda: None
def stop_server(self):
self._process.terminate()
self._process.join(1)
if self._process.is_alive():
os.kill(self._process.pid, signal.SIGKILL)
def setUp(self):
self.server_client = self.server_client_class()
state_path = tempfile.mktemp(suffix=self.id())
self.addCleanup(functools.partial(os.unlink, state_path))
luigi.configuration.get_config().set('scheduler', 'state_path', state_path)
self.start_server()
def tearDown(self):
self.stop_server()
def test_ping(self):
self.sch.ping(worker='xyz')
def test_raw_ping(self):
self.sch._request('/api/ping', {'worker': 'xyz'})
def test_raw_ping_extended(self):
self.sch._request('/api/ping', {'worker': 'xyz', 'foo': 'bar'})
def test_404(self):
with self.assertRaises(luigi.rpc.RPCError):
self.sch._request('/api/fdsfds', {'dummy': 1})
@skipOnTravis('https://travis-ci.org/spotify/luigi/jobs/72953884')
def test_save_state(self):
self.sch.add_task('X', 'B', deps=('A',))
self.sch.add_task('X', 'A')
self.assertEqual(self.sch.get_work('X')['task_id'], 'A')
self.stop_server()
self.start_server()
work = self.sch.get_work('X')['running_tasks'][0]
self.assertEqual(work['task_id'], 'A')
class URLLibServerTestRun(ServerTestRun):
@mock.patch.object(luigi.rpc, 'HAS_REQUESTS', False)
def start_server(self, *args, **kwargs):
super(URLLibServerTestRun, self).start_server(*args, **kwargs)
@attr('unix')
class UNIXServerTestRun(ServerTestRun):
    server_client_class = UNIXServerClient
    def tearDown(self):
        super(UNIXServerTestRun, self).tearDown()
shutil.rmtree(self.server_client.tempdir)
if __name__ == '__main__':
unittest.main()
| upworthy/luigi | test/server_test.py | Python | apache-2.0 | 5,438 |
# Copyright 2019 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Ensures that code samples in Sonnet are accurate."""
import doctest
import inspect
from absl.testing import parameterized
import sonnet as snt
from sonnet.src import test_utils
import tensorflow as tf
import tree
class DoctestTest(test_utils.TestCase, parameterized.TestCase):
# Avoid running doctests inside a `with tf.device` block.
ENTER_PRIMARY_DEVICE = False
def setUp(self):
super().setUp()
if self.primary_device != "TPU":
# `TpuReplicator` cannot be constructed without a TPU, however it has
# exactly the same API as `Replicator` so we can run doctests using that
# instead.
snt.distribute.TpuReplicator = snt.distribute.Replicator
@parameterized.named_parameters(test_utils.find_sonnet_python_modules(snt))
def test_doctest(self, module):
# `snt` et al import all dependencies from `src`, however doctest does not
# test imported deps so we must manually set `__test__` such that imported
# symbols are tested.
# See: docs.python.org/3/library/doctest.html#which-docstrings-are-examined
if not hasattr(module, "__test__") or not module.__test__:
module.__test__ = {}
for name in module.__all__:
value = getattr(module, name)
if not inspect.ismodule(value):
if (inspect.isclass(value) or isinstance(value, str) or
inspect.isfunction(value) or inspect.ismethod(value)):
module.__test__[name] = value
elif hasattr(value, "__doc__"):
module.__test__[name] = value.__doc__
num_failed, num_attempted = doctest.testmod(
module,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
extraglobs={
"snt": snt,
"tf": tf,
"tree": tree,
})
if num_attempted == 0:
self.skipTest("No doctests in %s" % module.__name__)
self.assertEqual(num_failed, 0, "{} doctests failed".format(num_failed))
if __name__ == "__main__":
tf.test.main()
| deepmind/sonnet | sonnet/src/conformance/doctest_test.py | Python | apache-2.0 | 2,651 |
# let's try to draw this tree!
import matplotlib.pyplot as plt
decisionNode = dict(boxstyle = "sawtooth", fc="0.8")
leafNode = dict(boxstyle = "round4", fc="0.8")
arrow_args = dict(arrowstyle="<-")
def plotNode(nodeTxt, centerPt, parentPt, nodeType):
createPlot.axl.annotate(nodeTxt, xy=parentPt, xycoords="axes fraction", xytext=centerPt, textcoords="axes fraction", va="center", ha="center", bbox=nodeType, arrowprops=arrow_args)
def plotMidText(cntrPt, parentPt, txtString):
xMid = (parentPt[0] - cntrPt[0])/2.0 + cntrPt[0]
yMid = (parentPt[1] - cntrPt[1])/2.0 + cntrPt[1]
createPlot.axl.text(xMid,yMid, txtString)
def plotTree(myTree, parentPt, nodeTxt):
numLeafs = getNumLeafs(myTree)
getTreeDepth(myTree)
firstStr = myTree.keys()[0]
cntrPt = (plotTree.xOff + (1.0 + float(numLeafs))/2.0/plotTree.totalW, plotTree.yOff)
plotMidText(cntrPt, parentPt, nodeTxt)
plotNode(firstStr, cntrPt, parentPt, decisionNode)
    secondDict = myTree[firstStr]
    plotTree.yOff = plotTree.yOff - 1.0/plotTree.totalD
    for key in secondDict.keys():
        if type(secondDict[key]).__name__=='dict':
            plotTree(secondDict[key],cntrPt,str(key))
else:
plotTree.xOff = plotTree.xOff + 1.0 / plotTree.totalW
plotNode(secondDict[key], (plotTree.xOff, plotTree.yOff), cntrPt, leafNode)
plotMidText((plotTree.xOff, plotTree.yOff), cntrPt, str(key))
plotTree.yOff = plotTree.yOff + 1.0/plotTree.totalD
def createPlot(inTree):
    fig = plt.figure(1, facecolor="white")
fig.clf()
axprops = dict(xticks=[],yticks=[])
createPlot.axl = plt.subplot(111, frameon=False, **axprops)
plotTree.totalW = float(getNumLeafs(inTree))
plotTree.totalD = float(getTreeDepth(inTree))
plotTree.xOff = -0.5/plotTree.totalW
plotTree.yOff = 1.0
plotTree(inTree, (0.5,1.0),'')
plt.show()
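# Minimal usage sketch (assumes matplotlib can open a window or a suitable
# backend is configured; retrieveTree is defined further below):
#
#     myTree = retrieveTree(0)
#     print getNumLeafs(myTree)      # -> 3
#     print getTreeDepth(myTree)     # -> 2
#     createPlot(myTree)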
def getNumLeafs(myTree):
numLeafs = 0
firstStr = myTree.keys()[0]
secondDict = myTree[firstStr]
for key in secondDict.keys():
if type(secondDict[key]).__name__=='dict':
numLeafs += getNumLeafs(secondDict[key])
else:
numLeafs += 1
return numLeafs
def getTreeDepth(myTree):
maxDepth = 0
firstStr = myTree.keys()[0]
secondDict = myTree[firstStr]
for key in secondDict.keys():
if type(secondDict[key]).__name__=='dict':
thisDepth = 1 + getTreeDepth(secondDict[key])
else:
thisDepth = 1
if thisDepth > maxDepth:
maxDepth = thisDepth
return maxDepth
#create tree for testing
def retrieveTree(i):
    listOfTrees = [{'no surfacing':{0:'no', 1:{'flippers':{0:'no', 1:'yes'}}}}, {'no surfacing': {0:'no',1:{'flippers':{0:{'head':{0:'no',1:'yes'}},1:'no'}}}}]
    return listOfTrees[i]
 | jenni4j/ML-from-scratch | trees/treePlotter.py | Python | mit | 2781 |
# This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from collections import OrderedDict
from datetime import timedelta
from pytz import timezone
from sqlalchemy.orm import load_only
from sqlalchemy.orm.attributes import set_committed_value
from indico.core.config import Config
from indico.core.db import db
from indico.core.db.sqlalchemy.links import LinkType
from indico.core.db.sqlalchemy.protection import ProtectionMode
from indico.modules.attachments import Attachment
from indico.modules.attachments.models.folders import AttachmentFolder
from indico.modules.categories import upcoming_events_settings
from indico.modules.events import Event
from indico.modules.events.contributions import Contribution
from indico.modules.events.contributions.models.subcontributions import SubContribution
from indico.modules.events.sessions import Session
from indico.modules.events.timetable.models.entries import TimetableEntry, TimetableEntryType
from indico.util.caching import memoize_redis
from indico.util.date_time import now_utc
from indico.util.i18n import _, ngettext
from indico.util.struct.iterables import materialize_iterable
def get_events_by_year(category_id=None):
"""Get the number of events for each year.
:param category_id: The category ID to get statistics for. Events
from subcategories are also included.
:return: An `OrderedDict` mapping years to event counts.
"""
category_filter = Event.category_chain_overlaps(category_id) if category_id else True
query = (db.session
.query(db.cast(db.extract('year', Event.start_dt), db.Integer).label('year'),
db.func.count())
.filter(~Event.is_deleted,
category_filter)
.order_by('year')
.group_by('year'))
return OrderedDict(query)
def get_contribs_by_year(category_id=None):
"""Get the number of contributions for each year.
:param category_id: The category ID to get statistics for.
Contributions from subcategories are also
included.
:return: An `OrderedDict` mapping years to contribution counts.
"""
category_filter = Event.category_chain_overlaps(category_id) if category_id else True
query = (db.session
.query(db.cast(db.extract('year', TimetableEntry.start_dt), db.Integer).label('year'),
db.func.count())
.join(TimetableEntry.event_new)
.filter(TimetableEntry.type == TimetableEntryType.CONTRIBUTION,
~Event.is_deleted,
category_filter)
.order_by('year')
.group_by('year'))
return OrderedDict(query)
def get_attachment_count(category_id=None):
"""Get the number of attachments in events in a category.
:param category_id: The category ID to get statistics for.
Attachments from subcategories are also
included.
:return: The number of attachments
"""
category_filter = Event.category_chain_overlaps(category_id) if category_id else True
subcontrib_contrib = db.aliased(Contribution)
query = (db.session
.query(db.func.count(Attachment.id))
.join(Attachment.folder)
.join(AttachmentFolder.event_new)
.outerjoin(AttachmentFolder.session)
.outerjoin(AttachmentFolder.contribution)
.outerjoin(AttachmentFolder.subcontribution)
.outerjoin(subcontrib_contrib, subcontrib_contrib.id == SubContribution.contribution_id)
.filter(AttachmentFolder.link_type != LinkType.category,
~Attachment.is_deleted,
~AttachmentFolder.is_deleted,
~Event.is_deleted,
# we have exactly one of those or none if the attachment is on the event itself
~db.func.coalesce(Session.is_deleted, Contribution.is_deleted, SubContribution.is_deleted, False),
# in case of a subcontribution we also need to check that the contrib is not deleted
(subcontrib_contrib.is_deleted.is_(None) | ~subcontrib_contrib.is_deleted),
category_filter))
return query.one()[0]
@memoize_redis(86400)
def get_category_stats(category_id=None):
"""Get category statistics.
This function is mainly a helper so we can get and cache
all values at once and keep a last-update timestamp.
:param category_id: The category ID to get statistics for.
Subcategories are also included.
"""
return {'events_by_year': get_events_by_year(category_id),
'contribs_by_year': get_contribs_by_year(category_id),
'attachments': get_attachment_count(category_id),
'updated': now_utc()}
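# Illustrative shape of the returned mapping (values are made up):
#
#     {'events_by_year': OrderedDict([(2014, 10), (2015, 12)]),
#      'contribs_by_year': OrderedDict([(2014, 120), (2015, 150)]),
#      'attachments': 2345,
#      'updated': <UTC datetime of the computation>}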
@memoize_redis(3600)
@materialize_iterable()
def get_upcoming_events():
"""Get the global list of upcoming events"""
from indico.modules.events import Event
data = upcoming_events_settings.get_all()
if not data['max_entries'] or not data['entries']:
return
tz = timezone(Config.getInstance().getDefaultTimezone())
now = now_utc(False).astimezone(tz)
base_query = (Event.query
.filter(Event.effective_protection_mode == ProtectionMode.public,
~Event.is_deleted,
Event.end_dt.astimezone(tz) > now)
.options(load_only('id', 'title', 'start_dt', 'end_dt')))
queries = []
cols = {'category': Event.category_id,
'event': Event.id}
for entry in data['entries']:
delta = timedelta(days=entry['days'])
query = (base_query
.filter(cols[entry['type']] == entry['id'])
.filter(db.cast(Event.start_dt.astimezone(tz), db.Date) > (now - delta).date())
.with_entities(Event, db.literal(entry['weight']).label('weight')))
queries.append(query)
query = (queries[0].union(*queries[1:])
.order_by(db.desc('weight'), Event.start_dt, Event.title)
.limit(data['max_entries']))
for row in query:
event = row[0]
# we cache the result of the function and is_deleted is used in the repr
# and having a broken repr on the cached objects would be ugly
set_committed_value(event, 'is_deleted', False)
yield event
def get_visibility_options(category_or_event, allow_invisible=True):
"""Return the visibility options available for the category or event."""
if isinstance(category_or_event, Event):
category = category_or_event.category
event = category_or_event
else:
category = category_or_event
event = None
def _category_above_message(number):
return ngettext('From the category above', 'From {} categories above', number).format(number)
options = [(n + 1, ('{} \N{RIGHTWARDS ARROW} "{}"'.format(_category_above_message(n).format(n), title)))
for n, title in enumerate(category.chain_titles[::-1])]
if event is None:
options[0] = (1, _("From this category only"))
else:
options[0] = (1, '{} \N{RIGHTWARDS ARROW} "{}"'.format(_("From the current category only"), category.title))
options[-1] = ('', _("From everywhere"))
if allow_invisible:
options.insert(0, (0, _("Invisible")))
# In case the current visibility is higher than the distance to the root category
if category_or_event.visibility is not None and not any(category_or_event.visibility == x[0] for x in options):
options.append((category_or_event.visibility,
'({} \N{RIGHTWARDS ARROW} {})'.format(_category_above_message(category_or_event.visibility),
_("Everywhere"))))
return options
| DavidAndreev/indico | indico/modules/categories/util.py | Python | gpl-3.0 | 8,635 |
import os
import time
from cartas import Mazo, Carta
def _explicar(texto):
"""
Limpiar la pantalla y mostrar in texto recuadrado.
Args:
texto (str): Texto a mostrar en pantalla.
Examples:
::
-----------------------------------------------------
Esto es un texto recuadrado
-----------------------------------------------------
"""
os.system('cls' if os.name == 'nt' else 'clear')
print("-----------------------------------------------------")
print(texto)
print("-----------------------------------------------------")
def _mostrar_pilas(pilas):
"""
Imprimir cuarto pilas (mazos) en columnas.
Args:
pilas (magia.cartas.Mazo): Mazos a mostrar en pantalla.
"""
# obtener cantidad máxima de lineas a imprimir
lineas = max(pilas[0].cant_cartas(),
pilas[1].cant_cartas(),
pilas[2].cant_cartas())
print("{:<14} {:<14} {:<14}".format("Pila 1", "Pila 2", "Pila 3"))
for i in range(lineas):
        cartas = [] # cards to print on this line
for pila in pilas:
try:
cartas.append(str(pila.cartas[i]))
except IndexError:
cartas.append("")
print("{:<14} {:<14} {:<14}".format(cartas[0], cartas[1], cartas[2]))
def truco_de_magia():
"""
Hace un truco de magia usando las cartas.
"""
# mazo con todas las cartas
mazo_completo = Mazo()
mazo_completo.generar("español", 48)
explicar("Conseguí un mazo")
print(mazo_completo)
# cartas usadas en el truco, son 20
mazo = Mazo()
# crear 3 pilas en donde se ponen las cartas
pilas = []
for _ in range(3):
pilas.append(Mazo())
    # shuffle the deck
    mazo_completo.mezclar()
    _explicar("Mezclé el mazo")
print(mazo_completo)
    # take 21 cards from the deck and put them in the hand
    for i in range(21):
        mazo.poner(mazo_completo.tomar())
    _explicar("Tomé 21 cartas del mazo, a las demás las descarté\n"
              "Pensá en una carta, cuando estés listo apretá Enter")
print(mazo)
input()
    # the magic trick itself
    for _ in range(3):
        # deal the cards from the hand into the piles in order
        p = 0 # pile where the next card goes
for i in range(21):
pilas[p].poner(mazo.tomar())
if p < 2:
p += 1
else:
p = 0
explicar("Estoy poniendo las cartas en pilas")
mostrar_pilas(pilas)
time.sleep(0.1)
explicar("Terminé de poner las cartas en pilas\n"
"Decime en qué pila está tu carta, ingresa 1, 2 o 3")
mostrar_pilas(pilas)
pila_elegida = int(input()) - 1
        # put the cards back into the hand; the chosen pile must go in the
        # middle
        orden_pilas = [0, 1, 2] # order in which the piles go back into the hand
        orden_pilas.remove(pila_elegida) # remove the number of the chosen pile
        orden_pilas.insert(1, pila_elegida) # put it in the second place
for i in orden_pilas:
mazo.poner(pilas[i])
    # get the card that was thought of by taking card number 11
    for _ in range(10):
        mazo.tomar()
    carta_mágica = mazo.tomar()
    _explicar("La carta que pensaste es:")
print(carta_mágica)
print(
" *\n"
" * *\n"
" * \* / *\n"
" * --.:. *\n"
" * * :\ -\n"
" .* | \\\n"
" * * \\\n"
" . * \\\n"
" .. /\\.\n"
" * |\\)|\n"
" . * \ |\n"
" . . * |/\\\n"
" .* * / \\\n"
" * \\ / \\\n"
" * . * \\ \\\n"
" * . \n"
" * * \n"
" . * * \n"
)
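# Usage sketch (assumes the sibling `cartas` module providing Mazo/Carta is
# importable, as imported at the top of this file):
#
#     truco_de_magia()
#
# The trick works because after three rounds of dealing the 21 cards into 3
# piles and stacking the chosen pile in the middle, the chosen card always ends
# up in position 11, which is exactly the card taken after discarding 10.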
| martinber/guia-sphinx | ejemplos_sphinx/simple/magia/magia.py | Python | mit | 3,853 |
import os
import subprocess
import logging
import dateutil.parser
import collections
import xml.etree.ElementTree
import svn.constants
_logger = logging.getLogger('svn')
class CommonClient(object):
def __init__(self, url_or_path, type_, *args, **kwargs):
self.__url_or_path = url_or_path
self.__username = kwargs.pop('username', None)
self.__password = kwargs.pop('password', None)
self.__svn_filepath = kwargs.pop('svn_filepath', 'svn')
self.__trust_cert = kwargs.pop('trust_cert', None)
if type_ not in (svn.constants.LT_URL, svn.constants.LT_PATH):
raise ValueError("Type is invalid: %s" % (type_))
self.__type = type_
# TODO(dustin): return_stderr is no longer implemented.
def run_command(self, subcommand, args, success_code=0,
return_stderr=False, combine=False, return_binary=False):
cmd = [self.__svn_filepath, '--non-interactive']
if self.__trust_cert:
cmd += ['--trust-server-cert']
if self.__username is not None and self.__password is not None:
cmd += ['--username', self.__username]
cmd += ['--password', self.__password]
cmd += [subcommand] + args
_logger.debug("RUN: %s" % (cmd,))
p = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env={'LANG': 'en_US.UTF-8'})
stdout = p.stdout.read()
r = p.wait()
if r != success_code:
raise ValueError("Command failed with (%d): %s\n%s" %
(p.returncode, cmd, stdout))
if return_binary is True:
return stdout
if combine is True:
return stdout
else:
return stdout.decode().strip('\n').split('\n')
def rows_to_dict(self, rows, lc=True):
d = {}
for row in rows:
row = row.strip()
if not row:
continue
pivot = row.index(': ')
k = row[:pivot]
v = row[pivot + 2:]
if lc is True:
k = k.lower()
d[k] = v
return d
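    # Sketch: given rows like ['Path: trunk', 'Revision: 42'], rows_to_dict
    # returns {'path': 'trunk', 'revision': '42'} (keys lower-cased because
    # lc defaults to True).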
def __element_text(self, element):
"""Return ElementTree text or None
:param xml.etree.ElementTree element: ElementTree to get text.
:return str|None: Element text
"""
if element is not None and len(element.text):
return element.text
return None
def info(self, rel_path=None):
full_url_or_path = self.__url_or_path
if rel_path is not None:
full_url_or_path += '/' + rel_path
result = self.run_command(
'info',
['--xml', full_url_or_path],
combine=True)
root = xml.etree.ElementTree.fromstring(result)
entry_attr = root.find('entry').attrib
commit_attr = root.find('entry/commit').attrib
relative_url = root.find('entry/relative-url')
author = root.find('entry/commit/author')
wcroot_abspath = root.find('entry/wc-info/wcroot-abspath')
wcinfo_schedule = root.find('entry/wc-info/schedule')
wcinfo_depth = root.find('entry/wc-info/depth')
info = {
'url': root.find('entry/url').text,
'relative_url': self.__element_text(relative_url),
# TODO(dustin): These are just for backwards-compatibility. Use the ones added
# below.
'entry#kind': entry_attr['kind'],
'entry#path': entry_attr['path'],
'entry#revision': int(entry_attr['revision']),
'repository/root': root.find('entry/repository/root').text,
'repository/uuid': root.find('entry/repository/uuid').text,
'wc-info/wcroot-abspath': self.__element_text(wcroot_abspath),
'wc-info/schedule': self.__element_text(wcinfo_schedule),
'wc-info/depth': self.__element_text(wcinfo_depth),
'commit/author': self.__element_text(author),
'commit/date': dateutil.parser.parse(
root.find('entry/commit/date').text),
'commit#revision': int(commit_attr['revision']),
}
# Set some more intuitive keys, because no one likes dealing with
# symbols. However, we retain the old ones to maintain backwards-
# compatibility.
# TODO(dustin): Should we be casting the integers?
info['entry_kind'] = info['entry#kind']
info['entry_path'] = info['entry#path']
info['entry_revision'] = info['entry#revision']
info['repository_root'] = info['repository/root']
info['repository_uuid'] = info['repository/uuid']
info['wcinfo_wcroot_abspath'] = info['wc-info/wcroot-abspath']
info['wcinfo_schedule'] = info['wc-info/schedule']
info['wcinfo_depth'] = info['wc-info/depth']
info['commit_author'] = info['commit/author']
info['commit_date'] = info['commit/date']
info['commit_revision'] = info['commit#revision']
return info
def properties(self, rel_path=None):
""" Return a dictionary with all svn-properties associated with a
relative path.
:param rel_path: relative path in the svn repo to query the
properties from
:returns: a dictionary with the property name as key and the content
as value
"""
full_url_or_path = self.__url_or_path
if rel_path is not None:
full_url_or_path += '/' + rel_path
result = self.run_command(
'proplist',
['--xml', full_url_or_path],
combine=True)
        # query the property list of this path
root = xml.etree.ElementTree.fromstring(result)
target_elem = root.find('target')
property_names = [p.attrib["name"]
for p in target_elem.findall('property')]
        # now query the content of each property
property_dict = {}
for property_name in property_names:
result = self.run_command(
'propget',
['--xml', property_name, full_url_or_path, ],
combine=True)
root = xml.etree.ElementTree.fromstring(result)
target_elem = root.find('target')
property_elem = target_elem.find('property')
property_dict[property_name] = property_elem.text
return property_dict
def cat(self, rel_filepath, revision=None):
cmd = []
if revision is not None:
cmd += ['-r', str(revision)]
cmd += [self.__url_or_path + '/' + rel_filepath]
return self.run_command('cat', cmd, return_binary=True)
def log_default(self, timestamp_from_dt=None, timestamp_to_dt=None,
limit=None, rel_filepath=None, stop_on_copy=False,
revision_from=None, revision_to=None, changelist=False):
"""Allow for the most-likely kind of log listing: the complete list,
a FROM and TO timestamp, a FROM timestamp only, or a quantity limit.
"""
full_url_or_path = self.__url_or_path
if rel_filepath is not None:
full_url_or_path += '/' + rel_filepath
timestamp_from_phrase = ('{' + timestamp_from_dt.isoformat() + '}') \
if timestamp_from_dt \
else ''
timestamp_to_phrase = ('{' + timestamp_to_dt.isoformat() + '}') \
if timestamp_to_dt \
else ''
args = []
if timestamp_from_phrase or timestamp_to_phrase:
if not timestamp_from_phrase:
raise ValueError("The default log retriever can not take a TO "
"timestamp without a FROM timestamp.")
if not timestamp_to_phrase:
timestamp_to_phrase = 'HEAD'
args += ['-r', timestamp_from_phrase + ':' + timestamp_to_phrase]
if revision_from or revision_to:
if timestamp_from_phrase or timestamp_to_phrase:
raise ValueError("The default log retriever can not take both "
"timestamp and revision number ranges.")
if not revision_from:
revision_from = '1'
if not revision_to:
revision_to = 'HEAD'
args += ['-r', str(revision_from) + ':' + str(revision_to)]
if limit is not None:
args += ['-l', str(limit)]
if stop_on_copy is True:
args += ['--stop-on-copy']
if changelist is True:
args += ['--verbose']
result = self.run_command(
'log',
args + ['--xml', full_url_or_path],
combine=True)
root = xml.etree.ElementTree.fromstring(result)
named_fields = ['date', 'msg', 'revision', 'author', 'changelist']
c = collections.namedtuple(
'LogEntry',
named_fields)
for e in root.findall('logentry'):
entry_info = {x.tag: x.text for x in e.getchildren()}
date = None
date_text = entry_info.get('date')
if date_text is not None:
date = dateutil.parser.parse(date_text)
log_entry = {
'msg': entry_info.get('msg'),
'author': entry_info.get('author'),
'revision': int(e.get('revision')),
'date': date
}
if changelist is True:
cl = []
for ch in e.findall('paths/path'):
cl.append((ch.attrib['action'], ch.text))
log_entry['changelist'] = cl
else:
log_entry['changelist'] = None
yield c(**log_entry)
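    # Usage sketch for a client instance `client` (e.g. the package's local or
    # remote client wrappers built on this class; names assumed here):
    #
    #     for entry in client.log_default(limit=5):
    #         print(entry.revision, entry.author, entry.msg)
    #
    # Each entry is a LogEntry namedtuple; `changelist` stays None unless
    # changelist=True is passed.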
def export(self, to_path, revision=None):
cmd = []
if revision is not None:
cmd += ['-r', str(revision)]
cmd += [self.__url_or_path, to_path]
self.run_command('export', cmd)
def list(self, extended=False, rel_path=None):
full_url_or_path = self.__url_or_path
if rel_path is not None:
full_url_or_path += '/' + rel_path
if extended is False:
for line in self.run_command(
'ls',
[full_url_or_path]):
line = line.strip()
if line:
yield line
else:
raw = self.run_command(
'ls',
['--xml', full_url_or_path],
combine=True)
root = xml.etree.ElementTree.fromstring(raw)
list_ = root.findall('list/entry')
for entry in list_:
entry_attr = entry.attrib
kind = entry_attr['kind']
name = entry.find('name').text
size = entry.find('size')
# This will be None for directories.
if size is not None:
size = int(size.text)
commit_node = entry.find('commit')
author = commit_node.find('author').text
date = dateutil.parser.parse(commit_node.find('date').text)
commit_attr = commit_node.attrib
revision = int(commit_attr['revision'])
yield {
'kind': kind,
# To decouple people from the knowledge of the value.
'is_directory': kind == svn.constants.K_DIR,
'name': name,
'size': size,
'author': author,
'date': date,
# Our approach to normalizing a goofy field-name.
'timestamp': date,
'commit_revision': revision,
}
def list_recursive(self, rel_path=None, yield_dirs=False,
path_filter_cb=None):
q = [rel_path]
while q:
current_rel_path = q[0]
del q[0]
for entry in self.list(extended=True, rel_path=current_rel_path):
if entry['is_directory'] is True:
if current_rel_path is not None:
next_rel_path = \
os.path.join(current_rel_path, entry['name'])
else:
next_rel_path = entry['name']
do_queue = True
if path_filter_cb is not None:
result = path_filter_cb(next_rel_path)
if result is False:
do_queue = False
if do_queue is True:
q.append(next_rel_path)
if entry['is_directory'] is False or yield_dirs is True:
current_rel_path_phrase = current_rel_path \
if current_rel_path is not None \
else ''
yield (current_rel_path_phrase, entry)
def diff_summary(self, old, new, rel_path=None):
"""
Provides a summarized output of a diff between two revisions
(file, change type, file type)
"""
full_url_or_path = self.__url_or_path
if rel_path is not None:
full_url_or_path += '/' + rel_path
result = self.run_command(
'diff',
['--old', '{0}@{1}'.format(full_url_or_path, old),
'--new', '{0}@{1}'.format(full_url_or_path, new),
'--summarize', '--xml'],
combine=True)
root = xml.etree.ElementTree.fromstring(result)
diff = []
for element in root.findall('paths/path'):
diff.append({
'path': element.text,
'item': element.attrib['item'],
'kind': element.attrib['kind']})
return diff
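    # Sketch: diff_summary(10, 20) returns dicts shaped like
    #     {'path': '<full URL or path>', 'item': 'modified', 'kind': 'file'}
    # mirroring `svn diff --summarize --xml` output between the two revisions.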
def diff(self, old, new, rel_path=None):
"""
Provides output of a diff between two revisions (file, change type,
file type)
"""
full_url_or_path = self.__url_or_path
if rel_path is not None:
full_url_or_path += '/' + rel_path
diff_result = self.run_command(
'diff',
['--old', '{0}@{1}'.format(full_url_or_path, old),
'--new', '{0}@{1}'.format(full_url_or_path, new)],
combine=True)
file_to_diff = {}
for non_empty_diff in filter(None, diff_result.split('Index: ')):
split_diff = non_empty_diff.split('==')
file_to_diff[split_diff[0].strip().strip('/')] = split_diff[-1].strip('=').strip()
diff_summaries = self.diff_summary(old, new, rel_path)
for diff_summary in diff_summaries:
diff_summary['diff'] = \
file_to_diff[diff_summary['path'].split(full_url_or_path)[-1].strip('/')]
return diff_summaries
@property
def url(self):
if self.__type != svn.constants.LT_URL:
raise EnvironmentError(
"Only the remote-client has access to the URL.")
return self.__url_or_path
@property
def path(self):
if self.__type != svn.constants.LT_PATH:
raise EnvironmentError(
"Only the local-client has access to the path.")
return self.__url_or_path
| rnt/PySvn | svn/common.py | Python | gpl-2.0 | 15,416 |
#!/usr/bin/python2
"""
This file is part of ocean.
SEA is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
SEA is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with SEA. If not, see <http://www.gnu.org/licenses/>.
Copyright 2014 by neuromancer
"""
"""
Dedicated to the intelligence and beauty of a woman that
inspired this code despite of the distance..
"""
import os
import argparse
import csv
import sys
import random
from src.Process import Process
from src.Detection import GetArgs, GetFiles, GetCmd, GetDir
from src.Mutation import NullMutator, RandomByteMutator, RandomExpanderMutator, RandomInputMutator
from src.Printer import TypePrinter
from src.Event import IsTimeout
from src.Misc import readmodfile
"""def readmodfile(modfile):
hooked_mods = []
if modfile is not None:
hooked_mods = open(modfile).read().split("\n")
hooked_mods = filter(lambda x: x <> '', hooked_mods)
return hooked_mods
"""
def prepare_inputs(inputs):
r = []
for input in inputs:
arg = input.PrepareData()
if not (arg is None):
r.append(arg)
return r
if __name__ == "__main__":
if open("/proc/sys/kernel/randomize_va_space").read().strip() <> "0":
print "Address space layout randomization (ASLR) is enabled, disable it before continue"
print "Hint: # echo 0 > /proc/sys/kernel/randomize_va_space"
sys.exit(-1)
    # Random seed initialization
random.seed()
# Arguments
parser = argparse.ArgumentParser(description='xxx')
parser.add_argument("testcase", help="Testcase to use", type=str, default=None)
parser.add_argument("--show-stdout",
help="Don't use /dev/null as stdout/stderr, nor close stdout and stderr if /dev/null doesn't exist",
action="store_true", default=False)
parser.add_argument("--inc-mods",
help="",
type=str, default=None)
parser.add_argument("--ign-mods",
help="",
type=str, default=None)
parser.add_argument("--filter-by",
help="",
type=str, nargs='+', default=[])
parser.add_argument("--timeout", dest="timeout", type=int,
help="Timeout (in seconds)", default=3)
parser.add_argument("-n", dest="max_mut", type=int,
help="", default=0)
parser.add_argument("-d", dest="depth", type=int,
help="", default=1)
parser.add_argument("-w", dest="width", type=int,
help="", default=0)
options = parser.parse_args()
testcase = options.testcase
filters = options.filter_by
incmodfile = options.inc_mods
ignmodfile = options.ign_mods
show_stdout = options.show_stdout
max_mut = options.max_mut
depth = options.depth
width = options.width
csvfile = sys.stdout
os.chdir(GetDir(testcase))
program = GetCmd(None)
os.chdir("crash")
timeout = options.timeout
envs = dict()
args = GetArgs()
files = GetFiles()
# modules to include or ignore
included_mods = readmodfile(incmodfile)
ignored_mods = readmodfile(ignmodfile)
original_inputs = RandomInputMutator(args + files, NullMutator)
expanded_input_generator = RandomInputMutator(args + files, RandomExpanderMutator)
mutated_input_generator = RandomInputMutator(args + files, RandomByteMutator)
app = Process(program, envs, timeout, included_mods, ignored_mods, no_stdout = not show_stdout )
prt = TypePrinter("/dev/stdout", program)
# unchanged input
null_mutt, original_input = original_inputs.next()
original_events = app.getData(prepare_inputs(original_input))
if original_events is None:
print "Execution of",program,"failed!"
exit(-1)
#prt.set_original_events(original_events)
prt.print_events(null_mutt, original_events)
for (i, (d, mutated)) in enumerate(expanded_input_generator):
#if app.timeouted():
# sys.exit(-1)
if i >= max_mut:
break
events = app.getData(prepare_inputs(mutated))
prt.print_events(d, events)
mutated_inputs = []
if depth > 0:
for _ in range(width):
_, mutated = mutated_input_generator.next()
events = app.getData(prepare_inputs(mutated))
prt.print_events(d, events)
#print(map(str,mutated))#, map(type, mutated))
if not IsTimeout(events[-1]):
mutated_inputs.append(mutated)
for _ in range(depth):
for mutated_input in mutated_inputs:
expanded_input_generator = RandomInputMutator(mutated_input, RandomExpanderMutator)
for (i, (d, mutated)) in enumerate(expanded_input_generator):
#if app.timeouted():
# sys.exit(-1)
if i >= max_mut:
break
events = app.getData(prepare_inputs(mutated))
prt.print_events(d, events)
| neuromancer/ocean | ocean.py | Python | gpl-3.0 | 5,448 |
"""
Benchmark script to be used to evaluate the performace improvement of
the MKL with numpy.
Author: Suresh Shanmugam
"""
import os
import sys
import timeit
import numpy
from numpy.random import random
def test_eigenvalue():
"""
Test eigen value computation of a matrix
"""
i = 500
data = random((i, i))
result = numpy.linalg.eig(data)
def test_svd():
"""
    Test singular value decomposition of a matrix
"""
i = 1000
data = random((i, i))
result = numpy.linalg.svd(data)
result = numpy.linalg.svd(data, full_matrices=False)
def test_inv():
"""
Test matrix inversion
"""
i = 1000
data = random((i, i))
result = numpy.linalg.inv(data)
def test_det():
"""
Test the computation of the matrix determinant
"""
i = 1000
data = random((i, i))
result = numpy.linalg.det(data)
def test_dot():
"""
Test the dot product
"""
i = 1000
a = random((i, i))
b = numpy.linalg.inv(a)
result = numpy.dot(a, b) - numpy.eye(i)
# Tests to run. Each value holds the two reference timings (in ms) used as baselines.
tests = {test_eigenvalue: (752., 3376.),
test_svd: (4608., 15990.),
test_inv: (418., 1457.),
test_det: (186.0, 400.),
test_dot: (666., 2444.)}
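# Worked example of how the report columns are read: if test_dot measures
# 333 ms on this machine, its line shows 2.00 / 7.34, i.e. 666/333 against the
# first reference timing and 2444/333 against the second one (the tuples above
# are those reference timings in milliseconds).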
# Environment variable that limits the maximum number of threads used for
# computation
THREADS_LIMIT_ENV = 'OMP_NUM_THREADS'
def start_benchmark():
print("Benchmark numpy performance")
if THREADS_LIMIT_ENV in os.environ:
print("Maximum number of threads used for computation is: %s" %
os.environ[THREADS_LIMIT_ENV])
print("-" * 80)
print("Starting timing with numpy %s\n Version: %s" % (numpy.__version__,
sys.version))
print("%20s: %10s - %5s / %5s" % ("Function", "Timing [ms]", "MKL", "No MKL"))
for fun, bench in tests.items():
        t = timeit.Timer(stmt="%s()" % fun.__name__, setup="from __main__ import %s" % fun.__name__)
res = t.repeat(repeat=10, number=1)
timing = 1000.0 * sum(res) / len(res)
print("%20s: %7.1f ms - %3.2f / %3.2f" % (fun.__name__, timing, bench[0] /
timing, bench[1] / timing))
if __name__ == '__main__':
start_benchmark()
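# Example invocation (an assumed usage, not part of the original script): the MKL /
# OpenMP runtime reads the environment variable checked above, e.g.
#   OMP_NUM_THREADS=4 python mkl_benchmark.py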
| suresh/notes | python/mkl_benchmark.py | Python | mit | 2,320 |
from django.shortcuts import render, get_object_or_404, render_to_response, redirect
from django.contrib.auth.decorators import login_required
from django.contrib.auth import authenticate, login, logout
from django.contrib import messages
from django.template import RequestContext
from django.http import *
from django.core.urlresolvers import reverse
from django.db.models import Q # Q objects for composing complex query filters
from .models import Customer, Document
from .forms import CustomerForm, UploadForm
import requests, json
import ConfigParser
config = ConfigParser.RawConfigParser()
config.read('config/config.cnf')
def login_user(request):
"Login to the app"
logout(request)
username = password = ''
if request.POST:
# pdb.set_trace()
username = request.POST['username']
password = request.POST['password']
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active:
login(request, user)
return HttpResponseRedirect('/')
return render_to_response('users/login.html', context_instance=RequestContext(request))
def logout_user(request):
"Logout of the app"
logout(request)
return HttpResponseRedirect('/login')
@login_required(login_url='/login/')
def index(request):
"Show all customers"
cs = Customer.objects.order_by('name')
context = {'cs': cs}
return render(request, 'customers/index.html', context)
@login_required(login_url='/login/')
def new(request):
"Create new customer"
if request.method == 'POST':
form = CustomerForm(request.POST)
if form.is_valid():
c = Customer(name=form.cleaned_data['name'])
c.save()
messages.add_message(request, messages.INFO, 'New customer created.')
return HttpResponseRedirect('/')
else:
context = {
'form': CustomerForm()
}
return render(request, 'customers/new.html', context)
@login_required(login_url='/login/')
def customer(request, customer_id):
"View or update customer"
c = get_object_or_404(Customer, pk=customer_id)
if request.method == 'POST':
form = CustomerForm(request.POST)
if form.is_valid():
c.name = form.cleaned_data['name']
c.save()
messages.add_message(request, messages.INFO, 'Customer updated.')
return HttpResponseRedirect('/')
else:
context = {
'customer': c,
'form': CustomerForm(initial = {'name': c.name})
}
return render(request, 'customers/edit.html', context)
@login_required(login_url='/login/')
def upload(request, customer_id):
"Upload a new document for processing"
c = get_object_or_404(Customer, pk=customer_id)
if request.method == 'POST':
form = UploadForm(request.POST, request.FILES)
if form.is_valid():
print "valid"
i = Document(image=request.FILES['file'], customer_id=c.id)
i.save()
            messages.add_message(request, messages.INFO, 'Successfully imported ID, it will be processed shortly.')
context = {'customer': c, 'imports': c.document_set.all()}
return render(request, 'customers/imports.html', context)
else:
print "not valid"
context = {'customer': c, 'form': UploadForm()}
return render(request, 'customers/upload.html', context)
else:
context = {'customer': c, 'form': UploadForm()}
return render(request, 'customers/upload.html', context)
@login_required(login_url='/login/')
def imports(request, customer_id):
"Display all imported documents"
c = get_object_or_404(Customer, pk=customer_id)
context = {'customer': c, 'imports': c.document_set.all()}
return render(request, 'customers/imports.html', context)
def status(request, customer_id):
"Checks status of all documents associated with the customer by pinging Captricity"
c = get_object_or_404(Customer, pk=customer_id)
ds = c.document_set.exclude(Q(status='imported') | Q(status='finished'))
for document in ds:
job_id = document.job_id
url = 'https://shreddr.captricity.com/api/v1/job/'+str(job_id)+'/instance-set/'
print "requesting: " + url
headers = {
'user-agent': 'bluecard/0.0.1',
'Captricity-API-Token': config.get('captricity', 'apitoken')}
r = requests.get(url, headers=headers)
instance_set_id = json.loads(r.text)[0]['id']
shred_url = "https://shreddr.captricity.com/api/v1/instance-set/"+str(instance_set_id)+"/shred/"
print "finding shred: " + shred_url
shreds = json.loads(r.text)
messages.add_message(request, messages.INFO, 'Status updated.')
context = {'customer': c, 'imports': c.document_set.all()}
return HttpResponseRedirect("/customers/"+str(c.id)+"/imports")
| justinwiley/bluecard | idimport/views.py | Python | lgpl-3.0 | 4,968 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'AnswerRule.validate_with_min_value'
db.add_column(u'survey_answerrule', 'validate_with_min_value',
self.gf('django.db.models.fields.PositiveIntegerField')(max_length=2, null=True),
keep_default=False)
# Adding field 'AnswerRule.validate_with_max_value'
db.add_column(u'survey_answerrule', 'validate_with_max_value',
self.gf('django.db.models.fields.PositiveIntegerField')(max_length=2, null=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'AnswerRule.validate_with_min_value'
db.delete_column(u'survey_answerrule', 'validate_with_min_value')
# Deleting field 'AnswerRule.validate_with_max_value'
db.delete_column(u'survey_answerrule', 'validate_with_max_value')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'locations.location': {
'Meta': {'object_name': 'Location'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'parent_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'parent_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'point': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['locations.Point']", 'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': u"orm['locations.Location']"}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locations'", 'null': 'True', 'to': u"orm['locations.LocationType']"})
},
u'locations.locationtype': {
'Meta': {'object_name': 'LocationType'},
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'primary_key': 'True'})
},
u'locations.point': {
'Meta': {'object_name': 'Point'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'max_digits': '13', 'decimal_places': '10'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'max_digits': '13', 'decimal_places': '10'})
},
'survey.answerrule': {
'Meta': {'object_name': 'AnswerRule'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'batch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'batch_rule'", 'null': 'True', 'to': "orm['survey.Batch']"}),
'condition': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'next_question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parent_question_rules'", 'null': 'True', 'to': "orm['survey.Question']"}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'rule'", 'null': 'True', 'to': "orm['survey.Question']"}),
'validate_with_max_value': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '2', 'null': 'True'}),
'validate_with_min_value': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '2', 'null': 'True'}),
'validate_with_option': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answer_rule'", 'null': 'True', 'to': "orm['survey.QuestionOption']"}),
'validate_with_question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Question']", 'null': 'True'}),
'validate_with_value': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '2', 'null': 'True'})
},
'survey.backend': {
'Meta': {'object_name': 'Backend'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'})
},
'survey.batch': {
'Meta': {'unique_together': "(('survey', 'name'),)", 'object_name': 'Batch'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '2', 'null': 'True'}),
'survey': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'batch'", 'null': 'True', 'to': "orm['survey.Survey']"})
},
'survey.batchlocationstatus': {
'Meta': {'object_name': 'BatchLocationStatus'},
'batch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'open_locations'", 'null': 'True', 'to': "orm['survey.Batch']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'open_batches'", 'null': 'True', 'to': u"orm['locations.Location']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
},
'survey.formula': {
'Meta': {'object_name': 'Formula'},
'batch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'formula'", 'null': 'True', 'to': "orm['survey.Batch']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'denominator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'as_denominator'", 'to': "orm['survey.Question']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'numerator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'as_numerator'", 'to': "orm['survey.Question']"})
},
'survey.groupcondition': {
'Meta': {'unique_together': "(('value', 'attribute', 'condition'),)", 'object_name': 'GroupCondition'},
'attribute': ('django.db.models.fields.CharField', [], {'default': "'AGE'", 'max_length': '20'}),
'condition': ('django.db.models.fields.CharField', [], {'default': "'EQUALS'", 'max_length': '20'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'conditions'", 'symmetrical': 'False', 'to': "orm['survey.HouseholdMemberGroup']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'survey.household': {
'Meta': {'object_name': 'Household'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'investigator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'households'", 'null': 'True', 'to': "orm['survey.Investigator']"}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['locations.Location']", 'null': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'uid': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'survey.householdbatchcompletion': {
'Meta': {'object_name': 'HouseholdBatchCompletion'},
'batch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'completed_households'", 'null': 'True', 'to': "orm['survey.Batch']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'household': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'completed_batches'", 'null': 'True', 'to': "orm['survey.Household']"}),
'householdmember': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'completed_member_batches'", 'null': 'True', 'to': "orm['survey.HouseholdMember']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'investigator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'completed_batches'", 'null': 'True', 'to': "orm['survey.Investigator']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
},
'survey.householdhead': {
'Meta': {'object_name': 'HouseholdHead', '_ormbases': ['survey.HouseholdMember']},
u'householdmember_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['survey.HouseholdMember']", 'unique': 'True', 'primary_key': 'True'}),
'level_of_education': ('django.db.models.fields.CharField', [], {'default': "'Primary'", 'max_length': '100', 'null': 'True'}),
'occupation': ('django.db.models.fields.CharField', [], {'default': "'16'", 'max_length': '100'}),
'resident_since_month': ('django.db.models.fields.PositiveIntegerField', [], {'default': '5'}),
'resident_since_year': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1984'})
},
'survey.householdmember': {
'Meta': {'object_name': 'HouseholdMember'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'blank': 'True'}),
'household': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'household_member'", 'to': "orm['survey.Household']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'male': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'surname': ('django.db.models.fields.CharField', [], {'max_length': '25'})
},
'survey.householdmembergroup': {
'Meta': {'object_name': 'HouseholdMemberGroup'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'unique': 'True', 'max_length': '5'})
},
'survey.investigator': {
'Meta': {'object_name': 'Investigator'},
'age': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'backend': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Backend']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_blocked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'English'", 'max_length': '100', 'null': 'True'}),
'level_of_education': ('django.db.models.fields.CharField', [], {'default': "'Primary'", 'max_length': '100', 'null': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['locations.Location']", 'null': 'True'}),
'male': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mobile_number': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'weights': ('django.db.models.fields.FloatField', [], {'default': '0'})
},
'survey.locationautocomplete': {
'Meta': {'object_name': 'LocationAutoComplete'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['locations.Location']", 'null': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
'survey.multichoiceanswer': {
'Meta': {'object_name': 'MultiChoiceAnswer'},
'answer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.QuestionOption']", 'null': 'True'}),
'batch': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Batch']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'household': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'multichoiceanswer'", 'null': 'True', 'to': "orm['survey.Household']"}),
'householdmember': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'multichoiceanswer'", 'null': 'True', 'to': "orm['survey.HouseholdMember']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'investigator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'multichoiceanswer'", 'null': 'True', 'to': "orm['survey.Investigator']"}),
'is_old': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Question']", 'null': 'True'}),
'rule_applied': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.AnswerRule']", 'null': 'True'})
},
'survey.numericalanswer': {
'Meta': {'object_name': 'NumericalAnswer'},
'answer': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '5', 'null': 'True'}),
'batch': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Batch']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'household': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'numericalanswer'", 'null': 'True', 'to': "orm['survey.Household']"}),
'householdmember': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'numericalanswer'", 'null': 'True', 'to': "orm['survey.HouseholdMember']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'investigator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'numericalanswer'", 'null': 'True', 'to': "orm['survey.Investigator']"}),
'is_old': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Question']", 'null': 'True'}),
'rule_applied': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.AnswerRule']", 'null': 'True'})
},
'survey.question': {
'Meta': {'object_name': 'Question'},
'answer_type': ('django.db.models.fields.CharField', [], {'max_length': '15'}),
'batches': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'questions'", 'null': 'True', 'to': "orm['survey.Batch']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'question_group'", 'null': 'True', 'to': "orm['survey.HouseholdMemberGroup']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'module': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'module'", 'null': 'True', 'to': "orm['survey.QuestionModule']"}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '2', 'null': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'children'", 'null': 'True', 'to': "orm['survey.Question']"}),
'subquestion': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '150'})
},
'survey.questionmodule': {
'Meta': {'object_name': 'QuestionModule'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'survey.questionoption': {
'Meta': {'object_name': 'QuestionOption'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '2', 'null': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'null': 'True', 'to': "orm['survey.Question']"}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '150'})
},
'survey.randomhouseholdselection': {
'Meta': {'object_name': 'RandomHouseHoldSelection'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mobile_number': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'no_of_households': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'selected_households': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'survey.survey': {
'Meta': {'object_name': 'Survey'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'sample_size': ('django.db.models.fields.PositiveIntegerField', [], {'default': '10', 'max_length': '2'}),
'type': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'survey.textanswer': {
'Meta': {'object_name': 'TextAnswer'},
'answer': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'batch': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Batch']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'household': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'textanswer'", 'null': 'True', 'to': "orm['survey.Household']"}),
'householdmember': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'textanswer'", 'null': 'True', 'to': "orm['survey.HouseholdMember']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'investigator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'textanswer'", 'null': 'True', 'to': "orm['survey.Investigator']"}),
'is_old': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Question']", 'null': 'True'}),
'rule_applied': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.AnswerRule']", 'null': 'True'})
},
'survey.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mobile_number': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'userprofile'", 'unique': 'True', 'to': u"orm['auth.User']"})
}
}
complete_apps = ['survey'] | antsmc2/mics | survey/migrations/0083_auto__add_field_answerrule_validate_with_min_value__add_field_answerru.py | Python | bsd-3-clause | 28,210 |
# Copyright 2021 The TF-Coder Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Collects all known TF-Coder benchmarks."""
import inspect
from tf_coder.benchmarks import autopandas_benchmarks
from tf_coder.benchmarks import google_benchmarks
from tf_coder.benchmarks import simple_benchmarks
from tf_coder.benchmarks import stackoverflow_benchmarks
from tf_coder.benchmarks import test_benchmarks
_ALL_BENCHMARK_MODULES = [test_benchmarks, simple_benchmarks, google_benchmarks,
stackoverflow_benchmarks, autopandas_benchmarks]
def all_benchmarks(include_ignored=False, modules=None):
"""Returns a list of all benchmarks.
Args:
include_ignored: A boolean indicating whether the search should include
ignored benchmarks.
modules: A list of module objects to inspect for benchmark functions. If
None, all known relevant modules are used.
Returns:
A list of benchmark.Benchmark objects.
"""
if modules is None:
modules = _ALL_BENCHMARK_MODULES
members = sum((inspect.getmembers(benchmark_module, inspect.isfunction)
for benchmark_module in modules), [])
benchmark_list = []
for unused_name, benchmark_function in members:
benchmark = benchmark_function()
if include_ignored or not benchmark.should_ignore:
benchmark_list.append(benchmark)
return benchmark_list
def find_benchmark_with_name(benchmark_name, include_ignored=False,
modules=None):
"""Returns a benchmark with the given name.
Args:
benchmark_name: A name (string) to search for.
include_ignored: A boolean, used as described in all_benchmarks().
modules: A list of module objects, used as described in all_benchmarks(). If
None, all known relevant modules are used.
Returns:
A benchmark.Benchmark with the given name, if there is exactly one such
benchmark. If there are zero or multiple such benchmarks, None is returned.
"""
benchmark_list = all_benchmarks(include_ignored=include_ignored,
modules=modules)
matching_benchmarks = [benchmark
for benchmark in benchmark_list
if benchmark.name == benchmark_name]
if len(matching_benchmarks) == 1:
return matching_benchmarks[0]
return None
def get_chosen_benchmarks(benchmark_name, include_ignored=False, modules=None):
"""Returns benchmarks according to the benchmark_name argument.
Args:
benchmark_name: The string name of a desired benchmark, or "ALL".
include_ignored: A boolean, used as described in all_benchmarks().
modules: A list of module objects, used as described in all_benchmarks(). If
None, all known relevant modules are used.
Returns:
A list of benchmark.Benchmark objects.
"""
if benchmark_name == 'ALL':
return all_benchmarks(modules=modules)
benchmark = find_benchmark_with_name(
benchmark_name, include_ignored=include_ignored, modules=modules)
if benchmark is None:
return []
return [benchmark]
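# Illustrative usage sketch (not part of the original module); the benchmark name
# below is a placeholder:
#
#   benchmarks = all_benchmarks(include_ignored=True)
#   one = find_benchmark_with_name('some_benchmark_name')
#   chosen = get_chosen_benchmarks('ALL')  # equivalent to all_benchmarks()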
| google-research/tensorflow-coder | tf_coder/benchmarks/all_benchmarks.py | Python | apache-2.0 | 3,580 |
#
# Example: Find the floquet modes and quasi energies for a driven system and
# plot the floquet states/quasienergies for one period of the driving.
#
from qutip import *
from pylab import *
import time
def hamiltonian_t(t, args):
""" evaluate the hamiltonian at time t. """
H0 = args['H0']
H1 = args['H1']
w = args['w']
return H0 + sin(w * t) * H1
def H1_coeff_t(t, args):
return sin(args['w'] * t)
def qubit_integrate(delta, eps0_vec, A, omega, gamma1, gamma2, psi0, T, option):
# Hamiltonian
sx = sigmax()
sz = sigmaz()
sm = destroy(2)
# collapse operators
c_op_list = []
n_th = 0.0 # zero temperature
# relaxation
rate = gamma1 * (1 + n_th)
if rate > 0.0:
c_op_list.append(sqrt(rate) * sm)
# excitation
rate = gamma1 * n_th
if rate > 0.0:
c_op_list.append(sqrt(rate) * sm.dag())
# dephasing
rate = gamma2
if rate > 0.0:
c_op_list.append(sqrt(rate) * sz)
#quasi_energies = zeros((len(A_vec), 2))
#f_gnd_prob = zeros((len(A_vec), 2))
quasi_energies = zeros((len(eps0_vec), 2))
f_gnd_prob = zeros((len(eps0_vec), 2))
wf_gnd_prob = zeros((len(eps0_vec), 2))
for idx, eps0 in enumerate(eps0_vec):
H0 = - delta/2.0 * sx - eps0/2.0 * sz
H1 = A/2.0 * sz
# H = H0 + H1 * sin(w * t) in the 'list-string' format
H = [H0, [H1, 'sin(w * t)']]
Hargs = {'w': omega}
# find the floquet modes
f_modes,f_energies = floquet_modes(H, T, Hargs)
print "Floquet quasienergies[",idx,"] =", f_energies
quasi_energies[idx,:] = f_energies
f_gnd_prob[idx, 0] = expect(sm.dag() * sm, f_modes[0])
f_gnd_prob[idx, 1] = expect(sm.dag() * sm, f_modes[1])
f_states = floquet_states_t(f_modes, f_energies, 0, H, T, Hargs)
wf_gnd_prob[idx, 0] = expect(sm.dag() * sm, f_states[0])
wf_gnd_prob[idx, 1] = expect(sm.dag() * sm, f_states[1])
return quasi_energies, f_gnd_prob, wf_gnd_prob
#
# set up the calculation: a strongly driven two-level system
# (repeated LZ transitions)
#
delta = 0.2 * 2 * pi # qubit sigma_x coefficient
eps0 = 0.5 * 2 * pi # qubit sigma_z coefficient
gamma1 = 0.0 # relaxation rate
gamma2 = 0.0 # dephasing rate
A = 2.0 * 2 * pi
psi0 = basis(2,0) # initial state
omega = 1.0 * 2 * pi # driving frequency
T = (2*pi)/omega # driving period
param = linspace(-5.0, 5.0, 200) * 2 * pi
eps0 = param
start_time = time.time()
q_energies, f_gnd_prob, wf_gnd_prob = qubit_integrate(delta, eps0, A, omega, gamma1, gamma2, psi0, T, "dynamics")
print 'dynamics: time elapsed = ' + str(time.time() - start_time)
#
# plot the results
#
figure(1)
plot(param, real(q_energies[:,0]) / delta, 'b', param, real(q_energies[:,1]) / delta, 'r')
xlabel('A or e')
ylabel('Quasienergy')
title('Floquet quasienergies')
figure(2)
plot(param, real(f_gnd_prob[:,0]), 'b', param, real(f_gnd_prob[:,1]), 'r')
xlabel('A or e')
ylabel('Occ. prob.')
title('Floquet modes excitation probability')
figure(3)
plot(param, real(wf_gnd_prob[:,0]), 'b', param, real(wf_gnd_prob[:,1]), 'r')
xlabel('A or e')
ylabel('Occ. prob.')
title('Floquet states excitation probability')
show()
| Vutshi/qutip | examples/ex_floquet_quasienergies.py | Python | gpl-3.0 | 3,299 |
from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.utils.encoding import force_unicode
class CommentManager(models.Manager):
def for_model(self, model):
"""
QuerySet for all comments for a particular model (either an instance or
a class).
"""
ct = ContentType.objects.get_for_model(model)
qs = self.get_query_set().filter(content_type=ct)
if isinstance(model, models.Model):
qs = qs.filter(object_pk=force_unicode(model._get_pk_val()))
return qs
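# Illustrative usage sketch (not part of the original module). It assumes a Comment
# model that declares `objects = CommentManager()` and has `content_type` and
# `object_pk` fields, e.g.:
#
#   Comment.objects.for_model(BlogPost)       # comments for any BlogPost
#   Comment.objects.for_model(post_instance)  # comments for one specific instance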
| vosi/django-xcomments | comments/managers.py | Python | bsd-3-clause | 577 |
from predict import *
if __name__ == '__main__' :
# read and process data
print("Reading files...")
data = read_json(10000)
print("\nProcess data...")
X_train, X_test, Y_train, Y_test = get_processed_data(data)
# cross validation without modifying label
print("\nStart Cross Validation without modifying Label...")
depth1 = dtree_tuning(X_train,Y_train)
k_val1 = knn_tuning(X_train, Y_train)
# predict withour modifying label
print("\nStart testing with test set without modifying label...")
    dtree1 = predict_tree(X_train, Y_train, X_test, Y_test, depth1)
    knn1 = predict_knn(X_train, Y_train, X_test, Y_test, k_val1)
# Modify the label, originally 5 stars, now into three catagories
# non-favorable (-1), neutral (0), favorable (1)
print("\nModifying the label of the data...")
print("The original labels have five stars: 1,2,3,4,5")
print("Now we classify the five kinds of labels into three catagories:")
print("Non-favorable (-1): 1 star or 2 star")
print("Neutral (0): 3 star")
print("Favorable (-1): 4 star or 5 star")
Y_train_new = process_label(Y_train)
Y_test_new = process_label(Y_test)
# cross validation with modified label
print("\nStart Cross Validation after modifying label...")
depth2 = dtree_tuning(X_train,Y_train_new)
k_val2 = knn_tuning(X_train, Y_train_new)
    # predict with modified labels
    print("\nStart testing with test set after modifying label...")
    dtree2 = predict_tree(X_train, Y_train_new, X_test, Y_test_new, depth2)
    knn2 = predict_knn(X_train, Y_train_new, X_test, Y_test_new, k_val2)
# ask for user input
YorN = input("\nDo you want to test a review? Y/N : ")
if YorN != "N":
review = input("\nGreat! Please enter the review you want to test: ")
print("\nNow predict the label of the review:")
predict_review(review, knn1, dtree1)
print("\nNow predict the catagory of the review:")
predict_review(review, knn2, dtree2)
| maggieli96/35-Final-Project | Machine Learning/main.py | Python | mit | 2,011 |
import datetime, math, sys, argparse
sys.path.append("../util")
# from osxnotifications import Notifier
from sqlalchemy import create_engine
from sqlalchemy.sql import select
from MetaModel import MetaModel
from multiprocessing import Pool
class MPTableProcessor():
def __init__(self, connection_string, tables, mapper):
self.connection_string = connection_string
self.tables = tables
self.mapper = mapper
def execute(self, processes=2, verbose=False):
pool = Pool(processes=processes)
result = []
if verbose:
print('')
for i, _ in enumerate(pool.imap_unordered(self.profileOneTable, self.tables)):
result.append(_)
if verbose:
sys.stdout.write("\033[1A")
totalprogress = "\r\033[K## progress {0}/{1}: {2:.2f}% \n".format(i+1, len(self.tables), round(i/(len(self.tables)-1)*100,2))
sys.stdout.write(totalprogress)
sys.stdout.flush()
pool.close()
return result
def profileOneTable(self, table=None):
try:
engine = create_engine(self.connection_string)
conn = engine.connect()
num_rows = conn.execute(table.count()).fetchone()[0]
num_columns = len(table.columns)
num_explicit_outlinks = len(table.foreign_keys)
return self.mapper.single(table, {'num_rows': num_rows, 'num_columns': num_columns, 'num_explicit_outlinks': num_explicit_outlinks})
# return (table.name, {'num_rows': num_rows, 'num_columns': num_columns, 'num_explicit_outlinks': num_explicit_outlinks})
# cp = self.columnprocessor(values)
# return (column, cp.doOperations())
except Exception as ex:
print(ex)
finally:
conn.close()
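class SimpleTableMapper():
    """Minimal stand-in mapper (an assumption, not part of the original code).
    MPTableProcessor only needs an object exposing single(table, stats), which
    profileOneTable() above calls with the sqlalchemy table and its statistics."""
    def single(self, table, stats):
        # Mirror the tuple shape of the commented-out return in profileOneTable().
        return (table.name, stats)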
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--src", help="connection_string for the subject-database", metavar="string")
args = parser.parse_args()
mm = MetaModel(args.src)
sts = datetime.datetime.now()
    # A mapper is required by the constructor; SimpleTableMapper above is an assumed
    # stand-in based on how profileOneTable() uses it.
    processor = MPTableProcessor(connection_string = args.src, tables = mm.tables(), mapper = SimpleTableMapper())
result = processor.execute(processes=32, verbose=True)
duration = datetime.datetime.now() - sts
print('number of processed tables: ' + str(len(result)))
# Calling the notification function
# Notifier.notify(title='cobr.io ds-toolkit', subtitle='MPTableProcessor done!', message='processed: ' + str(len(result)) + ' tables in ' + str(math.floor(duration.total_seconds())) + ' seconds') | mvanderkroon/cobr | profiler/MPTableProcessor.py | Python | apache-2.0 | 2,609 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from multiprocessing import Process
import os
import sys
from SimpleHTTPServer import SimpleHTTPRequestHandler
from SocketServer import TCPServer
import click
from staticpycon import utils, gen
def do_serve():
print('Listening on 0.0.0.0:8080 ...\n')
TCPServer.allow_reuse_address = True
server = TCPServer(('0.0.0.0', 8080), SimpleHTTPRequestHandler)
os.chdir(gen.SITE_DIR)
server.serve_forever()
@click.command()
@click.option('--debug', '-d', is_flag=True, default=False,
              help=u'In debug mode, do not merge/minify assets')
@click.option('--generate', '-g', is_flag=True, default=False,
              help=u'Generate the site')
@click.option('--serve', '-s', is_flag=True, default=False,
              help=u'Start a local server')
def run(debug, generate, serve):
utils.init_logger()
if generate and serve:
        print(u'--generate and --serve cannot be used together')
sys.exit(1)
if serve:
Process(target=do_serve).start()
gen.create_site(debug=debug, use_reloader=serve, generate=generate)
if __name__ == '__main__':
run()
| PyConChina/PyConChina2017 | bin/app.py | Python | mit | 1,180 |
#!/usr/bin/env python3
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# Contributor(s): Campbell Barton, M.G. Kishalmi
#
# ***** END GPL LICENSE BLOCK *****
# <pep8 compliant>
"""
Module for accessing project file data for Blender.
Before use, call init(cmake_build_dir).
"""
__all__ = (
"SIMPLE_PROJECTFILE",
"SOURCE_DIR",
"CMAKE_DIR",
"PROJECT_DIR",
"source_list",
"is_project_file",
"is_c_header",
"is_py",
"cmake_advanced_info",
"cmake_compiler_defines",
"project_name_get"
"init",
)
import sys
if not sys.version.startswith("3"):
print("\nPython3.x needed, found %s.\nAborting!\n" %
sys.version.partition(" ")[0])
sys.exit(1)
import os
from os.path import join, dirname, normpath, abspath, splitext, exists
SOURCE_DIR = join(dirname(__file__), "..", "..")
SOURCE_DIR = normpath(SOURCE_DIR)
SOURCE_DIR = abspath(SOURCE_DIR)
SIMPLE_PROJECTFILE = False
# must initialize from 'init'
CMAKE_DIR = None
def init(cmake_path):
global CMAKE_DIR, PROJECT_DIR
# get cmake path
cmake_path = cmake_path or ""
if (not cmake_path) or (not exists(join(cmake_path, "CMakeCache.txt"))):
cmake_path = os.getcwd()
if not exists(join(cmake_path, "CMakeCache.txt")):
print("CMakeCache.txt not found in %r or %r\n"
" Pass CMake build dir as an argument, or run from that dir, aborting" %
(cmake_path, os.getcwd()))
return False
PROJECT_DIR = CMAKE_DIR = cmake_path
return True
def source_list(path, filename_check=None):
for dirpath, dirnames, filenames in os.walk(path):
# skip '.svn'
if dirpath.startswith("."):
continue
for filename in filenames:
filepath = join(dirpath, filename)
if filename_check is None or filename_check(filepath):
yield filepath
# extension checking
def is_cmake(filename):
ext = splitext(filename)[1]
return (ext == ".cmake") or (filename.endswith("CMakeLists.txt"))
def is_c_header(filename):
ext = splitext(filename)[1]
return (ext in {".h", ".hpp", ".hxx", ".hh"})
def is_py(filename):
ext = splitext(filename)[1]
return (ext == ".py")
def is_glsl(filename):
ext = splitext(filename)[1]
return (ext == ".glsl")
def is_c(filename):
ext = splitext(filename)[1]
return (ext in {".c", ".cpp", ".cxx", ".m", ".mm", ".rc", ".cc", ".inl", ".osl"})
def is_c_any(filename):
return is_c(filename) or is_c_header(filename)
def is_svn_file(filename):
dn, fn = os.path.split(filename)
filename_svn = join(dn, ".svn", "text-base", "%s.svn-base" % fn)
return exists(filename_svn)
def is_project_file(filename):
return (is_c_any(filename) or is_cmake(filename) or is_glsl(filename)) # and is_svn_file(filename)
def cmake_advanced_info():
""" Extract includes and defines from cmake.
"""
make_exe = cmake_cache_var("CMAKE_MAKE_PROGRAM")
make_exe_basename = os.path.basename(make_exe)
def create_eclipse_project():
print("CMAKE_DIR %r" % CMAKE_DIR)
if sys.platform == "win32":
cmd = 'cmake "%s" -G"Eclipse CDT4 - MinGW Makefiles"' % CMAKE_DIR
else:
if make_exe_basename.startswith(("make", "gmake")):
cmd = 'cmake "%s" -G"Eclipse CDT4 - Unix Makefiles"' % CMAKE_DIR
elif make_exe_basename.startswith("ninja"):
cmd = 'cmake "%s" -G"Eclipse CDT4 - Ninja"' % CMAKE_DIR
else:
raise Exception("Unknown make program %r" % make_exe)
os.system(cmd)
return join(CMAKE_DIR, ".cproject")
includes = []
defines = []
project_path = create_eclipse_project()
if not exists(project_path):
print("Generating Eclipse Prokect File Failed: %r not found" % project_path)
return None, None
from xml.dom.minidom import parse
tree = parse(project_path)
# to check on nicer xml
# f = open(".cproject_pretty", 'w')
# f.write(tree.toprettyxml(indent=" ", newl=""))
ELEMENT_NODE = tree.ELEMENT_NODE
cproject, = tree.getElementsByTagName("cproject")
for storage in cproject.childNodes:
if storage.nodeType != ELEMENT_NODE:
continue
if storage.attributes["moduleId"].value == "org.eclipse.cdt.core.settings":
cconfig = storage.getElementsByTagName("cconfiguration")[0]
for substorage in cconfig.childNodes:
if substorage.nodeType != ELEMENT_NODE:
continue
moduleId = substorage.attributes["moduleId"].value
# org.eclipse.cdt.core.settings
# org.eclipse.cdt.core.language.mapping
# org.eclipse.cdt.core.externalSettings
# org.eclipse.cdt.core.pathentry
# org.eclipse.cdt.make.core.buildtargets
if moduleId == "org.eclipse.cdt.core.pathentry":
for path in substorage.childNodes:
if path.nodeType != ELEMENT_NODE:
continue
kind = path.attributes["kind"].value
if kind == "mac":
# <pathentry kind="mac" name="PREFIX" path="" value=""/opt/blender25""/>
defines.append((path.attributes["name"].value, path.attributes["value"].value))
elif kind == "inc":
# <pathentry include="/data/src/blender/blender/source/blender/editors/include" kind="inc" path="" system="true"/>
includes.append(path.attributes["include"].value)
else:
pass
return includes, defines
def cmake_cache_var(var):
cache_file = open(join(CMAKE_DIR, "CMakeCache.txt"), encoding='utf-8')
    lines = [l_strip for l in cache_file
             for l_strip in (l.strip(),)
             if l_strip
             if not l_strip.startswith("//")
             if not l_strip.startswith("#")]
cache_file.close()
for l in lines:
if l.split(":")[0] == var:
return l.split("=", 1)[-1]
return None
def cmake_compiler_defines():
compiler = cmake_cache_var("CMAKE_C_COMPILER") # could do CXX too
if compiler is None:
print("Couldn't find the compiler, os defines will be omitted...")
return
import tempfile
temp_c = tempfile.mkstemp(suffix=".c")[1]
temp_def = tempfile.mkstemp(suffix=".def")[1]
os.system("%s -dM -E %s > %s" % (compiler, temp_c, temp_def))
temp_def_file = open(temp_def)
lines = [l.strip() for l in temp_def_file if l.strip()]
temp_def_file.close()
os.remove(temp_c)
os.remove(temp_def)
return lines
def project_name_get():
return cmake_cache_var("CMAKE_PROJECT_NAME")
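# Illustrative usage sketch (not part of the original module); the build path is a
# placeholder:
#
#   import project_info
#   if project_info.init("/path/to/cmake-build-dir"):
#       sources = list(project_info.source_list(project_info.SOURCE_DIR,
#                                               filename_check=project_info.is_project_file))
#       includes, defines = project_info.cmake_advanced_info()
#       print(project_info.project_name_get())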
| Passtechsoft/TPEAlpGen | blender/build_files/cmake/project_info.py | Python | gpl-3.0 | 7,548 |
import abc
from collections.abc import Mapping, MutableMapping
class MultiMapping(Mapping):
@abc.abstractmethod
def getall(self, key, default=None):
raise KeyError
@abc.abstractmethod
def getone(self, key, default=None):
raise KeyError
class MutableMultiMapping(MultiMapping, MutableMapping):
@abc.abstractmethod
def add(self, key, value):
raise NotImplementedError
@abc.abstractmethod
def extend(self, *args, **kwargs):
raise NotImplementedError
@abc.abstractmethod
def popone(self, key, default=None):
raise KeyError
@abc.abstractmethod
def popall(self, key, default=None):
raise KeyError
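# Illustrative sketch (not part of the original module): a minimal list-backed
# implementation of the MutableMultiMapping ABC above, shown only to clarify the
# intended semantics of getall/getone/add/extend/popone/popall.
class _ListMultiDict(MutableMultiMapping):

    def __init__(self):
        self._items = []  # ordered list of (key, value) pairs

    # Mapping interface
    def __getitem__(self, key):
        for k, v in self._items:
            if k == key:
                return v
        raise KeyError(key)

    def __setitem__(self, key, value):
        self._items = [(k, v) for k, v in self._items if k != key]
        self._items.append((key, value))

    def __delitem__(self, key):
        remaining = [(k, v) for k, v in self._items if k != key]
        if len(remaining) == len(self._items):
            raise KeyError(key)
        self._items = remaining

    def __iter__(self):
        # yields each key occurrence, including duplicates added via add()
        return iter(k for k, _ in self._items)

    def __len__(self):
        return len(self._items)

    # MultiMapping interface
    def getall(self, key, default=None):
        values = [v for k, v in self._items if k == key]
        return values if values else default

    def getone(self, key, default=None):
        for k, v in self._items:
            if k == key:
                return v
        return default

    # MutableMultiMapping interface
    def add(self, key, value):
        self._items.append((key, value))

    def extend(self, *args, **kwargs):
        # simplistic: duplicate keys in the source collapse via dict()
        for key, value in dict(*args, **kwargs).items():
            self.add(key, value)

    def popone(self, key, default=None):
        for i, (k, v) in enumerate(self._items):
            if k == key:
                del self._items[i]
                return v
        return default

    def popall(self, key, default=None):
        values = [v for k, v in self._items if k == key]
        if not values:
            return default
        self._items = [(k, v) for k, v in self._items if k != key]
        return values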
| jonyroda97/redbot-amigosprovaveis | lib/multidict/_abc.py | Python | gpl-3.0 | 698 |
###############################################################################
# ilastik: interactive learning and segmentation toolkit
#
# Copyright (C) 2011-2014, the ilastik developers
# <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# In addition, as a special exception, the copyright holders of
# ilastik give you permission to combine ilastik with applets,
# workflows and plugins which are not covered under the GNU
# General Public License.
#
# See the LICENSE file for details. License information is also available
# on the ilastik web site at:
# http://ilastik.org/license.html
###############################################################################
from PyQt4.QtCore import Qt, QAbstractItemModel, QModelIndex
from volumina.utility import decode_to_qstring
from lazyflow.utility import PathComponents
from ilastik.utility import bind
from opDataSelection import DatasetInfo
class LaneColumn():
LabelsAllowed = 0
NumColumns = 1
class DatasetInfoColumn():
Name = 0
NumColumns = 1
def rowOfButtonsProxy(model_cls):
"""
Given a TableModel class, return a new class that pretends to have an
extra row at the end. This row is used to display "Add..." buttons in
the GUI.
"""
class ProxyModel(model_cls):
def __init__(self, *args, **kwds):
super(ProxyModel, self).__init__(*args, **kwds)
def rowCount(self, parent=QModelIndex()):
"""
Return number of rows in the model.
This proxy model keeps an extra row at the end for buttons.
"""
return super(ProxyModel, self).rowCount(parent) + 1
def headerData(self, section, orientation, role=Qt.DisplayRole ):
"""
Return header information for row/column.
Skip vertical header for the last row, which is used for buttons.
"""
if orientation == Qt.Vertical:
if section >= super(ProxyModel, self).rowCount():
return ""
return super(ProxyModel, self).headerData(section, orientation,
role)
def _getDisplayRoleData(self, index):
# Last row is just buttons
if index.row() >= super(ProxyModel, self).rowCount():
return ""
return model_cls._getDisplayRoleData(self, index)
return ProxyModel
@rowOfButtonsProxy
class DataLaneSummaryTableModel(QAbstractItemModel):
def __init__(self, parent, topLevelOperator):
"""
:param topLevelOperator: An instance of OpMultiLaneDataSelectionGroup
"""
# super does not work here in Python 2.x, decorated class confuses it
QAbstractItemModel.__init__(self, parent)
self._op = topLevelOperator
def handleNewLane( multislot, laneIndex):
assert multislot is self._op.DatasetGroup
self.beginInsertRows( QModelIndex(), laneIndex, laneIndex )
self.endInsertRows()
def handleDatasetInfoChanged(slot):
# Get the row of this slot
laneSlot = slot.operator
laneIndex = laneSlot.operator.index( laneSlot )
# FIXME: For now, we update the whole row.
# Later, update only the columns that correspond to this dataset.
firstIndex = self.createIndex(laneIndex, 0)
lastIndex = self.createIndex(laneIndex, self.columnCount()-1)
self.dataChanged.emit(firstIndex, lastIndex)
def handleNewDatasetInserted(mslot, index):
mslot[index].notifyDirty( bind(handleDatasetInfoChanged) )
for laneIndex, datasetMultiSlot in enumerate(self._op.DatasetGroup):
datasetMultiSlot.notifyInserted( bind(handleNewDatasetInserted) )
for roleIndex, datasetSlot in enumerate(datasetMultiSlot):
handleNewDatasetInserted( datasetMultiSlot, roleIndex )
self._op.DatasetGroup.notifyInserted( bind(handleNewLane) )
def handleLaneRemoved( multislot, laneIndex ):
assert multislot is self._op.DatasetGroup
self.beginRemoveRows( QModelIndex(), laneIndex, laneIndex )
self.endRemoveRows()
self._op.DatasetGroup.notifyRemoved( bind(handleLaneRemoved) )
# Any lanes that already exist must be added now.
for laneIndex, slot in enumerate(self._op.DatasetGroup):
handleNewLane( self._op.DatasetGroup, laneIndex )
def columnCount(self, parent=QModelIndex()):
if not self._op.DatasetRoles.ready():
return 0
roles = self._op.DatasetRoles.value
return LaneColumn.NumColumns + DatasetInfoColumn.NumColumns * len(roles)
def rowCount(self, parent=QModelIndex()):
return len( self._op.ImageGroup )
def data(self, index, role=Qt.DisplayRole):
if role == Qt.DisplayRole:
return self._getDisplayRoleData(index)
def index(self, row, column, parent=QModelIndex()):
return self.createIndex( row, column, object=None )
def parent(self, index):
return QModelIndex()
def headerData(self, section, orientation, role=Qt.DisplayRole ):
if role != Qt.DisplayRole:
return None
if orientation == Qt.Vertical:
return section+1
if section == LaneColumn.LabelsAllowed:
return "Labelable"
infoColumn = section - LaneColumn.NumColumns
roleIndex = infoColumn // DatasetInfoColumn.NumColumns
infoColumn %= LaneColumn.NumColumns
if infoColumn == DatasetInfoColumn.Name:
if self._op.DatasetRoles.ready():
return self._op.DatasetRoles.value[roleIndex]
return ""
assert False, "Unknown header column: {}".format( section )
def _getDisplayRoleData(self, index):
laneIndex = index.row()
if index.column() < LaneColumn.NumColumns:
if index.column() == LaneColumn.LabelsAllowed:
firstInfoSlot = self._op.DatasetGroup[laneIndex][0]
if not firstInfoSlot.ready():
return ""
info = firstInfoSlot.value
return { True: "True", False : "False" }[ info.allowLabels ]
else:
assert False
## Dataset info item
roleIndex = (index.column() - LaneColumn.NumColumns) // DatasetInfoColumn.NumColumns
datasetInfoIndex = (index.column() - LaneColumn.NumColumns) % DatasetInfoColumn.NumColumns
datasetSlot = self._op.DatasetGroup[laneIndex][roleIndex]
if not datasetSlot.ready():
return ""
UninitializedDisplayData = { DatasetInfoColumn.Name : "<please select>" }
datasetSlot = self._op.DatasetGroup[laneIndex][roleIndex]
if datasetSlot.ready():
datasetInfo = self._op.DatasetGroup[laneIndex][roleIndex].value
else:
return UninitializedDisplayData[ datasetInfoIndex ]
if datasetInfoIndex == DatasetInfoColumn.Name:
if datasetInfo.nickname is not None and datasetInfo.nickname != "":
return datasetInfo.nickname
return decode_to_qstring( PathComponents( datasetInfo.filePath ).filename )
if datasetInfoIndex == DatasetInfoColumn.Location:
LocationNames = { DatasetInfo.Location.FileSystem : "External File",
DatasetInfo.Location.ProjectInternal : "Project File" }
return LocationNames[ datasetInfo.location ]
assert False, "Unknown column"
| nielsbuwen/ilastik | ilastik/applets/dataSelection/dataLaneSummaryTableModel.py | Python | gpl-3.0 | 7,961 |
#!/usr/bin/env python
import numpy as np
import math
from multi_link_common import *
#height is probably 0 from multi_link_common.py
#total mass and total length are also defined in multi_link_common.py
num_links = 3.0
link_length = total_length/num_links
link_mass = total_mass/num_links
ee_location = np.matrix([0., -total_length, height]).T
#bod_shapes = ['cube', 'cube', 'cube', 'cube']
bod_shapes = ['capsule', 'capsule', 'capsule', 'capsule']
bod_dimensions = [[0.03, 0.03, link_length]]*3
bod_com_position = [[0., -link_length/2., height],
[0., -3.0/2.0*link_length, height],
[0., -5.0/2.0*link_length, height]]
bod_color = [[0.4, 0.4, 0.4, 1], [0.8, 0.8, 0.8, 1], [0.33, 0.33, 0.33, 1]]
bod_num_links = 3
bod_mass = [link_mass]*bod_num_links
bod_names = ['link1', 'link2', 'link3']
bodies ={'shapes':bod_shapes, 'dim':bod_dimensions, 'num_links':bod_num_links,
'com_pos':bod_com_position, 'mass':bod_mass, 'name':bod_names, 'color':bod_color}
b_jt_anchor = [[0., 0., height],
[0., -link_length, height],
[0., -2*link_length, height]]
b_jt_kp = [30., 20., 15.]
b_jt_kd = [15., 10., 8.]
b_jt_limits_max = np.radians([180, 120, 120]).tolist()
b_jt_limits_min = np.radians([-180, -120, -120]).tolist()
b_jt_axis = [[0.,0.,1.],[0.,0.,1.], [0.,0.,1.]]
b_jt_attach = [[0, -1], [1, 0], [2,1]]
b_jt_start = [-1.74, 1.78, 1.86]  # gives ee pos of [0, -0.2, 0]
b_jts = {'anchor':b_jt_anchor, 'axis':b_jt_axis, 'jt_lim_max':b_jt_limits_max,
'jt_lim_min':b_jt_limits_min, 'jt_init':b_jt_start, 'jt_attach':b_jt_attach,
'jt_stiffness':b_jt_kp, 'jt_damping':b_jt_kd}
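# A small sanity-check sketch (not part of the original config): verify that
# every per-link list matches bod_num_links and that the joint anchors are
# spaced one link_length apart. Run this module directly to exercise it.
if __name__ == '__main__':
    for name, seq in [('shapes', bod_shapes), ('dim', bod_dimensions),
                      ('com_pos', bod_com_position), ('mass', bod_mass),
                      ('names', bod_names), ('color', bod_color)]:
        assert len(seq) == bod_num_links, '%s has %d entries' % (name, len(seq))
    for i, anchor in enumerate(b_jt_anchor):
        assert abs(anchor[1] + i*link_length) < 1e-9, 'anchor %d is misplaced' % i
    print('config consistent: %d links of length %.3f' % (bod_num_links, link_length))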
| gt-ros-pkg/hrl-haptic-manip | hrl_common_code_darpa_m3/src/hrl_common_code_darpa_m3/robot_config/multi_link_three_planar.py | Python | apache-2.0 | 1,727 |
"""
Contains the manager class and exceptions for operations surrounding the creation,
update, and deletion on a Pulp user.
"""
from gettext import gettext as _
import re
from celery import task
from pulp.server import config
from pulp.server.async.tasks import Task
from pulp.server.db.model.auth import User
from pulp.server.exceptions import (PulpDataException, DuplicateResource, InvalidValue,
MissingResource)
from pulp.server.managers import factory
from pulp.server.managers.auth.role.cud import SUPER_USER_ROLE
# letters, numbers, underscore, hyphen, period
_USER_LOGIN_REGEX = re.compile(r'^[.\-_A-Za-z0-9]+$')
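# Illustrative matches (not from the original source): logins such as
# 'jane.doe', 'build-bot_01' or 'admin' satisfy the pattern, while values
# containing spaces or other punctuation do not, e.g.:
#   _USER_LOGIN_REGEX.match('jane.doe')   # -> match object
#   _USER_LOGIN_REGEX.match('user@host')  # -> None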
class UserManager(object):
"""
Performs user related functions relating to CRUD operations.
"""
@staticmethod
def create_user(login, password=None, name=None, roles=None):
"""
        Creates a new Pulp user and adds it to the specified roles.
@param login: login name / unique identifier for the user
@type login: str
@param password: password for login credentials
@type password: str
@param name: user's full name
@type name: str
@param roles: list of roles user will belong to
@type roles: list
@raise DuplicateResource: if there is already a user with the requested login
@raise InvalidValue: if any of the fields are unacceptable
"""
existing_user = User.get_collection().find_one({'login': login})
if existing_user is not None:
raise DuplicateResource(login)
invalid_values = []
if login is None or _USER_LOGIN_REGEX.match(login) is None:
invalid_values.append('login')
if invalid_type(name, basestring):
invalid_values.append('name')
if invalid_type(roles, list):
invalid_values.append('roles')
if invalid_values:
raise InvalidValue(invalid_values)
        # Use the login as the user's name if one was not specified
name = name or login
roles = roles or None
# Encode plain-text password
hashed_password = None
if password:
hashed_password = factory.password_manager().hash_password(password)
# Creation
create_me = User(login=login, password=hashed_password, name=name, roles=roles)
User.get_collection().save(create_me, safe=True)
# Grant permissions
permission_manager = factory.permission_manager()
permission_manager.grant_automatic_permissions_for_user(create_me['login'])
# Retrieve the user to return the SON object
created = User.get_collection().find_one({'login': login})
created.pop('password')
return created
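    # A hypothetical call sketch (not from the original source); in a running
    # Pulp server this is normally dispatched as the Celery task defined at the
    # bottom of this module:
    #   user = UserManager.create_user('jdoe', password='secret',
    #                                  name='Jane Doe', roles=[SUPER_USER_ROLE])
    #   user['login']   # -> 'jdoe'; the 'password' key is stripped from the result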
@staticmethod
def update_user(login, delta):
"""
        Updates the user. The following fields may be updated through this call:
* password
* name
* roles
        Any other field found in delta raises an InvalidValue error.
@param login: identifies the user
@type login: str
@param delta: list of attributes and their new values to change
@type delta: dict
@raise MissingResource: if there is no user with login
"""
user = User.get_collection().find_one({'login': login})
if user is None:
raise MissingResource(login)
# Check invalid values
invalid_values = []
if 'password' in delta:
password = delta.pop('password')
if password is None or invalid_type(password, basestring):
invalid_values.append('password')
else:
user['password'] = factory.password_manager().hash_password(password)
if 'name' in delta:
name = delta.pop('name')
if name is None or invalid_type(name, basestring):
invalid_values.append('name')
else:
user['name'] = name
if 'roles' in delta:
roles = delta.pop('roles')
if roles is None or invalid_type(roles, list):
invalid_values.append('roles')
else:
# Add new roles to the user and remove deleted roles from the user according to
# delta
role_manager = factory.role_manager()
old_roles = user['roles']
for new_role in roles:
if new_role not in old_roles:
role_manager.add_user_to_role(new_role, login)
for old_role in old_roles:
if old_role not in roles:
role_manager.remove_user_from_role(old_role, login)
user['roles'] = roles
if invalid_values:
raise InvalidValue(invalid_values)
if delta:
raise InvalidValue(delta.keys())
User.get_collection().save(user, safe=True)
# Retrieve the user to return the SON object
updated = User.get_collection().find_one({'login': login})
updated.pop('password')
return updated
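    # For illustration (assumed usage, not from the original source), a delta
    # carries only the fields to change:
    #   UserManager.update_user('jdoe', {'name': 'Jane D.', 'roles': [SUPER_USER_ROLE]})
    # Keys other than password, name and roles raise InvalidValue (see above).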
@staticmethod
def delete_user(login):
"""
        Deletes the given user. Deletion of the last superuser is not permitted.
@param login: identifies the user being deleted
@type login: str
@raise MissingResource: if the given user does not exist
@raise InvalidValue: if login value is invalid
"""
# Raise exception if login is invalid
if login is None or invalid_type(login, basestring):
raise InvalidValue(['login'])
# Check whether user exists
found = User.get_collection().find_one({'login': login})
if found is None:
raise MissingResource(login)
# Make sure user is not the last super user
if factory.user_query_manager().is_last_super_user(login):
            raise PulpDataException(_("The last superuser [%s] cannot be deleted") % login)
# Revoke all permissions from the user
permission_manager = factory.permission_manager()
permission_manager.revoke_all_permissions_from_user(login)
User.get_collection().remove({'login': login}, safe=True)
def ensure_admin(self):
"""
This function ensures that there is at least one super user for the system.
If no super users are found, the default admin user (from the pulp config)
is looked up or created and added to the super users role.
"""
role_manager = factory.role_manager()
if self.get_admins():
return
default_login = config.config.get('server', 'default_login')
admin = User.get_collection().find_one({'login': default_login})
if admin is None:
default_password = config.config.get('server', 'default_password')
admin = UserManager.create_user(login=default_login,
password=default_password)
role_manager.add_user_to_role(SUPER_USER_ROLE, default_login)
@staticmethod
def get_admins():
"""
Get a list of users with the super-user role.
:return: list of users who are admins.
:rtype: list of User
"""
user_query_manager = factory.user_query_manager()
try:
super_users = user_query_manager.find_users_belonging_to_role(SUPER_USER_ROLE)
return super_users
except MissingResource:
return None
create_user = task(UserManager.create_user, base=Task)
delete_user = task(UserManager.delete_user, base=Task, ignore_result=True)
update_user = task(UserManager.update_user, base=Task)
def invalid_type(input_value, valid_type):
"""
@return: true if input_value is not of valid_type
@rtype: bool
"""
if input_value is not None and not isinstance(input_value, valid_type):
return True
return False
| rbramwell/pulp | server/pulp/server/managers/auth/user/cud.py | Python | gpl-2.0 | 7,971 |
from django.contrib.auth import views
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from django.template import loader
from imager_images.models import Photo
from registration.backends.hmac.views import RegistrationView
from django.views.generic.base import TemplateView
from django.urls import reverse
class NewRegView(RegistrationView):
"""If the user is logged in, this view will redirect them"""
def get(self, request):
if request.user.is_authenticated():
            # reverse() yields a URL, not a template name, so redirect as the docstring promises.
            return HttpResponseRedirect(reverse('profile'))
return super().get(request)
def post(self, request):
return views.login(request)
def index(request):
    published_photo_query = Photo.objects.all()
    photo = published_photo_query.order_by('?').first()
content = {'picture': photo}
return render(request, 'imagersite/home_page_splash.html', content)
| ilikesounds/django-imager | imagersite/views.py | Python | mit | 917 |
from django.shortcuts import render
# @todo get_http_page, returns tuple (status/headers, content)
# @todo rename get_http_status (move to core.http?)
# @todo what about fragments? normal.no/#doner
# http://docs.python-requests.org/en/latest/
import httplib
import urlparse
def get_http_status (urlstr):
url = urlparse.urlsplit (urlstr)
assert not url.query # q: what to do with query string?
try:
conn = httplib.HTTPConnection (url.hostname)
        conn.request ('HEAD', url.path or '/')  # an empty path would make an invalid request line
return conn.getresponse().status
except StandardError:
return None # @todo return 500 instead of None?
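# A minimal usage sketch (hypothetical URL, not from the original source):
#   status = get_http_status ('http://example.com/about')
#   if status is None or status >= 400:
#       ...  # treat the link as broken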
# Stolen (and modified) from: https://djangosnippets.org/snippets/821/
def render_to (template):
"""
    Decorator for Django views that passes the returned dict to the render
    function, using the given template and a RequestContext built from the
    request. If the view does not return a dict, the decorator returns the
    output unchanged. The view may also return a two-tuple of
    (dict, template name); that template name is used verbatim and overrides
    the one given as a parameter.
Parameters:
- template: Template to use. It can be prefixed with app-name,
e.g., @render_to ('myapp:mytemplate.html').
"""
template = template.replace (':', '/', 1) # app prefix
def renderer (func):
def wrapper (request, *args, **kw):
output = func (request, *args, **kw)
if isinstance (output, (list, tuple)):
return render (request, output[1], output[0])
elif isinstance (output, dict):
return render (request, template, output)
return output
return wrapper
return renderer
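# A minimal usage sketch (hypothetical view, model and templates, not from the
# original source):
#
#   @render_to ('blog:post_list.html')        # resolved to 'blog/post_list.html'
#   def post_list (request):
#       return {'posts': Post.objects.all()}  # rendered with the default template
#
#   @render_to ('blog:post_list.html')
#   def post_detail (request, pk):
#       # The returned template name is used verbatim and overrides the default.
#       return {'post': get_object_or_404 (Post, pk=pk)}, 'blog/post_detail.html'
#
# Returning anything else (e.g. an HttpResponseRedirect) is passed through untouched.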
| normalnorway/normal.no | django/core/shortcuts.py | Python | gpl-3.0 | 1,769 |