# -*- coding:utf-8 -*-
'''
Created on Feb 16, 2014
@author: xuer
'''
import logging
import math
from common.JieQi.JDate import JDate
logger = logging.getLogger(__name__)
# ======== Angle conversions ===============
rad = 180 * 3600 / math.pi  # arc-seconds per radian
RAD = 180 / math.pi  # degrees per radian
def rad2mrad(v):  # normalize an angle to the range [0, 2*pi)
v = v % (2 * math.pi)
return v
def rad2str(d, tim):  # convert an angle in radians to a formatted string
# tim=0 sample output: -23°59' 48.23"
# tim=1 sample output: 18h 29m 44.52s
s, w1, w2, w3 = "+", "°", "'", '"'
if (d < 0):
d, s = -d, '-'
if tim:
d *= 12 / math.pi
w1, w2, w3 = "h ", "m ", "s "
else:
d *= 180 / math.pi
a = math.floor(d)
d = (d - a) * 60
b = math.floor(d)
d = (d - b) * 60
c = math.floor(d)
d = (d - c) * 100
d = math.floor(d + 0.5)
if (d >= 100):
d -= 100
c += 1
if (c >= 60):
c -= 60
b += 1
if (b >= 60):
b -= 60
a += 1
a = " " + str(int(a))
b = "0" + str(int(b))
c = "0" + str(int(c))
d = "0" + str(int(d))
s += a[len(a) - 3:] + w1
s += b[len(b) - 2:] + w2
s += c[len(c) - 2:] + "."
s += d[len(d) - 2:] + w3
return s
# ================ Calendar computation ===============
J2000 = 2451545  # Julian day number of J2000.0 (2000-01-01 12:00:00 Greenwich mean time)
# JDate
# ========= Obliquity of the ecliptic and ecliptic/equatorial conversion ===========
hcjjB = [84381.448, -46.8150, -0.00059, 0.001813]  # obliquity polynomial coefficients
preceB = [0, 50287.92262, 111.24406, 0.07699, -0.23479, -0.00178, 0.00018, 0.00001]  # precession p along the ecliptic of date
def hcjj1(t):  # return the obliquity of the ecliptic (normal accuracy); very accurate over short periods
t1 = t / 36525.0
t2 = t1 * t1
t3 = t2 * t1
return (hcjjB[0] + hcjjB[1] * t1 + hcjjB[2] * t2 + hcjjB[3] * t3) / rad
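# Quick sanity check (a sketch, not in the original source): at t=0 (J2000.0)
# the polynomial above reduces to its constant term, 84381.448 arc-seconds,
# i.e. a mean obliquity of 84381.448/3600 = 23.4392911 degrees.
assert abs(hcjj1(0) * RAD - 84381.448 / 3600.0) < 1e-9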
def HCconv(JW, E):  # ecliptic-to-equatorial conversion (coordinate rotation)
HJ = rad2mrad(JW[0])
HW = JW[1]
sinE = math.sin(E)
cosE = math.cos(E)
sinW = cosE * math.sin(HW) + sinE * math.cos(HW) * math.sin(HJ)
J = math.atan2(math.sin(HJ) * cosE - math.tan(HW) * sinE, math.cos(HJ))
JW[0] = rad2mrad(J)
JW[1] = math.asin(sinW)
def addPrece(jd, zb):  # apply precession
t = 1
v = 0
t1 = jd / 365250.0
for i in range(8):
t *= t1
v += preceB[i] * t
zb[0] = rad2mrad(zb[0] + (v + 2.9965 * t1) / rad)
# =============== Aberration ==================
GXC_e = [0.016708634, -0.000042037, -0.0000001267]  # eccentricity
GXC_p = [102.93735 / RAD, 1.71946 / RAD, 0.00046 / RAD]  # perihelion
GXC_l = [280.4664567 / RAD, 36000.76982779 / RAD, 0.0003032028 / RAD, 1.0 / 49931000 / RAD, -1.0 / 153000000 / RAD]  # Sun's mean longitude
GXC_k = 20.49552 / rad  # constant of aberration
def addGxc(t, zb):  # annual stellar aberration (in ecliptic coordinates)
t1 = t / 36525.0
t2 = t1 * t1
t3 = t2 * t1
t4 = t3 * t1
L = GXC_l[0] + GXC_l[1] * t1 + GXC_l[2] * t2 + GXC_l[3] * t3 + GXC_l[4] * t4
p = GXC_p[0] + GXC_p[1] * t1 + GXC_p[2] * t2
e = GXC_e[0] + GXC_e[1] * t1 + GXC_e[2] * t2
dL = L - zb[0]
dP = p - zb[0]
zb[0] -= GXC_k * (math.cos(dL) - e * math.cos(dP)) / math.cos(zb[1])
zb[1] -= GXC_k * math.sin(zb[1]) * (math.sin(dL) - e * math.sin(dP))
zb[0] = rad2mrad(zb[0])
# =============== Nutation ==================
# nutation table
nutB = [
2.1824391966, -33.757045954, 0.0000362262, 3.7340E-08, -2.8793E-10, -171996, -1742, 92025, 89,
3.5069406862, 1256.663930738, 0.0000105845, 6.9813E-10, -2.2815E-10, -13187, -16, 5736, -31,
1.3375032491, 16799.418221925, -0.0000511866, 6.4626E-08, -5.3543E-10, -2274, -2, 977, -5,
4.3648783932, -67.514091907, 0.0000724525, 7.4681E-08, -5.7586E-10, 2062, 2, -895, 5,
0.0431251803, -628.301955171, 0.0000026820, 6.5935E-10, 5.5705E-11, -1426, 34, 54, -1,
2.3555557435, 8328.691425719, 0.0001545547, 2.5033E-07, -1.1863E-09, 712, 1, -7, 0,
3.4638155059, 1884.965885909, 0.0000079025, 3.8785E-11, -2.8386E-10, -517, 12, 224, -6,
5.4382493597, 16833.175267879, -0.0000874129, 2.7285E-08, -2.4750E-10, -386, -4, 200, 0,
3.6930589926, 25128.109647645, 0.0001033681, 3.1496E-07, -1.7218E-09, -301, 0, 129, -1,
3.5500658664, 628.361975567, 0.0000132664, 1.3575E-09, -1.7245E-10, 217, -5, -95, 3]
def nutation(t):  # compute nutation in longitude and in obliquity
d = {'Lon': 0, 'Obl': 0}
t /= 36525.0
t1 = t
t2 = t1 * t1
t3 = t2 * t1
t4 = t3 * t1
# t5 = t4 * t1
for i in range(0, len(nutB), 9):
c = nutB[i] + nutB[i + 1] * t1 + nutB[i + 2] * t2 + nutB[i + 3] * t3 + nutB[i + 4] * t4
d['Lon'] += (nutB[i + 5] + nutB[i + 6] * t / 10) * math.sin(c)  # nutation in longitude
d['Obl'] += (nutB[i + 7] + nutB[i + 8] * t / 10) * math.cos(c)  # nutation in obliquity
d['Lon'] /= rad * 10000  # nutation in longitude
d['Obl'] /= rad * 10000  # nutation in obliquity
return d
def nutationRaDec(t, zb):  # compute nutation in right ascension and in declination
Ra = zb[0]
Dec = zb[1]
E = hcjj1(t)
sinE = math.sin(E)
cosE = math.cos(E)
d = nutation(t)  # nutation in longitude and obliquity
cosRa = math.cos(Ra)
sinRa = math.sin(Ra)
tanDec = math.tan(Dec)
zb[0] += (cosE + sinE * sinRa * tanDec) * d['Lon'] - cosRa * tanDec * d['Obl']  # nutation in right ascension
zb[1] += sinE * cosRa * d['Lon'] + sinRa * d['Obl']  # nutation in declination
zb[0] = rad2mrad(zb[0])
# ================= Tables of lunar and Earth motion parameters =================
# ***************************************
# * If viewing this code in Notepad, turn off "Word Wrap" in the "Format" menu
# * E10 describes the Earth; its format is:
# * an array in which every 3 numbers form one record; call the record's 3 numbers A, B, C
# * rec = A*cos(B+C*t), where t is the count of Julian millennia from J2000
# * summing the result (rec) of every record gives the periodic part L0 of the Earth's heliocentric longitude
# * E11 format: rec = A*cos(B+C*t) * t; the sum gives the Poisson term L1
# * E12 format: rec = A*cos(B+C*t) * t*t; the sum gives the Poisson term L2
# * E13 format: rec = A*cos(B+C*t) * t*t*t; the sum gives the Poisson term L3
# * finally the Earth's heliocentric longitude: L = L0+L1+L2+L3+...
# * E20,E21,E22,E23... are used for the latitude
# * M10,M11 and the like describe the Moon; see the Mnn() function for how they are used
# *****************************************
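# Illustration (a sketch, not part of the original tables): one periodic-term
# record (A, B, C) contributes A*cos(B + C*t), which is exactly what Enn()
# below sums over a whole table.
def _rec(A, B, C, t):
    # t is the count of Julian millennia from J2000
    return A * math.cos(B + C * t)
# e.g. the first E10 record (A=1.75347045673, B=C=0) contributes a constant.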
# Earth motion, VSOP87 parameters
# longitude, periodic terms
E10 = [1.75347045673, 0.00000000000, 0.0000000000, 0.03341656456, 4.66925680417, 6283.0758499914, 0.00034894275,
4.62610241759, 12566.1516999828, 0.00003417571, 2.82886579606, 3.5231183490,
0.00003497056, 2.74411800971, 5753.3848848968, 0.00003135896, 3.62767041758, 77713.7714681205, 0.00002676218,
4.41808351397, 7860.4193924392, 0.00002342687, 6.13516237631, 3930.2096962196,
0.00001273166, 2.03709655772, 529.6909650946, 0.00001324292, 0.74246356352, 11506.7697697936, 0.00000901855,
2.04505443513, 26.2983197998, 0.00001199167, 1.10962944315, 1577.3435424478,
0.00000857223, 3.50849156957, 398.1490034082, 0.00000779786, 1.17882652114, 5223.6939198022, 0.00000990250,
5.23268129594, 5884.9268465832, 0.00000753141, 2.53339053818, 5507.5532386674,
0.00000505264, 4.58292563052, 18849.2275499742, 0.00000492379, 4.20506639861, 775.5226113240, 0.00000356655,
2.91954116867, 0.0673103028, 0.00000284125, 1.89869034186, 796.2980068164,
0.00000242810, 0.34481140906, 5486.7778431750, 0.00000317087, 5.84901952218, 11790.6290886588, 0.00000271039,
0.31488607649, 10977.0788046990, 0.00000206160, 4.80646606059, 2544.3144198834,
0.00000205385, 1.86947813692, 5573.1428014331, 0.00000202261, 2.45767795458, 6069.7767545534, 0.00000126184,
1.08302630210, 20.7753954924, 0.00000155516, 0.83306073807, 213.2990954380,
0.00000115132, 0.64544911683, 0.9803210682, 0.00000102851, 0.63599846727, 4694.0029547076, 0.00000101724,
4.26679821365, 7.1135470008, 0.00000099206, 6.20992940258, 2146.1654164752,
0.00000132212, 3.41118275555, 2942.4634232916, 0.00000097607, 0.68101272270, 155.4203994342, 0.00000085128,
1.29870743025, 6275.9623029906, 0.00000074651, 1.75508916159, 5088.6288397668,
0.00000101895, 0.97569221824, 15720.8387848784, 0.00000084711, 3.67080093025, 71430.6956181291, 0.00000073547,
4.67926565481, 801.8209311238, 0.00000073874, 3.50319443167, 3154.6870848956,
0.00000078756, 3.03698313141, 12036.4607348882, 0.00000079637, 1.80791330700, 17260.1546546904, 0.00000085803,
5.98322631256, 161000.6857376741, 0.00000056963, 2.78430398043, 6286.5989683404,
0.00000061148, 1.81839811024, 7084.8967811152, 0.00000069627, 0.83297596966, 9437.7629348870, 0.00000056116,
4.38694880779, 14143.4952424306, 0.00000062449, 3.97763880587, 8827.3902698748,
0.00000051145, 0.28306864501, 5856.4776591154, 0.00000055577, 3.47006009062, 6279.5527316424, 0.00000041036,
5.36817351402, 8429.2412664666, 0.00000051605, 1.33282746983, 1748.0164130670,
0.00000051992, 0.18914945834, 12139.5535091068, 0.00000049000, 0.48735065033, 1194.4470102246, 0.00000039200,
6.16832995016, 10447.3878396044, 0.00000035566, 1.77597314691, 6812.7668150860,
0.00000036770, 6.04133859347, 10213.2855462110, 0.00000036596, 2.56955238628, 1059.3819301892, 0.00000033291,
0.59309499459, 17789.8456197850, 0.00000035954, 1.70876111898, 2352.8661537718]
# longitude, Poisson terms of order 1
E11 = [6283.31966747491, 0.00000000000, 0.0000000000, 0.00206058863, 2.67823455584, 6283.0758499914, 0.00004303430,
2.63512650414, 12566.1516999828, 0.00000425264, 1.59046980729, 3.5231183490,
0.00000108977, 2.96618001993, 1577.3435424478, 0.00000093478, 2.59212835365, 18849.2275499742, 0.00000119261,
5.79557487799, 26.2983197998, 0.00000072122, 1.13846158196, 529.6909650946,
0.00000067768, 1.87472304791, 398.1490034082, 0.00000067327, 4.40918235168, 5507.5532386674, 0.00000059027,
2.88797038460, 5223.6939198022, 0.00000055976, 2.17471680261, 155.4203994342,
0.00000045407, 0.39803079805, 796.2980068164, 0.00000036369, 0.46624739835, 775.5226113240, 0.00000028958,
2.64707383882, 7.1135470008, 0.00000019097, 1.84628332577, 5486.7778431750,
0.00000020844, 5.34138275149, 0.9803210682, 0.00000018508, 4.96855124577, 213.2990954380, 0.00000016233,
0.03216483047, 2544.3144198834, 0.00000017293, 2.99116864949, 6275.9623029906]
# longitude, Poisson terms of order 2
E12 = [0.00052918870, 0.00000000000, 0.0000000000, 0.00008719837, 1.07209665242, 6283.0758499914, 0.00000309125,
0.86728818832, 12566.1516999828, 0.00000027339, 0.05297871691, 3.5231183490,
0.00000016334, 5.18826691036, 26.2983197998, 0.00000015752, 3.68457889430, 155.4203994342, 0.00000009541,
0.75742297675, 18849.2275499742, 0.00000008937, 2.05705419118, 77713.7714681205,
0.00000006952, 0.82673305410, 775.5226113240, 0.00000005064, 4.66284525271, 1577.3435424478]
E13 = [0.00000289226, 5.84384198723, 6283.0758499914, 0.00000034955, 0.00000000000, 0.0000000000, 0.00000016819,
5.48766912348, 12566.1516999828]
E14 = [0.00000114084, 3.14159265359, 0.0000000000, 0.00000007717, 4.13446589358, 6283.0758499914, 0.00000000765,
3.83803776214, 12566.1516999828]
E15 = [0.00000000878, 3.14159265359, 0.0000000000]
# latitude, periodic terms
E20 = [0.00000279620, 3.19870156017, 84334.6615813083, 0.00000101643, 5.42248619256, 5507.5532386674, 0.00000080445,
3.88013204458, 5223.6939198022, 0.00000043806, 3.70444689758, 2352.8661537718,
0.00000031933, 4.00026369781, 1577.3435424478, 0.00000022724, 3.98473831560, 1047.7473117547, 0.00000016392,
3.56456119782, 5856.4776591154, 0.00000018141, 4.98367470263, 6283.0758499914,
0.00000014443, 3.70275614914, 9437.7629348870, 0.00000014304, 3.41117857525, 10213.2855462110]
E21 = [0.00000009030, 3.89729061890, 5507.5532386674, 0.00000006177, 1.73038850355, 5223.6939198022]
# distance (radius vector), periodic terms
E30 = [1.00013988799, 0.00000000000, 0.0000000000, 0.01670699626, 3.09846350771, 6283.0758499914, 0.00013956023,
3.05524609620, 12566.1516999828, 0.00003083720, 5.19846674381, 77713.7714681205,
0.00001628461, 1.17387749012, 5753.3848848968, 0.00001575568, 2.84685245825, 7860.4193924392, 0.00000924799,
5.45292234084, 11506.7697697936, 0.00000542444, 4.56409149777, 3930.2096962196]
E31 = [0.00103018608, 1.10748969588, 6283.0758499914, 0.00001721238, 1.06442301418, 12566.1516999828, 0.00000702215,
3.14159265359, 0.0000000000]
E32 = [0.00004359385, 5.78455133738, 6283.0758499914]
E33 = [0.00000144595, 4.27319435148, 6283.0758499914]
# lunar motion parameters
M10 = [22639.5858800, 2.3555545723, 8328.6914247251, 1.5231275E-04, 2.5041111E-07, -1.1863391E-09, 4586.4383203,
8.0413790709, 7214.0628654588, -2.1850087E-04, -1.8646419E-07, 8.7760973E-10, 2369.9139357, 10.3969336431,
15542.7542901840, -6.6188121E-05, 6.3946925E-08, -3.0872935E-10, 769.0257187, 4.7111091445, 16657.3828494503,
3.0462550E-04, 5.0082223E-07, -2.3726782E-09, -666.4175399, -0.0431256817, 628.3019552485, -2.6638815E-06,
6.1639211E-10, -5.4439728E-11, -411.5957339, 3.2558104895, 16866.9323152810, -1.2804259E-04, -9.8998954E-09,
4.0433461E-11, 211.6555524, 5.6858244986, -1114.6285592663, -3.7081362E-04, -4.3687530E-07, 2.0639488E-09,
205.4359530, 8.0845047526, 6585.7609102104, -2.1583699E-04, -1.8708058E-07, 9.3204945E-10,
191.9561973, 12.7524882154, 23871.4457149091, 8.6124629E-05, 3.1435804E-07, -1.4950684E-09, 164.7286185,
10.4400593249, 14914.4523349355, -6.3524240E-05, 6.3330532E-08, -2.5428962E-10, -147.3213842, -2.3986802540,
-7700.3894694766, -1.5497663E-04, -2.4979472E-07, 1.1318993E-09, -124.9881185, 5.1984668216, 7771.3771450920,
-3.3094061E-05, 3.1973462E-08, -1.5436468E-10, -109.3803637, 2.3124288905, 8956.9933799736, 1.4964887E-04,
2.5102751E-07, -1.2407788E-09, 55.1770578, 7.1411231536, -1324.1780250970, 6.1854469E-05, 7.3846820E-08,
-3.4916281E-10, -45.0996092, 5.6113650618, 25195.6237400061, 2.4270161E-05, 2.4051122E-07, -1.1459056E-09,
39.5333010, -0.9002559173, -8538.2408905558, 2.8035534E-04, 2.6031101E-07, -1.2267725E-09,
38.4298346, 18.4383127140, 22756.8171556428, -2.8468899E-04, -1.2251727E-07, 5.6888037E-10, 36.1238141,
7.0666637168, 24986.0742741754, 4.5693825E-04, 7.5123334E-07, -3.5590172E-09, 30.7725751, 16.0827581417,
14428.1257309177, -4.3700174E-04, -3.7292838E-07, 1.7552195E-09, -28.3971008, 7.9982533891, 7842.3648207073,
-2.2116475E-04, -1.8584780E-07, 8.2317000E-10, -24.3582283, 10.3538079614, 16171.0562454324, -6.8852003E-05,
6.4563317E-08, -3.6316908E-10, -18.5847068, 2.8429122493, -557.3142796331, -1.8540681E-04, -2.1843765E-07,
1.0319744E-09, 17.9544674, 5.1553411398, 8399.6791003405, -3.5757942E-05, 3.2589854E-08, -2.0880440E-10,
14.5302779, 12.7956138971, 23243.1437596606, 8.8788511E-05, 3.1374165E-07, -1.4406287E-09,
14.3796974, 15.1080427876, 32200.1371396342, 2.3843738E-04, 5.6476915E-07, -2.6814075E-09, 14.2514576,
-24.0810366320, -2.3011998397, 1.5231275E-04, 2.5041111E-07, -1.1863391E-09, 13.8990596, 20.7938672862,
31085.5085803679, -1.3237624E-04, 1.2789385E-07, -6.1745870E-10, 13.1940636, 3.3302699264, -9443.3199839914,
-5.2312637E-04, -6.8728642E-07, 3.2502879E-09, -9.6790568, -4.7542348263, -16029.0808942018, -3.0728938E-04,
-5.0020584E-07, 2.3182384E-09, -9.3658635, 11.2971895604, 24080.9951807398, -3.4654346E-04, -1.9636409E-07,
9.1804319E-10, 8.6055318, 5.7289501804, -1742.9305145148, -3.6814974E-04, -4.3749170E-07, 2.1183885E-09,
-8.4530982, 7.5540213938, 16100.0685698171, 1.1921869E-04, 2.8238458E-07, -1.3407038E-09,
8.0501724, 10.4831850066, 14286.1503796870, -6.0860358E-05, 6.2714140E-08, -1.9984990E-10, -7.6301553,
4.6679834628, 17285.6848046987, 3.0196162E-04, 5.0143862E-07, -2.4271179E-09, -7.4474952, -0.0862513635,
1256.6039104970, -5.3277630E-06, 1.2327842E-09, -1.0887946E-10, 7.3712011, 8.1276304344, 5957.4589549619,
-2.1317311E-04, -1.8769697E-07, 9.8648918E-10, 7.0629900, 0.9591375719, 33.7570471374, -3.0829302E-05,
-3.6967043E-08, 1.7385419E-10, -6.3831491, 9.4966777258, 7004.5133996281, 2.1416722E-04, 3.2425793E-07,
-1.5355019E-09, -5.7416071, 13.6527441326, 32409.6866054649, -1.9423071E-04, 5.4047029E-08, -2.6829589E-10,
4.3740095, 18.4814383957, 22128.5152003943, -2.8202511E-04, -1.2313366E-07, 6.2332010E-10, -3.9976134,
7.9669196340, 33524.3151647312, 1.7658291E-04, 4.9092233E-07, -2.3322447E-09, -3.2096876, 13.2398458924,
14985.4400105508, -2.5159493E-04, -1.5449073E-07, 7.2324505E-10, -2.9145404, 12.7093625336, 24499.7476701576,
8.3460748E-05, 3.1497443E-07, -1.5495082E-09, 2.7318890, 16.1258838235, 13799.8237756692, -4.3433786E-04,
-3.7354477E-07, 1.8096592E-09, -2.5679459, -2.4418059357, -7072.0875142282, -1.5764051E-04, -2.4917833E-07,
1.0774596E-09, -2.5211990, 7.9551277074, 8470.6667759558, -2.2382863E-04, -1.8523141E-07, 7.6873027E-10,
2.4888871, 5.6426988169, -486.3266040178, -3.7347750E-04, -4.3625891E-07, 2.0095091E-09, 2.1460741, 7.1842488353,
-1952.4799803455, 6.4518350E-05, 7.3230428E-08, -2.9472308E-10,
1.9777270, 23.1494218585, 39414.2000050930, 1.9936508E-05, 3.7830496E-07, -1.8037978E-09, 1.9336825,
9.4222182890, 33314.7656989005, 6.0925100E-04, 1.0016445E-06, -4.7453563E-09, 1.8707647, 20.8369929680,
30457.2066251194, -1.2971236E-04, 1.2727746E-07, -5.6301898E-10, -1.7529659, 0.4873576771, -8886.0057043583,
-3.3771956E-04, -4.6884877E-07, 2.2183135E-09, -1.4371624, 7.0979974718, -695.8760698485, 5.9190587E-05,
7.4463212E-08, -4.0360254E-10, -1.3725701, 1.4552986550, -209.5494658307, 4.3266809E-04, 5.1072212E-07,
-2.4131116E-09, 1.2618162, 7.5108957121, 16728.3705250656, 1.1655481E-04, 2.8300097E-07, -1.3951435E-09]
M11 = [1.6768000, -0.0431256817, 628.3019552485, -2.6638815E-06, 6.1639211E-10, -5.4439728E-11, 0.5164200,
11.2260974062, 6585.7609102104, -2.1583699E-04, -1.8708058E-07, 9.3204945E-10, 0.4138300, 13.5816519784,
14914.4523349355, -6.3524240E-05, 6.3330532E-08, -2.5428962E-10, 0.3711500, 5.5402729076, 7700.3894694766,
1.5497663E-04, 2.4979472E-07, -1.1318993E-09,
0.2756000, 2.3124288905, 8956.9933799736, 1.4964887E-04, 2.5102751E-07, -1.2407788E-09, 0.2459863,
-25.6198212459, -2.3011998397, 1.5231275E-04, 2.5041111E-07, -1.1863391E-09, 0.0711800, 7.9982533891,
7842.3648207073, -2.2116475E-04, -1.8584780E-07, 8.2317000E-10, 0.0612800, 10.3538079614, 16171.0562454324,
-6.8852003E-05, 6.4563317E-08, -3.6316908E-10]
M12 = [0.0048700, -0.0431256817, 628.3019552485, -2.6638815E-06, 6.1639211E-10, -5.4439728E-11, 0.0022800,
-27.1705318325, -2.3011998397, 1.5231275E-04, 2.5041111E-07, -1.1863391E-09, 0.0015000, 11.2260974062,
6585.7609102104, -2.1583699E-04, -1.8708058E-07, 9.3204945E-10]
M20 = [18461.2400600, 1.6279052448, 8433.4661576405, -6.4021295E-05, -4.9499477E-09, 2.0216731E-11, 1010.1671484,
3.9834598170, 16762.1575823656, 8.8291456E-05, 2.4546117E-07, -1.1661223E-09, 999.6936555, 0.7276493275,
-104.7747329154, 2.1633405E-04, 2.5536106E-07, -1.2065558E-09, 623.6524746, 8.7690283983, 7109.2881325435,
-2.1668263E-06, 6.8896872E-08, -3.2894608E-10,
199.4837596, 9.6692843156, 15647.5290230993, -2.8252217E-04, -1.9141414E-07, 8.9782646E-10, 166.5741153,
6.4134738261, -1219.4032921817, -1.5447958E-04, -1.8151424E-07, 8.5739300E-10, 117.2606951, 12.0248388879,
23976.2204478244, -1.3020942E-04, 5.8996977E-08, -2.8851262E-10, 61.9119504, 6.3390143893, 25090.8490070907,
2.4060421E-04, 4.9587228E-07, -2.3524614E-09,
33.3572027, 11.1245829706, 15437.9795572686, 1.5014592E-04, 3.1930799E-07, -1.5152852E-09, 31.7596709,
3.0832038997, 8223.9166918098, 3.6864680E-04, 5.0577218E-07, -2.3928949E-09, 29.5766003, 8.8121540801,
6480.9861772950, 4.9705523E-07, 6.8280480E-08, -2.7450635E-10, 15.5662654, 4.0579192538, -9548.0947169068,
-3.0679233E-04, -4.3192536E-07, 2.0437321E-09,
15.1215543, 14.3803934601, 32304.9118725496, 2.2103334E-05, 3.0940809E-07, -1.4748517E-09, -12.0941511,
8.7259027166, 7737.5900877920, -4.8307078E-06, 6.9513264E-08, -3.8338581E-10, 8.8681426, 9.7124099974,
15019.2270678508, -2.7985829E-04, -1.9203053E-07, 9.5226618E-10, 8.0450400, 0.6687636586, 8399.7091105030,
-3.3191993E-05, 3.2017096E-08, -1.5363746E-10,
7.9585542, 12.0679645696, 23347.9184925760, -1.2754553E-04, 5.8380585E-08, -2.3407289E-10, 7.4345550,
6.4565995078, -1847.7052474301, -1.5181570E-04, -1.8213063E-07, 9.1183272E-10, -6.7314363, -4.0265854988,
-16133.8556271171, -9.0955337E-05, -2.4484477E-07, 1.1116826E-09, 6.5795750, 16.8104074692, 14323.3509980023,
-2.2066770E-04, -1.1756732E-07, 5.4866364E-10, -6.4600721, 1.5847795630, 9061.7681128890, -6.6685176E-05,
-4.3335556E-09, -3.4222998E-11, -6.2964773, 4.8837157343, 25300.3984729215, -1.9206388E-04, -1.4849843E-08,
6.0650192E-11, -5.6323538, -0.7707750092, 733.0766881638, -2.1899793E-04, -2.5474467E-07, 1.1521161E-09,
-5.3683961, 6.8263720663, 16204.8433027325, -9.7115356E-05, 2.7023515E-08, -1.3414795E-10, -5.3112784,
3.9403341353, 17390.4595376141, 8.5627574E-05, 2.4607756E-07, -1.2205621E-09, -5.0759179, 0.6845236457,
523.5272223331, 2.1367016E-04, 2.5597745E-07, -1.2609955E-09, -4.8396143, -1.6710309265, -7805.1642023920,
6.1357413E-05, 5.5663398E-09, -7.4656459E-11, -4.8057401, 3.5705615768, -662.0890125485, 3.0927234E-05,
3.6923410E-08, -1.7458141E-10,
3.9840545, 8.6945689615, 33419.5404318159, 3.9291696E-04, 7.4628340E-07, -3.5388005E-09, 3.6744619,
19.1659620415, 22652.0424227274, -6.8354947E-05, 1.3284380E-07, -6.3767543E-10, 2.9984815, 20.0662179587,
31190.2833132833, -3.4871029E-04, -1.2746721E-07, 5.8909710E-10, 2.7986413, -2.5281611620, -16971.7070481963,
3.4437664E-04, 2.6526096E-07, -1.2469893E-09,
2.4138774, 17.7106633865, 22861.5918885581, -5.0102304E-04, -3.7787833E-07, 1.7754362E-09, 2.1863132,
5.5132179088, -9757.6441827375, 1.2587576E-04, 7.8796768E-08, -3.6937954E-10, 2.1461692, 13.4801375428,
23766.6709819937, 3.0245868E-04, 5.6971910E-07, -2.7016242E-09, 1.7659832, 11.1677086523, 14809.6776020201,
1.5280981E-04, 3.1869159E-07, -1.4608454E-09, -1.6244212, 7.3137297434, 7318.8375983742, -4.3483492E-04,
-4.4182525E-07, 2.0841655E-09, 1.5813036, 5.4387584720, 16552.6081165349, 5.2095955E-04, 7.5618329E-07,
-3.5792340E-09, 1.5197528, 16.7359480324, 40633.6032972747, 1.7441609E-04, 5.5981921E-07, -2.6611908E-09,
1.5156341, 1.7023646816, -17876.7861416319, -4.5910508E-04, -6.8233647E-07, 3.2300712E-09,
1.5102092, 5.4977296450, 8399.6847301375, -3.3094061E-05, 3.1973462E-08, -1.5436468E-10, -1.3178223,
9.6261586339, 16275.8309783478, -2.8518605E-04, -1.9079775E-07, 8.4338673E-10, -1.2642739, 11.9817132061,
24604.5224030729, -1.3287330E-04, 5.9613369E-08, -3.4295235E-10, 1.1918723, 22.4217725310, 39518.9747380084,
-1.9639754E-04, 1.2294390E-07, -5.9724197E-10,
1.1346110, 14.4235191419, 31676.6099173011, 2.4767216E-05, 3.0879170E-07, -1.4204120E-09, 1.0857810,
8.8552797618, 5852.6842220465, 3.1609367E-06, 6.7664088E-08, -2.2006663E-10, -1.0193852, 7.2392703065,
33629.0898976466, -3.9751134E-05, 2.3556127E-07, -1.1256889E-09, -0.8227141, 11.0814572888, 16066.2815125171,
1.4748204E-04, 3.1992438E-07, -1.5697249E-09,
0.8042238, 3.5274358950, -33.7870573000, 2.8263353E-05, 3.7539802E-08, -2.2902113E-10, 0.8025939, 6.7832463846,
16833.1452579809, -9.9779237E-05, 2.7639907E-08, -1.8858767E-10, -0.7931866, -6.3821400710, -24462.5470518423,
-2.4326809E-04, -4.9525589E-07, 2.2980217E-09, -0.7910153, 6.3703481443, -591.1013369332, -1.5714346E-04,
-1.8089785E-07, 8.0295327E-10, -0.6674056, 9.1819266386, 24533.5347274576, 5.5197395E-05, 2.7743463E-07,
-1.3204870E-09, 0.6502226, 4.1010449356, -10176.3966721553, -3.0412845E-04, -4.3254175E-07, 2.0981718E-09,
-0.6388131, 6.2958887075, 25719.1509623392, 2.3794032E-04, 4.9648867E-07, -2.4069012E-09]
M21 = [0.0743000, 11.9537467337, 6480.9861772950, 4.9705523E-07, 6.8280480E-08, -2.7450635E-10, 0.0304300, 8.7259027166,
7737.5900877920, -4.8307078E-06, 6.9513264E-08, -3.8338581E-10, 0.0222900, 12.8540026510, 15019.2270678508,
-2.7985829E-04, -1.9203053E-07, 9.5226618E-10, 0.0199900, 15.2095572232, 23347.9184925760, -1.2754553E-04,
5.8380585E-08, -2.3407289E-10,
0.0186900, 9.5981921614, -1847.7052474301, -1.5181570E-04, -1.8213063E-07, 9.1183272E-10, 0.0169600,
7.1681781524, 16133.8556271171, 9.0955337E-05, 2.4484477E-07, -1.1116826E-09, 0.0162300, 1.5847795630,
9061.7681128890, -6.6685176E-05, -4.3335556E-09, -3.4222998E-11, 0.0141900, -0.7707750092, 733.0766881638,
-2.1899793E-04, -2.5474467E-07, 1.1521161E-09]
M30 = [385000.5290396, 1.5707963268, 0.0000000000, 0.0000000E+00, 0.0000000E+00, 0.0000000E+00, -20905.3551378,
3.9263508990, 8328.6914247251, 1.5231275E-04, 2.5041111E-07, -1.1863391E-09, -3699.1109330, 9.6121753977,
7214.0628654588, -2.1850087E-04, -1.8646419E-07, 8.7760973E-10, -2955.9675626, 11.9677299699, 15542.7542901840,
-6.6188121E-05, 6.3946925E-08, -3.0872935E-10, -569.9251264, 6.2819054713, 16657.3828494503, 3.0462550E-04,
5.0082223E-07, -2.3726782E-09, 246.1584797, 7.2566208254, -1114.6285592663, -3.7081362E-04, -4.3687530E-07,
2.0639488E-09, -204.5861179, 12.0108556517, 14914.4523349355, -6.3524240E-05, 6.3330532E-08, -2.5428962E-10,
-170.7330791, 14.3232845422, 23871.4457149091, 8.6124629E-05, 3.1435804E-07, -1.4950684E-09, -152.1378118,
9.6553010794, 6585.7609102104, -2.1583699E-04, -1.8708058E-07, 9.3204945E-10, -129.6202242, -0.8278839272,
-7700.3894694766, -1.5497663E-04, -2.4979472E-07, 1.1318993E-09, 108.7427014, 6.7692631483, 7771.3771450920,
-3.3094061E-05, 3.1973462E-08, -1.5436468E-10, 104.7552944, 3.8832252173, 8956.9933799736, 1.4964887E-04,
2.5102751E-07, -1.2407788E-09,
79.6605685, 0.6705404095, -8538.2408905558, 2.8035534E-04, 2.6031101E-07, -1.2267725E-09, 48.8883284,
1.5276706450, 628.3019552485, -2.6638815E-06, 6.1639211E-10, -5.4439728E-11, -34.7825237, 20.0091090408,
22756.8171556428, -2.8468899E-04, -1.2251727E-07, 5.6888037E-10, 30.8238599, 11.9246042882, 16171.0562454324,
-6.8852003E-05, 6.4563317E-08, -3.6316908E-10,
24.2084985, 9.5690497159, 7842.3648207073, -2.2116475E-04, -1.8584780E-07, 8.2317000E-10, -23.2104305,
8.6374600436, 24986.0742741754, 4.5693825E-04, 7.5123334E-07, -3.5590172E-09, -21.6363439, 17.6535544685,
14428.1257309177, -4.3700174E-04, -3.7292838E-07, 1.7552195E-09, -16.6747239, 6.7261374666, 8399.6791003405,
-3.5757942E-05, 3.2589854E-08, -2.0880440E-10,
14.4026890, 4.9010662531, -9443.3199839914, -5.2312637E-04, -6.8728642E-07, 3.2502879E-09, -12.8314035,
14.3664102239, 23243.1437596606, 8.8788511E-05, 3.1374165E-07, -1.4406287E-09, -11.6499478, 22.3646636130,
31085.5085803679, -1.3237624E-04, 1.2789385E-07, -6.1745870E-10, -10.4447578, 16.6788391144, 32200.1371396342,
2.3843738E-04, 5.6476915E-07, -2.6814075E-09,
10.3211071, 8.7119194804, -1324.1780250970, 6.1854469E-05, 7.3846820E-08, -3.4916281E-10, 10.0562033,
7.2997465071, -1742.9305145148, -3.6814974E-04, -4.3749170E-07, 2.1183885E-09, -9.8844667, 12.0539813334,
14286.1503796870, -6.0860358E-05, 6.2714140E-08, -1.9984990E-10, 8.7515625, 6.3563649081, -9652.8694498221,
-9.0458282E-05, -1.7656429E-07, 8.3717626E-10, -8.3791067, 4.4137085761, -557.3142796331, -1.8540681E-04,
-2.1843765E-07, 1.0319744E-09, -7.0026961, -3.1834384995, -16029.0808942018, -3.0728938E-04, -5.0020584E-07,
2.3182384E-09, 6.3220032, 9.1248177206, 16100.0685698171, 1.1921869E-04, 2.8238458E-07, -1.3407038E-09,
5.7508579, 6.2387797896, 17285.6848046987, 3.0196162E-04, 5.0143862E-07, -2.4271179E-09, -4.9501349,
9.6984267611, 5957.4589549619, -2.1317311E-04, -1.8769697E-07, 9.8648918E-10, -4.4211770, 3.0260949818,
-209.5494658307, 4.3266809E-04, 5.1072212E-07, -2.4131116E-09, 4.1311145, 11.0674740526, 7004.5133996281,
2.1416722E-04, 3.2425793E-07, -1.5355019E-09, -3.9579827, 20.0522347225, 22128.5152003943, -2.8202511E-04,
-1.2313366E-07, 6.2332010E-10,
3.2582371, 14.8106422192, 14985.4400105508, -2.5159493E-04, -1.5449073E-07, 7.2324505E-10, -3.1483020,
4.8266068163, 16866.9323152810, -1.2804259E-04, -9.8998954E-09, 4.0433461E-11, 2.6164092, 14.2801588604,
24499.7476701576, 8.3460748E-05, 3.1497443E-07, -1.5495082E-09, 2.3536310, 9.5259240342, 8470.6667759558,
-2.2382863E-04, -1.8523141E-07, 7.6873027E-10, -2.1171283, -0.8710096090, -7072.0875142282, -1.5764051E-04,
-2.4917833E-07, 1.0774596E-09, -1.8970368, 17.6966801503, 13799.8237756692, -4.3433786E-04, -3.7354477E-07,
1.8096592E-09, -1.7385258, 2.0581540038, -8886.0057043583, -3.3771956E-04, -4.6884877E-07, 2.2183135E-09,
-1.5713944, 22.4077892948, 30457.2066251194, -1.2971236E-04, 1.2727746E-07, -5.6301898E-10, -1.4225541,
24.7202181853, 39414.2000050930, 1.9936508E-05, 3.7830496E-07, -1.8037978E-09, -1.4189284, 17.1661967915,
23314.1314352759, -9.9282182E-05, 9.5920387E-08, -4.6309403E-10, 1.1655364, 3.8400995356, 9585.2953352221,
1.4698499E-04, 2.5164390E-07, -1.2952185E-09, -1.1169371, 10.9930146158, 33314.7656989005, 6.0925100E-04,
1.0016445E-06, -4.7453563E-09,
1.0656723, 1.4845449633, 1256.6039104970, -5.3277630E-06, 1.2327842E-09, -1.0887946E-10, 1.0586190,
11.9220903668, 8364.7398411275, -2.1850087E-04, -1.8646419E-07, 8.7760973E-10, -0.9333176, 9.0816920389,
16728.3705250656, 1.1655481E-04, 2.8300097E-07, -1.3951435E-09, 0.8624328, 12.4550876470, 6656.7485858257,
-4.0390768E-04, -4.0490184E-07, 1.9095841E-09,
0.8512404, 4.3705828944, 70.9876756153, -1.8807069E-04, -2.1782126E-07, 9.7753467E-10, -0.8488018, 16.7219647962,
31571.8351843857, 2.4110126E-04, 5.6415276E-07, -2.6269678E-09, -0.7956264, 3.5134526588, -9095.5551701890,
9.4948529E-05, 4.1873358E-08, -1.9479814E-10]
M31 = [0.5139500, 12.0108556517, 14914.4523349355, -6.3524240E-05, 6.3330532E-08, -2.5428962E-10, 0.3824500,
9.6553010794, 6585.7609102104, -2.1583699E-04, -1.8708058E-07, 9.3204945E-10, 0.3265400, 3.9694765808,
7700.3894694766, 1.5497663E-04, 2.4979472E-07, -1.1318993E-09, 0.2639600, 0.7416325637, 8956.9933799736,
1.4964887E-04, 2.5102751E-07, -1.2407788E-09,
0.1230200, -1.6139220085, 628.3019552485, -2.6638815E-06, 6.1639211E-10, -5.4439728E-11, 0.0775400, 8.7830116346,
16171.0562454324, -6.8852003E-05, 6.4563317E-08, -3.6316908E-10, 0.0606800, 6.4274570623, 7842.3648207073,
-2.2116475E-04, -1.8584780E-07, 8.2317000E-10, 0.0497000, 12.0539813334, 14286.1503796870, -6.0860358E-05,
6.2714140E-08, -1.9984990E-10]
# coefficients of the Moon's mean longitude
M1n = [3.81034392032, 8.39968473021E+03, -3.31919929753E-05, 3.20170955005E-08, -1.53637455544E-10]
# ================== Solar position ===================
EnnT = 0  # set the time variable EnnT before calling Enn
def Enn(F):  # evaluate one group of periodic or Poisson terms (E10, E11, E20, ...); set EnnT first
global EnnT
v = 0
for i in range(0, len(F), 3):
v += F[i] * math.cos(F[i + 1] + EnnT * F[i + 2])
return v
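# Usage sketch (illustrative, not from the original source): Enn() reads the
# module-level time variable, so set EnnT before calling it, e.g.
#   EnnT = jd / 365250.0  # Julian millennia from J2000
#   L0 = Enn(E10)         # periodic part of the Earth's heliocentric longitude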
def earCal(jd):  # return the Earth's position in heliocentric ecliptic coordinates of date
global EnnT
EnnT = jd / 365250.0
llr = []
t1 = EnnT
t2 = t1 * t1
t3 = t2 * t1
t4 = t3 * t1
t5 = t4 * t1
llr.append(Enn(E10) + Enn(E11) * t1 + Enn(E12) * t2 + Enn(E13) * t3 + Enn(E14) * t4 + Enn(E15) * t5)
llr.append(Enn(E20) + Enn(E21) * t1)
llr.append(Enn(E30) + Enn(E31) * t1 + Enn(E32) * t2 + Enn(E33) * t3)
llr[0] = rad2mrad(llr[0])
return llr
def sunCal2(jd):  # return the Sun's apparent geocentric ecliptic longitude and latitude at time jd
sun = earCal(jd)
sun[0] += math.pi
sun[1] = -sun[1]  # the Sun's true position
d = nutation(jd)
sun[0] = rad2mrad(sun[0] + d['Lon'])  # apply nutation
addGxc(jd, sun)  # apply annual aberration in longitude
return sun  # the Sun's apparent position
# ================== Lunar position ===================
MnnT = 0  # set the time variable MnnT before calling Mnn
def Mnn(F):  # evaluate M10, M11, M20, etc.; set MnnT first
v = 0
t1 = MnnT
t2 = t1 * t1
t3 = t2 * t1
t4 = t3 * t1
for i in range(0, len(F), 6):
v += F[i] * math.sin(F[i + 1] + t1 * F[i + 2] + t2 * F[i + 3] + t3 * F[i + 4] + t4 * F[i + 5])
return v
def moonCal(jd):  # return the Moon's position in geocentric ecliptic coordinates of date
global MnnT
MnnT = jd / 36525.0
t1 = MnnT
t2 = t1 * t1
t3 = t2 * t1
t4 = t3 * t1
llr = []
llr.append((Mnn(M10) + Mnn(M11) * t1 + Mnn(M12) * t2) / rad)
llr.append((Mnn(M20) + Mnn(M21) * t1) / rad)
llr.append((Mnn(M30) + Mnn(M31) * t1) * 0.999999949827)
llr[0] = llr[0] + M1n[0] + M1n[1] * t1 + M1n[2] * t2 + M1n[3] * t3 + M1n[4] * t4
llr[0] = rad2mrad(llr[0])  # geocentric ecliptic coordinates of date (without precession)
addPrece(jd, llr)  # apply precession
return llr
def moonCal2(jd):  # return the Moon's apparent geocentric ecliptic longitude and latitude
moon = moonCal(jd)
d = nutation(jd)
moon[0] = rad2mrad(moon[0] + d['Lon'])  # apply nutation
return moon
def moonCal3(jd):  # return the Moon's apparent geocentric right ascension and declination
moon = moonCal(jd)
HCconv(moon, hcjj1(jd))
nutationRaDec(jd, moon)  # apply nutation in right ascension and declination
# if nutation in longitude and obliquity was already applied before the ecliptic-to-equatorial
# conversion, the RA/Dec nutation must not be applied again
return moon
# ================== Solar and lunar positions in geocentric coordinates ===================
def jiaoCai(lx, t, jiao):
# lx=1: difference between the Sun-Moon angular distance at time t and jiao
# lx=0: difference between the Sun's ecliptic longitude at time t and jiao
sun = earCal(t)  # the Sun's true position (first compute the Earth's heliocentric position)
sun[0] += math.pi
sun[1] = -sun[1]  # convert to geocentric coordinates
addGxc(t, sun)  # apply annual aberration
if (lx == 0):
d = nutation(t)
sun[0] += d['Lon']  # apply nutation in longitude
return rad2mrad(jiao - sun[0])
moon = moonCal(t)  # the Sun-Moon angular distance is unaffected by nutation
return rad2mrad(jiao - (moon[0] - sun[0]))
# ================== Solving the time from a given position ===================
def jiaoCal(t1, jiao, lx):
# t1 is a Julian day count from J2000
# find the time t at which the angle reaches jiao
# lx=0: time at which the Sun's ecliptic longitude reaches the given angle (for solar terms)
# lx=1: time at which the Sun-Moon angular distance reaches the given angle (for new/full moons)
# the t1 passed in should be somewhat earlier than the true time t of the given angle
# for solar terms, t should lie between t1 and t1+360 days; for year Y's n-th term (n=0 is the
# spring equinox), t1 may be taken as Y*365.2422+n*15.2
# for new/full moons, t should lie between t1 and t1+25 days; outside this range the root to the right is found
t2 = t1
t = 0
if (lx == 0):
t2 += 360  # solve within t1 to t2 (a 360-day window); the result goes into t
else:
t2 += 25
jiao *= math.pi / 180.0  # target angle, in radians
# root finding by the secant method
v1 = jiaoCai(lx, t1, jiao)  # v1, v2 are the angle differences at t1, t2
v2 = jiaoCai(lx, t2, jiao)
if (v1 < v2):
v2 -= 2 * math.pi  # subtracting 2*pi turns the periodic angle into a continuous one
k = 1  # k is the secant slope
for i in range(10):  # fast secant iteration; three or four steps usually reach the required accuracy
k2 = (v2 - v1) / (t2 - t1)  # compute the slope
if (abs(k2) > 1e-15):
k = k2  # the difference quotient may be zero and must then be excluded
t = t1 - v1 / k
v = jiaoCai(lx, t, jiao)  # linear approximation of the root
if (v > 1):
v -= 2 * math.pi  # after one step v1 is already near 0; if v is large, subtract one full turn
if (abs(v) < 1e-8):
break  # required accuracy reached
t1 = t2
v1 = v2
t2 = t
v2 = v  # next secant step
return t
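# Usage sketch (illustrative; these constants mirror paiYue() below):
#   t1 = 365.2422 * (2020 - 2000) - 50  # a bit before the 2019 winter solstice
#   t_dz = jiaoCal(t1, -90, 0)          # time the Sun reaches longitude 270 deg (-90 deg)
#   t_shuo = jiaoCal(t_dz, 0, 1)        # the Sun-Moon conjunction (new moon) that follows
# All times are in days of dynamical time counted from J2000.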
# ================== Solar term computation ===================
# solar term name table
jqB = ["春分", "清明", "谷雨", "立夏", "小满", "芒种", "夏至", "小暑", "大暑", "立秋", "处暑", "白露",
"秋分", "寒露", "霜降", "立冬", "小雪", "大雪", "冬至", "小寒", "大寒", "立春", "雨水", "惊蛰"]
# ================= Lunar calendar computation ========================
# *****
# 1. The UTC date of the winter solstice is stored in A[0]; by "rule 1" the new-moon day on or
# before A[0] (inclusive) is the first day of the lunar year
# The zhong-qi (major solar terms) after the winter solstice are stored in A[1],A[2],...,A[13],
# where A[12] comes back to the winter solstice; 13 zhong-qi are computed in all
# 2. Compute 14 consecutive new-moon days after the winter solstice, i.e. starting from A[0]+1
# The 14 new-moon days are numbered 0,1,...,12 and stored in C[0],C[1],...,C[13]
# These 14 days are the end dates of the months numbered 0,1,...,12,0; note the actual end day
# is the first day of the next month and does not belong to the current month
# The same 14 days are also the start dates of the months numbered 1,2,...
# For a month numbered n, the start date is C[n-1] and the end date is C[n]; if every month
# contains a zhong-qi, month n contains zhong-qi A[n]
# Note: 14 new moons must be computed so the long/short status of all 13 months is known.
# 3. Leap-year test: a year containing 13 months is a leap year
# If the end date of the 13th month (number 12) is after the winter-solstice day, i.e.
# C[12] > A[12], that month belongs to the new year; this year has no month 12 and 12 months in all
# If the end date is on or before the winter-solstice day, i.e. C[12] <= A[12], the month is a
# valid month of this year, which then has 13 months in all
# 4. Placing the leap month in a leap year:
# at least one of the 13 months has no zhong-qi; the first month without one becomes the leap
# month, found among n=1...12 by C[n] <= A[n]
# From the definition of the lunar year's start, month 0 always contains the winter solstice,
# so it can never be the leap month.
# The first month is sometimes greedy: besides the winter solstice it may swallow another
# zhong-qi of this or the previous year
# Once the leap month is fixed, it and every later month have their numbers reduced by 1
# 5. The month numbers above are not the everyday month names ("1st month", "2nd month", ...):
# with a "jian zi" year start, month 0 is the first month; with "jian yin", month 2 is named
# the 1st month (zhengyue), month 3 the 2nd month, and so on
# *****
yueMing = ["正", "二", "三", "四", "五", "六", "七", "八", "九", "十", "11", "12"]
def paiYue(year):
jDate = JDate()
y = int(year)
zq = []
jq = []
hs = []
# starting from the winter solstice, compute 14 consecutive zhong-qi times
t1 = 365.2422 * (y - 2000) - 50  # the lunar year starts at the previous winter solstice; start before the previous year's Daxue so jie-qi and zhong-qi can be computed together
for i in range(14):  # compute the solar terms (from the winter solstice); note: the returned times are dynamical time
zq.append(jiaoCal(t1 + i * 30.4, i * 30 - 90, 0))  # zhong-qi; the Sun's longitude at the winter solstice is 270 deg (i.e. -90 deg)
jq.append(jiaoCal(t1 + i * 30.4, i * 30 - 105, 0))  # also compute the jie-qi, which are not needed for fixing the new moons
# after the winter solstice, compute 14 consecutive new-moon (conjunction) times
dongZhiJia1 = zq[0] + 1 - jDate.Dint_dec(zq[0], 8, 0)  # Julian day number of 0:00 on the day after the winter solstice
hs.append(jiaoCal(dongZhiJia1, 0, 1))  # conjunction that ends the first month
for i in range(1, 14):
hs.append(jiaoCal(hs[i - 1] + 25, 0, 1))
# integer day numbers of the zhong-qi and conjunction times (no fractional part, so date differences can be taken)
A = []
B = []
C = []
for i in range(14):  # integer part of the local (UTC+8) day number
A.append(jDate.Dint_dec(zq[i], 8, 1))
B.append(jDate.Dint_dec(jq[i], 8, 1))
C.append(jDate.Dint_dec(hs[i], 8, 1))
# leap-month and long/short-month analysis
tot = 12
nun = -1
yn = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 0, 0]  # month numbers
if (C[12] <= A[12]):  # leap-month analysis
yn[12] = 12
tot = 13  # month number 12 is a valid month of this year, which then has 13 months in all
for i in range(13):  # find the first month without a zhong-qi
if (C[i] <= A[i]):
break
nun = i - 1
for j in range(nun, 13):
yn[j - 1] -= 1  # note yn does not contain the lunar first month (hence i-1); in the Gregorian calendar the lunar first month always belongs to the previous year, so nothing more is computed for it
for i in range(tot):  # convert to jian-yin month names and analyse long/short months
yn[i] = yueMing[(yn[i] + 10) % 12]  # jian-yin month name
if (i == nun):
yn[i] += "闰"
else:
yn[i] += "月"  # mark whether it is a leap month
if (C[i + 1] - C[i] > 29):
yn[i] += "大"
else:
yn[i] += "小"  # mark long (30-day) or short (29-day) months
# display
out = "节气 手表时 中气 手表时 农历月 朔的手表时\r\n"
result = []
for i in range(tot):
zm = (i * 2 + 18) % 24
jm = (i * 2 + 17) % 24  # zhong-qi and jie-qi name indices
jDate.setFromJD(jq[i] + J2000 + 8 / 24.0, 1)
result.append(jDate.toDateTime())
out += jqB[jm] + ":" + jDate.toStr() + " "  # show the jie-qi
jDate.setFromJD(zq[i] + J2000 + 8 / 24.0, 1)
result.append(jDate.toDateTime())
out += jqB[zm] + ":" + jDate.toStr() + " "  # show the zhong-qi
jDate.setFromJD(hs[i] + J2000 + 8 / 24.0, 1)
out += yn[i] + ":" + jDate.toStr() + "\r\n"  # show the conjunction
print(out)
return result
def getjieqi_info(year):
jDate = JDate()
y = int(year)
zq = []
jq = []
hs = []
# starting from the winter solstice, compute 14 consecutive zhong-qi times
t1 = 365.2422 * (y - 2000) - 50  # the lunar year starts at the previous winter solstice; start before the previous year's Daxue so jie-qi and zhong-qi can be computed together
for i in range(14):  # compute the solar terms (from the winter solstice); note: the returned times are dynamical time
zq.append(jiaoCal(t1 + i * 30.4, i * 30 - 90, 0))  # zhong-qi; the Sun's longitude at the winter solstice is 270 deg (i.e. -90 deg)
jq.append(jiaoCal(t1 + i * 30.4, i * 30 - 105, 0))  # also compute the jie-qi, which are not needed for fixing the new moons
tot = 12
# collect the results
data = {}
for i in range(tot):
zm = (i * 2 + 18) % 24
jm = (i * 2 + 17) % 24  # zhong-qi and jie-qi name indices
jDate.setFromJD(jq[i] + J2000 + 8 / 24.0, 1)
data[jDate.toDateStr()] = jqB[jm]
jDate.setFromJD(zq[i] + J2000 + 8 / 24.0, 1)
data[jDate.toDateStr()] = jqB[zm]
return data
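# Usage sketch (illustrative): getjieqi_info() maps a local-date string (as
# produced by JDate.toDateStr()) to the solar-term name falling on that date,
# covering 24 terms starting around the previous year's Daxue/winter solstice:
#   for datestr, term in getjieqi_info(2020).items():
#       print(datestr, term)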
def getJieQiList(year):
jDate = JDate()
y = int(year)
zq = []
jq = []
hs = []
# starting from the winter solstice, compute 14 consecutive zhong-qi times
t1 = 365.2422 * (y - 2000) - 50  # the lunar year starts at the previous winter solstice; start before the previous year's Daxue so jie-qi and zhong-qi can be computed together
for i in range(14):  # compute the solar terms (from the winter solstice); note: the returned times are dynamical time
zq.append(jiaoCal(t1 + i * 30.4, i * 30 - 90, 0))  # zhong-qi; the Sun's longitude at the winter solstice is 270 deg (i.e. -90 deg)
jq.append(jiaoCal(t1 + i * 30.4, i * 30 - 105, 0))  # also compute the jie-qi, which are not needed for fixing the new moons
# after the winter solstice, compute 14 consecutive new-moon (conjunction) times
dongZhiJia1 = zq[0] + 1 - jDate.Dint_dec(zq[0], 8, 0)  # Julian day number of 0:00 on the day after the winter solstice
hs.append(jiaoCal(dongZhiJia1, 0, 1))  # conjunction that ends the first month
for i in range(1, 14):
hs.append(jiaoCal(hs[i - 1] + 25, 0, 1))
# integer day numbers of the zhong-qi and conjunction times (no fractional part, so date differences can be taken)
A = []
B = []
C = []
for i in range(14):  # integer part of the local (UTC+8) day number
A.append(jDate.Dint_dec(zq[i], 8, 1))
B.append(jDate.Dint_dec(jq[i], 8, 1))
C.append(jDate.Dint_dec(hs[i], 8, 1))
# leap-month and long/short-month analysis
tot = 12
nun = -1
yn = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 0, 0]  # month numbers
if (C[12] <= A[12]):  # leap-month analysis
yn[12] = 12
tot = 13  # month number 12 is a valid month of this year, which then has 13 months in all
for i in range(13):  # find the first month without a zhong-qi
if (C[i] <= A[i]):
break
nun = i - 1
for j in range(nun, 13):
yn[j - 1] -= 1  # note yn does not contain the lunar first month (hence i-1)
for i in range(tot):  # convert to jian-yin month names and analyse long/short months
yn[i] = yueMing[(yn[i] + 10) % 12]  # jian-yin month name
if (i == nun):
yn[i] += "闰"
else:
yn[i] += "月"  # mark whether it is a leap month
if (C[i + 1] - C[i] > 29):
yn[i] += "大"
else:
yn[i] += "小"  # mark long (30-day) or short (29-day) months
# collect the jie-qi and zhong-qi times
result = []
for i in range(tot):
jDate.setFromJD(jq[i] + J2000 + 8 / 24.0, 1)
result.append(jDate.toDateTime())
jDate.setFromJD(zq[i] + J2000 + 8 / 24.0, 1)
result.append(jDate.toDateTime())
return result
def getJieQiList_12(year):
jDate = JDate()
y = int(year)
zq = []
jq = []
hs = []
# starting from the winter solstice, compute 14 consecutive zhong-qi times
t1 = 365.2422 * (y - 2000) - 50  # the lunar year starts at the previous winter solstice; start before the previous year's Daxue so jie-qi and zhong-qi can be computed together
for i in range(14):  # compute the solar terms (from the winter solstice); note: the returned times are dynamical time
zq.append(jiaoCal(t1 + i * 30.4, i * 30 - 90, 0))  # zhong-qi; the Sun's longitude at the winter solstice is 270 deg (i.e. -90 deg)
jq.append(jiaoCal(t1 + i * 30.4, i * 30 - 105, 0))  # also compute the jie-qi, which are not needed for fixing the new moons
# after the winter solstice, compute 14 consecutive new-moon (conjunction) times
dongZhiJia1 = zq[0] + 1 - jDate.Dint_dec(zq[0], 8, 0)  # Julian day number of 0:00 on the day after the winter solstice
hs.append(jiaoCal(dongZhiJia1, 0, 1))  # conjunction that ends the first month
for i in range(1, 14):
hs.append(jiaoCal(hs[i - 1] + 25, 0, 1))
# integer day numbers of the zhong-qi and conjunction times (no fractional part, so date differences can be taken)
A = []
B = []
C = []
for i in range(14):  # integer part of the local (UTC+8) day number
A.append(jDate.Dint_dec(zq[i], 8, 1))
B.append(jDate.Dint_dec(jq[i], 8, 1))
C.append(jDate.Dint_dec(hs[i], 8, 1))
# leap-month and long/short-month analysis
tot = 12
nun = -1
yn = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 0, 0]  # month numbers
if (C[12] <= A[12]):  # leap-month analysis
yn[12] = 12
tot = 13  # month number 12 is a valid month of this year, which then has 13 months in all
for i in range(13):  # find the first month without a zhong-qi
if (C[i] <= A[i]):
break
nun = i - 1
for j in range(nun, 13):
yn[j - 1] -= 1  # note yn does not contain the lunar first month (hence i-1)
for i in range(tot):  # convert to jian-yin month names and analyse long/short months
yn[i] = yueMing[(yn[i] + 10) % 12]  # jian-yin month name
if (i == nun):
yn[i] += "闰"
else:
yn[i] += "月"  # mark whether it is a leap month
if (C[i + 1] - C[i] > 29):
yn[i] += "大"
else:
yn[i] += "小"  # mark long (30-day) or short (29-day) months
# collect only the jie-qi times
result = []
for i in range(tot):
jDate.setFromJD(jq[i] + J2000 + 8 / 24.0, 1)
result.append(jDate.toDateTime())
return result
if __name__ == '__main__':
result = paiYue(2020)
print(result)
result = getJieQiList_12(2020)
for val in result:
print(val)
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import pytest_mock
import contextlib
import unittest.mock
from flask import testing
from app.auth.routes import oauth
from tests.conftest import as_user
from tests.conftest import regular_user_info, blocked_user_info
from tests.conftest import set_user
from authlib.oidc.core import UserInfo
def test_authenticated_users_get_redirected_to_home(
client_without_db: testing.FlaskClient,
):
client = client_without_db
app = client.application
with set_user(app, as_user(client)):
with app.app_context():
resp = client.get("/auth/login")
assert resp.status_code == 302
assert resp.headers.get("Location") == "http://localhost/"
def test_unauthenticated_users_can_choose_login(
client_without_db: testing.FlaskClient,
):
client = client_without_db
resp = client.get("/auth/login")
assert resp.status_code == 200
assert b"fetch_profile" in resp.data
assert b"Sign in with Google" in resp.data
def test_users_get_redirected_to_minimal_oauth_consent_screen_by_default(
client_without_db: testing.FlaskClient,
):
client = client_without_db
resp = client.get("/auth/login?as_user=Google")
assert resp.status_code == 302
target = resp.headers.get("Location")
assert target.startswith("https://accounts.google.com/o/oauth2/v2/auth")
assert "profile" not in target
resp = client.post("/auth/login?as_user=Google")
assert resp.status_code == 302
target = resp.headers.get("Location")
assert target.startswith("https://accounts.google.com/o/oauth2/v2/auth")
assert "profile" not in target
resp = client.post("/auth/login?as_user=Google", data={"fetch_profile": "false"})
assert resp.status_code == 302
target = resp.headers.get("Location")
assert target.startswith("https://accounts.google.com/o/oauth2/v2/auth")
assert "profile" not in target
def test_users_get_redirected_to_full_oauth_consent_screen_with_optin(
client_without_db: testing.FlaskClient,
):
client = client_without_db
resp = client.post("/auth/login?as_user=Google", data={"fetch_profile": "true"})
assert resp.status_code == 302
target = resp.headers.get("Location")
assert target.startswith("https://accounts.google.com/o/oauth2/v2/auth")
assert "profile" in target
def test_logout_clears_the_session(
client_without_db: testing.FlaskClient,
):
client = client_without_db
app = client.application
with set_user(app, as_user(client)):
with app.app_context():
with client.session_transaction() as session:
session["something_else"] = True
# request /maintenance as it doesn't use the database
resp = client.get("/maintenance")
assert resp.status_code == 200
with client.session_transaction() as session:
assert "user_info" in session
assert "something_else" in session
resp = client.get("/auth/logout")
assert resp.status_code == 302
assert resp.headers.get("Location") == "http://localhost/"
with client.session_transaction() as session:
assert "user_info" not in session
assert "something_else" not in session
@contextlib.contextmanager
def patch_query(model: str):
module, name = model.rsplit(".", 1)
model_cls = getattr(__import__(module, fromlist=[name]), name)
query = unittest.mock.MagicMock()
model_cls.query = query
try:
yield query
finally:
# restore the class even if the test body raises
delattr(model_cls, "query")
def test_authorization_callback_success(
mocker: pytest_mock.MockFixture,
client_without_db: testing.FlaskClient,
):
client = client_without_db
mocker.patch("app.auth.routes.oauth.google.authorize_access_token")
mocker.patch("app.auth.routes.oauth.google.parse_id_token")
# normally one should do
# query = mocker.patch("data.models.user.User.query")
# but this makes issues if there is no database available
with patch_query("data.models.user.User") as query:
oauth.google.authorize_access_token.return_value = {"access_token": "TOKEN"}
oauth.google.parse_id_token.return_value = UserInfo(regular_user_info())
query.filter_by.one_or_none.return_value = True
resp = client.get("/auth/authorized")
assert resp.status_code == 302
assert resp.headers.get("Location") == "http://localhost/"
oauth.google.authorize_access_token.assert_called_once()
oauth.google.parse_id_token.assert_called_once_with({"access_token": "TOKEN"})
with client.session_transaction() as session:
assert "user_info" in session
# TODO: Re-enable this test.
# def test_authorization_callback_access_denied(mocker, client_without_db):
# client = client_without_db
# mocker.patch('app.auth.routes.oauth.google.authorize_access_token')
# mocker.patch('app.auth.routes.oauth.google.get')
# oauth.google.authorize_access_token.return_value = None
#
# resp = client.get('/auth/authorized')
#
# #assert resp.status_code == 200
# #assert b'Access denied' in resp.data
#
# oauth.google.authorize_access_token.assert_called_once()
# with client.session_transaction() as session:
# assert 'user_info' not in session
def test_authorization_callback_access_denied_with_reason(
mocker: pytest_mock.MockFixture,
client_without_db: testing.FlaskClient,
):
client = client_without_db
mocker.patch("app.auth.routes.oauth.google.authorize_access_token")
mocker.patch("app.auth.routes.oauth.google.get")
oauth.google.authorize_access_token.return_value = None
resp = client.get(
"/auth/authorized?error_reason=testing_unauthenticated&error_description=just+testing"
)
assert resp.status_code == 200
assert b"Access denied" in resp.data
assert b"testing_unauthenticated" in resp.data
assert b"just testing" in resp.data
assert not oauth.google.authorize_access_token.called
with client.session_transaction() as session:
assert "user_info" not in session
def test_authorization_callback_redirect(
mocker: pytest_mock.MockFixture,
client_without_db: testing.FlaskClient,
):
client = client_without_db
mocker.patch("app.auth.routes.oauth.google.authorize_access_token")
mocker.patch("app.auth.routes.oauth.google.parse_id_token")
# normally one should do
# query = mocker.patch("data.models.user.User.query")
# but this makes issues if there is no database available
with patch_query("data.models.user.User") as query:
oauth.google.authorize_access_token.return_value = {"access_token": "TOKEN"}
oauth.google.parse_id_token.return_value = UserInfo(regular_user_info())
query.filter_by.one_or_none.return_value = True
with client.session_transaction() as session:
session["redirect_path"] = "/FOO"
resp = client.get("/auth/authorized")
assert resp.status_code == 302
assert resp.headers.get("Location") == "http://localhost/FOO"
oauth.google.authorize_access_token.assert_called_once()
oauth.google.parse_id_token.assert_called_once_with({"access_token": "TOKEN"})
with client.session_transaction() as session:
assert "user_info" in session
assert "redirect_path" not in session
@pytest.mark.integration
def test_blocked_user(mocker: pytest_mock.MockFixture, client: testing.FlaskClient):
mocker.patch("app.auth.routes.oauth.google.authorize_access_token")
mocker.patch("app.auth.routes.oauth.google.parse_id_token")
oauth.google.authorize_access_token.return_value = {"access_token": "TOKEN"}
oauth.google.parse_id_token.return_value = UserInfo(blocked_user_info())
resp = client.get("/auth/authorized")
assert resp.status_code == 302
assert resp.headers.get("Location") == "http://localhost/"
oauth.google.authorize_access_token.assert_called_once()
oauth.google.parse_id_token.assert_called_once_with({"access_token": "TOKEN"})
with client.session_transaction() as session:
assert "user_info" in session
resp = client.get("/")
assert resp.status_code == 200
with client.session_transaction() as session:
assert "user_info" not in session
"""
Core bioinformatics abstractions and I/O.
""" |
import base64
import collections
import factorio
import gzip
import lupa
from PIL import Image
import re
import StringIO
def make_table(lua_table):
if isinstance(lua_table, unicode) or isinstance(lua_table, int) \
or isinstance(lua_table, float):
return lua_table
keys = list(lua_table.keys())
if keys == range(1, len(keys) + 1):
return [make_table(lua_table[i + 1]) for i in range(len(keys))]
val = dict((key, make_table(lua_table[key])) for key in keys)
table_clazz = collections.namedtuple('Struct', keys)
return table_clazz(**val)
def json_results(blueprint):
fd = StringIO.StringIO(base64.b64decode(blueprint))
with gzip.GzipFile(fileobj=fd) as gzfd:
string = gzfd.read()
subs = {
'basic-accumulator': 'accumulator',
'basic-armor': 'light-armor',
'basic-beacon': 'beacon',
'basic-bullet-magazine': 'firearm-magazine',
'basic-exoskeleton-equipment': 'exoskeleton-equipment',
'basic-grenade': 'grenade',
'basic-inserter': 'inserter',
'basic-laser-defense-equipment': "personal-laser-defense-equipment",
'basic-mining-drill': "electric-mining-drill",
'basic-modular-armor': "modular-armor",
'basic-splitter': "splitter",
'basic-transport-belt': "transport-belt",
'basic-transport-belt-to-ground': "underground-belt",
'express-transport-belt-to-ground': "express-underground-belt",
'fast-transport-belt-to-ground': "fast-underground-belt",
'piercing-bullet-magazine': "piercing-rounds-magazine",
'smart-chest': "steel-chest",
'smart-inserter': "filter-inserter"
}
string = re.sub('(\\w|-)+', lambda m: subs[m.group(0)] if m.group(0) in subs else m.group(0), string)
lua_table = lupa.LuaRuntime().execute(string)
return make_table(lua_table)
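# Minimal illustration (a sketch, not part of the original module) of the
# token-wise name substitution used in json_results() above: each word-like
# token is looked up in a mapping and replaced when an entry exists.
def _modernise_names(names_map, text):
    # e.g. _modernise_names({'basic-inserter': 'inserter'}, 'basic-inserter at 1,2')
    # returns 'inserter at 1,2'; unknown tokens pass through unchanged.
    return re.sub('(\\w|-)+',
                  lambda m: names_map.get(m.group(0), m.group(0)), text)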
def get_blueprint_image(data, entity, entity_data):
print entity
entity_data = entity_data[entity.name]
if entity_data.animation is None:
return (Image.new("RGBA", (0, 0)), (0, 0))  # Skip me for now.
# Grab the base image that we want to display. The first frame of the
# animation should do fine.
ani = entity_data.animation
if isinstance(ani, list):
return (Image.new("RGBA", (0, 0)), (0, 0))  # Skip me for now.
base_image = Image.open(
StringIO.StringIO(data.load_path(ani['filename'])))
# For directions, select the row based on direction.
y = 0
if hasattr(entity, 'direction'):
y = entity.direction * ani['height']
# The region we want to extract is [x, y] -> [x + width, y + height]
region = base_image.crop((ani['x'], y, ani['x'] + ani['width'], y + ani['height']))
# Okay, where do we stick this? The entity is centered on the position in
# the blueprint (measured in tiles, so multiply by 32 to get actual value).
# We need the upper-left. So stick the center on the position indicated.
image_center = [32 * (entity.position.x + ani['shift'][0]), 32 * (entity.position.y + ani['shift'][1])]
top_left = [image_center[0] - region.width / 2, image_center[1] - region.height / 2]
return (region, (int(top_left[0]), int(top_left[1])))
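# Worked example of the centering arithmetic above (hypothetical numbers):
# an entity at position (5, 3) with ani['shift'] == (0.5, 0) gives
# image_center == [32 * 5.5, 32 * 3] == [176, 96]; a 64x64 region then has
# its top-left at [176 - 32, 96 - 32] == [144, 64].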
def make_image(blueprint_table):
data = factorio.load_factorio()
entity_data = data.load_pseudo_table('entity')
# Convert all entities into images
images = [get_blueprint_image(data, entity, entity_data)
for entity in blueprint_table.entities]
# Compute the bounds for the image. Note that some offsets are negative.
bounds = (0, 0, 0, 0)
for im, off in images:
bounds = (
min(off[0], bounds[0]),
min(off[1], bounds[1]),
max(off[0] + im.width, bounds[2]),
max(off[1] + im.height, bounds[3]))
# With those bounds, create the new image, and paste the images as
# necessary.
image = Image.new("RGBA", (bounds[2] - bounds[0], bounds[3] - bounds[1]))
for im, off in images:
image.paste(im, (off[0] - bounds[0], off[1] - bounds[1]), im)
image.show()
if __name__ == '__main__':
import sys
make_image(json_results(sys.argv[1]))
# project/_config.py
import os
# from datetime import timedelta
# Grabs the folder where the script runs.
basedir = os.path.abspath(os.path.dirname(__file__))
# Enable debug mode.
DEBUG = True
# Secret key for session management.
SECRET_KEY = ""
# Session lifetime (matches lifetime of Esri tokens)
# PERMANENT_SESSION_LIFETIME = timedelta(seconds=3600)
# ROKTECH CREDS for accessing 3RWW & CivicMapper ArcGIS Server services
ROK_USER = ''
ROK_PW = ''
ROK_AUTH_URL = 'https://arcgis4.roktech.net/arcgis/tokens/generateToken'
ROK_CLIENT_TYPE = 'requestip'
#ROK_CLIENT_TYPE = 'referer'
ROK_REFERER_URL = 'flush-it.civicmapper.com'
# AGOL CREDS for accessing 3RWW ArcGIS Online
ESRI_APP_CLIENT_ID = ''
ESRI_APP_CLIENT_SECRET = ''
ESRI_APP_TOKEN_EXPIRATION = '-1'
ESRI_AUTH_URL = 'https://www.arcgis.com/sharing/oauth2/token'
from selenium.webdriver.support.select import Select
class ProjectHelper:
def __init__(self, app):
self.app = app
project_cache = None
def open_home_page(self):
wd = self.app.wd
if not (wd.current_url.endswith("manage_proj_page.php") and
len(wd.find_elements_by_name("manage_proj_create_page_token")) > 0):
wd.find_element_by_link_text("Manage").click()
wd.find_element_by_link_text("Manage Projects").click()
def create(self, project):
wd = self.app.wd
self.open_home_page()
wd.find_element_by_xpath("//input[@value='Create New Project']").click()
self.fill_project_form(project)
self.open_home_page()
self.project_cache = None
def change_field_value(self, field_name, text):
wd = self.app.wd
if text is not None:
wd.find_element_by_name(field_name).click()
wd.find_element_by_name(field_name).clear()
wd.find_element_by_name(field_name).send_keys(text)
def fill_project_form(self, project):
wd = self.app.wd
self.change_field_value("name", project.name)
self.select_field_value("status", project.status)
self.select_field_value("view_state", project.view_status)
self.change_field_value("description", project.description)
wd.find_element_by_xpath("(//input[@value='Add Project'])").click()
def select_field_value(self, select_param, select_value):
wd = self.app.wd
if select_value is not None:
wd.find_element_by_name(select_param).click()
Select(wd.find_element_by_name(select_param)).select_by_visible_text(select_value)
wd.find_element_by_name(select_param).click()
def find_project_id_by_name(self, name):
wd = self.app.wd
self.open_home_page()
link = wd.find_element_by_link_text(name).get_attribute('href')
id_project = link.split('=')[1]
return id_project
def delete_project_by_index(self, name):
wd = self.app.wd
self.open_home_page()
wd.find_element_by_link_text(name).click()
wd.find_element_by_xpath("//input[@value='Delete Project']").click()
wd.find_element_by_xpath("//input[@value='Delete Project']").click()
from telebot import types, apihelper
from types import SimpleNamespace
from modules import jsondb, polling
import requests
import telebot
import base64
import json
import re
# Author: Roman Sergeev
# Email: [email protected]
# Telegram: @uheplm
# ------------------------------------
# If you are the programmer maintaining
# this bot, send any questions to the
# email address or Telegram above.
# ------------------------------------
# Load the bot configuration
config_yaml = jsondb.JSONDB('config/bot_config.yaml')
config = SimpleNamespace(**config_yaml.get())
# Create the bot instance
bot = telebot.TeleBot(config.api_token)
# Load the script (scenario) file
scenario = jsondb.JSONDB('config/script.yaml')
# Load the localization strings
string_yaml = jsondb.JSONDB('config/strings.yaml')
strings = SimpleNamespace(**string_yaml.get())
# Initialize the server polling object
polling = polling.Polling(bot)
# Variables holding per-user state
# user_states - current position in the menu
# user_registration - registration data
user_states = {}
user_registration = {}
# Error handler for unplanned bot restarts
# while a user is navigating the menu
def on_restart_error(bot_obj):
def decorator(fn):
def wrapper(call):
try:
fn(call)
except KeyError:
bot_obj.delete_message(call.from_user.id, call.message.message_id)
bot_obj.send_message(call.from_user.id, strings.ErrorMessage)
return wrapper
return decorator
# Event handler: new user in the chat
@bot.message_handler(content_types=['new_chat_members'])
def new_member(msg):
keyboard = types.InlineKeyboardMarkup()
keyboard.row(
types.InlineKeyboardButton(
text=strings.OpenButton,
url="https://t.me/" + config.bot_link + "?start"
)
)
bot.reply_to(msg, strings.WelcomeMessage, reply_markup=keyboard)
# Event handler: the help and start commands
@bot.message_handler(commands=['help', 'start'])
def help_cmd(msg):
keyboard = types.InlineKeyboardMarkup()
keys = list(scenario.get().keys())
user_states[msg.from_user.id] = []
for i in scenario.get():
keyboard.row(
types.InlineKeyboardButton(
text=str(i),
callback_data="open_" + str(keys.index(i))
)
)
bot.send_message(
msg.from_user.id,
strings.StartHeader,
reply_markup=keyboard,
parse_mode='HTML'
)
# Event handler: button press
@bot.callback_query_handler(func=lambda call: call)
@on_restart_error(bot)
def callback_inline(call):
bot.answer_callback_query(call.id, strings.ToastLoading, show_alert=False)
    # Menu navigation buttons
if call.data.startswith('open_'):
load = call.data.replace("open_", '')
        open_menu(call, load)
    # Back button
if call.data.startswith('back_'):
user_states[call.from_user.id] = user_states[call.from_user.id][:-1]
if user_states[call.from_user.id]:
            open_menu(call)
else:
keys = list(scenario.get().keys())
user_states[call.from_user.id] = []
keyboard = types.InlineKeyboardMarkup()
for i in scenario.get():
keyboard.row(
types.InlineKeyboardButton(
text=str(i),
callback_data="open_" + str(keys.index(i))
)
)
bot.edit_message_text(
chat_id=call.from_user.id,
message_id=call.message.message_id,
text=strings.StartHeader,
reply_markup=keyboard,
parse_mode='HTML'
)
    # Registration button
if call.data.startswith('reg_'):
keyboard = types.InlineKeyboardMarkup()
user = {
"msg": call.message.message_id,
"state": 0,
"name": '',
"email": '',
"phone": ''
}
keyboard.row(
types.InlineKeyboardButton(
text='Cancel',
callback_data="cancel_"
)
)
user_registration[call.from_user.id] = user
bot.edit_message_text(
chat_id=call.from_user.id,
message_id=call.message.message_id,
text=strings.RegPhone,
reply_markup=keyboard,
parse_mode='HTML'
)
    # Registration confirmation button
if call.data.startswith('compreg_'):
if call.from_user.id in user_registration:
userdata = {
'phone': user_registration[call.from_user.id]['phone'],
'email': user_registration[call.from_user.id]['email'],
'first_name': (
user_registration[call.from_user.id]['name']
.split(' ')[0]
),
'last_name': (
user_registration[call.from_user.id]['name']
.split(' ')[1]
)
}
complete = register(userdata, call)
keyboard = types.InlineKeyboardMarkup()
if complete['success']:
keyboard.row(
types.InlineKeyboardButton(
text=strings.RegOpen,
url=strings.RegLink
),
types.InlineKeyboardButton(
text='to menu',
callback_data='open_'
)
)
else:
keyboard.row(
types.InlineKeyboardButton(
text='to menu',
callback_data='open_'
)
)
bot.edit_message_text(
chat_id=call.from_user.id,
message_id=user_registration[call.from_user.id]['msg'],
text=(
strings.RegComplete if
complete['success'] else
strings.RegFailed.format(complete['error'])),
reply_markup=keyboard,
parse_mode='HTML'
)
else:
raise KeyError
    # Registration cancel button
if call.data.startswith('cancel_'):
if call.from_user.id in user_registration:
keyboard = types.InlineKeyboardMarkup()
keyboard.row(
types.InlineKeyboardButton(
text='to menu',
callback_data='open_'
)
)
bot.edit_message_text(
chat_id=call.from_user.id,
message_id=user_registration[call.from_user.id]['msg'],
text=strings.RegCanceled,
reply_markup=keyboard,
parse_mode='HTML'
)
del user_registration[call.from_user.id]
# Text message handler
# Used during the registration flow
@bot.message_handler(content_types=['text'])
@on_restart_error(bot)
def reg_handler(msg):
    # Regular expression patterns
phone_pattern = re.compile(config.regex_phone)
email_pattern = re.compile(config.regex_email)
keyboard = types.InlineKeyboardMarkup()
keyboard.row(
types.InlineKeyboardButton(
text='Cancel',
callback_data="cancel_"
)
)
if msg.from_user.id in user_registration:
if user_registration[msg.from_user.id]['state'] == 0:
if re.match(phone_pattern, msg.text):
user_registration[msg.from_user.id]['phone'] = msg.text
bot.edit_message_text(
chat_id=msg.from_user.id,
message_id=user_registration[msg.from_user.id]['msg'],
text=strings.RegEmail,
reply_markup=keyboard,
parse_mode='HTML'
)
user_registration[msg.from_user.id]['state'] += 1
else:
bot.edit_message_text(
chat_id=msg.from_user.id,
message_id=user_registration[msg.from_user.id]['msg'],
text=strings.ErrorReg,
reply_markup=keyboard,
parse_mode='HTML'
)
elif user_registration[msg.from_user.id]['state'] == 1:
if re.match(email_pattern, msg.text):
user_registration[msg.from_user.id]['email'] = msg.text
bot.edit_message_text(
chat_id=msg.from_user.id,
message_id=user_registration[msg.from_user.id]['msg'],
text=strings.RegName,
reply_markup=keyboard,
parse_mode='HTML'
)
user_registration[msg.from_user.id]['state'] += 1
else:
bot.edit_message_text(
chat_id=msg.from_user.id,
message_id=user_registration[msg.from_user.id]['msg'],
text=strings.ErrorReg,
reply_markup=keyboard,
parse_mode='HTML'
)
elif user_registration[msg.from_user.id]['state'] == 2:
if len(msg.text.split(' ')) == 2:
user_registration[msg.from_user.id]['name'] = msg.text
accept = types.InlineKeyboardMarkup()
accept.row(
types.InlineKeyboardButton(
text='Yes',
callback_data="compreg_"
),
types.InlineKeyboardButton(
text='No',
callback_data="cancel_"
)
)
bot.delete_message(
msg.from_user.id,
user_registration[msg.from_user.id]['msg'],
)
bot.send_message(
chat_id=msg.from_user.id,
text=strings.RegEnd.format(
name=user_registration[msg.from_user.id]['name'],
email=user_registration[msg.from_user.id]['email'],
phone=user_registration[msg.from_user.id]['phone']
),
reply_markup=accept,
parse_mode='HTML'
)
user_registration[msg.from_user.id]['state'] = 0
else:
bot.edit_message_text(
chat_id=msg.from_user.id,
message_id=user_registration[msg.from_user.id]['msg'],
text=strings.ErrorReg,
reply_markup=keyboard,
parse_mode='HTML'
)
# Menu navigation function (renamed from `open` to avoid shadowing the builtin)
def open_menu(call, load=False):
keyboard = types.InlineKeyboardMarkup()
if load:
user_states[call.from_user.id].append(int(load))
keys_request = []
if user_states[call.from_user.id]:
keys = list(scenario.get().keys())
for i in user_states[call.from_user.id]:
keys_request.append(keys[i])
keys = (
list(scenario.get(keys_request).keys()) if
isinstance(scenario.get(keys_request), dict)
else []
)
if isinstance(scenario.get(keys_request), str):
if '$perform_reg' in scenario.get(keys_request):
keyboard.row(
types.InlineKeyboardButton(
text=strings.PerformRegButton,
callback_data='reg_'
)
)
keyboard.row(
types.InlineKeyboardButton(
text='Back',
callback_data='back_'
)
)
bot.edit_message_text(
chat_id=call.from_user.id,
message_id=call.message.message_id,
text=scenario.get(keys_request).replace('$perform_reg', ''),
reply_markup=keyboard,
parse_mode='HTML'
)
if isinstance(scenario.get(keys_request), dict):
keys_top = list(scenario.get(keys_request).keys())
for i in scenario.get(keys_request):
keyboard.row(
types.InlineKeyboardButton(
text=str(i),
callback_data="open_" + str(keys_top.index(i))
)
)
keyboard.row(
types.InlineKeyboardButton(
text='Back',
callback_data='back_'
)
)
bot.edit_message_text(
chat_id=call.from_user.id,
message_id=call.message.message_id,
text=strings.PageHeader,
reply_markup=keyboard,
parse_mode='HTML'
)
# Build the registration request to the GetCourse site
# (stub: currently it just echoes the collected data back to the user)
def register(userdata, call):
bot.send_message(call.from_user.id, userdata)
return {"success": True}
# Start polling the server
polling.start()
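# A sketch of the expected config/bot_config.yaml keys, inferred from the
# attribute accesses above (values are placeholders, not real credentials):
#   api_token: "123456:ABC-DEF..."
#   bot_link: "my_bot"
#   regex_phone: "..."
#   regex_email: "..."
# strings.yaml must provide at least: ErrorMessage, OpenButton, WelcomeMessage,
# StartHeader, ToastLoading, PerformRegButton, PageHeader, RegPhone, RegEmail,
# RegName, RegEnd, RegComplete, RegFailed, RegCanceled, RegOpen, RegLink, ErrorReg.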
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
__version__ = '1.0.1'
setup(
name='google_music_manager_auth',
python_requires=">=3",
version=__version__,
packages=find_packages(),
author="Jay MOULIN",
author_email="[email protected]",
description="Google MusicManager package to manage your music library to Google Music - Auth module",
long_description=open('README.rst').read(),
install_requires=["gmusicapi"],
include_package_data=True,
url='http://github.com/jaymoulin/google-music-manager/',
classifiers=[
"Development Status :: 5 - Production/Stable",
"Programming Language :: Python",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Topic :: Communications :: File Sharing",
"Topic :: Artistic Software",
"Topic :: Internet :: File Transfer Protocol (FTP)",
"Topic :: Home Automation",
"Topic :: Internet",
"Topic :: Multimedia :: Sound/Audio",
],
entry_points={
'console_scripts': [
'google-music-auth = google_music_manager_auth.auth:main',
],
},
license="MIT",
)
|
# -------------------------------------------------------------------------------------------------------------------------------------------------------------
import sys
# -------------------------------------------------------------------------------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------------------------------------------------------------------------------
# Main Program
# -------------------------------------------------------------------------------------------------------------------------------------------------------------
if len(sys.argv) == 2 and sys.argv[1] == "--help":
print "TXT"
print "Operand"
print "Operator (1+ 2- 3* 4/)"
print "Operand"
else:
n1 = float(sys.argv[1])
o = float(sys.argv[2])
n2 = float(sys.argv[3])
    if o == 1: print n1 + n2
    elif o == 2: print n1 - n2
    elif o == 3: print n1 * n2
    elif o == 4:
        if n2 == 0: print "Error: division by zero"
        else: print n1 / n2
    else: print "Error"
|
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import pip
# numpy and scipy are necessary for scikit-learn
# NOTE: invoking pip from inside setup.py only works with pip < 10;
# pip.main() was removed from the public API in later releases.
pip.main(['install', 'numpy'])
pip.main(['install', 'scipy'])
setup(
name='skool',
version='0.1.0',
author='Michal Vlasak',
author_email='[email protected]',
packages=['skool'],
scripts=[],
url='https://github.com/daeatel/skool',
license='LICENSE.txt',
description='Online school package',
long_description=open('README.md').read(),
install_requires=[
"BeautifulSoup == 3.2.1",
"blinker == 1.3",
"Distance == 0.1.3",
"elasticsearch == 1.4.0",
"iso8601 == 0.1.10",
"jusText == 2.1.1",
"lmdb == 0.84",
"mongoengine == 0.8.7",
"pymongo == 2.8",
"pytz == 2015.2",
"requests == 2.6.0",
"scikit-learn == 0.15.2",
"sumy == 0.3.0",
"textblob == 0.9.0",
],
)
|
import colorsys
import numpy as np
import RPi.GPIO as GPIO
import time
rPin = 26
gPin = 19
bPin = 13
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(rPin, GPIO.OUT)
GPIO.setup(gPin, GPIO.OUT)
GPIO.setup(bPin, GPIO.OUT)
GPIO.output(rPin, GPIO.LOW)
GPIO.output(gPin, GPIO.LOW)
GPIO.output(bPin, GPIO.LOW)
red = GPIO.PWM(rPin, 100)
green = GPIO.PWM(gPin, 100)
blue = GPIO.PWM(bPin, 100)
red.start(0)
green.start(0)
blue.start(0)
def changeColor(r_value, g_value, b_value):
red.ChangeDutyCycle(r_value)
green.ChangeDutyCycle(g_value)
blue.ChangeDutyCycle(b_value)
while True:
    for h in range(360):
        h_value = h / 360.0  # hue in [0, 1)
        # scale the RGB triple from [0, 1] to PWM duty cycles in [0, 100]
        array = np.multiply([colorsys.hsv_to_rgb(h_value, 1, 1)], [100.0, 100.0, 100.0])
        r, g, b = array.ravel()
        changeColor(r, g, b)
        time.sleep(0.06)
|
'''
Copyright 2016, EMC, Inc.
Author(s):
George Paulos
'''
import os
import sys
import subprocess
# set path to common libraries
sys.path.append(subprocess.check_output("git rev-parse --show-toplevel", shell=True).rstrip("\n") + "/test/fit_tests/common")
import fit_common
# Select test group here using @attr
from nose.plugins.attrib import attr
@attr(all=True, regression=True, smoke=True)
class rackhd11_api_lookups(fit_common.unittest.TestCase):
def setUp(self):
# delete any instance of test lookup
api_data = fit_common.rackhdapi("/api/2.0/lookups")
for item in api_data['json']:
if item['macAddress'] == "00:0a:0a:0a:0a:0a":
fit_common.rackhdapi("/api/2.0/lookups/" + item['id'], action="delete")
    def test_api_11_lookups_ID(self):
        api_data = fit_common.rackhdapi("/api/1.1/lookups")
        self.assertEqual(api_data['status'], 200, 'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))
        for item in api_data['json']:
            item_data = fit_common.rackhdapi("/api/1.1/lookups/" + item['id'])
            self.assertEqual(item_data['status'], 200, 'Incorrect HTTP return code, expected 200, got:' + str(item_data['status']))
# this test cross-references node MAC addresses to lookup tables
def test_api_11_lookups_cross_reference(self):
nodecatalog = fit_common.rackhdapi("/api/1.1/nodes")['json']
lookuptable = fit_common.rackhdapi("/api/1.1/lookups")['json']
errorlist = ""
for node in nodecatalog:
# get list of compute nodes with sku
if node['type'] == "compute" and 'sku' in node and 'identifiers' in node:
# find node entry mac addresses
for macaddr in node['identifiers']:
# find mac address in lookup table
for lookupid in lookuptable:
                    # verify node ID for mac address
                    if macaddr in lookupid['macAddress']:
                        if fit_common.VERBOSITY >= 2:
                            print "*** Checking Node ID: " + node['id'] + " MAC: " + macaddr
                        if 'node' not in lookupid:
                            errorlist = errorlist + "Missing node ID: " + node['id'] + " MAC: " + macaddr + "\n"
                        elif node['id'] != lookupid['node']:
                            errorlist = errorlist + "Wrong node in lookup table ID: " + lookupid['id'] + "\n"
if errorlist != "":
print "**** Lookup Errors:"
print errorlist
self.assertEqual(errorlist, "", "Errors in lookup table detected.")
def test_api_11_lookups_post_get_delete(self):
node = fit_common.node_select()[0]
data_payload = {
"macAddress": "00:0a:0a:0a:0a:0a",
"ipAddress": "128.128.128.128",
"node": node
}
api_data = fit_common.rackhdapi("/api/1.1/lookups", action="post", payload=data_payload)
self.assertEqual(api_data['status'], 200, 'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))
lookup_id = api_data['json']['id']
api_data = fit_common.rackhdapi("/api/1.1/lookups/" + lookup_id)
self.assertEqual(api_data['status'], 200, 'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))
self.assertEqual(api_data['json']['macAddress'], "00:0a:0a:0a:0a:0a", "Bad lookup MAC Address")
self.assertEqual(api_data['json']['ipAddress'], "128.128.128.128", "Bad lookup IP Address")
self.assertEqual(api_data['json']['node'], node, "Bad lookup node ID")
api_data = fit_common.rackhdapi("/api/1.1/lookups/" + lookup_id, action="delete")
self.assertEqual(api_data['status'], 200, 'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))
if __name__ == '__main__':
fit_common.unittest.main() |
import os
DATA_PATH = os.path.join("raw_data")
RESULTS_PATH = os.path.join("results")
|
from os import system
import math
import threading
import time
class Py3status:
    """
    Empty and basic py3status class.
    NOTE: py3status will NOT execute:
        - methods starting with '_'
        - methods decorated by @property and @staticmethod
    NOTE: reserved method names:
        - 'kill' method for py3status exit notification
        - 'on_click' method for click events from i3bar
    """
    def __init__(self):
        self.c = CountDownTimer(1500)
        self.notified = False
def kill(self, i3status_output_json, i3status_config):
pass
def pomodoro(self, i3status_output_json, i3status_config):
"""
This method will return an empty text message
so it will NOT be displayed on your i3bar.
If you want something displayed you should write something
in the 'full_text' key of your response.
See the i3bar protocol spec for more information:
http://i3wm.org/docs/i3bar-protocol.html
"""
        if self.c.get_time() == "0:00" and not self.notified:
            system("notify-send --urgency=critical pomodoro done!")
            self.notified = True
response = {'full_text': "pomodoro " + str(self.c.get_time()), 'name': 'pomodoro', 'instance': 'first'}
if (self.c.get_counter() > 900):
response.update({'color': i3status_config['color_good']})
elif (self.c.get_counter() > 300):
response.update({'color': i3status_config['color_degraded']})
else:
response.update({'color': i3status_config['color_bad']})
return (0, response)
    def on_click(self, json, i3status_config, event):
        """
        Start the timer on left click, reset it on right click.
        """
        if event['button'] == 1:
            self.c.start()
            system("notify-send pomodoro started")
        elif event['button'] == 3:
            self.c = CountDownTimer(1500)
            system("notify-send pomodoro reset")
class Timer(threading.Thread):
def __init__(self, seconds):
self.running = False
self.runTime = seconds
self.counter = seconds
threading.Thread.__init__(self)
def run(self):
time.sleep(self.runTime)
class CountDownTimer(Timer):
    def run(self):
        self.running = True
        self.counter = self.runTime
        for sec in range(self.runTime):
            time.sleep(1.0)
            self.counter -= 1
        self.running = False
def get_counter(self):
return self.counter
def get_time(self):
minutes = self.counter / 60
seconds = self.counter % 60
return str(math.floor(minutes)) + ":" + str(seconds).zfill(2)
def is_running(self):
return self.running
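# A minimal usage sketch (outside of py3status - hypothetical, for testing):
#   t = CountDownTimer(3)
#   t.start()        # counts down in a background thread
#   t.get_time()     # e.g. "0:02" while running
#   t.is_running()   # True until the countdown completes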
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#add the path of the twitter egg
import sys
egg_path = '/home/users/web/........./cgi-bin/PyPkg/twitter-1.14.3-py2.7.egg'
sys.path.append(egg_path)
# Import the CGI, string, sys, and md5crypt modules
import json, urllib2, re, time, datetime, sys, cgi, os
import sqlite3
import MySQLdb as mdb
import string, random
from urlparse import urlparse
from twitter import *
from tempfile import TemporaryFile
from collections import *
from py_site_header import *
def thisPYfile():
return 'twit_analytics.py'
def define_keys():
CONSUMER_KEY="......................"
CONSUMER_SECRET="...................."
ACCESS_TOKEN="..........................."
ACCESS_TOKEN_SECRET="...................................."
return CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN, ACCESS_TOKEN_SECRET
def start_database_to_store_tweets():
dbhost="......................" # Host name
dbuser="......." # Mysql username
dbpswd="......." # Mysql password
dbname = '........' # MySql db
try:
conn = mdb.connect(host=dbhost,user=dbuser,passwd=dbpswd,db=dbname)
c = conn.cursor()
return c, True, conn
except mdb.Error, e:
return e, False
def site_header(st=''):
site_start()
print '</div>'
site_title(st)
def site_start():
print '''
Content-type:text/html\r\n\r\n
<html>
<div class="wrap" id="wrap_id">
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>Financial Models</title>
<script src="https://ajax.googleapis.com/ajax/libs/jquery/1.11.3/jquery.min.js"></script>
<script type="text/javascript" src="../js/js_functions.js"></script>
<link rel="stylesheet" href="http://www.w3schools.com/lib/w3.css">
<link rel="stylesheet" href="http://www.w3schools.com/lib/w3-theme-indigo.css">
<link href='http://code.ionicframework.com/ionicons/2.0.1/css/ionicons.min.css' rel='stylesheet' type='text/css'>
<link rel="stylesheet" href="http://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.4.0/css/font-awesome.min.css">
<style>
a:link { text-decoration: none; }
a:visited { text-decoration: none; }
a:hover { text-decoration: none; }
a:active { text-decoration: none; }
</style>
</head>
<body>
'''
def site_title(s_title):
print '''
<div id="site_title" class="w3-container w3-theme-d4 w3-center w3-padding-jumbo">
<p> </p>
<div class="w3-row w3-jumbo">
'''
print s_title
print '''
<br>
</div>
</div>
'''
def site_footer():
import datetime
curr_year = datetime.datetime.now().strftime("%Y")
print '<div class="w3-container w3-border-top" style="text-align:center">'
print '<p> © 2013-'+curr_year+' | '
print '<a>Contact Us</a> </p>'
print '<p><a href="./termsofuse.py">Terms of Use</a> |',
print '<a href="./home.py#aboutus">About Us</a> </p>'
print '</div>'
print '</form>'
print ' </body>'
print ' </div>' #for the div id = wrap
print ' </html>'
def html_start():
    # Start the HTML block
site_header('Twitter Analytics')
def html_end():
site_footer()
def top_list(in_l,topx):
    # function to get the top N items in a list
    # needed because Python 2.6 does not have collections.Counter
counter = {}
for i in in_l:
counter[i] = counter.get(i, 0) + 1
final_dict = sorted([ (freq,word) for word, freq in counter.items() ], reverse=True)[:topx]
return final_dict
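# A quick illustration of top_list (hypothetical values):
#   top_list(['a', 'b', 'a', 'c', 'a', 'b'], 2)  ->  [(3, 'a'), (2, 'b')]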
def text_sanitize(in_text):
out_text = in_text.replace("'","")
out_text = out_text.replace("\""," ").replace("\\"," ").replace("="," ").replace("''",'\"').replace("' '",'\"')
return out_text
def generate_form():
html_start()
print '<div id="body_sty">'
print '<p>Explore the world of Twitter and discover information about twitter users, their friends and followers as well as lexical analysis of the tweets.</p>'
print '<TABLE style="display: block;" BORDER = 0>'
print "<FORM METHOD = post ACTION=\'"+thisPYfile()+"\'>"
print "<TR><TH align=\"left\">Screen Name:</TH><TD><INPUT type = text name=\"scn_name\"></TD><TR>"
print "</TABLE>"
print "<INPUT TYPE = hidden NAME = \"action\" VALUE = \"display\">"
print "<INPUT TYPE = submit VALUE = \"Enter\">"
print "</FORM>"
print '</div>'
html_end()
def user_public_info(find_id_for):
#html_start()
#this line gets the public info for the user
print '<h2>'+'\nUsers Public Info'+'</h2>'
do_rest_of_module = 0
try:
t = Twitter(auth=OAuth(define_keys()[2],define_keys()[3],define_keys()[0],define_keys()[1]))
response = t.users.lookup(screen_name=find_id_for)
do_rest_of_module = 1
except:
print '<p>', 'Error getting public data' ,'</p>'
if do_rest_of_module == 1:
print '<h3>'+'\nBasic Info for: ', find_id_for+'</h3>'
print '<p>', '\tKey Data' ,'</p>'
print '<ul>'
print '<li>ID:',response[0]['id'],'</li>'
print '<li>Screen Name:',response[0]['screen_name'],'</li>'
print '<li>Name:',response[0]['name'] ,'</li>'
print '<li>Location:',response[0]['location'] ,'</li>'
print '<li>Friends:',response[0]['friends_count'] ,'</li>'
print '<li>Followers:',response[0]['followers_count'] ,'</li>'
print '<li>Messages posted:',response[0]['statuses_count'] ,'</li>'
print '</ul>'
def get_last200_tweets(in_user):
#this method will get the last 200 tweets of the user
#rate limit is 180 requests per 15 min window
#print '<h2>'+'\nAnalysis of Past Tweets for',in_user,'</h2>'
do_rest_of_module = 0
try:
t = Twitter(auth=OAuth(define_keys()[2],define_keys()[3],define_keys()[0],define_keys()[1]))
response=t.statuses.user_timeline(screen_name=in_user,count=200)
#print '<p>', '\tResponses left:', response.headers['x-rate-limit-remaining'] ,'</p>'
#print '<p>Line 201. Response length: ',len(response),'</p>'
if len(response) > 0:
do_rest_of_module = 1
else:
print '<p>', 'No info found for: ',in_user ,'</p>'
except:
print '<p>', 'Error getting tweets info for: ',in_user ,'</p>'
if do_rest_of_module == 1:
base_twit_list = []
data_for_plots = []
x = response
#x = [element.lower() for element in response] #x is list - LOWER CASE
hashtag_list = [] #start an empty list of hashtags
at_list = [] #start an empty list of twitter IDs
re_twt_list = [] #start a list of retweets
#get the start and end dates
sdf = x[0]['created_at'] #get the full date of last tweet
start_date = datetime.date(int(sdf[26:30]), int(time.strptime(sdf[4:7],'%b').tm_mon), int(sdf[8:10]))
edf = x[len(x)-1]['created_at'] #get the full date of first tweet
end_date = datetime.date(int(edf[26:30]), int(time.strptime(edf[4:7],'%b').tm_mon), int(edf[8:10]))
#end_date = str(edf[8:10])+'-'+str(edf[4:7])+'-'+str(edf[26:30])
twit_day_range = (start_date-end_date).days
avg_twit_day = (1.0*len(x)/max(1,twit_day_range))
print >> t2, '<h4>'+'Tweet Stats for ', in_user+'</h4>'
#print x[0]
#print '\tStats for last',len(x), 'tweets by',in_user
fix_nm = x[0]['user']['screen_name']
try:
if str(x[0]['user']['name']).decode('ascii'): fix_nm = str(x[0]['user']['name'])
except:
#print 'something wrong with the name for ', x[0]['user']['name']
fix_nm = x[0]['user']['screen_name']
print >> t2, '<ul>'
print >> t2, '<li>Key Personal Data</li>'
print >> t2, '<ul>'
print >> t2, '<li>ID:',x[0]['user']['id'],'</li>'
print >> t2, '<li>Screen Name:',x[0]['user']['screen_name'],'</li>'
print >> t2, '<li>Name:',fix_nm,'</li>'
#print '<li>Location:',x[0]['user']['location'],'</li>'
print >> t2, '<li>Friends:',x[0]['user']['friends_count'] ,'</li>'
print >> t2, '<li>Followers:',x[0]['user']['followers_count'] ,'</li>'
print >> t2, '<li>Messages posted:',x[0]['user']['statuses_count'] ,'</li>'
foll_frnd_rat = 1.0*x[0]['user']['followers_count'] / max(1,x[0]['user']['friends_count'])
print >> t2, '<li>Follower to Friend Ratio:', '%.1f' %(foll_frnd_rat),'</li>'
print >> t2, '</ul>'
print >> t2, '</ul>'
print >> t2, '<ul>'
print >> t2, '<li>',len(x),'tweets in past',twit_day_range,'days',
print >> t2, '(',end_date,'to',start_date,')' ,'</li>'
print >> t2, '<li>', 'Avg of ','%.1f' %(avg_twit_day),'tweets per day' ,'</li>'
#add info to the data for charts list
data_for_plots.extend([x[0]['user']['screen_name']])
data_for_plots.extend([x[0]['user']['friends_count']])
data_for_plots.extend([x[0]['user']['followers_count']])
data_for_plots.extend([x[0]['user']['statuses_count']])
data_for_plots.extend([twit_day_range])
data_for_plots.extend([len(x)])
for item in x:
            # encode(..., 'ignore') converts the text, dropping characters that cannot be encoded
td = item['created_at']
twt_date = datetime.date(int(td[26:30]), int(time.strptime(td[4:7],'%b').tm_mon), int(td[8:10]))
fix_nm = item['user']['screen_name']
try:
if str(item['user']['name']).encode('utf8','ignore'): fix_nm = str(item['user']['name'])
except:
fix_nm = item['user']['screen_name']
try:
fix_text = text_sanitize(item['text'].encode('utf8','ignore'))
except:
#print 'something wrong with the text in tweet for: ',in_user
fix_text = 'Did not process'
#print fix_text,'\t',type(item['text']),'\t',len(item['text']),'\t',item['text'],
twt_list_data = [twt_date] + [fix_nm.lower()] + [fix_text]
try:
base_twit_list.append(twt_list_data)
except:
print '<p>Unknown Error:', type(twt_list_data), twt_list_data, '</p>'
textitem = fix_text
newhastags = re.findall('[#]\w+',textitem)
newatitems = re.findall('[@]\w+',textitem)
re_tweets = re.findall('RT',textitem)
#before adding to the final lists, convert the hashtags and atitems
#to lower case. This will avoid issues of double counting same names
newhastags = [hti.lower() for hti in newhastags]
newatitems = [ati.lower() for ati in newatitems]
#Now add to the list.
            # Use extend(), which adds the elements themselves rather than nesting another list.
hashtag_list.extend(newhastags)
at_list.extend(newatitems)
re_twt_list.extend(re_tweets)
#now try to find some patterns in the last 200 tweets
#print 'use the collections library to find out the top 5'
#Version 2.6 of python does not support Counters within collections
#py2.6 hashcollect = collections.Counter(hashtag_list)
#py2.6 atcollect = collections.Counter(at_list)
totalretweets = len(re_twt_list)
retwpercent = (1.0 * totalretweets / max(1,len(x)) ) * 100
top10users = []
#print '\n.............................' ,'</p>'
print >> t2, '<li>', '\t',"%.2f%%" % retwpercent, 'are retweets (',totalretweets,'of a total of',len(x),'tweets)' ,'</li>'
print >> t2, '<ul>'
print >> t2, '<li>',(len(x)-totalretweets), 'tweets in ',twit_day_range,' days (without retweets)</li>'
print >> t2, '<li>','Avg of ','%.1f' %( 1.0*(len(x)-totalretweets)/max(twit_day_range,1) ),'tweets per day (without retweets)</li>'
print >> t2, '</ul></ul>'
data_for_plots.extend([totalretweets])
print >> t2, '<ul>'
        print >> t2, '<li>', '\tHashtags referenced over past',len(x),'tweets = ',len(hashtag_list) ,'</li>'
print >> t2, '<li>', '\t10 Most referenced hashtags' ,'</li>'
print >> t2, '<ul>'
#py2.6 for h_item in hashcollect.most_common(10): #can't use in python 2.6
for h_item in top_list(hashtag_list,10):
print >> t2, '<li>',text_sanitize(h_item[1]),'|',h_item[0] ,'</li>'
print >> t2, '</ul></ul>'
print >> t2, '<ul>'
print >> t2, '<li>', '\tTwitter IDs referenced over past',len(x),'tweets = ',len(at_list) ,'</li>'
print >> t2, '<li>', '\t10 Most referenced Tweeter IDs' ,'</li>'
print >> t2, '<ul>'
#py2.6 for at_item in atcollect.most_common(10):
for at_item in top_list(at_list,10):
print >> t2, '<li>', '\t\t',text_sanitize(at_item[1]),'|',at_item[0],'</li>'
#add the list of users to the top10user list
top10users.append(at_item[1].replace('@',''))
print >> t2, '</ul></ul>'
#print '<p>Twit list:',type(base_twit_list),'\t',len(base_twit_list),'</p>'
return top10users, base_twit_list, data_for_plots
def display_data(scn_name):
html_start()
print '<div id="body_sty">'
print '<h4>Data shown for '+scn_name.upper()+' and 10 other users most referenced in '+scn_name.upper()+'\'s tweets.</h4><hr>'
user_to_check = scn_name
if user_to_check[0] == '@':
user_raw = user_to_check
user_to_check = user_raw.replace('@','')
# the following lines get the user info
# -- this is response limited to 180
#user_public_info(user_to_check)
max_items_to_show = 200
max_tweets_to_get = 200
#if temp file exists, close it
global t2
try:
t2.close()
except:
        pass
#open the temp file
t2=TemporaryFile()
print >> t2, '''
<a href="#" onclick="show_hideStuff('detailed_data'); return false;">
<br><br><hr><br>
<h3>Detailed Data (click to see or hide)</h3></a><br>
<div id="detailed_data" style="display:none">
'''
# last xx tweets is response limited to 180
res_last200_tweets = get_last200_tweets(user_to_check.lower())
#print '<p>', type(res_last200_tweets), len(res_last200_tweets), '</p>'
final_tweet_list = []
final_data_for_plots = []
do_rest_of_display_data = 0
try:
user_reference = res_last200_tweets[0]
tweet_last200_tweets = res_last200_tweets[1]
final_tweet_list.append(tweet_last200_tweets)
final_data_for_plots.append(res_last200_tweets[2])
do_rest_of_display_data = 1
except:
print '<p>Something wrong to get the list of twitter IDs</p>'
if (do_rest_of_display_data == 1):
print >> t2, '<br>'
try:
if len(user_reference) > 0:
for newuser in user_reference:
if newuser != user_to_check:
res_last200_tweets = get_last200_tweets(newuser.lower())
tweets_from_res_last200 = res_last200_tweets[1]
final_tweet_list.append(tweets_from_res_last200)
final_data_for_plots.append(res_last200_tweets[2])
else:
print >>t2, '<p>', 'Did not find any instance of other users referenced in your tweets.' ,'</p>'
except:
print >>t2, '<p>', 'No info found.' ,'</p>'
#Add the data to the temp file also
print >> t2, '<br><br><hr><h4>List of Tweets Analyzed</h4>'
print >> t2, '<table id="table1" class="pure-table" width=100% style="display: block;">'
print >> t2, '<thead><tr bgcolor=#def><td>Date</td><td>Sender</td><td>Text</td></tr></thead>'
row_even = True
for i1 in final_tweet_list:
for i2 in i1:
#database fields: current date, username, screen name, twt_date, twt_writer, twt_text
twts = [datetime.date.today(),scn_name,user_to_check,i2[0],text_sanitize(i2[1]),text_sanitize(i2[2])]
try:
if row_even == True:
print >> t2, '<tr><td><sm>', twts[3] ,'</sm></td><td><sm>', str(twts[4]),'</sm></td><td><sm>', str(twts[5]),'</sm></td></tr>'
row_even = False
else:
print >> t2, '<tr class="pure-table-odd"><td><sm>', twts[3] ,'</sm></td><td><sm>', str(twts[4]),'</sm></td><td><sm>', str(twts[5]),'</sm></td></tr>'
row_even = True
except:
print '',
print >> t2, '</table>'
#print out the chart data
#data fields: screen_name, friends, followers, msgs, daterange, tweets, retweets
#print json.dumps(final_data_for_plots,indent=2)
#try doing a chart
#draw a chart showing friends and followers
print '<h3>Friends and Followers</h3>'
x_fdfp = []
y1_fdfp = []
y2_fdfp = []
#print '<p>Before adding data:',x_fdfp, y_fdfp, '</p>'
x_fdfp.append( 'Screen Name' )
y1_fdfp.append( 'Friends' )
y2_fdfp.append( 'Followers' )
for xy1 in range(len(final_data_for_plots)):
x_fdfp.append( final_data_for_plots[xy1][0] )
y1_fdfp.append( final_data_for_plots[xy1][1] )
y2_fdfp.append( final_data_for_plots[xy1][2] )
two_bar_chart_data("Friends and Followers", x_fdfp, y1_fdfp, y2_fdfp)
print '<h3>Followers to Friends Ratio</h3>'
#Draw a bar chart to show followers to friends ratio
x_fdfp = []
y_fdfp = []
#print '<p>Before adding data:',x_fdfp, y_fdfp, '</p>'
for xy1 in range(len(final_data_for_plots)):
x_fdfp.append( final_data_for_plots[xy1][0] )
y_fdfp.append( round( 1.0 * final_data_for_plots[xy1][2] / max(final_data_for_plots[xy1][1],1),1) )
#print '<p>',x_fdfp, y_fdfp, '</p>'
bar_chart_data("Followers to Friends Ratio", x_fdfp, y_fdfp)
print '<h3>Tweets sent per day</h3>'
x_fdfp = []
y1_fdfp = []
y2_fdfp = []
#print '<p>Before adding data:',x_fdfp, y_fdfp, '</p>'
x_fdfp.append( 'Screen Name' )
y1_fdfp.append( 'Tweets per day - with retweets' )
y2_fdfp.append( 'Tweets per day - without retweets' )
for xy1 in range(len(final_data_for_plots)):
x_fdfp.append( final_data_for_plots[xy1][0] )
y1_fdfp.append( final_data_for_plots[xy1][5] / max(final_data_for_plots[xy1][4],1) )
y2_fdfp.append( (final_data_for_plots[xy1][5]-final_data_for_plots[xy1][6]) / max(final_data_for_plots[xy1][4],1) )
two_bar_chart_data("Tweets sent per day", x_fdfp, y1_fdfp, y2_fdfp)
print '<h3>Tweet range (tweets seen per day)</h3>'
x_fdfp = []
y_fdfp = []
#print '<p>Before adding data:',x_fdfp, y_fdfp, '</p>'
for xy1 in range(len(final_data_for_plots)):
x_fdfp.append( final_data_for_plots[xy1][0] )
y_fdfp.append( round( 1.0 * final_data_for_plots[xy1][2] * final_data_for_plots[xy1][5] / max(final_data_for_plots[xy1][4],1) ) )
#print '<p>',x_fdfp, y_fdfp, '</p>'
bar_chart_data("Tweet Range", x_fdfp, y_fdfp)
lex_anal(final_tweet_list)
#print out the detailed data
# go to the first record of the temp file first
print >> t2, ' </div> '
t2.seek(0)
print t2.read()
t2.close()
#if this works - can delete below this.
else:
print '<p>Not able to process this user. Please try another.</p>'
print '</div>' #close the body_sty div
html_end()
def lex_anal(incomingTweetList):
'''
routine to do lexical analysis
'''
#final_tweet_list --- date / sender full name / tweet
#read the tweets and create a list of sender-htag and sender-@
#incoming TweetList has two layer lists
sender_htag = []
sender_at = []
h_tags_all = []
at_items_all = []
ts_all = []
for lex2 in incomingTweetList:
for lex22 in lex2:
td = lex22[0] #this is the tweet date
try:
ts = text_sanitize(lex22[1]) #this is the tweet sender
except:
print 'something wrong with ',lex22[1]
ts = '---'
ts_all.append(ts)
h_tags = re.findall('[#]\w+',lex22[2]) #these are the h-tags
at_items = re.findall('[@]\w+',lex22[2]) #these are the other users
h_tags = [hti.lower() for hti in h_tags]
at_items = [ati.lower() for ati in at_items]
for h2 in h_tags:
sender_htag.append([td,ts.lower()+'-'+h2])
h_tags_all.append(h2)
for at2 in at_items:
sender_at.append([td,ts.lower()+'-'+at2])
at_items_all.append(at2)
#summarize the two new lists
#following lists don't have dates
sender_htag2 = [xx[1] for xx in sender_htag]
sender_at2 = [yy[1] for yy in sender_at]
#make a list of the tweet senders only
ts_all = list(set(ts_all))
#print ts_all
#get the top 10 htags
#py2.6 ht_col = collections.Counter(h_tags_all)
htag_data4heatmap = []
at_data4heatmap = []
#print '<ul>Top 10 Hashtags'
#py2.6 for h_item in ht_col.most_common(10):
for h_item in top_list(h_tags_all,10):
#print '<li>', h_item, '</li>'
        # count how many times each hashtag was referenced by each tweet sender
try:
for tsitem in ts_all:
try:
itemtocount = str(tsitem+'-'+h_item[1])
htag_data4heatmap.append([tsitem,h_item[1], sender_htag2.count(itemtocount)])
except:
print 'Problem here: ',h_item,tsitem
except:
print 'Problem here',h_item
print '</ul>'
#get the top 10 user references
#py2.6 at_col = collections.Counter(at_items_all)
#print '<ul>Top 10 Users'
#py2.6 for a_item in at_col.most_common(10):
for a_item in top_list(at_items_all,10):
#print '<li>', a_item, '</li>'
        # count how many times each user was referenced by each tweet sender
try:
for tsitem in ts_all:
itemtocount = str(tsitem+'-'+a_item[1])
at_data4heatmap.append([tsitem,a_item[1], sender_at2.count(itemtocount)])
except:
print 'Problem here 2',a_item
print '</ul>'
#draw the table with the heatmap
    tcols = len(ts_all)  # number of tweet senders (table columns)
    trows = len(htag_data4heatmap) / tcols  # number of hashtags (table rows)
#print trows, tcols
if trows>0:
print '<br><br>'
print '<h3>Most Popular Hashtags</h3>'
heatmap_table(trows,tcols,htag_data4heatmap)
    tcols = len(ts_all)  # number of tweet senders (table columns)
    trows = len(at_data4heatmap) / tcols  # number of referenced users (table rows)
#print trows, tcols
if trows>0:
print '<br><br>'
print '<h3>Most Referenced Users</h3>'
heatmap_table(trows,tcols,at_data4heatmap)
def heatmap_table(trows,tcols,hm):
#calculate the max and min of the references
#and create a normalized color scale
mx = max(i[2] for i in hm)
mn = min(i[2] for i in hm)
    itv = mx - mn
    if itv == 0:
        itv = 1  # all counts equal; avoid division by zero below
    # COLOR palette from http://colorbrewer2.org/
    for arow in hm:
        rval = 1.0*arow[2]/itv
if rval<0.1:
arow[2]='#FFF5F0'
elif rval>=0.1 and rval<0.25:
arow[2]='#FEE0D2'
elif rval>=0.25 and rval<0.4:
arow[2]='#FCBBA1'
elif rval>=0.4 and rval<0.5:
arow[2]='#FC9272'
elif rval>=0.5 and rval<0.6:
arow[2]='#FB6A4A'
elif rval>=0.6 and rval<0.7:
arow[2]='#EF3B2C'
elif rval>=0.7 and rval<0.8:
arow[2]='#CB181D'
elif rval>=0.8 and rval<0.9:
arow[2]='#A50F15'
elif rval>=0.9:
arow[2]='#67000D'
print '<table width=100% style="display: block;"> '
for i in range(trows+1):
print '<tr>',
for j in range(tcols+1):
if (i==0 and j==0):
print '<td width="15%">','','</td>',
elif i==0 and j>0 and j<(tcols):
print '<td width="8.5%"><sm>',hm[j-1][0][:10],'</sm></td>',
elif i==0 and j==(tcols):
print '<td width="8.5%"><sm>',hm[j-1][0][:10],'</sm></td></tr>'
elif i>0 and j==0:
                print '<td><sm>',hm[(i-1)*tcols][1],'</sm></td>',
elif i>0 and j>0 and j<tcols:
print '<td bgcolor=',hm[(i-1)*tcols+j-1][2],'></td>',
elif i>0 and j==tcols:
print '<td bgcolor=',hm[(i-1)*tcols+j-1][2],'></td></tr>'
print '</table> '
def print_detailed_tweets(in_usertocheck):
html_start()
check_another_user_button()
#print '<h3>Listing of tweets analyzed:</h3>'
sd2st = start_database_to_store_tweets()
if sd2st[1] == True:
c2 = sd2st[0]
conn2 = sd2st[2]
#read all the tweets for the username and screen name
read_text = "SELECT * FROM tweetlist WHERE (username =\'"+in_usertocheck+"\')"
#print '<p>Select tweet command:',read_text,'</p>'
try:
c2.execute(read_text)
for crow in c2:
print crow[1]
conn2.close()
#print '<h2>Finished with the tweet list</h2>'
except conn2.Error, e:
print "E Error %d: %s" % (e.args[0], e.args[1])
else:
print "F Error %d: %s" % (sd2st[0].args[0],sd2st[0].args[1])
html_end()
def bar_chart_data(cht_title,xdata,ydata):
    # this routine will draw a bar chart
    # note: do not print anything inside chart modules except the needed output
print '<!--Load the AJAX API-->'
print '<script type=\"text/javascript\" src=\"https://www.google.com/jsapi\"></script>'
print '<script type=\"text/javascript\">'
# Load the Visualization API and the piechart package.
print ' google.load(\'visualization\', \'1.0\', {\'packages\':[\'corechart\']}); '
# Set a callback to run when the Google Visualization API is loaded.
print ' google.setOnLoadCallback(drawChart);'
# Callback that creates and populates a data table,
# instantiates the pie chart, passes in the data and
# draws it.
print ' function drawChart() { '
# Create the data table.
print ' var data = new google.visualization.arrayToDataTable([ '
print ' [ \'Screen Name\', \' ' , cht_title, ' \', {role:\'style\'} ], '
for cdi in range(len(xdata)):
if cdi == 0:
print " [ \'", xdata[cdi], "\',", ydata[cdi], ", \'orange\' ], "
else:
print " [ \'", xdata[cdi], "\',", ydata[cdi], ", \'blue\' ], "
print ' ]); '
#Set chart options
print " var options = {\'title\':\'",cht_title,"\', "
print ' \'width\':600, '
print ' \'height\':400, '
print ' \'hAxis\' : {\'logScale\' : true} , '
print ' legend :\'none\' , \'backgroundColor\': { fill: \"none\" } '
print ' }; '
# chart_bottom():
# Instantiate and draw our chart, passing in some options.
print ' var chart = new google.visualization.BarChart(document.getElementById(\"',cht_title+'DIV','\")); '
print ' function selectHandler() { '
print ' var selectedItem = chart.getSelection()[0]; '
print ' if (selectedItem) { '
print ' var topping = data.getValue(selectedItem.row, 0); '
print ' alert(\'The user selected \' + topping); '
print ' } '
print ' } '
print ' google.visualization.events.addListener(chart, \'select\', selectHandler); '
print ' chart.draw(data, options); '
print ' } '
print '</script> '
print '<!--Div that will hold the pie chart--> '
print '<div id=\"',cht_title+'DIV','\" style=\"width:600; height:400\"></div> '
def two_bar_chart_data(cht_title,xdata,ydata1,ydata2):
    # this routine will draw a bar chart with two bars
    # note: do not print anything inside chart modules except the needed output
print '<!--Load the AJAX API-->'
print '<script type=\"text/javascript\" src=\"https://www.google.com/jsapi\"></script>'
print '<script type=\"text/javascript\">'
# Load the Visualization API and the piechart package.
print ' google.load(\'visualization\', \'1.0\', {\'packages\':[\'corechart\']}); '
# Set a callback to run when the Google Visualization API is loaded.
print ' google.setOnLoadCallback(drawChart);'
print ' function drawChart() { '
print ' var data = new google.visualization.arrayToDataTable([ '
print " [ \'Screen Name\', \' ",ydata1[0], "\' ,{role:\'style\'}, \'" ,ydata2[0], "\' , {role:\'style\'} ], "
for cdi in range(len(xdata)):
if cdi>0:
print " [ \'", xdata[cdi], "\',", ydata1[cdi],",\'blue\',", ydata2[cdi], ", \'red\' ], "
print ' ]); '
#Set chart options
print " var options = {\'title\':\'",cht_title,"\', "
print ' \'width\':600, '
print ' \'height\':400, '
print ' \'hAxis\' : {\'logScale\' : false} , '
print ' legend :\'top\' , \'backgroundColor\': { fill: \"none\" } '
print ' }; '
# chart_bottom():
# Instantiate and draw our chart, passing in some options.
print ' var chart = new google.visualization.BarChart(document.getElementById(\"',cht_title+'DIV','\")); '
print ' function selectHandler() { '
print ' var selectedItem = chart.getSelection()[0]; '
print ' if (selectedItem) { '
print ' var topping = data.getValue(selectedItem.row, 0); '
print ' alert(\'The user selected \' + topping); '
print ' } '
print ' } '
print ' google.visualization.events.addListener(chart, \'select\', selectHandler); '
print ' chart.draw(data, options); '
print ' } '
print '</script> '
print '<!--Div that will hold the pie chart--> '
print '<div id=\"',cht_title+'DIV','\" style=\"width:600; height:400\"></div> '
def test3():
#Test some random twitter functions on stream data
html_start()
testname = "concession,privatization,public private"
#testname = "mining,mines,metal,oil,gas,petroleum"
try:
ts = TwitterStream(auth=OAuth(define_keys()[2],define_keys()[3],define_keys()[0],define_keys()[1]))
#response = ts.statuses.sample()
response = ts.statuses.filter(track=testname)
showcount = 0
maxshow = 50
for tweet in response:
showcount += 1
if showcount>= maxshow: break
# You must test that your tweet has text. It might be a delete
# or data message.
if tweet is None:
print_para("-- None --")
elif tweet.get('text'):
print_para(tweet['user']['name']+'.....'+str(twit_date(tweet['created_at']))+'---'+tweet['text'])
else:
print_para(str(showcount)+'...')
#print_para(json.dumps(tweet,indent=2))
except TwitterHTTPError, e:
print '<p>Error getting tweets info for:',e['details'],'</p>'
html_end()
def print_para(instr):
print '<p>',instr,'</p>'
def twit_date(in_created_at):
out_date = datetime.date(int(in_created_at[26:30]), int(time.strptime(in_created_at[4:7],'%b').tm_mon), int(in_created_at[8:10]))
return out_date
# Define main function.
def main():
form = cgi.FieldStorage()
if (form.has_key("action") and form.has_key("scn_name")):
if (form["action"].value == "display"):
display_data(text_sanitize(form["scn_name"].value))
else:
generate_form()
main()
|
import os
from .converter import *
from .classifier import *
__all__ = ['Grocery']
class GroceryException(Exception):
pass
class GroceryNotTrainException(GroceryException):
def __init__(self):
self.message = 'Text model has not been trained.'
class Grocery(object):
def __init__(self, name, custom_tokenize=None):
self.name = name
if custom_tokenize is not None and not hasattr(custom_tokenize, '__call__'):
raise GroceryException('Tokenize func must be callable.')
self.custom_tokenize = custom_tokenize
self.model = None
self.classifier = None
self.train_svm_file = None
def get_load_status(self):
return self.model is not None and isinstance(self.model, GroceryTextModel)
def train(self, train_src, delimiter='\t'):
text_converter = GroceryTextConverter(custom_tokenize=self.custom_tokenize)
self.train_svm_file = '%s_train.svm' % self.name
text_converter.convert_text(train_src, output=self.train_svm_file, delimiter=delimiter)
        # default parameters: liblinear solver '-s 4' (Crammer-Singer multi-class SVM)
        model = train(self.train_svm_file, '', '-s 4')
self.model = GroceryTextModel(text_converter, model)
return self
def predict(self, single_text):
if not self.get_load_status():
raise GroceryNotTrainException()
return self.model.predict_text(single_text)
def predict_filetext(self, single_text):
if not self.get_load_status():
raise GroceryNotTrainException()
return self.model.predict_file(single_text)
def test(self, text_src, delimiter='\t'):
if not self.get_load_status():
raise GroceryNotTrainException()
return GroceryTest(self.model).test(text_src, delimiter)
def save(self):
if not self.get_load_status():
raise GroceryNotTrainException()
self.model.save(self.name, force=True)
def load(self):
text_converter = GroceryTextConverter(custom_tokenize=self.custom_tokenize)
self.model = GroceryTextModel(text_converter)
self.model.load(self.name)
def __del__(self):
if self.train_svm_file and os.path.exists(self.train_svm_file):
os.remove(self.train_svm_file)
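# A minimal usage sketch (assumes a tab-separated "label<TAB>text" training
# file; the file name is hypothetical):
#   grocery = Grocery('sample')
#   grocery.train('train_data.txt')
#   grocery.save()
#   print(grocery.predict('some text to classify'))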
|
# https://leetcode.com/problems/rotate-image
from typing import List
class Solution:
    def rotate(self, matrix: List[List[int]]) -> None:
"""
Do not return anything, modify matrix in-place instead.
"""
for i in range(len(matrix)):
            for j in range(i, len(matrix)):
                if i != j:
                    matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]
for i in range(len(matrix)):
left = 0
right = len(matrix) - 1
            while left < right:
                matrix[i][left], matrix[i][right] = matrix[i][right], matrix[i][left]
                left += 1
                right -= 1
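# A quick check (hypothetical): transpose then reverse each row rotates 90 degrees clockwise.
#   m = [[1, 2], [3, 4]]
#   Solution().rotate(m)
#   m  ->  [[3, 1], [4, 2]]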
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
from getpass import getpass
import passlib.hash
from six.moves import configparser
from six.moves import input
import sys
from dlrn.config import ConfigOptions
from dlrn.config import setup_logging
from dlrn.db import closeSession
from dlrn.db import getSession
from dlrn.db import User
def create_user(options, db_connection):
try:
session = getSession(db_connection)
olduser = session.query(User).filter(
User.username == options.username).first()
if olduser is None:
if options.password is None:
newpass = getpass("Enter password for %s: " %
options.username)
else:
newpass = options.password
password = passlib.hash.sha512_crypt.encrypt(newpass)
newuser = User(username=options.username,
password=password)
session.add(newuser)
session.commit()
closeSession(session)
print("User %s successfully created" % options.username)
else:
print("User %s already exists" % options.username)
return -1
except Exception as e:
print("Failed to create user %s, %s" % (options.username, e))
return -1
return 0
def delete_user(options, db_connection):
session = getSession(db_connection)
user = session.query(User).filter(
User.username == options.username).first()
if user is None:
print("ERROR: User %s does not exist" % options.username)
return -1
else:
if not options.force:
print("Are you sure you want to delete user %s? "
"If so, type YES to continue." % options.username)
confirm = input()
if confirm != "YES":
print("Action not confirmed, exiting")
return -1
session.delete(user)
session.commit()
print("User %s deleted" % options.username)
closeSession(session)
return 0
def update_user(options, db_connection):
session = getSession(db_connection)
password = passlib.hash.sha512_crypt.encrypt(options.password)
user = session.query(User).filter(
User.username == options.username).first()
if user is None:
print("ERROR: User %s does not exist" % options.username)
return -1
else:
user.password = password
session.add(user)
session.commit()
closeSession(session)
return 0
command_funcs = {
'create': create_user,
'delete': delete_user,
'update': update_user,
}
def user_manager():
parser = argparse.ArgumentParser()
# Some of the non-positional arguments are required, so change the text
# saying "optional arguments" to just "arguments":
parser._optionals.title = 'arguments'
parser.add_argument('--config-file',
default='projects.ini',
help="Config file. Default: projects.ini")
parser.add_argument('--debug', action='store_true',
help="Print debug logs")
subparsers = parser.add_subparsers(dest='command',
title='subcommands',
description='available subcommands')
subparsers.required = True
# Subcommand create
parser_create = subparsers.add_parser('create',
help='Create a user')
parser_create.add_argument('--username', type=str, required=True,
help='User name')
parser_create.add_argument('--password', type=str, help='Password')
# Subcommand delete
parser_delete = subparsers.add_parser('delete',
help='Delete a user')
parser_delete.add_argument('--username', type=str, required=True,
help='User name')
parser_delete.add_argument('--force', dest='force',
action='store_true',
help='Do not request a confirmation')
# Subcommand update
parser_update = subparsers.add_parser('update',
help='Update a user')
parser_update.add_argument('--username', type=str, required=True,
help='User name')
parser_update.add_argument('--password', type=str, required=True,
help='New password')
options = parser.parse_args(sys.argv[1:])
setup_logging(options.debug)
cp = configparser.RawConfigParser()
cp.read(options.config_file)
config_options = ConfigOptions(cp)
return command_funcs[options.command](options,
config_options.database_connection)
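# Usage sketch (assumes this is exposed as a console script, e.g. "dlrn-user";
# the actual entry-point name may differ):
#   dlrn-user --config-file projects.ini create --username alice
#   dlrn-user update --username alice --password newsecret
#   dlrn-user delete --username alice --force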
|
# -*- coding: utf-8 -*-
"""
Least-squares Transformation (LST).
See https://iopscience.iop.org/article/10.1088/1741-2552/abcb6e.
"""
import numpy as np
from numpy import ndarray
from scipy.linalg import pinv
from sklearn.base import BaseEstimator, TransformerMixin
from joblib import Parallel, delayed
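# lst_kernel computes the least-squares alignment matrix P mapping a source
# trial S toward a target template T: argmin_P ||P S - T||_F  =>  P = T S^T (S S^T)^+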
def lst_kernel(S: ndarray, T: ndarray):
    P = T @ S.T @ pinv(S @ S.T)
    return P
class LST(BaseEstimator, TransformerMixin):
def __init__(self, n_jobs=None):
self.n_jobs = n_jobs
def fit(self, X: ndarray, y: ndarray):
X = X.reshape((-1, *X.shape[-2:])) # n_trials, n_channels, n_samples
self.classes_ = np.unique(y)
self.T_ = [np.mean(X[y==label], axis=0) for label in self.classes_]
return self
def transform(self, X: ndarray, y: ndarray):
X = np.copy(X)
X = X.reshape((-1, *X.shape[-2:])) # n_trials, n_channels, n_samples
Ts = np.zeros_like(X)
for i, label in enumerate(self.classes_):
Ts[y==label] = self.T_[i]
P = np.stack(
Parallel(n_jobs=self.n_jobs)(delayed(lst_kernel)(S, T) for S, T in zip(X, Ts)))
X = P@X
return X
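# A minimal usage sketch (synthetic shapes; the data here is hypothetical):
#   X = np.random.randn(20, 8, 128)   # trials x channels x samples
#   y = np.repeat([0, 1], 10)
#   lst = LST(n_jobs=1).fit(X, y)
#   X_aligned = lst.transform(X, y)   # each trial mapped toward its class template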
|
import numpy as np
from addressing import ImpliedAddressing
from helpers import generate_classes_from_string
from instructions.base_instructions import StackPush, StackPull, RegisterModifier, Inc, Dec
# stack push instructions
from instructions.generic_instructions import Instruction
class Php(ImpliedAddressing, StackPush):
"""
N Z C I D V
- - - - - -
"""
identifier_byte = bytes([0x08])
@classmethod
def data_to_push(cls, cpu):
# set bit 4 and 5 to be 1, see: http://wiki.nesdev.com/w/index.php/CPU_status_flag_behavior
return cpu.status_reg.to_int() | 0b110000
class Pha(ImpliedAddressing, StackPush):
"""
N Z C I D V
- - - - - -
"""
identifier_byte = bytes([0x48])
@classmethod
def data_to_push(cls, cpu):
return cpu.a_reg
class Txs(ImpliedAddressing, Instruction):
    """
    N Z C I D V
    - - - - - -
    """
    # TXS affects no status flags on the 6502
    identifier_byte = bytes([0x9A])
@classmethod
def write(cls, cpu, memory_address, value):
cpu.sp_reg = cpu.x_reg
return cpu.sp_reg
# stack pull instructions
class Plp(ImpliedAddressing, StackPull):
"""
    pulls the processor status from the stack,
    ignoring bits 4 and 5
"""
identifier_byte = bytes([0x28])
@classmethod
def write_pulled_data(cls, cpu, pulled_data):
cpu.status_reg.from_int(pulled_data, [4, 5])
class Pla(ImpliedAddressing, StackPull):
"""
N Z C I D V
+ + - - - -
"""
sets_negative_bit = True
sets_zero_bit = True
identifier_byte = bytes([0x68])
@classmethod
def write_pulled_data(cls, cpu, pulled_data):
cpu.a_reg = np.uint8(pulled_data)
return cpu.a_reg
class Tsx(ImpliedAddressing, Instruction):
"""
N Z C I D V
+ + - - - -
"""
sets_negative_bit = True
sets_zero_bit = True
identifier_byte = bytes([0xBA])
@classmethod
def write(cls, cpu, memory_address, value):
cpu.x_reg = cpu.sp_reg
return cpu.x_reg
# register instructions
class Iny(ImpliedAddressing, RegisterModifier):
identifier_byte = bytes([0xC8])
@classmethod
def write(cls, cpu, memory_address, value):
cpu.y_reg += np.uint8(1)
return cpu.y_reg
class Dey(ImpliedAddressing, RegisterModifier):
identifier_byte = bytes([0x88])
@classmethod
def write(cls, cpu, memory_address, value):
cpu.y_reg -= np.uint8(1)
return cpu.y_reg
class Inx(ImpliedAddressing, RegisterModifier):
identifier_byte = bytes([0xE8])
@classmethod
def write(cls, cpu, memory_address, value):
cpu.x_reg += np.uint8(1)
return cpu.x_reg
class Dex(ImpliedAddressing, RegisterModifier):
identifier_byte = bytes([0xCA])
@classmethod
def write(cls, cpu, memory_address, value):
cpu.x_reg -= np.uint8(1)
return cpu.x_reg
class Txa(ImpliedAddressing, RegisterModifier):
identifier_byte = bytes([0x8A])
@classmethod
def write(cls, cpu, memory_address, value):
cpu.a_reg = cpu.x_reg
return cpu.a_reg
class Tay(ImpliedAddressing, RegisterModifier):
identifier_byte = bytes([0xA8])
@classmethod
def write(cls, cpu, memory_address, value):
cpu.y_reg = cpu.a_reg
return cpu.y_reg
class Tya(ImpliedAddressing, RegisterModifier):
identifier_byte = bytes([0x98])
@classmethod
def write(cls, cpu, memory_address, value):
cpu.a_reg = cpu.y_reg
return cpu.a_reg
# inc
types = []
inc_types = '''
zeropage INC oper E6 2 5
zeropage,X INC oper,X F6 2 6
absolute INC oper EE 3 6
absolute,X INC oper,X FE 3 7
'''
for generated in generate_classes_from_string(Inc, inc_types):
types.append(generated)
dec_types = '''
zeropage DEC oper C6 2 5
zeropage,X DEC oper,X D6 2 6
absolute DEC oper CE 3 6
absolute,X DEC oper,X DE 3 7
'''
for generated in generate_classes_from_string(Dec, dec_types):
types.append(generated)
|
import os
__location__ = os.path.realpath(
os.path.join(os.getcwd(), os.path.dirname(__file__)))
directory = os.path.join(__location__, 'data/raw')
for name in os.listdir(directory):
print name
|
from flask import Flask, render_template, Response
import yaml
app = Flask(__name__)
with open("config.yml") as file:
web_config = yaml.load(file, Loader=yaml.FullLoader)
@app.route("/")
def index():
return render_template("index.html", invite_url=web_config["web"]["bot_invite"])
@app.route("/added")
def added():
return render_template("index.html", invite_url=web_config["web"]["bot_invite"])
@app.route("/riot.txt")
def riot_verify():
return Response(web_config["web"]["riot_games_key"], mimetype='text/plain')
if __name__ == '__main__':
app.run(host='0.0.0.0', port=web_config["web"]["port"])
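# A sketch of the config.yml keys this app reads (values are placeholders):
#   web:
#     bot_invite: "https://example.invalid/invite"
#     riot_games_key: "riot.txt verification token"
#     port: 5000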
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('CHANGELOG.rst') as history_file:
history = history_file.read()
requirements = [
'Click>=6.0',
'rueckenwind',
'docker',
'pyyaml',
]
test_requirements = [
'pytest',
'pytest-tornado',
]
setup(
name='devenv',
version='0.1.1',
description="A development environment based upon docker.",
long_description=readme + '\n\n' + history,
author="Florian Ludwig",
author_email='[email protected]',
url='https://github.com/FlorianLudwig/devenv',
packages=[
'devenv',
],
    package_dir={'devenv': 'devenv'},
entry_points={
'console_scripts': [
'de=devenv.cli:main'
]
},
include_package_data=True,
install_requires=requirements,
license="Apache Software License 2.0",
zip_safe=False,
keywords='devenv',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
test_suite='tests',
tests_require=test_requirements
)
|
import os
import praw
import requests
import PIL
from PIL import Image
PIL.Image.MAX_IMAGE_PIXELS = 131460604
class Scraper:
def __init__(self, id, secret):
self.reddit = praw.Reddit(client_id=id, client_secret=secret, user_agent='WallpaperScraper')
def scrapeReddit(self, subreddit, numImages, min_width, min_height, savepath):
top_posts = self.reddit.subreddit(subreddit).top("all",limit=numImages)
image_posts = []
for post in top_posts:
url = post.url
if((url.find("jpg") >= 0 or url.find("jpeg") >= 0 or url.find("png") >= 0)
and (url.find("redd.it") >= 0 or url.find("imgur") >= 0)):
image_posts.append(post)
if not os.path.exists(savepath):
os.mkdir(savepath)
for post in image_posts:
print(post.url)
img_data = requests.get(post.url).content
filetype = post.url[post.url.rfind("."):]
filename = post.title.replace(" ","").replace(".", "").replace("\'", "").replace("\"", "").replace("\\", "").replace("/", "") + filetype
with open(f"{savepath}/{filename}", "wb") as handler:
handler.write(img_data)
        files = os.listdir(savepath)
        for f in files:
            # Read the dimensions, closing the image before any delete so the
            # file is not locked; do not mutate the list while iterating.
            with Image.open(f"{savepath}/{f}") as img:
                width, height = img.width, img.height
            if width < min_width and height < min_height:
                os.remove(f"{savepath}/{f}")
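# Example usage (sketch; the credentials, subreddit and path are placeholders):
# scraper = Scraper("CLIENT_ID", "CLIENT_SECRET")
# scraper.scrapeReddit("wallpapers", 25, 1920, 1080, "./wallpapers")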
|
import pandas as pd
from cobra.model_building import univariate_selection
def mock_data():
return pd.DataFrame({"var1_enc": [0.42] * 10,
"var2_enc": [0.94] * 10,
"var3_enc": [0.87] * 10})
class TestUnivariateSelection:
def test_preselection_classification(self):
X = mock_data()
y = pd.DataFrame([1] * 5 + [0] * 5, columns=["target"])
basetable = pd.concat([y, X], axis=1)
basetable["split"] = ["train"] * 3 + ["selection"] * 6 + ["train"]
df_auc = univariate_selection.compute_univariate_preselection(
target_enc_train_data=basetable[basetable["split"] == "train"],
target_enc_selection_data=basetable[basetable["split"] == "selection"],
predictors=X.columns,
target_column="target",
model_type="classification",
preselect_auc_threshold=0.48,
preselect_overtrain_threshold=0.05)
assert all(c in df_auc.columns for c in ["AUC train", "AUC selection"])
preselected_predictors = (univariate_selection
.get_preselected_predictors(df_auc))
assert preselected_predictors == ["var1_enc", "var2_enc", "var3_enc"]
def test_preselection_regression(self):
X = mock_data()
y = pd.DataFrame([6.0, 9.0, 4.2, 5.5, 0.7, 1.9, 8.7, 8.0, 2.0, 7.2], columns=["target"])
basetable = pd.concat([y, X], axis=1)
basetable["split"] = ["train"] * 3 + ["selection"] * 6 + ["train"]
df_rmse = univariate_selection.compute_univariate_preselection(
target_enc_train_data=basetable[basetable["split"] == "train"],
target_enc_selection_data=basetable[basetable["split"] == "selection"],
predictors=X.columns,
target_column="target",
model_type="regression",
preselect_auc_threshold=5,
preselect_overtrain_threshold=0.05)
assert all(c in df_rmse.columns for c in ["RMSE train", "RMSE selection"])
preselected_predictors = (univariate_selection
.get_preselected_predictors(df_rmse))
assert preselected_predictors == ["var2_enc", "var3_enc"]
|
"""
@file
@brief
@author
@details
"""
from unittest import TestCase
from agent import Agent
class TestAgent(TestCase):
def setUp(self) -> None:
self.agent = Agent(2, 1, 3)
def test_triangular_profile(self) -> None:
self.agent.generate_motion_profile(4, 100)
|
import datetime
import logging
import os
import xml.etree.ElementTree as etree
from io import StringIO
import numpy as np
import pandas as pd
from hydropandas.observation import GroundwaterObs, WaterlvlObs
from lxml.etree import iterparse
logger = logging.getLogger(__name__)
def read_xml_fname(
fname,
ObsClass,
translate_dic=None,
low_memory=True,
locationIds=None,
filterdict=None,
return_events=True,
keep_flags=(0, 1),
return_df=False,
tags=("series", "header", "event"),
skip_errors=True,
to_mnap=False,
remove_nan=False,
):
"""Read an xml filename into a list of observations objects.
Parameters
----------
fname : str
full path to file
ObsClass : type
class of the observations, e.g. GroundwaterObs or WaterlvlObs
translate_dic : dic or None, optional
translate names from fews. If None this default dictionary is used:
{'locationId': 'locatie'}.
low_memory : bool, optional
whether to use xml-parsing method with lower memory footprint,
default is True
locationIds : tuple or list of str, optional
list of locationId's to read from XML file, others are skipped.
If None (default) all locations are read.
filterdict : dict, optional
dictionary with tag name to apply filter to as keys, and list of
accepted names as dictionary values to keep in final result,
i.e. {"locationId": ["B001", "B002"]}
    return_events : bool, optional
        return all event-information in a DataFrame per location, instead of
        just a Series (defaults to True). Overrides the keep_flags kwarg.
keep_flags : list of ints, optional
keep the values with these flags (defaults to 0 and 1). Only used
when return_events is False.
tags : list of strings, optional
Select the tags to be parsed. Defaults to series, header and event
    return_df : bool, optional
        return a DataFrame with the data, instead of two lists (default is
        False)
skip_errors: bool, optional
if True, continue after error, else raise error
to_mnap : boolean, optional
if True a column with 'stand_m_tov_nap' is added to the dataframe
remove_nan : boolean, optional
remove nan values from measurements, flag information about the
nan values is also lost
Returns
-------
list of ObsClass objects
list of timeseries stored in ObsClass objects
"""
if translate_dic is None:
translate_dic = {"locationId": "locatie"}
if low_memory is True:
obs_list = iterparse_pi_xml(
fname,
ObsClass,
translate_dic=translate_dic,
locationIds=locationIds,
filterdict=filterdict,
return_events=return_events,
keep_flags=keep_flags,
return_df=return_df,
tags=tags,
skip_errors=skip_errors,
)
else:
tree = etree.parse(fname)
root = tree.getroot()
obs_list = read_xml_root(
root,
ObsClass,
translate_dic=translate_dic,
locationIds=locationIds,
to_mnap=to_mnap,
remove_nan=remove_nan,
)
return obs_list
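# Example (sketch): reading groundwater observations from a FEWS PI-XML
# export; the file path is hypothetical.
# obs_list = read_xml_fname("data/pi_export.xml", GroundwaterObs,
#                           locationIds=["B001"], low_memory=True)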
def iterparse_pi_xml(
fname,
ObsClass,
translate_dic=None,
filterdict=None,
locationIds=None,
return_events=True,
keep_flags=(0, 1),
return_df=False,
tags=("series", "header", "event"),
skip_errors=False,
):
"""Read a FEWS XML-file with measurements, memory efficient.
Parameters
----------
fname : str
full path to file
ObsClass : type
class of the observations, e.g. GroundwaterObs or WaterlvlObs
translate_dic : dic or None, optional
translate names from fews. If None this default dictionary is used:
{'locationId': 'locatie'}.
locationIds : tuple or list of str, optional
list of locationId's to read from XML file, others are skipped.
If None (default) all locations are read.
filterdict : dict, optional
dictionary with tag name to apply filter to as keys, and list of
accepted names as dictionary values to keep in final result,
i.e. {"locationId": ["B001", "B002"]}
    return_events : bool, optional
        return all event-information in a DataFrame per location, instead of
        just a Series (defaults to True). Overrides the keep_flags kwarg.
keep_flags : list of ints, optional
keep the values with these flags (defaults to 0 and 1). Only used
when return_events is False.
tags : list of strings, optional
Select the tags to be parsed. Defaults to series, header and event
    return_df : bool, optional
        return a DataFrame with the data, instead of two lists (default is
        False)
skip_errors: bool, optional
if True, continue after error, else raise error
Returns
-------
df : pandas.DataFrame
a DataFrame containing the metadata and the series if 'return_df'
is True
obs_list : list of pandas Series
list of timeseries if 'return_df' is False
"""
if translate_dic is None:
translate_dic = {"locationId": "locatie"}
tags = ["{{http://www.wldelft.nl/fews/PI}}{}".format(tag) for tag in tags]
context = iterparse(fname, tag=tags)
# _, root = next(context)
header_list = []
obs_list = []
keep_flags = [str(flag) for flag in keep_flags]
for _, element in context:
if element.tag.endswith("header"):
header = {}
for h_attr in element:
tag = h_attr.tag.replace(
"{{{0}}}".format("http://www.wldelft.nl/fews/PI"), ""
)
if tag.startswith("locationId"):
logger.info(f"reading {h_attr.text}")
# if specific locations are provided only read those
if locationIds is not None and tag.startswith("locationId"):
loc = h_attr.text
if loc not in locationIds:
element.clear()
logger.info(f" ... skipping '{loc}', not in locationIds")
continue
if filterdict is not None:
for k, v in filterdict.items():
if tag.startswith(k):
attr = h_attr.text
if attr not in v:
element.clear()
logger.info(
f" ... skipping '{attr}' not "
f"in accepted values for '{k}'"
)
continue
if h_attr.text is not None:
header[tag] = h_attr.text
elif len(h_attr.attrib) != 0:
header[tag] = {**h_attr.attrib}
else:
header[tag] = None
events = []
elif element.tag.endswith("event"):
# if specific locations are provided only read those
if locationIds is not None:
if loc not in locationIds:
element.clear()
continue
if filterdict is not None:
skip = False
                for k, v in filterdict.items():
if header.get(k, None) not in v:
skip = True
if skip:
element.clear()
continue
events.append({**element.attrib})
elif element.tag.endswith("series"):
# if specific locations are provided only read those
if locationIds is not None:
if loc not in locationIds:
element.clear()
continue
if filterdict is not None:
skip = False
                for k, v in filterdict.items():
if header.get(k, None) not in v:
skip = True
if skip:
element.clear()
continue
if len(events) == 0:
if return_events:
ts = pd.DataFrame()
else:
ts = pd.Series()
else:
df = pd.DataFrame(events)
df.index = pd.to_datetime(
[d + " " + t for d, t in zip(df["date"], df["time"])],
errors="coerce",
)
df.drop(columns=["date", "time"], inplace=True)
if return_events:
df["value"] = pd.to_numeric(df["value"], errors="coerce")
df["flag"] = pd.to_numeric(df["flag"])
ts = df
else:
mask = df["flag"].isin(keep_flags)
ts = pd.to_numeric(df.loc[mask, "value"], errors="coerce")
o, header = _obs_from_meta(ts, header, translate_dic, ObsClass)
header_list.append(header)
obs_list.append(o)
# Free memory.
element.clear()
if return_df:
for h, s in zip(header_list, obs_list):
h["series"] = s
return pd.DataFrame(header_list)
else:
return obs_list
def read_xmlstring(
xmlstring,
ObsClass,
translate_dic=None,
filterdict=None,
locationIds=None,
low_memory=True,
to_mnap=False,
remove_nan=False,
):
"""Read xmlstring into an list of Obs objects. Xmlstrings are usually
obtained using a fews api.
Parameters
----------
xmlstring : str
xml string to be parsed. Typically from a fews api.
ObsClass : type
class of the observations, e.g. GroundwaterObs or WaterlvlObs
translate_dic : dic or None, optional
translate names from fews. If None this default dictionary is used:
{'locationId': 'locatie'}.
locationIds : tuple or list of str, optional
list of locationId's to read from XML file, others are skipped.
If None (default) all locations are read.
low_memory : bool, optional
whether to use xml-parsing method with lower memory footprint,
default is True
to_mnap : boolean, optional
if True a column with 'stand_m_tov_nap' is added to the dataframe
remove_nan : boolean, optional
remove nan values from measurements, flag information about the
nan values is also lost
Returns
-------
list of ObsClass objects
list of timeseries stored in ObsClass objects
"""
if translate_dic is None:
translate_dic = {"locationId": "locatie"}
if low_memory:
obs_list = iterparse_pi_xml(
StringIO(xmlstring),
ObsClass,
translate_dic=translate_dic,
filterdict=filterdict,
locationIds=locationIds,
)
else:
root = etree.fromstring(xmlstring)
obs_list = read_xml_root(
root,
ObsClass,
translate_dic=translate_dic,
locationIds=locationIds,
to_mnap=to_mnap,
remove_nan=remove_nan,
)
return obs_list
def read_xml_root(
root,
ObsClass,
translate_dic=None,
locationIds=None,
to_mnap=False,
remove_nan=False,
):
"""Read a FEWS XML-file with measurements, return list of ObsClass objects.
Parameters
----------
root : xml.etree.ElementTree.Element
root element of a fews xml
ObsClass : type
class of the observations, e.g. GroundwaterObs or WaterlvlObs
translate_dic : dic or None, optional
translate names from fews. If None this default dictionary is used:
{'locationId': 'locatie'}.
locationIds : tuple or list of str, optional
list of locationId's to read from XML file, others are skipped.
If None (default) all locations are read.
to_mnap : boolean, optional
if True a column with 'stand_m_tov_nap' is added to the dataframe
remove_nan : boolean, optional
remove nan values from measurements, flag information about the
nan values is also lost
Returns
-------
list of ObsClass objects
list of timeseries stored in ObsClass objects
"""
if translate_dic is None:
translate_dic = {"locationId": "locatie"}
obs_list = []
for item in root:
if item.tag.endswith("series"):
header = {}
date = []
time = []
events = []
for subitem in item:
if subitem.tag.endswith("header"):
for subsubitem in subitem:
prop = subsubitem.tag.split("}")[-1]
val = subsubitem.text
if prop == "x" or prop == "y" or prop == "lat" or prop == "lon":
val = float(val)
header[prop] = val
if prop == "locationId":
logger.info(f"read {val}")
elif subitem.tag.endswith("event"):
date.append(subitem.attrib.pop("date"))
time.append(subitem.attrib.pop("time"))
events.append({**subitem.attrib})
# combine events in a dataframe
index = pd.to_datetime(
[d + " " + t for d, t in zip(date, time)], errors="coerce"
)
ts = pd.DataFrame(events, index=index, dtype=float)
if remove_nan and (not ts.empty):
ts.dropna(subset=["value"], inplace=True)
if to_mnap and (not ts.empty):
ts["stand_m_tov_nap"] = ts["value"]
o, header = _obs_from_meta(ts, header, translate_dic, ObsClass)
if locationIds is not None:
if header["locatie"] in locationIds:
obs_list.append(o)
else:
obs_list.append(o)
return obs_list
def _obs_from_meta(ts, header, translate_dic, ObsClass):
"""Internal function to convert timeseries and header into Obs objects.
Parameters
----------
ts : pd.DataFrame
timeseries data.
header : dictionary
metadata.
translate_dic : dictionary
translate dictionary.
ObsClass : type
class of the observations, e.g. GroundwaterObs or WaterlvlObs
Returns
-------
o : GroundwaterObs or WaterlvlObs
        hydropandas observation object.
header : dictionary
metadata.
"""
for key, item in translate_dic.items():
header[item] = header.pop(key)
if "x" in header.keys():
x = float(header["x"])
else:
x = np.nan
if "y" in header.keys():
y = float(header["y"])
else:
y = np.nan
if np.isnan(x) or np.isnan(y):
metadata_available = False
else:
metadata_available = True
if ObsClass in [GroundwaterObs, WaterlvlObs]:
o = ObsClass(
ts,
x=x,
y=y,
meta=header,
name=header["locatie"],
locatie=header["locatie"],
metadata_available=metadata_available,
)
else:
o = ObsClass(ts, x=x, y=y, meta=header, name=header["locatie"])
return o, header
def write_pi_xml(obs_coll, fname, timezone=1.0, version="1.24"):
"""Write TimeSeries object to PI-XML file.
Parameters
----------
fname: path
path to XML file
"""
assert fname.endswith(".xml"), "Output file should have '.xml' extension!"
# first line of XML file
line0 = '<?xml version="1.0" encoding="UTF-8"?>\n'
# some definitions for timeseries XML file
NS = r"http://www.wldelft.nl/fews/PI"
FS = r"http://www.wldelft.nl/fews/fs"
XSI = r"http://www.w3.org/2001/XMLSchema-instance"
schemaLocation = (
r"http://fews.wldelft.nl/schemas/version1.0" r"/Pi-schemas/pi_timeseries.xsd"
)
timeseriesline = (
'<TimeSeries xmlns="{NS}" xmlns:xsi="{XSI}" '
'xsi:schemaLocation="{NS} {schema}" version="{version}" '
'xmlns:fs="{FS}">\n'
)
# line templates
paramline = "<{tag}>{param}</{tag}>\n"
# write file
with open(fname, "w") as f:
f.write(line0)
f.write(
timeseriesline.format(
NS=NS, FS=FS, XSI=XSI, schema=schemaLocation, version=version
)
)
tzline = "\t" + paramline.format(tag="timeZone", param=timezone)
f.write(tzline)
for o in obs_coll.obs:
# start series
start = "\t" + "<series>\n"
f.write(start)
# write header
hlines = []
hstart = 2 * "\t" + "<header>\n"
hlines.append(hstart)
for htag, hval in o.meta.items():
if htag.endswith("Date"):
try:
hdate = hval.strftime("%Y-%m-%d")
htime = hval.strftime("%H:%M:%S")
except AttributeError as e:
if htag.startswith("start"):
hdate = o.index[0].strftime("%Y-%m-%d")
htime = o.index[0].strftime("%H:%M:%S")
elif htag.startswith("end"):
hdate = o.index[-1].strftime("%Y-%m-%d")
htime = o.index[-1].strftime("%H:%M:%S")
else:
raise (e)
hline = '<{tag} date="{date}" time="{time}"/>\n'.format(
tag=htag, date=hdate, time=htime
)
elif htag.endswith("timeStep"):
hline = '<{tag} unit="{unit}"/>\n'.format(tag=htag, unit=hval)
else:
hline = paramline.format(tag=htag, param=hval)
hlines.append(3 * "\t" + hline)
hlines.append(2 * "\t" + "</header>\n")
f.writelines(hlines)
# write timeseries
dates = o.reset_index()["index"].apply(
lambda s: datetime.datetime.strftime(s, "%Y-%m-%d")
)
times = o.reset_index()["index"].apply(
lambda s: datetime.datetime.strftime(s, "%H:%M:%S")
)
# set date and time attributes
events = (
2 * "\t" + '<event date="' + dates.values + '" time="' + times.values
)
# loop through columns and add to event
for icol in o.columns:
val = o[icol].astype(str)
events += '" {}="'.format(icol) + val.values
# close event
events += '"/>\n'
# write to file
f.writelines(events)
# end series
f.write("\t" + "</series>\n")
# end Timeseries
f.write("</TimeSeries>\n")
def read_xml_filelist(
fnames,
ObsClass,
directory=None,
locations=None,
translate_dic=None,
filterdict=None,
to_mnap=False,
remove_nan=False,
low_memory=True,
):
"""Read a list of xml files into a list of observation objects.
Parameters
----------
    fnames : list of str
        list of xml file names.
    ObsClass : type
        class of the observations, e.g. GroundwaterObs or WaterlvlObs
    directory : str, optional
        directory in which the xml files are stored, prepended to each
        file name. The default is None.
locations : tuple or list of str, optional
list of locationId's to read from XML file, others are skipped.
If None (default) all locations are read.
translate_dic : dic or None, optional
translate names from fews. If None this default dictionary is used:
{'locationId': 'locatie'}.
filterdict : dict, optional
dictionary with tag name to apply filter to as keys, and list of
accepted names as dictionary values to keep in final result,
i.e. {"locationId": ["B001", "B002"]}
to_mnap : boolean, optional
if True a column with 'stand_m_tov_nap' is added to the dataframe
remove_nan : boolean, optional
remove nan values from measurements, flag information about the
nan values is also lost
low_memory : bool, optional
whether to use xml-parsing method with lower memory footprint,
default is True
Returns
-------
list of ObsClass objects
list of timeseries stored in ObsClass objects
"""
if translate_dic is None:
translate_dic = {"locationId": "locatie"}
obs_list = []
nfiles = len(fnames)
for j, ixml in enumerate(fnames):
# print message
logger.info(f"{j+1}/{nfiles} read {ixml}")
# join directory to filename if provided
if directory is None:
fullpath = ixml
else:
fullpath = os.path.join(directory, ixml)
# read xml fname
obs_list += read_xml_fname(
fullpath,
ObsClass,
translate_dic=translate_dic,
filterdict=filterdict,
low_memory=low_memory,
locationIds=locations,
)
return obs_list
|
"""
This controller provides helper methods to the front-end views that manage lookup files.
"""
import logging
import logging.handlers
import csv
import json
import time
import datetime
from splunk.clilib.bundle_paths import make_splunkhome_path
from splunk import AuthorizationFailed, ResourceNotFound
from splunk.rest import simpleRequest
# The default of the csv module is 128KB; upping to 10MB. See SPL-12117 for
# the background on issues surrounding field sizes.
# (this method is new in python 2.5)
csv.field_size_limit(10485760)
def setup_logger(level):
"""
Setup a logger for the REST handler
"""
logger = logging.getLogger('splunk.appserver.lookup_editor.rest_handler')
logger.propagate = False # Prevent the log messages from being duplicated in the python.log file
logger.setLevel(level)
log_file_path = make_splunkhome_path(['var', 'log', 'splunk', 'lookup_editor_rest_handler.log'])
file_handler = logging.handlers.RotatingFileHandler(log_file_path, maxBytes=25000000,
backupCount=5)
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p %z')
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
return logger
logger = setup_logger(logging.DEBUG)
import sys
import os
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
from lookup_editor import LookupEditor
from lookup_editor import shortcuts
from lookup_editor.exceptions import LookupFileTooBigException, PermissionDeniedException, LookupNameInvalidException
from lookup_editor import rest_handler
class LookupEditorHandler(rest_handler.RESTHandler):
"""
This is a REST handler that supports editing lookup files. All calls from the user-interface
should pass through this handler.
"""
def __init__(self, command_line, command_arg):
super(LookupEditorHandler, self).__init__(command_line, command_arg, logger)
self.lookup_editor = LookupEditor(logger)
def get_lookup_info(self, request_info, lookup_file=None, namespace="lookup_editor", **kwargs):
"""
Get information about a lookup file (owner, size, etc.)
"""
return {
'payload': str(lookup_file), # Payload of the request.
'status': 200 # HTTP status code
}
def get_lookup_backups(self, request_info, lookup_file=None, namespace=None, owner=None,
**kwargs):
"""
Get a list of the lookup file backups rendered as JSON.
"""
backups = self.lookup_editor.get_backup_files(request_info.session_key, lookup_file,
namespace, owner)
# Make the response
backups_meta = []
for backup in backups:
try:
backups_meta.append(
{
'time': backup,
'time_readable' : datetime.datetime.fromtimestamp(float(backup)).strftime('%Y-%m-%d %H:%M:%S')
}
)
except ValueError:
self.logger.warning("Backup file name is invalid, file_name=%s", backup)
# Sort the list
backups_meta = sorted(backups_meta, key=lambda x: float(x['time']), reverse=True)
return self.render_json(backups_meta)
def get_lookup_contents(self, request_info, lookup_file=None, namespace="lookup_editor",
owner=None, header_only=False, version=None, lookup_type=None,
**kwargs):
"""
Provides the contents of a lookup file as JSON.
"""
self.logger.info("Retrieving lookup contents, namespace=%s, lookup=%s, type=%s, owner=%s,"
" version=%s", namespace, lookup_file, lookup_type, owner, version)
if lookup_type is None or len(lookup_type) == 0:
lookup_type = "csv"
self.logger.warning("No type for the lookup provided when attempting to load a lookup" +
" file, it will default to CSV")
if header_only in ["1", "true", 1, True]:
header_only = True
else:
header_only = False
try:
# Load the KV store lookup
if lookup_type == "kv":
return self.render_json(self.lookup_editor.get_kv_lookup(request_info.session_key,
lookup_file, namespace,
owner))
# Load the CSV lookup
elif lookup_type == "csv":
with self.lookup_editor.get_lookup(request_info.session_key, lookup_file, namespace,
owner, version=version,
throw_exception_if_too_big=True) as csv_file:
csv_reader = csv.reader(csv_file)
# Convert the content to JSON
lookup_contents = []
for row in csv_reader:
lookup_contents.append(row)
# If we are only loading the header, then stop here
if header_only:
break
return self.render_json(lookup_contents)
else:
self.logger.warning('Lookup file type is not recognized,' +
' lookup_type=' + lookup_type)
return self.render_error_json('Lookup file type is not recognized', 421)
except IOError:
self.logger.warning("Unable to find the requested lookup")
return self.render_error_json("Unable to find the lookup", 404)
except (AuthorizationFailed, PermissionDeniedException) as e:
self.logger.warning("Access to lookup denied")
return self.render_error_json(str(e), 403)
except LookupFileTooBigException as e:
self.logger.warning("Lookup file is too large to load")
data = {
                'message': 'Lookup file is too large to load ' +
                           '(file-size must be less than 10 MB to be edited)',
'file_size' : e.file_size
}
return {
'payload': json.dumps(data),
'status': 420
}
except:
self.logger.exception('Lookup file could not be loaded')
return self.render_error_json('Lookup file could not be loaded', 500)
return {
'payload': 'Response',
'status': 500
}
def get_lookup_as_file(self, request_info, lookup_file=None, namespace="lookup_editor",
owner=None, lookup_type='csv', **kwargs):
"""
Provides the lookup file in a way to be downloaded by the browser
"""
self.logger.info("Exporting lookup, namespace=%s, lookup=%s, type=%s, owner=%s", namespace,
lookup_file, lookup_type, owner)
try:
# If we are getting the CSV, then just pipe the file to the user
if lookup_type == "csv":
with self.lookup_editor.get_lookup(request_info.session_key, lookup_file, namespace, owner) as csv_file_handle:
csv_data = csv_file_handle.read()
# If we are getting a KV store lookup, then convert it to a CSV file
else:
rows = self.lookup_editor.get_kv_lookup(request_info.session_key, lookup_file, namespace, owner)
csv_data = shortcuts.convert_array_to_csv(rows)
# Tell the browser to download this as a file
if lookup_file.endswith(".csv"):
filename = 'attachment; filename="%s"' % lookup_file
else:
filename = 'attachment; filename="%s"' % (lookup_file + ".csv")
return {
'payload': csv_data,
'status': 200,
'headers': {
'Content-Type': 'text/csv',
'Content-Disposition': filename
},
}
except IOError:
return self.render_json([], 404)
except PermissionDeniedException as exception:
return self.render_error_json(str(exception), 403)
return {
'payload': str(lookup_file), # Payload of the request.
'status': 200 # HTTP status code
}
def post_lookup_contents(self, request_info, contents=None, lookup_file=None,
namespace="lookup_editor", owner=None, **kwargs):
"""
Save the JSON contents to the lookup file.
"""
self.logger.info("Saving lookup contents...")
try:
# Backup the lookup file
data = {
'lookup_file' : lookup_file,
'namespace' : namespace,
'owner' : owner,
'file_time' : time.time()
}
try:
_, _ = simpleRequest('/services/data/lookup_backup/backup',
sessionKey=request_info.session_key,
method='POST', postargs=data)
except ResourceNotFound:
self.logger.info("Existing lookup could not be found for backup")
file_name = self.lookup_editor.update(contents, lookup_file, namespace, owner,
request_info.session_key, request_info.user)
# Everything worked, return accordingly
return {
'payload': str(file_name), # Payload of the request.
'status': 200, # HTTP status code
'headers': {
'Content-Type': 'application/octet' # Content Type
},
}
except (AuthorizationFailed, PermissionDeniedException):
return self.render_error_json("You do not have permission to perform this operation", 403)
except LookupNameInvalidException:
return self.render_error_json("Lookup name is invalid", 400)
except:
self.logger.exception("Unable to save the lookup")
return self.render_error_json("Unable to save the lookup")
|
# -*- coding: utf-8 -*-
__author__ = "[email protected]"
from cgiserver import runServer, PathContainer
__SYMBOLNAMES = None
__SYMBOLS = None
__DUMP_SYMBOLS_PROC = None
def getWFSData(databaseName, tableName, bbox, srsName, schemaName="", sqlQuery=""):
from shapely.wkb import loads
import geojson
import pygeotoolbox.sharedtools.log as logger
from pygeotoolbox.database import databaseByName
from pygeotoolbox.sharedtools import listToSqlStr
logger.openSection("Building WFS Data...")
FIELD_DEFS = 'graphic_symbol'
GEOMETRY_FIELD_NAME = 'geom'
"""
sampleRequest:
http://localhost/cgi-bin/mapserv.exe?map=C:/ms4w/Apache/hidden/MapGen/osm_lund.map&service=wfs&version=1.1.0&request=GetFeature&typename=data_area&outputFormat=application/json&srsname=EPSG:3857&bbox=1471006.5443115234,7501014.372741699,1471356.5870361328,7502471.5505981445,EPSG:3857
"""
if not schemaName:
schemaName = "mg_work"
    if __SYMBOLNAMES:
        symbolNamesWhereClause = "graphic_symbol in %s and " % listToSqlStr(__SYMBOLNAMES)
        logger.info("Symbol names:" + str(listToSqlStr(__SYMBOLNAMES)))
    else:
        symbolNamesWhereClause = ""
logger.info("bbox:" + str(bbox))
sql = "select %s, (st_dump(%s)).geom from %s.%s, (select ST_MakeEnvelope(%s) as envelope) as bbox where %s %s && bbox.envelope" % (FIELD_DEFS, GEOMETRY_FIELD_NAME, schemaName, tableName, bbox, symbolNamesWhereClause, GEOMETRY_FIELD_NAME)
if sqlQuery:
sql += " and (%s)" % sqlQuery
fieldNames = FIELD_DEFS.split(',')
logger.info("Loading features...")
features = []
database = databaseByName(databaseName)
for row in database.executeSelectSQL(sql):
shape = loads(row[len(row) - 1], hex=True)
properties = {}
for fieldName, fieldIndex in zip(fieldNames, range(len(fieldNames))):
properties[fieldName] = row[fieldIndex]
if __DUMP_SYMBOLS_PROC:
features.extend(__DUMP_SYMBOLS_PROC(shape, properties))
else:
features.append(geojson.Feature(geometry=shape, properties=properties))
logger.info("%d features found" % len(features))
logger.closeSection()
return '{ "crs": { "type": "name", "properties": { "name": "urn:ogc:def:crs:EPSG::%s" } }, ' % srsName + geojson.dumps(geojson.FeatureCollection(features))[1:]
def getFeaturesProcessor(request, response):
values = []
for item in [( 'service', 'wfs'), ('request', 'GetFeature'), ('outputFormat', 'application/json'), ('typename', None), ('bbox', None), ('srsname', None), ('databasename', None)]:
key, defaultValue = item
value = request.query.get(key, defaultValue)
if value:
values.append(value)
else:
response.buildResult("Error: parameter %s is missing" % key, "text/html")
return
service, request, outputFormat, typeName, bbox, srsName, databaseName = values
response.buildResult(getWFSData(databaseName, typeName, bbox, srsName), "application/json")
def processGetFeaturesRequest(request, response, defaultValues={}, sqlQuery="", schemaName=""):
values = []
for item in [( 'service', 'wfs'), ('request', 'GetFeature'), ('outputFormat', 'application/json'), ('typename', None), ('bbox', None), ('srsname', None), ('databasename', None)]:
key, defaultValue = item
value = request.query.get(key, defaultValue)
if not value and key in defaultValues:
value = defaultValues[key]
if value:
values.append(value)
else:
response.buildResult("Error: parameter %s is missing" % key, "text/html")
return
service, request, outputFormat, typeName, bbox, srsName, databaseName = values
response.buildResult(getWFSData(databaseName, typeName, bbox, srsName, schemaName, sqlQuery), "application/json")
def getRestPaths(symbolNames=None, symbols=None, dumpSymbolsProc=None):
global __SYMBOLNAMES, __SYMBOLS, __DUMP_SYMBOLS_PROC
if symbols and not symbolNames:
__SYMBOLNAMES = symbols.names()
else:
__SYMBOLNAMES = symbolNames
__SYMBOLS = symbols
__DUMP_SYMBOLS_PROC = dumpSymbolsProc
return { "wfs/GetFeatures": PathContainer(getFeaturesProcessor, "Gets data_table features as WFS request") }
def runWFSServer(port, symbolNames=None, symbols=None, dumpSymbolsProc=None):
runServer(port, getRestPaths(symbolNames, symbols, dumpSymbolsProc))
if __name__ == "__main__":
runWFSServer(15368)
|
# Example of using ** to unpack a mapping-type argument
def puts(n, s):
    """Print s consecutively n times."""
    for _ in range(n):
        print(s, end='')
d1 = {'n': 3, 's': '*'}  # three '*'
d2 = {'s': '+', 'n': 7}  # seven '+'
puts(**d1)
print()
puts(**d2)
|
import random
x = 0
i = int(input("\nWhich one do you need? (1 = coin, 2 = dice):"))
while x == 0:
    if i == 1:
        y = random.randint(1, 2)
        if y == 1:
            print("heads")
        if y == 2:
            print("tails")
        x = 1
    if i == 2:
        z = random.randint(1, 6)
        print(z)
        x = 1
|
import pytest
pytest_plugins = ["pytester"]
@pytest.fixture
def sample_test_file(testdir):
testdir.makepyfile(
"""
import pytest
def test_ok():
assert True
def test_not_ok():
assert False
@pytest.mark.parametrize('param', ("foo", "bar"))
def test_params(param):
assert True
@pytest.mark.skip(reason='some reason')
def test_skipped():
assert False
@pytest.mark.xfail(reason='a reason')
def test_broken():
assert False
"""
)
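# Example (sketch): a test consuming the fixture; the outcome counts follow
# the generated test file above (3 passed, 1 failed, 1 skipped, 1 xfailed).
# def test_outcomes(sample_test_file, testdir):
#     result = testdir.runpytest()
#     result.assert_outcomes(passed=3, failed=1, skipped=1, xfailed=1)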
|
"""
Test for issue 51:
https://github.com/pandas-profiling/pandas-profiling/issues/51
"""
from pathlib import Path
import pandas as pd
import pandas_profiling
import requests
import numpy as np
def test_issue51(get_data_file):
# Categorical has empty ('') value
file_name = get_data_file(
"buggy1.pkl",
"https://raw.githubusercontent.com/adamrossnelson/HelloWorld/master/sparefiles/buggy1.pkl",
)
df = pd.read_pickle(str(file_name))
report = df.profile_report(title="Pandas Profiling Report")
assert (
"<title>Pandas Profiling Report</title>" in report.to_html()
), "Profile report should be generated."
def test_issue51_similar():
df = pd.DataFrame(
{
"test": ["", "hoi", None],
"blest": [None, "", "geert"],
"bert": ["snor", "", None],
}
)
report = df.profile_report(title="Pandas Profiling Report")
assert (
"<title>Pandas Profiling Report</title>" in report.to_html()
), "Profile report should be generated."
# def test_issue51_mixed():
# df = pd.DataFrame(
# {
# "test": ["", "hoi", None, "friet"],
# "blest": [None, "", "geert", "pizza"],
# "bert": ["snor", "", np.nan, ""],
# "fruit": ["", "ok", np.nan, ""],
# }
# )
# report = df.profile_report(title="Pandas Profiling Report")
# assert (
# "data-toggle=tab>Recoded</a>" in report.to_html()
# ), "Recoded should be present"
def test_issue51_empty():
df = pd.DataFrame(
{"test": ["", "", ""], "blest": ["", "", ""], "bert": ["", "", ""]}
)
report = df.profile_report(title="Pandas Profiling Report")
assert (
"<title>Pandas Profiling Report</title>" in report.to_html()
), "Profile report should be generated."
|
import os.path
import re
#import xml.etree.ElementTree as ET
from bs4 import BeautifulSoup
#...................#
#VERSION 6 - 8/30/18
#THIS WORKS - prints all AJ files where search matches, prints paper publication date, and line matches
rootDir = '/Users/jenniferkoch/Documents/astro_software_cite/AJ/'
# raw string so \b stays a regex word boundary instead of a backspace character
searchstring = r'(.*)(c|C)(i|I)(a|A)(o|O)|(.*)(c|C)handra\b|(.*)(s|S)(h|H)(e|E)(r|R)(p|P)(a|A)|(.*)(a|A)stro(\s*)(b|B)lend|(.*)(d|D)(s|S)9\b|(.*)(SAO)(\s*)Image|(.*)(h|H)oudini|(.*)(a|A)stro(\s*)(p|P)y|(.*)(s|S)pec2(d|D)|(.*)(p|P)lasma(\s*)(p|P)y'
patterns = re.compile(searchstring)
for dirName, subdirList, fileList in os.walk(rootDir):
    print('Found directory: %s' % dirName)
    for xmlfile in fileList:
        if xmlfile.endswith(".xml"):
            with open(dirName + '/' + xmlfile, 'r') as textfile:
                contents = textfile.read()
            match_found = False
            for line in contents.splitlines():
                if re.match(patterns, line):
                    match_found = True
                    soup = BeautifulSoup(contents, "lxml")
                    print('\033[1m', '\n---> Pattern found in', xmlfile, '\033[0m')
                    #x = soup.article.findAll(string=patterns)
                    #print(x)
                    date = soup.article.find("pub-date")
                    title = soup.article.find("title")
                    print('--Paper pub-year:', date.year)
                    #--needs revision; retrieves only first title 'ABSTRACT':
                    print('--Section title: ', title)
                    print('--Match found: ', line)
                    #--Attempt to use BeautifulSoup to look for section title
                    #section_title = soup.article.find(searchstring)
                    #section_title.find_parents("p", class="title")
            if not match_found:
                print('No match found in', xmlfile)
        else:
            print('File not xml') |
import torch
from torch import nn
import torch.nn.functional as F
from .utils import NestedTensor, nested_tensor_from_tensor_list
from .backbone import build_backbone
from .transformer import build_transformer
class Caption(nn.Module):
def __init__(self, backbone, transformer, hidden_dim, vocab_size):
super().__init__()
self.backbone = backbone
self.input_proj = nn.Conv2d(
backbone.num_channels, hidden_dim, kernel_size=1)
self.transformer = transformer
self.mlp = MLP(hidden_dim, 512, vocab_size, 3)
def forward(self, samples, target, target_mask):
if not isinstance(samples, NestedTensor):
samples = nested_tensor_from_tensor_list(samples)
features, pos = self.backbone(samples)
src, mask = features[-1].decompose()
assert mask is not None
hs = self.transformer(self.input_proj(src), mask,
pos[-1], target, target_mask)
out = self.mlp(hs.permute(1, 0, 2))
return out
class MLP(nn.Module):
""" Very simple multi-layer perceptron (also called FFN)"""
def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
super().__init__()
self.num_layers = num_layers
h = [hidden_dim] * (num_layers - 1)
self.layers = nn.ModuleList(nn.Linear(n, k)
for n, k in zip([input_dim] + h, h + [output_dim]))
def forward(self, x):
for i, layer in enumerate(self.layers):
x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
return x
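# Example (sketch): a prediction head mapping transformer outputs to
# vocabulary logits; the dimensions here are illustrative.
# head = MLP(input_dim=256, hidden_dim=512, output_dim=30522, num_layers=3)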
def build_model(config):
backbone = build_backbone(config)
transformer = build_transformer(config)
model = Caption(backbone, transformer, config.hidden_dim, config.vocab_size)
criterion = torch.nn.CrossEntropyLoss()
return model, criterion |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
RULES = [
('Daily, except Xmas day', 'RRULE:FREQ=DAILY;\nEXRULE:FREQ=YEARLY;BYMONTH=12;BYMONTHDAY=25'),
('Daily, Weekdays, except Xmas day',
'RRULE:FREQ=DAILY;BYDAY=MO,TU,WE,TH,FR;\nEXRULE:FREQ=YEARLY;BYMONTH=12;BYMONTHDAY=25'),
('Daily, Weekends, except Xmas day', 'RRULE:FREQ=DAILY;BYDAY=SA,SU;\nEXRULE:FREQ=YEARLY;BYMONTH=12;BYMONTHDAY=25'),
('Weekly, except Xmas day', 'RRULE:FREQ=WEEKLY;\nEXRULE:FREQ=YEARLY;BYMONTH=12;BYMONTHDAY=25'),
('Monthly, except Xmas day', 'RRULE:FREQ=MONTHLY;\nEXRULE:FREQ=YEARLY;BYMONTH=12;BYMONTHDAY=25'),
('Yearly, except Xmas day', 'RRULE:FREQ=YEARLY;\nEXRULE:FREQ=YEARLY;BYMONTH=12;BYMONTHDAY=25'),
]
def forwards(apps, schema_editor):
"""
Create initial recurrence rules.
"""
RecurrenceRule = apps.get_model('icekit_events', 'RecurrenceRule')
for description, recurrence_rule in RULES:
RecurrenceRule.objects.get_or_create(
description=description,
defaults=dict(recurrence_rule=recurrence_rule),
)
def backwards(apps, schema_editor):
"""
Delete initial recurrence rules.
"""
RecurrenceRule = apps.get_model('icekit_events', 'RecurrenceRule')
descriptions = [d for d, rr in RULES]
RecurrenceRule.objects.filter(description__in=descriptions).delete()
class Migration(migrations.Migration):
dependencies = [
('icekit_events', '0001_initial'),
]
operations = [
migrations.RunPython(forwards, backwards),
]
|
"""
Create Traffic Study Tables
"""
import sys
import psycopg2
import _setpath
from config.secrets import TRAFFIC_STUDY_DB
import table_config
dbname = TRAFFIC_STUDY_DB["dbname"]
user = TRAFFIC_STUDY_DB["user"]
password = TRAFFIC_STUDY_DB["password"]
host = TRAFFIC_STUDY_DB["host"]
port = 5432
conn = psycopg2.connect(
dbname=dbname, user=user, host=host, password=password, port=port
)
cursor = conn.cursor()
def create_table_query(tbl_cfg):
    # Build a complete CREATE TABLE statement from the table config;
    # assumes each config entry has "name" and "fields" keys, matching
    # the usage below.
    fields = ",\n".join(
        '"{}" {}'.format(field["name"], field["type"]) for field in tbl_cfg["fields"]
    )
    return 'CREATE TABLE IF NOT EXISTS "{}" (\n{}\n);'.format(tbl_cfg["name"], fields)
tables = table_config.tables  # assumes table_config exposes a `tables` list
for table in tables:
    sql = create_table_query(table)
    cursor.execute(sql)
conn.commit()
sql = """
SELECT * from information_schema.tables
"""
cursor.execute(sql)
res = cursor.fetchall()
table_names = [t["name"] for t in tables]
for t in res:
    if t[2] in table_names:
        print(t)
conn.close()
|
import webbrowser
class Movie():
"""
The "Movie" class represents a movie object with the following attributes:
Args:
        movie_title (string): Title
        movie_release_year (int): Release Year
        movie_storyline (string): Storyline
        poster_image (string): Poster Image URL
        trailer_youtube (string): Trailer Youtube URL
"""
def __init__(self, movie_title, movie_release_year, movie_storyline, poster_image, trailer_youtube):
self.title = movie_title
self.storyline = movie_storyline
self.poster_image_url = poster_image
self.trailer_youtube_url = trailer_youtube
self.release_year = movie_release_year
def show_trailer(self):
"""
Opens the trailer url in the default web browser.
"""
webbrowser.open(self.trailer_youtube_url) |
print('='*8, 'Dissecting a String', '='*8)
a = input('Type something: ')
print('The primitive type of this value is {}'.format(type(a)))
print('Is the value alphanumeric? {}.'.format(a.isalnum()))
print('Is the value alphabetic? {}.'.format(a.isalpha()))
print('Is the value a number? {}.'.format(a.isnumeric()))
print('Is the value all lowercase? {}.'.format(a.islower()))
print('Is the value title-cased? {}.'.format(a.istitle()))
print('Does the value contain only spaces? {}.'.format(a.isspace()))
|
import re
text = input()
matches = re.finditer(r"(^|(?<=\s))-?([0]|[1-9][0-9]*)(\.[0-9]+)?($|(?=\s))", text)
output = list()
for match in matches:
output.append(match.group())
print(' '.join(output))
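# Example: for the input "7 cats weigh 3.5 kg and -12 is fine",
# the pattern matches the standalone numbers and prints "7 3.5 -12".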
|
class request_handler():
"""
    This Class Handles the Parsing of Dialogflow Requests and gets details like Intent, Parameters, Session ID etc
:param dialogflowRequestJson: The Dialogflow Request JSON
"""
def __init__(self,dialogflowRequestJson):
self.resjson = dialogflowRequestJson
def get_intent(self):
"""
Returns the Intent Dictionary which triggered the Webhook
        :raises TypeError: This Error is Raised if the Intent Dictionary can't be retrieved because the Request JSON is Malformed
:return: Intent Object
:rtype: dict
"""
try:
return self.resjson["queryResult"]["intent"]
except:
raise TypeError("Malformed Request JSON: Failed to find Intent JSON")
def get_intent_name(self):
"""
Returns the Intent Name which triggered the Webhook
        :raises TypeError: This Error is Raised if the Intent Name can't be retrieved because the Request JSON is Malformed
:return: Intent Name
:rtype: str
"""
try:
return self.resjson["queryResult"]["intent"]["name"]
except:
raise TypeError("Malformed Request JSON: Failed to find Intent Name")
def get_intent_displayName(self):
"""
Returns the Intent Display Name (this is the Intent Name which you would have specified in Dialogflow) which triggered the Webhook
        :raises TypeError: This Error is Raised if the Intent Display Name can't be retrieved because the Request JSON is Malformed
:return: Intent Display Name
:rtype: str
"""
try:
return self.resjson["queryResult"]["intent"]["displayName"]
except:
raise TypeError("Malformed Request JSON: Failed to find Intent Display Name")
def get_parameters(self):
"""
Returns a Dictionary of filled Parameter Values
:return: Parameter Object
:rtype: dict
"""
try:
return self.resjson["queryResult"]["parameters"]
except:
return {}
def get_parameter(self,param):
"""
Returns a Parameter Value by Parameter Name
        :param param: The Parameter name to retrieve the Value
        :raises KeyError: This Error is Raised if the Parameter is not found
:return: Parameter Value
:rtype: str
"""
try:
return self.resjson["queryResult"]["parameters"][param]
except:
raise KeyError("Parameter "+param+" not found")
def get_action(self):
"""
Returns the Action Name Specified for the Intent
:return: Action Name
:rtype: str
"""
try:
return self.resjson["queryResult"]["action"]
except:
return ""
def get_session_id(self):
"""
Returns the Session ID of the Dialogflow Session
        :raises TypeError: This Error is Raised if the Session ID can't be retrieved because the Request JSON is Malformed
:return: Session ID
:rtype: str
"""
try:
return self.resjson["session"]
except:
raise TypeError("Malformed Request JSON: Failed to find Session ID")
def get_context_by_name(self,contextName):
"""
Returns a Context Dictionary by Context Name
        :param contextName: The Context Name to retrieve the Context JSON
:type contextName: str
:raises LookupError: This Error is Raised if The Context is not found
:return: Context Object
:rtype: dict
"""
fres = {}
for i in self.resjson["queryResult"]["outputContexts"]:
if i["name"].split("/")[len(i["name"].split("/"))-1] == contextName:
fres = i
break
if fres == {}:
raise LookupError("Context with name "+contextName+" not found!")
else:
return fres
def get_capabilities(self):
"""
        Returns a list of Google Assistant Capabilities for a particular surface (eg. Smart Display, Mobile Phone, Chromebook etc.) from where the bot is accessed.
:return: Capabilities List
:rtype: list
.. note:: This Feature is specific only for Google Assistant. This will return an empty list if the bot is accessed from platforms which are not Google Assistant
"""
try:
retjson = []
for i in self.resjson["originalDetectIntentRequest"]["payload"]["surface"]["capabilities"]:
retjson.append(i["name"])
return retjson
except:
return []
def get_payload(self):
"""
Returns the Platform Specific Payload from where the request originated
:return: Payload Object
:rtype: dict
"""
try:
return self.resjson["originalDetectIntentRequest"]["payload"]
except:
return {}
def get_source(self):
"""
Returns the source where the request originated
:return: Source where the request originated
:rtype: str
"""
try:
return self.resjson["originalDetectIntentRequest"]["source"]
except:
return "" |
#import required packages
from flask import Flask, render_template, request,session
from flask_sqlalchemy import SQLAlchemy
from static.mpesa_config import generate_access_token, register_mpesa_url, stk_push
import os
import pymysql
#create a Flask object
application = Flask(__name__)
#establish a connection to our MYSQL server using sqlalchemy ORM. I assume you are saving all creds into environment variables
database = os.environ.get("NAME_OF_YOUR_MYSQL_DB")
db_username = os.environ.get("YOUR_MYSQL_USERNAME")
db_password = os.environ.get("YOUR_MYSQL_PASSWD")
db_host = os.environ.get("YOUR_MYSQL_HOST") #the uri to the db
#create a database connection
conn = "mysql+pymysql://{0}:{1}@{2}/{3}".format(db_username, db_password, db_host, database)
application.config['SQLALCHEMY_DATABASE_URI'] = (conn)
db = SQLAlchemy(application)
from static import models
@application.route('/', methods=['GET'])
def Home():
return render_template ("home.html")
@application.route('/mpesa_token')
def access_token():
consumer_key = os.environ.get("MPESA_CONSUMER_KEY")
consumer_secret = os.environ.get("MPESA_CONSUMER_SECRET")
return generate_access_token(consumer_key, consumer_secret)
@application.route('/register_mpesa_url')
def register_url():
return register_mpesa_url()
@application.route('/validate', methods=['POST'])
def validate():
if request.method == 'POST':
jsonMpesaResponse = request.get_json()
print(jsonMpesaResponse)
return render_template ("home.html")
@application.route('/confirm', methods=['POST'])
def confirm():
if request.method == 'POST':
jsonMpesaResponse = request.get_json() #receive the json from Daraja API and write it to your MYSQL database
        #We shall write every payment detail into our MYSQL database with the help of the sqlalchemy ORM session
try:
add_pmt = models.client_payments_table(TransactionType=jsonMpesaResponse['TransactionType'], TransID=jsonMpesaResponse['TransID'], \
TransTime=jsonMpesaResponse['TransTime'], TransAmount=jsonMpesaResponse['TransAmount'], \
BusinessShortCode=jsonMpesaResponse['BusinessShortCode'], BillRefNumber=jsonMpesaResponse['BillRefNumber'], \
InvoiceNumber=jsonMpesaResponse['InvoiceNumber'], OrgAccountBalance=jsonMpesaResponse['OrgAccountBalance'], \
ThirdPartyTransID=jsonMpesaResponse['ThirdPartyTransID'], MSISDN=jsonMpesaResponse['MSISDN'], \
FirstName=jsonMpesaResponse['FirstName'], MiddleName=jsonMpesaResponse['MiddleName'], LastName=jsonMpesaResponse['LastName'])
db.session.add(add_pmt)
except Exception as e:
print("Could not write payment details to database")
print(jsonMpesaResponse)
print(e)
db.session.rollback()
else:
db.session.commit()
finally:
db.session.close()
return render_template ("home.html")
#Our application URL for collecting request data and firing the payment process
@application.route('/mobile_payment')
def mobilePayment():
phone_number = '254722456789'
amount = 1000 #ensure amount is an integer and not a floating point number
account_reference = 'TEST123' #This is the reference that will appear as the account number on the paybill payment
transaction_desc = 'Payment for supplies' #Any description
return stk_push(phone_number, amount, account_reference, transaction_desc) #Invoke the stk-push function with the request data
if __name__== "__main__":
application.run() |
import re
import enchant
from nltk.corpus import stopwords
from nltk.stem.snowball import SnowballStemmer
from nltk.stem import WordNetLemmatizer
from pipe import Pipe
from .patterns import NEGATIVE_CONSTRUCTS
from .patterns import NEGATIVE_EMOTICONS
from .patterns import POSITIVE_EMOTICONS
from .patterns import URLS
class Options(object):
EMAILS = "emails"
EMOTICONS = "emoticons"
LEMMATIZER = "lemmatizer"
NEGATIVE_CONSTRUCTS = "negative_constructs"
PUNCTUATION = "punctuation"
REPEATING_VOWELS = "repeating_vowels"
SPELLING = "spelling"
STEMMER = "stemmer"
STOPWORDS = "stopwords"
URLS = "urls"
    @staticmethod
    def all():
return (
Options.EMAILS,
Options.EMOTICONS,
Options.LEMMATIZER,
Options.NEGATIVE_CONSTRUCTS,
Options.REPEATING_VOWELS,
Options.SPELLING,
Options.STEMMER,
Options.STOPWORDS,
Options.URLS
)
OPTIONS = Options.all()
def configure(options):
global OPTIONS
OPTIONS = options
def clean(sentence):
return sentence.lower() \
| remove_repeating_vowels \
| replace_negative_constructs \
| replace_emoticons_with_tags \
| remove_urls \
| remove_emails \
| remove_punctuation \
| remove_misspelled_words \
| stem \
| lemmatize
@Pipe
def remove_repeating_vowels(sentence):
if Options.REPEATING_VOWELS not in OPTIONS:
return sentence
return re.sub(r"(.)\1+", r"\1\1", sentence)
@Pipe
def replace_negative_constructs(sentence):
if Options.NEGATIVE_CONSTRUCTS not in OPTIONS:
return sentence
words = []
for word in sentence.lower().split():
if word in NEGATIVE_CONSTRUCTS:
words.append("not")
else:
words.append(word)
return " ".join(words)
@Pipe
def replace_emoticons_with_tags(sentence):
if Options.EMOTICONS not in OPTIONS:
return sentence
words = sentence.split()
for i, word in enumerate(words):
if word in POSITIVE_EMOTICONS:
words[i] = "positive"
if word in NEGATIVE_EMOTICONS:
words[i] = "negative"
return " ".join(words)
@Pipe
def remove_urls(sentence):
if Options.URLS not in OPTIONS:
return sentence
return re.sub(URLS, "", sentence)
@Pipe
def remove_emails(sentence):
if Options.EMAILS not in OPTIONS:
return sentence
return re.sub(r"\S*@\S*\s?", "", sentence)
@Pipe
def remove_stopwords(sentence):
if Options.STOPWORDS not in OPTIONS:
return sentence
stop = set(stopwords.words("english"))
words = sentence.lower().split()
return " ".join([word for word in words if word not in stop])
LEMMATIZER = None
@Pipe
def lemmatize(sentence):
if Options.LEMMATIZER not in OPTIONS:
return sentence
global LEMMATIZER
if LEMMATIZER is None:
LEMMATIZER = WordNetLemmatizer()
lemmatized = [LEMMATIZER.lemmatize(word, pos="v")
for word in sentence.split()]
return " ".join(lemmatized)
STEMMER = None
@Pipe
def stem(sentence):
if Options.STEMMER not in OPTIONS:
return sentence
global STEMMER
if STEMMER is None:
STEMMER = SnowballStemmer("english")
stemmed = [STEMMER.stem(word) for word in sentence.split()]
return " ".join(stemmed)
DICTIONARY = None
@Pipe
def remove_misspelled_words(sentence):
if Options.SPELLING not in OPTIONS:
return sentence
global DICTIONARY
if DICTIONARY is None:
DICTIONARY = enchant.Dict("en_US")
checked = [word for word in sentence.split() if DICTIONARY.check(word)]
return " ".join(checked)
@Pipe
def remove_whitespace(sentence):
return re.sub(r"\s+", " ", sentence)
@Pipe
def remove_punctuation(sentence):
if Options.PUNCTUATION not in OPTIONS:
return sentence
return re.sub(r"[^\w\s\"]", "", sentence)
|
import qrcode
def get_qrcode(ssid, psk):
qr = qrcode.QRCode(
version=1,
error_correction=qrcode.constants.ERROR_CORRECT_L,
box_size=10,
border=4,
)
qr.add_data("WIFI:S:{0};T:WPA;P:{1};;".format(ssid, psk))
qr.make(fit=True)
return qr.get_matrix()
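# Example (sketch): the SSID and passphrase below are placeholders.
# matrix = get_qrcode("MyNetwork", "secret-passphrase")
# rows_html = get_qrcode_as_html("MyNetwork", "secret-passphrase")  # defined below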
def get_qrcode_as_html(ssid, psk):
qr = get_qrcode(ssid, psk)
qr_html = ""
fg_color = "#ffffff"
bg_color = "#000000"
for i in qr:
qr_html+="<tr>"
for j in i:
if j: color = bg_color
else: color = fg_color
qr_html +="<td style=\"background-color:{0}; height:5px; width: 5px; padding: 0px; margin: 0px\"></td>\r\n".format(color)
qr_html+="</tr>\r\n"
return qr_html |
from flask import url_for
import flask_admin
from flask_admin import helpers as admin_helpers
from app_core import app, db
from models import security, RestrictedModelView, UserModelView, InvoiceModelView, UtilityModelView, Role, User, BronzeData, Invoice, Utility
# Create admin
admin = flask_admin.Admin(
app,
'Zap Payments',
base_template='my_master.html',
template_mode='bootstrap3',
)
# Add model views
admin.add_view(RestrictedModelView(Role, db.session, category='Admin'))
admin.add_view(UserModelView(User, db.session, category='Admin'))
admin.add_view(RestrictedModelView(BronzeData, db.session, category='Admin'))
admin.add_view(InvoiceModelView(Invoice, db.session))
admin.add_view(UtilityModelView(Utility, db.session))
# define a context processor for merging flask-admin's template context into the
# flask-security views.
@security.context_processor
def security_context_processor():
return dict(
admin_base_template=admin.base_template,
admin_view=admin.index_view,
h=admin_helpers,
get_url=url_for
)
|
import os
from environs import Env
env = Env()
env.read_env()
BOT_TOKEN = env.str('BOT_TOKEN')
ADMINS = env.list('ADMINS')
if not BOT_TOKEN:
    print('You forgot to set BOT_TOKEN')
quit()
HEROKU_APP_NAME = os.getenv('HEROKU_APP_NAME')
# webhook settings https://epicker-bot.herokuapp.com/
WEBHOOK_HOST = f'https://{HEROKU_APP_NAME}.herokuapp.com'
WEBHOOK_PATH = f'/webhook/{BOT_TOKEN}'
WEBHOOK_URL = f'{WEBHOOK_HOST}{WEBHOOK_PATH}'
# webserver settings
WEBAPP_HOST = '0.0.0.0'
WEBAPP_PORT = env.int('PORT')
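# Example .env (sketch; variable names follow the lookups above, the values
# are placeholders):
# BOT_TOKEN=123456:ABC-DEF
# ADMINS=11111,22222
# HEROKU_APP_NAME=epicker-bot
# PORT=8443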
|
import json
def register_user(self):
"""helper function for registering a user."""
return self.client.post(
'api/v1/auth/signup',
data=json.dumps({
"confirm": "password123",
"password": "password123",
"email": '[email protected]',
"username": "kamar"
}),
content_type='application/json'
)
def login_user(self):
"""helper function for login a user."""
return self.client.post(
'api/v1/auth/login',
data=json.dumps({
"password": "password123",
"username": "kamar"
}),
content_type='application/json'
)
|
# Copyright 2017 The Armada Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from concurrent.futures import ThreadPoolExecutor, as_completed
from oslo_config import cfg
from oslo_log import log as logging
from armada import const
from armada.conf import set_current_chart
from armada.exceptions import armada_exceptions
from armada.exceptions import override_exceptions
from armada.exceptions import source_exceptions
from armada.exceptions import tiller_exceptions
from armada.exceptions import validate_exceptions
from armada.handlers.chart_deploy import ChartDeploy
from armada.handlers.manifest import Manifest
from armada.handlers.override import Override
from armada.utils.release import release_prefixer
from armada.utils import source
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class Armada(object):
'''
This is the main Armada class handling the Armada
workflows
'''
def __init__(self,
documents,
tiller,
disable_update_pre=False,
disable_update_post=False,
enable_chart_cleanup=False,
dry_run=False,
set_ovr=None,
force_wait=False,
timeout=None,
values=None,
target_manifest=None,
k8s_wait_attempts=1,
k8s_wait_attempt_sleep=1):
'''
Initialize the Armada engine and establish a connection to Tiller.
:param List[dict] documents: Armada documents.
:param tiller: Tiller instance to use.
:param bool disable_update_pre: Disable pre-update Tiller operations.
:param bool disable_update_post: Disable post-update Tiller
operations.
:param bool enable_chart_cleanup: Clean up unmanaged charts.
:param bool dry_run: Run charts without installing them.
:param bool force_wait: Force Tiller to wait until all charts are
deployed, rather than using each chart's specified wait policy.
:param int timeout: Specifies overall time in seconds that Tiller
should wait for charts until timing out.
:param str target_manifest: The target manifest to run. Useful for
specifying which manifest to run when multiple are available.
:param int k8s_wait_attempts: The number of times to attempt waiting
for pods to become ready.
:param int k8s_wait_attempt_sleep: The time in seconds to sleep
between attempts.
'''
self.enable_chart_cleanup = enable_chart_cleanup
self.dry_run = dry_run
self.force_wait = force_wait
self.tiller = tiller
try:
self.documents = Override(
documents, overrides=set_ovr,
values=values).update_manifests()
except (validate_exceptions.InvalidManifestException,
override_exceptions.InvalidOverrideValueException):
raise
self.manifest = Manifest(
self.documents, target_manifest=target_manifest).get_manifest()
self.chart_cache = {}
self.chart_deploy = ChartDeploy(
disable_update_pre, disable_update_post, self.dry_run,
k8s_wait_attempts, k8s_wait_attempt_sleep, timeout, self.tiller)
def pre_flight_ops(self):
"""Perform a series of checks and operations to ensure proper
deployment.
"""
LOG.info("Performing pre-flight operations.")
# Ensure Tiller is available and manifest is valid
if not self.tiller.tiller_status():
raise tiller_exceptions.TillerServicesUnavailableException()
# Clone the chart sources
manifest_data = self.manifest.get(const.KEYWORD_ARMADA, {})
for group in manifest_data.get(const.KEYWORD_GROUPS, []):
for ch in group.get(const.KEYWORD_CHARTS, []):
self.get_chart(ch)
def get_chart(self, ch):
chart = ch.get('chart', {})
chart_source = chart.get('source', {})
location = chart_source.get('location')
ct_type = chart_source.get('type')
subpath = chart_source.get('subpath', '.')
if ct_type == 'local':
chart['source_dir'] = (location, subpath)
elif ct_type == 'tar':
source_key = (ct_type, location)
if source_key not in self.chart_cache:
LOG.info('Downloading tarball from: %s', location)
                if not CONF.certs:
                    LOG.warning(
                        'Disabling server validation certs to extract charts')
                    tarball_dir = source.get_tarball(location, verify=False)
                else:
                    tarball_dir = source.get_tarball(
                        location, verify=CONF.certs)
self.chart_cache[source_key] = tarball_dir
chart['source_dir'] = (self.chart_cache.get(source_key), subpath)
elif ct_type == 'git':
reference = chart_source.get('reference', 'master')
source_key = (ct_type, location, reference)
if source_key not in self.chart_cache:
auth_method = chart_source.get('auth_method')
proxy_server = chart_source.get('proxy_server')
logstr = 'Cloning repo: {} from branch: {}'.format(
location, reference)
if proxy_server:
logstr += ' proxy: {}'.format(proxy_server)
if auth_method:
logstr += ' auth method: {}'.format(auth_method)
LOG.info(logstr)
repo_dir = source.git_clone(
location,
reference,
proxy_server=proxy_server,
auth_method=auth_method)
self.chart_cache[source_key] = repo_dir
chart['source_dir'] = (self.chart_cache.get(source_key), subpath)
else:
chart_name = chart.get('chart_name')
raise source_exceptions.ChartSourceException(ct_type, chart_name)
for dep in ch.get('chart', {}).get('dependencies', []):
self.get_chart(dep)
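    # A minimal sketch of a chart entry that get_chart() can resolve. The key
    # names are taken from the lookups above; the values are hypothetical:
    #
    #   ch = {'chart': {'chart_name': 'mariadb',
    #                   'source': {'type': 'git',
    #                              'location': 'https://git.example.com/charts',
    #                              'subpath': 'mariadb',
    #                              'reference': 'master'},
    #                   'dependencies': []}}
    #
    # A 'git' source is cloned once and cached under (type, location,
    # reference); 'tar' and 'local' resolve to the same
    # chart['source_dir'] = (path, subpath) convention.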
def sync(self):
'''
Synchronize Helm with the Armada Config(s)
'''
if self.dry_run:
LOG.info('Armada is in DRY RUN mode, no changes being made.')
msg = {
'install': [],
'upgrade': [],
'diff': [],
'purge': [],
'protected': []
}
        # TODO(gardlt): break this function up into a cleaner format
self.pre_flight_ops()
known_releases = self.tiller.list_releases()
manifest_data = self.manifest.get(const.KEYWORD_ARMADA, {})
prefix = manifest_data.get(const.KEYWORD_PREFIX)
for chartgroup in manifest_data.get(const.KEYWORD_GROUPS, []):
cg_name = chartgroup.get('name', '<missing name>')
cg_desc = chartgroup.get('description', '<missing description>')
cg_sequenced = chartgroup.get('sequenced',
False) or self.force_wait
LOG.info('Processing ChartGroup: %s (%s), sequenced=%s%s', cg_name,
cg_desc, cg_sequenced,
' (forced)' if self.force_wait else '')
# TODO(MarshM): Deprecate the `test_charts` key
cg_test_all_charts = chartgroup.get('test_charts')
cg_charts = chartgroup.get(const.KEYWORD_CHARTS, [])
charts = map(lambda x: x.get('chart', {}), cg_charts)
def deploy_chart(chart):
set_current_chart(chart)
try:
return self.chart_deploy.execute(chart, cg_test_all_charts,
prefix, known_releases)
finally:
set_current_chart(None)
results = []
failures = []
# Returns whether or not there was a failure
def handle_result(chart, get_result):
name = chart['chart_name']
try:
result = get_result()
except Exception:
LOG.exception('Chart deploy [{}] failed'.format(name))
failures.append(name)
return True
else:
results.append(result)
return False
if cg_sequenced:
for chart in charts:
if (handle_result(chart, lambda: deploy_chart(chart))):
break
else:
with ThreadPoolExecutor(
max_workers=len(cg_charts)) as executor:
future_to_chart = {
executor.submit(deploy_chart, chart): chart
for chart in charts
}
for future in as_completed(future_to_chart):
chart = future_to_chart[future]
handle_result(chart, future.result)
if failures:
LOG.error('Chart deploy(s) failed: %s', failures)
raise armada_exceptions.ChartDeployException(failures)
for result in results:
for k, v in result.items():
msg[k].append(v)
# End of Charts in ChartGroup
LOG.info('All Charts applied in ChartGroup %s.', cg_name)
self.post_flight_ops()
if self.enable_chart_cleanup:
self._chart_cleanup(
prefix,
self.manifest[const.KEYWORD_ARMADA][const.KEYWORD_GROUPS], msg)
LOG.info('Done applying manifest.')
return msg
def post_flight_ops(self):
'''
Operations to run after deployment process has terminated
'''
LOG.info("Performing post-flight operations.")
# Delete temp dirs used for deployment
for chart_dir in self.chart_cache.values():
LOG.debug('Removing temp chart directory: %s', chart_dir)
source.source_cleanup(chart_dir)
def _chart_cleanup(self, prefix, charts, msg):
LOG.info('Processing chart cleanup to remove unspecified releases.')
valid_releases = []
for gchart in charts:
for chart in gchart.get(const.KEYWORD_CHARTS, []):
valid_releases.append(
release_prefixer(prefix,
chart.get('chart', {}).get('release')))
actual_releases = [x.name for x in self.tiller.list_releases()]
release_diff = list(set(actual_releases) - set(valid_releases))
for release in release_diff:
if release.startswith(prefix):
LOG.info('Purging release %s as part of chart cleanup.',
release)
self.tiller.uninstall_release(release)
msg['purge'].append(release)
|
# -*- coding: utf-8 -*-
#
# ramstk.views.gtk3.design_electric.components.meter.py is part of the RAMSTK
# Project.
#
# All rights reserved.
# Copyright since 2007 Doyle "weibullguy" Rowland doyle.rowland <AT> reliaqual <DOT> com
"""Meter Input Panel."""
# Standard Library Imports
from typing import Any, Dict, List
# Third Party Imports
from pubsub import pub
# RAMSTK Package Imports
from ramstk.views.gtk3 import _
from ramstk.views.gtk3.widgets import RAMSTKComboBox, RAMSTKFixedPanel
class MeterDesignElectricInputPanel(RAMSTKFixedPanel):
"""Display Meter assessment input attribute data in the RAMSTK Work Book.
The Meter assessment input view displays all the assessment inputs for
the selected Meter item. This includes, currently, inputs for
MIL-HDBK-217FN2. The attributes of a Meter assessment input view are:
    :cvar dict _dic_quality: dictionary of meter quality levels. Key is
        meter subcategory ID; values are lists of quality levels.
    :cvar dict _dic_types: dictionary of meter types. Key is meter
        subcategory ID; values are lists of types.
:ivar cmbApplication: select and display the application of the meter.
:ivar cmbType: select and display the type of meter.
"""
# Define private dict class attributes.
# Quality levels; key is the subcategory ID.
_dic_quality: Dict[int, List[List[str]]] = {
2: [["MIL-SPEC"], [_("Lower")]],
1: [["MIL-SPEC"], [_("Lower")]],
}
# Meter types; key is the subcategory ID.
_dic_types: Dict[int, List[List[str]]] = {
1: [[_("AC")], [_("Inverter Driver")], [_("Commutator DC")]],
2: [[_("Direct Current")], [_("Alternating Current")]],
}
# Define private list class attributes.
# Define private scalar class attributes.
_record_field: str = "hardware_id"
_select_msg: str = "selected_hardware"
_tag: str = "design_electric"
_title: str = _("Meter Design Inputs")
# Define public dictionary class attributes.
# Define public list class attributes.
# Define public scalar class attributes.
def __init__(self) -> None:
"""Initialize an instance of the Meter assessment input view."""
super().__init__()
# Initialize widgets.
self.cmbApplication: RAMSTKComboBox = RAMSTKComboBox()
self.cmbQuality: RAMSTKComboBox = RAMSTKComboBox()
self.cmbType: RAMSTKComboBox = RAMSTKComboBox()
# Initialize private dictionary attributes.
# Initialize private list attributes.
# Initialize private scalar attributes.
self._hazard_rate_method_id: int = 0
self._quality_id: int = 0
# Initialize public dictionary attributes.
self.dic_attribute_widget_map: Dict[str, List[Any]] = {
"quality_id": [
32,
self.cmbQuality,
"changed",
super().on_changed_combo,
"wvw_editing_reliability",
0,
{
"tooltip": _("The quality level of the meter."),
},
_("Quality Level:"),
"gint",
],
"type_id": [
48,
self.cmbType,
"changed",
super().on_changed_combo,
self.on_edit_callback,
0,
{
"tooltip": _("The type of meter."),
},
_("Meter Type:"),
"gint",
],
"application_id": [
2,
self.cmbApplication,
"changed",
super().on_changed_combo,
self.on_edit_callback,
0,
{
"tooltip": _("The application of the panel meter."),
},
_("Meter Function:"),
"gint",
],
}
# Initialize public list attributes.
# Initialize public scalar attributes.
self.category_id: int = 0
self.subcategory_id: int = 0
super().do_set_properties()
super().do_make_panel()
super().do_set_callbacks()
# Subscribe to PyPubSub messages.
pub.subscribe(
self.do_load_comboboxes,
"changed_subcategory",
)
pub.subscribe(
self._do_set_reliability_attributes,
"succeed_get_reliability_attributes",
)
def do_load_comboboxes(self, subcategory_id: int) -> None:
"""Load the meter assessment input RAMSTKComboBox()s.
:param subcategory_id: the subcategory ID of the selected meter.
:return: None
:rtype: None
"""
self.subcategory_id = subcategory_id
# Load the quality level RAMSTKComboBox().
if self._hazard_rate_method_id == 1:
_data = [["MIL-SPEC"], [_("Lower")]]
else:
try:
_data = self._dic_quality[self.subcategory_id]
except KeyError:
_data = []
self.cmbQuality.do_load_combo(_data, signal="changed")
# Load the meter application RAMSTKComboBox().
self.cmbApplication.do_load_combo(
[[_("Ammeter")], [_("Voltmeter")], [_("Other")]], signal="changed"
)
# Load the meter type RAMSTKComboBox().
try:
_data = self._dic_types[self.subcategory_id]
except KeyError:
_data = []
self.cmbType.do_load_combo(_data, signal="changed")
def _do_set_reliability_attributes(self, attributes: Dict[str, Any]) -> None:
"""Set the attributes when the reliability attributes are retrieved.
:param attributes: the dict of reliability attributes.
:return: None
:rtype: None
"""
if attributes["hardware_id"] == self._record_id:
self._hazard_rate_method_id = attributes["hazard_rate_method_id"]
self._quality_id = attributes["quality_id"]
def _do_set_sensitive(self, attributes: Dict[str, Any]) -> None:
"""Set widget sensitivity as needed for the selected meter.
:return: None
:rtype: None
"""
self.cmbQuality.set_sensitive(True)
self.cmbQuality.do_update(
self._quality_id,
signal="changed",
)
self.cmbApplication.set_sensitive(False)
self.cmbType.set_sensitive(True)
self.cmbType.do_update(
attributes["type_id"],
signal="changed",
)
if self._hazard_rate_method_id == 2 and self.subcategory_id == 2:
self.cmbApplication.set_sensitive(True)
self.cmbApplication.do_update(
attributes["application_id"],
signal="changed",
)
|
from .octree import *
from .frontend import *
|
from django.apps import AppConfig
class AjaxSelectConfig(AppConfig):
"""
    Django 1.7+ supports initializing installed applications and
    autodiscovering their modules.

    On startup, search for and import any modules called `lookups.py` in all
    installed apps, so that each lookup channel class may register itself.
"""
name = 'ajax_select'
verbose_name = 'Ajax Selects'
def ready(self):
from ajax_select.registry import registry
registry.load_channels()
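# A sketch of a `lookups.py` module that this autodiscovery would pick up,
# assuming django-ajax-selects' documented register/LookupChannel API; the
# channel name and model are hypothetical:
#
#   from ajax_select import register, LookupChannel
#   from django.contrib.auth.models import User
#
#   @register('users')
#   class UserLookup(LookupChannel):
#       model = User
#
#       def get_query(self, q, request):
#           return self.model.objects.filter(username__icontains=q)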
|
from __future__ import division
import numpy as np
from itertools import izip
from dipy.viz import fvtk
from dipy.viz import window, actor
from dipy.viz.axycolor import distinguishable_colormap
from dipy.tracking.streamline import get_bounding_box_streamlines
from itertools import chain
from dipy.viz import interactor
from dipy.viz.utils import get_grid_cells_position, get_bounding_box_sizes, auto_orient, shallow_copy
from dipy.tracking.metrics import principal_components
import vtk
# With autoresize
def show_grid(ren, actors, texts=None, title="Grid view", size=(800, 600)):
ren.projection('parallel')
show_m = window.ShowManager(ren, title=title, size=size,
#interactor_style=InteractorStyleImageAndTrackballActor())
                                interactor_style=interactor.InteractorStyleBundlesGrid(actors))
#interactor_style="trackball")
# Size of every cell corresponds to the diagonal of the largest bounding box.
longest_diagonal = np.max([a.GetLength() for a in actors])
shapes = [(longest_diagonal, longest_diagonal)] * len(actors)
positions = get_grid_cells_position(shapes, aspect_ratio=size[0]/size[1])
for a, pos in zip(actors, positions):
a.SetPosition(pos - a.GetCenter())
ren.add(a)
last_ren_size = [size]
def resize_grid(obj, ev):
ren_size = ren.GetSize()
if last_ren_size[0] != ren_size:
last_ren_size[0] = ren_size
print "Resizing..."
ren.ComputeAspect()
positions = get_grid_cells_position(shapes, aspect_ratio=ren.GetAspect()[0])
for a, pos in zip(actors, positions):
a.SetPosition(pos - a.GetCenter())
ren.reset_camera_tight()
show_m.render()
show_m.add_window_callback(resize_grid)
ren.reset_camera_tight()
show_m.initialize()
show_m.render()
show_m.start()
def cluster_and_interactive_show(streamlines):
from dipy.segment.clustering import QuickBundles
if streamlines is None:
import nibabel as nib
streamlines = nib.streamlines.load("/home/marc/research/dat/streamlines/ismrm/bundles_af.left.trk", ref=None)
qb = QuickBundles(threshold=12.)
clusters = qb.cluster(streamlines.points)
bg = (0, 0, 0)
colormap = distinguishable_colormap(bg=bg)
ren = window.Renderer()
actors = []
texts = []
for cluster, color in izip(clusters, colormap):
stream_actor = actor.line(cluster, [color]*len(cluster), linewidth=1)
actors.append(stream_actor)
text = actor.text_3d(str(len(cluster)), font_size=32, justification="center", vertical_justification="top")
texts.append(text)
brain = actor.Container()
brain.add(*actors, borrow=False)
grid = actor.grid(actors, texts, cell_padding=(50, 100), cell_shape="rect")
grid.SetVisibility(False)
# Grid renderer
ren.background(bg)
ren.projection("perspective")
ren.add(brain)
ren.add(grid)
#ren.add(actor.axes((50, 50, 50)))
ren.reset_camera()
show_m = window.ShowManager(ren, interactor_style="trackball")
brain_interactor_style = vtk.vtkInteractorStyleTrackballCamera()
grid_interactor_style = interactor.InteractorStyleBundlesGrid(actors)
def toggle_grid_view(obj, event):
if obj.GetKeySym() == "g":
grid.SetVisibility(not grid.GetVisibility())
brain.SetVisibility(not brain.GetVisibility())
if grid.GetVisibility():
ren.projection("parallel")
grid_interactor_style.SetInteractor(show_m.iren)
show_m.iren.SetInteractorStyle(grid_interactor_style)
ren.reset_camera_tight()
else:
ren.projection("perspective")
brain_interactor_style.SetInteractor(show_m.iren)
show_m.iren.SetInteractorStyle(brain_interactor_style)
ren.reset_camera()
        # We have to reset the callback since InteractorStyleBundlesGrid erases them :/
show_m.iren.AddObserver("KeyPressEvent", toggle_grid_view)
show_m.iren.Render()
show_m.iren.AddObserver("KeyPressEvent", toggle_grid_view)
show_m.start()
def auto_orient_example(streamlines=None):
from dipy.segment.clustering import QuickBundles
if streamlines is None:
import nibabel as nib
#streamlines = nib.streamlines.load("/home/marc/research/dat/streamlines/ismrm/bundles_af.left.trk", ref=None)
#streamlines2 = nib.streamlines.load("/home/marc/research/dat/streamlines/ismrm/bundles_cst.right.trk", ref=None)
streamlines = nib.streamlines.load("/home/marc/research/dat/streamlines/ismrm/bundles_cc_mohawk.trk", ref=None)
qb = QuickBundles(threshold=16.)
#clusters = qb.cluster(streamlines.points + streamlines2.points)
clusters = qb.cluster(streamlines.points[::100])
bg = (0, 0, 0)
colormap = distinguishable_colormap(bg=bg)
ren = window.Renderer()
ren.background(bg)
ren.projection("parallel")
actors = []
texts = []
for cluster, color in izip(clusters[:15], colormap):
stream_actor = actor.line(cluster, [color]*len(cluster), linewidth=1)
pretty_actor = auto_orient(stream_actor, ren.camera_direction(), data_up=(0, 0, 1), show_bounds=True)
pretty_actor_aabb = auto_orient(stream_actor, ren.camera_direction(), bbox_type="AABB", show_bounds=True)
actors.append(stream_actor)
actors.append(pretty_actor_aabb)
actors.append(pretty_actor)
text = actor.text_3d(str(len(cluster)), font_size=32, justification="center", vertical_justification="top")
texts.append(text)
text = actor.text_3d("AABB", font_size=32, justification="center", vertical_justification="top")
texts.append(text)
text = actor.text_3d("OBB", font_size=32, justification="center", vertical_justification="top")
texts.append(text)
grid = actor.grid(actors, texts, cell_padding=(50, 100), cell_shape="rect")
ren.add(grid)
ren.reset_camera_tight()
    show_m = window.ShowManager(ren, interactor_style=interactor.InteractorStyleBundlesGrid(actors))
show_m.start()
# def show_hierarchical_clusters(tree, theta_range=(0, np.pi), show_circles=False, size=(900, 900)):
# bg = (1, 1, 1)
# ren = fvtk.ren()
# fvtk.clear(ren)
# ren.SetBackground(*bg)
# box_min, box_max = get_bounding_box_streamlines(tree.root)
# width, height, depth = box_max - box_min
# box_size = max(width, height, depth)
# thresholds = set()
# max_threshold = tree.root.threshold
# box_size *= len(tree.root.children) * (theta_range[1]-theta_range[0]) / (2*np.pi)
# def _draw_subtree(node, color=fvtk.colors.orange_red, theta_range=theta_range, parent_pos=(0, 0, 0)):
# print np.array(theta_range) / np.pi * 360
# # Draw node
# offset = np.zeros(3)
# theta = theta_range[0] + (theta_range[1] - theta_range[0]) / 2.
# radius = max_threshold - node.threshold
# thresholds.add(node.threshold)
# offset[0] += radius*box_size * np.cos(theta)
# offset[1] -= radius*box_size * np.sin(theta)
# fvtk.add(ren, fvtk.line([s + offset for s in node], [color]*len(node), linewidth=2))
# fvtk.add(ren, fvtk.line(np.array([parent_pos, offset]), fvtk.colors.black, linewidth=1))
# if len(node.children) == 0:
# return
# children = sorted(node.children, key=lambda c: len(c))
# ratios = np.maximum([len(c) / len(node) for c in children], 0.1)
# ratios = ratios / np.sum(ratios) # Renormalize
# sections = theta_range[0] + np.cumsum([0] + ratios.tolist()) * (theta_range[1] - theta_range[0])
# colormap = distinguishable_colormap(bg=bg)
# for i, (node, color) in enumerate(izip(children, colormap)):
# _draw_subtree(node, color, (sections[i], sections[i+1]), offset)
# _draw_subtree(tree.root)
# # Draw circles for the different radius
# if show_circles:
# for threshold in sorted(thresholds)[:-1]:
# radius = max_threshold - threshold
# theta = -np.linspace(*theta_range, num=200)
# X = radius*box_size * np.cos(theta)
# Y = radius*box_size * np.sin(theta)
# Z = np.zeros_like(X)
# dashed_line = zip(np.array([X, Y, Z]).T[::4], np.array([X, Y, Z]).T[1::4])
# fvtk.add(ren, fvtk.line(dashed_line, fvtk.colors.black, linewidth=1))
# scale = box_size/8.
# text = "{:.1f}mm".format(threshold)
# pos = np.array([X[0], Y[0], Z[0]]) + np.array([-len(text)/2.*scale, scale/2., 0])
# fvtk.label(ren, text=text, pos=pos, scale=scale, color=(0, 0, 0))
# pos = np.array([X[-1], Y[-1], Z[-1]]) + np.array([-len(text)/2.*scale, scale/2., 0])
# fvtk.label(ren, text=text, pos=pos, scale=scale, color=(0, 0, 0))
# fvtk.show(ren, size=size)
|
"""Required modules"""
import re
import csv
import sys
import numpy as np
import scipy.io as sio
import xlrd
import numexpr as ne
DATE = xlrd.XL_CELL_DATE
TEXT = xlrd.XL_CELL_TEXT
BLANK = xlrd.XL_CELL_BLANK
EMPTY = xlrd.XL_CELL_EMPTY
ERROR = xlrd.XL_CELL_ERROR
NUMBER = xlrd.XL_CELL_NUMBER
def read_excel(filename, sheet=None):
"""Read sheet data or sheet names from an Excel workbook into a
:class:`Spreadsheet`.
:example:
sheet_names = read_excel('parameter.xlsx') # returns a list of sheet names
:example:
spreadsheet = read_excel('parameter.xlsx', 0) # read the first sheet
    :example:
        spreadsheet = read_excel('parameter.xls', 'sheet_2') # load 'sheet_2'
    :param filename: name of the Excel workbook to import
:param sheet: spreadsheet name or index to import
:type filename: string
:type sheet: string or integer or None
:return: sheet names if sheet is None, otherwise sheet data
:rtype: list of strings if sheet is None, otherwise :class:`Spreadsheet`"""
book = xlrd.open_workbook(filename)
spreadsheet = Spreadsheet()
if sheet is None:
return book.sheet_names()
elif isinstance(sheet, int):
xl_sheet = book.sheet_by_index(sheet)
spreadsheet.set_data(xl_sheet.get_rows())
return spreadsheet
else:
xl_sheet = book.sheet_by_name(sheet)
spreadsheet.set_data(xl_sheet.get_rows())
return spreadsheet
def loadtxt(filename, dtype='float', comments='#', delimiter=None, skiprows=0,
usecols=None, unpack=False):
"""Load ascii files into a numpy ndarray using numpy.loadtxt."""
return np.loadtxt(
filename, dtype, comments, delimiter,
None, skiprows, usecols, unpack)
def load(file, mmap_mode=None, allow_pickle=True, fix_imports=True,
encoding='ASCII'):
"""Load numpy .npy and .npz files to an array or map of arrays
respectively using np.load"""
return np.load(file, mmap_mode, allow_pickle, fix_imports, encoding)
def read_csv(filename, start=1, stop=None, assume=TEXT):
"""Read a csv file into a :class:`Spreadsheet`
:example:
sheet = read_csv('parameters.csv', start=9, assume=NUMBER)
:param filename: name of the file to read
:param start: row to start reading
:param stop: row to stop reading
:param assume: type of data to assume
:type filename: string
:type start: integer
:type stop: integer
:type assume: integer
:return: spreadsheet data
:rtype: :class:`Spreadsheet`"""
values = []
spreadsheet = Spreadsheet(assume)
with open(filename) as csvfile:
reader = csv.reader(csvfile)
for row in reader:
values.append(row)
if stop is None:
stop = len(values)
values = values[start-1:stop]
spreadsheet.set_values(values)
return spreadsheet
def load_mat(filename, variable):
"""Read the variable from filename
    :example:
        data = load_mat("parameter.mat", "cse")
:param filename: name of the .mat file to read
:param variable: variable to load
:type filename: string
:type variable: string
:return: variable data
:rtype: array"""
contents = sio.loadmat(filename)
return contents[variable]
def load_section(sheet, row_range=None, col_range=None):
"""Read a 'chunk' of data from a spreadsheet.
Given a selection of rows and columns, this function will return the
intersection of the two ranges. Note that the minimum value for each range
is 1.
:example:
spreadsheet = read_excel('parameters.xlsx', 'Parameters')
cell_data = load_section(
spreadsheet, [1, 3, 5], range(7, 42))
:param sheet: spreadsheet data
:param row_range: selected rows
:param col_range: selected columns
:type sheet: :class:`xlrd.sheet`
:type row_range: list of integers or integer
:type col_range: list of integers or integer
:return: section of sheet data
:rtype: array if assume=NUMBER else list"""
if row_range is None:
row_range = range(1, len(sheet.values)+1)
if col_range is None:
col_range = range(1, len(sheet.values[0])+1)
if isinstance(row_range, int):
row_range = [row_range]
if isinstance(col_range, int):
col_range = [col_range]
rval = [[sheet.cell(x-1, y-1) for y in col_range] for x in row_range]
if sheet.assume == NUMBER:
        return np.array(
            [[cell.value for cell in row] for row in rval], dtype='float')
return rval
def _multiple_replace(repl, text):
"""Replace multiple regex expressions
:param repl: dictionary of values to replace
:param text: text to perform regex on
:type repl: dict
:type text: string
:return: processed text
:rtype: string"""
# Create a regular expression from the dictionary keys
regex = re.compile("(%s)" % "|".join(map(re.escape, repl.keys())))
# For each match, look-up corresponding value in dictionary
return regex.sub(lambda mo: repl[mo.string[mo.start():mo.end()]], text)
def _fun_to_lambda(entry):
"""Convert a given string representing a matlab anonymous
function to a lambda function
    :example:
        lambdafun = _fun_to_lambda("@(x) cos(x)")
        lambdafun[0](np.pi)
:param entry: string of matlab anonymous equation
:type: string
:return: mathmatical function
:rtype: lambda function"""
repl = {
'./': '/',
'.*': '*',
'.^': '**'
}
# pull out function variable definition
vari = re.findall(r'\@\(.*?\)', entry)
vari = [re.sub(r'\@|\(|\)', '', x) for x in vari]
# remove variable definition
entry = re.sub(r'\@\(.*?\)', '', entry)
# replace operators to suit numpy
entry = _multiple_replace(repl, entry)
# separate equations into different functions
entry = re.sub('{|}', '', entry).split(',')
return list(lambda x, z=i: ne.evaluate(entry[z], local_dict={vari[z]: x})
for i in range(0, len(entry)))
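# A minimal usage sketch for _fun_to_lambda (one lambda is returned per
# equation in the string; the value shown is approximate):
#
#   funs = _fun_to_lambda("@(x) cos(x)")
#   funs[0](np.pi)   # evaluates cos(pi) through numexpr, ~ -1.0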
def load_params(sheet, rows=None, ncols=None, pcols=None, cols=None,
nrows=None, prows=None):
"""Read designated parameters from the sheet
:example:
sheet=read_excel('parameter_list.xlsx', 0, 'index')
params["pos"] = load_params(sheet, range(55, 75), ncols=2, pcols=3)
:param sheet: spreadsheet data
:param rows: same as nrows=prows
:param cols: same as ncols=pcols
:param nrows: cell rows to read for parameter names
:param ncols: cell columns to read for parameter names
:param prows: cell rows to read for parameter data
:param pcols: cell columns to read for parameter data
:type sheet: :class:`Spreadsheet`
:type rows: list of integers or integer
:type cols: list of integers or integer
:type nrows: list of integers or integer
:type ncols: list of integers or integer
:type prows: list of integers or integer
:type pcols: list of integers or integer
:return: mapping of parameter names to values
:rtype: dict"""
if rows:
nrows = rows
prows = rows
if cols:
ncols = cols
pcols = cols
name_cells = load_section(sheet, nrows, ncols)
data_cells = load_section(sheet, prows, pcols)
# Verify the number of names matches the number of params
assert len(name_cells) == len(data_cells)
data = [_fun_to_lambda(x.value) if x.ctype == TEXT else
x.value if x.ctype == NUMBER else None
for y in data_cells for x in y]
return dict(zip([x.value for y in name_cells for x in y], data))
class Spreadsheet(object):
"""Hold spreadsheet data"""
def __init__(self, assumption=None):
"""Entry point for :class:`Spreadsheet`"""
self.values = None
self.ctypes = None
self.assume = assumption
def set_data(self, data_in):
"""Set spreadsheet data using cell generators"""
data = list(data_in)
self.values = [[col.value for col in row] for row in data]
self.ctypes = [[col.ctype for col in row] for row in data]
def set_values(self, values):
"""Set spreadsheet cell values
:param values: values to set
:type values: container, e.g. list"""
self.values = values
def set_ctypes(self, ctype):
"""Set spreadsheet cell types. I.e. NUMBER, TEXT, etc.
:param ctype: cell types to set
:type values: container, e.g. list"""
self.ctypes = ctype
def size(self):
"""Retrieve the dimensions of the spreadsheet
        :return: spreadsheet dimensions
:rtype: tuple"""
if self.values is not None:
return len(self.values), len(self.values[0])
else:
return None
def cell(self, xpos, ypos):
"""Retrieve cell information
:param xpos: cell row
:param ypos: cell column
:type xpos: integer
:type ypos: integer
:return: cell values and info
:rtype: :class:`xlrd.sheet.Cell`"""
if self.ctypes:
return xlrd.sheet.Cell(
self.ctypes[xpos][ypos], self.values[xpos][ypos])
elif self.assume:
return xlrd.sheet.Cell(self.assume, self.values[xpos][ypos])
else:
return None
def main():
"""Module entry point"""
pass
if __name__ == '__main__':
sys.exit(main())
|
import time
import unittest
from unittest.mock import patch
import pytest
from werkzeug.exceptions import Unauthorized
from flask_slack import decorators
@pytest.mark.usefixtures('client_class', 'config')
class SlackSignatureRequiredTests(unittest.TestCase):
@patch('hmac.compare_digest', unsafe=True)
def test_slack_event_required(self, compare_digest_mock):
headers = {
'X-Slack-Signature': '-',
'X-Slack-Request-Timestamp': int(time.time()),
}
self.client.get('/', headers=headers, json={})
result = decorators.slack_event_required(bool)()
self.assertFalse(result)
compare_digest_mock.assert_called()
def test_unauthorized(self):
with self.assertRaises(Unauthorized) as http_error:
decorators.slack_signature_required(bool)()
self.assertEqual(http_error.exception.code, 401)
@patch('hmac.compare_digest', unsafe=True)
def test_expired_event(self, compare_digest_mock):
headers = {
'X-Slack-Signature': '-',
'X-Slack-Request-Timestamp': '0',
}
self.client.get('/', headers=headers)
_, status_code = decorators.slack_signature_required(bool)()
self.assertEqual(status_code, 403)
compare_digest_mock.assert_not_called()
@patch('hmac.compare_digest', unsafe=True, return_value=False)
def test_invalid_signature(self, compare_digest_mock):
headers = {
'X-Slack-Signature': '-',
'X-Slack-Request-Timestamp': int(time.time()),
}
self.client.get('/', headers=headers)
_, status_code = decorators.slack_signature_required(bool)()
self.assertEqual(status_code, 403)
compare_digest_mock.assert_called()
@pytest.mark.usefixtures('client_class')
class SlackChallengeValidationTests(unittest.TestCase):
def test_slack_challenge_validation(self):
self.client.post('/', json={'challenge': True})
result = decorators.slack_challenge_validation(bool)()
self.assertTrue(result)
|
"""Replace Arabic characters."""
# ------------------------ Import libraries and functions ---------------------
from typing import Any
import re
from cleaning_utils.constants import DIACRITICS, DIGITS, LETTERS, NUNATIONS
from cleaning_utils.types import FunctionType
# ---------------------------- function definition ----------------------------
def replace_arabic_char(
text: str,
letter: bool = True,
number: bool = True,
nunation: bool = True,
diacritic: bool = True,
) -> Any:
"""Replace Arabic characters.
It is a general-purpose normalizer, which can replace Arabic letters with
Persian letters (e.g., ك with ک). Also, it can replace Hindi numerals with
Arabic numerals (e.g., ۰۱۲۳۴۵۶۷۸۹ with 0123456789). Moreover, it can
clear the input string from arabic diacritics (e.g., َ ِ ُ ) or nunations
(e.g., ً ٍ ٌ ْ ّ ).
Args:
text (str): Accepts only one element (i.e., scalar).
letter (bool): To replace Arabic letters with Persian
letters.
number (bool): To replace Hindi numerals with Arabic
numerals.
nunation (bool): To remove Arabic nunations.
diacritic (bool): To remove Arabic diacritics.
Returns:
A text variable of <class "str"> after removing specified characters.
Examples:
>>> input_text = "آنژيوکت 20 صورتي ك"
>>> replace_arabic_char(input_text)
'آنژیوکت 20 صورتی ک'
>>> text = "مُسْتَقِيمٌ سلامُ مُتَمَكِّنًائ ۰۱۲۳۴۵۶۷۸۹ ؤإئأء موسیٰ"
>>> replace_arabic_char(text)
'مستقیم سلام متمکنای 0123456789 وایا موسی'
"""
if any([letter, number, nunation, diacritic]):
operators = list(
k
for k, v in {
replace_arabic_letters: letter,
replace_arabic_numbers: number,
remove_arabic_nunations: nunation,
remove_arabic_diacritics: diacritic,
}.items()
if v
)
return chain(operators.pop(0), *operators)(text)
return text
def remove_arabic_nunations(text: str) -> str:
"""Removes Arabic nunations.
It is a general-purpose normalizer, which can remove Arabic nunations.
Args:
text (str): the text to be cleaned.
Returns:
A text variable of <class "str"> after removing specified characters.
Examples:
>>> text = "مٌ"
>>> replace_arabic_char(text)
'م'
"""
# -------------------------- Make regex pattern ---------------------------
rep = {re.escape(k): v for k, v in NUNATIONS.items()}
pattern = re.compile("|".join(rep.keys()))
# ---------------------------- Exert Function -----------------------------
clear_text: str = pattern.sub(lambda m: rep[re.escape(m.group(0))], text)
return clear_text
def remove_arabic_diacritics(text: str) -> str:
"""Removes Arabic diacritics.
It is a general-purpose normalizer, which can remove Arabic diacritics.
Args:
text (str): the text to be cleaned.
Returns:
A text variable of <class "str"> after removing specified characters.
Examples:
>>> text = "مُسْ"
>>> replace_arabic_char(text)
'مس'
"""
# -------------------------- Make regex pattern ---------------------------
rep = {re.escape(k): v for k, v in DIACRITICS.items()}
pattern = re.compile("|".join(rep.keys()))
# ---------------------------- Exert Function -----------------------------
clear_text: str = pattern.sub(lambda m: rep[re.escape(m.group(0))], text)
return clear_text
def replace_arabic_numbers(text: str) -> str:
"""Replaces Arabic numbers with English numbers.
It is a general-purpose normalizer, which replaces Arabic numbers with English.
Args:
text (str): the text to be cleaned.
Returns:
A text variable of <class "str"> after removing specified characters.
Examples:
>>> text = "۰۱۲۳۴۵۶۷۸۹"
>>> replace_arabic_char(text)
'0123456789'
"""
# -------------------------- Make regex pattern ---------------------------
rep = {re.escape(k): v for k, v in DIGITS.items()}
pattern = re.compile("|".join(rep.keys()))
# ---------------------------- Exert Function -----------------------------
clear_text: str = pattern.sub(lambda m: rep[re.escape(m.group(0))], text)
return clear_text
def replace_arabic_letters(text: str) -> str:
"""Replaces Arabic letters with Persian (i.e., Farsi) letters.
It is a general-purpose normalizer, which replaces Arabic letters with Persian.
Args:
text (str): the text to be cleaned.
Returns:
A text variable of <class "str"> after removing specified characters.
Examples:
>>> text = "ك"
>>> replace_arabic_char(text)
'ک'
"""
# -------------------------- Make regex pattern ---------------------------
rep = {re.escape(k): v for k, v in LETTERS.items()}
pattern = re.compile("|".join(rep.keys()))
# ---------------------------- Exert Function -----------------------------
clear_text: str = pattern.sub(lambda m: rep[re.escape(m.group(0))], text)
return clear_text
def chain(first: FunctionType, *rest: FunctionType) -> Any:
"""Chains functions."""
# pylint: disable = no-value-for-parameter
return lambda x: first(chain(*rest)(x) if rest else x)
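# chain() composes right-to-left, i.e. chain(f, g)(x) == f(g(x)). This is how
# replace_arabic_char builds its pipeline; for example:
#
#   pipeline = chain(replace_arabic_letters, replace_arabic_numbers)
#   pipeline("ك۱")   # numbers replaced first, then letters: 'ک1'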
|
""" Defines the ContourPolyPlot class.
"""
from __future__ import with_statement
# Major library imports
from numpy import array, linspace, meshgrid, transpose
# Enthought library imports
from traits.api import Bool, Dict
# Local relative imports
from base_contour_plot import BaseContourPlot
from contour.contour import Cntr
class ContourPolyPlot(BaseContourPlot):
""" Contour image plot. Takes a value data object whose elements are
scalars, and renders them as a contour plot.
"""
# TODO: Modify ImageData to explicitly support scalar value arrays
#------------------------------------------------------------------------
# Private traits
#------------------------------------------------------------------------
# Are the cached contours valid? If False, new ones need to be computed.
_poly_cache_valid = Bool(False)
# Cached collection of traces.
_cached_polys = Dict
#------------------------------------------------------------------------
# Private methods
#------------------------------------------------------------------------
def _render(self, gc):
""" Actually draws the plot.
Implements the Base2DPlot interface.
"""
if not self._level_cache_valid:
self._update_levels()
if not self._poly_cache_valid:
self._update_polys()
if not self._colors_cache_valid:
self._update_colors()
with gc:
gc.set_antialias(True)
gc.clip_to_rect(self.x, self.y, self.width, self.height)
gc.set_line_width(0)
gc.set_alpha(self.alpha)
for i in range(len(self._levels)-1):
gc.set_fill_color(self._colors[i])
gc.set_stroke_color(self._colors[i])
key = (self._levels[i], self._levels[i+1])
for poly in self._cached_polys[key]:
if self.orientation == "h":
spoly = self.index_mapper.map_screen(poly)
else:
spoly = array(self.index_mapper.map_screen(poly))[:,::-1]
gc.lines(spoly)
gc.close_path()
gc.draw_path()
def _update_polys(self):
""" Updates the cache of contour polygons """
# x and ydata are "fenceposts" so ignore the last value
# XXX: this truncation is causing errors in Cntr() as of r13735
xdata = self.index._xdata.get_data()
ydata = self.index._ydata.get_data()
xs = linspace(xdata[0], xdata[-1], len(xdata)-1)
ys = linspace(ydata[0], ydata[-1], len(ydata)-1)
xg, yg = meshgrid(xs, ys)
if self.orientation == "h":
c = Cntr(xg, yg, self.value.raw_value)
else:
c = Cntr(xg, yg, self.value.raw_value.T)
        self._cached_polys = {}
for i in range(len(self._levels)-1):
key = (self._levels[i], self._levels[i+1])
self._cached_polys[key] = []
polys = c.trace(*key)
for poly in polys:
self._cached_polys[key].append(transpose(poly))
self._poly_cache_valid = True
def _update_levels(self):
""" Extends the parent method to also invalidate some other things """
super(ContourPolyPlot, self)._update_levels()
self._poly_cache_valid = False
def _update_colors(self):
BaseContourPlot._update_colors(self, numcolors = len(self._levels) - 1)
|
# example input:
# category names and their corresponding intervals:
# the category at location x covers values greater than or equal to the
# interval boundary at location x and less than the boundary at x + 1,
# except for the last category, which has no upper bound
# categories = pd.Series(["low", "moderate", "high", "very high", "extremely high"], dtype="category")
# intervals_categories = [0, 20, 30, 40, 50]
# map a value to its interval
def numToCat(row_in, column_in, categories_in, intervalsCategories_in):
assert len(categories_in) == len(intervalsCategories_in), "categories and their intervals lens are not equal"
row_catValu = row_in[column_in]
# check if value is in between two boundaries
for idx in range(len(intervalsCategories_in)-1):
if row_catValu >= intervalsCategories_in[idx] and row_catValu < intervalsCategories_in[idx+1]:
return categories_in.iloc[idx]
    # if not, check whether it is greater than or equal to the last boundary
    lastIndex = len(categories_in)-1
    if row_catValu >= intervalsCategories_in[lastIndex]:
        return categories_in.iloc[lastIndex]
    # otherwise the value is below the first boundary, so raise an error
    raise ValueError("value below the lowest interval boundary")
|
c_open = '\x1B['
close = c_open + 'm'
colors = {
# fg only
'red':';31m',
'green':';32m',
'white':';37m',
'blue':';34m',
# fg and bg
'redblack':'31;40m',
'greenblack':'32;40m',
'whiteblack':'37;40m',
'blueblack':'34;40m',
'magenta':'35;40m',
    'cyan':'36;40m',
'yellow':'33;40m'
}
def encode(text, color):
    '''Wrap text in ANSI escape codes to color it for terminal printing'''
    if color in colors:
        return c_open + colors[color] + text + close
    else:
        raise ColorError('Color %s does not exist' % color)
class ColorError(Exception):
    def __init__(self, msg):
        super(ColorError, self).__init__(msg)
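if __name__ == '__main__':
    # Demo (not part of the original module): print each color name wrapped
    # in its own escape codes.
    for name in colors:
        print(encode(name, name))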
|
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
import socket
from resource_management.core import shell
from resource_management.core.exceptions import ComponentIsNotRunning
from resource_management.core.exceptions import Fail
from resource_management.core.logger import Logger
from resource_management.libraries.functions import conf_select, stack_select
from resource_management.libraries.functions.constants import StackFeature
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.functions.decorator import retry
from resource_management.libraries.functions.format import format
from resource_management.libraries.functions import check_process_status
def prestart(env, stack_component):
import params
if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
conf_select.select(params.stack_name, "hbase", params.version)
stack_select.select(stack_component, params.version)
def post_regionserver(env):
import params
env.set_params(params)
check_cmd = "echo 'status \"simple\"' | {0} shell".format(params.hbase_cmd)
exec_cmd = "{0} {1}".format(params.kinit_cmd, check_cmd)
is_regionserver_registered(exec_cmd, params.hbase_user, params.hostname, re.IGNORECASE)
def is_region_server_process_running():
try:
pid_file = format("{pid_dir}/hbase-{hbase_user}-regionserver.pid")
check_process_status(pid_file)
return True
except ComponentIsNotRunning:
return False
@retry(times=30, sleep_time=30, err_class=Fail)
def is_regionserver_registered(cmd, user, hostname, regex_search_flags):
"""
Queries HBase through the HBase shell to see which servers have successfully registered. This is
useful in cases, such as upgrades, where we must ensure that a RegionServer has not only started,
    but also completed its registration handshake before moving on to upgrading the next RegionServer.
    The hbase shell is used along with the "status 'simple'" command in order to determine if the
    specified host has registered.
    :param cmd: shell command whose output lists the registered servers
    :param user: user to run the command as
    :param hostname: hostname expected to appear in the output
    :param regex_search_flags: flags passed to re.search, e.g. re.IGNORECASE
    :return:
"""
if not is_region_server_process_running():
Logger.info("RegionServer process is not running")
raise Fail("RegionServer process is not running")
# use hbase shell with "status 'simple'" command
code, out = shell.call(cmd, user=user)
    # if we don't have output, then we can't check
if not out:
raise Fail("Unable to retrieve status information from the HBase shell")
# try matching the hostname with a colon (which indicates a bound port)
bound_hostname_to_match = hostname + ":"
match = re.search(bound_hostname_to_match, out, regex_search_flags)
# if there's no match, try again with the IP address
if not match:
try:
ip_address = socket.gethostbyname(hostname)
bound_ip_address_to_match = ip_address + ":"
match = re.search(bound_ip_address_to_match, out, regex_search_flags)
except socket.error:
# this is merely a backup, so just log that it failed
Logger.warning("Unable to lookup the IP address of {0}, reverse DNS lookup may not be working.".format(hostname))
pass
# failed with both a hostname and an IP address, so raise the Fail and let the function auto retry
if not match:
raise Fail(
"The RegionServer named {0} has not yet registered with the HBase Master".format(hostname))
|
import streamlit as st
from src import home, about, source, mail, guess_number, guess_word, tic_tac_toe
def init():
st.session_state.page = 'Homepage'
st.session_state.project = False
st.session_state.game = False
st.session_state.pages = {
'Homepage': home.main,
'About me': about.main,
'Source': source.main,
'Message me': mail.main,
'Guess Number': guess_number.main,
'Guess Word': guess_word.main,
'Tic Tac Toe': tic_tac_toe.main,
}
def draw_style():
st.set_page_config(page_title='ccrsxx\'s Project', page_icon='📚')
style = """
<style>
header {visibility: visible;}
footer {visibility: hidden;}
</style>
"""
st.markdown(style, unsafe_allow_html=True)
def load_page():
st.session_state.pages[st.session_state.page]()
def set_page(loc=None, reset=False):
if not st.session_state.page == 'Homepage':
for key in list(st.session_state.keys()):
if key not in ('page', 'project', 'game', 'pages', 'set'):
st.session_state.pop(key)
if loc:
st.session_state.page = loc
else:
st.session_state.page = st.session_state.set
if reset:
st.session_state.project = False
elif st.session_state.page in ('Message me', 'About me'):
st.session_state.project = True
st.session_state.game = False
else:
pass
def change_button():
set_page('Guess Number')
st.session_state.game = True
st.session_state.project = True
def main():
if 'page' not in st.session_state:
init()
draw_style()
with st.sidebar:
project, about, source = st.columns([1.2, 1, 1])
contact = st.columns([0.2, 1])
if not st.session_state.project:
project.button('📌 Projects', on_click=change_button)
else:
project.button('🏠 Homepage', on_click=set_page, args=('Homepage', True))
if st.session_state.project and st.session_state.game:
st.selectbox(
'List of projects',
['Guess Number', 'Guess Word', 'Tic Tac Toe'],
key='set',
on_change=set_page,
)
about.button('🧑💻 Myself', on_click=set_page, args=('About me',))
source.button('📁 Source', on_click=set_page, args=('Source',))
contact[1].button(
'✉️ Send me a message', on_click=set_page, args=('Message me',)
)
if st.session_state.page == 'Homepage':
st.image('https://c.tenor.com/-420uI8y-RkAAAAd/anime-welcome.gif')
load_page()
if __name__ == '__main__':
main()
|
import math
import numba
import numpy as np
import torch
import torchvision
from functools import partial
from torch import Tensor
from typing import Any, Dict, List, Tuple, Union
from layers.functions.prior_box import PriorBox
from models.retinaface import RetinaFace
from utils.nms.py_cpu_nms import py_cpu_nms
@numba.njit
def adjust_bs(bs: int, height: int, width: int) -> int:
pixels = width * height
# full_hd = 1, quad_hd = 4
down_ratio = math.ceil(pixels / 2073600)**2
return bs // down_ratio
# @numba.njit
# def adjust_bs(bs: int, height: int, width: int) -> int:
# pixels = width * height
# # full_hd = 1, quad_hd = 3
# down_ratio = math.ceil((pixels / 2073600) - 1) * 2 + 1
# return bs // down_ratio
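# Worked example for adjust_bs() above: 1920x1080 gives pixels / 2073600 == 1.0,
# so down_ratio == 1 and bs is unchanged; 2560x1440 gives ceil(1.78)**2 == 4,
# so adjust_bs(32, 1440, 2560) -> 8; 3840x2160 gives ceil(4.0)**2 == 16.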
def detect(sample: Union[np.ndarray, Tensor], model: torch.nn.Module,
           cfg: Dict[str, Any], device: torch.device) -> List[np.ndarray]:
num_frames, height, width, ch = sample.shape
bs = cfg['batch_size']
bs = adjust_bs(bs, height, width)
imgs, scale = prepare_imgs(sample)
priorbox = PriorBox(cfg, image_size=(height, width))
priors = priorbox.forward().to(device)
scale = scale.to(device)
detections = []
for start in range(0, num_frames, bs):
end = start + bs
imgs_batch = imgs[start:end].to(device)
with torch.no_grad():
loc, conf, landms = model(imgs_batch)
imgs_batch, landms = None, None
dets = postproc_detections(loc, conf, priors, scale, cfg)
detections.extend(dets)
loc, conf = None, None
return detections
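# Usage sketch (the cfg keys shown are the ones read in this module; the
# weights path is hypothetical, and PriorBox needs its own fields as well):
#
#   cfg = {'batch_size': 32, 'variance': [0.1, 0.2], 'score_thresh': 0.75,
#          'nms_thresh': 0.4, 'top_k': 500, 'keep_top_k': 5}
#   device = torch.device('cuda')
#   model = init_detector(cfg, 'retinaface.pth', device)
#   detections = detect(frames, model, cfg, device)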
def prepare_imgs(sample: Union[np.ndarray, Tensor]) -> Tuple[Tensor, Tensor]:
n, h, w, c = sample.shape
mean = [104, 117, 123]
if isinstance(sample, Tensor):
imgs = sample.float()
imgs -= torch.tensor(mean, device=imgs.device)
imgs = imgs.permute(0, 3, 1, 2)
else:
imgs = np.float32(sample)
imgs -= mean
imgs = imgs.transpose(0, 3, 1, 2)
imgs = torch.from_numpy(imgs)
scale = torch.tensor([w, h, w, h])
return imgs, scale
def postproc_detections(
locations: Tensor, confidence: Tensor, priors: Tensor,
        scale: Tensor, cfg: Dict[str, Any], resize=1) -> List[np.ndarray]:
boxes = decode_batch(locations, priors, cfg['variance'])
boxes = boxes * scale / resize
boxes = boxes.cpu().numpy()
scores = confidence.cpu().numpy()[:, :, 1]
num_frames = scores.shape[0]
proc_fn = partial(postproc_frame,
score_thresh=cfg['score_thresh'],
nms_thresh=cfg['nms_thresh'],
top_k=cfg['top_k'],
keep_top_k=cfg['keep_top_k'])
dets = [proc_fn(boxes[i], scores[i]) for i in range(num_frames)]
return dets
def postproc_detections_gpu(locations: Tensor, confidence: Tensor,
                            priors: Tensor, scale: Tensor, conf: Dict[str, Any],
resize=1) -> List[Tensor]:
boxes = decode_batch(locations, priors, conf['variance'])
boxes = boxes * scale / resize
scores = confidence[:, :, 1]
N = boxes.size(0)
out = []
for f in range(N):
boxes_f = postproc_frame_torch(boxes[f], scores[f], conf)
out.append(boxes_f)
return out
def decode_batch(loc: Tensor, priors: Tensor, variances) -> Tensor:
"""Decode locations from predictions using priors to undo
the encoding we did for offset regression at train time.
Args:
loc (tensor): location predictions for loc layers,
Shape: [n_samples, num_priors,4]
priors (tensor): Prior boxes in center-offset form.
Shape: [num_priors,4].
variances: (list[float]) Variances of priorboxes
Return:
decoded bounding box predictions
"""
boxes = torch.cat((
priors[:, :2] + loc[:, :, :2] * variances[0] * priors[:, 2:],
priors[:, 2:] * torch.exp(loc[:, :, 2:] * variances[1])), 2)
boxes[:, :, :2] -= boxes[:, :, 2:] / 2
boxes[:, :, 2:] += boxes[:, :, :2]
return boxes
def postproc_frame(
boxes: np.ndarray, scores: np.ndarray,
score_thresh=0.75, nms_thresh=0.4,
top_k=500, keep_top_k=5) -> np.ndarray:
inds = (scores > score_thresh).nonzero()[0]
boxes = boxes[inds]
scores = scores[inds]
# keep top-K before NMS
order = scores.argsort()[::-1][:top_k]
boxes = boxes[order]
scores = scores[order]
# do NMS
dets = np.hstack((boxes, scores[:, np.newaxis]))
dets = dets.astype(np.float32, copy=False)
keep = py_cpu_nms(dets, nms_thresh)
dets = dets[keep, :]
# keep top-K faster NMS
dets = dets[:keep_top_k, :]
return dets
def postproc_frame_torch(boxes: Tensor, scores: Tensor, conf: Dict[str, Any]) -> Tensor:
idxs = (scores > conf['score_thresh']).nonzero().squeeze_(1)
if idxs.size(0):
boxes = boxes[idxs]
scores = scores[idxs]
# keep top-K before NMS
top_k = conf['top_k']
scores, idxs = scores.sort(descending=True)
scores, idxs = scores[:top_k], idxs[:top_k]
boxes = boxes[idxs]
# do NMS
nms_thresh = conf['nms_thresh']
keep_top_k = conf['keep_top_k']
keep = torchvision.ops.nms(boxes, scores, nms_thresh)
boxes = boxes[keep][:keep_top_k]
scores = scores[keep][:keep_top_k]
scores = scores.unsqueeze_(1)
return torch.cat([boxes, scores], dim=1)
else:
return torch.empty(0, 5, device=boxes.device, dtype=torch.float32)
def init_detector(cfg: Dict[str, Any], weights: str, device: torch.device) -> torch.nn.Module:
cfg['pretrain'] = False
net = RetinaFace(cfg=cfg, phase='test')
net = load_model(net, weights, device)
net.eval()
return net
def check_keys(model, pretrained_state_dict):
ckpt_keys = set(pretrained_state_dict.keys())
model_keys = set(model.state_dict().keys())
used_pretrained_keys = model_keys & ckpt_keys
unused_pretrained_keys = ckpt_keys - model_keys
missing_keys = model_keys - ckpt_keys
print('Missing keys:{}'.format(len(missing_keys)))
print('Unused checkpoint keys:{}'.format(len(unused_pretrained_keys)))
print('Used keys:{}'.format(len(used_pretrained_keys)))
assert len(used_pretrained_keys) > 0, 'load NONE from pretrained checkpoint'
return True
def remove_prefix(state_dict, prefix):
''' Old style model is stored with all names of parameters
sharing common prefix 'module.' '''
print('remove prefix \'{}\''.format(prefix))
f = lambda x: x.split(prefix, 1)[-1] if x.startswith(prefix) else x
return {f(key): value for key, value in state_dict.items()}
def load_model(model, pretrained_path, device=None):
print('Loading pretrained model from {}'.format(pretrained_path))
if device:
pretrained_dict = torch.load(
pretrained_path,
map_location=lambda storage, loc: storage.cuda(device))
else:
pretrained_dict = torch.load(
pretrained_path,
map_location=lambda storage, loc: storage)
if "state_dict" in pretrained_dict.keys():
pretrained_dict = remove_prefix(pretrained_dict['state_dict'], 'module.')
else:
pretrained_dict = remove_prefix(pretrained_dict, 'module.')
check_keys(model, pretrained_dict)
model.load_state_dict(pretrained_dict, strict=False)
return model
|
from .discovery import FileDiscovery, load_config
|
from .models import (
User,
Chat,
YandexDiskToken,
UserSettings
)
from .queries import (
UserQuery,
ChatQuery,
YandexDiskTokenQuery
)
|
# i += 1 |
def add_edge(edge1: int, edge2: int, edge_list: list) -> bool:
    try:
        edge_list.append((edge1, edge2))
        return True
    except Exception:
        return False
# brute-force search
f=lambda n,e,m=1:any(all(t*m//m**a%m!=t*m//m**b%m for(a,b)in e)for t in range(m**n))and m or f(n,e,m+1)
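# A reading of the one-liner above, inferred from its structure: f(n, e)
# brute-forces the smallest m (the chromatic number) for which some t < m**n
# colors the n vertices so that every edge in e gets two different colors.
# Vertex a receives the base-m digit t*m // m**a % m; the extra *m pins
# vertex 0 to color 0 as a symmetry break. If no t works, recurse with m+1.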
if __name__ == "__main__":
filename = input("Unesite ime datoteke: ")
mat = []
    upis = []  # new
with open(filename, 'r') as file:
n = int(file.readline())
file.readline() # skip newline
for _ in range(n):
mat.append(list(map(int, file.readline().split(' '))))
for i, row in enumerate(mat):
for j, elem in enumerate(row):
if elem == 1:
                add_edge(i, j, upis)  # new call
print (f(n, upis)) |
#!/usr/bin/env python3
#
# Author: Yipeng Sun <syp at umd dot edu>
# License: BSD 2-clause
# Last Change: Thu Mar 11, 2021 at 12:30 AM +0100
import os
import pytest
import yaml
from pyBabyMaker.io.NestedYAMLLoader import NestedYAMLLoader
from pyBabyMaker.io.TupleDump import PyTupleDump
PWD = os.path.dirname(os.path.realpath(__file__))
PARDIR = os.path.join(PWD, os.pardir)
SAMPLE_YAML = os.path.join(PARDIR, 'samples', 'sample-babymaker.yml')
SAMPLE_NTP = os.path.join(PARDIR, 'samples', 'sample.root')
@pytest.fixture
def default_Loader():
with open(SAMPLE_YAML, 'r') as f:
return yaml.load(f, NestedYAMLLoader)
def test_NestedYAMLLoader_values(default_Loader):
result = default_Loader
assert result['rename'] == {
k: k.lower() for k in ['Y_PT', 'Y_PX', 'Y_PY', 'Y_PZ', 'b0_PE']}
def test_NestedYAMLLoader_subfile_values(default_Loader):
result = default_Loader
assert result['output']['YetAnotherTuple']['drop'] == [
'Y_OWNPV_COV_', 'Y_OWNPV_P.*']
def test_PyTupleDump():
result = PyTupleDump(SAMPLE_NTP).dump()
assert result['TupleB0/DecayTree']['CaloPrsE'] == 'float'
assert result['TupleB0WSPi/DecayTree']['D0_ENDVERTEX_COV_'] == 'float[3][3]'
|
# Auto-download any Github/Gitlab raw file and save it with custom name
# Jakob Ketterer, November 2020
import urllib.request
import os
if __name__ == "__main__":
file_name = "pred_world_03-07.csv"
# file_name = "ihme-covid19.zip"
dir_name = os.path.join("./data-raw/UCLA-SuEIR", file_name)
url = "https://raw.githubusercontent.com/uclaml/ucla-covid19-forecasts/master/projection_result/" + file_name
# url = "https://ihmecovid19storage.blob.core.windows.net/archive/2021-02-20/" + file_name
urllib.request.urlretrieve(url, dir_name)
print("Downloaded and saved forecast to", dir_name)
# try:
# urllib.request.urlretrieve(url, dir_name)
# print("Downloaded and saved forecast to", dir_name)
# except:
# print("Download failed for", file_name, ". The file probably doesn't exist.") |
from .base_datamodule import BaseDataModule
from .scannet_datamodule import ScanNetDataModule
from .synthetic_datamodule import SyntheticDataModule
|
#!/usr/bin/env python3
from LogLevels import LogLevel
from LoggerAbc import Logger
import datetime
class Text_Logger(Logger):
def __init__ (self,logLocation, logSource):
self._logLocation_ = logLocation
self._logSource_ = logSource
def _writeLog_(self, logLevel, logMessage):
logLine = "[{0}] : {1} - {2} - {3}\n".format(self._logSource_, datetime.datetime.now(), logLevel.name, logMessage)
logFile = open(self._logLocation_, 'a')
logFile.write(logLine)
logFile.close()
def LogInfo(self, logMessage):
self._writeLog_(LogLevel.INFO, logMessage)
def LogWarn(self, logMessage):
self._writeLog_(LogLevel.WARN, logMessage)
def LogError(self, logMessage):
self._writeLog_(LogLevel.ERROR, logMessage)
def LogSecurity(self, logMessage):
self._writeLog_(LogLevel.SECURITY, logMessage)
def LogCritical(self, logMessage):
self._writeLog_(LogLevel.CRITICAL, logMessage)
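if __name__ == '__main__':
    # Demo (not part of the original module): the log path and source name
    # are hypothetical.
    demo = Text_Logger('demo.log', 'demo-source')
    demo.LogInfo('service started')
    demo.LogError('something went wrong')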
|
class arithmetic():
def __init__(self):
pass
''' levenshtein distance '''
def levenshtein(self,first,second):
if len(first) > len(second):
first,second = second,first
if len(first) == 0:
return len(second)
if len(second) == 0:
return len(first)
first_length = len(first) + 1
second_length = len(second) + 1
        # row 0 counts insertions and column 0 counts deletions
        distance_matrix = [[0] * second_length for _ in range(first_length)]
        for i in range(first_length):
            distance_matrix[i][0] = i
        for j in range(second_length):
            distance_matrix[0][j] = j
#print distance_matrix
for i in range(1,first_length):
for j in range(1,second_length):
deletion = distance_matrix[i-1][j] + 1
insertion = distance_matrix[i][j-1] + 1
substitution = distance_matrix[i-1][j-1]
if first[i-1] != second[j-1]:
substitution += 1
distance_matrix[i][j] = min(insertion,deletion,substitution)
#print distance_matrix
return distance_matrix[first_length-1][second_length-1]
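    # Usage sketch: with the initialization above, the classic example gives
    #
    #   arithmetic().levenshtein("kitten", "sitting")   # -> 3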
def lcs(self,first,second):
first_length = len(first)
second_length = len(second)
size = 0
x = 0
y = 0
        matrix = [[0] * second_length for _ in range(first_length)]
#print matrix
for i in range(first_length):
for j in range(second_length):
#print i,j
if first[i] == second[j]:
if i - 1 >= 0 and j - 1 >=0:
matrix[i][j] = matrix[i-1][j-1] + 1
else:
matrix[i][j] = 1
if matrix[i][j] > size:
size = matrix[i][j]
x = j
y = i
else:
matrix[i][j] = 0
#print matrix
#print size,x,y
return second[x-size+1:x+1] |
from file_indexer_api.common.connection_handler_factory import ConnectionHandlerFactory
class Searcher():
def __init__(self):
self.searcher = ConnectionHandlerFactory.create_connection_handler('indexer')
def search(self, query):
return self.searcher.get_handler(query)
|
"""Input/output utility functions for UCCA scripts."""
import os
import sys
import time
from collections import defaultdict
from glob import glob
from itertools import filterfalse, chain
from xml.etree.ElementTree import ParseError
from ucca.convert import file2passage, passage2file, from_text, to_text, split2segments
from ucca.core import Passage
DEFAULT_LANG = "en"
DEFAULT_ATTEMPTS = 3
DEFAULT_DELAY = 5
class LazyLoadedPassages:
"""
Iterable interface to Passage objects that loads files on-the-go and can be iterated more than once
"""
def __init__(self, files, sentences=False, paragraphs=False, converters=None, lang=DEFAULT_LANG,
attempts=DEFAULT_ATTEMPTS, delay=DEFAULT_DELAY):
self.files = files
self.sentences = sentences
self.paragraphs = paragraphs
self.split = self.sentences or self.paragraphs
self.converters = defaultdict(lambda: from_text) if converters is None else converters
self.lang = lang
self.attempts = attempts
self.delay = delay
self._files_iter = None
self._split_iter = None
self._file_handle = None
def __iter__(self):
self._files_iter = iter(self.files)
self._split_iter = None
self._file_handle = None
return self
def __next__(self):
while True:
passage = self._next_passage()
if passage is not None:
return passage
def _next_passage(self):
passage = None
if self._split_iter is None:
try:
file = next(self._files_iter)
except StopIteration: # Finished iteration
raise
if isinstance(file, Passage): # Not really a file, but a Passage
passage = file
else: # A file
attempts = self.attempts
while not os.path.exists(file):
if attempts == 0:
print("File not found: %s" % file, file=sys.stderr)
return None
print("Failed reading %s, trying %d more times..." % (file, attempts), file=sys.stderr)
time.sleep(self.delay)
attempts -= 1
try:
passage = file2passage(file) # XML or binary format
except (IOError, ParseError) as e: # Failed to read as passage file
base, ext = os.path.splitext(os.path.basename(file))
converter = self.converters.get(ext.lstrip("."))
if converter is None:
raise IOError("Could not read %s file. Try adding '.txt' suffix: '%s'" % (ext, file)) from e
self._file_handle = open(file, encoding="utf-8")
self._split_iter = iter(converter(chain(self._file_handle, [""]), passage_id=base, lang=self.lang))
if self.split:
if self._split_iter is None:
self._split_iter = (passage,)
self._split_iter = iter(s for p in self._split_iter for s in
split2segments(p, is_sentences=self.sentences, lang=self.lang))
if self._split_iter is not None: # Either set before or initialized now
try:
passage = next(self._split_iter)
except StopIteration: # Finished this converter
self._split_iter = None
if self._file_handle is not None:
self._file_handle.close()
self._file_handle = None
return None
return passage
# The following three methods are implemented to support shuffle;
# note files are shuffled but there is no shuffling within files, as it would not be efficient.
# Note also the inconsistency because these access the files while __iter__ accesses individual passages.
def __len__(self):
return len(self.files)
def __getitem__(self, i):
return self.files[i]
def __setitem__(self, i, value):
self.files[i] = value
def __bool__(self):
return bool(self.files)
def resolve_patterns(filename_patterns):
for pattern in [filename_patterns] if isinstance(filename_patterns, str) else filename_patterns:
yield from sorted(glob(pattern)) or [pattern]
def get_passages(filename_patterns, **kwargs):
for filenames in resolve_patterns(filename_patterns):
yield from read_files_and_dirs(filenames, **kwargs)
def gen_files(files_and_dirs):
"""
:param files_and_dirs: iterable of files and/or directories to look in
:return: all files given, plus any files directly under any directory given
"""
for file_or_dir in [files_and_dirs] if isinstance(files_and_dirs, str) else files_and_dirs:
if os.path.isdir(file_or_dir):
yield from filterfalse(os.path.isdir, (os.path.join(file_or_dir, f)
for f in sorted(os.listdir(file_or_dir))))
else:
yield file_or_dir
def read_files_and_dirs(files_and_dirs, sentences=False, paragraphs=False, converters=None, lang=DEFAULT_LANG,
attempts=DEFAULT_ATTEMPTS, delay=DEFAULT_DELAY):
"""
:param files_and_dirs: iterable of files and/or directories to look in
:param sentences: whether to split to sentences
:param paragraphs: whether to split to paragraphs
:param converters: dict of input format converters to use based on the file extension
:param lang: language to use for tokenization model
:param attempts: number of times to try reading a file before giving up
:param delay: number of seconds to wait before subsequent attempts to read a file
:return: lazy-loaded passages from all files given, plus any files directly under any directory given
"""
return LazyLoadedPassages(list(gen_files(files_and_dirs)), sentences=sentences, paragraphs=paragraphs,
converters=converters, lang=lang, attempts=attempts, delay=delay)
def write_passage(passage, output_format=None, binary=False, outdir=".", prefix="", converter=None, verbose=True,
append=False, basename=None):
"""
Write a given UCCA passage in any format.
:param passage: Passage object to write
:param output_format: filename suffix (if given "ucca", suffix will be ".pickle" or ".xml" depending on `binary')
:param binary: save in pickle format with ".pickle" suffix
:param outdir: output directory, should exist already
:param prefix: string to prepend to output filename
:param converter: function to apply to passage before saving (if output_format is not "ucca"/"pickle"/"xml"),
returning iterable of strings, each corresponding to an output line
:param verbose: print "Writing passage" message
:param append: if using converter, append to output file rather than creating a new file
:param basename: use this instead of `passage.ID' for the output filename
:return: path of created output file
"""
os.makedirs(outdir, exist_ok=True)
suffix = output_format if output_format and output_format != "ucca" else ("pickle" if binary else "xml")
outfile = os.path.join(outdir, prefix + (basename or passage.ID) + "." + suffix)
if verbose:
print("%s '%s'..." % ("Appending to" if append else "Writing passage", outfile))
if output_format is None or output_format in ("ucca", "pickle", "xml"):
passage2file(passage, outfile, binary=binary)
else:
with open(outfile, "a" if append else "w", encoding="utf-8") as f:
f.writelines(map("{}\n".format, (converter or to_text)(passage)))
return outfile
|
# lint-amnesty, pylint: disable=missing-function-docstring, missing-module-docstring
def plugin_settings(settings):
# Queue to use for updating persistent grades
settings.RECALCULATE_GRADES_ROUTING_KEY = settings.DEFAULT_PRIORITY_QUEUE
# Queue to use for updating grades due to grading policy change
settings.POLICY_CHANGE_GRADES_ROUTING_KEY = settings.DEFAULT_PRIORITY_QUEUE
|
import requests
import codecs
import os
import time
import json
url = 'http://pinyin.sogou.com/dict/ywz/ajax/make_list.php'
def download(tag_id, tag_page, tag_type="tag"):
fn = 'data-%s-%s-%s.json' % (tag_type, tag_id, tag_page)
if os.path.exists(fn):
        print('* exists %s' % fn)
return
form = {
'tag_id': tag_id,
'type': tag_type,
'page': tag_page
}
    req = requests.post(url, data=form)
    with codecs.open(fn, 'w', 'utf-8') as f:
        f.write(req.text)
    print('* got %s' % fn)
def manual_mode():
    tag_id = input('Tag ID: ')
    tag_page = input('Tag Page: ')
download(tag_id, tag_page)
def auto_mode():
for i in range(1, 24):
download(i, 1)
time.sleep(3)
def auto_all_mode_by_pageone():
for i in range(1, 24):
# load page 1 info
fn = 'data-%s-%s-%s.json' % ('tag', i, 1)
        with codecs.open(fn, 'r', 'utf-8') as f:
            obj = json.loads(f.read())
num_pages = obj['page']
if num_pages <= 1: continue
# download pages from page 2
for j in range(2, num_pages+1):
download(i, j)
time.sleep(3)
if __name__ == '__main__':
    auto_all_mode_by_pageone()
|
import sqlalchemy as sa
# Define a version number for the database generated by these writers
# Increment this version number any time a change is made to the schema of the
# assets database
ASSET_DB_VERSION = 2
def generate_asset_db_metadata(bind=None):
# NOTE: When modifying this schema, update the ASSET_DB_VERSION value
metadata = sa.MetaData(bind=bind)
_version_table_schema(metadata)
_equities_table_schema(metadata)
_futures_exchanges_schema(metadata)
_futures_root_symbols_schema(metadata)
_futures_contracts_schema(metadata)
_asset_router_schema(metadata)
return metadata
# A list of the names of all tables in the assets db
# NOTE: When modifying this schema, update the ASSET_DB_VERSION value
asset_db_table_names = ['version_info', 'equities', 'futures_exchanges',
'futures_root_symbols', 'futures_contracts',
'asset_router']
def _equities_table_schema(metadata):
# NOTE: When modifying this schema, update the ASSET_DB_VERSION value
return sa.Table(
'equities',
metadata,
sa.Column(
'sid',
sa.Integer,
unique=True,
nullable=False,
primary_key=True,
),
sa.Column('symbol', sa.Text),
sa.Column('company_symbol', sa.Text, index=True),
sa.Column('share_class_symbol', sa.Text),
sa.Column('fuzzy_symbol', sa.Text, index=True),
sa.Column('asset_name', sa.Text),
sa.Column('start_date', sa.Integer, default=0, nullable=False),
sa.Column('end_date', sa.Integer, nullable=False),
sa.Column('first_traded', sa.Integer, nullable=False),
sa.Column('auto_close_date', sa.Integer),
sa.Column('exchange', sa.Text),
)
def _futures_exchanges_schema(metadata):
# NOTE: When modifying this schema, update the ASSET_DB_VERSION value
return sa.Table(
'futures_exchanges',
metadata,
sa.Column(
'exchange',
sa.Text,
unique=True,
nullable=False,
primary_key=True,
),
sa.Column('timezone', sa.Text),
)
def _futures_root_symbols_schema(metadata):
# NOTE: When modifying this schema, update the ASSET_DB_VERSION value
return sa.Table(
'futures_root_symbols',
metadata,
sa.Column(
'root_symbol',
sa.Text,
unique=True,
nullable=False,
primary_key=True,
),
sa.Column('root_symbol_id', sa.Integer),
sa.Column('sector', sa.Text),
sa.Column('description', sa.Text),
sa.Column(
'exchange',
sa.Text,
sa.ForeignKey('futures_exchanges.exchange'),
),
)
def _futures_contracts_schema(metadata):
# NOTE: When modifying this schema, update the ASSET_DB_VERSION value
return sa.Table(
'futures_contracts',
metadata,
sa.Column(
'sid',
sa.Integer,
unique=True,
nullable=False,
primary_key=True,
),
sa.Column('symbol', sa.Text, unique=True, index=True),
sa.Column(
'root_symbol',
sa.Text,
sa.ForeignKey('futures_root_symbols.root_symbol'),
index=True
),
sa.Column('asset_name', sa.Text),
sa.Column('start_date', sa.Integer, default=0, nullable=False),
sa.Column('end_date', sa.Integer, nullable=False),
sa.Column('first_traded', sa.Integer, nullable=False),
sa.Column(
'exchange',
sa.Text,
sa.ForeignKey('futures_exchanges.exchange'),
),
sa.Column('notice_date', sa.Integer, nullable=False),
sa.Column('expiration_date', sa.Integer, nullable=False),
sa.Column('auto_close_date', sa.Integer, nullable=False),
sa.Column('multiplier', sa.Float),
sa.Column('tick_size', sa.Float),
)
def _asset_router_schema(metadata):
# NOTE: When modifying this schema, update the ASSET_DB_VERSION value
return sa.Table(
'asset_router',
metadata,
sa.Column(
'sid',
sa.Integer,
unique=True,
nullable=False,
primary_key=True),
sa.Column('asset_type', sa.Text),
)
def _version_table_schema(metadata):
# NOTE: When modifying this schema, update the ASSET_DB_VERSION value
return sa.Table(
'version_info',
metadata,
sa.Column(
'id',
sa.Integer,
unique=True,
nullable=False,
primary_key=True,
),
sa.Column(
'version',
sa.Integer,
unique=True,
nullable=False,
),
# This constraint ensures a single entry in this table
sa.CheckConstraint('id <= 1'),
)
|
import logging
class hereApi(object):
"""Base class for HERE Search,
which is used to fetch address using HERE.
"""
def __init__(self, config, timeout=None):
"""Returns a Api instance.
Args:
            config (dict): Parsed configuration containing the HERE credentials.
timeout (int): Timeout limit for requests.
"""
self.__set_credentials(config)
self.__set_timeout(timeout)
self._base_url = 'https://reverse.geocoder.api.here.com/6.2/reversegeocode.json'
def __set_credentials(self, config):
"""Setter for credentials.
Args:
            config (dict): Parsed configuration containing the HERE credentials.
"""
self._app_id = config['here'][0]
self._app_code = config['here'][1]
def __set_timeout(self, timeout):
"""Setter for timeout.
Args:
timeout (int): timeout for rest api.
"""
self._timeout = timeout if timeout else 20
def form_params(self, lat, long):
"""Form Url params given lat and long
Args:
lat (float): latitude of a location
long (float): longitude of a location
Returns:
A human readable address or None.
"""
data = {'mode': 'retrieveAddresses',
'prox': "{0},{1}".format(lat,long),
'app_id': self._app_id,
'app_code': self._app_code}
return data
def address(self, json):
"""Gets address from given Json.
Model is based on service provider response format.
Args:
            json (dict): Parsed JSON response from the HERE reverse geocoder.
"""
        if json.get('Response') is not None and json['Response'].get('View'):
location = json['Response']['View'][0]['Result'][0]['Location']
self._address = location.get('Address')
return self._address.get('Label')
else:
logging.error("Problem with JSON Response, Json Dump %s, fetched using %s!", json, self._getName())
self._address = None
return None
def _getName(self):
"""Getter for API location provider service.
"""
return 'HERE'
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import math
import operator
import warnings
from typing import Any, List, Union, Dict, Optional, Callable, Iterable, NoReturn, TypeVar
import torch
import torch.nn as nn
from nni.common.serializer import Translatable
from nni.retiarii.serializer import basic_unit
from nni.retiarii.utils import STATE_DICT_PY_MAPPING_PARTIAL
from .utils import Mutable, generate_new_label, get_fixed_value
__all__ = ['LayerChoice', 'InputChoice', 'ValueChoice', 'Placeholder', 'ChosenInputs']
class LayerChoice(Mutable):
"""
Layer choice selects one of the ``candidates``, then apply it on inputs and return results.
Layer choice does not allow itself to be nested.
Parameters
----------
candidates : list of nn.Module or OrderedDict
A module list to be selected from.
prior : list of float
Prior distribution used in random sampling.
label : str
Identifier of the layer choice.
Attributes
----------
length : int
Deprecated. Number of ops to choose from. ``len(layer_choice)`` is recommended.
names : list of str
Names of candidates.
choices : list of Module
Deprecated. A list of all candidate modules in the layer choice module.
``list(layer_choice)`` is recommended, which will serve the same purpose.
Notes
-----
    ``candidates`` can be a list of modules or an ordered dict of named modules, for example,
.. code-block:: python
self.op_choice = LayerChoice(OrderedDict([
("conv3x3", nn.Conv2d(3, 16, 128)),
("conv5x5", nn.Conv2d(5, 16, 128)),
("conv7x7", nn.Conv2d(7, 16, 128))
]))
Elements in layer choice can be modified or deleted. Use ``del self.op_choice["conv5x5"]`` or
``self.op_choice[1] = nn.Conv3d(...)``. Adding more choices is not supported yet.
"""
# FIXME: prior is designed but not supported yet
@classmethod
def create_fixed_module(cls, candidates: Union[Dict[str, nn.Module], List[nn.Module]], *,
label: Optional[str] = None, **kwargs):
chosen = get_fixed_value(label)
if isinstance(candidates, list):
result = candidates[int(chosen)]
else:
result = candidates[chosen]
# map the named hierarchies to support weight inheritance for python engine
if hasattr(result, STATE_DICT_PY_MAPPING_PARTIAL):
# handle cases where layer choices are nested
# already has a mapping, will merge with it
prev_mapping = getattr(result, STATE_DICT_PY_MAPPING_PARTIAL)
setattr(result, STATE_DICT_PY_MAPPING_PARTIAL, {k: f'{chosen}.{v}' for k, v in prev_mapping.items()})
else:
# "result" needs to know where to map itself.
# Ideally, we should put a _mapping_ in the module where "result" is located,
# but it's impossible to put mapping into parent module here.
setattr(result, STATE_DICT_PY_MAPPING_PARTIAL, {'__self__': str(chosen)})
return result
def __init__(self, candidates: Union[Dict[str, nn.Module], List[nn.Module]], *,
prior: Optional[List[float]] = None, label: Optional[str] = None, **kwargs):
super(LayerChoice, self).__init__()
if 'key' in kwargs:
warnings.warn(f'"key" is deprecated. Assuming label.')
label = kwargs['key']
if 'return_mask' in kwargs:
warnings.warn(f'"return_mask" is deprecated. Ignoring...')
if 'reduction' in kwargs:
warnings.warn(f'"reduction" is deprecated. Ignoring...')
self.candidates = candidates
self.prior = prior or [1 / len(candidates) for _ in range(len(candidates))]
assert abs(sum(self.prior) - 1) < 1e-5, 'Sum of prior distribution is not 1.'
self._label = generate_new_label(label)
self.names = []
if isinstance(candidates, dict):
for name, module in candidates.items():
assert name not in ["length", "reduction", "return_mask", "_key", "key", "names"], \
"Please don't use a reserved name '{}' for your module.".format(name)
self.add_module(name, module)
self.names.append(name)
elif isinstance(candidates, list):
for i, module in enumerate(candidates):
self.add_module(str(i), module)
self.names.append(str(i))
else:
raise TypeError("Unsupported candidates type: {}".format(type(candidates)))
self._first_module = self._modules[self.names[0]] # to make the dummy forward meaningful
@property
def key(self):
return self._key()
@torch.jit.ignore
def _key(self):
warnings.warn('Using key to access the identifier of LayerChoice is deprecated. Please use label instead.',
category=DeprecationWarning)
return self._label
@property
def label(self):
return self._label
def __getitem__(self, idx):
if isinstance(idx, str):
return self._modules[idx]
return list(self)[idx]
def __setitem__(self, idx, module):
key = idx if isinstance(idx, str) else self.names[idx]
return setattr(self, key, module)
def __delitem__(self, idx):
if isinstance(idx, slice):
for key in self.names[idx]:
delattr(self, key)
else:
if isinstance(idx, str):
key, idx = idx, self.names.index(idx)
else:
key = self.names[idx]
delattr(self, key)
del self.names[idx]
def __len__(self):
return len(self.names)
def __iter__(self):
return map(lambda name: self._modules[name], self.names)
@property
def choices(self):
return self._choices()
@torch.jit.ignore
def _choices(self):
warnings.warn("layer_choice.choices is deprecated. Use `list(layer_choice)` instead.", category=DeprecationWarning)
return list(self)
def forward(self, x):
warnings.warn('You should not run forward of this module directly.')
return self._first_module(x)
def __repr__(self):
return f'LayerChoice({self.candidates}, label={repr(self.label)})'
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal
ReductionType = Literal['mean', 'concat', 'sum', 'none']
class InputChoice(Mutable):
"""
Input choice selects ``n_chosen`` inputs from ``choose_from`` (contains ``n_candidates`` keys).
Use ``reduction`` to specify how chosen inputs are reduced into one output. A few options are:
* ``none``: do nothing and return the list directly.
* ``sum``: summing all the chosen inputs.
* ``mean``: taking the average of all chosen inputs.
* ``concat``: concatenate all chosen inputs at dimension 1.
We don't support customizing reduction yet.
Parameters
----------
n_candidates : int
Number of inputs to choose from. It is required.
n_chosen : int
Recommended inputs to choose. If None, mutator is instructed to select any.
reduction : str
``mean``, ``concat``, ``sum`` or ``none``.
prior : list of float
Prior distribution used in random sampling.
label : str
Identifier of the input choice.
"""
@classmethod
def create_fixed_module(cls, n_candidates: int, n_chosen: Optional[int] = 1,
reduction: ReductionType = 'sum', *,
prior: Optional[List[float]] = None, label: Optional[str] = None, **kwargs):
return ChosenInputs(get_fixed_value(label), reduction=reduction)
def __init__(self, n_candidates: int, n_chosen: Optional[int] = 1,
reduction: str = 'sum', *,
prior: Optional[List[float]] = None, label: Optional[str] = None, **kwargs):
super(InputChoice, self).__init__()
if 'key' in kwargs:
warnings.warn(f'"key" is deprecated. Assuming label.')
label = kwargs['key']
if 'return_mask' in kwargs:
warnings.warn(f'"return_mask" is deprecated. Ignoring...')
if 'choose_from' in kwargs:
warnings.warn(f'"reduction" is deprecated. Ignoring...')
self.n_candidates = n_candidates
self.n_chosen = n_chosen
self.reduction = reduction
self.prior = prior or [1 / n_candidates for _ in range(n_candidates)]
assert self.reduction in ['mean', 'concat', 'sum', 'none']
self._label = generate_new_label(label)
@property
def key(self):
return self._key()
@torch.jit.ignore
def _key(self):
warnings.warn('Using key to access the identifier of InputChoice is deprecated. Please use label instead.',
category=DeprecationWarning)
return self._label
@property
def label(self):
return self._label
def forward(self, candidate_inputs: List[torch.Tensor]) -> torch.Tensor:
warnings.warn('You should not run forward of this module directly.')
return candidate_inputs[0]
def __repr__(self):
return f'InputChoice(n_candidates={self.n_candidates}, n_chosen={self.n_chosen}, ' \
f'reduction={repr(self.reduction)}, label={repr(self.label)})'
class ChosenInputs(nn.Module):
"""
A module that chooses from a tensor list and outputs a reduced tensor.
The already-chosen version of InputChoice.
When forward, ``chosen`` will be used to select inputs from ``candidate_inputs``,
and ``reduction`` will be used to choose from those inputs to form a tensor.
Attributes
----------
chosen : list of int
Indices of chosen inputs.
reduction : ``mean`` | ``concat`` | ``sum`` | ``none``
How to reduce the inputs when multiple are selected.
"""
def __init__(self, chosen: Union[List[int], int], reduction: ReductionType):
super().__init__()
self.chosen = chosen if isinstance(chosen, list) else [chosen]
self.reduction = reduction
def forward(self, candidate_inputs):
return self._tensor_reduction(self.reduction, [candidate_inputs[i] for i in self.chosen])
def _tensor_reduction(self, reduction_type, tensor_list):
if reduction_type == 'none':
return tensor_list
if not tensor_list:
return None # empty. return None for now
if len(tensor_list) == 1:
return tensor_list[0]
if reduction_type == 'sum':
return sum(tensor_list)
if reduction_type == 'mean':
return sum(tensor_list) / len(tensor_list)
if reduction_type == 'concat':
return torch.cat(tensor_list, dim=1)
raise ValueError(f'Unrecognized reduction policy: "{reduction_type}"')
# the code in ValueChoice can be generated with this codegen
# this is not done online because I want to have type-hint supports
# $ python -c "from nni.retiarii.nn.pytorch.api import _valuechoice_codegen; _valuechoice_codegen(_internal=True)"
def _valuechoice_codegen(*, _internal: bool = False):
if not _internal:
raise RuntimeError("This method is set to be internal. Please don't use it directly.")
MAPPING = {
# unary
'neg': '-', 'pos': '+', 'invert': '~',
# binary
'add': '+', 'sub': '-', 'mul': '*', 'matmul': '@',
        'truediv': '/', 'floordiv': '//', 'mod': '%',
'lshift': '<<', 'rshift': '>>',
'and': '&', 'xor': '^', 'or': '|',
# no reflection
'lt': '<', 'le': '<=', 'eq': '==',
'ne': '!=', 'ge': '>=', 'gt': '>',
# NOTE
# Currently we don't support operators like __contains__ (b in a),
# Might support them in future when we actually need them.
}
binary_template = """ def __{op}__(self, other: Any) -> 'ValueChoiceX':
return ValueChoiceX(operator.{opt}, '{{}} {sym} {{}}', [self, other])"""
binary_r_template = """ def __r{op}__(self, other: Any) -> 'ValueChoiceX':
return ValueChoiceX(operator.{opt}, '{{}} {sym} {{}}', [other, self])"""
unary_template = """ def __{op}__(self) -> 'ValueChoiceX':
return ValueChoiceX(operator.{op}, '{sym}{{}}', [self])"""
for op, sym in MAPPING.items():
if op in ['neg', 'pos', 'invert']:
print(unary_template.format(op=op, sym=sym) + '\n')
else:
opt = op + '_' if op in ['and', 'or'] else op
print(binary_template.format(op=op, opt=opt, sym=sym) + '\n')
if op not in ['lt', 'le', 'eq', 'ne', 'ge', 'gt']:
print(binary_r_template.format(op=op, opt=opt, sym=sym) + '\n')
def _valuechoice_staticmethod_helper(orig_func):
orig_func.__doc__ += """
Notes
-----
This function performs lazy evaluation.
Only the expression will be recorded when the function is called.
The real evaluation happens when the inner value choice has determined its final decision.
    If no value choice is contained in the parameter list, the evaluation will be immediate."""
return orig_func
class ValueChoiceX(Translatable):
"""Internal API. Implementation note:
The transformed (X) version of value choice.
It can be the result of composition (transformation) of one or several value choices. For example,
.. code-block:: python
nn.ValueChoice([1, 2]) + nn.ValueChoice([3, 4]) + 5
The instance of base class cannot be created directly. Instead, they should be only the result of transformation of value choice.
Therefore, there is no need to implement ``create_fixed_module`` in this class, because,
1. For python-engine, value choice itself has create fixed module. Consequently, the transformation is born to be fixed.
2. For graph-engine, it uses evaluate to calculate the result.
Potentially, we have to implement the evaluation logic in oneshot algorithms. I believe we can postpone the discussion till then.
"""
def __init__(self, function: Callable[..., Any], repr_template: str, arguments: List[Any], dry_run: bool = True):
super().__init__()
if function is None:
# this case is a hack for ValueChoice subclass
# it will reach here only because ``__init__`` in ``nn.Module`` is useful.
return
self.function = function
self.repr_template = repr_template
self.arguments = arguments
assert any(isinstance(arg, ValueChoiceX) for arg in self.arguments)
if dry_run:
# for sanity check
self.dry_run()
def inner_choices(self) -> Iterable['ValueChoice']:
"""
Return an iterable of all leaf value choices.
Useful for composition of value choices.
No deduplication on labels. Mutators should take care.
"""
for arg in self.arguments:
if isinstance(arg, ValueChoiceX):
yield from arg.inner_choices()
def dry_run(self) -> Any:
"""
Dry run the value choice to get one of its possible evaluation results.
"""
# values are not used
return self._evaluate(iter([]), True)
def evaluate(self, values: Iterable[Any]) -> Any:
"""
Evaluate the result of this group.
``values`` should in the same order of ``inner_choices()``.
"""
return self._evaluate(iter(values), False)
def _evaluate(self, values: Iterable[Any], dry_run: bool = False) -> Any:
# "values" iterates in the recursion
eval_args = []
for arg in self.arguments:
if isinstance(arg, ValueChoiceX):
# recursive evaluation
eval_args.append(arg._evaluate(values, dry_run))
# the recursion will stop when it hits a leaf node (value choice)
# the implementation is in `ValueChoice`
else:
# constant value
eval_args.append(arg)
return self.function(*eval_args)
def _translate(self):
"""
Try to behave like one of its candidates when used in ``basic_unit``.
"""
return self.dry_run()
def __repr__(self):
reprs = []
for arg in self.arguments:
if isinstance(arg, ValueChoiceX) and not isinstance(arg, ValueChoice):
reprs.append('(' + repr(arg) + ')') # add parenthesis for operator priority
else:
reprs.append(repr(arg))
return self.repr_template.format(*reprs)
# the following are a series of methods to create "ValueChoiceX"
# which is a transformed version of value choice
# https://docs.python.org/3/reference/datamodel.html#special-method-names
# Special operators that can be useful in place of built-in conditional operators.
@staticmethod
@_valuechoice_staticmethod_helper
def to_int(obj: 'ValueChoiceOrAny') -> Union['ValueChoiceX', int]:
"""
Convert a ``ValueChoice`` to an integer.
"""
if isinstance(obj, ValueChoiceX):
return ValueChoiceX(int, 'int({})', [obj])
return int(obj)
@staticmethod
@_valuechoice_staticmethod_helper
def to_float(obj: 'ValueChoiceOrAny') -> Union['ValueChoiceX', float]:
"""
Convert a ``ValueChoice`` to a float.
"""
if isinstance(obj, ValueChoiceX):
return ValueChoiceX(float, 'float({})', [obj])
return float(obj)
@staticmethod
@_valuechoice_staticmethod_helper
def condition(pred: 'ValueChoiceOrAny',
true: 'ValueChoiceOrAny',
false: 'ValueChoiceOrAny') -> 'ValueChoiceOrAny':
"""
Return ``true`` if the predicate ``pred`` is true else ``false``.
Examples
--------
>>> ValueChoice.condition(ValueChoice([1, 2]) > ValueChoice([0, 3]), 2, 1)
"""
if any(isinstance(obj, ValueChoiceX) for obj in [pred, true, false]):
return ValueChoiceX(lambda t, c, f: t if c else f, '{} if {} else {}', [true, pred, false])
return true if pred else false
@staticmethod
@_valuechoice_staticmethod_helper
def max(arg0: Union[Iterable['ValueChoiceOrAny'], 'ValueChoiceOrAny'],
*args: List['ValueChoiceOrAny']) -> 'ValueChoiceOrAny':
"""
Returns the maximum value from a list of value choices.
        The usage should be similar to Python's built-in ``max`` function,
        where the parameters could be an iterable, or at least two arguments.
"""
if not args:
return ValueChoiceX.max(*list(arg0))
lst = [arg0] + list(args)
if any(isinstance(obj, ValueChoiceX) for obj in lst):
return ValueChoiceX(max, 'max({})', lst)
return max(lst)
@staticmethod
@_valuechoice_staticmethod_helper
def min(arg0: Union[Iterable['ValueChoiceOrAny'], 'ValueChoiceOrAny'],
*args: List['ValueChoiceOrAny']) -> 'ValueChoiceOrAny':
"""
        Returns the minimum value from a list of value choices.
        The usage should be similar to Python's built-in ``min`` function,
        where the parameters could be an iterable, or at least two arguments.
"""
if not args:
return ValueChoiceX.min(*list(arg0))
lst = [arg0] + list(args)
if any(isinstance(obj, ValueChoiceX) for obj in lst):
return ValueChoiceX(min, 'min({})', lst)
return min(lst)
def __hash__(self):
# this is required because we have implemented ``__eq__``
return id(self)
# NOTE:
# Write operations are not supported. Reasons follow:
# - Semantics are not clear. It can be applied to "all" the inner candidates, or only the chosen one.
# - Implementation effort is too huge.
# As a result, inplace operators like +=, *=, magic methods like `__getattr__` are not included in this list.
def __getitem__(self, key: Any) -> 'ValueChoiceX':
return ValueChoiceX(lambda x, y: x[y], '{}[{}]', [self, key])
# region implement int, float, round, trunc, floor, ceil
# because I believe sometimes we need them to calculate #channels
# `__int__` and `__float__` are not supported because `__int__` is required to return int.
def __round__(self, ndigits: Optional[Any] = None) -> 'ValueChoiceX':
if ndigits is not None:
return ValueChoiceX(round, 'round({}, {})', [self, ndigits])
return ValueChoiceX(round, 'round({})', [self])
def __trunc__(self) -> 'ValueChoiceX':
raise RuntimeError("Try to use `ValueChoice.to_int()` instead of `math.trunc()` on value choices.")
def __floor__(self) -> 'ValueChoiceX':
return ValueChoiceX(math.floor, 'math.floor({})', [self])
def __ceil__(self) -> 'ValueChoiceX':
return ValueChoiceX(math.ceil, 'math.ceil({})', [self])
def __index__(self) -> NoReturn:
# https://docs.python.org/3/reference/datamodel.html#object.__index__
raise RuntimeError("`__index__` is not allowed on ValueChoice, which means you can't "
"use int(), float(), complex(), range() on a ValueChoice.")
def __bool__(self) -> NoReturn:
raise RuntimeError('Cannot use bool() on ValueChoice. That means, using ValueChoice in a if-clause is illegal. '
'Please try methods like `ValueChoice.max(a, b)` to see whether that meets your needs.')
# endregion
# region the following code is generated with codegen (see above)
# Annotated with "region" because I want to collapse them in vscode
def __neg__(self) -> 'ValueChoiceX':
return ValueChoiceX(operator.neg, '-{}', [self])
def __pos__(self) -> 'ValueChoiceX':
return ValueChoiceX(operator.pos, '+{}', [self])
def __invert__(self) -> 'ValueChoiceX':
return ValueChoiceX(operator.invert, '~{}', [self])
def __add__(self, other: Any) -> 'ValueChoiceX':
return ValueChoiceX(operator.add, '{} + {}', [self, other])
def __radd__(self, other: Any) -> 'ValueChoiceX':
return ValueChoiceX(operator.add, '{} + {}', [other, self])
def __sub__(self, other: Any) -> 'ValueChoiceX':
return ValueChoiceX(operator.sub, '{} - {}', [self, other])
def __rsub__(self, other: Any) -> 'ValueChoiceX':
return ValueChoiceX(operator.sub, '{} - {}', [other, self])
def __mul__(self, other: Any) -> 'ValueChoiceX':
return ValueChoiceX(operator.mul, '{} * {}', [self, other])
def __rmul__(self, other: Any) -> 'ValueChoiceX':
return ValueChoiceX(operator.mul, '{} * {}', [other, self])
def __matmul__(self, other: Any) -> 'ValueChoiceX':
return ValueChoiceX(operator.matmul, '{} @ {}', [self, other])
def __rmatmul__(self, other: Any) -> 'ValueChoiceX':
return ValueChoiceX(operator.matmul, '{} @ {}', [other, self])
    def __truediv__(self, other: Any) -> 'ValueChoiceX':
        return ValueChoiceX(operator.truediv, '{} / {}', [self, other])
    def __rtruediv__(self, other: Any) -> 'ValueChoiceX':
        return ValueChoiceX(operator.truediv, '{} / {}', [other, self])
    def __floordiv__(self, other: Any) -> 'ValueChoiceX':
        return ValueChoiceX(operator.floordiv, '{} // {}', [self, other])
    def __rfloordiv__(self, other: Any) -> 'ValueChoiceX':
        return ValueChoiceX(operator.floordiv, '{} // {}', [other, self])
def __mod__(self, other: Any) -> 'ValueChoiceX':
return ValueChoiceX(operator.mod, '{} % {}', [self, other])
def __rmod__(self, other: Any) -> 'ValueChoiceX':
return ValueChoiceX(operator.mod, '{} % {}', [other, self])
def __lshift__(self, other: Any) -> 'ValueChoiceX':
return ValueChoiceX(operator.lshift, '{} << {}', [self, other])
def __rlshift__(self, other: Any) -> 'ValueChoiceX':
return ValueChoiceX(operator.lshift, '{} << {}', [other, self])
def __rshift__(self, other: Any) -> 'ValueChoiceX':
return ValueChoiceX(operator.rshift, '{} >> {}', [self, other])
def __rrshift__(self, other: Any) -> 'ValueChoiceX':
return ValueChoiceX(operator.rshift, '{} >> {}', [other, self])
def __and__(self, other: Any) -> 'ValueChoiceX':
return ValueChoiceX(operator.and_, '{} & {}', [self, other])
def __rand__(self, other: Any) -> 'ValueChoiceX':
return ValueChoiceX(operator.and_, '{} & {}', [other, self])
def __xor__(self, other: Any) -> 'ValueChoiceX':
return ValueChoiceX(operator.xor, '{} ^ {}', [self, other])
def __rxor__(self, other: Any) -> 'ValueChoiceX':
return ValueChoiceX(operator.xor, '{} ^ {}', [other, self])
def __or__(self, other: Any) -> 'ValueChoiceX':
return ValueChoiceX(operator.or_, '{} | {}', [self, other])
def __ror__(self, other: Any) -> 'ValueChoiceX':
return ValueChoiceX(operator.or_, '{} | {}', [other, self])
def __lt__(self, other: Any) -> 'ValueChoiceX':
return ValueChoiceX(operator.lt, '{} < {}', [self, other])
def __le__(self, other: Any) -> 'ValueChoiceX':
return ValueChoiceX(operator.le, '{} <= {}', [self, other])
def __eq__(self, other: Any) -> 'ValueChoiceX':
return ValueChoiceX(operator.eq, '{} == {}', [self, other])
def __ne__(self, other: Any) -> 'ValueChoiceX':
return ValueChoiceX(operator.ne, '{} != {}', [self, other])
def __ge__(self, other: Any) -> 'ValueChoiceX':
return ValueChoiceX(operator.ge, '{} >= {}', [self, other])
def __gt__(self, other: Any) -> 'ValueChoiceX':
return ValueChoiceX(operator.gt, '{} > {}', [self, other])
# endregion
# __pow__, __divmod__, __abs__ are special ones.
# Not easy to cover those cases with codegen.
def __pow__(self, other: Any, modulo: Optional[Any] = None) -> 'ValueChoiceX':
if modulo is not None:
return ValueChoiceX(pow, 'pow({}, {}, {})', [self, other, modulo])
return ValueChoiceX(lambda a, b: a ** b, '{} ** {}', [self, other])
def __rpow__(self, other: Any, modulo: Optional[Any] = None) -> 'ValueChoiceX':
if modulo is not None:
return ValueChoiceX(pow, 'pow({}, {}, {})', [other, self, modulo])
return ValueChoiceX(lambda a, b: a ** b, '{} ** {}', [other, self])
def __divmod__(self, other: Any) -> 'ValueChoiceX':
return ValueChoiceX(divmod, 'divmod({}, {})', [self, other])
def __rdivmod__(self, other: Any) -> 'ValueChoiceX':
return ValueChoiceX(divmod, 'divmod({}, {})', [other, self])
def __abs__(self) -> 'ValueChoiceX':
return ValueChoiceX(abs, 'abs({})', [self])
ValueChoiceOrAny = TypeVar('ValueChoiceOrAny', ValueChoiceX, Any)
class ValueChoice(ValueChoiceX, Mutable):
"""
ValueChoice is to choose one from ``candidates``.
In most use scenarios, ValueChoice should be passed to the init parameters of a serializable module. For example,
.. code-block:: python
class Net(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(3, nn.ValueChoice([32, 64]), kernel_size=nn.ValueChoice([3, 5, 7]))
def forward(self, x):
return self.conv(x)
    In case you want to search a parameter that is used repeatedly, this is also possible by sharing the same value choice instance.
(Sharing the label should have the same effect.) For example,
.. code-block:: python
class Net(nn.Module):
def __init__(self):
super().__init__()
hidden_dim = nn.ValueChoice([128, 512])
self.fc = nn.Sequential(
nn.Linear(64, hidden_dim),
nn.Linear(hidden_dim, 10)
)
# the following code has the same effect.
# self.fc = nn.Sequential(
# nn.Linear(64, nn.ValueChoice([128, 512], label='dim')),
# nn.Linear(nn.ValueChoice([128, 512], label='dim'), 10)
# )
def forward(self, x):
return self.fc(x)
Note that ValueChoice should be used directly. Transformations like ``nn.Linear(32, nn.ValueChoice([64, 128]) * 2)``
are not supported.
Another common use case is to initialize the values to choose from in init and call the module in forward to get the chosen value.
    Usually, this is used to pass a mutable value to a functional API like ``torch.xxx`` or ``nn.functional.xxx``.
For example,
.. code-block:: python
class Net(nn.Module):
def __init__(self):
super().__init__()
self.dropout_rate = nn.ValueChoice([0., 1.])
def forward(self, x):
return F.dropout(x, self.dropout_rate())
Parameters
----------
candidates : list
List of values to choose from.
prior : list of float
Prior distribution to sample from.
label : str
Identifier of the value choice.
"""
# FIXME: prior is designed but not supported yet
@classmethod
def create_fixed_module(cls, candidates: List[Any], *, label: Optional[str] = None, **kwargs):
value = get_fixed_value(label)
if value not in candidates:
raise ValueError(f'Value {value} does not belong to the candidates: {candidates}.')
return value
def __init__(self, candidates: List[Any], *, prior: Optional[List[float]] = None, label: Optional[str] = None):
super().__init__(None, None, None)
self.candidates = candidates
self.prior = prior or [1 / len(candidates) for _ in range(len(candidates))]
assert abs(sum(self.prior) - 1) < 1e-5, 'Sum of prior distribution is not 1.'
self._label = generate_new_label(label)
self._accessor = []
@property
def label(self):
return self._label
def forward(self):
warnings.warn('You should not run forward of this module directly.')
return self.candidates[0]
def inner_choices(self) -> Iterable['ValueChoice']:
# yield self because self is the only value choice here
yield self
def dry_run(self) -> Any:
return self.candidates[0]
def _evaluate(self, values: Iterable[Any], dry_run: bool = False) -> Any:
if dry_run:
return self.candidates[0]
try:
value = next(values)
except StopIteration:
raise ValueError(f'Value list {values} is exhausted when trying to get a chosen value of {self}.')
if value not in self.candidates:
raise ValueError(f'Value {value} does not belong to the candidates of {self}.')
return value
def __repr__(self):
return f'ValueChoice({self.candidates}, label={repr(self.label)})'
@basic_unit
class Placeholder(nn.Module):
"""
The API that creates an empty module for later mutations.
For advanced usages only.
"""
def __init__(self, label, **related_info):
self.label = label
self.related_info = related_info
super().__init__()
def forward(self, x):
return x
|
from distutils.dir_util import copy_tree, remove_tree
import os
import shutil
def _copy_function(source, destination):
print('Bootstrapping project at %s' % destination)
copy_tree(source, destination)
def create_app():
cwd = os.getcwd()
game_logic_path = os.path.join(cwd, 'game_logic')
game_app_interface = os.path.join(cwd, 'game_app.py')
app_template = os.path.join(cwd, 'engine', 'app_template')
_game_logic_path_exists = os.path.exists(game_logic_path)
_game_app_interface_exists = os.path.exists(game_app_interface)
if _game_logic_path_exists or _game_app_interface_exists:
answer = input(
'game_app.py or game_logic module already exists. Continue? (y/n). ' +
'\nWARNING: This will remove all contents of game_logic module, use at your own risk:'.upper()
)
if answer == 'y':
if _game_app_interface_exists:
os.remove(game_app_interface)
if _game_logic_path_exists:
remove_tree(game_logic_path)
_copy_function(app_template, cwd)
else:
_copy_function(app_template, cwd)
if not os.path.exists('settings.yaml'):
shutil.copy2('settings.yaml.template', 'settings.yaml')
if not os.path.exists('logging.yaml'):
shutil.copy2('logging.yaml.template', 'logging.yaml')
if __name__ == '__main__':
create_app()
|
from django.contrib.auth import get_user_model
from django.db import models
User = get_user_model()
class Chat(models.Model):
from_user = models.ForeignKey(User, on_delete=models.DO_NOTHING, related_name='from_chats')
to_user = models.ForeignKey(User, on_delete=models.DO_NOTHING, related_name='to_chats')
message = models.TextField(verbose_name='Message', default='')
|
from typing import Optional
from watchmen_auth import PrincipalService
from watchmen_model.admin import User, UserRole
from watchmen_model.common import TenantId, UserId
from watchmen_utilities import is_blank
def fake_super_admin() -> PrincipalService:
return PrincipalService(User(
userId='1',
userName='imma-super',
tenantId='-1',
role=UserRole.SUPER_ADMIN
))
def fake_tenant_admin(
tenant_id: TenantId,
user_id: Optional[UserId] = None, user_name: Optional[str] = None, ) -> PrincipalService:
return PrincipalService(User(
userId='1' if is_blank(user_id) else user_id,
userName='imma-super' if is_blank(user_name) else user_name,
tenantId='-1' if is_blank(tenant_id) else tenant_id,
role=UserRole.ADMIN
))
|
import tensorflow as tf
a = tf.linspace(-10., 10., 10)
with tf.GradientTape() as tape:
tape.watch(a)
y = tf.sigmoid(a)
grads = tape.gradient(y, [a])
print('x:', a.numpy())
print('y:', y.numpy())
print('grad:', grads[0].numpy())
|
from respite.inflector import pluralize, cc2us
def route(regex, view, method, name):
"""
Route the given view.
:param regex: A string describing a regular expression to which the request path will be matched.
:param view: A string describing the name of the view to delegate the request to.
:param method: A string describing the HTTP method that this view accepts.
:param name: A string describing the name of the URL pattern.
``regex`` may also be a lambda that accepts the parent resource's ``prefix`` argument and returns
a string describing a regular expression to which the request path will be matched.
``name`` may also be a lambda that accepts the parent resource's ``views`` argument and returns
a string describing the name of the URL pattern.
"""
return _Route(regex, view, method, name)
class _Route(object):
def __init__(self, regex, view, method, name):
self.regex = regex
self.view = view
self.method = method
self.name = name
|
from django.db import models
class Account(models.Model):
"""
Stores all attachments of the IHNA accounts to the corefacility accounts
"""
email = models.EmailField(db_index=True, unique=True,
help_text="The user e-mail as typed for the IHNA personal page")
user = models.OneToOneField("core.User", on_delete=models.CASCADE, related_name="ihna_account",
help_text="The corefacility user to which ihna account is attached")
|
"""
A module for establishing fractional upper limits on modulation for a
light curve *with calibrated photon weights*. Uses include upper
limits on pulsars and on orbital modulation eclipsing MSP binaries.
The basic idea is to proceed by Monte Carlo. For each iteration, pseudo-
random numbers are drawn from a uniform distribution and compared to the
weights to determine if a given photon is "from the source" or "from the
background".
This is accomplished with the usual LC* machinery, which also requires
a template, which the user should supply in the form of, e.g., a typical
pulsar light curve, a notch, sinusoidal modulation, etc.
Then, the amplitude of the modulated component is gradually increased
until the weighted H-test (or other statistic) surpasses some threshold
for some fraction, e.g. 2/3, of the simulated population. This gives
the fractional detection threshold for orbital (or any) modulation.
$Header: /nfs/slac/g/glast/ground/cvs/pointlike/python/uw/pulsar/modtest.py,v 1.3 2012/11/29 00:29:45 kerrm Exp $
author: M. Kerr <[email protected]>
"""
import numpy as np
from .stats import hmw
from . import lcprimitives
from . import lctemplate
def get_stat_dist(lct,weights,stat,n=100):
""" Use the provided template, photon weights (CALIBRATED) and
statistic (TS) to generate a distribution of the TS at the
specified amplitude."""
stats = np.empty(n)
    for i in range(n):
phases = lct.random(len(weights),weights=weights)
stats[i] = stat(phases,weights)
return np.sort(stats)
def fill_grid(lct,weights,stat=hmw,gmin=1e-5,gmax=1-1e-5,ngrid=20,
fraction=[0.05,0.5,0.95],n=100):
""" Find the distribution of a pulsation test statistic over a grid of
modulation normalizations.
lct -- instance of LCTemplate; the light curve shape
weights -- probability a photon comes from the pulsar
stat -- [hmw] the pulsation test statistic to use
gmin -- [1e-5] minimum modulation to use
gmax -- [1-1e-5] maximum modulation to use
fraction -- positions in the cdf of the MC population
n -- [100] size of the Monte Carlo sample to use
Returns: (grid,vals)
grid -- the modulation amplitudes tested
vals -- the specified [fraction] contours of the distribution
"""
index = np.round(n*np.asarray(fraction)).astype(int)
    grid = np.linspace(gmin,gmax,ngrid)
vals = np.empty([len(index),len(grid)])
for i,g in enumerate(grid):
lct.norms.set_total(g)
s = get_stat_dist(lct,weights,stat,n)
import pylab as pl
pl.plot(s+5*i)
vals[:,i] = s[index]
return grid,vals
def find_threshold(grid,vals,threshold,index=0,degree=2,tol=0.02):
""" Use polynomial fits to solve for the modulation fraction giving
an appropriate confidence interval.
grid/vals -- results of fill_grid
threshold -- the stat value for which to compute the interval
[e.g. H=3]
index -- index into vals
[vals is an array for i=0 is 5% confidence, i=1
is 50% confidence, and i=2 is 95% confidence, by
default]
tol -- requested agreement between smooth and linearly
interpolated values; essentially sanity check
So if using the default values, one finds a 95% upper limit on
the modulation fraction for the given threshold.
"""
v = vals[index,:]-threshold
if not np.any(v>0):
print ('No modulation value satisfies threshold.')
return None
if np.all(v>0):
print ('All modulation values are above threshold, returning min.')
return grid[0]
# first, use a "smoothed" version of sig vs. modfrac
p = np.polyfit(grid,v,degree)
if degree==2:
a2,a1,a0 = p
s1 = (-a1 + (a1**2-4*a0*a2)**0.5)/(2*a2)
s2 = (-a1 - (a1**2-4*a0*a2)**0.5)/(2*a2)
dom = np.linspace(grid[0],grid[-1],1001)
cod = np.polyval(p,dom)
idx = np.searchsorted(cod,0)
x0 = dom[idx:idx+2].sum()/2
# now, the simple-minded linear interpolation; note we are guaranteed
# zero crossing by the boundary checks at start of function
idx = np.searchsorted(v,0)
xhi,xlo = grid[idx],grid[idx-1]
yhi,ylo = v[idx],v[idx-1]
x1 = xlo - ylo/(yhi-ylo)*(xhi-xlo)
# check for consistency
if abs(x1-x0) > tol:
print ('Warning! Potential failure of smoothed method:')
print ('Smoothed: %.2f'%x0)
print ('Linear: %.2f'%x1)
print ('Returning linear value.')
return x1
return x0
def test_sinusoid(weights,order=1):
""" Calculate limits on (by default) sinusoidal modulation. Higher
harmonics are supported by the [order] kwarg."""
p0 = lcprimitives.LCHarmonic(order=order)
lct = lctemplate.LCTemplate([p0],[1])
grid,vals = fill_grid(lct,weights)
return grid,vals
def test_notch(weights,width=0.05):
""" Test for a dropout of gamma-ray emission with a notch shape."""
p0 = lcprimitives.LCTopHat(p=[1-width,0])
lct = lctemplate.LCTemplate([p0],[1])
grid,vals = fill_grid(lct,weights)
return grid,vals
def test_pulsar(weights):
""" Look for a canonical 2peak+bridge pulsar light curve."""
x2 = 0.55
lor = True
bfrac = 0.1
skew = False
lct = lctemplate.get_gauss2(pulse_frac=0.25,bridge_frac=bfrac,lorentzian=lor,skew=skew,x1=0.2,x2=x2,ratio=1.1,width1=0.02,width2=0.02)
grid,vals = fill_grid(lct,weights)
return grid,vals
def test_eclipse(weights,frac=0.95,threshold=8):
""" Find the narrowest eclipse (total modulation) width that can be
detected (frac of the Monte Carlo trials exceeding
threshold)."""
width_grid = np.arange(0.05,1,0.05)
stat_grid = np.empty_like(width_grid)
n = 100
idx = int(round((1-frac)*n))
    for i in range(len(width_grid)):
p0 = lcprimitives.LCTopHat(p=[1-width_grid[i],0.5])
lct = lctemplate.LCTemplate([p0],[1]) # 1 == full eclipse
stats = get_stat_dist(lct,weights,hmw,n=n)
stat_grid[i] = stats[idx]
stat_grid = np.array([stat_grid]) # match shape expectation
width = find_threshold(width_grid,stat_grid,threshold)
return stat_grid[0],width
"""
# example
import pylab as pl
import astropy.io.fits as pyfits
f = pyfits.open('/edata/single_sources/j1124m3653/gamma_products/j1124m3653-ft1_gtselect_gtmktime_r2.fits')
weights = f[1].data.field('WEIGHT')
f.close()
g1,v1 = test_sinusoid(weights)
g2,v2 = test_notch(weights)
g3,v3 = test_pulsar(weights)
# display results and print threshold
for v in [v1,v2,v3]:
pl.errorbar(x=g1,y=v[1],yerr=[v[1]-v[0],v[2]-v[1]])
print (find_threshold(g1,v,15))
"""
|
import logging
import threading
from contextlib import contextmanager
import ipfshttpclient
import multibase
from lru import LRU
from . import unixfs_pb2
logger = logging.getLogger(__name__)
class InvalidIPFSPathException(Exception):
pass
class CachedIPFS:
def __init__(
self,
ipfs_client, # ipfshttpclient client instance
attr_cache_size=1024 * 128,
ls_cache_size=64,
block_cache_size=16, # ~16MB assuming 1MB max block size
link_cache_size=256,
timeout=30.0, # in seconds
):
self.client = ipfs_client
self.client_request_kwargs = {
'timeout': timeout,
}
self.resolve_cache = LockingLRU(attr_cache_size)
self.cid_type_cache = LockingLRU(attr_cache_size)
self.path_size_cache = LockingLRU(attr_cache_size)
self.ls_cache = LockingLRU(ls_cache_size)
self.block_cache = LockingLRU(block_cache_size)
self.subblock_cids_cache = LockingLRU(link_cache_size)
self.subblock_sizes_cache = LockingLRU(link_cache_size)
def resolve(self, path):
""" Get CID (content id) of a path. """
with self.resolve_cache.get_or_lock(path) as (in_cache, value):
if in_cache:
return value
try:
absolute_path = self.client.resolve(path, **self.client_request_kwargs)['Path']
except ipfshttpclient.exceptions.ErrorResponse:
absolute_path = None
if absolute_path is None or not absolute_path.startswith('/ipfs/'):
self.resolve_cache[path] = None
return None
cid = absolute_path[6:] # strip '/ipfs/'
self.resolve_cache[path] = cid
return cid
def block(self, cid):
""" Get payload of IPFS object or raw block """
with self.block_cache.get_or_lock(cid) as (in_cache, value):
if in_cache:
return value
if self._is_object(cid):
# object
object_data = self._load_object(cid)
return object_data.Data
elif self._is_raw_block(cid):
# raw block
block = self.client.block.get(cid, **self.client_request_kwargs)
self.block_cache[cid] = block
return block
else:
# unknown object type
raise InvalidIPFSPathException()
def subblock_cids(self, cid):
""" Get blocks linked from given IPFS object / block """
if self._is_object(cid):
# object
with self.subblock_cids_cache.get_or_lock(cid) as (in_cache, value):
if in_cache:
return value
subblock_cids = [
link['Hash']
for link in self.client.object.links(
cid,
**self.client_request_kwargs,
).get('Links', [])
]
self.subblock_cids_cache[cid] = subblock_cids
return subblock_cids
elif self._is_raw_block(cid):
# raw block - it has no subblocks
return []
else:
# unknown object type
raise InvalidIPFSPathException()
def subblock_sizes(self, cid):
""" Get sizes of blocks linked from given IPFS object / block
(in the same order as in subblock_cids)
"""
if self._is_object(cid):
# object
with self.subblock_sizes_cache.get_or_lock(cid) as (in_cache, value):
if in_cache:
return value
object_data = self._load_object(cid)
return object_data.blocksizes
elif self._is_raw_block(cid):
# raw block - it has no subblocks
return []
else:
# unknown object type
raise InvalidIPFSPathException()
def ls(self, path):
with self.ls_cache.get_or_lock(path) as (in_cache, value):
if in_cache:
return value
try:
ls_result = self.client.ls(
path,
**self.client_request_kwargs,
)['Objects'][0]['Links']
except ipfshttpclient.exceptions.ErrorResponse:
ls_result = None
self.ls_cache[path] = ls_result
return ls_result
def cid_ls(self, cid):
# cid is a valid path
return self.ls(cid)
def cid_size(self, cid):
if cid is None:
return None
with self.path_size_cache.get_or_lock(cid) as (in_cache, value):
if in_cache:
return value
if self._is_object(cid):
# object
object_data = self._load_object(cid)
return object_data.filesize
elif self._is_raw_block(cid):
# raw block
in_cache, block = self.block_cache.get(cid)
if in_cache:
size = len(block)
else:
size = self.client.block.stat(
cid,
**self.client_request_kwargs,
)['Size']
self.path_size_cache[cid] = size
return size
else:
# unknown object type
raise InvalidIPFSPathException()
def cid_type(self, cid):
if self._is_object(cid):
with self.cid_type_cache.get_or_lock(cid) as (in_cache, value):
if in_cache:
return value
object_data = self._load_object(cid)
return object_data.Type
elif self._is_raw_block(cid):
return unixfs_pb2.Data.Raw
else:
raise InvalidIPFSPathException()
def cid_is_dir(self, cid):
if cid is None:
return False
return self.cid_type(cid) in (
unixfs_pb2.Data.Directory,
unixfs_pb2.Data.HAMTShard,
)
def cid_is_file(self, cid):
if cid is None:
return False
return self.cid_type(cid) in (
unixfs_pb2.Data.File,
unixfs_pb2.Data.Raw,
)
def read_into(self, cid, offset, buff):
""" Read bytes begining at `offset` from given object/raw into
buffer. Returns end offset of copied data. """
size = len(buff)
end = offset
# copy data contained in this object
d = self.block(cid)[offset:(offset + size)]
n = len(d)
buff[0:n] = d
end += n
# copied all requested data?
if size <= n:
return end
# descend into child objects
block_offset = len(self.block(cid))
for blocksize, child_hash in zip(
self.subblock_sizes(cid),
self.subblock_cids(cid),
):
if offset + size <= block_offset:
# current block is past requested range
break
elif block_offset + blocksize <= offset:
# current block is before requested range
pass
else:
end = self.read_into(
child_hash,
max(0, offset - block_offset),
buff[(end - offset):(end - offset + blocksize)],
) + block_offset
# update offset to next block
block_offset += blocksize
return end
def _load_object(self, cid):
""" Get object data and fill relevant caches """
object_data = unixfs_pb2.Data()
object_data.ParseFromString(self.client.object.data(
cid,
**self.client_request_kwargs,
))
self.cid_type_cache[cid] = object_data.Type
self.path_size_cache[cid] = object_data.filesize
self.block_cache[cid] = object_data.Data
self.subblock_sizes_cache[cid] = object_data.blocksizes
return object_data
def _is_object(self, cid):
if cid.startswith('Q'):
# v0 object
return True
try:
cid_bytes = multibase.decode(cid)
except ValueError:
logger.exception("encountered malformed object/block id")
return False
# v1 object
return cid_bytes.startswith(bytes([0x01, 0x70]))
def _is_raw_block(self, cid):
try:
cid_bytes = multibase.decode(cid)
except ValueError:
logger.exception("encountered malformed object/block id")
return False
# v1 raw block
return cid_bytes.startswith(bytes([0x01, 0x55]))
class LockingLRU:
def __init__(self, *args, **kwargs):
self.cache = LRU(*args, **kwargs)
self.global_lock = threading.Lock()
self.key_events = {}
def get(self, key):
while True:
with self.global_lock:
if key in self.cache:
return True, self.cache[key]
if key in self.key_events:
key_event = self.key_events[key]
else:
return False, None
key_event.wait()
@contextmanager
def get_or_lock(self, key):
value, event = self._get_value_or_release_event(key)
if event:
try:
yield False, None
finally:
with self.global_lock:
del self.key_events[key]
event.set()
else:
yield True, value
def __setitem__(self, key, value):
with self.global_lock:
self.cache[key] = value
def _get_value_or_release_event(self, key):
while True:
with self.global_lock:
if key in self.cache:
return self.cache[key], None
if key in self.key_events:
key_event = self.key_events[key]
else:
key_event = threading.Event()
self.key_events[key] = key_event
return None, key_event
key_event.wait()
|
# Juju requires higher frame size for large models
MAX_FRAME_SIZE = 2**26
# Not all charms use the openstack-origin. The openstack specific
# charms do, but some of the others use an alternate origin key
# depending on who the author was.
ORIGIN_KEYS = {
'ceph': 'source',
'ceph-osd': 'source',
'ceph-mon': 'source',
'ceph-radosgw': 'source',
}
# Default list of services, used in upgrade if apps not specified in params
# Services are upgraded in the order specified
# The order of services is based on:
# https://github.com/openstack-charmers/openstack-charms-tools/blob/master/os-upgrade.py
# https://docs.openstack.org/project-deploy-guide/charm-deployment-guide/latest/app-upgrade-openstack.html
SERVICES = [
# Identity
'keystone',
# Ceph
'ceph-mon',
'ceph-osd',
'ceph-radosgw',
# Image
'glance',
# Upgrade nova
'nova-cloud-controller',
'nova-compute',
# Neutron upgrades
'neutron-api',
'neutron-gateway',
'neutron-openvswitch',
# Backend block-storage upgrade.
# Note: just upgrade cinder service.
'cinder',
'cinder-ceph',
# Upgrade dashboard
'openstack-dashboard',
'rabbitmq-server',
]
|
# Copyright 2009-2014 Eucalyptus Systems, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
# Please contact Eucalyptus Systems, Inc., 6755 Hollister Ave., Goleta
# CA 93117, USA or visit http://www.eucalyptus.com/licenses/ if you need
# additional information or have any questions.
class ServerCertificate(object):
def __init__(self, cert, pk):
self.certificate = cert
self.pk = pk
def get_certificate(self):
return self.certificate
def get_private_key(self):
return self.pk
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
# TODO (bev) validate entire list of props
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# External imports
import mock
# Bokeh imports
from bokeh.core.validation import check_integrity
from bokeh.models import LayoutDOM
# Module under test
from bokeh.models.tools import Toolbar, ToolbarBox # isort:skip
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
def test_Toolbar() -> None:
tb = Toolbar()
assert tb.active_drag == 'auto'
assert tb.active_inspect == 'auto'
assert tb.active_scroll == 'auto'
assert tb.active_tap == 'auto'
assert tb.autohide is False
def test_Toolbar_with_autohide() -> None:
tb = Toolbar(autohide=True)
assert tb.active_drag == 'auto'
assert tb.active_inspect == 'auto'
assert tb.active_scroll == 'auto'
assert tb.active_tap == 'auto'
assert tb.autohide is True
#
# ToolbarBox
#
def test_toolbar_box_is_instance_of_LayoutDOM() -> None:
tb_box = ToolbarBox()
assert isinstance(tb_box, LayoutDOM)
def test_toolbar_box_properties() -> None:
tb_box = ToolbarBox()
assert tb_box.toolbar_location == "right"
@mock.patch('bokeh.io.showing._show_with_state')
def test_toolbar_box_with_no_children_does_not_raise_a_bokeh_warning(mock__show_with_state) -> None:
# This is the normal way a ToolbarBox would be instantiated for example in
# a gridplot. So we don't want to worry people with warnings. The children
# for the ToolbarBox are created on the JS side.
tb_box = ToolbarBox()
with mock.patch('bokeh.core.validation.check.log') as mock_logger:
check_integrity([tb_box])
assert mock_logger.warning.call_count == 0
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
import backend.Modules.Routes.Routes as Routes
class Notification():
__instance = None
    @staticmethod
    def getInstance():
        """ Static access method; the singleton must have been created with an email first. """
        if Notification.__instance is None:
            raise Exception("Notification has not been initialized with an email yet")
        return Notification.__instance
    def __init__(self, email):
        if Notification.__instance is not None:
            raise Exception("This class is a singleton!")
        else:
            Notification.__instance = self
            self._email = email
def email(self):
return self._email
def setEmail(self, email):
self._email = email
def sendEmail(self):
if len(self._email) > 0:
print("::::::::::::::::::::::::::::")
print(":::: SENDING EMAIL...... ::::")
print("::::::::::::::::::::::::::::")
sender = Routes.returnEmail()
receiver = self._email
serverEmail = smtplib.SMTP(host='smtp.gmail.com', port=587)
serverEmail.ehlo()
serverEmail.starttls()
serverEmail.ehlo()
serverEmail.login(sender, Routes.returnEmailPassword())
            msg = MIMEMultipart()
            message = MIMEText("Este correo ha sido enviado desde - Analysis Of Time Windows To Detect Botnet Behaviour-2")
            msg['From'] = sender
            msg['To'] = receiver
            msg['Subject'] = "Anomalía presentada en el análisis de red"
            # Attach the MIMEText part itself; re-wrapping message.as_string()
            # in another MIMEText would leak its headers into the body.
            msg.attach(message)
print(sender)
print(receiver)
serverEmail.sendmail(sender, receiver, msg.as_string())
serverEmail.close()
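# --- Hedged usage sketch (not in the original module) ---
# The singleton must be constructed once with a recipient before getInstance()
# can return it; actually sending requires valid Gmail credentials from Routes.
# notifier = Notification("recipient@example.com")
# Notification.getInstance().sendEmail()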
|
# xxx4pods plugin by AliAbdul
from Podcast import Podcast
##################################################
class xxx4pods(Podcast):
def __init__(self):
Podcast.__init__(self, "xxx4pods", "xxx4pods.png", "http://xxx4pods.com/podcasts/podcast.xml")
##################################################
def getPlugin():
return xxx4pods()
|
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
import pkg_resources
OUTPUT = "output"
HASH_PREFIX = "a"
CONFIG_PARAM_NAME = "/servicecatalog-factory/config"
PUBLISHED_VERSION = pkg_resources.require("aws-service-catalog-factory")[0].version
VERSION = PUBLISHED_VERSION
BOOTSTRAP_STACK_NAME = "servicecatalog-factory"
SERVICE_CATALOG_FACTORY_REPO_NAME = "ServiceCatalogFactory"
NON_RECOVERABLE_STATES = [
"ROLLBACK_COMPLETE",
"CREATE_IN_PROGRESS",
"ROLLBACK_IN_PROGRESS",
"DELETE_IN_PROGRESS",
"UPDATE_IN_PROGRESS",
"UPDATE_COMPLETE_CLEANUP_IN_PROGRESS",
"UPDATE_ROLLBACK_IN_PROGRESS",
"UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS",
"REVIEW_IN_PROGRESS",
]
PRODUCT = "product.j2"
PRODUCT_TERRAFORM = "product-terraform.j2"
PRODUCT_CLOUDFORMATION = "product-cloudformation.j2"
PRODUCT_COMBINED_CLOUDFORMATION = "product-combined-cloudformation.j2"
TERRAFORM_TEMPLATE = "terraform.template.yaml.j2"
ASSOCIATIONS = "associations.j2"
HOME_REGION = os.environ.get(
"AWS_REGION", os.environ.get("AWS_DEFAULT_REGION", "eu-west-1")
)
RESULTS_DIRECTORY = "results"
PIPELINE_MODE_COMBINED = "combined"
PIPELINE_MODE_SPILT = "split"
PIPELINE_MODE_DEFAULT = PIPELINE_MODE_SPILT
PROVISIONERS_CLOUDFORMATION = "CloudFormation"
PROVISIONERS_DEFAULT = PROVISIONERS_CLOUDFORMATION
TEMPLATE_FORMATS_YAML = "yaml"
TEMPLATE_FORMATS_DEFAULT = TEMPLATE_FORMATS_YAML
STATUS_ACTIVE = "active"
STATUS_TERMINATED = "terminated"
STATUS_DEFAULT = STATUS_ACTIVE
PACKAGE_BUILD_SPEC_IMAGE_DEFAULT = "aws/codebuild/standard:4.0"
PACKAGE_BUILD_SPEC_DEFAULT = """
version: 0.2
phases:
install:
runtime-versions:
python: 3.7
build:
commands:
{% for region in ALL_REGIONS %}
- aws cloudformation package --region {{ region }} --template $(pwd)/product.template.yaml --s3-bucket sc-factory-artifacts-${ACCOUNT_ID}-{{ region }} --s3-prefix ${STACK_NAME} --output-template-file product.template-{{ region }}.yaml
{% endfor %}
artifacts:
files:
- '*'
- '**/*'
"""
|
from typing import List
class Solution:
def searchInsert(self, nums: List[int], target: int) -> int:
return self.searchInsertRecurse(nums, 0, target)
def searchInsertRecurse(self, nums: List[int], start: int, target: int) -> int:
if not nums:
return 0
mid = len(nums) // 2
if nums[mid] == target:
return start + mid
if nums[mid] > target:
if mid == 0:
return start
return self.searchInsertRecurse(nums[:mid], start, target)
if mid == len(nums) - 1:
return start + len(nums)
return self.searchInsertRecurse(nums[mid + 1:], start + mid + 1, target)
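# --- Hedged usage check (not part of the original solution) ---
if __name__ == "__main__":
    s = Solution()
    assert s.searchInsert([1, 3, 5, 6], 5) == 2  # exact match
    assert s.searchInsert([1, 3, 5, 6], 2) == 1  # insert between 1 and 3
    assert s.searchInsert([1, 3, 5, 6], 7) == 4  # insert past the end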
|
from datetime import datetime
from sqlalchemy import desc
from app.models import Session, DatabaseHelper
from app.models.environment import Environment
from app.models.schema.environment import EnvironmentForm
from app.utils.logger import Log
class EnvironmentDao(object):
log = Log("EnvironmentDao")
@staticmethod
def insert_env(data: EnvironmentForm, user):
try:
with Session() as session:
query = session.query(Environment).filter_by(name=data.name, deleted_at=None).first()
if query is not None:
return f"环境{data.name}已存在"
env = Environment(**data.dict(), user=user)
session.add(env)
session.commit()
except Exception as e:
EnvironmentDao.log.error(f"新增环境: {data.name}失败, {e}")
return f"新增环境: {data.name}失败, {str(e)}"
return None
@staticmethod
def update_env(data: EnvironmentForm, user):
try:
with Session() as session:
query = session.query(Environment).filter_by(id=data.id, deleted_at=None).first()
if query is None:
return f"环境{data.name}不存在"
DatabaseHelper.update_model(query, data, user)
session.commit()
except Exception as e:
EnvironmentDao.log.error(f"编辑环境失败: {str(e)}")
return f"编辑环境失败: {str(e)}"
return None
@staticmethod
def list_env(page, size, name=None, exactly=False):
try:
search = [Environment.deleted_at == None]
with Session() as session:
if name:
search.append(Environment.name.ilike("%{}%".format(name)))
if exactly:
data = session.query(Environment).filter(*search).all()
return data, len(data), None
data = session.query(Environment).filter(*search)
total = data.count()
return data.order_by(desc(Environment.created_at)).offset((page - 1) * size).limit(
size).all(), total, None
except Exception as e:
EnvironmentDao.log.error(f"获取环境列表失败, {str(e)}")
return [], 0, f"获取环境列表失败, {str(e)}"
@staticmethod
def delete_env(id, user):
try:
with Session() as session:
query = session.query(Environment).filter_by(id=id).first()
if query is None:
return f"环境{id}不存在"
query.deleted_at = datetime.now()
query.update_user = user
session.commit()
except Exception as e:
EnvironmentDao.log.error(f"删除环境失败: {str(e)}")
return f"删除环境失败: {str(e)}"
return None
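# --- Hedged usage sketch (not in the original module) ---
# Each EnvironmentDao method returns an error-message string on failure and
# None on success; EnvironmentForm is assumed to be the pydantic schema
# imported above, and the field values here are illustrative only.
# err = EnvironmentDao.insert_env(EnvironmentForm(name="test"), user="admin")
# if err is not None:
#     print(err)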
|
import pandas as pd
from sqlalchemy import create_engine
import os
import urllib
# Create a connection to the database.
DB_CREDENTIALS = os.environ.get("DATABASE_PARAMS")
SQLALCHEMY_DATABASE_URI = "mssql+pyodbc:///?odbc_connect=%s" % urllib.parse.quote_plus(
DB_CREDENTIALS
)
# Change file variable when an updated CSV needs to be used.
file = "lkp_postal_nodes.csv"
engine = create_engine(SQLALCHEMY_DATABASE_URI, echo=True)
# Create a new table (with data) based on the CSV file whenever the docker container is rebuilt.
def add_lrf_data():
    try:
        data = pd.read_csv(file, index_col=0)
        data.to_sql("lrf_data", engine, if_exists="replace")
    except Exception as e:
        print(e)
if __name__ == "__main__":
add_lrf_data()
|
""" Path to href conversion (and back). """
from mdtools import util
def path_to_href_abs(target, base_dir):
    """ Generate absolute href (/root/a/b/c.md)
    - target: Path to target
    - base_dir: The Path to the base directory, used to generate absolute hrefs
    - Output: href if successful, or None.
    """
href = util.path_to_href(target, base_dir)
if href:
return '/' + href
def path_to_href_rel(target, base_rel):
""" Generate relative href (b/c.md)
- target: Path to target
- base_rel: The Path to the file containing the link, used to generate relative hrefs
- Output: href if successful, or None.
"""
if target == base_rel:
# The target is actually the file itself.
return ''
else:
# The directory containing the markdown file that contains the offending link
base_dir_rel = base_rel.parent
# Try to find a relative path without going up ("..")
href = util.path_to_href(target, base_dir_rel)
if href:
return href
else:
# Try to find a relative path with going up ("..").
max_up = 3 # Max number of "ups" allowed
dir_up = ''
            for _ in range(max_up):
                dir_up = dir_up + '../'
                base_dir_rel = base_dir_rel.parent
                href = util.path_to_href(target, base_dir_rel)
                if href:
                    # Found
                    return dir_up + href
            # No relative path found within max_up levels of "..".
            return None
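# --- Hedged usage sketch (not in the original module) ---
# Both helpers take pathlib.Path objects; the exact hrefs depend on
# mdtools.util.path_to_href, so the expected values below are illustrative.
# from pathlib import Path
# path_to_href_abs(Path("root/a/b/c.md"), Path("root"))             # -> "/a/b/c.md"
# path_to_href_rel(Path("root/a/b/c.md"), Path("root/a/index.md"))  # -> "b/c.md"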
|
import json
from django.http import HttpResponseNotAllowed, HttpResponseRedirect, HttpResponse, Http404
from shorturls import ShortURL
from longurls import LongURL
from urlparse import urlparse
import webapp2
import secure.settings as settings
from parsers import Parsers
from trafficfilters import TrafficFilters
import loggr
def maintenance_page(self, request):
return HttpResponse("<H1>The system is down for maintenance.</H1>", content_type="text/html")
class Dispatcher(webapp2.RequestHandler):
def __init__(self):
super(Dispatcher, self).__init__()
self._botdetector = TrafficFilters()
self._event = loggr.SnakrEventLogger()
return
def dispatch(self, **table):
def invalid_method(request, *args, **kwargs):
r = request.method
return HttpResponseNotAllowed(r)
def d(request, *args, **kwargs):
h = table.get(request.method, invalid_method)
return h(request, *args, **kwargs)
return d
def get_handler(self, request, *args, **kwargs):
#
# check for unfriendly bot first
#
self._botdetector.filter_traffic(request)
#
# if this is a request to reload the blacklists, do so
#
url_parts = urlparse(request.build_absolute_uri())
if settings.THIRDPARTY_FILELOAD_CRONJOB_URL in url_parts.geturl():
if settings.THRIDPARTY_BLACKLISTS:
self._event.log(event_type='I',
message='START automatic daily third party blacklist reload.')
self._botdetector.reload_thirdpartyblacklists()
self._event.log(event_type='I',
message='END automatic daily third party blacklist reload.')
return HttpResponse("<H2>OkeyDokey</H2>", content_type="text/html")
#
# favicon handling
#
if 'favicon.ico' in url_parts.geturl():
raise Http404
#
# create an instance of the ShortURL object, validate the short URL,
# and if successful load the ShortURL instance with it
#
s = ShortURL(request)
#
# lookup the long url previously used to generate the short url
#
longurl = s.getlongurl(request)
#
# if found, 302 to it; otherwise, 404
#
if longurl:
return HttpResponseRedirect(longurl)
else:
raise Http404
def post_handler(self, request, *args, **kwargs):
#
# Restrict new short url creation to GAE project owners
# Outside offworlders will get a 404 on POST
#
# user = users.get_current_user()
# # raise SuspiciousOperation(str(user))
# if user:
# if not users.is_current_user_admin():
# raise Http404
# else:
# raise Http404
#
# check for unfriendly bot first
#
self._botdetector.filter_traffic(request)
#
#
# create an instance of the LongURL object, validate the long URL, and if successful load the LongURL instance with it
#
l = LongURL(request)
#
# generate the shorturl and either persist both the long and short urls if new,
# or lookup the matching short url if it already exists (i.e. the long url was submitted a 2nd or subsequent time)
#
shorturl = l.get_or_make_shorturl(request)
#
# get document title
#
response_data = {}
response_data['errmsg'] = ''
title = None
p = Parsers()
try:
title = p.get_title(l.longurl)
        except Exception as e:
            response_data['errmsg'] = 'The long URL responded with `%s` when attempting to get its page title for link generation. The long URL site may have Google Cloud or this service blacklisted as a possible source of bot traffic. Your short URLs will still resolve, though.' % e.message
#
# prepare JSON and add shorturl to return it to the caller
#
response_data['version'] = settings.SNAKR_VERSION
response_data['shorturl'] = shorturl
#
# return the doc title too if we got one
#
response_data['title'] = title
if title:
lt = len(title)
ls = len(shorturl)
ltmax = 140 - ls - 1
if lt > ltmax:
socialmediapost = title[:ltmax-3]+'... '+shorturl
else:
socialmediapost = title + ' ' + shorturl
response_data['socialmediapost'] = socialmediapost
#
# meta tag values as well if requested
#
if settings.RETURN_ALL_META:
j = json.JSONEncoder()
for key, value in request.META.items():
                # Only try to encode values of JSON-serializable types.
                if isinstance(value, (list, dict, str, unicode, int, float, bool, type(None))):
try:
response_data[key] = j.encode(value)
except:
response_data[key] = 'nonserializable'
#
# return JSON to the caller
#
return HttpResponse(json.dumps(response_data), content_type="application/json")
def _test_post_handler(self, request, *args, **kwargs):
return HttpResponse("<H2>Test value: {%s}</H2>", content_type="text/html")
|
import pytest
from cleo import CommandTester
@pytest.mark.parametrize(
"commandline,expected",
[
("-c my_new_container", "container my_new_container created.\n"),
# ("-m my_container", "X-Object-Meta: test\n"),
# ("-u my_container --headers {\"X-Container-Meta-Test\": \"my metadata\"}"),
("-d my_container", "container my_container deleted.\n"),
],
)
def test_get_containers_command(cmd_app, commandline, expected):
command = cmd_app.find("containers")
command_tester = CommandTester(command)
command_tester.execute(commandline)
assert command_tester.io.fetch_output() == expected
|
#
# This file is part of LiteDRAM.
#
# Copyright (c) 2018-2021 Florent Kermarrec <[email protected]>
# Copyright (c) 2020 Antmicro <www.antmicro.com>
# SPDX-License-Identifier: BSD-2-Clause
import math
from migen import *
from litex.soc.interconnect import stream
from litedram.common import LiteDRAMNativePort
from litedram.frontend import dma
# Helpers ------------------------------------------------------------------------------------------
def _inc(signal, modulo):
if modulo == 2**len(signal):
return signal.eq(signal + 1)
else:
return If(signal == (modulo - 1),
signal.eq(0)
).Else(
signal.eq(signal + 1)
)
# LiteDRAMFIFOCtrl ---------------------------------------------------------------------------------
class _LiteDRAMFIFOCtrl(Module):
def __init__(self, base, depth):
self.base = base
self.depth = depth
self.level = Signal(max=depth+1)
# # #
# To write buffer
self.writable = Signal()
self.write_address = Signal(max=depth)
# From write buffer
self.write = Signal()
# To read buffer
self.readable = Signal()
self.read_address = Signal(max=depth)
# From read buffer
self.read = Signal()
# # #
produce = self.write_address
consume = self.read_address
self.sync += [
If(self.write,
_inc(produce, depth)
),
If(self.read,
_inc(consume, depth)
),
self.level.eq(self.level + self.write - self.read),
]
self.comb += [
self.writable.eq(self.level < depth),
self.readable.eq(self.level > 0)
]
# LiteDRAMFIFOWriter -------------------------------------------------------------------------------
class _LiteDRAMFIFOWriter(Module):
def __init__(self, data_width, port, ctrl, fifo_depth=16):
self.sink = sink = stream.Endpoint([("data", data_width)])
# # #
self.submodules.writer = writer = dma.LiteDRAMDMAWriter(port, fifo_depth=fifo_depth)
self.comb += [
writer.sink.valid.eq(sink.valid & ctrl.writable),
writer.sink.address.eq(ctrl.base + ctrl.write_address),
writer.sink.data.eq(sink.data),
If(writer.sink.valid & writer.sink.ready,
sink.ready.eq(1),
ctrl.write.eq(1)
),
]
# LiteDRAMFIFOReader -------------------------------------------------------------------------------
class _LiteDRAMFIFOReader(Module):
def __init__(self, data_width, port, ctrl, fifo_depth=16):
self.source = source = stream.Endpoint([("data", data_width)])
# # #
self.submodules.reader = reader = dma.LiteDRAMDMAReader(port, fifo_depth=fifo_depth)
self.comb += [
reader.sink.valid.eq(ctrl.readable),
reader.sink.address.eq(ctrl.base + ctrl.read_address),
If(reader.sink.valid & reader.sink.ready,
ctrl.read.eq(1)
)
]
self.comb += reader.source.connect(source)
# _LiteDRAMFIFO ------------------------------------------------------------------------------------
class _LiteDRAMFIFO(Module):
"""LiteDRAM frontend that allows to use DRAM as a FIFO"""
def __init__(self, data_width, base, depth, write_port, read_port,
writer_fifo_depth = 16,
reader_fifo_depth = 16):
assert isinstance(write_port, LiteDRAMNativePort)
assert isinstance(read_port, LiteDRAMNativePort)
self.sink = stream.Endpoint([("data", data_width)])
self.source = stream.Endpoint([("data", data_width)])
# # #
self.submodules.ctrl = _LiteDRAMFIFOCtrl(base, depth)
self.submodules.writer = _LiteDRAMFIFOWriter(data_width, write_port, self.ctrl, writer_fifo_depth)
self.submodules.reader = _LiteDRAMFIFOReader(data_width, read_port, self.ctrl, reader_fifo_depth)
self.comb += [
self.sink.connect(self.writer.sink),
self.reader.source.connect(self.source)
]
# LiteDRAMFIFO -------------------------------------------------------------------------------------
class LiteDRAMFIFO(Module):
"""LiteDRAM FIFO with optional/automatic Bypass.
Description
-----------
┌──────────┐ ┌──────────┐
Sink │ Pre- │ Bypass │ Post- │ Source
─────────► FIFO ├────────► FIFO ├───────►
└────┬─────┘ └─────▲────┘
│ │
┌────▼─────┐ ┌─────┴────┐
│ Pre- │ │ Post- │
│Converter │ │Converter │
└────┬─────┘ └─────▲────┘
│ │
│ ┌─────────────┐ │
│ │ DRAM │ │
└──► FIFO ├───┘
└──────┬──────┘
│
▼
DRAM
    The DRAM FIFO allows the creation of a very large FIFO with storage in DRAM. The data-width
    of the input/output streams is automatically adapted to the DRAM's data-width with the
    Pre/Post converters, and the module switches seamlessly between 2 modes:
- 1) Bypass mode.
- 2) DRAM mode.
    1) The module is initialized in Bypass mode, connecting its Sink to its Source.
Backpressure from the Source is propagated from the Source to the Post-FIFO, Pre-FIFO
and the Sink.
┌──────────┐ ┌──────────┐
Sink │ Pre- │ Bypass │ Post- │ Source
─────────► FIFO ├────────► FIFO ├───────►
└──────────┘ └──────────┘
Backpressure
◄─────────────────────
Once the Post-FIFO is full and the Pre-FIFO has enough data to form a DRAM Word, the module
switches to DRAM mode.
2) In DRAM mode, the Bypass connection is disabled and Pre-FIFO's Source is redirected to
Pre-Converter's Sink. Once Pre-Converter has a full DRAM word, the word can be written to the
DRAM FIFO's Sink
┌──────────┐ ┌──────────┐
Sink │ Pre- │ │ Post- │ Source
─────────► FIFO │ │ FIFO ├───────►
└────┬─────┘ └─────▲────┘
│ │
┌────▼─────┐ ┌─────┴────┐
│ Pre- │ │ Post- │
│Converter │ │Converter │
└────┬─────┘ └─────▲────┘
│ │
│ ┌─────────────┐ │
│ │ DRAM │ │
└──► FIFO ├───┘
└──────┬──────┘
│
▼
DRAM
    The data read back from the DRAM FIFO is presented on the DRAM FIFO's Source and fed to the
    Post-Converter, which re-generates the stream with the correct data-width. Data is then
    produced on the module's Source.
    Once there is no more data in the Pre-Converter/DRAM FIFO/Post-Converter path and the
    Pre-FIFO's level is below the threshold, the module switches back to Bypass mode.
Parameters
----------
data_width : int, in
FIFO data-width.
base : int, in
FIFO base address in DRAM (bytes).
    depth : int, in
FIFO depth (bytes).
write_port: LiteDRAMNativePort
DRAM Write port.
read_port: LiteDRAMNativePort
DRAM Read port.
with_bypass: bool, in
Automatic Bypass Mode Enable.
"""
def __init__(self, data_width, base, depth, write_port, read_port, with_bypass=False,
pre_fifo_depth = 16,
post_fifo_depth = 16):
assert isinstance(write_port, LiteDRAMNativePort)
assert isinstance(read_port, LiteDRAMNativePort)
self.sink = stream.Endpoint([("data", data_width)])
self.source = stream.Endpoint([("data", data_width)])
# # #
# Parameters.
# -----------
assert write_port.data_width == read_port.data_width
port_data_width = write_port.data_width
port_address_width = write_port.address_width
assert data_width <= port_data_width
data_width_ratio = port_data_width//data_width
if not with_bypass:
assert data_width_ratio == 1
fifo_base = int(base/(port_data_width/8))
fifo_depth = int(depth/(port_data_width/8))
pre_fifo_depth = max( pre_fifo_depth, 2*data_width_ratio)
post_fifo_depth = max(post_fifo_depth, 2*data_width_ratio)
# Submodules.
# -----------
# Pre-FIFO.
self.submodules.pre_fifo = pre_fifo = stream.SyncFIFO([("data", data_width)], pre_fifo_depth)
# Pre-Converter.
self.submodules.pre_converter = pre_converter = stream.Converter(data_width, port_data_width)
# DRAM-FIFO.
self.submodules.dram_fifo = dram_fifo = _LiteDRAMFIFO(
data_width = port_data_width,
base = fifo_base,
depth = fifo_depth,
write_port = write_port,
read_port = read_port,
)
# Post-Converter.
self.submodules.post_converter = post_converter = stream.Converter(port_data_width, data_width)
# Post-FIFO.
self.submodules.post_fifo = post_fifo = stream.SyncFIFO([("data", data_width)], post_fifo_depth)
# Data-Flow.
# ----------
dram_bypass = Signal()
dram_store = Signal()
dram_store_threshold = Signal()
self.comb += [
# Sink --> Pre-FIFO.
self.sink.connect(pre_fifo.sink),
# DRAM Threshold. We can only enable path to DRAM when we have enough data for a full
# DRAM word.
dram_store_threshold.eq(pre_fifo.level >= data_width_ratio),
# Bypass / DRAM.
If(with_bypass & dram_bypass,
# Pre-FIFO --> Post-FIFO.
pre_fifo.source.connect(post_fifo.sink),
).Else(
# Pre-FIFO --> Pre-Converter.
If(dram_store | (not with_bypass),
pre_fifo.source.connect(pre_converter.sink),
),
# Post-Converter --> Post-FIFO.
post_converter.source.connect(post_fifo.sink)
),
# Pre-Converter --> DRAM-FIFO.
pre_converter.source.connect(dram_fifo.sink),
# DRAM-FIFO --> Post-Converter.
dram_fifo.source.connect(post_converter.sink),
# Post-FIFO --> Source.
post_fifo.source.connect(self.source)
]
# FSM.
# ----
if with_bypass:
dram_first = Signal()
dram_inc = Signal()
dram_dec = Signal()
dram_cnt = Signal(port_address_width)
dram_inc_mod = Signal(max(int(math.log2(data_width_ratio)), 1))
dram_dec_mod = Signal(max(int(math.log2(data_width_ratio)), 1))
self.submodules.fsm = fsm = FSM(reset_state="BYPASS")
fsm.act("BYPASS",
dram_bypass.eq(1),
# Switch to DRAM mode when enough data to store a DRAM word.
If(dram_store_threshold,
NextValue(dram_first, 1),
NextValue(dram_cnt, 0),
NextState("DRAM")
)
)
fsm.act("DRAM",
# Store in DRAM.
dram_store.eq(1),
# Increment DRAM Data Count on Pre-Converter's Sink cycle.
If(pre_converter.sink.valid & pre_converter.sink.ready,
dram_inc.eq(1),
NextValue(dram_first, 0),
If(data_width_ratio > 1,
NextValue(dram_inc_mod, dram_inc_mod + 1),
)
),
# Decrement DRAM Data Count on Post-Converter's Source cycle.
If(post_converter.source.valid & post_converter.source.ready,
dram_dec.eq(1),
If(data_width_ratio > 1,
NextValue(dram_dec_mod, dram_dec_mod + 1),
)
),
# Maintain DRAM Data Count.
NextValue(dram_cnt, dram_cnt + dram_inc - dram_dec),
# Switch back to Bypass mode when no remaining DRAM word.
If((dram_first == 0) & (dram_cnt == 0) & (dram_inc_mod == 0) & (dram_dec_mod == 0),
dram_store.eq(0),
NextState("BYPASS")
)
)
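# --- Hedged usage sketch (not in the original module) ---
# Instantiating the FIFO inside a LiteX SoC; the crossbar calls and parameter
# values are illustrative assumptions, not part of this module.
# write_port = self.sdram.crossbar.get_port(mode="write")
# read_port  = self.sdram.crossbar.get_port(mode="read")
# self.submodules.dram_fifo = LiteDRAMFIFO(
#     data_width  = 32,
#     base        = 0x0000_0000,
#     depth       = 0x0100_0000,  # 16 MiB of DRAM reserved as FIFO storage
#     write_port  = write_port,
#     read_port   = read_port,
#     with_bypass = True,
# )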
|
"""
入口程序
包都按需导入, 不需要使用的模块则不会导入
因此安装过程可以选用不完整安装
但依赖模块都都是固定的
"""
import sys
import os
from conf import Config
app = None
def can_not_load(name):
print(f"无法加载 {name} 系统, 该系统或其依赖可能不存在", file=sys.stderr)
def main():
"""
入口程序
:return:
"""
if __name__ != "__main__" and Config.program != "website":
print("运行程序出错", file=sys.stderr)
        sys.exit(1)
if Config.mysql_url is None or Config.mysql_name is None:
print("请提供MySQL信息")
sys.exit(1)
program_name = Config.program
if program_name == "setup": # setup程序不需要数据库链接等操作
__main = os.path.dirname(os.path.abspath(__file__))
exe = list(os.path.split(sys.executable))
exe[-1] = exe[-1].replace("pythonw", "python")
exe = os.path.join(*exe)
res = os.system(f"{exe} {os.path.join(__main, 'init.py')} "
f"--mysql_url={Config.mysql_url} "
f"--mysql_name={Config.mysql_name} "
f"--mysql_passwd={Config.mysql_passwd} "
f"--mysql_port={Config.mysql_port} "
f"--program=setup")
if res != 0:
print("初始化程序加载失败, 请检查配置是否正确而", file=sys.stderr)
sys.exit(1)
sys.exit(0)
    import pymysql  # pymysql is only needed from this point on
try:
from sql.db import DB
mysql = DB()
except pymysql.Error:
print("无法连接到 MySQL")
sys.exit(1)
if program_name == "garbage":
from equipment.aliyun import Aliyun
if Config.aliyun_key is None or Config.aliyun_secret is None:
print("请提供Aliyun key信息")
sys.exit(1)
try:
from equipment.scan import HGSCapture, HGSQRCoder
import tk_ui.station as garbage_station
except ImportError:
can_not_load("垃圾站系统")
sys.exit(1)
aliyun = Aliyun()
cap = HGSCapture()
qr = HGSQRCoder(cap)
station = garbage_station.GarbageStation(mysql, cap, qr, aliyun)
station.mainloop()
elif program_name == "ranking":
try:
import tk_ui.ranking as ranking_station
except ImportError:
can_not_load("排行榜系统")
sys.exit(1)
station = ranking_station.RankingStation(mysql)
station.mainloop()
elif program_name == "manager":
try:
import tk_ui.admin as admin_station
except ImportError:
can_not_load("管理员系统")
sys.exit(1)
station = admin_station.AdminStation(mysql)
station.mainloop()
elif program_name == "website":
try:
from app import creat_web, App
from flask import Flask
from app.views import register
except ImportError:
can_not_load("在线排行榜服务")
sys.exit(1)
global app
if __name__ == "__main__":
            app = creat_web(mysql)  # expose the app interface
print("Waitress Web 服务启动 访问: http://127.0.0.1:8080/")
app.run_waitress(host='0.0.0.0', port="8080")
else:
            tmp = creat_web(mysql)  # expose the app interface
app = tmp.get_app()
else:
can_not_load(program_name)
sys.exit(1)
main()
|
from rest_framework import mixins, viewsets, status
from rest_framework.decorators import action
from rest_framework.response import Response
from rest_framework.permissions import (
AllowAny,
IsAuthenticated
)
from api.serializers import (RetrieveCourseModelSerializer,
ResultContestModelSerializer, ListCourseModelSerializer,
ViewVideoModelSerializer, RetrieveViewVideo, CourseModelSerializer,
ResultContestOnly)
from api.models import Course, ResultContest, ViewVideo
from django_filters import rest_framework as filters
class ViewVideoViewSet(viewsets.GenericViewSet,
mixins.CreateModelMixin,
mixins.ListModelMixin):
permission_classes = [IsAuthenticated]
queryset = ViewVideo.objects.all()
    lookup_field = 'id'
filter_backends = (filters.DjangoFilterBackend,)
def get_serializer_class(self):
if self.action == 'list':
return RetrieveViewVideo
else:
return ViewVideoModelSerializer
class ViewVideoFilters(filters.FilterSet):
class Meta:
model = ViewVideo
fields = {
'video': ['exact'],
'user': ['exact'],
'course': ['exact']
}
filterset_class = ViewVideoFilters
class CourseViewSet(viewsets.GenericViewSet,
mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
mixins.ListModelMixin):
permission_classes = [AllowAny]
queryset = Course.objects.all()
    lookup_field = 'id'
filter_backends = (filters.DjangoFilterBackend,)
def get_serializer_class(self):
if self.action == 'list':
return ListCourseModelSerializer
if self.action == 'retrieve':
return RetrieveCourseModelSerializer
if self.action == 'finish':
return ResultContestModelSerializer
if self.action == 'finish_only':
return ResultContestOnly
else:
return CourseModelSerializer
class CourseFilters(filters.FilterSet):
class Meta:
model = Course
fields = {
'is_free': ['exact'],
'authorized_user': ['exact', 'contains']
}
filterset_class = CourseFilters
@action(detail=True, methods=['post'])
def finish_only(self, request, *args, **kwargs):
course = self.get_object()
data_add = request.data
data_add['course'] = course.id
serializer_class = self.get_serializer_class()
serializer = serializer_class(
course,
data=data_add,
context = self.get_serializer_context()
)
serializer.is_valid(raise_exception=True)
course = serializer.save()
return Response({'status': 'OK'}, status=status.HTTP_200_OK)
@action(detail=True, methods=['post'])
def finish(self, request, *args, **kwargs):
course = self.get_object()
data_add = request.data
data_add['course'] = course.id
serializer_class = self.get_serializer_class()
serializer = serializer_class(
course,
data=data_add,
context = self.get_serializer_context()
)
serializer.is_valid(raise_exception=True)
course = serializer.save()
return Response({'status': 'OK'}, status=status.HTTP_200_OK)
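# --- Hedged usage sketch (not in the original module) ---
# Assuming the router registers CourseViewSet under "courses", the custom
# actions are reachable as POST /courses/{id}/finish/ and
# POST /courses/{id}/finish_only/; the payload fields are illustrative only.
# client.post("/courses/1/finish/", {"user": 3, "score": 90}, format="json")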
|